# -*- coding: utf-8 -*-
import re
from module.plugins.Crypter import Crypter
class LinkdecrypterCom(Crypter):
__name__ = "LinkdecrypterCom"
__type__ = "crypter"
__version__ = "0.29"
__pattern__ = r'^unmatchable$'
__config__ = [("use_subfolder", "bool", "Save package to subfolder", True),
("subfolder_per_package", "bool", "Create a subfolder for each package", True)]
__description__ = """Linkdecrypter.com decrypter plugin"""
__license__ = "GPLv3"
__authors__ = [("zoidberg", "[email protected]"),
("flowlee", None)]
TEXTAREA_PATTERN = r'<textarea name="links" wrap="off" readonly="1" class="caja_des">(.+)</textarea>'
PASSWORD_PATTERN = r'<input type="text" name="password"'
CAPTCHA_PATTERN = r'<img class="captcha" src="(.+?)"(.*?)>'
REDIR_PATTERN = r'<i>(Click <a href="./">here</a> if your browser does not redirect you).</i>'
def setup(self):
self.password = self.getPassword()
self.req.setOption("timeout", 300)
def decrypt(self, pyfile):
retries = 5
post_dict = {"link_cache": "on", "pro_links": pyfile.url, "modo_links": "text"}
self.html = self.load('http://linkdecrypter.com/', post=post_dict, cookies=True, decode=True)
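        # The site answers with either the decrypted links, a captcha
        # challenge, or a password prompt; poll until links appear or
        # retries run out.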
while retries:
m = re.search(self.TEXTAREA_PATTERN, self.html, flags=re.S)
            if m:
                self.urls = [x for x in m.group(1).splitlines() if '[LINK-ERROR]' not in x]
                break  # links extracted; stop polling the site
m = re.search(self.CAPTCHA_PATTERN, self.html)
if m:
captcha_url = 'http://linkdecrypter.com/' + m.group(1)
result_type = "positional" if "getPos" in m.group(2) else "textual"
m = re.search(r"<p><i><b>([^<]+)</b></i></p>", self.html)
msg = m.group(1) if m else ""
self.logInfo(_("Captcha protected link"), result_type, msg)
captcha = self.decryptCaptcha(captcha_url, result_type=result_type)
if result_type == "positional":
captcha = "%d|%d" % captcha
self.html = self.load('http://linkdecrypter.com/', post={"captcha": captcha}, decode=True)
retries -= 1
elif self.PASSWORD_PATTERN in self.html:
if self.password:
self.logInfo(_("Password protected link"))
self.html = self.load('http://linkdecrypter.com/', post={'password': self.password}, decode=True)
else:
self.fail(_("Missing password"))
else:
retries -= 1
self.html = self.load('http://linkdecrypter.com/', cookies=True, decode=True)
|
import itertools
from collections import defaultdict, namedtuple
from xml.sax.saxutils import escape
import numpy
from AnyQt import QtGui
from AnyQt.QtCore import Qt
import Orange.data
from Orange.widgets import gui
from . import gui as guiutils
ColumnGroup = namedtuple("ColumnGroup", ["name", "key", "values"])
RowGroup = namedtuple("RowGroup", ["name", "var", "values"])
def group_candidates(data):
items = [attr.attributes.items() for attr in data.domain.attributes]
items = list(itertools.chain(*items))
targets = defaultdict(set)
for label, value in items:
targets[label].add(value)
    # Keep only the label keys that take at least 2 distinct values
    targets = [(key, sorted(vals)) for key, vals in targets.items()
               if len(vals) >= 2]
column_groups = [ColumnGroup(key, key, values)
for key, values in sorted(targets)]
disc_vars = [var for var in data.domain.class_vars + data.domain.metas
if isinstance(var, Orange.data.DiscreteVariable)
and len(var.values) >= 2]
row_groups = [RowGroup(var.name, var, var.values)
for var in disc_vars]
return column_groups, row_groups
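
# group_candidates() scans the attribute labels (e.g. {'treatment': 'control'})
# and the discrete class/meta variables, yielding one ColumnGroup per label key
# with at least two distinct values and one RowGroup per discrete variable.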
@guiutils.standarditem_from.register(ColumnGroup)
def standarditem_from_columngroup(colgroup):
item = QtGui.QStandardItem(colgroup.name)
# item.setIcon(pkg_path('columnset.svg'))
item.setToolTip("Split by column label: '{!s}'"
.format(escape(colgroup.name)))
item.setFlags(item.flags() & ~Qt.ItemIsEditable)
item.setData(colgroup, Qt.UserRole)
children = [guiutils.standarditem_from(val)
for val in colgroup.values]
item.appendRows(children)
return item
@guiutils.standarditem_from.register(RowGroup)
def standarditem_from_rowgroup(rowgroup):
item = QtGui.QStandardItem(rowgroup.name)
icon, _ = gui.attributeItem(rowgroup.var)
item.setIcon(icon)
item.setToolTip(guiutils.variable_tooltip(rowgroup.var))
item.setData(rowgroup, Qt.UserRole)
item.setFlags(item.flags() & ~Qt.ItemIsEditable)
children = [guiutils.standarditem_from(val)
for val in rowgroup.values]
item.appendRows(children)
return item
def group_selection_mask(data, group, indices):
"""
Return the selection masks for the group.
"""
if isinstance(group, ColumnGroup):
selected = [group.values[i] for i in indices]
target = set([(group.key, value) for value in selected])
I = [bool(set(var.attributes.items()).intersection(target))
for var in data.domain.attributes]
return numpy.array(I, dtype=bool)
elif isinstance(group, RowGroup):
target = set(indices)
X, _ = data.get_column_view(group.var)
I = numpy.zeros_like(X, dtype=bool)
for i in target:
I |= X == i
return I
else:
raise TypeError("ColumnGroup or RowGroup expected, got {}"
.format(type(group).__name__))
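
# A minimal usage sketch (hypothetical data): for a ColumnGroup built from the
# label "treatment" with values ("control", "dose1", "dose2"), indices [0, 2]
# produce a boolean mask over data.domain.attributes selecting the columns
# whose 'treatment' label is "control" or "dose2":
#
#   mask = group_selection_mask(data, column_group, [0, 2])
#   chosen = [a for a, m in zip(data.domain.attributes, mask) if m]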
|
#!/usr/bin/python3
import datetime
import glob
import logging
import os
from argparse import ArgumentParser
from pathlib import Path
import misaka
import psutil
import requests
from feedgen.feed import FeedGenerator
from pytube import YouTube
from tornado import gen, httputil, ioloop, iostream, process, web
from tornado.locks import Semaphore
__version__ = '3.0'
key = None
video_links = {}
playlist_feed = {}
channel_feed = {}
conversion_queue = {}
converting_lock = Semaphore(2)
def get_youtube_url(video):
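    """Return a direct stream URL for the given video id, caching it until the
    'expire' unix timestamp embedded in the URL's query string."""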
if video in video_links and video_links[video]['expire'] > datetime.datetime.now():
return video_links[video]['url']
yt = YouTube('http://www.youtube.com/watch?v=' + video)
vid = yt.streams.get_highest_resolution().url
parts = {
part.split('=')[0]: part.split('=')[1]
for part in vid.split('?')[-1].split('&')
}
link = {
'url': vid,
'expire': datetime.datetime.fromtimestamp(int(parts['expire']))
}
video_links[video] = link
return link['url']
class ChannelHandler(web.RequestHandler):
@gen.coroutine
def head(self, channel):
self.set_header('Content-type', 'application/rss+xml')
self.set_header('Accept-Ranges', 'bytes')
@gen.coroutine
def get(self, channel):
channel = channel.split('/')
if len(channel) < 2:
channel.append('video')
channel_name = ['/'.join(channel)]
self.set_header('Content-type', 'application/rss+xml')
if channel_name[0] in channel_feed and channel_feed[channel_name[0]]['expire'] > datetime.datetime.now():
self.write(channel_feed[channel_name[0]]['feed'])
self.finish()
return
fg = None
video = None
calls = 0
response = {'nextPageToken': ''}
while 'nextPageToken' in response.keys():
next_page = response['nextPageToken']
payload = {
'part': 'snippet,contentDetails',
'maxResults': 50,
'channelId': channel[0],
'key': key,
'pageToken': next_page
}
request = requests.get(
'https://www.googleapis.com/youtube/v3/activities',
params=payload
)
calls += 1
if request.status_code != 200:
payload = {
'part': 'snippet',
'maxResults': 1,
'forUsername': channel[0],
'key': key
}
request = requests.get(
'https://www.googleapis.com/youtube/v3/channels',
params=payload
)
response = request.json()
channel[0] = response['items'][0]['id']
channel_name.append('/'.join(channel))
payload = {
'part': 'snippet,contentDetails',
'maxResults': 50,
'channelId': channel[0],
'key': key,
'pageToken': next_page
}
request = requests.get(
'https://www.googleapis.com/youtube/v3/activities',
params=payload
)
calls += 2
response = request.json()
if request.status_code == 200:
logging.debug('Downloaded Channel Information')
else:
logging.error('Error Downloading Channel: %s', request.reason)
self.send_error(reason='Error Downloading Channel')
return
if not fg:
fg = FeedGenerator()
fg.load_extension('podcast')
fg.generator(
'PodTube (python-feedgen)',
__version__,
'https://github.com/aquacash5/PodTube'
)
for item in response['items']:
if item['snippet']['type'] != 'upload':
continue
elif 'Private' in item['snippet']['title']:
continue
else:
snippet = item['snippet']
break
logging.info(
'Channel: %s (%s)',
channel[0],
snippet['channelTitle']
)
icon = max(
snippet['thumbnails'],
key=lambda x: snippet['thumbnails'][x]['width']
)
fg.title(snippet['channelTitle'])
fg.id('http://' + self.request.host + self.request.uri)
fg.description(snippet['description'] or ' ')
fg.author(
name='Podtube',
email='[email protected]',
uri='https://github.com/aquacash5/PodTube')
fg.podcast.itunes_author(snippet['channelTitle'])
fg.image(snippet['thumbnails'][icon]['url'])
fg.link(
            href=f'http://youtube.com/channel/{channel[0]}',
rel='self'
)
fg.language('en-US')
fg.podcast.itunes_image(snippet['thumbnails'][icon]['url'])
fg.podcast.itunes_explicit('no')
fg.podcast.itunes_owner(
name='Podtube',
email='[email protected]'
)
fg.podcast.itunes_summary(snippet['description'])
fg.podcast.itunes_category(cat='Technology')
fg.updated(str(datetime.datetime.utcnow()) + 'Z')
for item in response['items']:
snippet = item['snippet']
if snippet['type'] != 'upload':
continue
if 'private' in snippet['title'].lower():
continue
current_video = item['contentDetails']['upload']['videoId']
logging.debug(
'ChannelVideo: %s (%s)',
current_video,
snippet['title']
)
fe = fg.add_entry()
fe.title(snippet['title'])
fe.id(current_video)
icon = max(
snippet['thumbnails'],
key=lambda x: snippet['thumbnails'][x]['width'])
fe.podcast.itunes_image(snippet['thumbnails'][icon]['url'])
fe.updated(snippet['publishedAt'])
if channel[1] == 'video':
fe.enclosure(
url=f'http://{self.request.host}/video/{current_video}',
type="video/mp4"
)
elif channel[1] == 'audio':
fe.enclosure(
url=f'http://{self.request.host}/audio/{current_video}',
type="audio/mpeg"
)
fe.author(name=snippet['channelTitle'])
fe.podcast.itunes_author(snippet['channelTitle'])
fe.pubDate(snippet['publishedAt'])
fe.link(
href=f'http://www.youtube.com/watch?v={current_video}',
title=snippet['title']
)
fe.podcast.itunes_summary(snippet['description'])
fe.description(snippet['description'])
if not video or video['expire'] < fe.pubDate():
video = {'video': fe.id(), 'expire': fe.pubDate()}
feed = {
'feed': fg.rss_str(),
'expire': datetime.datetime.now() + datetime.timedelta(hours=calls)
}
for chan in channel_name:
channel_feed[chan] = feed
self.write(feed['feed'])
self.finish()
        if video:
            video = video['video']
            mp3_file = 'audio/{}.mp3'.format(video)
            if channel[1] == 'audio' and not os.path.exists(mp3_file) and video not in conversion_queue:
                conversion_queue[video] = {
                    'status': False,
                    'added': datetime.datetime.now()
                }
class PlaylistHandler(web.RequestHandler):
@gen.coroutine
def head(self, playlist):
self.set_header('Content-type', 'application/rss+xml')
self.set_header('Accept-Ranges', 'bytes')
@gen.coroutine
def get(self, playlist):
playlist = playlist.split('/')
if len(playlist) < 2:
playlist.append('video')
playlist_name = '/'.join(playlist)
self.set_header('Content-type', 'application/rss+xml')
if playlist_name in playlist_feed and playlist_feed[playlist_name]['expire'] > datetime.datetime.now():
self.write(playlist_feed[playlist_name]['feed'])
self.finish()
return
calls = 0
payload = {
'part': 'snippet',
'id': playlist[0],
'key': key
}
request = requests.get(
'https://www.googleapis.com/youtube/v3/playlists',
params=payload
)
calls += 1
response = request.json()
if request.status_code == 200:
logging.debug('Downloaded Playlist Information')
else:
logging.error('Error Downloading Playlist: %s', request.reason)
self.send_error(reason='Error Downloading Playlist')
return
fg = FeedGenerator()
fg.load_extension('podcast')
fg.generator(
'PodTube (python-feedgen)',
__version__,
'https://github.com/aquacash5/PodTube'
)
snippet = response['items'][0]['snippet']
icon = max(
snippet['thumbnails'],
key=lambda x: snippet['thumbnails'][x]['width']
)
logging.info(
'Playlist: %s (%s)',
playlist[0],
snippet['title']
)
fg.title(snippet['title'])
fg.id('http://' + self.request.host + self.request.uri)
fg.description(snippet['description'] or ' ')
fg.author(
name='Podtube',
email='[email protected]',
uri='https://github.com/aquacash5/PodTube'
)
fg.podcast.itunes_author(snippet['channelTitle'])
fg.image(snippet['thumbnails'][icon]['url'])
fg.link(
            href=f'http://youtube.com/playlist/?list={playlist[0]}',
rel='self'
)
fg.language('en-US')
fg.podcast.itunes_image(snippet['thumbnails'][icon]['url'])
fg.podcast.itunes_explicit('no')
fg.podcast.itunes_owner(
name='Podtube',
email='[email protected]'
)
fg.podcast.itunes_summary(snippet['description'])
fg.podcast.itunes_category(cat='Technology')
fg.updated(str(datetime.datetime.utcnow()) + 'Z')
video = None
response = {'nextPageToken': ''}
while 'nextPageToken' in response.keys():
payload = {
'part': 'snippet',
'maxResults': 50,
'playlistId': playlist[0],
'key': key,
'pageToken': response['nextPageToken']
}
request = requests.get(
'https://www.googleapis.com/youtube/v3/playlistItems',
params=payload
)
calls += 1
response = request.json()
if request.status_code == 200:
logging.debug('Downloaded Playlist Information')
else:
logging.error('Error Downloading Playlist: %s', request.reason)
self.send_error(reason='Error Downloading Playlist Items')
return
for item in response['items']:
snippet = item['snippet']
current_video = snippet['resourceId']['videoId']
if 'Private' in snippet['title']:
continue
logging.debug(
'PlaylistVideo: %s (%s)',
current_video,
snippet['title']
)
fe = fg.add_entry()
fe.title(snippet['title'])
fe.id(current_video)
icon = max(
snippet['thumbnails'],
key=lambda x: snippet['thumbnails'][x]['width']
)
fe.podcast.itunes_image(snippet['thumbnails'][icon]['url'])
fe.updated(snippet['publishedAt'])
if playlist[1] == 'video':
fe.enclosure(
url=f'http://{self.request.host}/video/{current_video}',
type="video/mp4"
)
elif playlist[1] == 'audio':
fe.enclosure(
url=f'http://{self.request.host}/audio/{current_video}',
type="audio/mpeg"
)
fe.author(name=snippet['channelTitle'])
fe.podcast.itunes_author(snippet['channelTitle'])
fe.pubDate(snippet['publishedAt'])
fe.link(
href=f'http://www.youtube.com/watch?v={current_video}',
title=snippet['title']
)
fe.podcast.itunes_summary(snippet['description'])
fe.description(snippet['description'])
if not video or video['expire'] < fe.pubDate():
video = {'video': fe.id(), 'expire': fe.pubDate()}
feed = {
'feed': fg.rss_str(),
'expire': datetime.datetime.now() + datetime.timedelta(hours=calls)
}
playlist_feed[playlist_name] = feed
self.write(feed['feed'])
self.finish()
        if video:
            video = video['video']
            mp3_file = 'audio/{}.mp3'.format(video)
            if playlist[1] == 'audio' and not os.path.exists(mp3_file) and video not in conversion_queue:
                conversion_queue[video] = {
                    'status': False,
                    'added': datetime.datetime.now()
                }
class VideoHandler(web.RequestHandler):
def get(self, video):
logging.info('Video: %s', video)
self.redirect(get_youtube_url(video))
class AudioHandler(web.RequestHandler):
@gen.coroutine
def head(self, channel):
self.set_header('Accept-Ranges', 'bytes')
self.set_header("Content-Type", "audio/mpeg")
@gen.coroutine
def get(self, audio):
logging.info('Audio: %s (%s)', audio, self.request.remote_ip)
mp3_file = './audio/{}.mp3'.format(audio)
if not os.path.exists(mp3_file):
if audio not in conversion_queue.keys():
conversion_queue[audio] = {
'status': False,
'added': datetime.datetime.now()
}
while audio in conversion_queue:
yield gen.sleep(0.5)
request_range = None
range_header = self.request.headers.get("Range")
if range_header:
# As per RFC 2616 14.16, if an invalid Range header is specified,
# the request will be treated as if the header didn't exist.
request_range = httputil._parse_request_range(range_header)
size = os.stat(mp3_file).st_size
if request_range:
start, end = request_range
if (start is not None and start >= size) or end == 0:
# As per RFC 2616 14.35.1, a range is not satisfiable only: if
# the first requested byte is equal to or greater than the
# content, or when a suffix with length 0 is specified
self.set_status(416) # Range Not Satisfiable
self.set_header("Content-Type", "audio/mpeg")
self.set_header("Content-Range", "bytes */%s" % (size,))
return
if start is not None and start < 0:
start += size
if end is not None and end > size:
# Clients sometimes blindly use a large range to limit their
# download size; cap the endpoint at the actual file size.
end = size
# Note: only return HTTP 206 if less than the entire range has been
# requested. Not only is this semantically correct, but Chrome
# refuses to play audio if it gets an HTTP 206 in response to
# ``Range: bytes=0-``.
if size != (end or size) - (start or 0):
self.set_status(206) # Partial Content
self.set_header(
"Content-Range",
httputil._get_content_range(start, end, size)
)
else:
start = end = None
if start is not None and end is not None:
content_length = end - start
elif end is not None:
content_length = end
elif start is not None:
content_length = size - start
else:
content_length = size
self.set_header("Accept-Ranges", "bytes")
self.set_header("Content-Length", content_length)
self.set_header('Content-Type', 'audio/mpeg')
content = self.get_content(mp3_file, start, end)
if isinstance(content, bytes):
content = [content]
for chunk in content:
try:
self.write(chunk)
yield self.flush()
except iostream.StreamClosedError:
return
@classmethod
def get_content(cls, abspath, start=None, end=None):
"""Retrieve the content of the requested resource which is located
at the given absolute path.
This class method may be overridden by subclasses. Note that its
signature is different from other overridable class methods
(no ``settings`` argument); this is deliberate to ensure that
``abspath`` is able to stand on its own as a cache key.
This method should either return a byte string or an iterator
of byte strings. The latter is preferred for large files
as it helps reduce memory fragmentation.
.. versionadded:: 3.1
"""
Path(abspath).touch(exist_ok=True)
with open(abspath, "rb") as audio_file:
if start is not None:
audio_file.seek(start)
if end is not None:
remaining = end - (start or 0)
else:
remaining = None
while True:
chunk_size = 1024 ** 2
if remaining is not None and remaining < chunk_size:
chunk_size = remaining
chunk = audio_file.read(chunk_size)
if chunk:
if remaining is not None:
remaining -= len(chunk)
yield chunk
else:
if remaining is not None:
assert remaining == 0
return
def on_connection_close(self):
logging.info(
'Audio: User quit during transcoding (%s)',
self.request.remote_ip)
class FileHandler(web.RequestHandler):
def get(self):
logging.info('ReadMe (%s)', self.request.remote_ip)
self.write('<html><head><title>PodTube (v')
self.write(__version__)
self.write(')</title><link rel="shortcut icon" href="favicon.ico"><link rel="stylesheet" type="text/css" href="markdown.css"></head><body>')
with open('README.md') as text:
self.write(
misaka.html(
text.read(),
extensions=('tables', 'fenced-code')
)
)
self.write('</body></html>')
def cleanup():
# Globals
global video_links
global playlist_feed
global channel_feed
current_time = datetime.datetime.now()
# Video Links
video_links_length = len(video_links)
video_links = {
video:
info
for video, info in video_links.items()
if info['expire'] > current_time
}
video_links_length -= len(video_links)
if video_links_length:
logging.info('Cleaned %s items from video list', video_links_length)
# Playlist Feeds
playlist_feed_length = len(playlist_feed)
playlist_feed = {
playlist:
info
for playlist, info in playlist_feed.items()
if info['expire'] > current_time
}
playlist_feed_length -= len(playlist_feed)
if playlist_feed_length:
logging.info(
'Cleaned %s items from playlist feeds',
playlist_feed_length
)
# Channel Feeds
channel_feed_length = len(channel_feed)
channel_feed = {
channel:
info
for channel, info in channel_feed.items()
if info['expire'] > current_time
}
channel_feed_length -= len(channel_feed)
if channel_feed_length:
logging.info(
'Cleaned %s items from channel feeds',
channel_feed_length
)
    # Space Check: if less than 512 MiB is free, delete converted audio files
    # (oldest first) until at least 15 GiB is free again.
    size = psutil.disk_usage('./audio')
    if size.free < 536870912:
        for f in sorted(glob.glob('./audio/*mp3'), key=lambda a_file: os.path.getctime(a_file)):
            os.remove(f)
            logging.info('Deleted %s', f)
            size = psutil.disk_usage('./audio')
            if size.free > 16106127360:
                return
@gen.coroutine
def convert_videos():
global conversion_queue
global converting_lock
try:
remaining = [
key
for key in conversion_queue.keys()
if not conversion_queue[key]['status']
]
video = sorted(
remaining,
key=lambda v: conversion_queue[v]['added']
)[0]
conversion_queue[video]['status'] = True
except Exception:
return
with (yield converting_lock.acquire()):
logging.info('Converting: %s', video)
audio_file = './audio/{}.mp3'.format(video)
ffmpeg_process = process.Subprocess([
'ffmpeg',
'-loglevel', 'panic',
'-y',
'-i', get_youtube_url(video),
'-f', 'mp3', audio_file + '.temp'
])
try:
yield ffmpeg_process.wait_for_exit()
os.rename(audio_file + '.temp', audio_file)
except Exception as ex:
            logging.error('Error converting file: %s', ex)
os.remove(audio_file + '.temp')
finally:
del conversion_queue[video]
def make_app():
return web.Application([
(r'/playlist/(.*)', PlaylistHandler),
(r'/channel/(.*)', ChannelHandler),
(r'/video/(.*)', VideoHandler),
(r'/audio/(.*)', AudioHandler),
(r'/', FileHandler),
(r'/(.*)', web.StaticFileHandler, {'path': '.'})
], compress_response=True)
if __name__ == '__main__':
os.chdir(os.path.dirname(os.path.abspath(__file__)))
if not os.path.exists('./audio'):
os.mkdir('audio')
parser = ArgumentParser(prog='PodTube')
parser.add_argument(
'key',
help='Google\'s API Key'
)
parser.add_argument(
'port',
type=int,
default=80,
nargs='?',
help='Port Number to listen on'
)
parser.add_argument(
'--log-file',
type=str,
default='podtube.log',
metavar='FILE',
help='Location and name of log file'
)
parser.add_argument(
'--log-format',
type=str,
default='%(asctime)-15s %(message)s',
metavar='FORMAT',
help='Logging format using syntax for python logging module'
)
parser.add_argument(
'-v', '--version',
action='version',
version="%(prog)s " + __version__
)
args = parser.parse_args()
logging.basicConfig(
level=logging.INFO,
format=args.log_format,
filename=args.log_file,
filemode='a'
)
key = args.key
for file in glob.glob('audio/*.temp'):
os.remove(file)
app = make_app()
app.listen(args.port)
logging.info(f'Started listening on {args.port}')
ioloop.PeriodicCallback(
callback=cleanup,
callback_time=1000
).start()
ioloop.PeriodicCallback(
callback=convert_videos,
callback_time=1000
).start()
ioloop.IOLoop.instance().start()
|
import pandas as pd
import numpy as np
import re
from nltk import word_tokenize
from nltk.corpus import wordnet
import pickle
def feature_extraction_approach_2(name):
consonants = ['b','c','d','f','g','h','j','k','l','m','n','p','q','r','s','t','v','w','x','y','z']
vowels = ['a','e','i','o','u']
bobua_consonants = ['b','l','m','n']
bobua_vowels = ['u','o']
kiki_consonants = ['k','p','t']
kiki_vowels = ['i','e']
number_of_consonants = 0
number_of_vowels = 0
number_of_bobua_consonants = 0
number_of_bobua_vowels = 0
number_of_kiki_consonants = 0
number_of_kiki_vowels = 0
last_character = 0
len_of_name = 0
    name_array = list(name)
    for char in name_array:
        if char in consonants:
            number_of_consonants += 1
            if char in bobua_consonants:
                number_of_bobua_consonants += 1
            elif char in kiki_consonants:
                number_of_kiki_consonants += 1
        elif char in vowels:
            number_of_vowels += 1
            if char in bobua_vowels:
                number_of_bobua_vowels += 1
            elif char in kiki_vowels:
                number_of_kiki_vowels += 1
    # Does the name end in a vowel?
    if name_array and name_array[-1] in vowels:
        last_character = 1
    len_of_name = len(name_array)
    features = [number_of_consonants, number_of_vowels, number_of_bobua_consonants, number_of_bobua_vowels,
                number_of_kiki_consonants, number_of_kiki_vowels, last_character, len_of_name]
    return features
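
# For example, feature_extraction_approach_2("bob") returns
# [2, 1, 2, 1, 0, 0, 0, 3]: 2 consonants, 1 vowel, 2 bobua consonants,
# 1 bobua vowel, no kiki consonants or vowels, does not end in a vowel,
# and has length 3.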
def model(data):
dataframe_to_parse = data
dataframe_to_parse['noc'] = 0
dataframe_to_parse['nov'] = 0
dataframe_to_parse['nobc'] = 0
dataframe_to_parse['nobv'] = 0
dataframe_to_parse['nokc'] = 0
dataframe_to_parse['nokv'] = 0
dataframe_to_parse['last'] = 0
dataframe_to_parse['len'] = 0
#data['one-gram'] = [one_gram[ii] for ii in range(len(one_gram))]
#data['bi-gram'] = [bi_gram[ii] for ii in range(len(bi_gram))]
#data['tri-gram'] = [tri_gram[ii] for ii in range(len(tri_gram))]
noc = []
nov = []
nobc = []
nobv = []
nokc = []
nokv = []
last = []
leng = []
#print "Starting feature Extractiont"
name_list = []
for name in data.Firstname:
name_list.append(name)
for ii in range(len(name_list)):
feature = feature_extraction_approach_2(name_list[ii])
noc.append(feature[0])
nov.append(feature[1])
nobc.append(feature[2])
nobv.append(feature[3])
nokc.append(feature[4])
nokv.append(feature[5])
last.append(feature[6])
leng.append(feature[7])
#print "In between feature Extraction"
data['noc'] = [noc[ii] for ii in range(len(noc))]
data['nov'] = [nov[ii] for ii in range(len(nov))]
data['nobc'] = [nobc[ii] for ii in range(len(nobc))]
data['nobv'] = [nobv[ii] for ii in range(len(nobv))]
data['nokc'] = [nokc[ii] for ii in range(len(nokc))]
data['nokv'] = [nokv[ii] for ii in range(len(nokv))]
data['last'] = [last[ii] for ii in range(len(last))]
data['len'] = [leng[ii] for ii in range(len(leng))]
dataframe_to_parse = dataframe_to_parse.drop(['OrderId','Firstname'],axis = 1)
#print "Running model on data"
dataframe_to_parse = dataframe_to_parse.values
loaded_model = pickle.load(open('dataModel.sav','rb'))
result = loaded_model.predict(dataframe_to_parse)
data['Gender'] = 0
data['Gender'] = [result[ii] for ii in range(len(result))]
data = data.drop(['noc','nov','nobc','nobv','nokc','nokv','last','len'],axis = 1)
return data
|
#!/usr/bin/env python
# Licensed to Cloudera, Inc. under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. Cloudera, Inc. licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import json
import logging
import posixpath
import threading
from django.utils.translation import ugettext as _
from desktop.conf import DEFAULT_USER
from desktop.lib.exceptions_renderable import PopupException
from desktop.lib.i18n import smart_str
from desktop.lib.rest.http_client import HttpClient
from desktop.lib.rest.resource import Resource
from hadoop import cluster
LOG = logging.getLogger(__name__)
_API_VERSION = 'v1'
_JSON_CONTENT_TYPE = 'application/json'
API_CACHE = None
API_CACHE_LOCK = threading.Lock()
def get_resource_manager(username=None):
global API_CACHE
if API_CACHE is None:
API_CACHE_LOCK.acquire()
try:
if API_CACHE is None:
yarn_cluster = cluster.get_cluster_conf_for_job_submission()
if yarn_cluster is None:
          raise PopupException(_('No Resource Manager is available.'))
API_CACHE = ResourceManagerApi(yarn_cluster.RESOURCE_MANAGER_API_URL.get(), yarn_cluster.SECURITY_ENABLED.get(), yarn_cluster.SSL_CERT_CA_VERIFY.get())
finally:
API_CACHE_LOCK.release()
API_CACHE.setuser(username) # Set the correct user
return API_CACHE
class YarnFailoverOccurred(Exception):
pass
class ResourceManagerApi(object):
def __init__(self, rm_url, security_enabled=False, ssl_cert_ca_verify=False):
self._url = posixpath.join(rm_url, 'ws', _API_VERSION)
self._client = HttpClient(self._url, logger=LOG)
self._root = Resource(self._client)
self._security_enabled = security_enabled
self._thread_local = threading.local() # To store user info
if self._security_enabled:
self._client.set_kerberos_auth()
self._client.set_verify(ssl_cert_ca_verify)
def _get_params(self):
params = {}
if self.username != DEFAULT_USER.get(): # We impersonate if needed
params['doAs'] = self.username
if not self.security_enabled:
params['user.name'] = DEFAULT_USER.get()
return params
def __str__(self):
return "ResourceManagerApi at %s" % (self._url,)
def setuser(self, user):
curr = self.user
self._thread_local.user = user
return curr
@property
def user(self):
return self.username # Backward compatibility
@property
def username(self):
try:
return self._thread_local.user
except AttributeError:
return DEFAULT_USER.get()
@property
def url(self):
return self._url
@property
def security_enabled(self):
return self._security_enabled
def cluster(self, **kwargs):
params = self._get_params()
return self._execute(self._root.get, 'cluster/info', params=params, headers={'Accept': _JSON_CONTENT_TYPE})
def apps(self, **kwargs):
params = self._get_params()
params.update(kwargs)
return self._execute(self._root.get, 'cluster/apps', params=params, headers={'Accept': _JSON_CONTENT_TYPE})
def app(self, app_id):
params = self._get_params()
return self._execute(self._root.get, 'cluster/apps/%(app_id)s' % {'app_id': app_id}, params=params, headers={'Accept': _JSON_CONTENT_TYPE})
def kill(self, app_id):
data = {'state': 'KILLED'}
token = None
# Tokens are managed within the kill method but should be moved out when not alpha anymore or we support submitting an app.
if self.security_enabled and False:
full_token = self.delegation_token()
if 'token' not in full_token:
raise PopupException(_('YARN did not return any token field.'), detail=smart_str(full_token))
data['X-Hadoop-Delegation-Token'] = token = full_token.pop('token')
LOG.debug('Received delegation token %s' % full_token)
try:
params = self._get_params()
return self._execute(self._root.put, 'cluster/apps/%(app_id)s/state' % {'app_id': app_id}, params=params, data=json.dumps(data), contenttype=_JSON_CONTENT_TYPE)
finally:
if token:
self.cancel_token(token)
def delegation_token(self):
params = self._get_params()
data = {'renewer': self.username}
return self._execute(self._root.post, 'cluster/delegation-token', params=params, data=json.dumps(data), contenttype=_JSON_CONTENT_TYPE)
def cancel_token(self, token):
params = self._get_params()
headers = {'Hadoop-YARN-RM-Delegation-Token': token}
    LOG.debug('Canceling delegation token of %s' % self.username)
return self._execute(self._root.delete, 'cluster/delegation-token', params=params, headers=headers)
def _execute(self, function, *args, **kwargs):
response = None
try:
response = function(*args, **kwargs)
except Exception, e:
raise PopupException(_('YARN RM returned a failed response: %s') % e)
return response
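
# Illustrative use (a sketch, not part of the original module; assumes a YARN
# cluster is configured for the current Hue instance):
#   api = get_resource_manager('hue')
#   info = api.cluster()               # GET ws/v1/cluster/info
#   apps = api.apps(states='RUNNING')  # extra kwargs become query parameters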
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# Copyright (C) 2006-2013 Edgewall Software
# Copyright (C) 2006 Matthew Good <[email protected]>
# All rights reserved.
#
# This software is licensed as described in the file COPYING, which
# you should have received as part of this distribution. The terms
# are also available at http://trac.edgewall.org/wiki/TracLicense.
#
# This software consists of voluntary contributions made by many
# individuals. For the exact contribution history, see the revision
# history and logs, available at http://trac.edgewall.org/log/.
#
# Author: Matthew Good <[email protected]>
from __future__ import print_function
import errno
import fileinput
import sys
from getpass import getpass
from hashlib import md5
from optparse import OptionParser
def ask_pass():
pass1 = getpass('New password: ')
pass2 = getpass('Re-type new password: ')
if pass1 != pass2:
print("They don't match, sorry", file=sys.stderr)
sys.exit(1)
return pass1
def get_digest(userprefix, password=None):
if password is None:
password = ask_pass()
return make_digest(userprefix, password)
def make_digest(userprefix, password):
return userprefix + md5(userprefix + password).hexdigest()
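
# The emitted line is the standard htdigest format,
#   username:realm:md5("username:realm:password")
# where `userprefix` is the leading "username:realm:" part.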
usage = "%prog [-c] [-b] passwordfile realm username"
parser = OptionParser(usage=usage)
parser.add_option('-c', action='store_true', dest='create', default=False,
help="Create a new file")
parser.add_option('-b', action='store_true', dest='batch', default=False,
help="Batch mode, password on the commandline.")
if len(sys.argv) <= 1:
parser.print_help()
sys.exit(1)
opts, args = parser.parse_args()
try:
if opts.batch:
filename, realm, username, password = args
else:
filename, realm, username = args
password = None
except ValueError:
parser.error("Wrong number of arguments")
prefix = '%s:%s:' % (username, realm)
if opts.create:
try:
f = open(filename, 'w')
except EnvironmentError as e:
if e.errno == errno.EACCES:
print("Unable to update file", filename, file=sys.stderr)
sys.exit(1)
else:
raise
try:
print(get_digest(prefix, password), file=f)
finally:
f.close()
else:
try:
matched = False
for line in fileinput.input(filename, inplace=True):
if line.startswith(prefix):
if not matched:
print(get_digest(prefix, password))
matched = True
else:
print(line.rstrip())
if not matched:
with open(filename, 'a') as f:
print(get_digest(prefix, password), file=f)
except EnvironmentError as e:
if e.errno == errno.ENOENT:
print("Could not open passwd file %s for reading." % filename,
file=sys.stderr)
print("Use -c option to create a new one.", file=sys.stderr)
sys.exit(1)
elif e.errno == errno.EACCES:
print("Unable to update file", filename, file=sys.stderr)
sys.exit(1)
else:
raise
|
from __future__ import print_function
import numpy as np
from pylab import show, imshow, colorbar, plot
from landlab import RasterModelGrid
from landlab.components.flow_routing.flow_routing_D8 import RouteFlowD8
from landlab.components.flow_accum.flow_accumulation2 import AccumFlow
from landlab.plot.imshow import imshow_grid
from landlab.components.dem_support.dem_boundary_conditions import WatershedBoundaryConditions
from random import uniform
#reload(flow_routing_D8)
#reload(flow_accumulation)
#reload(raster)
def main():
nr = 5
nc = 6
nnodes = nr*nc
dx=3
#instantiate grid
rg = RasterModelGrid(nr, nc, dx)
#rg.set_inactive_boundaries(False, False, True, True)
nodata_val=-1
z = nodata_val*np.ones( nnodes )
#set-up interior elevations with random numbers
#for i in range(0, nnodes):
# if rg.is_interior(i):
# elevations[i]=random.random_sample()
    #set-up with prescribed elevations to test drainage area calculation
helper = [7,8,9,10,13,14,15,16]
for i in xrange(0, len(helper)):
#print 'helper[i]', helper[i]
z[helper[i]]=2+uniform(-0.5,0.5)
helper = [19,20,21,22]
for i in xrange(0, len(helper)):
z[helper[i]]=3+uniform(-0.5,0.5)
z[7]=1
bc=WatershedBoundaryConditions()
bc.set_bc_find_outlet(rg, z, nodata_val)
#instantiate variable of type RouteFlowD8 Class
flow_router = RouteFlowD8(len(z))
#initial flow direction
flowdirs, max_slopes = flow_router.calc_flowdirs(rg,z)
    #instantiate variable of type AccumFlow Class
accumulator = AccumFlow(rg)
#initial flow accumulation
drain_area = accumulator.calc_flowacc(rg, z, flowdirs)
print("elevations ", rg.node_vector_to_raster(z))
print("flowdirs ", rg.node_vector_to_raster(flowdirs))
print("drain_area ", rg.node_vector_to_raster(drain_area))
if __name__ == '__main__':
main()
|
'''Get video URLs.
I take in IDs such as ``how-better-housekeeper-12537238``
'''
import json
import re
import sys
import urllib
import urllib2
import string
HTML_PAGE = 'http://voices.yahoo.com/video/{0}.html'
YQL = 'SELECT * FROM yahoo.media.video.streams WHERE id="{video_id}" AND format="mp4,flv,f4m" AND protocol="http" AND rt="flash" AND plrs="Gi_RxaWhgXECOj6ukNwZbO" AND acctid="{user_id}" AND plidl="{context}" AND pspid="{pagespace}" AND offnetwork="false" AND site="" AND lang="en-US" AND region="US" AND override="none" AND plist="" AND hlspre="false" AND ssl="false" AND synd="";'
YQL_URL = 'http://video.query.yahoo.com/v1/public/yql?q={0}&env=prod&format=json'
def main():
video_id = sys.argv[1]
response = urllib2.urlopen(HTML_PAGE.format(video_id))
assert response.geturl() == HTML_PAGE.format(video_id)
data = response.read()
match = re.search(r"VideoPlayer\(({.+?})\);", data)
snippet = match.group(1)
snippet = snippet.replace('/* REQUIRED CONFIG ITEMS */', '')
snippet = snippet.replace('/* OPTIONAL CONFIG ITEMS */', '')
def rep(match):
name = match.group(1)
        if (name not in ('http', 'https') and name[0] in string.ascii_lowercase) or name == 'YVAP':
return '"{0}":'.format(name)
else:
return '{0}:'.format(name)
snippet = re.sub('([a-zA-Z0-9]+) ?:', rep, snippet)
try:
doc = json.loads(snippet)
except ValueError as error:
if '.flv' in snippet:
match = re.search(r'http://.+?\.flv', snippet)
print(match.group(0))
return
else:
raise error
if 'streamUrl' in doc['playlist']['mediaItems'][0]:
print(doc['playlist']['mediaItems'][0]['streamUrl'])
return
video_id = doc['playlist']['mediaItems'][0]['id']
pagespace = doc['pageSpaceId']
context = doc['YVAP']['playContext']
user_id = doc['YVAP']['accountId']
yql = YQL.format(video_id=video_id, pagespace=pagespace, context=context, user_id=user_id)
json_url = YQL_URL.format(urllib.quote(yql))
print(json_url)
response = urllib2.urlopen(json_url)
doc = json.loads(response.read())
# print(json.dumps(doc, indent=2))
streams = doc['query']['results']['mediaObj'][0]['streams']
streams = list(sorted(streams, key=lambda x: x['bitrate']))
stream = streams[-1]
stream_url = stream['host'] + stream['path']
print(stream_url)
if __name__ == '__main__':
main()
|
# -*- coding: utf-8 -*-
# ===========================================================================
# eXe
# Copyright 2012, Pedro Peña Pérez, Open Phoenix IT
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
# ===========================================================================
"""
PropertiesPage maps properties forms to Package options
"""
import logging
import json
from exe.webui.renderable import Renderable
from twisted.web.resource import Resource
from exe.engine.path import toUnicode, Path
from exe.engine.lom import lomsubs
from exe.webui import common
import re
log = logging.getLogger(__name__)
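
# Return the index of the dict in `root` whose '__numberid__' equals `onum`,
# or False when no such element exists.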
def get_nodeFromList(root, onum):
index = 0
for element in root:
if isinstance(element, dict) and element['__numberid__'] == onum:
return index
index += 1
return False
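
# Split a trailing number off a field name: 'entity12' -> ('entity', '12'),
# 'entity' -> ('entity', False), and '12' -> (False, '12').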
def get_nameNum(name):
c = True
i = len(name) - 1
while c and i >= 0:
c = name[i].isdigit()
if c and i > 0:
i -= 1
else:
c = False
if i == 0:
n = False
else:
n = name[:i + 1]
if i == 0:
num = name
elif i < len(name) - 1:
num = name[i + 1:]
else:
num = False
if n == '':
n = False
return n, num
def _pass_field(fields, field, val):
"""
@return:
True: not process field (continue)
False: process field
"""
ret = False
if val.strip() == '':
#Entity Control
if re.findall("_entity[0-9]*_[name,organization,email]+$", field):
field2 = field
cnt = False
for r in ['name', 'email', 'organization']:
                a = re.sub('(name|organization|email)$', r, field2)
if a in fields.keys() and fields[a][0].strip() != '':
cnt = True
if not cnt:
ret = True
else:
#Duration Control
if re.findall("_[duration,typicalLearningTime]+_[years,months,days,hours,minutes,seconds]+$", field):
field2 = field
cnt = False
for r in ['years', 'months', 'days', 'hours', 'minutes', 'seconds']:
                    a = re.sub('(years|months|days|hours|minutes|seconds)$', r, field2)
if a in fields.keys() and fields[a][0].strip() != '':
cnt = True
if not cnt:
ret = True
else:
# Other fields with value ''
ret = True
return ret
def processForm2Lom(fields, label, source):
lomdict = {}
for field in sorted(fields.iterkeys()):
#print field
val = fields[field]
if not field.startswith('%s_' % label):
continue
else:
val = val[0].strip()
if _pass_field(fields, field, val):
continue
nodes = field.split('_')
nodes.remove(label)
rootvalue = lomdict
i = 0
rootparentparent = False
rootparent = False
for node in nodes:
index = False
parentindex = False
value = {}
if node[-1].isdigit():
name, num = get_nameNum(node)
node = name
if name not in rootvalue:
value = {'__numberid__': num}
index = 0
else:
index = get_nodeFromList(rootvalue[name], num)
if isinstance(index, bool):
value = {'__numberid__': num}
index = len(rootvalue)
else:
value = {}
if isinstance(rootvalue, list):
name, num = get_nameNum(nodes[i - 1])
parentindex = get_nodeFromList(rootvalue, num)
rootparentparent = rootparent
rootparent = rootvalue
if not isinstance(index, bool):
if node not in rootvalue[parentindex]:
rootvalue[parentindex][node] = []
rootvalue[parentindex][node].append(value)
else:
if '__numberid__' in value:
b = get_nodeFromList(rootvalue[parentindex][node], value['__numberid__'])
if isinstance(b, bool):
rootvalue[parentindex][node].append(value)
else:
if node not in rootvalue[parentindex]:
rootvalue[parentindex][node] = value
rootvalue = rootvalue[parentindex][node]
else:
if not isinstance(index, bool):
if node not in rootvalue:
rootvalue[node] = []
rootvalue[node].append(value)
else:
if '__numberid__' in value:
b = get_nodeFromList(rootvalue[node], value['__numberid__'])
if isinstance(b, bool):
rootvalue[node].append(value)
else:
if node not in rootvalue:
rootvalue[node] = value
rootparentparent = rootparent
rootparent = rootvalue
rootvalue = rootvalue[node]
i += 1
pnodes = [node]
if node == 'value':
pnodes.append('source')
for node in pnodes:
if node == 'source':
val = source
if isinstance(rootvalue, list):
if node.startswith('string'):
rootvalue = rootvalue[index]
rootvalue['valueOf_'] = val
else:
if isinstance(rootparent, list):
rootparent[parentindex][node].append(val)
for e in rootparent[parentindex][node]:
if isinstance(e, dict):
rootparent[parentindex][node].remove(e)
else:
rootparent[node].append(val)
for e in rootparent[node]:
if isinstance(e, dict):
rootparent[node].remove(e)
else:
if isinstance(rootparent, list):
if re.findall("_date$", field):
rootparent[parentindex][node]['dateTime'] = val
elif re.findall("_entity[0-9]*_[name,organization,email]+$", field):
rootparent[parentindex][node] = val
if 'name' in rootparent[parentindex] and 'organization' in rootparent[parentindex] and \
'email' in rootparent[parentindex]:
val2 = 'BEGIN:VCARD VERSION:3.0 FN:%s EMAIL;TYPE=INTERNET:%s ORG:%s END:VCARD' \
% (rootparent[parentindex]['name'], rootparent[parentindex]['email'],
rootparent[parentindex]['organization'])
rootparent.pop(parentindex)
rootparent.append(val2)
else:
rootparent[parentindex][node] = val
else:
if re.findall("_date$", field):
rootparent[node]['dateTime'] = val
elif re.findall("_entity[0-9]*_[name,organization,email]+$", field):
rootparent[node] = val
if 'name' in rootparent and 'organization' in rootparent and 'email' in rootparent:
val2 = 'BEGIN:VCARD VERSION:3.0 FN:%s EMAIL;TYPE=INTERNET:%s ORG:%s END:VCARD' \
% (rootparent['name'], rootparent['email'], rootparent['organization'])
name, num = get_nameNum(nodes[len(nodes) - 3])
parentindex = get_nodeFromList(rootparentparent, num)
rootparentparent[parentindex]['entity'] = val2
elif re.findall("_[duration,typicalLearningTime]+_[years,months,days,hours,minutes,seconds]+$", field):
rootparent[node] = val
if 'years' in rootparent and 'months' in rootparent and 'days' in rootparent\
and 'hours' in rootparent and 'minutes' in rootparent and 'seconds' in rootparent:
val2 = 'P%sY%sM%sDT%sH%sM%sS' % (rootparent['years'] or '0', rootparent['months'] or '0', rootparent['days'] or '0',\
rootparent['hours'] or '0', rootparent['minutes'] or '0', rootparent['seconds'] or '0')
rootparent['duration'] = val2
for key in ['years', 'months', 'days', 'hours', 'minutes', 'seconds']:
del rootparent[key]
else:
rootparent[node] = val
return lomdict
def processLom2Form2(form, lom):
data = {}
for field in form:
if field.startswith('lom'):
data[field] = lom.getval(field)
return data
# ===========================================================================
class PropertiesPage(Renderable, Resource):
"""
PropertiesPage maps properties forms to Package options
"""
name = 'properties'
booleanFieldNames = ('pp_scolinks', 'pp_backgroundImgTile', 'pp_scowsinglepage', 'pp_scowwebsite', 'pp_exportSource',
'pp_intendedEndUserRoleGroup', 'pp_intendedEndUserRoleTutor', 'pp_compatibleWithVersion9')
imgFieldNames = ('pp_backgroundImg', 'pp_coverImg')
def __init__(self, parent):
"""
Initialize
"""
Renderable.__init__(self, parent)
if parent:
self.parent.putChild(self.name, self)
Resource.__init__(self)
def fieldId2obj(self, fieldId):
"""
Takes a field id of the form xx_name and returns the object associated
with xx and name. These can be used with getattr and setattr
"""
if '_' in fieldId:
part, name = fieldId.split('_', 1)
# Get the object
if part == 'pp':
obj = self.package
if part == 'dc':
obj = self.package.dublinCore
if part == 'eo':
obj = self.package.exportOptions
if hasattr(obj, name):
return obj, name
else:
if fieldId in ['pp_scowsinglepage', 'pp_scowwebsite', 'pp_exportSource']:
setattr(obj, name, False)
return obj, name
raise ValueError("field id '%s' doesn't refer "
"to a valid object attribute" % fieldId)
def setLom(self, fields):
lom = processForm2Lom(fields, 'lom', 'LOMv1.0')
rootLom = lomsubs.lomSub.factory()
rootLom.addChilds(lom)
self.package.lom = rootLom
return True
def setLomes(self, fields):
lom = processForm2Lom(fields, 'lomes', 'LOM-ESv1.0')
rootLom = lomsubs.lomSub.factory()
rootLom.addChilds(lom)
self.package.lomEs = rootLom
return True
def render_GET(self, request=None):
log.debug("render_GET")
data = {}
try:
if 'lom_general_title_string1' in request.args.keys():
self.package.lom.genForm('lom', self.package.lom, data)
elif 'lomes_general_title_string1' in request.args.keys():
self.package.lom.genForm('lomes', self.package.lomEs, data)
else:
for key in request.args.keys():
if key != "_dc":
obj, name = self.fieldId2obj(key)
if key in self.imgFieldNames:
if getattr(obj, name):
data[key] = getattr(obj, name).basename()
else:
if name=='docType':
data[key]=self.package.getExportDocType()
else:
data[key] = getattr(obj, name)
except Exception as e:
log.exception(e)
return json.dumps({'success': False, 'errorMessage': _("Failed to get properties")})
return json.dumps({'success': True, 'data': data})
def render_POST(self, request=None):
log.debug("render_POST")
data = {}
try:
clear = False
no_autosave = False
if 'clear' in request.args:
clear = True
request.args.pop('clear')
if "no_autosave" in request.args:
no_autosave = True
request.args.pop("no_autosave")
if 'lom_general_title_string1' in request.args:
if clear:
self.package.setLomDefaults()
else:
self.setLom(request.args)
elif 'lomes_general_title_string1' in request.args:
if clear:
self.package.setLomEsDefaults()
else:
self.setLomes(request.args)
else:
items = request.args.items()
if 'pp_lang' in request.args:
value = request.args['pp_lang']
item = ('pp_lang', value)
items.remove(item)
items.insert(0, item)
for key, value in items:
obj, name = self.fieldId2obj(key)
if key in self.booleanFieldNames:
setattr(obj, name, value[0] == 'true')
else:
if key in self.imgFieldNames:
path = Path(value[0])
if path.isfile():
setattr(obj, name, toUnicode(value[0]))
data[key] = getattr(obj, name).basename()
else:
if getattr(obj, name):
if getattr(obj, name).basename() != path:
setattr(obj, name, None)
else:
#if name=='docType': common.setExportDocType(toUnicode(value[0]))
setattr(obj, name, toUnicode(value[0]))
except Exception as e:
log.exception(e)
return json.dumps({'success': False, 'errorMessage': _("Failed to save properties")})
if self.package.filename == u'' or no_autosave is True:
self.package.isChanged = True
else:
self.package.save()
return json.dumps({'success': True, 'data': data})
# ===========================================================================
|
# Copyright (c) 2013, Intel Corporation
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# * Redistributions of source code must retain the above copyright notice,
# this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
# * Neither the name of Intel Corporation nor the names of its contributors
# may be used to endorse or promote products derived from this software
# without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR
# ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
# (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON
# ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""Helper functions to work with bitfields.
Documentation frequently writes bitfields as the inclusive range [msb:lsb];
this module provides functions to work with bitfields using msb and lsb rather
than manually computing shifts and masks from those."""
def bitfield_max(msb, lsb=None):
"""Return the largest value that fits in the bitfield [msb:lsb] (or [msb] if lsb is None)"""
if lsb is None:
lsb = msb
return (1 << (msb - lsb + 1)) - 1
def bitmask(msb, lsb=None):
"""Creates a mask with bits [msb:lsb] (or [msb] if lsb is None) set."""
if lsb is None:
lsb = msb
return bitfield_max(msb, lsb) << lsb
def bitfield(value, msb, lsb=None):
"""Shift value to fit in the bitfield [msb:lsb] (or [msb] if lsb is None).
Raise OverflowError if value does not fit in that bitfield."""
if lsb is None:
lsb = msb
if value > bitfield_max(msb, lsb):
if msb == lsb:
field = "[{0}]".format(msb)
else:
field = "[{0}:{1}]".format(msb, lsb)
raise OverflowError("Value {value:#x} too big for bitfield {field}".format(**locals()))
return value << lsb
def getbits(value, msb, lsb=None):
"""From the specified value, extract the bitfield [msb:lsb] (or [msb] if lsb is None)"""
if lsb is None:
lsb = msb
return (value >> lsb) & bitfield_max(msb, lsb)
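
# Quick sanity examples (not part of the original module):
#   bitmask(7, 4)        == 0xF0
#   bitfield(0xA, 7, 4)  == 0xA0
#   getbits(0xA5, 7, 4)  == 0xA
#   bitfield(0x1F, 7, 4) raises OverflowError (0x1F needs five bits)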
|
'''
This module defines the list of TPAM API methods. The dictionary keys are
the method names which are dynamically added to the client.SSHClient() object,
and the values are lists where:
v[0] = type<str> [string|table] to indicate what the API method returns, and
v[1] = type<list> [type<str>, ...] to indicate a list of strings that the API
method returns upon successful completion (or an existing successful condition)
Note that v[1] is not necessary for API methods that return a table.
'''
from collections import OrderedDict
api_2_5_11 = OrderedDict()
api_2_5_11['AddAccount'] = ["string", ["saved successfully"]]
api_2_5_11['AddCollectionMember'] = ["string", ["saved successfully"]]
api_2_5_11['AddCollection'] = ["string", ["created successfully"]]
api_2_5_11['AddGroupMember'] = ["string", ["saved successfully"]]
api_2_5_11['AddGroup'] = ["string", ["created successfully"]]
api_2_5_11['AddPwdRequest'] = ["string", ["active/approved"]]
api_2_5_11['AddSystem'] = ["string", ["saved successfully"]]
api_2_5_11['AddSyncPass'] = ["string", ["created successfully"]]
api_2_5_11['AddSyncPwdSub'] = ["string", ["added successfully", "already subscribed"]]
api_2_5_11['AddUser'] = ["string", ["added successfully"]]
api_2_5_11['Approve'] = ["string", ["submitted successfully"]]
api_2_5_11['Cancel'] = ["string", ["submitted successfully"]]
api_2_5_11['ChangeUserPassword'] = ["string", ["successfully changed"]]
api_2_5_11['CheckPassword'] = ["string", ["processed the password check"]]
api_2_5_11['DeleteAccount'] = ["string", ["successfully deleted"]]
api_2_5_11['DeleteSyncPass'] = ["string", ["successfully deleted"]]
api_2_5_11['DeleteSystem'] = ["string", ["successfully deleted"]]
api_2_5_11['DeleteUser'] = ["string", ["successfully deleted"]]
api_2_5_11['DropCollectionMember'] = ["string", ["saved successfully"]]
api_2_5_11['DropCollection'] = ["string", ["successfully deleted"]]
api_2_5_11['DropGroupMember'] = ["string", ["saved successfully"]]
api_2_5_11['DropGroup'] = ["string", ["successfully deleted"]]
api_2_5_11['DropSyncPwdSub'] = ["string", ["removed successfully"]]
api_2_5_11['ForceReset'] = ["string", ["processed the password change"]]
api_2_5_11['GetPwdRequest'] = ["table", []]
api_2_5_11['ListAccounts'] = ["table", []]
api_2_5_11['ListAcctsForPwdRequest'] = ["table", []]
api_2_5_11['ListAssignedPolicies'] = ["table", []]
api_2_5_11['ListCollectionMembership'] = ["table", []]
api_2_5_11['ListCollections'] = ["table", []]
api_2_5_11['ListDependentSystems'] = ["table", []]
api_2_5_11['ListGroupMembership'] = ["table", []]
api_2_5_11['ListGroups'] = ["table", []]
api_2_5_11['ListRequestDetails'] = ["table", []] # NOT TESTED
api_2_5_11['ListReasonCodes'] = ["table", []] # NOT TESTED
api_2_5_11['ListRequest'] = ["table", []]
api_2_5_11['ListSystems'] = ["table", []]
api_2_5_11['ListSynchronizedPasswords'] = ["table", []]
api_2_5_11['ListSyncPwdSubscribers'] = ["table", []]
api_2_5_11['ListUsers'] = ["table", []]
api_2_5_11['ReportActivity'] = ["table", []]
api_2_5_11['Retrieve'] = ["string", []]
api_2_5_11['SetAccessPolicy'] = ["string", ["successfully!"]]
api_2_5_11['SSHKey'] = ["string", ["private key"]] # NOT TESTED
api_2_5_11['SyncPassForceReset'] = ["string", ["successfully scheduled"]]
api_2_5_11['TestSystem'] = ["string", ["was successful"]]
api_2_5_11['UnlockUser'] = ["string", ["unlocked successfully", "not currently locked"]]
api_2_5_11['UpdateAccount'] = ["string", ["saved successfully"]]
api_2_5_11['UpdateDependentSystems'] = ["string", ["saved successfully"]] # NOT TESTED
api_2_5_11['UpdateSyncPass'] = ["string", ["updated successfully"]]
api_2_5_11['UpdateSystem'] = ["string", ["saved successfully"]]
api_2_5_11['UpdateUser'] = ["string", ["updated successfully"]]
api_2_5_11['UserSSHKey'] = ["string", ["private key"]]
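
# Illustrative consumption sketch (hypothetical caller; the real binding lives
# in client.SSHClient, which adds these names as methods dynamically):
#   rtype, ok_strings = api_2_5_11['AddAccount']
#   if rtype == 'string':
#       succeeded = any(s in raw_output for s in ok_strings)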
|
#!/usr/bin/env python
"""
Author: Joris van Steenbrugge
Function: Converts an RNA sequence into a protein sequence
"""
from sys import argv
def getRNAseq(fileName):
rna = ""
with open(fileName) as infile:
for line in infile:
rna += line.strip()
return rna
def getCodons(rna):
n = 3
triplets = [rna[i:i+n] for i in range(0, len(rna), n)]
return triplets
def translate(triplets):
prot = ""
codons = {"UUU":"F", "UUC":"F", "UUA":"L", "UUG":"L",
"UCU":"S", "UCC":"s", "UCA":"S", "UCG":"S",
"UAU":"Y", "UAC":"Y", "UAA":"STOP", "UAG":"STOP",
"UGU":"C", "UGC":"C", "UGA":"STOP", "UGG":"W",
"CUU":"L", "CUC":"L", "CUA":"L", "CUG":"L",
"CCU":"P", "CCC":"P", "CCA":"P", "CCG":"P",
"CAU":"H", "CAC":"H", "CAA":"Q", "CAG":"Q",
"CGU":"R", "CGC":"R", "CGA":"R", "CGG":"R",
"AUU":"I", "AUC":"I", "AUA":"I", "AUG":"M",
"ACU":"T", "ACC":"T", "ACA":"T", "ACG":"T",
"AAU":"N", "AAC":"N", "AAA":"K", "AAG":"K",
"AGU":"S", "AGC":"S", "AGA":"R", "AGG":"R",
"GUU":"V", "GUC":"V", "GUA":"V", "GUG":"V",
"GCU":"A", "GCC":"A", "GCA":"A", "GCG":"A",
"GAU":"D", "GAC":"D", "GAA":"E", "GAG":"E",
"GGU":"G", "GGC":"G", "GGA":"G", "GGG":"G",}
    for triplet in triplets:
        if len(triplet) == 3:
            aa = codons[triplet]
            if aa != "STOP":  # skip stop codons, keep translating
                prot += aa
print(prot)
if __name__ == "__main__":
fileName = argv[1]
rna = getRNAseq(fileName)
triplets = getCodons(rna)
translate(triplets)
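
# Worked example: "AUGUUUUAA" splits into AUG UUU UAA and translates to "MF"
# (AUG -> M, UUU -> F, UAA is a stop codon and is skipped).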
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
Copyright (c) 2016, 小忆机器人
All rights reserved.
Summary: brain-teaser (脑筋急转弯) semantics
Author: 余菲
Created: 16/12/17
"""
from dict.dict import pronoun, adverb, modals, stop_words, honorific, any_w
from nlu.rule import Rule
from utils.utils import attach_perperty, attach_name, o, e, range_tag
class Trick(object):
    # Identifies the trick domain
service = 'trick'
pronoun = pronoun.join_all
can_words = modals.join_all
adverb = adverb.join_all
ask = honorific.join_all
want = '(要|想|想要|需要)'
    # Semantic intent, only one kind supported: listen (听)
listen = '(听)(一)?(个)?'
    # Semantic intent: tell (讲)
tell = '(讲|来)(一)?(个)?'
    # brain teaser
trick = '(脑筋急转弯|急转弯|脑经急转弯)'
# robot
robot = '(你|机器人|小忆)'
# me
me = '(我|我们|咱|咱们|俺)'
    # again / once more (再, 又)
again = '(再|又|多)'
    # measure word (个)
an = '(个|一个)'
    # give / for (给)
give = '(给|为)'
    # e.g. "我要听个急转弯" ("I want to hear a brain teaser")
case_1 = e(robot) + e(me) + e(want) + e(again) + listen + e(an) + trick + e(stop_words)
rule_1 = Rule(attach_perperty(case_1, {'scene': 'trick', 'operation': 'trick', 'rule': 1}))
    # e.g. "给我讲个急转弯" ("Tell me a brain teaser")
case_2 = e(robot) + e(give) + e(me) + tell + e(an) + trick + e(stop_words)
rule_2 = Rule(attach_perperty(case_2, {'scene': 'trick', 'operation': 'trick', 'rule': 2}))
    # e.g. "再来一个" ("One more")
case_3 = attach_perperty('(再来一个)', {'scene': 'trick', 'operation': 'trick', 'rule': 3})
rule_3 = Rule(case_3, {'status': 'trick'})
|
# Copyright 2009 the Melange authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Module containing the GSoC Callback."""
from soc.modules.gsoc.models import program as program_model
from soc.modules.gsoc.tasks import accept_proposals as accept_proposals_tasks
from soc.modules.gsoc.tasks import grading_survey_group as grading_survey_group_tasks
from soc.modules.gsoc.tasks import proposal_duplicates as proposal_duplicates_tasks
from soc.modules.gsoc.tasks import survey_reminders as survey_reminders_tasks
from soc.modules.gsoc.views import accept_proposals
from soc.modules.gsoc.views import accept_withdraw_projects
from soc.modules.gsoc.views import admin
from soc.modules.gsoc.views import dashboard
from soc.modules.gsoc.views import document
from soc.modules.gsoc.views import duplicates
from soc.modules.gsoc.views import grading_record_details
from soc.modules.gsoc.views import homepage
from soc.modules.gsoc.views import mentor_evaluation
from soc.modules.gsoc.views import org_app
from soc.modules.gsoc.views import participants
from soc.modules.gsoc.views import program
from soc.modules.gsoc.views import project_details
from soc.modules.gsoc.views import projects_list
from soc.modules.gsoc.views import proposal
from soc.modules.gsoc.views import proposal_review
from soc.modules.gsoc.views import search
from soc.modules.gsoc.views import slot_allocation
from soc.modules.gsoc.views import slot_transfer
from soc.modules.gsoc.views import slot_transfer_admin
from soc.modules.gsoc.views import student_evaluation
from soc.modules.gsoc.views import student_forms
class Callback(object):
"""Callback object that handles interaction between the core."""
  # This constant is required by the soc.modules.core module. If its value
  # does not match the one defined there, the callback is rejected.
API_VERSION = 1
def __init__(self, core):
"""Initializes a new Callback object for the specified core."""
self.core = core
self.views = []
def registerViews(self):
"""Instantiates all view objects."""
self.views.append(accept_proposals.AcceptProposalsPage())
self.views.append(accept_withdraw_projects.AcceptProposals())
self.views.append(accept_withdraw_projects.WithdrawProjects())
self.views.append(admin.DashboardPage())
self.views.append(admin.LookupLinkIdPage())
self.views.append(admin.ManageProjectsListPage())
self.views.append(admin.ProjectsListPage())
self.views.append(admin.ProposalsPage())
self.views.append(admin.SurveyReminderPage())
self.views.append(dashboard.DashboardPage())
self.views.append(document.DocumentListPage())
self.views.append(document.DocumentPage())
self.views.append(document.EditDocumentPage())
self.views.append(document.EventsPage())
self.views.append(duplicates.DuplicatesPage())
self.views.append(grading_record_details.GradingGroupCreate())
self.views.append(grading_record_details.GradingRecordDetails())
self.views.append(grading_record_details.GradingRecordsOverview())
self.views.append(homepage.Homepage())
self.views.append(mentor_evaluation.GSoCMentorEvaluationEditPage())
self.views.append(mentor_evaluation.GSoCMentorEvaluationPreviewPage())
self.views.append(mentor_evaluation.GSoCMentorEvaluationRecordsList())
self.views.append(mentor_evaluation.GSoCMentorEvaluationShowPage())
self.views.append(mentor_evaluation.GSoCMentorEvaluationTakePage())
self.views.append(org_app.GSoCOrgAppEditPage())
self.views.append(org_app.GSoCOrgAppPreviewPage())
self.views.append(org_app.GSoCOrgAppRecordsList())
self.views.append(org_app.GSoCOrgAppShowPage())
self.views.append(participants.MentorsListAdminPage())
self.views.append(participants.StudentsListPage())
self.views.append(program.DownloadSchoolsHandler())
self.views.append(program.GSoCCreateProgramPage())
self.views.append(program.GSoCEditProgramPage())
self.views.append(program.GSoCProgramMessagesPage())
self.views.append(program.TimelinePage())
self.views.append(program.UploadSchoolsPage())
self.views.append(project_details.AssignMentors())
self.views.append(project_details.CodeSampleDeleteFilePost())
self.views.append(project_details.CodeSampleDownloadFileGet())
self.views.append(project_details.CodeSampleUploadFilePost())
self.views.append(project_details.FeaturedProject())
self.views.append(project_details.ProjectDetails())
self.views.append(project_details.ProjectDetailsUpdate())
self.views.append(projects_list.ListProjects())
self.views.append(proposal.ProposalPage())
self.views.append(proposal.UpdateProposal())
self.views.append(proposal_review.AcceptProposal())
self.views.append(proposal_review.AssignMentor())
self.views.append(proposal_review.IgnoreProposal())
self.views.append(proposal_review.PostComment())
self.views.append(proposal_review.PostScore())
self.views.append(proposal_review.ProposalModificationPostDeadline())
self.views.append(proposal_review.ProposalPubliclyVisible())
self.views.append(proposal_review.ProposalStatusSetter())
self.views.append(proposal_review.ReviewProposal())
self.views.append(proposal_review.WishToMentor())
self.views.append(search.SearchGsocPage())
self.views.append(slot_allocation.SlotsPage())
self.views.append(slot_transfer.SlotTransferPage())
self.views.append(slot_transfer.UpdateSlotTransferPage())
self.views.append(slot_transfer_admin.SlotsTransferAdminPage())
self.views.append(student_evaluation.GSoCStudentEvaluationEditPage())
self.views.append(student_evaluation.GSoCStudentEvaluationPreviewPage())
self.views.append(student_evaluation.GSoCStudentEvaluationRecordsList())
self.views.append(student_evaluation.GSoCStudentEvaluationShowPage())
self.views.append(student_evaluation.GSoCStudentEvaluationTakePage())
self.views.append(student_forms.DownloadForm())
self.views.append(student_forms.FormPage())
# Appengine Task related views
self.views.append(accept_proposals_tasks.ProposalAcceptanceTask())
self.views.append(grading_survey_group_tasks.GradingRecordTasks())
self.views.append(proposal_duplicates_tasks.ProposalDuplicatesTask())
self.views.append(survey_reminders_tasks.SurveyReminderTask())
def registerWithSitemap(self):
"""Called by the server when sitemap entries should be registered."""
self.core.requireUniqueService('registerWithSitemap')
# Redesigned view registration
for view in self.views:
self.core.registerSitemapEntry(view.djangoURLPatterns())
def registerWithProgramMap(self):
"""Called by the server when program_map entries should be registered."""
self.core.requireUniqueService('registerWithProgramMap')
program_entities = program_model.GSoCProgram.all().fetch(1000)
program_map = ('GSoC Programs', [
(str(e.key()), e.name) for e in program_entities])
self.core.registerProgramEntry(program_map)
|
import traceback
from horizon import workflows, forms, exceptions
from django.utils.translation import ugettext_lazy as _
from openstack_dashboard.dashboards.integra.workflows import utils
class ExecuteWorkflowAction(workflows.Action):
    workflowId = forms.IntegerField(
        label=_("WorkflowId"),
        required=True,
        min_value=1,
        max_value=9999999,
        help_text=_("WorkflowId"))
class Meta:
name = _("Execute Details")
def __init__(self, request, context, *args, **kwargs):
self.request = request
self.context = context
super(ExecuteWorkflowAction, self).__init__(
request, context, *args, **kwargs)
class ExecuteWorkflowDetails(workflows.Step):
action_class = ExecuteWorkflowAction
contributes = ("workflowId", )
def contribute(self, data, context):
if data:
context['workflowId'] = data.get("workflowId", "")
return context
# =====
# Workflow definition
# =====
class ExecuteWorkflow(workflows.Workflow):
slug = "execute"
name = _("Execute")
finalize_button_name = _("Execute")
success_message = _('Execute workflow "%s".')
failure_message = _('Unable to execute workflow "%s".')
success_url = "horizon:integra:workflows:index"
failure_url = "horizon:integra:workflows:index"
default_steps = (ExecuteWorkflowDetails,)
def format_status_message(self, message):
return message % self.context.get('name', 'unknown workflow')
def handle(self, request, context):
try:
for k, v in context.items():
print(k, v)
print("-----------------")
print("===================")
utils.executeWorkflow(self, request, context)
return True
except Exception:
            print(traceback.format_exc())
exceptions.handle(request, _("Unable to execute workflow"))
return False
|
#!/usr/bin/python3
# A program for creating backups
import os, zipfile
def backupToZip(folder):
"""
    Create a backup of the entire contents of the given folder.
"""
folder = os.path.abspath(folder)
number=1
while True:
zipFilename = os.path.basename(folder)+'_'+str(number)+'.zip'
if not os.path.exists(zipFilename):
break
number += 1
    print('Creating file %s...' % (zipFilename))
backupZip = zipfile.ZipFile(zipFilename, 'w')
for foldername, subfolders, filenames in os.walk(folder):
        print('Adding files from folder %s...' % (foldername))
        # Add the current folder itself to the zip file.
        backupZip.write(foldername)
        # Add all files from this folder to the zip file
for filename in filenames:
newBase= os.path.basename(folder) + '_'
if filename.startswith(newBase) and filename.endswith('.zip'):
                continue # don't back up existing backup archives
backupZip.write(os.path.join(foldername, filename))
backupZip.close()
    print('All done!')
if __name__ == '__main__':
    n = input('Enter the path to the folder to archive as a Zip file \n>>>')
backupToZip(n)
|
from tapp.core.exception import Error
class BaseTemplateQuery(object):
@classmethod
def query(cls, query_name, context=None):
# init data
        class_name = cls.__name__
context = context if context is not None else {}
# search query
query_func = getattr(cls, query_name, None)
if not query_func:
Error('UNKNOWN', ('query', query_name,)).raise_exception('TemplateQuery')
# build query
query_args = []
        args = query_func.__code__.co_varnames[:query_func.__code__.co_argcount]
for arg in args:
query_args.append(context.get(arg, None))
# run query
result = None
try:
result = query_func(*query_args)
except (NameError, TypeError, ValueError):
Error(Error.format_exception()).raise_exception('TemplateQuery')
# check query
if result.strip() == "":
Error('INVALID', ('Query', "%s.%s" % (class_name, query_name,),)).raise_exception('TemplateQuery')
else:
return result
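# Hedged usage sketch (names below are hypothetical, for illustration only):
#
#     class UserTemplateQuery(BaseTemplateQuery):
#         @staticmethod
#         def by_name(name):
#             return "SELECT * FROM users WHERE name = '%s'" % name
#
#     sql = UserTemplateQuery.query('by_name', {'name': 'alice'})
#
# query() resolves 'by_name' via getattr, reads its argument names from its
# __code__ object, pulls matching values out of the context dict, and calls it.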
|
import logging
import threading
import os
from rest_framework.views import APIView
from rest_framework.response import Response
import json
import pika
import time
from channels.layers import get_channel_layer
from asgiref.sync import async_to_sync
from EOSS.data.problem_specific import assignation_problems, partition_problems
from EOSS.models import Design
from EOSS.vassar.api import VASSARClient
from EOSS.vassar.interface.ttypes import BinaryInputArchitecture, DiscreteInputArchitecture
from auth_API.helpers import get_or_create_user_information
from EOSS.explorer.helpers import send_archs_from_queue_to_main_dataset, send_archs_back, \
generate_background_search_message
from EOSS.data.design_helpers import add_design
# Get an instance of a logger
logger = logging.getLogger('EOSS.explorer')
class StartGA(APIView):
def post(self, request, format=None):
if request.user.is_authenticated:
try:
# Start connection with VASSAR
user_info = get_or_create_user_information(request.session, request.user, 'EOSS')
port = user_info.eosscontext.vassar_port
client = VASSARClient(port)
client.start_connection()
problem = request.data['problem']
# Restart archs queue before starting the GA again
Design.objects.filter(activecontext__exact=user_info.eosscontext.activecontext).delete()
user_info.eosscontext.last_arch_id = user_info.eosscontext.design_set.count()
user_info.eosscontext.save()
# Convert the architecture list and wait for threads to be available (ask for stop again just in case)
thrift_list = []
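                # Hashed inputs of architectures already in the dataset; used
                # below to skip duplicates coming back from the GA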
inputs_unique_set = set()
if problem in assignation_problems:
for arch in user_info.eosscontext.design_set.all():
thrift_list.append(
BinaryInputArchitecture(arch.id, json.loads(arch.inputs), json.loads(arch.outputs)))
hashed_input = hash(tuple(json.loads(arch.inputs)))
inputs_unique_set.add(hashed_input)
elif problem in partition_problems:
for arch in user_info.eosscontext.design_set.all():
thrift_list.append(
DiscreteInputArchitecture(arch.id, json.loads(arch.inputs), json.loads(arch.outputs)))
hashed_input = hash(tuple(json.loads(arch.inputs)))
inputs_unique_set.add(hashed_input)
else:
raise ValueError('Unrecognized problem type: {0}'.format(problem))
if user_info.eosscontext.ga_id is not None:
client.stop_ga(user_info.eosscontext.ga_id)
ga_id = client.start_ga(problem, request.user.username, thrift_list)
user_info.eosscontext.ga_id = ga_id
user_info.eosscontext.save()
# End the connection before return statement
client.end_connection()
                # Start listening for RabbitMQ messages to share through websockets
connection = pika.BlockingConnection(pika.ConnectionParameters(host=os.environ['RABBITMQ_HOST']))
channel = connection.channel()
channel.queue_declare(queue=ga_id + '_gabrain')
def callback(ch, method, properties, body):
thread_user_info = get_or_create_user_information(request.session, request.user, 'EOSS')
message = json.loads(body)
if message['type'] == 'new_arch':
print('Processing some new archs!')
nonlocal inputs_unique_set
# Archs are added one by one
new_archs = [message['data']]
send_back = []
# Add archs to the context data before sending back to user
for arch in new_archs:
hashed_input = hash(tuple(arch['inputs']))
if hashed_input not in inputs_unique_set:
full_arch = {
'inputs': arch['inputs'],
'outputs': arch['outputs']
}
if thread_user_info.eosscontext.activecontext.show_background_search_feedback:
full_arch = add_design(full_arch, request.session, request.user, False)
else:
full_arch = add_design(full_arch, request.session, request.user, True)
send_back.append(full_arch)
inputs_unique_set.add(hashed_input)
thread_user_info.save()
# Look for channel to send back to user
channel_layer = get_channel_layer()
background_queue_qs = Design.objects.filter(
activecontext_id__exact=thread_user_info.eosscontext.activecontext.id)
if background_queue_qs.count() == 10:
ws_message = generate_background_search_message(thread_user_info)
async_to_sync(channel_layer.send)(thread_user_info.channel_name,
{
'type': 'active.message',
'message': ws_message
})
if thread_user_info.eosscontext.activecontext.show_background_search_feedback:
back_list = send_archs_from_queue_to_main_dataset(thread_user_info)
send_back.extend(back_list)
send_archs_back(channel_layer, thread_user_info.channel_name, send_back)
if message['type'] == 'ga_started':
# Look for channel to send back to user
channel_layer = get_channel_layer()
async_to_sync(channel_layer.send)(thread_user_info.channel_name,
{
'type': 'ga.started'
})
if message['type'] == 'ga_done':
channel_layer = get_channel_layer()
async_to_sync(channel_layer.send)(thread_user_info.channel_name,
{
'type': 'ga.finished'
})
print('Ending the thread!')
channel.stop_consuming()
channel.basic_consume(queue=ga_id + '_gabrain',
on_message_callback=callback,
auto_ack=True)
thread = threading.Thread(target=channel.start_consuming)
thread.start()
return Response({
"status": 'GA started correctly!'
})
except Exception as exc:
logger.exception('Exception in starting the GA!')
client.end_connection()
return Response({
"error": "Error starting the GA",
"exception": str(exc)
})
else:
return Response({
"error": "This is only available to registered users!"
})
class StopGA(APIView):
def post(self, request, format=None):
if request.user.is_authenticated:
try:
user_info = get_or_create_user_information(request.session, request.user, 'EOSS')
# Start connection with VASSAR
port = user_info.eosscontext.vassar_port
client = VASSARClient(port)
client.start_connection()
# Call the GA stop function on Engineer
client.stop_ga(user_info.eosscontext.ga_id)
user_info.eosscontext.ga_id = None
user_info.eosscontext.save()
# End the connection before return statement
client.end_connection()
return Response({
"status": 'GA stopped correctly!'
})
except Exception as exc:
logger.exception('Exception in stopping the GA!')
client.end_connection()
return Response({
"error": "Error stopping the GA",
"exception": str(exc)
})
else:
return Response({
"error": "This is only available to registered users!"
})
class CheckGA(APIView):
def post(self, request, format=None):
if request.user.is_authenticated:
try:
# Start connection with VASSAR
user_info = get_or_create_user_information(request.session, request.user, 'EOSS')
port = user_info.eosscontext.vassar_port
client = VASSARClient(port)
client.start_connection()
status = client.is_ga_running(user_info.eosscontext.ga_id)
# End the connection before return statement
client.end_connection()
return Response({
'ga_status': status
})
except Exception as exc:
logger.exception('Exception while checking GA status!')
client.end_connection()
return Response({
"error": "Error checking the GA status",
"exception": str(exc)
})
else:
return Response({
"error": "This is only available to registered users!"
})
|
"""Awx user helper module."""
from tower_cli.exceptions import Found, NotFound
from ..base import AwxBase
# TODO: Add in additional parameters that are optional for all methods.
class AwxUser(AwxBase):
"""Awx user class."""
__resource_name__ = 'user'
def __init__(self):
"""Constructor."""
super(AwxUser, self).__init__()
@property
def users(self):
"""Return list of users."""
return self.resource.list()
def create(self, name, password, email, first_name, last_name,
superuser=False, system_auditor=False):
"""Create a user.
:param name: Username.
:type name: str
:param password: Password.
:type password: str
:param email: Email address.
:type email: str
:param first_name: First name.
:type first_name: str
:param last_name: Last name.
:type last_name: str
:param superuser: Superuser field.
:type superuser: bool
:param system_auditor: System auditor field.
:type system_auditor: bool
"""
self.logger.info('Creating user %s.' % name)
try:
self.resource.create(
username=name,
password=password,
email=email,
first_name=first_name,
last_name=last_name,
is_superuser=superuser,
is_system_auditor=system_auditor,
fail_on_found=True
)
except Found as ex:
self.logger.error('User %s already exists!' % name)
            raise Exception(str(ex))
self.logger.info('User %s successfully created!' % name)
def delete(self, name):
"""Delete a user.
:param name: Username.
:type name: str
"""
self.logger.info('Deleting user %s.' % name)
self.resource.delete(username=name)
self.logger.info('User %s successfully deleted.' % name)
def get(self, name):
"""Get a user.
:param name: Username.
:type name: str
"""
try:
return self.resource.get(username=name)
except NotFound as ex:
            raise Exception(str(ex))
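# Hedged usage sketch (assumes a configured tower-cli environment; the values
# are illustrative):
#
#     user = AwxUser()
#     user.create('jdoe', 'secret', '[email protected]', 'John', 'Doe')
#     print(user.get('jdoe'))
#     user.delete('jdoe')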
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# Copyright (C) 2019, AGB & GC
# Full license can be found in License.md
# -----------------------------------------------------------------------------
""" Download and format DMSP SSJ boundary files
References
----------
.. [2] Angeline Burrell, Christer van der Meeren, & Karl M. Laundal. (2020).
aburrell/aacgmv2 (All Versions). Zenodo. doi:10.5281/zenodo.1212694.
.. [5] Kilcommons, L.M., et al. (2017), A new DMSP magnetometer and auroral
boundary data set and estimates of field-aligned currents in dynamic auroral
boundary coordinates, J. Geophys. Res.: Space Phys., 122, pp 9068-9079,
doi:10.1002/2016ja023342.
"""
import datetime as dt
import numpy as np
import os
import ocbpy
# AGB: The TypeError exception below is necessary due to a bug in
# ssj_auroral_boundary that was introduced by a change in matplotlib
# behaviour. Once this bug is fixed (issue #11), it will be removed
err = ''.join(['unable to load the DMSP SSJ module; ssj_auroral_boundary ',
'is available at: ',
'https://github.com/lkilcommons/ssj_auroral_boundary'])
try:
from spacepy import pycdf
import aacgmv2
import ssj_auroral_boundary as ssj
except (ImportError, TypeError) as ierr:
raise ImportError("{:s}\n{:}".format(err, ierr))
def fetch_ssj_files(stime, etime, out_dir=None, sat_nums=None):
""" Download DMSP SSJ files and place them in a specified directory
Parameters
----------
stime : dt.datetime
Start time
etime : dt.datetime
End time
out_dir : str or NoneType
Output directory or None to download to ocbpy boundary directory
(default=None)
sat_nums : list or NoneType
Satellite numbers or None for all satellites (default=None)
Returns
-------
out_files : list
List of filenames corresponding to downloaded files
Raises
------
ValueError
If an unknown satellite ID is provided.
Notes
-----
If a file already exists, the routine will add the file to the output list
without downloading it again.
"""
# Get and test the output directory
if out_dir is None:
out_dir = ocbpy.boundaries.files.get_boundary_directory()
if not os.path.isdir(out_dir):
raise ValueError("can't find the output directory")
# SSJ5 was carried on F16 onwards. F19 was short lived, F20 was not
# launched. Ref: https://space.skyrocket.de/doc_sdat/dmsp-5d3.htm
sat_launch = {16: dt.datetime(2003, 10, 18),
17: dt.datetime(2006, 11, 4),
18: dt.datetime(2009, 10, 18)}
sat_reentry = {16: dt.datetime(3000, 1, 1),
17: dt.datetime(3000, 1, 1),
18: dt.datetime(3000, 1, 1)}
# Ensure the input parameters are appropriate
if sat_nums is None:
sat_nums = list(sat_launch.keys())
if not np.all([snum in list(sat_launch.keys()) for snum in sat_nums]):
raise ValueError("".join(["unknown satellite ID in ",
"{:} use {:}".format(sat_nums,
sat_launch.keys())]))
# Initialize the output
out_files = list()
# Cycle through the satellite IDs, downloading each day's file
for snum in sat_nums:
ctime = stime if stime >= sat_launch[snum] else sat_launch[snum]
ltime = etime if etime <= sat_reentry[snum] else sat_reentry[snum]
while ctime < ltime:
# Construct the remote and local filenames
remote, fname = ssj.files.cdf_url_and_filename(snum, ctime.year,
ctime.month,
ctime.day)
local = os.path.join(out_dir, fname)
# Download the remote file if it doesn't exist locally
if os.path.isfile(local):
out_files.append(local)
else:
try:
ssj.files.download_cdf_from_noaa(remote, local)
out_files.append(local)
except RuntimeError as err:
ocbpy.logger.info(err)
# Cycle by one day
ctime += dt.timedelta(days=1)
# Return list of available files for these satellites and times
return out_files
def create_ssj_boundary_files(cdf_files, out_dir=None,
out_cols=['glat', 'glon'],
make_plots=False, plot_dir=None):
""" Create SSJ boundary files for a list of DMSP SSJ daily satellite files
Parameters
----------
cdf_files : array-like
List of daily satellite files
out_dir : str or NoneType
Output directory for the boundary files or None to use the ocbpy
boundary directory (default=None)
out_cols : list
List of output columns. Permitted are CDF file variable names or any
of mlat, mlt, glat, glon, diff_flux (default=['glat', 'glon'])
make_plots : bool
Make plots for the boundary passes (default=False)
plot_dir : str or NoneType
If plots are made, this is their output directory. If None, will be
set to the same value as out_dir. (default=None)
Returns
-------
out_files : list
List of output .csv boundary files
Raises
------
ValueError
If incorrect input is provided
"""
# Test the directory inputs
if out_dir is None:
out_dir = ocbpy.boundaries.files.get_boundary_directory()
if not os.path.isdir(out_dir):
raise ValueError("unknown output directory: {:}".format(out_dir))
if plot_dir is None:
plot_dir = out_dir
if make_plots and not os.path.isdir(plot_dir):
raise ValueError("unknown plot directory: {:}".format(plot_dir))
# Error catch for input being a filename
cdf_files = np.asarray(cdf_files)
if len(cdf_files.shape) == 0:
cdf_files = np.asarray([cdf_files])
# Geographic lat and lon are currently provided through the CDF column name
if 'glat' in out_cols:
out_cols[out_cols.index('glat')] = 'SC_GEOCENTRIC_LAT'
if 'glon' in out_cols:
out_cols[out_cols.index('glon')] = 'SC_GEOCENTRIC_LON'
# Cycle through all the CDF files, creating the desired CSV files
out_files = list()
for cdffn in np.asarray(cdf_files):
if os.path.isfile(cdffn):
try:
with np.errstate(invalid='ignore', divide='ignore',
over='ignore', under='ignore'):
absd = ssj.absatday.absatday(cdffn, csvdir=out_dir,
imgdir=plot_dir,
make_plot=make_plots,
csvvars=out_cols)
out_files.append(absd.csv.csvfn)
except pycdf.CDFError as err:
ocbpy.logger.warning("{:}".format(err))
except Warning as war:
ocbpy.logger.warning("{:}".format(war))
else:
ocbpy.logger.warning("bad input file {:}".format(cdffn))
return out_files
def format_ssj_boundary_files(csv_files, ref_alt=830.0,
method='GEOCENTRIC|ALLOWTRACE'):
""" Create SSJ boundary files for a list of DMSP SSJ daily satellite files
Parameters
----------
csv_files : list
List of SSJ CSV boundary files with directory structure
ref_alt : float
Reference altitude for boundary locations in km (default=830.0)
method : str
AACGMV2 method, may use 'TRACE', 'ALLOWTRACE', 'BADIDEA', 'GEOCENTRIC'
[2]_ (default='GEOCENTRIC|ALLOWTRACE')
Returns
-------
bound_files : list
List of successfully updated .csv boundary files
Notes
-----
Output format is 'sc date time r x y fom x_1 y_1 x_2 y_2'
where:
===== ==================================================================
sc Spacecraft number
date YYYY-MM-DD
time HH:MM:SS of midpoint between the two measurements for this pass
r Half the distance between the two pass boundaries
x Distance between the midpoint of the two pass boundaries
and the AACGMV2 pole in degrees along the dusk-dawn meridian
    y     Distance between the midpoint of the two pass boundaries and the
AACGMV2 pole in degrees along the midnight-noon meridian
fom FOM for the boundaries found along this pass
x_1 x coordinate of the first boundary
y_1 y coordinate of the first boundary
x_2 x coordinate of the second boundary
y_2 y coordinate of the second boundary
===== ==================================================================
Because two points are not enough to define the OCB or EAB across all local
times, a circle that intersects the two boundary pass points is defined and
the boundary location saved. The DMSP SSJ boundary correction function
will use this information to only return values within a small distance of
the boundary locations [5]_.
Separate files are created for each boundary and hemisphere, dates and
spacecraft are combined.
See Also
--------
aacgmv2
"""
# Error catch for input being a filename
csv_files = np.asarray(csv_files)
if len(csv_files.shape) == 0:
csv_files = np.asarray([csv_files])
# Remove any bad files
good_files = list()
for i, infile in enumerate(csv_files):
if not os.path.isfile(infile):
ocbpy.logger.warning("bad input file: {:}".format(infile))
else:
good_files.append(i)
csv_files = csv_files[good_files]
if len(csv_files) == 0:
raise ValueError("empty list of input CSV files")
# Set the hemisphere suffix and boundary prefix
hemi_suffix = {1: "north", -1: "south"}
bound_prefix = {'PO': '.ocb', 'EQ': '.eab'}
# Initialize the file lists
bad_files = list()
# Initialize the output header
out_head = "#sc date time r x y fom x_1 y_1 x_2 y_2\n"
# Specify the output file information
outfile_prefix = os.path.commonprefix(list(csv_files)).split('-f')[0]
filename_sec = os.path.split(
csv_files[0])[-1].split('dmsp-f')[-1].split('_')
sdate = filename_sec[3]
filename_sec = os.path.split(
csv_files[-1])[-1].split('dmsp-f')[-1].split('_')
edate = filename_sec[3]
bound_files = {hh: {bb: "".join([outfile_prefix, "-", filename_sec[1], "_",
hemi_suffix[hh], "_", sdate, "_", edate,
"_", filename_sec[4], bound_prefix[bb]])
for bb in bound_prefix.keys()}
for hh in hemi_suffix.keys()}
fpout = {hh: {bb: None for bb in bound_prefix.keys()}
for hh in hemi_suffix.keys()}
with open(bound_files[1]['PO'], 'w') as fpout[1]['PO'], \
open(bound_files[-1]['PO'], 'w') as fpout[-1]['PO'], \
open(bound_files[1]['EQ'], 'w') as fpout[1]['EQ'], \
open(bound_files[-1]['EQ'], 'w') as fpout[-1]['EQ']:
# Output the header in each file
for hh in hemi_suffix.keys():
for bb in bound_prefix.keys():
fpout[hh][bb].write(out_head)
        # Cycle through all the SSJ CSV files, outputting appropriate data into
# the desired boundary and hemisphere file
for infile in csv_files:
# Get spacecraft number and date from filename
filename_sec = os.path.split(infile)[-1].split(
'dmsp-f')[-1].split('_')
sc = int(filename_sec[0])
file_date = dt.datetime.strptime(filename_sec[3], '%Y%m%d')
# Get the header line for the data and determine the number of
            # comment lines preceding the header
skiprows = 1
with open(infile, 'r') as fpin:
head_line = fpin.readline()
while head_line.find("#") == 0:
skiprows += 1
head_line = fpin.readline()
header_list = head_line.split("\n")[0].split(",")
# Load the file data
data = np.loadtxt(infile, skiprows=skiprows, delimiter=',')
if data.shape[1] != len(header_list):
bad_files.append(infile)
else:
# Establish the desired data indices
time_ind = {bb: [header_list.index('UTSec {:s}{:d}'.format(
bb, i)) for i in [1, 2]] for bb in bound_prefix.keys()}
lat_ind = {bb:
[header_list.index(
'SC_GEOCENTRIC_LAT {:s}{:d}'.format(bb, i))
for i in [1, 2]] for bb in bound_prefix.keys()}
lon_ind = {bb:
[header_list.index(
'SC_GEOCENTRIC_LON {:s}{:d}'.format(bb, i))
for i in [1, 2]] for bb in bound_prefix.keys()}
# Calculate the midpoint seconds of day
mid_utsec = {bb: 0.5 * (data[:, time_ind[bb][1]]
+ data[:, time_ind[bb][0]])
for bb in time_ind.keys()}
# Select the hemisphere and FOM
hemi = data[:, header_list.index('hemisphere')]
fom = data[:, header_list.index('FOM')]
# Cycle through each line of data, calculating the
# necessary information
for iline, data_line in enumerate(data):
hh = hemi[iline]
# Get the boundary locations using the midpoint time
# (sufficiently accurate at current sec. var.) for each
# boundary
for bb in bound_prefix.keys():
mid_time = file_date + dt.timedelta(
seconds=mid_utsec[bb][iline])
mloc = aacgmv2.get_aacgm_coord_arr(
data_line[lat_ind[bb]], data_line[lon_ind[bb]],
ref_alt, mid_time, method=method)
# Determine the circle radius in degrees
rad = 0.5 * abs(mloc[0][0] - mloc[0][1])
# Get the X-Y coordinates of each pass where X is
# positive towards dawn and Y is positive towards noon
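                        # MLT is in hours: 15 degrees per hour, offset by -90
                        # degrees so that 06 MLT (dawn) lies along +X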
theta = np.radians(mloc[2] * 15.0 - 90.0)
x = (90.0 - abs(mloc[0])) * np.cos(theta)
y = (90.0 - abs(mloc[0])) * np.sin(theta)
# The midpoint is the center of this circle
mid_x = 0.5 * sum(x)
mid_y = 0.5 * sum(y)
                        # Prepare the output line, which has the format:
                        # sc date time r x y fom x_1 y_1 x_2 y_2
out_line = " ".join(
["{:d}".format(sc),
mid_time.strftime('%Y-%m-%d %H:%M:%S'),
" ".join(["{:.3f}".format(val)
for val in [rad, mid_x, mid_y,
fom[iline], x[0], y[0],
x[1], y[1]]]), "\n"])
# Write the output line to the file
fpout[hh][bb].write(out_line)
# If some input files were not processed, inform the user
if len(bad_files) > 0:
ocbpy.logger.warning("unable to format {:d} input files: {:}".format(
len(bad_files), bad_files))
# Recast the output file dictionary as a flat list
bound_files = np.array([[fname for fname in ff.values()]
for ff in bound_files.values()])
return list(bound_files.flatten())
def fetch_format_ssj_boundary_files(stime, etime, out_dir=None, rm_temp=True,
ref_alt=830.0,
method='GEOCENTRIC|ALLOWTRACE'):
""" Download DMSP SSJ data and create boundary files for each hemisphere
Parameters
----------
stime : dt.datetime
Start time
etime : dt.datetime
End time
out_dir : str or NoneType
Output directory or None to download to ocbpy boundary directory
(default=None)
rm_temp : bool
Remove all files that are not the final boundary files (default=True)
ref_alt : float
Reference altitude for boundary locations in km (default=830.0)
method : str
AACGMV2 method, may use 'TRACE', 'ALLOWTRACE', 'BADIDEA', 'GEOCENTRIC'
[2]_ (default='GEOCENTRIC|ALLOWTRACE')
Returns
-------
bound_files : list
List of the boundary file names
See Also
--------
aacgmv2
"""
# Fetch the DMSP SSJ files for all available DMSP satellites
dmsp_files = fetch_ssj_files(stime, etime, out_dir=out_dir)
if len(dmsp_files) == 0:
raise ValueError("".join(["unable to download any DMSP SSJ files ",
"between {:} and {:}".format(stime, etime)]))
# Create CSV files with geographic coordinates for the boundary locations
csv_files = create_ssj_boundary_files(dmsp_files, out_dir=out_dir)
# Test to see if there are any DMSP processed files
if len(csv_files) == 0:
raise ValueError("".join(["unable to process the downloaded SSJ files",
" {:}".format(dmsp_files)]))
# Remove the DMSP files, as their data has been processed
if rm_temp:
for tmp_file in dmsp_files:
os.remove(tmp_file)
# Create the boundary files
bound_files = format_ssj_boundary_files(csv_files, ref_alt=ref_alt,
method=method)
# Remove the CSV files, as their data has been processed
if rm_temp:
for tmp_file in csv_files:
os.remove(tmp_file)
return bound_files
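# Hedged usage sketch (dates are illustrative; requires network access and the
# optional ssj_auroral_boundary dependencies):
#
#     import datetime as dt
#     bfiles = fetch_format_ssj_boundary_files(dt.datetime(2010, 1, 1),
#                                              dt.datetime(2010, 1, 3))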
|
# Copyright (C) 2015 Stefan C. Mueller
import unittest
import pytest
import ifaddr
import ifaddr.netifaces
try:
import netifaces
except ImportError:
skip_netifaces = True
else:
skip_netifaces = False
class TestIfaddr(unittest.TestCase):
"""
Unittests for :mod:`ifaddr`.
There isn't much unit-testing that can be done without making assumptions
on the system or mocking of operating system APIs. So this just contains
a sanity check for the moment.
"""
def test_get_adapters_contains_localhost(self) -> None:
found = False
adapters = ifaddr.get_adapters()
for adapter in adapters:
for ip in adapter.ips:
if ip.ip == "127.0.0.1":
found = True
self.assertTrue(found, "No adapter has IP 127.0.0.1: %s" % str(adapters))
@pytest.mark.skipif(skip_netifaces, reason='netifaces not installed')
def test_netifaces_compatibility() -> None:
interfaces = ifaddr.netifaces.interfaces()
assert interfaces == netifaces.interfaces()
# TODO: implement those as well
# for interface in interfaces:
# print(interface)
# assert ifaddr.netifaces.ifaddresses(interface) == netifaces.ifaddresses(interface)
# assert ifaddr.netifaces.gateways() == netifaces.gateways()
|
# -*- coding: utf-8 -*-
# Generated by Django 1.9 on 2017-04-20 11:42
from __future__ import unicode_literals
import django.db.models.deletion
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('base', '0112_offeryear_offer_type'),
]
operations = [
migrations.CreateModel(
name='Entity',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('organization', models.ForeignKey(null=True, on_delete=django.db.models.deletion.CASCADE, to='base.Organization')),
],
),
migrations.CreateModel(
name='EntityAddress',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('label', models.CharField(max_length=20, null=True)),
('location', models.CharField(max_length=255, null=True)),
('postal_code', models.CharField(max_length=20, null=True)),
('city', models.CharField(max_length=255, null=True)),
('country', models.CharField(max_length=255, null=True)),
('entity', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='base.Entity')),
],
),
migrations.CreateModel(
name='EntityLink',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('start_date', models.DateField(db_index=True)),
('end_date', models.DateField(db_index=True)),
('child', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='child', to='base.Entity')),
('parent', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='parent', to='base.Entity')),
],
),
migrations.CreateModel(
name='EntityVersion',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('title', models.CharField(max_length=255)),
('acronym', models.CharField(max_length=20)),
('entity_type', models.CharField(choices=[('SECTOR', 'SECTOR'), ('FACULTY', 'FACULTY'), ('SCHOOL', 'SCHOOL'), ('INSTITUTE', 'INSTITUTE'), ('POLE', 'POLE'), ('DOCTORAL_COMMISSION', 'DOCTORAL_COMMISSION'), ('PLATFORM', 'PLATFORM'), ('LOGISTICS_ENTITY', 'LOGISTICS_ENTITY'), ('UNDEFINED', 'UNDEFINED')], db_index=True, max_length=50)),
('start_date', models.DateField(db_index=True)),
('end_date', models.DateField(db_index=True)),
('entity', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='base.Entity')),
],
),
]
|
"""passlib.crypto._blowfish - pure-python eks-blowfish implementation for bcrypt
This is a pure-python implementation of the EKS-Blowfish algorithm described by
Provos and Mazieres in `A Future-Adaptable Password Scheme
<http://www.openbsd.org/papers/bcrypt-paper.ps>`_.
This package contains two submodules:
* ``_blowfish/base.py`` contains a class implementing the eks-blowfish algorithm
using easy-to-examine code.
* ``_blowfish/unrolled.py`` contains a subclass which replaces some methods
of the original class with sped-up versions, mainly using unrolled loops
and local variables. this is the class which is actually used by
Passlib to perform BCrypt in pure python.
This module is auto-generated by a script, ``_blowfish/_gen_files.py``.
Status
------
This implementation is usable, but is an order of magnitude too slow to be
usable with real security. For "ok" security, BCrypt hashes should have at
least 2**11 rounds (as of 2011). Assuming a desired response time <= 100ms,
this means a BCrypt implementation should get at least 20 rounds/ms in order
to be both usable *and* secure. On a 2 ghz cpu, this implementation gets
roughly 0.09 rounds/ms under CPython (220x too slow), and 1.9 rounds/ms
under PyPy (10x too slow).
History
-------
While subsequently modified considerably for Passlib, this code was originally
based on `jBcrypt 0.2 <http://www.mindrot.org/projects/jBCrypt/>`_, which was
released under the BSD license::
Copyright (c) 2006 Damien Miller <[email protected]>
Permission to use, copy, modify, and distribute this software for any
purpose with or without fee is hereby granted, provided that the above
copyright notice and this permission notice appear in all copies.
THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
"""
#=============================================================================
# imports
#=============================================================================
# core
from itertools import chain
import struct
# pkg
from passlib.utils import getrandbytes, rng
from passlib.utils.binary import bcrypt64
from passlib.utils.compat import BytesIO, unicode, u, native_string_types
from passlib.crypto._blowfish.unrolled import BlowfishEngine
# local
__all__ = [
'BlowfishEngine',
'raw_bcrypt',
]
#=============================================================================
# bcrypt constants
#=============================================================================
# bcrypt constant data "OrpheanBeholderScryDoubt" as 6 integers
BCRYPT_CDATA = [
0x4f727068, 0x65616e42, 0x65686f6c,
0x64657253, 0x63727944, 0x6f756274
]
# struct used to encode ciphertext as digest (last output byte discarded)
digest_struct = struct.Struct(">6I")
#=============================================================================
# base bcrypt helper
#
# interface designed only for use by passlib.handlers.bcrypt:BCrypt
# probably not suitable for other purposes
#=============================================================================
BNULL = b'\x00'
def raw_bcrypt(password, ident, salt, log_rounds):
"""perform central password hashing step in bcrypt scheme.
:param password: the password to hash
:param ident: identifier w/ minor version (e.g. 2, 2a)
:param salt: the binary salt to use (encoded in bcrypt-base64)
:param log_rounds: the log2 of the number of rounds (as int)
:returns: bcrypt-base64 encoded checksum
"""
#===================================================================
# parse inputs
#===================================================================
# parse ident
assert isinstance(ident, native_string_types)
add_null_padding = True
if ident == u('2a') or ident == u('2y') or ident == u('2b'):
pass
elif ident == u('2'):
add_null_padding = False
elif ident == u('2x'):
raise ValueError("crypt_blowfish's buggy '2x' hashes are not "
"currently supported")
else:
raise ValueError("unknown ident: %r" % (ident,))
# decode & validate salt
assert isinstance(salt, bytes)
salt = bcrypt64.decode_bytes(salt)
if len(salt) < 16:
raise ValueError("Missing salt bytes")
elif len(salt) > 16:
salt = salt[:16]
# prepare password
assert isinstance(password, bytes)
if add_null_padding:
password += BNULL
# validate rounds
if log_rounds < 4 or log_rounds > 31:
raise ValueError("Bad number of rounds")
#===================================================================
#
# run EKS-Blowfish algorithm
#
# This uses the "enhanced key schedule" step described by
# Provos and Mazieres in "A Future-Adaptable Password Scheme"
# http://www.openbsd.org/papers/bcrypt-paper.ps
#
#===================================================================
engine = BlowfishEngine()
# convert password & salt into list of 18 32-bit integers (72 bytes total).
pass_words = engine.key_to_words(password)
salt_words = engine.key_to_words(salt)
# truncate salt_words to original 16 byte salt, or loop won't wrap
# correctly when passed to .eks_salted_expand()
salt_words16 = salt_words[:4]
# do EKS key schedule setup
engine.eks_salted_expand(pass_words, salt_words16)
# apply password & salt keys to key schedule a bunch more times.
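    # cost factor: 2**log_rounds expansion rounds; each +1 to log_rounds
    # doubles the work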
rounds = 1<<log_rounds
engine.eks_repeated_expand(pass_words, salt_words, rounds)
# encipher constant data, and encode to bytes as digest.
data = list(BCRYPT_CDATA)
i = 0
while i < 6:
data[i], data[i+1] = engine.repeat_encipher(data[i], data[i+1], 64)
i += 2
raw = digest_struct.pack(*data)[:-1]
return bcrypt64.encode_bytes(raw)
#=============================================================================
# eof
#=============================================================================
|
"""SCons.Tool.dvips
Tool-specific initialization for dvips.
There normally shouldn't be any need to import this module directly.
It will usually be imported through the generic SCons.Tool.Tool()
selection method.
"""
#
# Copyright (c) 2001, 2002, 2003, 2004, 2005, 2006, 2007, 2008, 2009, 2010 The SCons Foundation
#
# Permission is hereby granted, free of charge, to any person obtaining
# a copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of the Software, and to
# permit persons to whom the Software is furnished to do so, subject to
# the following conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY
# KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE
# WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
# LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
# OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
# WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
#
__revision__ = "src/engine/SCons/Tool/dvips.py 4720 2010/03/24 03:14:11 jars"
import SCons.Action
import SCons.Builder
import SCons.Tool.dvipdf
import SCons.Util
def DviPsFunction(target = None, source= None, env=None):
result = SCons.Tool.dvipdf.DviPdfPsFunction(PSAction,target,source,env)
return result
def DviPsStrFunction(target = None, source= None, env=None):
"""A strfunction for dvipdf that returns the appropriate
command string for the no_exec options."""
if env.GetOption("no_exec"):
result = env.subst('$PSCOM',0,target,source)
else:
result = ''
return result
PSAction = None
DVIPSAction = None
PSBuilder = None
def generate(env):
"""Add Builders and construction variables for dvips to an Environment."""
global PSAction
if PSAction is None:
PSAction = SCons.Action.Action('$PSCOM', '$PSCOMSTR')
global DVIPSAction
if DVIPSAction is None:
DVIPSAction = SCons.Action.Action(DviPsFunction, strfunction = DviPsStrFunction)
global PSBuilder
if PSBuilder is None:
PSBuilder = SCons.Builder.Builder(action = PSAction,
prefix = '$PSPREFIX',
suffix = '$PSSUFFIX',
src_suffix = '.dvi',
src_builder = 'DVI',
single_source=True)
env['BUILDERS']['PostScript'] = PSBuilder
env['DVIPS'] = 'dvips'
env['DVIPSFLAGS'] = SCons.Util.CLVar('')
# I'm not quite sure I got the directories and filenames right for variant_dir
# We need to be in the correct directory for the sake of latex \includegraphics eps included files.
env['PSCOM'] = 'cd ${TARGET.dir} && $DVIPS $DVIPSFLAGS -o ${TARGET.file} ${SOURCE.file}'
env['PSPREFIX'] = ''
env['PSSUFFIX'] = '.ps'
def exists(env):
return env.Detect('dvips')
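# Hedged usage sketch for an SConstruct (filenames are illustrative):
#
#     env = Environment(tools=['dvips'])
#     env.PostScript(target='paper.ps', source='paper.dvi')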
# Local Variables:
# tab-width:4
# indent-tabs-mode:nil
# End:
# vim: set expandtab tabstop=4 shiftwidth=4:
|
from opengl_gui.widget import Widget
from OpenGL.GL import *
from OpenGL.GLUT import *
from OpenGL.GLU import *
import numpy
class WidgetFrame(Widget):
def __init__(self, visualisation, variables, textures, params):
super().__init__(visualisation, variables, textures, params)
self.width = float(self.params["width"])
self.height = float(self.params["height"])
self.label = str(self.params["label"])
self.filled = bool(self.params["filled"])
self.frame_width = float(self.params["frame_width"])
def render(self):
if self.visible:
self.visualisation.push()
self.visualisation.translate(self.x, self.y, self.z)
self.render_frame()
for i in range(len(self.child_widgets)):
self.child_widgets[i].render()
self.visualisation.pop()
def render_frame(self):
if self.filled:
self.visualisation.push()
self.visualisation.set_color(self.r*0.01, self.g*0.01, self.b*0.01)
self.visualisation.paint_rectangle(self.width, self.height)
self.visualisation.pop()
self.visualisation.push()
self.visualisation.set_color(self.r, self.g, self.b)
self.visualisation.translate(0, -self.height/2.0 - self.frame_width/2.0, 0.0)
self.visualisation.paint_rectangle(self.width, self.frame_width)
self.visualisation.pop()
self.visualisation.push()
self.visualisation.set_color(self.r, self.g, self.b)
self.visualisation.translate(0, self.height/2.0 - self.frame_width/2.0, 0.0)
self.visualisation.paint_rectangle(self.width, self.frame_width)
self.visualisation.pop()
self.visualisation.push()
self.visualisation.set_color(self.r, self.g, self.b)
self.visualisation.translate(-self.width/2.0 + self.frame_width/2.0, 0.0, 0.0)
self.visualisation.paint_rectangle(self.frame_width, self.height)
self.visualisation.pop()
self.visualisation.push()
self.visualisation.set_color(self.r, self.g, self.b)
self.visualisation.translate(self.width/2.0 - self.frame_width/2.0, 0.0, 0.0)
self.visualisation.paint_rectangle(self.frame_width, self.height)
self.visualisation.pop()
self.visualisation.push()
frame_width = 0.2
height_ = frame_width/2.0
k1 = 1.0
k2 = 0.5
self.visualisation.translate(0, self.height/2.0 + frame_width/4.0, 0.0)
glBegin(GL_QUADS)
glColor3f(self.r*k1, self.g*k1, self.b*k1)
glVertex3f( + self.width/2.0, + height_/2.0, 0.0)
glColor3f(self.r*k2, self.g*k2, self.b*k2)
glVertex3f( - self.width/2.0, + height_/2.0, 0.0)
glColor3f(self.r*k2, self.g*k2, self.b*k2)
glVertex3f( - self.width/2.0, - height_/2.0, 0.0)
glColor3f(self.r*k1, self.g*k1, self.b*k1)
glVertex3f( + self.width/2.0, - height_/2.0, 0.0)
glEnd()
self.visualisation.pop()
self.visualisation.push()
self.visualisation.set_color(1, 1, 1)
self.visualisation.print_string(0.02 + -self.width/2, self.height/2 + 0.025, 0, self.label, self.font_size)
self.visualisation.pop()
|
import sys
from Bio import Seq
from Bio import SeqIO
from Bio import SeqRecord
import pandas as pd
import numpy as np
import requests
import ms_module as ms
#
#
session = requests.Session()
#
#
pep_info = pd.read_csv("../peptides.xls",sep='\t')
# interesting column names ...
# cols = ['Protein accession numbers','Assigned','Other Proteins']
spec_info = pd.read_csv("../specs.xls",sep='\t')
#
#
# BRIEF PEPTIDE SUMMARY ...
peptides_num = pep_info.shape[0]
peptides_unambiguously_assigned = pep_info['Assigned'].sum()
# getting the number of extractable Uid-s ...
check_uid = lambda line: bool( line.split('|')[1] if len(line.split('|'))>1 else None )
extractable_uids = pep_info['Protein accession numbers'].apply(check_uid).sum()
# number of peptides matched ambiguously ...
check_ambig_uid = lambda row: bool(True if (len(row[0].split('|'))>1)and(len(str(row[1]).split('|'))>1) else False)
ambig_uids_extracted = pep_info[['Protein accession numbers','Other Proteins']].apply(check_ambig_uid,axis='columns').sum()
#
print
print "--- PEPTIDE REPORT ---"
print
print "Total peptides detected %d"%peptides_num
print "There are %d Unknown (not-mapped) peptides out of these %d"%((peptides_num - extractable_uids),peptides_num)
print "Accordingly, %d peptides have at least 1 associated protein in the databsae."%extractable_uids
print "Among them, %d are claimed unassigned (ambiguously assigned), while %d of them have extractable ids"%((peptides_num - peptides_unambiguously_assigned),ambig_uids_extracted)
print "However, there are many peptides assigned to more than 1 proteins (i.e., multiple Uniprot ids)"
print "Yet, these ids are usually reffering to the same exact protein of its isoform."
print
print "--- PEPTIDE REPORT COMPLETE ---"
print
#
##########################################################
print
print "comparing spectrum data with the peptide data ..."
unique_ids_pept = pep_info['Protein accession numbers'].unique().shape[0]
unique_ids_spec = spec_info['Protein accession numbers'].unique().shape[0]
###############################################################
combine_uids = lambda row: ','.join(map(str,row))# if row[1] is not None else row[0]
###############################################################
unique_ids_pept_ambig = pep_info[['Protein accession numbers','Other Proteins']].apply(combine_uids,axis='columns').unique().shape[0]
unique_ids_spec_ambig = spec_info[['Protein accession numbers','Other Proteins']].apply(combine_uids,axis='columns').unique().shape[0]
###############################################################
print "Number of unique uniprot ids in SPECTRUM: %d and in PEPTIDE: %d"%(unique_ids_spec,unique_ids_pept)
print "Same, but combining original accessions with 'Other Proteins'"
print "Number of unique uniprot ids in SPECTRUM: %d and in PEPTIDE: %d"%(unique_ids_spec_ambig,unique_ids_pept_ambig)
print "Those numbers included Unknowns!"
##########################################################
#
##########################################################
def combine_uids(row):
first = '' if pd.isnull(row[0]) else ('' if len(row[0].split('|'))<=1 else row[0])
second = row[1] if pd.notnull(row[1]) else ''
return ','.join([first,second])
# # let's generate exhaustive list of all Uniprot Ids that are present in the pe_info ...
uid_list = []
for uids in pep_info[['Protein accession numbers','Other Proteins']].apply(combine_uids,axis='columns').unique():
for uid in uids.strip(',').split(','):
uid_list.append(uid)
# once uids are extracted ...
extract_uid = lambda _: _.split('|')[1]
unique_uid_list = [extract_uid(_) for _ in np.unique(uid_list)]
# to be continued ...
# extracting UID from protein accession numbers ...
# this way we return None for the Unknown entries ...
extract_uid = lambda line: line.split('|')[1] if len(line.split('|'))>1 else None
# get a single unique Uniprot ID ...
pep_info['uid'] = pep_info['Protein accession numbers'].apply(extract_uid)
# fetch protein sequence for each of the Uid-s ...
fetching = False
if fetching:
print "fetching from uniprot.org ..."
pep_info['fasta'] = pep_info['uid'].apply(lambda _: ms.get_uniprot(session,_))
print "fetching complete"
# Align peptide sequence to the extracted protein sequence and find the peptide starting position ...
pep_info['my_start'] = pep_info[ ['Peptide sequence','fasta'] ].apply(lambda _:ms.stupid_aligner(*_),axis='columns')
# c = ['Protein name',
# 'Protein accession numbers',
# 'Database sources',
# 'Exclusive unique peptide count',
# 'Peptide sequence',
# 'Previous amino acid',
# 'Next amino acid',
# 'Peptide start index',
# 'Peptide stop index',
# 'Star Category',
# 'Assigned',
# 'Other Proteins',
# 'uid']
##########################################
# c2 = ['Protein name',
# 'Peptide sequence',
# 'Other Proteins']
##########################################
# ['Protein name','Other Proteins']
##########################################
# sss = ">sp|P15586-2|GNS_HUMAN Isoform 2 of N-acetylglucosamine-6-sulfatase OS=Homo sapiens GN=GNS\n\
# MRLLPLAPGRLRRGSPRHLPSCSPALLLLVLGGCLGVFGVAAGTRRPNVVLLLTDDQDEV\
# LGGMYVPSALCCPSRASILTGKYPHNHHVVNNTLEGNCSSKSWQKIQEPNTFPAILRSMC\
# GYQTFFAGKYLNEYGAPDAGGLEHVPLGWSYWYALEKNSKYYNYTLSINGKARKHGENYS\
# VDYLTDVLANVSLDFLDYKSNFEPFFMMIATPAPHSPWTAAPQYQKAFQNVFAPRNKNFN\
# IHGTNKHWLIRQAKTPMTNSSIQFLDNAFRKRWQTLLSVDDLVEKLVKRLEFTGELNNTY\
# IFYTSDNGYHTGQFSLPIDKRQLYEFDIKVPLLVRGPGIKPNQTSKMLVANIDLGPTILD\
# IAGYDLNKTQMDGMSLLPILRGASNLTWRSDVLVEYQGEGRNVTDPTCPSLSPGVSQCFP\
# DCVCEDAYNNTYACVRTMSALWNLQYCEFDDQEVFVEVYNLTADPDQITNIAKTIDPELL\
# GKMNYRLMMLQSCSGPTCRTPGVFDPGYRFDPRLMFSNRGSVRTRRFSKHLL"
|
"""
Plot a single combined plot for kinematic analysis on planar sliding, wedge
sliding and flexural toppling
Reference:
Wyllie, D.C. and Mah, C.W. (2004) Rock Slope Engineering. 4th Edition,
E & FN Spon, London, 431. (P.39)
"""
import numpy as np
import matplotlib.pyplot as plt
import mplstereonet.kinematic_analysis as kinematic
from mplstereonet import stereonet_math
# Import data
discontinuity = np.loadtxt('kinematic_data1.txt', delimiter=',')
intersections = np.loadtxt('kinematic_data2.txt', delimiter=',')
jstrikes = discontinuity[:,1] - 90
jdips = discontinuity[:,0]
ibearings = intersections[:,1]
iplunges = intersections[:,0]
# set up analyses
strike, dip = 90, 75
P4 = kinematic.PlanarSliding(strike, dip)
T4 = kinematic.FlexuralToppling(strike, dip)
# Plot the kinematic analysis elements
P4.plot_kinematic(main_kws={'label':'Planar / Wedge Sliding'},
secondary_kws={'label':'Wedge Sliding'})
T4.plot_kinematic(ax=plt.gca(), slopeface=False, construction_lines=False,
main_kws={'color':'blue', 'label':'Flexural Toppling'})
# Plot data (here intersections should be plotted as poles too)
ax=plt.gca()
ax.pole(jstrikes, jdips, ms=2, label='Discontinuities (Poles)')
ax.pole(ibearings-90, iplunges, '+r', label='Intersections (Poles)')
ax.legend(loc='lower left', bbox_to_anchor=(0.75, 0.9))
plt.show()
|
#!/usr/bin/python
# Created by n0ipr0cs
import getpass
import hashlib
import sys
print ""
print " / )( \ / _\ / ___)/ )( \/ ___)( __) / _\ ( \/ )( __)"
print " ) __ (/ \\___ \) __ (\___ \ ) _) / \/ \/ \ ) _)"
print " \_)(_/\_/\_/(____/\_)(_/(____/(____)\_/\_/\_)(_/(____)"
print "***********************by n0ipr0cs***************************"
# definitions of the hash functions
def MD5_HASH():
passwd = getpass.getpass()
HASH_MD5 = hashlib.md5(passwd).hexdigest()
print HASH_MD5
return
def SHA1_HASH():
    passwd = getpass.getpass()
    HASH_SHA1 = hashlib.sha1(passwd).hexdigest()
    print HASH_SHA1
    return
def SHA256_HASH():
    passwd = getpass.getpass()
    HASH_SHA256 = hashlib.sha256(passwd).hexdigest()
    print HASH_SHA256
    return
def SHA512_HASH():
    passwd = getpass.getpass()
    HASH_SHA512 = hashlib.sha512(passwd).hexdigest()
    print HASH_SHA512
    return
print " 1 .md5"
print " 2 .sha1"
print " 3 .sha256"
print " 4 .sha512"
VAR_1 = raw_input("Elige un algoritmo para tu password")
if VAR_1 == "1":
MD5_HASH()
elif VAR_1 == "2":
SHA1_HASH()
elif VAR_1 == "3":
SHA256_HASH()
elif VAR_1 == "4":
SHA512_HASH()
else:
print "ERROR!!! Elige una opcion valida"
sys.exit(1)
sys.exit(0)
|
# -*- coding: utf-8 -*-
#
# This file is part of Invenio.
# Copyright (C) 2016 CERN.
#
# Invenio is free software; you can redistribute it
# and/or modify it under the terms of the GNU General Public License as
# published by the Free Software Foundation; either version 2 of the
# License, or (at your option) any later version.
#
# Invenio is distributed in the hope that it will be
# useful, but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Invenio; if not, write to the
# Free Software Foundation, Inc., 59 Temple Place, Suite 330, Boston,
# MA 02111-1307, USA.
#
# In applying this license, CERN does not
# waive the privileges and immunities granted to it by virtue of its status
# as an Intergovernmental Organization or submit itself to any jurisdiction.
"""Invenio module for running workflows."""
from __future__ import absolute_import, print_function
import pkg_resources
from werkzeug.utils import cached_property
from .utils import obj_or_import_string
class _WorkflowState(object):
"""State of registered workflows."""
def __init__(self, app, entry_point_group=None, cache=None):
"""Initialize state."""
self.app = app
self.workflows = {}
if entry_point_group:
self.load_entry_point_group(entry_point_group)
@cached_property
def workflow_object_class(self):
return obj_or_import_string(
self.app.config.get('WORKFLOWS_OBJECT_CLASS')
)
def register_workflow(self, name, workflow):
"""Register an workflow to be showed in the workflows list."""
assert name not in self.workflows
self.workflows[name] = workflow
def load_entry_point_group(self, entry_point_group):
"""Load workflows from an entry point group."""
for ep in pkg_resources.iter_entry_points(group=entry_point_group):
self.register_workflow(ep.name, ep.load())
class InvenioWorkflows(object):
"""invenio-workflows extension."""
def __init__(self, app=None, **kwargs):
"""Extension initialization."""
if app:
self._state = self.init_app(app, **kwargs)
def init_app(self, app,
entry_point_group='invenio_workflows.workflows',
**kwargs):
"""Flask application initialization."""
app.config.setdefault(
"WORKFLOWS_OBJECT_CLASS",
"invenio_workflows.api.WorkflowObject"
)
state = _WorkflowState(
app, entry_point_group=entry_point_group, **kwargs
)
app.extensions['invenio-workflows'] = state
return state
def __getattr__(self, name):
"""Proxy to state object."""
return getattr(self._state, name, None)
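# Usage sketch (not part of this module; the Flask app and `my_workflow`
# callable are assumptions):
#
#     from flask import Flask
#
#     app = Flask(__name__)
#     ext = InvenioWorkflows(app)
#     ext.register_workflow('demo', my_workflow)
#     assert 'invenio-workflows' in app.extensions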
|
import sys
from twisted.internet import defer, reactor, error
from twisted.python.failure import Failure
from twisted.python.filepath import FilePath
from twisted.conch.endpoints import SSHCommandClientEndpoint, _CommandChannel
from twisted.internet.protocol import Factory, Protocol
from blinker import signal
from plait.task import Task
from plait.spool import SpoolingSignalProtocol, SpoolingProtocol
from plait.errors import TimeoutError, TaskError
from plait.utils import parse_host_string, QuietConsoleUI, timeout, AttributeString
# the default channel does not forward extended data (stderr) to the protocol; this one does
class WorkerChannel(_CommandChannel):
def extReceived(self, dataType, data):
if hasattr(self._protocol, 'extReceived'):
self._protocol.extReceived(dataType, data)
# endpoint that uses the channel above
class WorkerEndpoint(SSHCommandClientEndpoint):
commandConnected = defer.Deferred()
def _executeCommand(self, connection, protocolFactory):
commandConnected = defer.Deferred()
def disconnectOnFailure(passthrough):
immediate = passthrough.check(defer.CancelledError)
self._creator.cleanupConnection(connection, immediate)
return passthrough
commandConnected.addErrback(disconnectOnFailure)
channel = WorkerChannel(
self._creator, self._command, protocolFactory, commandConnected)
connection.openChannel(channel)
return commandConnected
class PlaitWorker(Factory):
"""
Executes a sequence of tasks against a remote host.
When run, an initial SSH connection is established to the remote host.
For efficiency's sake, all subsequent remote operations reuse the
same connection and execute over a new channel.
    Each task is executed in a daemon thread, which is killed when the
    main thread exits. When a task runs a remote operation, it blocks on
    a call into the worker in the main reactor thread, where the network
    operations are negotiated. The result is then returned to the thread,
    which resumes execution.
There are a number of signals emitted for workers:
- timeout : seconds
- fail : failure
- connect : user, host, port
- task_start : task
    - task_finish : result
- stdout : line
- stderr : line
- complete :
"""
def __init__(self, tasks, keys, agent, known_hosts, timeout, all_tasks=False):
        self.protocol = None
self.host_string = None
self.user = None
self.host = None
self.port = None
self.tasks = tasks
self.keys = keys
        self.agent = agent
        self.known_hosts = known_hosts
self.timeout = timeout
self.all_tasks = all_tasks
self.lines = 0
self.tasks_by_uid = dict()
def __str__(self):
return self.host_string
def buildProtocol(self, addr):
# construct protocol and wire up io signals
self.protocol = SpoolingSignalProtocol('stdout', 'stderr', sender=self.host_string)
return self.protocol
def makeConnectEndpoint(self):
"""
Endpoint for initial SSH host connection.
"""
return WorkerEndpoint.newConnection(
reactor, b"cat",
self.user, self.host, self.port,
            keys=self.keys, agentEndpoint=self.agent,
            knownHosts=self.known_hosts, ui=QuietConsoleUI())
def makeCommandEndpoint(self, command):
"""
Endpoint for remotely executing operations.
"""
return WorkerEndpoint.existingConnection(
self.protocol.transport.conn, command.encode('utf8'))
@defer.inlineCallbacks
def connect(self, host_string):
"""
Establish initial SSH connection to remote host.
"""
self.parse_host_string(host_string)
endpoint = self.makeConnectEndpoint()
yield timeout(self.timeout, endpoint.connect(self))
signal('worker_connect').send(self)
def parse_host_string(self, host_string):
self.host_string = host_string
self.user, self.host, self.port = parse_host_string(host_string)
@property
def label(self):
return "{}@{}".format(self.user, self.host)
def stdout(self, thread_name, data=None):
task = self.tasks_by_uid[thread_name]
task.has_output = True
signal('worker_stdout').send(self, data=data)
def stderr(self, thread_name, data=None):
task = self.tasks_by_uid[thread_name]
task.has_output = True
signal('worker_stderr').send(self, data=data)
def runTask(self, task):
# listen to the output of this task
signal('stdout').connect(self.stdout, sender=task.uid)
signal('stderr').connect(self.stderr, sender=task.uid)
# signal that the task has begun
signal('task_start').send(self, task=task)
return task.run()
@defer.inlineCallbacks
def run(self):
"""
Execute each task in a Task thread.
"""
# execute each task in sequence
for name, func, args, kwargs in self.tasks:
task = Task(self, name, func, args, kwargs)
self.tasks_by_uid[task.uid] = task
result = yield self.runTask(task)
            # tasks will return an Exception if there was a failure
if isinstance(result, BaseException):
# wrap it so the runner recognizes this as an expected exception
# and doesn't emit generic worker exception signals
e = TaskError("Task `{}` failed.".format(name))
e.task = task
e.failure = result
raise e
# otherwise it may optionally return a completion value
elif self.all_tasks and not (result or task.has_output):
e = TaskError("Task returned empty result.")
e.task = task
e.failure = e
raise e
else:
signal('task_finish').send(self, task=task, result=result)
@defer.inlineCallbacks
def execFromThread(self, command):
"""
API for tasks to execute ssh commands.
"""
ep = self.makeCommandEndpoint(command)
yield ep.connect(self)
failed = False
try:
yield self.protocol.finished
except error.ProcessTerminated as e:
failed = True
# flush output from proto accumulated during execution
stdout, stderr = self.protocol.flush()
result = AttributeString(stdout)
result.stderr = stderr
result.failed = failed
result.succeeded = not failed
result.command = command
defer.returnValue(result)
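# Consumer sketch (not part of this module): subscribing to one of the worker
# signals documented on PlaitWorker; the handler name is an assumption.
#
#     from blinker import signal
#
#     def on_worker_stdout(sender, data=None):
#         # `sender` is the emitting PlaitWorker, `data` the output line
#         print("[{}] {}".format(sender, data))
#
#     signal('worker_stdout').connect(on_worker_stdout)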
|
# -*- coding: UTF-8 -*-
# Copyright (c) 2015-2021 Jose Antonio Chavarría <[email protected]>
# Copyright (c) 2015-2021 Alberto Gacías <[email protected]>
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
from django.utils.translation import gettext as _
from rest_framework import status, permissions
from rest_framework.decorators import action, permission_classes
from rest_framework.response import Response
from ...client.models import StatusLog
from .events import event_by_month, month_interval, EventViewSet
@permission_classes((permissions.IsAuthenticated,))
class StatusLogStatsViewSet(EventViewSet):
@action(methods=['get'], detail=False, url_path='status')
def by_status(self, request, format=None):
data = StatusLog.by_status(request.user.userprofile)
return Response(
{
'title': _('Status Logs / Status'),
'total': data['total'],
'inner': data['inner'],
'outer': data['outer']
},
status=status.HTTP_200_OK
)
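    # Response shape sketch (values depend on StatusLog.by_status):
    # {
    #     "title": "Status Logs / Status",
    #     "total": <int>,
    #     "inner": [...],
    #     "outer": [...]
    # }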
@action(methods=['get'], detail=False, url_path='month')
def status_by_month(self, request, format=None):
begin_date, end_date = month_interval()
data = event_by_month(
StatusLog.stacked_by_month(
request.user.userprofile, begin_date, field='status'
),
begin_date,
end_date,
'statuslog',
field='status'
)
return Response(
data,
status=status.HTTP_200_OK
)
|
#!/usr/bin/python
# (c) 2012, Elliott Foster <[email protected]>
# Sponsored by Four Kitchens http://fourkitchens.com.
# (c) 2014, Epic Games, Inc.
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
DOCUMENTATION = '''
---
module: mongodb_user
short_description: Adds or removes a user from a MongoDB database.
description:
- Adds or removes a user from a MongoDB database.
version_added: "1.1"
options:
login_user:
description:
- The username used to authenticate with
required: false
default: null
login_password:
description:
- The password used to authenticate with
required: false
default: null
login_host:
description:
- The host running the database
required: false
default: localhost
login_port:
description:
- The port to connect to
required: false
default: 27017
replica_set:
version_added: "1.6"
description:
- Replica set to connect to (automatically connects to primary for writes)
required: false
default: null
database:
description:
- The name of the database to add/remove the user from
required: true
user:
description:
- The name of the user to add or remove
required: true
default: null
password:
description:
- The password to use for the user
required: false
default: null
roles:
version_added: "1.3"
description:
- "The database user roles valid values are one or more of the following: read, 'readWrite', 'dbAdmin', 'userAdmin', 'clusterAdmin', 'readAnyDatabase', 'readWriteAnyDatabase', 'userAdminAnyDatabase', 'dbAdminAnyDatabase'"
- This param requires mongodb 2.4+ and pymongo 2.5+
required: false
default: "readWrite"
    state:
description:
- The database user state
required: false
default: present
choices: [ "present", "absent" ]
notes:
- Requires the pymongo Python package on the remote host, version 2.4.2+. This
can be installed using pip or the OS package manager. @see http://api.mongodb.org/python/current/installation.html
requirements: [ "pymongo" ]
author: Elliott Foster
'''
EXAMPLES = '''
# Create 'burgers' database user with name 'bob' and password '12345'.
- mongodb_user: database=burgers name=bob password=12345 state=present
# Delete 'burgers' database user with name 'bob'.
- mongodb_user: database=burgers name=bob state=absent
# Define more users with various specific roles (if roles is not defined, no roles are assigned and the user is added in the pre-MongoDB-2.2 style)
- mongodb_user: database=burgers name=ben password=12345 roles='read' state=present
- mongodb_user: database=burgers name=jim password=12345 roles='readWrite,dbAdmin,userAdmin' state=present
- mongodb_user: database=burgers name=joe password=12345 roles='readWriteAnyDatabase' state=present
# add a user to database in a replica set, the primary server is automatically discovered and written to
- mongodb_user: database=burgers name=bob replica_set=blecher password=12345 roles='readWriteAnyDatabase' state=present
'''
import ConfigParser
from distutils.version import LooseVersion
try:
from pymongo.errors import ConnectionFailure
from pymongo.errors import OperationFailure
from pymongo import version as PyMongoVersion
from pymongo import MongoClient
except ImportError:
try: # for older PyMongo 2.2
from pymongo import Connection as MongoClient
except ImportError:
pymongo_found = False
else:
pymongo_found = True
else:
pymongo_found = True
# =========================================
# MongoDB module specific support methods.
#
def user_add(module, client, db_name, user, password, roles):
db = client[db_name]
if roles is None:
db.add_user(user, password, False)
else:
try:
db.add_user(user, password, None, roles=roles)
except OperationFailure, e:
err_msg = str(e)
if LooseVersion(PyMongoVersion) <= LooseVersion('2.5'):
err_msg = err_msg + ' (Note: you must be on mongodb 2.4+ and pymongo 2.5+ to use the roles param)'
module.fail_json(msg=err_msg)
def user_remove(client, db_name, user):
db = client[db_name]
db.remove_user(user)
def load_mongocnf():
config = ConfigParser.RawConfigParser()
mongocnf = os.path.expanduser('~/.mongodb.cnf')
try:
config.readfp(open(mongocnf))
creds = dict(
user=config.get('client', 'user'),
password=config.get('client', 'pass')
)
except (ConfigParser.NoOptionError, IOError):
return False
return creds
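# Sketch of the ~/.mongodb.cnf layout read above (section and keys taken from
# the ConfigParser lookups; the values are placeholders):
#
#   [client]
#   user = admin
#   pass = secret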
# =========================================
# Module execution.
#
def main():
module = AnsibleModule(
argument_spec = dict(
login_user=dict(default=None),
login_password=dict(default=None),
login_host=dict(default='localhost'),
login_port=dict(default='27017'),
replica_set=dict(default=None),
database=dict(required=True, aliases=['db']),
user=dict(required=True, aliases=['name']),
password=dict(aliases=['pass']),
roles=dict(default=None, type='list'),
state=dict(default='present', choices=['absent', 'present']),
)
)
if not pymongo_found:
module.fail_json(msg='the python pymongo module is required')
login_user = module.params['login_user']
login_password = module.params['login_password']
login_host = module.params['login_host']
login_port = module.params['login_port']
replica_set = module.params['replica_set']
db_name = module.params['database']
user = module.params['user']
password = module.params['password']
roles = module.params['roles']
state = module.params['state']
try:
if replica_set:
client = MongoClient(login_host, int(login_port), replicaset=replica_set)
else:
client = MongoClient(login_host, int(login_port))
# try to authenticate as a target user to check if it already exists
try:
client[db_name].authenticate(user, password)
if state == 'present':
module.exit_json(changed=False, user=user)
except OperationFailure:
if state == 'absent':
module.exit_json(changed=False, user=user)
if login_user is None and login_password is None:
mongocnf_creds = load_mongocnf()
if mongocnf_creds is not False:
login_user = mongocnf_creds['user']
login_password = mongocnf_creds['password']
elif login_password is None and login_user is not None:
module.fail_json(msg='when supplying login arguments, both login_user and login_password must be provided')
if login_user is not None and login_password is not None:
client.admin.authenticate(login_user, login_password)
except ConnectionFailure, e:
module.fail_json(msg='unable to connect to database: %s' % str(e))
if state == 'present':
if password is None:
module.fail_json(msg='password parameter required when adding a user')
try:
user_add(module, client, db_name, user, password, roles)
except OperationFailure, e:
module.fail_json(msg='Unable to add or update user: %s' % str(e))
elif state == 'absent':
try:
user_remove(client, db_name, user)
except OperationFailure, e:
module.fail_json(msg='Unable to remove user: %s' % str(e))
module.exit_json(changed=True, user=user)
# import module snippets
from ansible.module_utils.basic import *
main()
|
# Copyright (C) 2007-2012 Andrea Francia Trivolzio(PV) Italy
from distutils.core import setup
import sys
def main():
sys.path.append('.')
from trashcli import trash
scripts.add_script('trash' , 'trashcli.put' , 'main')
scripts.add_script('trash-put' , 'trashcli.put' , 'main')
scripts.add_script('trash-list' , 'trashcli.cmds', 'list')
scripts.add_script('trash-restore', 'trashcli.cmds', 'restore')
scripts.add_script('trash-empty' , 'trashcli.cmds', 'empty')
scripts.add_script('trash-rm' , 'trashcli.rm' , 'main')
setup(
name = 'trash-cli' , version = trash.version ,
author = 'Andrea Francia' , author_email = '[email protected]' ,
url = 'https://github.com/andreafrancia/trash-cli',
description = 'Command line interface to FreeDesktop.org Trash.',
long_description = file("README.rst").read(),
license = 'GPL v2',
packages = ['trashcli'],
scripts = scripts.created_scripts,
data_files = [('share/man/man1', ['man/man1/trash-empty.1',
'man/man1/trash-list.1',
'man/man1/trash-restore.1',
'man/man1/trash-put.1',
'man/man1/trash-rm.1'])],
)
from textwrap import dedent
class Scripts:
def __init__(self, write_file, make_file_executable):
self.write_file = write_file
self.make_file_executable = make_file_executable
self.created_scripts = []
def add_script(self, name, module, main_function):
script_contents = dedent("""\
#!/usr/bin/env python
from __future__ import absolute_import
import sys
from %(module)s import %(main_function)s as main
sys.exit(main())
""") % locals()
self.write_file(name, script_contents)
self.make_file_executable(name)
self.created_scripts.append(name)
import os,stat
def make_file_executable(path):
os.chmod(path, os.stat(path).st_mode | stat.S_IXUSR)
def write_file(name, contents):
file(name, 'w').write(contents)
scripts = Scripts(write_file, make_file_executable)
if __name__ == '__main__':
main()
|
#!/usr/bin/python3
# -*- coding: utf-8 -*-
import httplib2
import base64
from xmlrpc import client as xmlrpclib
class SpecialTransport(xmlrpclib.Transport):
def send_content(self, connection, request_body):
connection.putheader("Impersonate-User", "mister.mould")
super().send_content(connection, request_body)
ticket = {
'summary': '2. new Ticket via API simple',
'description': 'nice desc',
'priority': None,
'assigned': None,
}
rpc_srv = xmlrpclib.ServerProxy('http://admin:[email protected]/rpc/', allow_none=True, use_datetime=True, transport=SpecialTransport())
info = rpc_srv.ticket.createSimple(ticket, True)
print('ticket created #%s' % info[0])
h = httplib2.Http()
headers = {
'User-Agent': 'miadi',
'Authorization': 'Basic YWRtaW46YWRtaW4=',
'Content-Type': 'text/plain',
'Impersonate-User': 'mister.mould',
}
(resp, content) = h.request("http://192.168.33.11/tickets/upload/%s/?filename=test.txt" % info[0],
"PUT", body="This is text",
headers=headers)
print(resp)
|
import OpenPNM
import scipy as sp
class ThermalConductivityTest:
def setup_class(self):
self.net = OpenPNM.Network.Cubic(shape=[3, 3, 3])
self.phase = OpenPNM.Phases.GenericPhase(network=self.net)
self.phase['pore.temperature'] = 298.0 # K
self.phase['pore.salinity'] = 0.0 # g/kg
self.phase['pore.viscosity'] = 0.001 # Pa.s
self.phase['pore.critical_temperature'] = 647.15 # K
self.phase['pore.molecular_weight'] = 0.018 # kg/mol
self.phase['pore.boiling_point'] = 373.15 # K
self.phase['pore.heat_capacity'] = 75.28 # J/mol K
        self.phase['pore.acentric_factor'] = 11.5  # dimensionless
def test_water(self):
f = OpenPNM.Phases.models.thermal_conductivity.water
self.phase.models.add(propname='pore.thermal_conductivity',
model=f)
assert sp.allclose(self.phase['pore.thermal_conductivity'], 0.61047611)
def test_chung(self):
f = OpenPNM.Phases.models.thermal_conductivity.chung
self.phase.models.add(propname='pore.thermal_conductivity',
model=f)
assert sp.allclose(self.phase['pore.thermal_conductivity'], 0.62063913)
def test_sato(self):
f = OpenPNM.Phases.models.thermal_conductivity.sato
self.phase.models.add(propname='pore.thermal_conductivity',
model=f)
assert sp.allclose(self.phase['pore.thermal_conductivity'], 0.29787023)
|
import numpy as np
from .AudioGraph import Node
class Op:
"""
Encapsulate a generic operation as a node of the graph.
    Each time it is called, it generates a new node (OpNode) and returns its w_out.
Example:
op = Op(np.add)
w_out3 = op(w_out1, w_out2)
"""
def __init__(self, fn):
"""
Parameters
----------
fn : callable fn(*ndarrays) -> ndarray
combines a certain number of ndarrays producing a ndarray for output
"""
self.fn = fn
def __call__(self, *w_out_tuple):
"""
Parameters
----------
w_out_tuple : variable number of arguments of type OutWire
these will be used as input to the new Node
Returns
-------
out : OutWire
the output of the newly created Node
"""
n_w_out = len(w_out_tuple)
assert (n_w_out > 0)
max_len = 0
w = w_out_tuple[0].parent.world
for w_out in w_out_tuple:
assert (w_out.__class__.__name__ == "OutWire")
assert (w == w_out.parent.world)
max_len = max(max_len, w_out.data().shape[1])
op_node = OpNode(w, self.fn, n_in=n_w_out, out_len=max_len)
return op_node(*w_out_tuple)
class OpNode(Node):
"""
Generic operation node. Private class, used by Op.
    Creates a Node with the given function and n_in inputs; the output buffer is out_len long.
"""
def __init__(self, world, fn, n_in, out_len):
super().__init__(world)
self.fn = fn
self.n_in = n_in
self.w_in = [InWire(self) for _ in range(n_in)]
self.w_out = OutWire(self, out_len)
def calc_func(self):
in_arrays = [w_in_.get_data() for w_in_ in self.w_in]
self.w_out.set_data(self.fn(*in_arrays))
def __call__(self, *out_wires_tuple):
nout_wires = len(out_wires_tuple)
assert (nout_wires == self.n_in)
for ow, iw in zip(out_wires_tuple, self.w_in):
ow.plug_into(iw)
return self.w_out
class OutWire:
"""
    To be safe, do not modify the array returned by data() (and therefore by InWire.get_data); work only on a copy of it.
"""
def __init__(self, parent, buf_len=1):
"""
Parameters
----------
parent : Node
the parent Node
buf_len : int
the length of the array
usually this is world.buf_len for audio buffer, 1 (default) for control buffer
"""
self.parent = parent
self._data = np.zeros((1, buf_len), dtype=np.float32)
self._in_wires = []
self.parent.out_wires.append(self)
def in_wires(self): return self._in_wires
def set_data(self, in_data):
"""
Set the internal buffer
Parameters
----------
in_data : ndarray (audio buffer) or float (control buffer)
data that will be copied to the internal buffer
"""
self._data[:, :] = in_data
def data(self):
return self._data
def plug_into(self, in_wire):
"""
Connect to a given InWire of another Node
Parameters
----------
in_wire : InWire
the InWire where this OutWire is going to be connected
"""
if self.parent == in_wire.parent:
raise ValueError("trying to connect a node to itself")
self._in_wires.append(in_wire)
in_wire.set_out_wire(self)
def __add__(self, other):
if other.__class__.__name__ == "OutWire":
op = Op(np.add)
return op(self, other)
else:
op = Op(lambda x: x + other)
return op(self)
__radd__ = __add__
def __mul__(self, other):
if other.__class__.__name__ == "OutWire":
op = Op(np.multiply)
return op(self, other)
else:
op = Op(lambda x: x * other)
return op(self)
__rmul__ = __mul__
def __sub__(self, other):
if other.__class__.__name__ == "OutWire":
op = Op(np.subtract)
return op(self, other)
else:
op = Op(lambda x: x - other)
return op(self)
def __rsub__(self, other):
if other.__class__.__name__ == "OutWire":
op = Op(np.subtract)
return op(other, self)
else:
op = Op(lambda x: other - x)
return op(self)
def __truediv__(self, other):
if other.__class__.__name__ == "OutWire":
op = Op(np.divide)
return op(self, other)
else:
op = Op(lambda x: x / other)
return op(self)
def __rtruediv__(self, other):
if other.__class__.__name__ == "OutWire":
op = Op(np.divide)
return op(other, self)
else:
op = Op(lambda x: other / x)
return op(self)
def clip(self, a_min, a_max):
op = Op(lambda x: np.clip(x, a_min, a_max))
return op(self)
def range_to_unit(self, a_min, a_max, invert=False):
a = 1 if invert else 0
b = -1 if invert else 1
op = Op(lambda x: a + b * (1 / (a_max - a_min)) * (np.clip(x, a_min, a_max) - a_min))
return op(self)
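# Expression sketch (not part of the module): given OutWires w1 and w2 from
# existing nodes, the operator overloads above create OpNodes implicitly:
#
#     mix = (0.5 * w1 + 0.5 * w2).clip(-1.0, 1.0)
#
# `mix` is itself an OutWire and can be plugged into any InWire.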
class InWire:
"""
When overriding calc_func in a new node, do not modify the object returned by InWire.get_data but only a copy of it.
"""
def __init__(self, parent, default_data=None):
self.parent = parent
self._default_data = default_data
self._out_wire = None # not connected
self.parent.in_wires.append(self)
def get_data(self):
if self._out_wire is None: # not connected
return self._default_data
out_data = self._out_wire.data()
if all([s_ == 1 for s_ in out_data.shape]): # control (scalar) out_data
return np.squeeze(out_data)
return out_data # audio (vector) out_data
def set_out_wire(self, out_wire):
self._out_wire = out_wire
def out_wire(self):
return self._out_wire
class ObjOutWire:
"""
Similar to OutWire, but does not manage any numeric buffer.
    Its internal _data attribute is simply a reference to data managed elsewhere (typically in the parent Node).
    set_data stores the given object by reference, so mutable objects are shared rather than copied.
    Do not modify the object returned by ObjInWire.get_data but only a copy of it.
"""
def __init__(self, parent):
self.parent = parent
self._data = None
self._in_wires = []
self.parent.out_wires.append(self)
def in_wires(self): return self._in_wires
def set_data(self, in_data):
self._data = in_data
def data(self):
return self._data
def plug_into(self, in_wire):
if self.parent == in_wire.parent:
raise ValueError("trying to connect a node to itself")
self._in_wires.append(in_wire)
in_wire.set_out_wire(self)
class ObjInWire:
"""
Similar to InWire, but used for objects instead of numpy arrays
To be safe, do not modify the object returned by ObjInWire.get_data but only a copy of it.
"""
def __init__(self, parent, default_data=None):
"""
Create an ObjInWire from parent Node and a default_data object.
        - default_data object should be managed by the parent Node
- when the node is connected, get_data() returns an object managed by the parent of the connected ObjOutWire
Parameters
----------
parent : Node
parent Node
default_data : object
the object to use if no ObjOutWire was connected
"""
self.parent = parent
self._default_data = default_data
self._out_wire = None # not connected
self.parent.in_wires.append(self)
def get_data(self):
"""
        Return the default data (if not connected), or the object of the connected ObjOutWire
Returns
-------
out : object
default object if not connected, or the object of the connected ObjOutWire
"""
if self._out_wire is None: # not connected
return self._default_data
return self._out_wire.data()
def set_out_wire(self, out_wire):
self._out_wire = out_wire
def out_wire(self):
return self._out_wire
def pass_thru(parent, out_wire):
"""
    Connect out_wire through this unit.
out_wire.parent Node will be connected through a dummy InWire: it will become a "dependency" of parent
Creates a dummy InWire and adds it to parent.in_wires, for graph sorting.
    The out_wire is returned, for a readable syntax.
Parameters
----------
parent: Node
out_wire: OutWire
Returns
-------
out_wire: OutWire
the input parameter
"""
    w_in = InWire(parent)  # dummy InWire used only for graph sorting
out_wire.plug_into(w_in)
return out_wire
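# Usage sketch (assumed context): inside a Group-like Node `self`, expose an
# inner node's output while recording the dependency for graph sorting:
#
#     self.w_out = pass_thru(self, inner_node.w_out)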
class InWireAdaptor(InWire):
"""
To be used in Group, to expose an InWire from an inner Node
"""
def __init__(self, parent, in_wire):
super().__init__(parent)
self.in_wire = in_wire
def get_data(self): return self.in_wire.get_data()
def out_wire(self): return self.in_wire.out_wire()
def set_out_wire(self, out_wire): self.in_wire.set_out_wire(out_wire)
class OutWireAdaptor(OutWire):
"""
To be used in Group, to expose an OutWire from an inner Node
"""
def __init__(self, parent, out_wire):
super().__init__(parent)
self.out_wire = out_wire
def in_wires(self): return self.out_wire.in_wires()
def set_data(self, in_data): self.out_wire.set_data(in_data)
def data(self): return self.out_wire.data()
def plug_into(self, in_wire): self.out_wire.plug_into(in_wire)
class ObjInWireAdaptor(ObjInWire):
"""
To be used in Group, to expose an ObjInWire from an inner Node
"""
def __init__(self, parent, in_wire):
super().__init__(parent)
self.in_wire = in_wire
def get_data(self): return self.in_wire.get_data()
def out_wire(self): return self.in_wire.out_wire()
def set_out_wire(self, out_wire): self.in_wire.set_out_wire(out_wire)
class ObjOutWireAdaptor(ObjOutWire):
"""
To be used in Group, to expose an ObjOutWire from an inner Node
"""
def __init__(self, parent, out_wire):
super().__init__(parent)
self.out_wire = out_wire
def in_wires(self): return self.out_wire.in_wires()
def set_data(self, in_data): self.out_wire.set_data(in_data)
def data(self): return self.out_wire.data()
def plug_into(self, in_wire): self.out_wire.plug_into(in_wire)
|
# Copyright 2020,2021 Sony Corporation.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import absolute_import
import os
import csv
import pytest
import tempfile
from shutil import rmtree
from contextlib import contextmanager, closing
# from nnabla
from nnabla.config import nnabla_config
from nnabla.utils.create_cache import CreateCache
from nnabla.utils.data_source_implements import CacheDataSource, CsvDataSource
from nnabla.testing import assert_allclose
from .conftest import test_data_csv_csv_20, test_data_csv_png_20
@contextmanager
def create_temp_with_dir():
    tmpdir = tempfile.mkdtemp()
    try:
        yield tmpdir
    finally:
        # clean up even if the body raises
        rmtree(tmpdir, ignore_errors=True)
def associate_variables_and_data(source) -> dict:
data_dict = {}
for v, data in zip(source.variables, source.next()):
data_dict[v] = data
return data_dict
def check_relative_csv_file_result(cache_file_fmt, csvfilename, cachedir):
# check cache_index.csv
cache_info_csv_path = os.path.join(cachedir, 'cache_index.csv')
assert os.path.exists(cache_info_csv_path)
with open(cache_info_csv_path, 'r') as f:
for row in csv.reader(f):
assert os.path.exists(os.path.join(cachedir, row[0]))
# check cache_info.csv
if cache_file_fmt == '.npy':
assert os.path.exists(os.path.join(cachedir, 'cache_info.csv'))
# check order.csv
assert os.path.exists(os.path.join(cachedir, 'order.csv'))
# check original.csv
original_csv_path = os.path.join(cachedir, 'original.csv')
assert os.path.exists(original_csv_path)
with open(original_csv_path, 'r') as of, open(csvfilename, 'r') as cf:
for row in of:
assert row == cf.readline()
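# Sketch of the cache directory layout verified above (file names taken from
# the assertions; the annotations are assumptions):
#
#   cachedir/
#       cache_index.csv   # rows naming the cache files, relative to cachedir
#       cache_info.csv    # present only for the '.npy' cache format
#       order.csv         # row order (used to recover the shuffle order)
#       original.csv      # verbatim copy of the source csv file
#       <cache chunks>    # .npy or .h5 files listed in cache_index.csv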
@pytest.mark.parametrize('input_file_fmt', ['png', 'csv'])
@pytest.mark.parametrize('cache_file_fmt', ['.npy', '.h5'])
@pytest.mark.parametrize('shuffle', [False, True])
@pytest.mark.parametrize('normalize', [False, True])
@pytest.mark.parametrize('num_of_threads', [i for i in range(10)])
def test_create_cache(test_data_csv_csv_20,
test_data_csv_png_20,
input_file_fmt,
cache_file_fmt,
shuffle,
normalize,
num_of_threads):
if input_file_fmt == 'csv':
csvfilename = test_data_csv_csv_20
else:
csvfilename = test_data_csv_png_20
nnabla_config.set('DATA_ITERATOR', 'cache_file_format', cache_file_fmt)
with create_temp_with_dir() as tmpdir:
cc = CreateCache(csvfilename, shuffle=shuffle,
num_of_threads=num_of_threads)
cc.create(tmpdir, normalize=normalize)
# get cache data source and csv file data source
with closing(CacheDataSource(tmpdir)) as cache_source:
csv_source = CsvDataSource(csvfilename, normalize=normalize)
check_relative_csv_file_result(cache_file_fmt, csvfilename, tmpdir)
assert cache_source.size == csv_source.size
assert set(cache_source.variables) == set(csv_source.variables)
if shuffle:
with open(os.path.join(tmpdir, 'order.csv'), 'r') as f:
csv_source._order = [int(row[1]) for row in csv.reader(f)]
for _ in range(cache_source.size):
cache_data = associate_variables_and_data(cache_source)
csv_data = associate_variables_and_data(csv_source)
for v in cache_source.variables:
assert_allclose(cache_data[v], csv_data[v])
|
import os
from tools.common.runner import Runner
from tools.common.util import get_logger, get_timestr
NET_TO_FILE = {
'fcn-r': 'fc' + os.path.sep + 'fcn5_mnist.py',
'alexnet-r': 'cnn/alexnet/alexnet_cifar10.py'.replace('/', os.path.sep),
'resnet-56': 'cnn/resnet/resnet_cifar10.py'.replace('/', os.path.sep),
'lstm': 'rnn/lstm/lstm.py'.replace('/', os.path.sep)
}
NET_TO_DATA = {
'fcn-r': 'MNIST_data',
'alexnet-r': 'cifar-10-batches-py',
'resnet-56': 'cifar-10-batches-py',
'lstm': 'lstm_data' + os.path.sep + 'data'
}
class TensorFlowRunner(Runner):
def __init__(self, data_dir):
self.logger = get_logger('tensorflow')
self.base_dir = os.path.dirname(os.path.abspath(__file__))
self._prepare_data(data_dir)
def _prepare_data(self, data_dir):
self.data_dir = os.path.join(data_dir, 'tensorflow')
if not os.path.isdir(self.data_dir):
zip_path = self.download_file('http://dlbench.comp.hkbu.edu.hk/s/data/tensorflow.zip',
'342D1F7CAC27D6025856101CE491EAF5B90F83A2A24E16C4CD7717A093360B0D', data_dir)
self.decompress_zip(zip_path, data_dir)
def start_experiment(self, exp, log_dir):
self.logger.info('Run with ' + str(exp.__dict__))
if exp.net_name in NET_TO_FILE:
# Prepare runtime and command
env = None
if exp.is_gpu():
if exp.device_count > 1:
                    self.logger.warning('Skip as multiple GPU cards are not supported!')
return
                env = self.create_gpu_env(exp.device_id)
else:
env = self.create_cpu_env(exp.device_count)
script_path = os.path.join(self.base_dir, NET_TO_FILE[exp.net_name])
dataset = os.path.join(self.data_dir, NET_TO_DATA[exp.net_name])
cmd = ('python -u %s --data_dir=%s --batch_size=%d --epochs=%d --epoch_size=%d --device_id=%s --lr=%f' %
(script_path, dataset, exp.batch_size, exp.num_epoch, exp.epoch_size, exp.device_id, exp.learning_rate))
log_path = os.path.join(log_dir, 'tensorflow_%s.log' % str(exp).replace(';', '_'))
# Execute and fetch result
if self.execute_cmd(cmd, log_path, cwd=os.path.dirname(script_path), env=env):
with open(log_path, mode='r') as fin:
for line in fin.readlines():
if line.startswith('average_batch_time'):
seconds_per_batch = float(line.split(':')[-1].strip())
self.logger.info('Average seconds per batch: %.4f' % seconds_per_batch)
return seconds_per_batch
self.logger.error('Cannot find "average_batch_time" in %s!' % log_path)
else:
            self.logger.warning('Skip as %s is not registered!' % exp.net_name)
|
# Configuration file for ipython-notebook.
c = get_config()
#------------------------------------------------------------------------------
# NotebookApp configuration
#------------------------------------------------------------------------------
# NotebookApp will inherit config from: BaseIPythonApplication, Application
# The url for MathJax.js.
# c.NotebookApp.mathjax_url = ''
# The IP address the notebook server will listen on.
# c.NotebookApp.ip = '127.0.0.1'
# The base URL for the notebook server.
#
# Leading and trailing slashes can be omitted, and will automatically be added.
# c.NotebookApp.base_project_url = '/'
# Create a massive crash report when IPython encounters what may be an internal
# error. The default is to append a short message to the usual traceback
# c.NotebookApp.verbose_crash = False
# The random bytes used to secure cookies. By default this is a new random
# number every time you start the Notebook. Set it to a value in a config file
# to enable logins to persist across server sessions.
#
# Note: Cookie secrets should be kept private, do not share config files with
# cookie_secret stored in plaintext (you can read the value from a file).
# c.NotebookApp.cookie_secret = ''
# The number of additional ports to try if the specified port is not available.
# c.NotebookApp.port_retries = 50
# Whether to open in a browser after starting. The specific browser used is
# platform dependent and determined by the python standard library `webbrowser`
# module, unless it is overridden using the --browser (NotebookApp.browser)
# configuration option.
c.NotebookApp.open_browser = True
# The notebook manager class to use.
# c.NotebookApp.notebook_manager_class = 'IPython.html.services.notebooks.filenbmanager.FileNotebookManager'
# The date format used by logging formatters for %(asctime)s
# c.NotebookApp.log_datefmt = '%Y-%m-%d %H:%M:%S'
# The base URL for the kernel server
#
# Leading and trailing slashes can be omitted, and will automatically be added.
# c.NotebookApp.base_kernel_url = '/'
# The port the notebook server will listen on.
# c.NotebookApp.port = 8888
# Whether to overwrite existing config files when copying
# c.NotebookApp.overwrite = False
# Whether to enable MathJax for typesetting math/TeX
#
# MathJax is the javascript library IPython uses to render math/LaTeX. It is
# very large, so you may want to disable it if you have a slow internet
# connection, or for offline use of the notebook.
#
# When disabled, equations etc. will appear as their untransformed TeX source.
# c.NotebookApp.enable_mathjax = True
# The full path to an SSL/TLS certificate file.
# c.NotebookApp.certfile = u''
# Path to an extra config file to load.
#
# If specified, load this config file in addition to any other IPython config.
# c.NotebookApp.extra_config_file = u''
# The IPython profile to use.
# c.NotebookApp.profile = u'default'
# The base URL for the websocket server, if it differs from the HTTP server
# (hint: it almost certainly doesn't).
#
# Should be in the form of an HTTP origin: ws[s]://hostname[:port]
# c.NotebookApp.websocket_url = ''
# The name of the IPython directory. This directory is used for logging
# configuration (through profiles), history storage, etc. The default is usually
# $HOME/.ipython. This option can also be specified through the environment
# variable IPYTHONDIR.
# c.NotebookApp.ipython_dir = u'/home/fedor/.config/ipython'
# Set the log level by value or name.
# c.NotebookApp.log_level = 30
# Hashed password to use for web authentication.
#
# To generate, type in a python/IPython shell:
#
# from IPython.lib import passwd; passwd()
#
# The string should be of the form type:salt:hashed-password.
# c.NotebookApp.password = u''
# The Logging format template
# c.NotebookApp.log_format = '[%(name)s]%(highlevel)s %(message)s'
# Whether to use browser-side less-css parsing instead of the compiled css
# version in templates that allow it. This is mainly convenient when working
# on the less file to avoid a build step, or if you want to override some of
# the less variables without having to recompile everything.
#
# You will need to install the less.js component in the static directory either
# in the source tree or in your profile folder.
# c.NotebookApp.use_less = False
# Extra paths to search for serving static files.
#
# This allows adding javascript/css to be available from the notebook server
# machine, or overriding individual files in the IPython
# c.NotebookApp.extra_static_paths = []
# Whether to trust X-Scheme/X-Forwarded-Proto and X-Real-Ip/X-Forwarded-For
# headers sent by the upstream reverse proxy. Necessary if the proxy handles
# SSL.
# c.NotebookApp.trust_xheaders = False
# Whether to install the default config files into the profile dir. If a new
# profile is being created, and IPython contains config files for that profile,
# then they will be staged into the new directory. Otherwise, default config
# files will be automatically generated.
# c.NotebookApp.copy_config_files = False
# The full path to a private key file for usage with SSL/TLS.
# c.NotebookApp.keyfile = u''
# Supply overrides for the tornado.web.Application that the IPython notebook
# uses.
# c.NotebookApp.webapp_settings = {}
# Specify what command to use to invoke a web browser when opening the notebook.
# If not specified, the default browser will be determined by the `webbrowser`
# standard library module, which allows setting of the BROWSER environment
# variable to override it.
# c.NotebookApp.browser = u''
#------------------------------------------------------------------------------
# IPKernelApp configuration
#------------------------------------------------------------------------------
# IPython: an enhanced interactive Python shell.
# IPKernelApp will inherit config from: BaseIPythonApplication, Application,
# InteractiveShellApp
# The importstring for the DisplayHook factory
# c.IPKernelApp.displayhook_class = 'IPython.kernel.zmq.displayhook.ZMQDisplayHook'
# Set the IP or interface on which the kernel will listen.
# c.IPKernelApp.ip = u''
# Pre-load matplotlib and numpy for interactive use, selecting a particular
# matplotlib backend and loop integration.
c.IPKernelApp.pylab = "inline"
# Create a massive crash report when IPython encounters what may be an internal
# error. The default is to append a short message to the usual traceback
# c.IPKernelApp.verbose_crash = False
# The Kernel subclass to be used.
#
# This should allow easy re-use of the IPKernelApp entry point to configure and
# launch kernels other than IPython's own.
# c.IPKernelApp.kernel_class = 'IPython.kernel.zmq.ipkernel.Kernel'
# Run the module as a script.
# c.IPKernelApp.module_to_run = ''
# The date format used by logging formatters for %(asctime)s
# c.IPKernelApp.log_datefmt = '%Y-%m-%d %H:%M:%S'
# set the shell (ROUTER) port [default: random]
# c.IPKernelApp.shell_port = 0
# set the control (ROUTER) port [default: random]
# c.IPKernelApp.control_port = 0
# Whether to overwrite existing config files when copying
# c.IPKernelApp.overwrite = False
# Execute the given command string.
# c.IPKernelApp.code_to_run = ''
# set the stdin (ROUTER) port [default: random]
# c.IPKernelApp.stdin_port = 0
# Set the log level by value or name.
# c.IPKernelApp.log_level = 30
# lines of code to run at IPython startup.
# c.IPKernelApp.exec_lines = []
# Path to an extra config file to load.
#
# If specified, load this config file in addition to any other IPython config.
# c.IPKernelApp.extra_config_file = u''
# The importstring for the OutStream factory
# c.IPKernelApp.outstream_class = 'IPython.kernel.zmq.iostream.OutStream'
# Whether to create profile dir if it doesn't exist
# c.IPKernelApp.auto_create = False
# set the heartbeat port [default: random]
# c.IPKernelApp.hb_port = 0
#
# c.IPKernelApp.transport = 'tcp'
# redirect stdout to the null device
# c.IPKernelApp.no_stdout = False
# dotted module name of an IPython extension to load.
# c.IPKernelApp.extra_extension = ''
# A file to be run
# c.IPKernelApp.file_to_run = ''
# The IPython profile to use.
# c.IPKernelApp.profile = u'default'
#
# c.IPKernelApp.parent_appname = u''
# kill this process if its parent dies. On Windows, the argument specifies the
# HANDLE of the parent process, otherwise it is simply boolean.
# c.IPKernelApp.parent_handle = 0
# JSON file in which to store connection info [default: kernel-<pid>.json]
#
# This file will contain the IP, ports, and authentication key needed to connect
# clients to this kernel. By default, this file will be created in the security
# dir of the current profile, but can be specified by absolute path.
# c.IPKernelApp.connection_file = ''
# If true, IPython will populate the user namespace with numpy, pylab, etc. and
# an 'import *' is done from numpy and pylab, when using pylab mode.
#
# When False, pylab mode should not import any names into the user namespace.
# c.IPKernelApp.pylab_import_all = True
# The name of the IPython directory. This directory is used for logging
# configuration (through profiles), history storage, etc. The default is usually
# $HOME/.ipython. This option can also be specified through the environment
# variable IPYTHONDIR.
# c.IPKernelApp.ipython_dir = u'/home/fedor/.config/ipython'
# Configure matplotlib for interactive use with the default matplotlib backend.
# c.IPKernelApp.matplotlib = None
# ONLY USED ON WINDOWS Interrupt this process when the parent is signaled.
# c.IPKernelApp.interrupt = 0
# Whether to install the default config files into the profile dir. If a new
# profile is being created, and IPython contains config files for that profile,
# then they will be staged into the new directory. Otherwise, default config
# files will be automatically generated.
# c.IPKernelApp.copy_config_files = False
# List of files to run at IPython startup.
# c.IPKernelApp.exec_files = []
# Enable GUI event loop integration with any of ('glut', 'gtk', 'gtk3', 'none',
# 'osx', 'pyglet', 'qt', 'qt4', 'tk', 'wx').
# c.IPKernelApp.gui = None
# A list of dotted module names of IPython extensions to load.
# c.IPKernelApp.extensions = []
# redirect stderr to the null device
# c.IPKernelApp.no_stderr = False
# The Logging format template
# c.IPKernelApp.log_format = '[%(name)s]%(highlevel)s %(message)s'
# set the iopub (PUB) port [default: random]
# c.IPKernelApp.iopub_port = 0
#------------------------------------------------------------------------------
# ZMQInteractiveShell configuration
#------------------------------------------------------------------------------
# A subclass of InteractiveShell for ZMQ.
# ZMQInteractiveShell will inherit config from: InteractiveShell
# Use colors for displaying information about objects. Because this information
# is passed through a pager (like 'less'), and some pagers get confused with
# color codes, this capability can be turned off.
# c.ZMQInteractiveShell.color_info = True
# A list of ast.NodeTransformer subclass instances, which will be applied to
# user input before code is run.
# c.ZMQInteractiveShell.ast_transformers = []
#
# c.ZMQInteractiveShell.history_length = 10000
# Don't call post-execute functions that have failed in the past.
# c.ZMQInteractiveShell.disable_failing_post_execute = False
# Show rewritten input, e.g. for autocall.
# c.ZMQInteractiveShell.show_rewritten_input = True
# Set the color scheme (NoColor, Linux, or LightBG).
# c.ZMQInteractiveShell.colors = 'Linux'
#
# c.ZMQInteractiveShell.separate_in = '\n'
# Deprecated, use PromptManager.in2_template
# c.ZMQInteractiveShell.prompt_in2 = ' .\\D.: '
#
# c.ZMQInteractiveShell.separate_out = ''
# Deprecated, use PromptManager.in_template
# c.ZMQInteractiveShell.prompt_in1 = 'In [\\#]: '
# Enable deep (recursive) reloading by default. IPython can use the deep_reload
# module which reloads changes in modules recursively (it replaces the reload()
# function, so you don't need to change anything to use it). deep_reload()
# forces a full reload of modules whose code may have changed, which the default
# reload() function does not. When deep_reload is off, IPython will use the
# normal reload(), but deep_reload will still be available as dreload().
# c.ZMQInteractiveShell.deep_reload = False
# Make IPython automatically call any callable object even if you didn't type
# explicit parentheses. For example, 'str 43' becomes 'str(43)' automatically.
# The value can be '0' to disable the feature, '1' for 'smart' autocall, where
# it is not applied if there are no more arguments on the line, and '2' for
# 'full' autocall, where all callable objects are automatically called (even if
# no arguments are present).
# c.ZMQInteractiveShell.autocall = 0
#
# c.ZMQInteractiveShell.separate_out2 = ''
# Deprecated, use PromptManager.justify
# c.ZMQInteractiveShell.prompts_pad_left = True
#
# c.ZMQInteractiveShell.readline_parse_and_bind = ['tab: complete', '"\\C-l": clear-screen', 'set show-all-if-ambiguous on', '"\\C-o": tab-insert', '"\\C-r": reverse-search-history', '"\\C-s": forward-search-history', '"\\C-p": history-search-backward', '"\\C-n": history-search-forward', '"\\e[A": history-search-backward', '"\\e[B": history-search-forward', '"\\C-k": kill-line', '"\\C-u": unix-line-discard']
# Enable magic commands to be called without the leading %.
# c.ZMQInteractiveShell.automagic = True
#
# c.ZMQInteractiveShell.debug = False
#
# c.ZMQInteractiveShell.object_info_string_level = 0
#
# c.ZMQInteractiveShell.ipython_dir = ''
#
# c.ZMQInteractiveShell.readline_remove_delims = '-/~'
# Start logging to the default log file.
# c.ZMQInteractiveShell.logstart = False
# The name of the logfile to use.
# c.ZMQInteractiveShell.logfile = ''
#
# c.ZMQInteractiveShell.wildcards_case_sensitive = True
# Save multi-line entries as one entry in readline history
# c.ZMQInteractiveShell.multiline_history = True
# Start logging to the given file in append mode.
# c.ZMQInteractiveShell.logappend = ''
#
# c.ZMQInteractiveShell.xmode = 'Context'
#
# c.ZMQInteractiveShell.quiet = False
# Deprecated, use PromptManager.out_template
# c.ZMQInteractiveShell.prompt_out = 'Out[\\#]: '
# Set the size of the output cache. The default is 1000, you can change it
# permanently in your config file. Setting it to 0 completely disables the
# caching system, and the minimum value accepted is 20 (if you provide a value
# less than 20, it is reset to 0 and a warning is issued). This limit is
# defined because otherwise you'll spend more time re-flushing a too small cache
# than working
# c.ZMQInteractiveShell.cache_size = 1000
# 'all', 'last', 'last_expr' or 'none', specifying which nodes should be run
# interactively (displaying output from expressions).
# c.ZMQInteractiveShell.ast_node_interactivity = 'last_expr'
# Automatically call the pdb debugger after every exception.
# c.ZMQInteractiveShell.pdb = False
#------------------------------------------------------------------------------
# KernelManager configuration
#------------------------------------------------------------------------------
# Manages a single kernel in a subprocess on this host.
#
# This version starts kernels with Popen.
# KernelManager will inherit config from: ConnectionFileMixin
# The Popen Command to launch the kernel. Override this if you have a custom
# kernel command.
# c.KernelManager.kernel_cmd = []
# Set the kernel's IP address [default localhost]. If the IP address is
# something other than localhost, then Consoles on other machines will be able
# to connect to the Kernel, so be careful!
# c.KernelManager.ip = '127.0.0.1'
#
# c.KernelManager.transport = 'tcp'
# Should we autorestart the kernel if it dies.
# c.KernelManager.autorestart = False
#------------------------------------------------------------------------------
# ProfileDir configuration
#------------------------------------------------------------------------------
# An object to manage the profile directory and its resources.
#
# The profile directory is used by all IPython applications, to manage
# configuration, logging and security.
#
# This object knows how to find, create and manage these directories. This
# should be used by any code that wants to handle profiles.
# Set the profile location directly. This overrides the logic used by the
# `profile` option.
# c.ProfileDir.location = u''
#------------------------------------------------------------------------------
# Session configuration
#------------------------------------------------------------------------------
# Object for handling serialization and sending of messages.
#
# The Session object handles building messages and sending them with ZMQ sockets
# or ZMQStream objects. Objects can communicate with each other over the
# network via Session objects, and only need to work with the dict-based IPython
# message spec. The Session will handle serialization/deserialization, security,
# and metadata.
#
# Sessions support configurable serialization via packer/unpacker traits, and
# signing with HMAC digests via the key/keyfile traits.
#
# Parameters
# ----------
#
# debug : bool
# whether to trigger extra debugging statements
# packer/unpacker : str : 'json', 'pickle' or import_string
# importstrings for methods to serialize message parts. If just
# 'json' or 'pickle', predefined JSON and pickle packers will be used.
# Otherwise, the entire importstring must be used.
#
# The functions must accept at least valid JSON input, and output *bytes*.
#
# For example, to use msgpack:
# packer = 'msgpack.packb', unpacker='msgpack.unpackb'
# pack/unpack : callables
# You can also set the pack/unpack callables for serialization directly.
# session : bytes
# the ID of this Session object. The default is to generate a new UUID.
# username : unicode
# username added to message headers. The default is to ask the OS.
# key : bytes
# The key used to initialize an HMAC signature. If unset, messages
# will not be signed or checked.
# keyfile : filepath
# The file containing a key. If this is set, `key` will be initialized
# to the contents of the file.
# Username for the Session. Default is your system username.
# c.Session.username = u'fedor'
# The name of the unpacker for unserializing messages. Only used with custom
# functions for `packer`.
# c.Session.unpacker = 'json'
# Threshold (in bytes) beyond which a buffer should be sent without copying.
# c.Session.copy_threshold = 65536
# The name of the packer for serializing messages. Should be one of 'json',
# 'pickle', or an import name for a custom callable serializer.
# c.Session.packer = 'json'
# The maximum number of digests to remember.
#
# The digest history will be culled when it exceeds this value.
# c.Session.digest_history_size = 65536
# The UUID identifying this session.
# c.Session.session = u''
# The digest scheme used to construct the message signatures. Must have the form
# 'hmac-HASH'.
# c.Session.signature_scheme = 'hmac-sha256'
# execution key, for extra authentication.
# c.Session.key = ''
# Debug output in the Session
# c.Session.debug = False
# The maximum number of items for a container to be introspected for custom
# serialization. Containers larger than this are pickled outright.
# c.Session.item_threshold = 64
# path to file containing execution key.
# c.Session.keyfile = ''
# Threshold (in bytes) beyond which an object's buffer should be extracted to
# avoid pickling.
# c.Session.buffer_threshold = 1024
# Metadata dictionary, which serves as the default top-level metadata dict for
# each message.
# c.Session.metadata = {}
#------------------------------------------------------------------------------
# InlineBackend configuration
#------------------------------------------------------------------------------
# An object to store configuration of the inline backend.
# The image format for figures with the inline backend.
# c.InlineBackend.figure_format = 'png'
# Close all figures at the end of each cell.
#
# When True, ensures that each cell starts with no active figures, but it also
# means that one must keep track of references in order to edit or redraw
# figures in subsequent cells. This mode is ideal for the notebook, where
# residual plots from other cells might be surprising.
#
# When False, one must call figure() to create new figures. This means that
# gcf() and getfigs() can reference figures created in other cells, and the
# active figure can continue to be edited with pylab/pyplot methods that
# reference the current active figure. This mode facilitates iterative editing
# of figures, and behaves most consistently with other matplotlib backends, but
# figure barriers between cells must be explicit.
# c.InlineBackend.close_figures = True
# Subset of matplotlib rcParams that should be different for the inline backend.
# c.InlineBackend.rc = {'font.size': 10, 'figure.figsize': (6.0, 4.0), 'figure.facecolor': 'white', 'savefig.dpi': 72, 'figure.subplot.bottom': 0.125, 'figure.edgecolor': 'white'}
#------------------------------------------------------------------------------
# MappingKernelManager configuration
#------------------------------------------------------------------------------
# A KernelManager that handles notebook mapping and HTTP error handling
# MappingKernelManager will inherit config from: MultiKernelManager
# The kernel manager class. This is configurable to allow subclassing of the
# KernelManager for customized behavior.
# c.MappingKernelManager.kernel_manager_class = 'IPython.kernel.ioloop.IOLoopKernelManager'
#------------------------------------------------------------------------------
# NotebookManager configuration
#------------------------------------------------------------------------------
# The directory to use for notebooks.
# c.NotebookManager.notebook_dir = u'/home/fedor/Checkouts/argus2/notebooks'
#------------------------------------------------------------------------------
# FileNotebookManager configuration
#------------------------------------------------------------------------------
# FileNotebookManager will inherit config from: NotebookManager
# The location in which to keep notebook checkpoints
#
# By default, it is notebook-dir/.ipynb_checkpoints
# c.FileNotebookManager.checkpoint_dir = u''
# Automatically create a Python script when saving the notebook.
#
# For easier use of import, %run and %load across notebooks, a <notebook-
# name>.py script will be created next to any <notebook-name>.ipynb on each
# save. This can also be set with the short `--script` flag.
# c.FileNotebookManager.save_script = False
# The directory to use for notebooks.
# c.FileNotebookManager.notebook_dir = u'/home/fedor/Checkouts/argus2/notebooks'
|
from Tkinter import *
from time import time
import sys
class App:
def __init__(self):
self.root = Tk()
self.updateRate = 30 #ms
self.lastUpdateOnContinous = time()
self.lastUpdateOnRenderLoop = time()
self.root.after(self.updateRate, self.run_continous)
#self.root.mainloop()
def run_continous(self):
self.root.after(self.updateRate, self.run_continous)
current = time()
delta = current - self.lastUpdateOnContinous
self.lastUpdateOnContinous = current
self.UpdateAllElements(delta)
print("Continous delta ", delta)
    def getDeltaTime(self, lastUpdate):
        # compute the elapsed time since lastUpdate (the original returned an
        # undefined name `delta`)
        return time() - lastUpdate
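    # Manual render loop: rather than blocking in Tk's mainloop(), pump the
    # event queue ourselves so per-frame work can run between updates while
    # after() callbacks (run_continous) still fire.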
def startRendering(self):
while True:
current = time()
delta = current - self.lastUpdateOnRenderLoop
self.lastUpdateOnRenderLoop = current
print("Renderloop delta ", delta)
self.root.update()
self.root.update_idletasks()
if __name__ == "__main__":
a = App()
a.startRendering()
|
#!/usr/bin/env python3
# Copyright (c) 2014-2020 The Bitcoin Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
"""Test the wallet."""
from decimal import Decimal
from itertools import product
from test_framework.test_framework import BitcoinTestFramework
from test_framework.util import (
assert_array_result,
assert_equal,
assert_fee_amount,
assert_raises_rpc_error,
)
from test_framework.wallet_util import test_address
OUT_OF_RANGE = "Amount out of range"
class WalletTest(BitcoinTestFramework):
def set_test_params(self):
self.num_nodes = 4
self.extra_args = [[
"-acceptnonstdtxn=1",
]] * self.num_nodes
self.setup_clean_chain = True
self.supports_cli = False
def skip_test_if_missing_module(self):
self.skip_if_no_wallet()
def setup_network(self):
self.setup_nodes()
# Only need nodes 0-2 running at start of test
self.stop_node(3)
self.connect_nodes(0, 1)
self.connect_nodes(1, 2)
self.connect_nodes(0, 2)
self.sync_all(self.nodes[0:3])
def check_fee_amount(self, curr_balance, balance_with_fee, fee_per_byte, tx_size):
"""Return curr_balance after asserting the fee was in range"""
fee = balance_with_fee - curr_balance
assert_fee_amount(fee, tx_size, fee_per_byte * 1000)
return curr_balance
def get_vsize(self, txn):
return self.nodes[0].decoderawtransaction(txn)['vsize']
def run_test(self):
        # Check that there's no UTXO on any of the nodes
assert_equal(len(self.nodes[0].listunspent()), 0)
assert_equal(len(self.nodes[1].listunspent()), 0)
assert_equal(len(self.nodes[2].listunspent()), 0)
self.log.info("Mining blocks...")
self.nodes[0].generate(1)
walletinfo = self.nodes[0].getwalletinfo()
assert_equal(walletinfo['immature_balance'], 50)
assert_equal(walletinfo['balance'], 0)
self.sync_all(self.nodes[0:3])
self.nodes[1].generate(101)
self.sync_all(self.nodes[0:3])
assert_equal(self.nodes[0].getbalance(), 50)
assert_equal(self.nodes[1].getbalance(), 50)
assert_equal(self.nodes[2].getbalance(), 0)
# Check that only first and second nodes have UTXOs
utxos = self.nodes[0].listunspent()
assert_equal(len(utxos), 1)
assert_equal(len(self.nodes[1].listunspent()), 1)
assert_equal(len(self.nodes[2].listunspent()), 0)
self.log.info("Test gettxout")
confirmed_txid, confirmed_index = utxos[0]["txid"], utxos[0]["vout"]
# First, outputs that are unspent both in the chain and in the
# mempool should appear with or without include_mempool
txout = self.nodes[0].gettxout(txid=confirmed_txid, n=confirmed_index, include_mempool=False)
assert_equal(txout['value'], 50)
txout = self.nodes[0].gettxout(txid=confirmed_txid, n=confirmed_index, include_mempool=True)
assert_equal(txout['value'], 50)
        # Send 21 BTC total from node 0 to node 2 in two sendtoaddress calls (11 + 10)
self.nodes[0].sendtoaddress(self.nodes[2].getnewaddress(), 11)
mempool_txid = self.nodes[0].sendtoaddress(self.nodes[2].getnewaddress(), 10)
self.log.info("Test gettxout (second part)")
# utxo spent in mempool should be visible if you exclude mempool
# but invisible if you include mempool
txout = self.nodes[0].gettxout(confirmed_txid, confirmed_index, False)
assert_equal(txout['value'], 50)
txout = self.nodes[0].gettxout(confirmed_txid, confirmed_index, True)
assert txout is None
# new utxo from mempool should be invisible if you exclude mempool
# but visible if you include mempool
txout = self.nodes[0].gettxout(mempool_txid, 0, False)
assert txout is None
txout1 = self.nodes[0].gettxout(mempool_txid, 0, True)
txout2 = self.nodes[0].gettxout(mempool_txid, 1, True)
# note the mempool tx will have randomly assigned indices
# but 10 will go to node2 and the rest will go to node0
balance = self.nodes[0].getbalance()
assert_equal(set([txout1['value'], txout2['value']]), set([10, balance]))
walletinfo = self.nodes[0].getwalletinfo()
assert_equal(walletinfo['immature_balance'], 0)
# Have node0 mine a block, thus it will collect its own fee.
self.nodes[0].generate(1)
self.sync_all(self.nodes[0:3])
# Exercise locking of unspent outputs
unspent_0 = self.nodes[2].listunspent()[0]
unspent_0 = {"txid": unspent_0["txid"], "vout": unspent_0["vout"]}
assert_raises_rpc_error(-8, "Invalid parameter, expected locked output", self.nodes[2].lockunspent, True, [unspent_0])
self.nodes[2].lockunspent(False, [unspent_0])
assert_raises_rpc_error(-8, "Invalid parameter, output already locked", self.nodes[2].lockunspent, False, [unspent_0])
assert_raises_rpc_error(-6, "Insufficient funds", self.nodes[2].sendtoaddress, self.nodes[2].getnewaddress(), 20)
assert_equal([unspent_0], self.nodes[2].listlockunspent())
self.nodes[2].lockunspent(True, [unspent_0])
assert_equal(len(self.nodes[2].listlockunspent()), 0)
assert_raises_rpc_error(-8, "txid must be of length 64 (not 34, for '0000000000000000000000000000000000')",
self.nodes[2].lockunspent, False,
[{"txid": "0000000000000000000000000000000000", "vout": 0}])
assert_raises_rpc_error(-8, "txid must be hexadecimal string (not 'ZZZ0000000000000000000000000000000000000000000000000000000000000')",
self.nodes[2].lockunspent, False,
[{"txid": "ZZZ0000000000000000000000000000000000000000000000000000000000000", "vout": 0}])
assert_raises_rpc_error(-8, "Invalid parameter, unknown transaction",
self.nodes[2].lockunspent, False,
[{"txid": "0000000000000000000000000000000000000000000000000000000000000000", "vout": 0}])
assert_raises_rpc_error(-8, "Invalid parameter, vout index out of bounds",
self.nodes[2].lockunspent, False,
[{"txid": unspent_0["txid"], "vout": 999}])
# The lock on a manually selected output is ignored
unspent_0 = self.nodes[1].listunspent()[0]
self.nodes[1].lockunspent(False, [unspent_0])
tx = self.nodes[1].createrawtransaction([unspent_0], { self.nodes[1].getnewaddress() : 1 })
        self.nodes[1].fundrawtransaction(tx, {"lockUnspents": True})
# fundrawtransaction can lock an input
self.nodes[1].lockunspent(True, [unspent_0])
assert_equal(len(self.nodes[1].listlockunspent()), 0)
        tx = self.nodes[1].fundrawtransaction(tx, {"lockUnspents": True})['hex']
assert_equal(len(self.nodes[1].listlockunspent()), 1)
# Send transaction
tx = self.nodes[1].signrawtransactionwithwallet(tx)["hex"]
self.nodes[1].sendrawtransaction(tx)
assert_equal(len(self.nodes[1].listlockunspent()), 0)
# Have node1 generate 100 blocks (so node0 can recover the fee)
self.nodes[1].generate(100)
self.sync_all(self.nodes[0:3])
# node0 should end up with 100 btc in block rewards plus fees, but
# minus the 21 plus fees sent to node2
assert_equal(self.nodes[0].getbalance(), 100 - 21)
assert_equal(self.nodes[2].getbalance(), 21)
# Node0 should have two unspent outputs.
# Create a couple of transactions to send them to node2, submit them through
# node1, and make sure both node0 and node2 pick them up properly:
node0utxos = self.nodes[0].listunspent(1)
assert_equal(len(node0utxos), 2)
# create both transactions
txns_to_send = []
for utxo in node0utxos:
inputs = []
outputs = {}
inputs.append({"txid": utxo["txid"], "vout": utxo["vout"]})
outputs[self.nodes[2].getnewaddress()] = utxo["amount"] - 3
raw_tx = self.nodes[0].createrawtransaction(inputs, outputs)
txns_to_send.append(self.nodes[0].signrawtransactionwithwallet(raw_tx))
# Have node 1 (miner) send the transactions
self.nodes[1].sendrawtransaction(hexstring=txns_to_send[0]["hex"], maxfeerate=0)
self.nodes[1].sendrawtransaction(hexstring=txns_to_send[1]["hex"], maxfeerate=0)
# Have node1 mine a block to confirm transactions:
self.nodes[1].generate(1)
self.sync_all(self.nodes[0:3])
assert_equal(self.nodes[0].getbalance(), 0)
assert_equal(self.nodes[2].getbalance(), 94)
# Verify that a spent output cannot be locked anymore
spent_0 = {"txid": node0utxos[0]["txid"], "vout": node0utxos[0]["vout"]}
assert_raises_rpc_error(-8, "Invalid parameter, expected unspent output", self.nodes[0].lockunspent, False, [spent_0])
# Send 10 BTC normal
address = self.nodes[0].getnewaddress("test")
fee_per_byte = Decimal('0.001') / 1000
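        # settxfee expects a rate in BTC/kvB, so scale the per-byte rate by 1000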
self.nodes[2].settxfee(fee_per_byte * 1000)
txid = self.nodes[2].sendtoaddress(address, 10, "", "", False)
self.nodes[2].generate(1)
self.sync_all(self.nodes[0:3])
node_2_bal = self.check_fee_amount(self.nodes[2].getbalance(), Decimal('84'), fee_per_byte, self.get_vsize(self.nodes[2].gettransaction(txid)['hex']))
assert_equal(self.nodes[0].getbalance(), Decimal('10'))
# Send 10 BTC with subtract fee from amount
txid = self.nodes[2].sendtoaddress(address, 10, "", "", True)
self.nodes[2].generate(1)
self.sync_all(self.nodes[0:3])
node_2_bal -= Decimal('10')
assert_equal(self.nodes[2].getbalance(), node_2_bal)
node_0_bal = self.check_fee_amount(self.nodes[0].getbalance(), Decimal('20'), fee_per_byte, self.get_vsize(self.nodes[2].gettransaction(txid)['hex']))
self.log.info("Test sendmany")
# Sendmany 10 BTC
txid = self.nodes[2].sendmany('', {address: 10}, 0, "", [])
self.nodes[2].generate(1)
self.sync_all(self.nodes[0:3])
node_0_bal += Decimal('10')
node_2_bal = self.check_fee_amount(self.nodes[2].getbalance(), node_2_bal - Decimal('10'), fee_per_byte, self.get_vsize(self.nodes[2].gettransaction(txid)['hex']))
assert_equal(self.nodes[0].getbalance(), node_0_bal)
# Sendmany 10 BTC with subtract fee from amount
txid = self.nodes[2].sendmany('', {address: 10}, 0, "", [address])
self.nodes[2].generate(1)
self.sync_all(self.nodes[0:3])
node_2_bal -= Decimal('10')
assert_equal(self.nodes[2].getbalance(), node_2_bal)
node_0_bal = self.check_fee_amount(self.nodes[0].getbalance(), node_0_bal + Decimal('10'), fee_per_byte, self.get_vsize(self.nodes[2].gettransaction(txid)['hex']))
self.log.info("Test sendmany with fee_rate param (explicit fee rate in sat/vB)")
fee_rate_sat_vb = 2
fee_rate_btc_kvb = fee_rate_sat_vb * 1e3 / 1e8
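        # e.g. 2 sat/vB * 1000 vB/kvB / 1e8 sat/BTC = 0.00002 BTC/kvB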
explicit_fee_rate_btc_kvb = Decimal(fee_rate_btc_kvb) / 1000
# Test passing fee_rate as a string
txid = self.nodes[2].sendmany(amounts={address: 10}, fee_rate=str(fee_rate_sat_vb))
self.nodes[2].generate(1)
self.sync_all(self.nodes[0:3])
balance = self.nodes[2].getbalance()
node_2_bal = self.check_fee_amount(balance, node_2_bal - Decimal('10'), explicit_fee_rate_btc_kvb, self.get_vsize(self.nodes[2].gettransaction(txid)['hex']))
assert_equal(balance, node_2_bal)
node_0_bal += Decimal('10')
assert_equal(self.nodes[0].getbalance(), node_0_bal)
# Test passing fee_rate as an integer
amount = Decimal("0.0001")
txid = self.nodes[2].sendmany(amounts={address: amount}, fee_rate=fee_rate_sat_vb)
self.nodes[2].generate(1)
self.sync_all(self.nodes[0:3])
balance = self.nodes[2].getbalance()
node_2_bal = self.check_fee_amount(balance, node_2_bal - amount, explicit_fee_rate_btc_kvb, self.get_vsize(self.nodes[2].gettransaction(txid)['hex']))
assert_equal(balance, node_2_bal)
node_0_bal += amount
assert_equal(self.nodes[0].getbalance(), node_0_bal)
for key in ["totalFee", "feeRate"]:
assert_raises_rpc_error(-8, "Unknown named parameter key", self.nodes[2].sendtoaddress, address=address, amount=1, fee_rate=1, key=1)
# Test setting explicit fee rate just below the minimum.
self.log.info("Test sendmany raises 'fee rate too low' if fee_rate of 0.99999999 is passed")
assert_raises_rpc_error(-6, "Fee rate (0.999 sat/vB) is lower than the minimum fee rate setting (1.000 sat/vB)",
self.nodes[2].sendmany, amounts={address: 10}, fee_rate=0.99999999)
self.log.info("Test sendmany raises if fee_rate of 0 or -1 is passed")
assert_raises_rpc_error(-6, "Fee rate (0.000 sat/vB) is lower than the minimum fee rate setting (1.000 sat/vB)",
self.nodes[2].sendmany, amounts={address: 10}, fee_rate=0)
assert_raises_rpc_error(-3, OUT_OF_RANGE, self.nodes[2].sendmany, amounts={address: 10}, fee_rate=-1)
self.log.info("Test sendmany raises if an invalid conf_target or estimate_mode is passed")
for target, mode in product([-1, 0, 1009], ["economical", "conservative"]):
assert_raises_rpc_error(-8, "Invalid conf_target, must be between 1 and 1008", # max value of 1008 per src/policy/fees.h
self.nodes[2].sendmany, amounts={address: 1}, conf_target=target, estimate_mode=mode)
for target, mode in product([-1, 0], ["btc/kb", "sat/b"]):
assert_raises_rpc_error(-8, 'Invalid estimate_mode parameter, must be one of: "unset", "economical", "conservative"',
self.nodes[2].sendmany, amounts={address: 1}, conf_target=target, estimate_mode=mode)
self.start_node(3, self.nodes[3].extra_args)
self.connect_nodes(0, 3)
self.sync_all()
# check if we can list zero value tx as available coins
# 1. create raw_tx
# 2. hex-changed one output to 0.0
# 3. sign and send
# 4. check if recipient (node0) can list the zero value tx
usp = self.nodes[1].listunspent(query_options={'minimumAmount': '49.998'})[0]
inputs = [{"txid": usp['txid'], "vout": usp['vout']}]
outputs = {self.nodes[1].getnewaddress(): 49.998, self.nodes[0].getnewaddress(): 11.11}
        raw_tx = self.nodes[1].createrawtransaction(inputs, outputs).replace("c0833842", "00000000")  # zero out the 11.11 BTC output: c0833842 is 1,111,000,000 sat, the low 4 bytes of the little-endian amount
signed_raw_tx = self.nodes[1].signrawtransactionwithwallet(raw_tx)
decoded_raw_tx = self.nodes[1].decoderawtransaction(signed_raw_tx['hex'])
zero_value_txid = decoded_raw_tx['txid']
self.nodes[1].sendrawtransaction(signed_raw_tx['hex'])
self.sync_all()
self.nodes[1].generate(1) # mine a block
self.sync_all()
        unspent_txs = self.nodes[0].listunspent()  # zero value tx must be in listunspent output
found = False
for uTx in unspent_txs:
if uTx['txid'] == zero_value_txid:
found = True
assert_equal(uTx['amount'], Decimal('0'))
assert found
self.log.info("Test -walletbroadcast")
self.stop_nodes()
self.start_node(0, ["-walletbroadcast=0"])
self.start_node(1, ["-walletbroadcast=0"])
self.start_node(2, ["-walletbroadcast=0"])
self.connect_nodes(0, 1)
self.connect_nodes(1, 2)
self.connect_nodes(0, 2)
self.sync_all(self.nodes[0:3])
txid_not_broadcast = self.nodes[0].sendtoaddress(self.nodes[2].getnewaddress(), 2)
tx_obj_not_broadcast = self.nodes[0].gettransaction(txid_not_broadcast)
self.nodes[1].generate(1) # mine a block, tx should not be in there
self.sync_all(self.nodes[0:3])
assert_equal(self.nodes[2].getbalance(), node_2_bal) # should not be changed because tx was not broadcasted
# now broadcast from another node, mine a block, sync, and check the balance
self.nodes[1].sendrawtransaction(tx_obj_not_broadcast['hex'])
self.nodes[1].generate(1)
self.sync_all(self.nodes[0:3])
node_2_bal += 2
tx_obj_not_broadcast = self.nodes[0].gettransaction(txid_not_broadcast)
assert_equal(self.nodes[2].getbalance(), node_2_bal)
# create another tx
self.nodes[0].sendtoaddress(self.nodes[2].getnewaddress(), 2)
# restart the nodes with -walletbroadcast=1
self.stop_nodes()
self.start_node(0)
self.start_node(1)
self.start_node(2)
self.connect_nodes(0, 1)
self.connect_nodes(1, 2)
self.connect_nodes(0, 2)
self.sync_blocks(self.nodes[0:3])
self.nodes[0].generate(1)
self.sync_blocks(self.nodes[0:3])
node_2_bal += 2
        # after the restart the nodes should rebroadcast the tx, so it should show up in the balance
assert_equal(self.nodes[2].getbalance(), node_2_bal)
# send a tx with value in a string (PR#6380 +)
txid = self.nodes[0].sendtoaddress(self.nodes[2].getnewaddress(), "2")
tx_obj = self.nodes[0].gettransaction(txid)
assert_equal(tx_obj['amount'], Decimal('-2'))
txid = self.nodes[0].sendtoaddress(self.nodes[2].getnewaddress(), "0.0001")
tx_obj = self.nodes[0].gettransaction(txid)
assert_equal(tx_obj['amount'], Decimal('-0.0001'))
# check if JSON parser can handle scientific notation in strings
txid = self.nodes[0].sendtoaddress(self.nodes[2].getnewaddress(), "1e-4")
tx_obj = self.nodes[0].gettransaction(txid)
assert_equal(tx_obj['amount'], Decimal('-0.0001'))
# General checks for errors from incorrect inputs
# This will raise an exception because the amount is negative
assert_raises_rpc_error(-3, OUT_OF_RANGE, self.nodes[0].sendtoaddress, self.nodes[2].getnewaddress(), "-1")
# This will raise an exception because the amount type is wrong
assert_raises_rpc_error(-3, "Invalid amount", self.nodes[0].sendtoaddress, self.nodes[2].getnewaddress(), "1f-4")
# This will raise an exception since generate does not accept a string
assert_raises_rpc_error(-1, "not an integer", self.nodes[0].generate, "2")
if not self.options.descriptors:
# This will raise an exception for the invalid private key format
assert_raises_rpc_error(-5, "Invalid private key encoding", self.nodes[0].importprivkey, "invalid")
            # This will raise an exception for importing an address with the P2SH flag
temp_address = self.nodes[1].getnewaddress("", "p2sh-segwit")
assert_raises_rpc_error(-5, "Cannot use the p2sh flag with an address - use a script instead", self.nodes[0].importaddress, temp_address, "label", False, True)
# This will raise an exception for attempting to dump the private key of an address you do not own
assert_raises_rpc_error(-3, "Address does not refer to a key", self.nodes[0].dumpprivkey, temp_address)
# This will raise an exception for attempting to get the private key of an invalid Bitcoin address
assert_raises_rpc_error(-5, "Invalid Bitcoin address", self.nodes[0].dumpprivkey, "invalid")
# This will raise an exception for attempting to set a label for an invalid Bitcoin address
assert_raises_rpc_error(-5, "Invalid Bitcoin address", self.nodes[0].setlabel, "invalid address", "label")
# This will raise an exception for importing an invalid address
assert_raises_rpc_error(-5, "Invalid Bitcoin address or script", self.nodes[0].importaddress, "invalid")
# This will raise an exception for attempting to import a pubkey that isn't in hex
assert_raises_rpc_error(-5, "Pubkey must be a hex string", self.nodes[0].importpubkey, "not hex")
# This will raise an exception for importing an invalid pubkey
assert_raises_rpc_error(-5, "Pubkey is not a valid public key", self.nodes[0].importpubkey, "5361746f736869204e616b616d6f746f")
# Import address and private key to check correct behavior of spendable unspents
# 1. Send some coins to generate new UTXO
address_to_import = self.nodes[2].getnewaddress()
txid = self.nodes[0].sendtoaddress(address_to_import, 1)
self.nodes[0].generate(1)
self.sync_all(self.nodes[0:3])
self.log.info("Test sendtoaddress with fee_rate param (explicit fee rate in sat/vB)")
prebalance = self.nodes[2].getbalance()
assert prebalance > 2
address = self.nodes[1].getnewaddress()
amount = 3
fee_rate_sat_vb = 2
fee_rate_btc_kvb = fee_rate_sat_vb * 1e3 / 1e8
# Test passing fee_rate as an integer
txid = self.nodes[2].sendtoaddress(address=address, amount=amount, fee_rate=fee_rate_sat_vb)
tx_size = self.get_vsize(self.nodes[2].gettransaction(txid)['hex'])
self.nodes[0].generate(1)
self.sync_all(self.nodes[0:3])
postbalance = self.nodes[2].getbalance()
fee = prebalance - postbalance - Decimal(amount)
assert_fee_amount(fee, tx_size, Decimal(fee_rate_btc_kvb))
prebalance = self.nodes[2].getbalance()
amount = Decimal("0.001")
fee_rate_sat_vb = 1.23
fee_rate_btc_kvb = fee_rate_sat_vb * 1e3 / 1e8
# Test passing fee_rate as a string
txid = self.nodes[2].sendtoaddress(address=address, amount=amount, fee_rate=str(fee_rate_sat_vb))
tx_size = self.get_vsize(self.nodes[2].gettransaction(txid)['hex'])
self.nodes[0].generate(1)
self.sync_all(self.nodes[0:3])
postbalance = self.nodes[2].getbalance()
fee = prebalance - postbalance - amount
assert_fee_amount(fee, tx_size, Decimal(fee_rate_btc_kvb))
for key in ["totalFee", "feeRate"]:
assert_raises_rpc_error(-8, "Unknown named parameter key", self.nodes[2].sendtoaddress, address=address, amount=1, fee_rate=1, key=1)
# Test setting explicit fee rate just below the minimum.
self.log.info("Test sendtoaddress raises 'fee rate too low' if fee_rate of 0.99999999 is passed")
assert_raises_rpc_error(-6, "Fee rate (0.999 sat/vB) is lower than the minimum fee rate setting (1.000 sat/vB)",
self.nodes[2].sendtoaddress, address=address, amount=1, fee_rate=0.99999999)
self.log.info("Test sendtoaddress raises if fee_rate of 0 or -1 is passed")
assert_raises_rpc_error(-6, "Fee rate (0.000 sat/vB) is lower than the minimum fee rate setting (1.000 sat/vB)",
self.nodes[2].sendtoaddress, address=address, amount=10, fee_rate=0)
assert_raises_rpc_error(-3, OUT_OF_RANGE, self.nodes[2].sendtoaddress, address=address, amount=1.0, fee_rate=-1)
self.log.info("Test sendtoaddress raises if an invalid conf_target or estimate_mode is passed")
for target, mode in product([-1, 0, 1009], ["economical", "conservative"]):
assert_raises_rpc_error(-8, "Invalid conf_target, must be between 1 and 1008", # max value of 1008 per src/policy/fees.h
self.nodes[2].sendtoaddress, address=address, amount=1, conf_target=target, estimate_mode=mode)
for target, mode in product([-1, 0], ["btc/kb", "sat/b"]):
assert_raises_rpc_error(-8, 'Invalid estimate_mode parameter, must be one of: "unset", "economical", "conservative"',
self.nodes[2].sendtoaddress, address=address, amount=1, conf_target=target, estimate_mode=mode)
# 2. Import address from node2 to node1
self.nodes[1].importaddress(address_to_import)
# 3. Validate that the imported address is watch-only on node1
assert self.nodes[1].getaddressinfo(address_to_import)["iswatchonly"]
# 4. Check that the unspents after import are not spendable
assert_array_result(self.nodes[1].listunspent(),
{"address": address_to_import},
{"spendable": False})
# 5. Import private key of the previously imported address on node1
priv_key = self.nodes[2].dumpprivkey(address_to_import)
self.nodes[1].importprivkey(priv_key)
# 6. Check that the unspents are now spendable on node1
assert_array_result(self.nodes[1].listunspent(),
{"address": address_to_import},
{"spendable": True})
# Mine a block from node0 to an address from node1
coinbase_addr = self.nodes[1].getnewaddress()
block_hash = self.nodes[0].generatetoaddress(1, coinbase_addr)[0]
coinbase_txid = self.nodes[0].getblock(block_hash)['tx'][0]
self.sync_all(self.nodes[0:3])
# Check that the txid and balance is found by node1
self.nodes[1].gettransaction(coinbase_txid)
# check if wallet or blockchain maintenance changes the balance
self.sync_all(self.nodes[0:3])
blocks = self.nodes[0].generate(2)
self.sync_all(self.nodes[0:3])
balance_nodes = [self.nodes[i].getbalance() for i in range(3)]
block_count = self.nodes[0].getblockcount()
# Check modes:
# - True: unicode escaped as \u....
# - False: unicode directly as UTF-8
for mode in [True, False]:
self.nodes[0].rpc.ensure_ascii = mode
# unicode check: Basic Multilingual Plane, Supplementary Plane respectively
for label in [u'рыба', u'𝅘𝅥𝅯']:
addr = self.nodes[0].getnewaddress()
self.nodes[0].setlabel(addr, label)
test_address(self.nodes[0], addr, labels=[label])
assert label in self.nodes[0].listlabels()
self.nodes[0].rpc.ensure_ascii = True # restore to default
# maintenance tests
maintenance = [
'-rescan',
'-reindex',
]
chainlimit = 6
for m in maintenance:
self.log.info("Test " + m)
self.stop_nodes()
# set lower ancestor limit for later
self.start_node(0, [m, "-limitancestorcount=" + str(chainlimit)])
self.start_node(1, [m, "-limitancestorcount=" + str(chainlimit)])
self.start_node(2, [m, "-limitancestorcount=" + str(chainlimit)])
if m == '-reindex':
                # -reindex ends the RPC warm-up "early"; wait for the reindex itself to finish
self.wait_until(lambda: [block_count] * 3 == [self.nodes[i].getblockcount() for i in range(3)])
assert_equal(balance_nodes, [self.nodes[i].getbalance() for i in range(3)])
# Exercise listsinceblock with the last two blocks
coinbase_tx_1 = self.nodes[0].listsinceblock(blocks[0])
assert_equal(coinbase_tx_1["lastblock"], blocks[1])
assert_equal(len(coinbase_tx_1["transactions"]), 1)
assert_equal(coinbase_tx_1["transactions"][0]["blockhash"], blocks[1])
assert_equal(len(self.nodes[0].listsinceblock(blocks[1])["transactions"]), 0)
        # ===== Check that wallet prefers to use coins that don't exceed mempool limits =====
# Get all non-zero utxos together
chain_addrs = [self.nodes[0].getnewaddress(), self.nodes[0].getnewaddress()]
singletxid = self.nodes[0].sendtoaddress(chain_addrs[0], self.nodes[0].getbalance(), "", "", True)
self.nodes[0].generate(1)
node0_balance = self.nodes[0].getbalance()
# Split into two chains
rawtx = self.nodes[0].createrawtransaction([{"txid": singletxid, "vout": 0}], {chain_addrs[0]: node0_balance / 2 - Decimal('0.01'), chain_addrs[1]: node0_balance / 2 - Decimal('0.01')})
signedtx = self.nodes[0].signrawtransactionwithwallet(rawtx)
singletxid = self.nodes[0].sendrawtransaction(hexstring=signedtx["hex"], maxfeerate=0)
self.nodes[0].generate(1)
# Make a long chain of unconfirmed payments without hitting mempool limit
# Each tx we make leaves only one output of change on a chain 1 longer
# Since the amount to send is always much less than the outputs, we only ever need one output
# So we should be able to generate exactly chainlimit txs for each original output
sending_addr = self.nodes[1].getnewaddress()
txid_list = []
for _ in range(chainlimit * 2):
txid_list.append(self.nodes[0].sendtoaddress(sending_addr, Decimal('0.0001')))
assert_equal(self.nodes[0].getmempoolinfo()['size'], chainlimit * 2)
assert_equal(len(txid_list), chainlimit * 2)
# Without walletrejectlongchains, we will still generate a txid
# The tx will be stored in the wallet but not accepted to the mempool
extra_txid = self.nodes[0].sendtoaddress(sending_addr, Decimal('0.0001'))
assert extra_txid not in self.nodes[0].getrawmempool()
assert extra_txid in [tx["txid"] for tx in self.nodes[0].listtransactions()]
self.nodes[0].abandontransaction(extra_txid)
total_txs = len(self.nodes[0].listtransactions("*", 99999))
# Try with walletrejectlongchains
# Double chain limit but require combining inputs, so we pass SelectCoinsMinConf
self.stop_node(0)
extra_args = ["-walletrejectlongchains", "-limitancestorcount=" + str(2 * chainlimit)]
self.start_node(0, extra_args=extra_args)
# wait until the wallet has submitted all transactions to the mempool
self.wait_until(lambda: len(self.nodes[0].getrawmempool()) == chainlimit * 2)
# Prevent potential race condition when calling wallet RPCs right after restart
self.nodes[0].syncwithvalidationinterfacequeue()
node0_balance = self.nodes[0].getbalance()
# With walletrejectlongchains we will not create the tx and store it in our wallet.
assert_raises_rpc_error(-6, "Transaction has too long of a mempool chain", self.nodes[0].sendtoaddress, sending_addr, node0_balance - Decimal('0.01'))
# Verify nothing new in wallet
assert_equal(total_txs, len(self.nodes[0].listtransactions("*", 99999)))
# Test getaddressinfo on external address. Note that these addresses are taken from disablewallet.py
assert_raises_rpc_error(-5, "Invalid prefix for Base58-encoded address", self.nodes[0].getaddressinfo, "3J98t1WpEZ73CNmQviecrnyiWrnqRhWNLy")
address_info = self.nodes[0].getaddressinfo("mneYUmWYsuk7kySiURxCi3AGxrAqZxLgPZ")
assert_equal(address_info['address'], "mneYUmWYsuk7kySiURxCi3AGxrAqZxLgPZ")
assert_equal(address_info["scriptPubKey"], "76a9144e3854046c7bd1594ac904e4793b6a45b36dea0988ac")
assert not address_info["ismine"]
assert not address_info["iswatchonly"]
assert not address_info["isscript"]
assert not address_info["ischange"]
# Test getaddressinfo 'ischange' field on change address.
self.nodes[0].generate(1)
destination = self.nodes[1].getnewaddress()
txid = self.nodes[0].sendtoaddress(destination, 0.123)
tx = self.nodes[0].decoderawtransaction(self.nodes[0].gettransaction(txid)['hex'])
output_addresses = [vout['scriptPubKey']['address'] for vout in tx["vout"]]
assert len(output_addresses) > 1
for address in output_addresses:
ischange = self.nodes[0].getaddressinfo(address)['ischange']
assert_equal(ischange, address != destination)
if ischange:
change = address
self.nodes[0].setlabel(change, 'foobar')
assert_equal(self.nodes[0].getaddressinfo(change)['ischange'], False)
# Test gettransaction response with different arguments.
self.log.info("Testing gettransaction response with different arguments...")
self.nodes[0].setlabel(change, 'baz')
baz = self.nodes[0].listtransactions(label="baz", count=1)[0]
expected_receive_vout = {"label": "baz",
"address": baz["address"],
"amount": baz["amount"],
"category": baz["category"],
"vout": baz["vout"]}
expected_fields = frozenset({'amount', 'bip125-replaceable', 'confirmations', 'details', 'fee',
'hex', 'time', 'timereceived', 'trusted', 'txid', 'walletconflicts'})
verbose_field = "decoded"
expected_verbose_fields = expected_fields | {verbose_field}
self.log.debug("Testing gettransaction response without verbose")
tx = self.nodes[0].gettransaction(txid=txid)
assert_equal(set([*tx]), expected_fields)
assert_array_result(tx["details"], {"category": "receive"}, expected_receive_vout)
self.log.debug("Testing gettransaction response with verbose set to False")
tx = self.nodes[0].gettransaction(txid=txid, verbose=False)
assert_equal(set([*tx]), expected_fields)
assert_array_result(tx["details"], {"category": "receive"}, expected_receive_vout)
self.log.debug("Testing gettransaction response with verbose set to True")
tx = self.nodes[0].gettransaction(txid=txid, verbose=True)
assert_equal(set([*tx]), expected_verbose_fields)
assert_array_result(tx["details"], {"category": "receive"}, expected_receive_vout)
assert_equal(tx[verbose_field], self.nodes[0].decoderawtransaction(tx["hex"]))
self.log.info("Test send* RPCs with verbose=True")
address = self.nodes[0].getnewaddress("test")
txid_feeReason_one = self.nodes[2].sendtoaddress(address=address, amount=5, verbose=True)
assert_equal(txid_feeReason_one["fee_reason"], "Fallback fee")
txid_feeReason_two = self.nodes[2].sendmany(dummy='', amounts={address: 5}, verbose=True)
assert_equal(txid_feeReason_two["fee_reason"], "Fallback fee")
self.log.info("Test send* RPCs with verbose=False")
txid_feeReason_three = self.nodes[2].sendtoaddress(address=address, amount=5, verbose=False)
assert_equal(self.nodes[2].gettransaction(txid_feeReason_three)['txid'], txid_feeReason_three)
txid_feeReason_four = self.nodes[2].sendmany(dummy='', amounts={address: 5}, verbose=False)
assert_equal(self.nodes[2].gettransaction(txid_feeReason_four)['txid'], txid_feeReason_four)
if __name__ == '__main__':
WalletTest().main()
|
# ===========================================================================
# Copyright 2013 University of Limerick
#
# This file is part of DREAM.
#
# DREAM is free software: you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# DREAM is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public License
# along with DREAM. If not, see <http://www.gnu.org/licenses/>.
# ===========================================================================
'''
Created on 18 Aug 2013
@author: George
'''
'''
Class that acts as an abstract class; it should have no instances. All the
resources should inherit from it.
'''
# from SimPy.Simulation import Resource
import simpy
from ManPyObject import ManPyObject
# ===========================================================================
# the resource that repairs the machines
# ===========================================================================
class ObjectResource(ManPyObject):
def __init__(self,id='',name='',**kw):
ManPyObject.__init__(self,id,name)
self.initialized = False
# list that holds the objectInterruptions that have this element as victim
self.objectInterruptions=[]
# alias used for printing the trace
self.alias=None
# list with the coreObjects IDs that the resource services
self.coreObjectIds=[]
from Globals import G
G.ObjectResourceList.append(self)
def initialize(self):
from Globals import G
self.env=G.env
self.timeLastOperationStarted=0 #holds the time that the last repair was started
self.Res=simpy.Resource(self.env, capacity=self.capacity)
# variable that checks whether the resource is already initialized
self.initialized = True
# list with the coreObjects IDs that the resource services
self.coreObjectIds=[]
# list with the coreObjects that the resource services
self.coreObjects=[]
# flag that locks the resource so that it cannot get new jobs
self.isLocked=False
# lists that keep the start/endShiftTimes of the victim
self.endShiftTimes=[]
self.startShiftTimes=[]
# =======================================================================
# checks if the worker is available
# =======================================================================
def checkIfResourceIsAvailable(self,callerObject=None):
# return true if the operator is idle and on shift
return len(self.Res.users)<self.capacity and self.onShift and (not self.isLocked)
# =======================================================================
# returns the resource
# =======================================================================
def getResource(self):
return self.Res
# =======================================================================
# returns the active queue of the resource
# =======================================================================
def getResourceQueue(self):
return self.Res.users
# =======================================================================
# check if the resource is already initialized
# =======================================================================
def isInitialized(self):
return self.initialized
#===========================================================================
# print the route (the different stations the resource was occupied by)
#===========================================================================
def printRoute(self):
pass
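# ===========================================================================
# illustrative sketch (not part of DREAM): a minimal concrete resource.
# ObjectResource never sets `capacity` or `onShift` itself, so a subclass is
# expected to provide both before initialize()/checkIfResourceIsAvailable()
# are used.
# ===========================================================================
class ExampleRepairman(ObjectResource):
    def __init__(self, id='', name='', capacity=1, **kw):
        ObjectResource.__init__(self, id=id, name=name)
        self.capacity = capacity  # parallel jobs this resource can service
        self.onShift = True       # assume always on shift for this sketch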
|
#!/usr/bin/python
# Copyright (c) 2014-2015 Cedric Bellegarde <[email protected]>
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
from gi.repository import GLib, GObject
import os
from stat import S_ISREG, ST_MTIME, ST_MODE
from lollypop.define import Lp
# Playlists manager: add, remove, list, append, ...
class PlaylistsManager(GObject.GObject):
_PLAYLISTS_PATH = os.path.expanduser("~") +\
"/.local/share/lollypop/playlists"
__gsignals__ = {
# Add or remove a playlist
'playlists-changed': (GObject.SignalFlags.RUN_FIRST, None, ()),
# Objects added/removed to/from playlist
'playlist-changed': (GObject.SignalFlags.RUN_FIRST, None, (str,))
}
def __init__(self):
GObject.GObject.__init__(self)
self._idx = {}
# Create playlists directory if missing
if not os.path.exists(self._PLAYLISTS_PATH):
try:
os.mkdir(self._PLAYLISTS_PATH)
except Exception as e:
print("Lollypop::PlaylistsManager::init: %s" % e)
self._init_idx()
"""
Add a playlist (Thread safe)
@param playlist name as str
@param get file descriptor as bool
@return file descriptor if 2nd param True
"""
def add(self, playlist_name, get_desc=False):
filename = self._PLAYLISTS_PATH + "/"+playlist_name + ".m3u"
try:
if os.path.exists(filename):
changed = False
else:
changed = True
f = open(filename, "w")
f.write("#EXTM3U\n")
if get_desc:
return f
else:
f.close()
# Add playlist to cache
if changed:
try:
max_idx = max(self._idx.keys())+1
except:
max_idx = 0
self._idx[max_idx] = playlist_name
GLib.idle_add(self.emit, 'playlists-changed')
except Exception as e:
print("PlaylistsManager::add: %s" % e)
"""
Rename playlist (Thread safe)
@param new playlist name as str
@param old playlist name as str
"""
def rename(self, new_name, old_name):
try:
os.rename(self._PLAYLISTS_PATH+"/"+old_name+".m3u",
self._PLAYLISTS_PATH+"/"+new_name+".m3u")
for (idx, playlist) in self._idx.items():
if playlist == old_name:
self._idx[idx] = new_name
break
GLib.idle_add(self.emit, "playlists-changed")
except Exception as e:
print("PlaylistsManager::rename: %s" % e)
"""
    Delete playlist (Thread safe)
@param playlist name as str
"""
def delete(self, playlist_name):
try:
os.remove(self._PLAYLISTS_PATH+"/"+playlist_name+".m3u")
for (idx, playlist) in self._idx.items():
if playlist == playlist_name:
del self._idx[idx]
break
GLib.idle_add(self.emit, "playlists-changed")
except Exception as e:
print("PlaylistsManager::delete: %s" % e)
"""
    Return available playlists
@return array of (id, string)
"""
def get(self):
return sorted(self._idx.items(),
key=lambda item: item[1].lower())
"""
    Return the 10 last modified playlists
@return array of (id, string)
"""
def get_last(self):
playlists = []
try:
index = 0
entries = []
for filename in os.listdir(self._PLAYLISTS_PATH):
stat = os.stat(self._PLAYLISTS_PATH+"/"+filename)
if S_ISREG(stat[ST_MODE]):
entries.append((stat[ST_MTIME], filename))
for cdate, filename in sorted(entries, reverse=True):
if filename.endswith(".m3u"):
playlists.append(filename[:-4])
index += 1
                    # Stop once 10 playlists have been collected
if index >= 10:
break
except Exception as e:
print("Lollypop::PlaylistManager::get_last: %s" % e)
return playlists
"""
    Return available tracks for playlist
@param playlist playlist_name as str
@return array of track filepath as str
"""
def get_tracks(self, playlist_name):
tracks = []
try:
f = open(self._PLAYLISTS_PATH+"/"+playlist_name+".m3u", "r")
for filepath in f:
if filepath[0] not in ["#", "\n"]:
tracks.append(filepath[:-1])
f.close()
except Exception as e:
print("PlaylistsManager::get_tracks: %s" % e)
return tracks
"""
Set playlist tracks (Thread safe)
@param playlist name as str
@param tracks path as [str]
"""
def set_tracks(self, playlist_name, tracks_path):
f = self.add(playlist_name, True)
for filepath in tracks_path:
self._add_track(f, playlist_name, filepath)
GLib.timeout_add(1000, self.emit, "playlist-changed", playlist_name)
try:
f.close()
except Exception as e:
print("PlaylistsManager::set_tracks: %s" % e)
"""
    Return available track ids for playlist
    Thread safe if you pass an SQL cursor
@param playlist name as str
@return array of track id as int
"""
def get_tracks_id(self, playlist_name, sql=None):
tracks_id = []
for filepath in self.get_tracks(playlist_name):
tracks_id.append(Lp.tracks.get_id_by_path(filepath, sql))
return tracks_id
"""
Add track to playlist if not already present
@param playlist name as str
@param track filepath as str
"""
def add_track(self, playlist_name, filepath):
try:
f = open(self._PLAYLISTS_PATH+"/"+playlist_name+".m3u", "a")
self._add_track(f, playlist_name, filepath)
GLib.idle_add(self.emit, "playlist-changed", playlist_name)
f.close()
except Exception as e:
print("PlaylistsManager::add_track: %s" % e)
"""
Add tracks to playlist if not already present
@param playlist name as str
@param tracks filepath as [str]
"""
def add_tracks(self, playlist_name, tracks_path):
try:
f = open(self._PLAYLISTS_PATH+"/"+playlist_name+".m3u", "a")
for filepath in tracks_path:
self._add_track(f, playlist_name, filepath)
GLib.idle_add(self.emit, "playlist-changed", playlist_name)
f.close()
except Exception as e:
print("PlaylistsManager::add_tracks: %s" % e)
"""
Remove tracks from playlist
@param playlist name as str
@param tracks to remove as [str]
"""
def remove_tracks(self, playlist_name, tracks_to_remove):
playlist_tracks = self.get_tracks(playlist_name)
self._remove_tracks(playlist_name, playlist_tracks, tracks_to_remove)
GLib.idle_add(self.emit, "playlist-changed", playlist_name)
"""
Return True if object_id is already present in playlist
@param playlist name as str
@param object id as int
@param genre id as int
@param is an album as bool
@param sql as sqlite cursor
@return bool
"""
def is_present(self, playlist_name, object_id,
genre_id, is_album, sql=None):
playlist_paths = self.get_tracks(playlist_name)
if is_album:
tracks_path = Lp.albums.get_tracks_path(object_id,
genre_id,
sql)
else:
tracks_path = [Lp.tracks.get_path(object_id, sql)]
found = 0
len_tracks = len(tracks_path)
for filepath in tracks_path:
if filepath in playlist_paths:
found += 1
if found >= len_tracks:
break
if found == len_tracks:
return True
else:
return False
#######################
# PRIVATE #
#######################
"""
Create initial index
"""
def _init_idx(self):
playlists = []
try:
for filename in sorted(os.listdir(self._PLAYLISTS_PATH)):
if filename.endswith(".m3u"):
playlists.append(filename[:-4])
except Exception as e:
print("Lollypop::PlaylistManager::get: %s" % e)
idx = 0
for playlist in playlists:
self._idx[idx] = playlist
idx += 1
"""
Add track to playlist if not already present
@param f as file descriptor
@param playlist name as str
@param track filepath as str
"""
def _add_track(self, f, playlist_name, filepath):
tracks = self.get_tracks(playlist_name)
# Do nothing if uri already present in playlist
if filepath not in tracks:
try:
f.write(filepath+'\n')
except Exception as e:
print("PlaylistsManager::_add_track: %s" % e)
"""
Remove track from playlist
@param playlist name as str
@param playlist tracks as [str]
@param tracks to remove as [str]
"""
def _remove_tracks(self, playlist_name, playlist_tracks, tracks_to_remove):
try:
f = open(self._PLAYLISTS_PATH+"/"+playlist_name+".m3u", "w")
for path in playlist_tracks:
if path not in tracks_to_remove:
f.write(path+'\n')
f.close()
except Exception as e:
print("PlaylistsManager::remove_tracks: %s" % e)
# Radios manager
class RadiosManager(PlaylistsManager):
_PLAYLISTS_PATH = os.path.expanduser("~") + "/.local/share/lollypop/radios"
def __init__(self):
PlaylistsManager.__init__(self)
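# Illustrative usage sketch (assumes a Lollypop session where Lp is set up;
# names and paths are hypothetical):
#     manager = PlaylistsManager()
#     manager.add("Favorites")
#     manager.add_track("Favorites", "/music/song.mp3")
#     print(manager.get())  # -> [(0, 'Favorites'), ...]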
|
import itertools
import threading
import datetime
import graphics
import requests
import random
import driver
import flask
from flask import request
import game
import time
import sys
from wsgiref import simple_server
from collections import OrderedDict
class Message:
def __init__(self, text, priority=5, expiration=None, effects=[], lifetime=None):
self.text = text
self.label = graphics.TextSprite(text, width=5, height=7, y=4)
self.label.x = (112 - self.label.size()) // 2
self.priority = priority
        if expiration:
            self.expiration = expiration
        elif lifetime:
            self.expiration = time.time() + lifetime
        else:
            # no expiry requested: far-future sentinel (max 32-bit epoch)
            self.expiration = 2147483647
self.effects = []
for effect_type in effects:
effect = None
if effect_type == "scroll":
self.label.x = 112
effect = graphics.Animator(self.label, attr="x", max=112,
min=-self.label.size(),
loop=True, delay=.04, step=-1)
elif effect_type == "scroll_y":
effect = graphics.Animator(self.label, attr="y", max=15,
min=-self.label.height,
loop=True, delay=.4)
elif effect_type == "bounce_x":
self.label.x = 112
effect = graphics.Animator(
self.label, attr="x",
min=(112 - self.label.size() if self.label.size() > 112 else 0),
max=(0 if self.label.size() > 112 else 112 - self.label.size()),
delay=.04,
reverse=True)
elif effect_type == "bounce_y":
effect = graphics.Animator(self.label, attr="y",
max=15-self.label.height, min=0,
reverse=True, delay=.4)
elif effect_type == "blink":
effect = graphics.Animator(self.label, attr="visible", max=1, min=0,
reverse=True, delay=1.5)
elif effect_type == "blink_fast":
effect = graphics.Animator(self.label, attr="visible", max=1, min=0,
reverse=True, delay=.25)
elif effect_type == "shake":
effect = graphics.Animator(self.label, attr="y", max=6, min=2,
delay=.01, reverse=True)
if effect:
self.effects.append(effect)
TEMPERATURE = 0
def update_temp():
while True:
try:
global TEMPERATURE
TEMPERATURE = requests.get("http://idiotic.hackafe.net/api/item/Average_Temperature/state").json()["result"]
time.sleep(60)
except:
time.sleep(5)
def get_default_message():
return Message("{:%H:%M:%S} {:.1f}C".format(datetime.datetime.now(), TEMPERATURE), priority=.1)
class MessageBoard(game.Game):
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
self.messages = OrderedDict()
self.frame_lock = threading.Lock()
self.api = flask.Flask(__name__)
self.api.add_url_rule('/add_message', 'add_message', self.add_message, methods=['POST'])
self.api.add_url_rule('/remove_message/<id>', 'remove_message', self.remove_message, methods=['GET', 'POST'])
self.api.add_url_rule('/clear', 'clear', self.clear, methods=['POST'])
self.server = simple_server.make_server('', 8800, self.api)
self.server_thread = threading.Thread(target=self.server.serve_forever, daemon=True)
self.server_thread.start()
self.update_temp_thread = threading.Thread(target=update_temp, daemon=True)
self.update_temp_thread.start()
self.ids = 0
self.cur_msg = None
self.switch_time = 0
self.queue = self.messages_gen()
def stop(self):
super().stop()
self.server.shutdown()
self.server.server_close()
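    # HTTP API sketch, matching the routes registered above (port 8800), e.g.:
    #   curl -X POST -d 'text=Hello&priority=3&lifetime=30&effects=scroll' \
    #        http://localhost:8800/add_message
    #   curl http://localhost:8800/remove_message/<name>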
def add_message(self):
text = request.form.get("text", "?")
priority = float(request.form.get("priority", 5))
lifetime = float(request.form.get("lifetime", 0))
expiration = float(request.form.get("expiration", 0))
effects = filter(bool, request.form.get("effects", "").split(","))
name = request.form.get("name", None)
if name is None:
name = str(self.ids)
self.ids += 1
with self.frame_lock:
self.messages[name] = Message(text, priority, expiration, effects, lifetime)
return name
def remove_message(self, id):
with self.frame_lock:
del self.messages[id]
return ''
def clear(self):
with self.frame_lock:
self.messages = OrderedDict()
return ''
def loop(self):
super().loop()
if time.time() >= self.switch_time:
if self.cur_msg:
self.sprites.remove(self.cur_msg.label)
self.sprites.difference_update(set(self.cur_msg.effects))
self.cur_msg = next(self.queue)
if self.cur_msg:
self.sprites.add(self.cur_msg.label)
self.sprites.update(set(self.cur_msg.effects))
self.switch_time = time.time() + self.cur_msg.priority
else:
self.switch_time = time.time() + .5
def messages_gen(self):
while True:
msgs = []
with self.frame_lock:
msgs = list(self.messages.values())
if msgs:
                yield from msgs  # use the snapshot taken under the lock
else:
yield get_default_message()
with self.frame_lock:
self.messages = {k: m for k, m in self.messages.items() if m.expiration > time.time()}
GAME = MessageBoard
|
#!/usr/bin/env python
from __future__ import print_function
import argparse
import os
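# NOTE: the {% block %} / {{ ... }} markers below suggest this file is a
# Jinja2 template that is rendered (filling in build settings) before being
# executed as Python; it is not runnable as-is.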
def build(app, title, release):
os.system('rm -rf %s' %app)
res = os.system('cordova create %s com.%s.app %s' %(app, app, title))
if res != 0:
print("Failed to create android app")
return
os.system('rsync -a ./ %s/www --exclude=%s ' %(app,app))
os.system('cp androidIcon.png %s' %(app))
os.system('cp config.xml %s' %(app))
os.chdir(app)
os.system('cordova platform add android')
{% block commands %}{% endblock %}
os.system('cordova plugin add cordova-plugin-streaming-media')
os.system('cordova plugin add cordova-plugin-device')
os.system('cordova plugin add cordova-plugin-screen-orientation')
{% block plugins %}{% endblock %}
if release:
{% if androidBuild %}
build = 'cordova build android --release -- '
os.system(build + '--keystore={{androidBuild.keystore}} --storePassword={{androidBuild.storePassword}} --alias={{androidBuild.alias}} --password={{androidBuild.password}}')
{% else %}
print("Failed to build release apk androidBuild property is undefined")
{% endif %}
else:
os.system('cordova build android')
os.chdir('..')
parser = argparse.ArgumentParser('pureqml cordova android build tool')
parser.add_argument('--app', '-a', help='application name', default="app")
parser.add_argument('--title', '-t', help='application title', default="App")
parser.add_argument('--release', '-r', help='build release apk', default=False)
args = parser.parse_args()
res = os.system('cordova --version')
if res == 0:
build(args.app, args.title, args.release)
else:
print('Install "cordova" first: https://cordova.apache.org/docs/en/latest/guide/cli/')
|
# -*- coding: utf-8 -*-
from .common.util import Util
class SelectorViewModel(list):
# Properties
# Magic Methods
def __init__(self, options):
super(SelectorViewModel, self).__init__()
self._model = SelectorViewModel.generate_model(options)
self.bread_crumbs = []
        self._children_indicator = '>>'
for item in self._format_options_list():
self.append(item)
# Public Methods
@staticmethod
def generate_model(options, delimiter='::'):
d = {}
lines = [line.strip() for line in options.split('\n') if line.strip()]
for line in lines:
sections = [x.strip() for x in line.split(delimiter) if x.strip()]
d = SelectorViewModel._chomp(sections, d)
return d
def step_into_item(self, selected):
if selected not in self:
raise ValueError(str(selected) +
' not found in SelectorViewModel')
        if Util.str_ends_in_substr(selected, self._children_indicator):
selected_index = self.index(selected)
options = self._get_options_list()
options_text = sorted(options.keys())
option_selected = options_text[selected_index]
self.bread_crumbs.append(option_selected)
del self[:]
for item in self._format_options_list():
self.append(item)
def step_out_of_item(self):
item_stepped_out_of = ''
if self.bread_crumbs:
item_stepped_out_of = self.bread_crumbs[-1]
self.bread_crumbs.pop()
del self[:]
for item in self._format_options_list():
self.append(item)
return item_stepped_out_of
# Private Methods
@staticmethod
def _chomp(sections, d):
if sections:
if sections[0] not in d:
if sections[1:]:
d[sections[0]] = {}
else:
d[sections[0]] = {'': {}}
d[sections[0]] = SelectorViewModel._chomp(sections[1:],
d[sections[0]])
return d
def _format_options_list(self):
options = self._get_options_list()
options_text = sorted(options.keys())
formatted_options = []
for option_text in options_text:
if self._get_children(options[option_text]):
option_index = options_text.index(option_text)
padding = self._get_padding(options_text, option_index)
                formatted_options.append(option_text +
                                         padding +
                                         self._children_indicator)
else:
formatted_options.append(option_text)
return sorted(formatted_options)
@staticmethod
def _get_children(option_children):
return [child for child in option_children.keys() if child]
@staticmethod
def _get_max_option_length(options):
return max([len(x) for x in options])
def _get_options_list(self):
page = self._model
for crumb in self.bread_crumbs:
if crumb not in page.keys():
raise ValueError(str(self.bread_crumbs) +
' : path traversal failed at ' +
str(crumb))
else:
page = page[crumb]
return page
@staticmethod
def _get_padding_amount(options, index):
option = options[index]
pad_to_length = SelectorViewModel._get_max_option_length(options) + 1
return pad_to_length - len(option)
@staticmethod
def _get_padding(options, index):
return ' ' * SelectorViewModel._get_padding_amount(options, index)
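# Illustrative usage (input format inferred from generate_model's '::' delimiter):
#     vm = SelectorViewModel("Fruit::Apple\nFruit::Pear\nVeg")
#     # vm == ['Fruit >>', 'Veg']; step_into_item('Fruit >>') descends to
#     # ['Apple', 'Pear'], and step_out_of_item() returns back up.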
|
"""
@Author Mwaruwa Chaka, JKUAT ECE Final Year Project 2014
This file handles training of the model and saves it as model.tkl"""
import logging, os
# cv2, numpy and helper:
import cv2
import numpy as np  # np is used in read_images(); it may also be re-exported by lib.common
from lib.common import *
from lib.video import *
# add facerec to system path
import sys
sys.path.append("lib")
# facerec imports
from facerec.model import PredictableModel
from facerec.feature import Fisherfaces
from facerec.distance import EuclideanDistance
from facerec.classifier import NearestNeighbor
from facerec.validation import KFoldCrossValidation
from facerec.serialization import save_model, load_model
# for face detection (you can also use OpenCV2 directly):
from facedet.detector import CascadedDetector
class ExtendedPredictableModel(PredictableModel):
""" Subclasses the PredictableModel to store some more
information, so we don't need to pass the dataset
on each program call...
"""
def __init__(self, feature, classifier, image_size, subject_names):
PredictableModel.__init__(self, feature=feature, classifier=classifier)
self.image_size = image_size
self.subject_names = subject_names
def get_model(image_size, subject_names):
""" This method returns the PredictableModel which is used to learn a model
for possible further usage. If you want to define your own model, this
is the method to return it from!
"""
# Define the Fisherfaces Method as Feature Extraction method:
feature = Fisherfaces()
# Define a 1-NN classifier with Euclidean Distance:
classifier = NearestNeighbor(dist_metric=EuclideanDistance(), k=1)
# Return the model as the combination:
return ExtendedPredictableModel(feature=feature, classifier=classifier, image_size=image_size, subject_names=subject_names)
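# For example (illustrative only, assuming facerec's stock features), a
# PCA-based model could be returned instead:
#     from facerec.feature import PCA
#     return ExtendedPredictableModel(feature=PCA(num_components=50),
#         classifier=classifier, image_size=image_size, subject_names=subject_names)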
def read_subject_names(path):
"""Reads the folders of a given directory, which are used to display some
meaningful name instead of simply displaying a number.
Args:
path: Path to a folder with subfolders representing the subjects (persons).
Returns:
folder_names: The names of the folder, so you can display it in a prediction.
"""
folder_names = []
for dirname, dirnames, filenames in os.walk(path):
for subdirname in dirnames:
folder_names.append(subdirname)
return folder_names
def read_images(path, image_size=None):
"""Reads the images in a given folder, resizes images on the fly if size is given.
Args:
path: Path to a folder with subfolders representing the subjects (persons).
        image_size: A tuple (width, height); images are resized to it if given.
Returns:
A list [X, y, folder_names]
X: The images, which is a Python list of numpy arrays.
y: The corresponding labels (the unique number of the subject, person) in a Python list.
folder_names: The names of the folder, so you can display it in a prediction.
"""
c = 0
X = []
y = []
folder_names = []
for dirname, dirnames, filenames in os.walk(path):
for subdirname in dirnames:
folder_names.append(subdirname)
subject_path = os.path.join(dirname, subdirname)
for filename in os.listdir(subject_path):
try:
im = cv2.imread(os.path.join(subject_path, filename), cv2.IMREAD_GRAYSCALE)
# resize to given size (if given)
if (image_size is not None):
im = cv2.resize(im, image_size)
X.append(np.asarray(im, dtype=np.uint8))
y.append(c)
except IOError, (errno, strerror):
print "I/O error({0}): {1}".format(errno, strerror)
except:
print "Unexpected error:", sys.exc_info()[0]
raise
c = c+1
return [X,y,folder_names]
class App(object):
def __init__(self, model, camera_id, cascade_filename):
self.model = model
self.detector = CascadedDetector(cascade_fn=cascade_filename, minNeighbors=5, scaleFactor=1.3)
        self.cam = create_capture(camera_id)  # honor the camera_id option (was hardcoded to 0)
self.threshold = 500.0
def run(self):
while True:
ret, frame = self.cam.read()
# Resize the frame to half the original size for speeding up the detection process:
img = cv2.resize(frame, (frame.shape[1]/2, frame.shape[0]/2), interpolation = cv2.INTER_CUBIC)
imgout = img.copy()
for i,r in enumerate(self.detector.detect(img)):
x0,y0,x1,y1 = r
# (1) Get face, (2) Convert to grayscale & (3) resize to image_size:
face = img[y0:y1, x0:x1]
face = cv2.cvtColor(face,cv2.COLOR_BGR2GRAY)
face = cv2.resize(face, self.model.image_size, interpolation = cv2.INTER_CUBIC)
# Get a prediction from the model:
[prediction,confidence] = self.model.predict(face)
confidence = float(confidence['distances'])
if confidence <= self.threshold:
# Draw the face area in image:
cv2.rectangle(imgout, (x0,y0),(x1,y1),(0,255,0),2)
# Draw the predicted name (folder name...):
draw_str(imgout, (x0-20,y0-20), self.model.subject_names[prediction])
draw_str(imgout, (x0-20,y0-40), str(confidence))
else:
# Draw the face area in image:
cv2.rectangle(imgout, (x0,y0),(x1,y1),(0,0,255),2)
# Draw the predicted name (folder name...):
draw_str(imgout, (x0-20,y0-20), "Unknown")
#draw_str(imgout, (x0-20,y0-40), str(confidence))
cv2.imshow('videofacerec', imgout)
# Show image & exit on escape:
ch = cv2.waitKey(10)
if ch == 27:
break
if __name__ == '__main__':
from optparse import OptionParser
# model.pkl is a pickled (hopefully trained) PredictableModel, which is
# used to make predictions. You can learn a model yourself by passing the
# parameter -d (or --dataset) to learn the model from a given dataset.
usage = "usage: %prog [options] model_filename"
# Add options for training, resizing, validation and setting the camera id:
parser = OptionParser(usage=usage)
parser.add_option("-r", "--resize", action="store", type="string", dest="size", default="100x100",
help="Resizes the given dataset to a given size in format [width]x[height] (default: 100x100).")
parser.add_option("-v", "--validate", action="store", dest="numfolds", type="int", default=None,
help="Performs a k-fold cross validation on the dataset, if given (default: None).")
parser.add_option("-t", "--train", action="store", dest="dataset", type="string", default=None,
help="Trains the model on the given dataset.")
parser.add_option("-i", "--id", action="store", dest="camera_id", type="int", default=0,
help="Sets the Camera Id to be used (default: 0).")
parser.add_option("-c", "--cascade", action="store", dest="cascade_filename", default="haarcascade_frontalface_alt2.xml",
help="Sets the path to the Haar Cascade used for the face detection part (default: haarcascade_frontalface_alt2.xml).")
# Show the options to the user:
#parser.print_help()
print "Press [ESC] to exit the program!"
print "Script output:"
# Parse arguments:
(options, args) = parser.parse_args()
# Check if a model name was passed:
if len(args) == 0:
print "Starting training"
os.system("python train_model.py -t eigens -v 10 model.tkl")
print "Finished training, exiting"
sys.exit()
    # This model will be used (or created, if the training parameter -t/--train is given):
model_filename = args[0]
# Check if the given model exists, if no dataset was passed:
if (options.dataset is None) and (not os.path.exists(model_filename)):
print "[Error] No prediction model found at '%s'." % model_filename
sys.exit()
# Check if the given (or default) cascade file exists:
if not os.path.exists(options.cascade_filename):
print "[Error] No Cascade File found at '%s'." % options.cascade_filename
sys.exit()
    # We are resizing the images to a fixed size, as this is necessary for some of
    # the algorithms; others, like LBPH, don't have this requirement. To prevent
    # problems from popping up, we resize them with a default value if none
    # was given:
try:
image_size = (int(options.size.split("x")[0]), int(options.size.split("x")[1]))
except:
print "[Error] Unable to parse the given image size '%s'. Please pass it in the format [width]x[height]!" % options.size
sys.exit()
# We have got a dataset to learn a new model from:
if options.dataset:
# Check if the given dataset exists:
if not os.path.exists(options.dataset):
print "[Error] No dataset found at '%s'." % dataset_path
sys.exit()
# Reads the images, labels and folder_names from a given dataset. Images
# are resized to given size on the fly:
print "Loading dataset..."
[images, labels, subject_names] = read_images(options.dataset, image_size)
# Zip us a {label, name} dict from the given data:
list_of_labels = list(xrange(max(labels)+1))
subject_dictionary = dict(zip(list_of_labels, subject_names))
# Get the model we want to compute:
model = get_model(image_size=image_size, subject_names=subject_dictionary)
# Sometimes you want to know how good the model may perform on the data
# given, the script allows you to perform a k-fold Cross Validation before
# the Detection & Recognition part starts:
if options.numfolds:
print "Validating model with %s folds..." % options.numfolds
# We want to have some log output, so set up a new logging handler
# and point it to stdout:
handler = logging.StreamHandler(sys.stdout)
formatter = logging.Formatter('%(asctime)s - %(name)s - %(levelname)s - %(message)s')
handler.setFormatter(formatter)
# Add a handler to facerec modules, so we see what's going on inside:
logger = logging.getLogger("facerec")
logger.addHandler(handler)
logger.setLevel(logging.DEBUG)
# Perform the validation & print results:
crossval = KFoldCrossValidation(model, k=options.numfolds)
crossval.validate(images, labels)
crossval.print_results()
# Compute the model:
print "Computing the model..."
model.compute(images, labels)
# And save the model, which uses Pythons pickle module:
print "Saving the model..."
save_model(model_filename, model)
else:
print "Loading the model..."
model = load_model(model_filename)
# We operate on an ExtendedPredictableModel. Quit the application if this
# isn't what we expect it to be:
if not isinstance(model, ExtendedPredictableModel):
print "[Error] The given model is not of type '%s'." % "ExtendedPredictableModel"
sys.exit()
|
# Copyright (c) 2015, Frappe Technologies Pvt. Ltd. and Contributors
# License: GNU General Public License v3. See license.txt
from __future__ import unicode_literals
import frappe
import datetime
from frappe import _, msgprint, scrub
from frappe.defaults import get_user_permissions
from frappe.utils import add_days, getdate, formatdate, get_first_day, date_diff
from erpnext.utilities.doctype.address.address import get_address_display
from erpnext.utilities.doctype.contact.contact import get_contact_details
from erpnext.exceptions import PartyFrozen, InvalidCurrency, PartyDisabled, InvalidAccountCurrency
class DuplicatePartyAccountError(frappe.ValidationError): pass
@frappe.whitelist()
def get_party_details(party=None, account=None, party_type="Customer", company=None,
posting_date=None, price_list=None, currency=None, doctype=None):
if not party:
return {}
if not frappe.db.exists(party_type, party):
frappe.throw(_("{0}: {1} does not exists").format(party_type, party))
return _get_party_details(party, account, party_type,
company, posting_date, price_list, currency, doctype)
def _get_party_details(party=None, account=None, party_type="Customer", company=None,
posting_date=None, price_list=None, currency=None, doctype=None, ignore_permissions=False):
out = frappe._dict(set_account_and_due_date(party, account, party_type, company, posting_date, doctype))
party = out[party_type.lower()]
if not ignore_permissions and not frappe.has_permission(party_type, "read", party):
frappe.throw(_("Not permitted for {0}").format(party), frappe.PermissionError)
party = frappe.get_doc(party_type, party)
set_address_details(out, party, party_type)
set_contact_details(out, party, party_type)
set_other_values(out, party, party_type)
set_price_list(out, party, party_type, price_list)
out["taxes_and_charges"] = set_taxes(party.name, party_type, posting_date, company, out.customer_group, out.supplier_type)
if not out.get("currency"):
out["currency"] = currency
# sales team
if party_type=="Customer":
out["sales_team"] = [{
"sales_person": d.sales_person,
"allocated_percentage": d.allocated_percentage or None
} for d in party.get("sales_team")]
return out
def set_address_details(out, party, party_type):
billing_address_field = "customer_address" if party_type == "Lead" \
else party_type.lower() + "_address"
out[billing_address_field] = frappe.db.get_value("Address",
{party_type.lower(): party.name, "is_primary_address":1}, "name")
# address display
out.address_display = get_address_display(out[billing_address_field])
# shipping address
if party_type in ["Customer", "Lead"]:
out.shipping_address_name = frappe.db.get_value("Address",
{party_type.lower(): party.name, "is_shipping_address":1}, "name")
out.shipping_address = get_address_display(out["shipping_address_name"])
def set_contact_details(out, party, party_type):
out.contact_person = frappe.db.get_value("Contact",
{party_type.lower(): party.name, "is_primary_contact":1}, "name")
if not out.contact_person:
out.update({
"contact_person": None,
"contact_display": None,
"contact_email": None,
"contact_mobile": None,
"contact_phone": None,
"contact_designation": None,
"contact_department": None
})
else:
out.update(get_contact_details(out.contact_person))
def set_other_values(out, party, party_type):
# copy
if party_type=="Customer":
to_copy = ["customer_name", "customer_group", "territory", "language"]
else:
to_copy = ["supplier_name", "supplier_type", "language"]
for f in to_copy:
out[f] = party.get(f)
# fields prepended with default in Customer doctype
for f in ['currency'] \
+ (['sales_partner', 'commission_rate'] if party_type=="Customer" else []):
if party.get("default_" + f):
out[f] = party.get("default_" + f)
def get_default_price_list(party):
"""Return default price list for party (Document object)"""
if party.default_price_list:
return party.default_price_list
if party.doctype == "Customer":
price_list = frappe.db.get_value("Customer Group",
party.customer_group, "default_price_list")
if price_list:
return price_list
return None
def set_price_list(out, party, party_type, given_price_list):
# price list
price_list = filter(None, get_user_permissions().get("Price List", []))
if isinstance(price_list, list):
price_list = price_list[0] if len(price_list)==1 else None
if not price_list:
price_list = get_default_price_list(party)
if not price_list:
price_list = given_price_list
if price_list:
out.price_list_currency = frappe.db.get_value("Price List", price_list, "currency")
out["selling_price_list" if party.doctype=="Customer" else "buying_price_list"] = price_list
def set_account_and_due_date(party, account, party_type, company, posting_date, doctype):
if doctype not in ["Sales Invoice", "Purchase Invoice"]:
# not an invoice
return {
party_type.lower(): party
}
if party:
account = get_party_account(party_type, party, company)
account_fieldname = "debit_to" if party_type=="Customer" else "credit_to"
out = {
party_type.lower(): party,
account_fieldname : account,
"due_date": get_due_date(posting_date, party_type, party, company)
}
return out
def get_company_currency():
company_currency = frappe._dict()
for d in frappe.get_all("Company", fields=["name", "default_currency"]):
company_currency.setdefault(d.name, d.default_currency)
return company_currency
@frappe.whitelist()
def get_party_account(party_type, party, company):
"""Returns the account for the given `party`.
Will first search in party (Customer / Supplier) record, if not found,
will search in group (Customer Group / Supplier Type),
finally will return default."""
if not company:
frappe.throw(_("Please select a Company"))
if party:
account = frappe.db.get_value("Party Account",
{"parenttype": party_type, "parent": party, "company": company}, "account")
if not account:
party_group_doctype = "Customer Group" if party_type=="Customer" else "Supplier Type"
group = frappe.db.get_value(party_type, party, scrub(party_group_doctype))
account = frappe.db.get_value("Party Account",
{"parenttype": party_group_doctype, "parent": group, "company": company}, "account")
if not account:
default_account_name = "default_receivable_account" if party_type=="Customer" else "default_payable_account"
account = frappe.db.get_value("Company", company, default_account_name)
return account
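# A sketch of the lookup order described above (party and company names are
# hypothetical): a "Party Account" row on the Customer itself wins, then one
# on its Customer Group, then the Company's default receivable/payable account:
#
#   account = get_party_account("Customer", "ACME Corp", "Wind Power LLC")
#   # -> e.g. "Debtors - WP" if nothing more specific is configured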
def get_party_account_currency(party_type, party, company):
def generator():
party_account = get_party_account(party_type, party, company)
return frappe.db.get_value("Account", party_account, "account_currency")
return frappe.local_cache("party_account_currency", (party_type, party, company), generator)
def get_party_gle_currency(party_type, party, company):
def generator():
existing_gle_currency = frappe.db.sql("""select account_currency from `tabGL Entry`
where docstatus=1 and company=%(company)s and party_type=%(party_type)s and party=%(party)s
limit 1""", { "company": company, "party_type": party_type, "party": party })
return existing_gle_currency[0][0] if existing_gle_currency else None
return frappe.local_cache("party_gle_currency", (party_type, party, company), generator,
regenerate_if_none=True)
def validate_party_gle_currency(party_type, party, company, party_account_currency=None):
"""Validate party account currency with existing GL Entry's currency"""
if not party_account_currency:
party_account_currency = get_party_account_currency(party_type, party, company)
existing_gle_currency = get_party_gle_currency(party_type, party, company)
if existing_gle_currency and party_account_currency != existing_gle_currency:
frappe.throw(_("Accounting Entry for {0}: {1} can only be made in currency: {2}")
.format(party_type, party, existing_gle_currency), InvalidAccountCurrency)
def validate_party_accounts(doc):
companies = []
for account in doc.get("accounts"):
if account.company in companies:
frappe.throw(_("There can only be 1 Account per Company in {0} {1}")
.format(doc.doctype, doc.name), DuplicatePartyAccountError)
else:
companies.append(account.company)
party_account_currency = frappe.db.get_value("Account", account.account, "account_currency")
existing_gle_currency = get_party_gle_currency(doc.doctype, doc.name, account.company)
if existing_gle_currency and party_account_currency != existing_gle_currency:
frappe.throw(_("Accounting entries have already been made in currency {0} for company {1}. Please select a receivable or payable account with currency {0}.").format(existing_gle_currency, account.company))
@frappe.whitelist()
def get_due_date(posting_date, party_type, party, company):
"""Set Due Date = Posting Date + Credit Days"""
due_date = None
if posting_date and party:
due_date = posting_date
credit_days_based_on, credit_days = get_credit_days(party_type, party, company)
if credit_days_based_on == "Fixed Days" and credit_days:
due_date = add_days(posting_date, credit_days)
elif credit_days_based_on == "Last Day of the Next Month":
due_date = (get_first_day(posting_date, 0, 2) + datetime.timedelta(-1)).strftime("%Y-%m-%d")
return due_date
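# Example (hypothetical party and company): with credit_days_based_on set to
# "Fixed Days" and credit_days = 30, a posting date of 2015-01-15 gives:
#
#   get_due_date("2015-01-15", "Customer", "ACME Corp", "Wind Power LLC")
#   # -> posting date + 30 days, i.e. 2015-02-14
#
# With "Last Day of the Next Month" the same call would yield 2015-02-28.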
def get_credit_days(party_type, party, company):
if party_type and party:
if party_type == "Customer":
credit_days_based_on, credit_days, customer_group = \
frappe.db.get_value(party_type, party, ["credit_days_based_on", "credit_days", "customer_group"])
else:
credit_days_based_on, credit_days, supplier_type = \
frappe.db.get_value(party_type, party, ["credit_days_based_on", "credit_days", "supplier_type"])
if not credit_days_based_on:
if party_type == "Customer":
credit_days_based_on, credit_days = \
frappe.db.get_value("Customer Group", customer_group, ["credit_days_based_on", "credit_days"]) \
or frappe.db.get_value("Company", company, ["credit_days_based_on", "credit_days"])
else:
credit_days_based_on, credit_days = \
frappe.db.get_value("Supplier Type", supplier_type, ["credit_days_based_on", "credit_days"])\
or frappe.db.get_value("Company", company, ["credit_days_based_on", "credit_days"] )
return credit_days_based_on, credit_days
def validate_due_date(posting_date, due_date, party_type, party, company):
if getdate(due_date) < getdate(posting_date):
frappe.throw(_("Due Date cannot be before Posting Date"))
else:
default_due_date = get_due_date(posting_date, party_type, party, company)
if not default_due_date:
return
if default_due_date != posting_date and getdate(due_date) > getdate(default_due_date):
is_credit_controller = frappe.db.get_single_value("Accounts Settings", "credit_controller") in frappe.get_roles()
if is_credit_controller:
msgprint(_("Note: Due / Reference Date exceeds allowed customer credit days by {0} day(s)")
.format(date_diff(due_date, default_due_date)))
else:
frappe.throw(_("Due / Reference Date cannot be after {0}").format(formatdate(default_due_date)))
@frappe.whitelist()
def set_taxes(party, party_type, posting_date, company, customer_group=None, supplier_type=None,
billing_address=None, shipping_address=None, use_for_shopping_cart=None):
from erpnext.accounts.doctype.tax_rule.tax_rule import get_tax_template, get_party_details
args = {
party_type.lower(): party,
"customer_group": customer_group,
"supplier_type": supplier_type,
"company": company
}
if billing_address or shipping_address:
args.update(get_party_details(party, party_type, {"billing_address": billing_address, \
"shipping_address": shipping_address }))
else:
args.update(get_party_details(party, party_type))
if party_type=="Customer":
args.update({"tax_type": "Sales"})
else:
args.update({"tax_type": "Purchase"})
if use_for_shopping_cart:
args.update({"use_for_shopping_cart": use_for_shopping_cart})
return get_tax_template(posting_date, args)
def validate_party_frozen_disabled(party_type, party_name):
if party_type and party_name:
party = frappe.db.get_value(party_type, party_name, ["is_frozen", "disabled"], as_dict=True)
if party.disabled:
frappe.throw("{0} {1} is disabled".format(party_type, party_name), PartyDisabled)
elif party.is_frozen:
			frozen_accounts_modifier = frappe.db.get_value('Accounts Settings', None, 'frozen_accounts_modifier')
			if frozen_accounts_modifier not in frappe.get_roles():
frappe.throw("{0} {1} is frozen".format(party_type, party_name), PartyFrozen)
|
"""
Test lldb Python event APIs.
"""
from __future__ import print_function
import re
import lldb
from lldbsuite.test.decorators import *
from lldbsuite.test.lldbtest import *
from lldbsuite.test import lldbutil
@skipIfLinux # llvm.org/pr25924, sometimes generating SIGSEGV
@skipIfDarwin
class EventAPITestCase(TestBase):
mydir = TestBase.compute_mydir(__file__)
NO_DEBUG_INFO_TESTCASE = True
def setUp(self):
# Call super's setUp().
TestBase.setUp(self)
        # Find the line number of function 'c'.
self.line = line_number(
'main.c', '// Find the line number of function "c" here.')
@add_test_categories(['pyapi'])
@expectedFailureAll(
oslist=["linux"],
bugnumber="llvm.org/pr23730 Flaky, fails ~1/10 cases")
@skipIfWindows # This is flakey on Windows AND when it fails, it hangs: llvm.org/pr38373
@skipIfNetBSD
def test_listen_for_and_print_event(self):
"""Exercise SBEvent API."""
self.build()
exe = self.getBuildArtifact("a.out")
self.dbg.SetAsync(True)
# Create a target by the debugger.
target = self.dbg.CreateTarget(exe)
self.assertTrue(target, VALID_TARGET)
# Now create a breakpoint on main.c by name 'c'.
breakpoint = target.BreakpointCreateByName('c', 'a.out')
listener = lldb.SBListener("my listener")
# Now launch the process, and do not stop at the entry point.
error = lldb.SBError()
process = target.Launch(listener,
None, # argv
None, # envp
None, # stdin_path
None, # stdout_path
None, # stderr_path
None, # working directory
0, # launch flags
False, # Stop at entry
error) # error
self.assertTrue(
process.GetState() == lldb.eStateStopped,
PROCESS_STOPPED)
# Create an empty event object.
event = lldb.SBEvent()
traceOn = self.TraceOn()
if traceOn:
lldbutil.print_stacktraces(process)
# Create MyListeningThread class to wait for any kind of event.
import threading
class MyListeningThread(threading.Thread):
def run(self):
count = 0
# Let's only try at most 4 times to retrieve any kind of event.
# After that, the thread exits.
while not count > 3:
if traceOn:
print("Try wait for event...")
if listener.WaitForEvent(5, event):
if traceOn:
desc = lldbutil.get_description(event)
print("Event description:", desc)
print("Event data flavor:", event.GetDataFlavor())
print(
"Process state:",
lldbutil.state_type_to_str(
process.GetState()))
print()
else:
if traceOn:
print("timeout occurred waiting for event...")
count = count + 1
listener.Clear()
return
# Let's start the listening thread to retrieve the events.
my_thread = MyListeningThread()
my_thread.start()
# Use Python API to continue the process. The listening thread should be
# able to receive the state changed events.
process.Continue()
# Use Python API to kill the process. The listening thread should be
# able to receive the state changed event, too.
process.Kill()
# Wait until the 'MyListeningThread' terminates.
my_thread.join()
# Shouldn't we be testing against some kind of expectation here?
@add_test_categories(['pyapi'])
@expectedFlakeyLinux("llvm.org/pr23730") # Flaky, fails ~1/100 cases
@skipIfWindows # This is flakey on Windows AND when it fails, it hangs: llvm.org/pr38373
@skipIfNetBSD
def test_wait_for_event(self):
"""Exercise SBListener.WaitForEvent() API."""
self.build()
exe = self.getBuildArtifact("a.out")
self.dbg.SetAsync(True)
# Create a target by the debugger.
target = self.dbg.CreateTarget(exe)
self.assertTrue(target, VALID_TARGET)
# Now create a breakpoint on main.c by name 'c'.
breakpoint = target.BreakpointCreateByName('c', 'a.out')
#print("breakpoint:", breakpoint)
self.assertTrue(breakpoint and
breakpoint.GetNumLocations() == 1,
VALID_BREAKPOINT)
# Get the debugger listener.
listener = self.dbg.GetListener()
# Now launch the process, and do not stop at entry point.
error = lldb.SBError()
process = target.Launch(listener,
None, # argv
None, # envp
None, # stdin_path
None, # stdout_path
None, # stderr_path
None, # working directory
0, # launch flags
False, # Stop at entry
error) # error
self.assertTrue(error.Success() and process, PROCESS_IS_VALID)
# Create an empty event object.
event = lldb.SBEvent()
self.assertFalse(event, "Event should not be valid initially")
# Create MyListeningThread to wait for any kind of event.
import threading
class MyListeningThread(threading.Thread):
def run(self):
count = 0
                # Let's only try at most 4 times to retrieve any kind of event.
while not count > 3:
if listener.WaitForEvent(5, event):
#print("Got a valid event:", event)
#print("Event data flavor:", event.GetDataFlavor())
#print("Event type:", lldbutil.state_type_to_str(event.GetType()))
listener.Clear()
return
count = count + 1
print("Timeout: listener.WaitForEvent")
listener.Clear()
return
# Use Python API to kill the process. The listening thread should be
# able to receive a state changed event.
process.Kill()
# Let's start the listening thread to retrieve the event.
my_thread = MyListeningThread()
my_thread.start()
# Wait until the 'MyListeningThread' terminates.
my_thread.join()
self.assertTrue(event,
"My listening thread successfully received an event")
@skipIfFreeBSD # llvm.org/pr21325
@add_test_categories(['pyapi'])
@expectedFailureAll(
oslist=["linux"],
bugnumber="llvm.org/pr23617 Flaky, fails ~1/10 cases")
@skipIfWindows # This is flakey on Windows AND when it fails, it hangs: llvm.org/pr38373
@expectedFlakeyNetBSD
def test_add_listener_to_broadcaster(self):
"""Exercise some SBBroadcaster APIs."""
self.build()
exe = self.getBuildArtifact("a.out")
self.dbg.SetAsync(True)
# Create a target by the debugger.
target = self.dbg.CreateTarget(exe)
self.assertTrue(target, VALID_TARGET)
# Now create a breakpoint on main.c by name 'c'.
breakpoint = target.BreakpointCreateByName('c', 'a.out')
#print("breakpoint:", breakpoint)
self.assertTrue(breakpoint and
breakpoint.GetNumLocations() == 1,
VALID_BREAKPOINT)
listener = lldb.SBListener("my listener")
# Now launch the process, and do not stop at the entry point.
error = lldb.SBError()
process = target.Launch(listener,
None, # argv
None, # envp
None, # stdin_path
None, # stdout_path
None, # stderr_path
None, # working directory
0, # launch flags
False, # Stop at entry
error) # error
# Create an empty event object.
event = lldb.SBEvent()
self.assertFalse(event, "Event should not be valid initially")
# The finite state machine for our custom listening thread, with an
# initial state of None, which means no event has been received.
# It changes to 'connected' after 'connected' event is received (for remote platforms)
# It changes to 'running' after 'running' event is received (should happen only if the
        # current state is either 'None' or 'connected')
# It changes to 'stopped' if a 'stopped' event is received (should happen only if the
# current state is 'running'.)
self.state = None
# Create MyListeningThread to wait for state changed events.
# By design, a "running" event is expected following by a "stopped"
# event.
import threading
class MyListeningThread(threading.Thread):
def run(self):
#print("Running MyListeningThread:", self)
# Regular expression pattern for the event description.
pattern = re.compile("data = {.*, state = (.*)}$")
                # Let's only try at most 7 times to retrieve our events.
count = 0
while True:
if listener.WaitForEvent(5, event):
desc = lldbutil.get_description(event)
#print("Event description:", desc)
match = pattern.search(desc)
if not match:
break
if match.group(1) == 'connected':
# When debugging remote targets with lldb-server, we
# first get the 'connected' event.
self.context.assertTrue(self.context.state is None)
self.context.state = 'connected'
continue
elif match.group(1) == 'running':
self.context.assertTrue(
self.context.state is None or self.context.state == 'connected')
self.context.state = 'running'
continue
elif match.group(1) == 'stopped':
self.context.assertTrue(
self.context.state == 'running')
# Whoopee, both events have been received!
self.context.state = 'stopped'
break
else:
break
print("Timeout: listener.WaitForEvent")
count = count + 1
if count > 6:
break
listener.Clear()
return
# Use Python API to continue the process. The listening thread should be
# able to receive the state changed events.
process.Continue()
# Start the listening thread to receive the "running" followed by the
# "stopped" events.
my_thread = MyListeningThread()
# Supply the enclosing context so that our listening thread can access
# the 'state' variable.
my_thread.context = self
my_thread.start()
# Wait until the 'MyListeningThread' terminates.
my_thread.join()
# The final judgement. :-)
self.assertTrue(self.state == 'stopped',
"Both expected state changed events received")
|
"""
This module contains helper functions for controlling caching. It does so by
managing the "Vary" header of responses. It includes functions to patch the
header of response objects directly and decorators that change functions to do
that header-patching themselves.
For information on the Vary header, see:
https://tools.ietf.org/html/rfc7231#section-7.1.4
Essentially, the "Vary" HTTP header defines which headers a cache should take
into account when building its cache key. Requests with the same path but
different header content for headers named in "Vary" need to get different
cache keys to prevent delivery of wrong content.
An example: i18n middleware would need to distinguish caches by the
"Accept-language" header.
"""
import hashlib
import logging
import re
import time
from django.conf import settings
from django.core.cache import caches
from django.http import HttpResponse, HttpResponseNotModified
from django.utils.encoding import force_bytes, force_text, iri_to_uri
from django.utils.http import (
http_date, parse_etags, parse_http_date_safe, quote_etag,
)
from django.utils.timezone import get_current_timezone_name
from django.utils.translation import get_language
cc_delim_re = re.compile(r'\s*,\s*')
logger = logging.getLogger('django.request')
def patch_cache_control(response, **kwargs):
"""
Patch the Cache-Control header by adding all keyword arguments to it.
The transformation is as follows:
* All keyword parameter names are turned to lowercase, and underscores
are converted to hyphens.
* If the value of a parameter is True (exactly True, not just a
true value), only the parameter name is added to the header.
* All other parameters are added with their value, after applying
str() to it.
"""
def dictitem(s):
t = s.split('=', 1)
if len(t) > 1:
return (t[0].lower(), t[1])
else:
return (t[0].lower(), True)
def dictvalue(t):
if t[1] is True:
return t[0]
else:
return '%s=%s' % (t[0], t[1])
if response.get('Cache-Control'):
cc = cc_delim_re.split(response['Cache-Control'])
cc = dict(dictitem(el) for el in cc)
else:
cc = {}
# If there's already a max-age header but we're being asked to set a new
# max-age, use the minimum of the two ages. In practice this happens when
# a decorator and a piece of middleware both operate on a given view.
if 'max-age' in cc and 'max_age' in kwargs:
kwargs['max_age'] = min(int(cc['max-age']), kwargs['max_age'])
# Allow overriding private caching and vice versa
if 'private' in cc and 'public' in kwargs:
del cc['private']
elif 'public' in cc and 'private' in kwargs:
del cc['public']
for (k, v) in kwargs.items():
cc[k.replace('_', '-')] = v
cc = ', '.join(dictvalue(el) for el in cc.items())
response['Cache-Control'] = cc
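# A short sketch of the merging rules above (response is a plain HttpResponse;
# the ordering of directives within the resulting header may vary):
#
#   from django.http import HttpResponse
#   response = HttpResponse()
#   response['Cache-Control'] = 'max-age=3600, private'
#   patch_cache_control(response, max_age=600, public=True)
#   # 'private' is dropped because 'public' was requested, and the smaller
#   # max-age wins: the header now contains "max-age=600, public".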
def get_max_age(response):
"""
Return the max-age from the response Cache-Control header as an integer,
or None if it wasn't found or wasn't an integer.
"""
if not response.has_header('Cache-Control'):
return
cc = dict(_to_tuple(el) for el in cc_delim_re.split(response['Cache-Control']))
try:
return int(cc['max-age'])
except (ValueError, TypeError, KeyError):
pass
def set_response_etag(response):
if not response.streaming:
response['ETag'] = quote_etag(hashlib.md5(response.content).hexdigest())
return response
def _precondition_failed(request):
logger.warning(
'Precondition Failed: %s', request.path,
extra={
'status_code': 412,
'request': request,
},
)
return HttpResponse(status=412)
def _not_modified(request, response=None):
new_response = HttpResponseNotModified()
if response:
# Preserve the headers required by Section 4.1 of RFC 7232, as well as
# Last-Modified.
for header in ('Cache-Control', 'Content-Location', 'Date', 'ETag', 'Expires', 'Last-Modified', 'Vary'):
if header in response:
new_response[header] = response[header]
# Preserve cookies as per the cookie specification: "If a proxy server
# receives a response which contains a Set-cookie header, it should
# propagate the Set-cookie header to the client, regardless of whether
        # the response was 304 (Not Modified) or 200 (OK)."
# https://curl.haxx.se/rfc/cookie_spec.html
new_response.cookies = response.cookies
return new_response
def get_conditional_response(request, etag=None, last_modified=None, response=None):
# Only return conditional responses on successful requests.
if response and not (200 <= response.status_code < 300):
return response
# Get HTTP request headers.
if_match_etags = parse_etags(request.META.get('HTTP_IF_MATCH', ''))
if_unmodified_since = request.META.get('HTTP_IF_UNMODIFIED_SINCE')
if if_unmodified_since:
if_unmodified_since = parse_http_date_safe(if_unmodified_since)
if_none_match_etags = parse_etags(request.META.get('HTTP_IF_NONE_MATCH', ''))
if_modified_since = request.META.get('HTTP_IF_MODIFIED_SINCE')
if if_modified_since:
if_modified_since = parse_http_date_safe(if_modified_since)
# Step 1 of section 6 of RFC 7232: Test the If-Match precondition.
if if_match_etags and not _if_match_passes(etag, if_match_etags):
return _precondition_failed(request)
# Step 2: Test the If-Unmodified-Since precondition.
if (not if_match_etags and if_unmodified_since and
not _if_unmodified_since_passes(last_modified, if_unmodified_since)):
return _precondition_failed(request)
# Step 3: Test the If-None-Match precondition.
if if_none_match_etags and not _if_none_match_passes(etag, if_none_match_etags):
if request.method in ('GET', 'HEAD'):
return _not_modified(request, response)
else:
return _precondition_failed(request)
# Step 4: Test the If-Modified-Since precondition.
if (not if_none_match_etags and if_modified_since and
not _if_modified_since_passes(last_modified, if_modified_since)):
if request.method in ('GET', 'HEAD'):
return _not_modified(request, response)
# Step 5: Test the If-Range precondition (not supported).
# Step 6: Return original response since there isn't a conditional response.
return response
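# Typical call site (the etag value is hypothetical): compute the current
# validators for the resource, then let this function short-circuit:
#
#   conditional = get_conditional_response(
#       request, etag='"abc123"', last_modified=None, response=response)
#   # -> HttpResponseNotModified for a GET/HEAD whose If-None-Match matches,
#   #    a 412 response for a failed If-Match precondition,
#   #    otherwise the original response unchanged.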
def _if_match_passes(target_etag, etags):
"""
Test the If-Match comparison as defined in section 3.1 of RFC 7232.
"""
if not target_etag:
# If there isn't an ETag, then there can't be a match.
return False
elif etags == ['*']:
# The existence of an ETag means that there is "a current
# representation for the target resource", even if the ETag is weak,
# so there is a match to '*'.
return True
elif target_etag.startswith('W/'):
# A weak ETag can never strongly match another ETag.
return False
else:
# Since the ETag is strong, this will only return True if there's a
# strong match.
return target_etag in etags
def _if_unmodified_since_passes(last_modified, if_unmodified_since):
"""
Test the If-Unmodified-Since comparison as defined in section 3.4 of
RFC 7232.
"""
return last_modified and last_modified <= if_unmodified_since
def _if_none_match_passes(target_etag, etags):
"""
Test the If-None-Match comparison as defined in section 3.2 of RFC 7232.
"""
if not target_etag:
# If there isn't an ETag, then there isn't a match.
return True
elif etags == ['*']:
# The existence of an ETag means that there is "a current
# representation for the target resource", so there is a match to '*'.
return False
else:
# The comparison should be weak, so look for a match after stripping
# off any weak indicators.
target_etag = target_etag.strip('W/')
etags = (etag.strip('W/') for etag in etags)
return target_etag not in etags
def _if_modified_since_passes(last_modified, if_modified_since):
"""
Test the If-Modified-Since comparison as defined in section 3.3 of RFC 7232.
"""
return not last_modified or last_modified > if_modified_since
def patch_response_headers(response, cache_timeout=None):
"""
Add HTTP caching headers to the given HttpResponse: Expires and
Cache-Control.
Each header is only added if it isn't already set.
cache_timeout is in seconds. The CACHE_MIDDLEWARE_SECONDS setting is used
by default.
"""
if cache_timeout is None:
cache_timeout = settings.CACHE_MIDDLEWARE_SECONDS
if cache_timeout < 0:
cache_timeout = 0 # Can't have max-age negative
if not response.has_header('Expires'):
response['Expires'] = http_date(time.time() + cache_timeout)
patch_cache_control(response, max_age=cache_timeout)
def add_never_cache_headers(response):
"""
Add headers to a response to indicate that a page should never be cached.
"""
patch_response_headers(response, cache_timeout=-1)
patch_cache_control(response, no_cache=True, no_store=True, must_revalidate=True)
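# For reference, a sketch of the combined effect:
#
#   add_never_cache_headers(response)
#   # Expires is set to the current date, and Cache-Control now contains
#   # max-age=0, no-cache, no-store, must-revalidate.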
def patch_vary_headers(response, newheaders):
"""
Add (or update) the "Vary" header in the given HttpResponse object.
newheaders is a list of header names that should be in "Vary". Existing
headers in "Vary" aren't removed.
"""
# Note that we need to keep the original order intact, because cache
# implementations may rely on the order of the Vary contents in, say,
# computing an MD5 hash.
if response.has_header('Vary'):
vary_headers = cc_delim_re.split(response['Vary'])
else:
vary_headers = []
# Use .lower() here so we treat headers as case-insensitive.
existing_headers = {header.lower() for header in vary_headers}
additional_headers = [newheader for newheader in newheaders
if newheader.lower() not in existing_headers]
response['Vary'] = ', '.join(vary_headers + additional_headers)
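# Example of the case-insensitive merge (a standalone sketch):
#
#   from django.http import HttpResponse
#   response = HttpResponse()
#   response['Vary'] = 'Cookie'
#   patch_vary_headers(response, ['cookie', 'Accept-Language'])
#   # response['Vary'] == 'Cookie, Accept-Language' -- 'cookie' is recognized
#   # as already present; original order and casing are kept.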
def has_vary_header(response, header_query):
"""
Check to see if the response has a given header name in its Vary header.
"""
if not response.has_header('Vary'):
return False
vary_headers = cc_delim_re.split(response['Vary'])
existing_headers = {header.lower() for header in vary_headers}
return header_query.lower() in existing_headers
def _i18n_cache_key_suffix(request, cache_key):
"""If necessary, add the current locale or time zone to the cache key."""
if settings.USE_I18N or settings.USE_L10N:
# first check if LocaleMiddleware or another middleware added
# LANGUAGE_CODE to request, then fall back to the active language
# which in turn can also fall back to settings.LANGUAGE_CODE
cache_key += '.%s' % getattr(request, 'LANGUAGE_CODE', get_language())
if settings.USE_TZ:
# The datetime module doesn't restrict the output of tzname().
# Windows is known to use non-standard, locale-dependent names.
# User-defined tzinfo classes may return absolutely anything.
# Hence this paranoid conversion to create a valid cache key.
tz_name = force_text(get_current_timezone_name(), errors='ignore')
cache_key += '.%s' % tz_name.encode('ascii', 'ignore').decode('ascii').replace(' ', '_')
return cache_key
def _generate_cache_key(request, method, headerlist, key_prefix):
"""Return a cache key from the headers given in the header list."""
ctx = hashlib.md5()
for header in headerlist:
value = request.META.get(header)
if value is not None:
ctx.update(force_bytes(value))
url = hashlib.md5(force_bytes(iri_to_uri(request.build_absolute_uri())))
cache_key = 'views.decorators.cache.cache_page.%s.%s.%s.%s' % (
key_prefix, method, url.hexdigest(), ctx.hexdigest())
return _i18n_cache_key_suffix(request, cache_key)
def _generate_cache_header_key(key_prefix, request):
"""Return a cache key for the header cache."""
url = hashlib.md5(force_bytes(iri_to_uri(request.build_absolute_uri())))
cache_key = 'views.decorators.cache.cache_header.%s.%s' % (
key_prefix, url.hexdigest())
return _i18n_cache_key_suffix(request, cache_key)
def get_cache_key(request, key_prefix=None, method='GET', cache=None):
"""
Return a cache key based on the request URL and query. It can be used
in the request phase because it pulls the list of headers to take into
account from the global URL registry and uses those to build a cache key
to check against.
If there isn't a headerlist stored, return None, indicating that the page
needs to be rebuilt.
"""
if key_prefix is None:
key_prefix = settings.CACHE_MIDDLEWARE_KEY_PREFIX
cache_key = _generate_cache_header_key(key_prefix, request)
if cache is None:
cache = caches[settings.CACHE_MIDDLEWARE_ALIAS]
headerlist = cache.get(cache_key)
if headerlist is not None:
return _generate_cache_key(request, method, headerlist, key_prefix)
else:
return None
def learn_cache_key(request, response, cache_timeout=None, key_prefix=None, cache=None):
"""
Learn what headers to take into account for some request URL from the
response object. Store those headers in a global URL registry so that
later access to that URL will know what headers to take into account
without building the response object itself. The headers are named in the
Vary header of the response, but we want to prevent response generation.
The list of headers to use for cache key generation is stored in the same
cache as the pages themselves. If the cache ages some data out of the
cache, this just means that we have to build the response once to get at
the Vary header and so at the list of headers to use for the cache key.
"""
if key_prefix is None:
key_prefix = settings.CACHE_MIDDLEWARE_KEY_PREFIX
if cache_timeout is None:
cache_timeout = settings.CACHE_MIDDLEWARE_SECONDS
cache_key = _generate_cache_header_key(key_prefix, request)
if cache is None:
cache = caches[settings.CACHE_MIDDLEWARE_ALIAS]
if response.has_header('Vary'):
is_accept_language_redundant = settings.USE_I18N or settings.USE_L10N
# If i18n or l10n are used, the generated cache key will be suffixed
# with the current locale. Adding the raw value of Accept-Language is
# redundant in that case and would result in storing the same content
# under multiple keys in the cache. See #18191 for details.
headerlist = []
for header in cc_delim_re.split(response['Vary']):
header = header.upper().replace('-', '_')
if header == 'ACCEPT_LANGUAGE' and is_accept_language_redundant:
continue
headerlist.append('HTTP_' + header)
headerlist.sort()
cache.set(cache_key, headerlist, cache_timeout)
return _generate_cache_key(request, request.method, headerlist, key_prefix)
else:
# if there is no Vary header, we still need a cache key
# for the request.build_absolute_uri()
cache.set(cache_key, [], cache_timeout)
return _generate_cache_key(request, request.method, [], key_prefix)
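# How the two halves fit together (a sketch of the cache middleware pattern,
# not its exact code; `cache` and `timeout` are assumed to be in scope):
#
#   # response phase: record the Vary headers and store the page
#   cache_key = learn_cache_key(request, response)
#   cache.set(cache_key, response, timeout)
#
#   # request phase: rebuild the key, or get None if the URL is unknown
#   cache_key = get_cache_key(request)
#   cached = cache.get(cache_key) if cache_key else None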
def _to_tuple(s):
t = s.split('=', 1)
if len(t) == 2:
return t[0].lower(), t[1]
return t[0].lower(), True
|
# sqlalchemy/schema.py
# Copyright (C) 2005-2011 the SQLAlchemy authors and contributors <see AUTHORS file>
#
# This module is part of SQLAlchemy and is released under
# the MIT License: http://www.opensource.org/licenses/mit-license.php
"""The schema module provides the building blocks for database metadata.
Each element within this module describes a database entity which can be
created and dropped, or is otherwise part of such an entity. Examples include
tables, columns, sequences, and indexes.
All entities are subclasses of :class:`~sqlalchemy.schema.SchemaItem`, and as
defined in this module they are intended to be agnostic of any vendor-specific
constructs.
A collection of entities are grouped into a unit called
:class:`~sqlalchemy.schema.MetaData`. MetaData serves as a logical grouping of
schema elements, and can also be associated with an actual database connection
such that operations involving the contained elements can contact the database
as needed.
Two of the elements here also build upon their "syntactic" counterparts, which
are defined in :mod:`~sqlalchemy.sql.expression`, specifically
:class:`~sqlalchemy.schema.Table` and :class:`~sqlalchemy.schema.Column`.
Since these objects are part of the SQL expression language, they are usable
as components in SQL expressions.
"""
import re, inspect
from sqlalchemy import exc, util, dialects, types
from sqlalchemy.sql import expression, visitors
sqlutil = util.importlater("sqlalchemy.sql", "util")
url = util.importlater("sqlalchemy.engine", "url")
__all__ = ['SchemaItem', 'Table', 'Column', 'ForeignKey', 'Sequence', 'Index',
'ForeignKeyConstraint', 'PrimaryKeyConstraint', 'CheckConstraint',
'UniqueConstraint', 'DefaultGenerator', 'Constraint', 'MetaData',
'ThreadLocalMetaData', 'SchemaVisitor', 'PassiveDefault',
'DefaultClause', 'FetchedValue', 'ColumnDefault', 'DDL',
'CreateTable', 'DropTable', 'CreateSequence', 'DropSequence',
'AddConstraint', 'DropConstraint',
]
__all__.sort()
RETAIN_SCHEMA = util.symbol('retain_schema')
class SchemaItem(visitors.Visitable):
"""Base class for items that define a database schema."""
__visit_name__ = 'schema_item'
quote = None
def _init_items(self, *args):
"""Initialize the list of child items for this SchemaItem."""
for item in args:
if item is not None:
item._set_parent(self)
def _set_parent(self, parent):
"""Associate with this SchemaItem's parent object."""
raise NotImplementedError()
def get_children(self, **kwargs):
"""used to allow SchemaVisitor access"""
return []
def __repr__(self):
return "%s()" % self.__class__.__name__
@util.memoized_property
def info(self):
return {}
def _get_table_key(name, schema):
if schema is None:
return name
else:
return schema + "." + name
class Table(SchemaItem, expression.TableClause):
"""Represent a table in a database.
e.g.::
mytable = Table("mytable", metadata,
Column('mytable_id', Integer, primary_key=True),
Column('value', String(50))
)
The Table object constructs a unique instance of itself based on its
name within the given MetaData object. Constructor
arguments are as follows:
:param name: The name of this table as represented in the database.
This property, along with the *schema*, indicates the *singleton
identity* of this table in relation to its parent :class:`MetaData`.
Additional calls to :class:`Table` with the same name, metadata,
and schema name will return the same :class:`Table` object.
Names which contain no upper case characters
will be treated as case insensitive names, and will not be quoted
unless they are a reserved word. Names with any number of upper
case characters will be quoted and sent exactly. Note that this
behavior applies even for databases which standardize upper
case names as case insensitive such as Oracle.
:param metadata: a :class:`MetaData` object which will contain this
table. The metadata is used as a point of association of this table
with other tables which are referenced via foreign key. It also
may be used to associate this table with a particular
:class:`~sqlalchemy.engine.base.Connectable`.
:param \*args: Additional positional arguments are used primarily
to add the list of :class:`Column` objects contained within this
table. Similar to the style of a CREATE TABLE statement, other
:class:`.SchemaItem` constructs may be added here, including
:class:`PrimaryKeyConstraint`, and :class:`ForeignKeyConstraint`.
:param autoload: Defaults to False: the Columns for this table should
be reflected from the database. Usually there will be no Column
objects in the constructor if this property is set.
:param autoload_with: If autoload==True, this is an optional Engine
or Connection instance to be used for the table reflection. If
``None``, the underlying MetaData's bound connectable will be used.
:param implicit_returning: True by default - indicates that
RETURNING can be used by default to fetch newly inserted primary key
values, for backends which support this. Note that
create_engine() also provides an implicit_returning flag.
:param include_columns: A list of strings indicating a subset of
      columns to be loaded via the ``autoload`` operation; table columns that
aren't present in this list will not be represented on the resulting
``Table`` object. Defaults to ``None`` which indicates all columns
should be reflected.
:param info: A dictionary which defaults to ``{}``. A space to store
application specific data. This must be a dictionary.
:param mustexist: When ``True``, indicates that this Table must already
      be present in the given :class:`MetaData` collection.
:param prefixes:
A list of strings to insert after CREATE in the CREATE TABLE
statement. They will be separated by spaces.
:param quote: Force quoting of this table's name on or off, corresponding
to ``True`` or ``False``. When left at its default of ``None``,
the column identifier will be quoted according to whether the name is
case sensitive (identifiers with at least one upper case character are
treated as case sensitive), or if it's a reserved word. This flag
is only needed to force quoting of a reserved word which is not known
by the SQLAlchemy dialect.
:param quote_schema: same as 'quote' but applies to the schema identifier.
:param schema: The *schema name* for this table, which is required if
the table resides in a schema other than the default selected schema
for the engine's database connection. Defaults to ``None``.
:param useexisting: When ``True``, indicates that if this Table is already
present in the given :class:`MetaData`, apply further arguments within
the constructor to the existing :class:`Table`. If this flag is not
set, an error is raised when the parameters of an existing
:class:`Table` are overwritten.
"""
__visit_name__ = 'table'
ddl_events = ('before-create', 'after-create',
'before-drop', 'after-drop')
def __new__(cls, *args, **kw):
if not args:
# python3k pickle seems to call this
return object.__new__(cls)
try:
name, metadata, args = args[0], args[1], args[2:]
except IndexError:
raise TypeError("Table() takes at least two arguments")
schema = kw.get('schema', None)
useexisting = kw.pop('useexisting', False)
mustexist = kw.pop('mustexist', False)
key = _get_table_key(name, schema)
if key in metadata.tables:
if not useexisting and bool(args):
raise exc.InvalidRequestError(
"Table '%s' is already defined for this MetaData "
"instance. Specify 'useexisting=True' to redefine "
"options and columns on an existing Table object." % key)
table = metadata.tables[key]
table._init_existing(*args, **kw)
return table
else:
if mustexist:
raise exc.InvalidRequestError(
"Table '%s' not defined" % (key))
metadata.tables[key] = table = object.__new__(cls)
try:
table._init(name, metadata, *args, **kw)
return table
except:
metadata.tables.pop(key)
raise
def __init__(self, *args, **kw):
# __init__ is overridden to prevent __new__ from
# calling the superclass constructor.
pass
def _init(self, name, metadata, *args, **kwargs):
super(Table, self).__init__(name)
self.metadata = metadata
self.schema = kwargs.pop('schema', None)
self.indexes = set()
self.constraints = set()
self._columns = expression.ColumnCollection()
self._set_primary_key(PrimaryKeyConstraint())
self._foreign_keys = util.OrderedSet()
self._extra_dependencies = set()
self.ddl_listeners = util.defaultdict(list)
self.kwargs = {}
if self.schema is not None:
self.fullname = "%s.%s" % (self.schema, self.name)
else:
self.fullname = self.name
autoload = kwargs.pop('autoload', False)
autoload_with = kwargs.pop('autoload_with', None)
include_columns = kwargs.pop('include_columns', None)
self.implicit_returning = kwargs.pop('implicit_returning', True)
self.quote = kwargs.pop('quote', None)
self.quote_schema = kwargs.pop('quote_schema', None)
if 'info' in kwargs:
self.info = kwargs.pop('info')
self._prefixes = kwargs.pop('prefixes', [])
self._extra_kwargs(**kwargs)
# load column definitions from the database if 'autoload' is defined
# we do it after the table is in the singleton dictionary to support
# circular foreign keys
if autoload:
if autoload_with:
autoload_with.reflecttable(self,
include_columns=include_columns)
else:
_bind_or_error(metadata,
msg="No engine is bound to this Table's MetaData. "
"Pass an engine to the Table via "
"autoload_with=<someengine>, "
"or associate the MetaData with an engine via "
"metadata.bind=<someengine>").\
reflecttable(self, include_columns=include_columns)
# initialize all the column, etc. objects. done after reflection to
# allow user-overrides
self._init_items(*args)
def _init_existing(self, *args, **kwargs):
autoload = kwargs.pop('autoload', False)
autoload_with = kwargs.pop('autoload_with', None)
schema = kwargs.pop('schema', None)
if schema and schema != self.schema:
raise exc.ArgumentError(
"Can't change schema of existing table from '%s' to '%s'",
(self.schema, schema))
include_columns = kwargs.pop('include_columns', None)
if include_columns:
for c in self.c:
if c.name not in include_columns:
self.c.remove(c)
for key in ('quote', 'quote_schema'):
if key in kwargs:
setattr(self, key, kwargs.pop(key))
if 'info' in kwargs:
self.info = kwargs.pop('info')
self._extra_kwargs(**kwargs)
self._init_items(*args)
def _extra_kwargs(self, **kwargs):
# validate remaining kwargs that they all specify DB prefixes
if len([k for k in kwargs
if not re.match(
r'^(?:%s)_' %
'|'.join(dialects.__all__), k
)
]):
raise TypeError(
"Invalid argument(s) for Table: %r" % kwargs.keys())
self.kwargs.update(kwargs)
def _set_primary_key(self, pk):
if getattr(self, '_primary_key', None) in self.constraints:
self.constraints.remove(self._primary_key)
self._primary_key = pk
self.constraints.add(pk)
for c in pk.columns:
c.primary_key = True
@util.memoized_property
def _autoincrement_column(self):
for col in self.primary_key:
if col.autoincrement and \
isinstance(col.type, types.Integer) and \
not col.foreign_keys and \
isinstance(col.default, (type(None), Sequence)):
return col
@property
def key(self):
return _get_table_key(self.name, self.schema)
@property
def primary_key(self):
return self._primary_key
def __repr__(self):
return "Table(%s)" % ', '.join(
[repr(self.name)] + [repr(self.metadata)] +
[repr(x) for x in self.columns] +
["%s=%s" % (k, repr(getattr(self, k))) for k in ['schema']])
def __str__(self):
return _get_table_key(self.description, self.schema)
@property
def bind(self):
"""Return the connectable associated with this Table."""
return self.metadata and self.metadata.bind or None
def add_is_dependent_on(self, table):
"""Add a 'dependency' for this Table.
This is another Table object which must be created
first before this one can, or dropped after this one.
Usually, dependencies between tables are determined via
ForeignKey objects. However, for other situations that
create dependencies outside of foreign keys (rules, inheriting),
this method can manually establish such a link.
"""
self._extra_dependencies.add(table)
def append_column(self, column):
"""Append a ``Column`` to this ``Table``."""
column._set_parent(self)
def append_constraint(self, constraint):
"""Append a ``Constraint`` to this ``Table``."""
constraint._set_parent(self)
def append_ddl_listener(self, event, listener):
"""Append a DDL event listener to this ``Table``.
The ``listener`` callable will be triggered when this ``Table`` is
created or dropped, either directly before or after the DDL is issued
to the database. The listener may modify the Table, but may not abort
the event itself.
:param event:
One of ``Table.ddl_events``; e.g. 'before-create', 'after-create',
'before-drop' or 'after-drop'.
:param listener:
A callable, invoked with three positional arguments:
:event:
The event currently being handled
:target:
The ``Table`` object being created or dropped
:bind:
            The ``Connection`` being used for DDL execution.
Listeners are added to the Table's ``ddl_listeners`` attribute.
"""
if event not in self.ddl_events:
raise LookupError(event)
self.ddl_listeners[event].append(listener)
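    # A short sketch of a listener (table name and message are hypothetical):
    # log just before the CREATE TABLE statement is emitted:
    #
    #   def log_create(event, target, bind):
    #       print "about to create %s via %s" % (target.name, bind.engine.url)
    #
    #   mytable.append_ddl_listener('before-create', log_create)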
def _set_parent(self, metadata):
metadata.tables[_get_table_key(self.name, self.schema)] = self
self.metadata = metadata
def get_children(self, column_collections=True,
schema_visitor=False, **kw):
if not schema_visitor:
return expression.TableClause.get_children(
self, column_collections=column_collections, **kw)
else:
if column_collections:
return list(self.columns)
else:
return []
def exists(self, bind=None):
"""Return True if this table exists."""
if bind is None:
bind = _bind_or_error(self)
return bind.run_callable(bind.dialect.has_table,
self.name, schema=self.schema)
def create(self, bind=None, checkfirst=False):
"""Issue a ``CREATE`` statement for this table.
See also ``metadata.create_all()``.
"""
if bind is None:
bind = _bind_or_error(self)
bind.create(self, checkfirst=checkfirst)
def drop(self, bind=None, checkfirst=False):
"""Issue a ``DROP`` statement for this table.
See also ``metadata.drop_all()``.
"""
if bind is None:
bind = _bind_or_error(self)
bind.drop(self, checkfirst=checkfirst)
def tometadata(self, metadata, schema=RETAIN_SCHEMA):
"""Return a copy of this :class:`Table` associated with a different
:class:`MetaData`.
E.g.::
# create two metadata
meta1 = MetaData('sqlite:///querytest.db')
meta2 = MetaData()
# load 'users' from the sqlite engine
users_table = Table('users', meta1, autoload=True)
# create the same Table object for the plain metadata
users_table_2 = users_table.tometadata(meta2)
"""
if schema is RETAIN_SCHEMA:
schema = self.schema
key = _get_table_key(self.name, schema)
if key in metadata.tables:
util.warn("Table '%s' already exists within the given "
"MetaData - not copying." % self.description)
return metadata.tables[key]
args = []
for c in self.columns:
args.append(c.copy(schema=schema))
for c in self.constraints:
args.append(c.copy(schema=schema))
table = Table(
self.name, metadata, schema=schema,
*args, **self.kwargs
)
for index in self.indexes:
# skip indexes that would be generated
# by the 'index' flag on Column
if len(index.columns) == 1 and \
list(index.columns)[0].index:
continue
Index(index.name,
unique=index.unique,
*[table.c[col] for col in index.columns.keys()],
**index.kwargs)
return table
class Column(SchemaItem, expression.ColumnClause):
"""Represents a column in a database table."""
__visit_name__ = 'column'
def __init__(self, *args, **kwargs):
"""
Construct a new ``Column`` object.
:param name: The name of this column as represented in the database.
This argument may be the first positional argument, or specified
via keyword.
Names which contain no upper case characters
will be treated as case insensitive names, and will not be quoted
unless they are a reserved word. Names with any number of upper
case characters will be quoted and sent exactly. Note that this
behavior applies even for databases which standardize upper
case names as case insensitive such as Oracle.
The name field may be omitted at construction time and applied
later, at any time before the Column is associated with a
:class:`Table`. This is to support convenient
usage within the :mod:`~sqlalchemy.ext.declarative` extension.
:param type\_: The column's type, indicated using an instance which
subclasses :class:`~sqlalchemy.types.AbstractType`. If no arguments
are required for the type, the class of the type can be sent
as well, e.g.::
# use a type with arguments
Column('data', String(50))
# use no arguments
Column('level', Integer)
The ``type`` argument may be the second positional argument
or specified by keyword.
There is partial support for automatic detection of the
type based on that of a :class:`ForeignKey` associated
with this column, if the type is specified as ``None``.
However, this feature is not fully implemented and
may not function in all cases.
:param \*args: Additional positional arguments include various
:class:`.SchemaItem` derived constructs which will be applied
as options to the column. These include instances of
:class:`Constraint`, :class:`ForeignKey`, :class:`ColumnDefault`,
and :class:`Sequence`. In some cases an equivalent keyword
argument is available such as ``server_default``, ``default``
and ``unique``.
:param autoincrement: This flag may be set to ``False`` to
indicate an integer primary key column that should not be
considered to be the "autoincrement" column, that is
the integer primary key column which generates values
implicitly upon INSERT and whose value is usually returned
via the DBAPI cursor.lastrowid attribute. It defaults
to ``True`` to satisfy the common use case of a table
with a single integer primary key column. If the table
has a composite primary key consisting of more than one
integer column, set this flag to True only on the
column that should be considered "autoincrement".
The setting *only* has an effect for columns which are:
* Integer derived (i.e. INT, SMALLINT, BIGINT)
* Part of the primary key
* Are not referenced by any foreign keys
* Have no server side or client side defaults (with the exception
of Postgresql SERIAL).
The setting has these two effects on columns that meet the
above criteria:
* DDL issued for the column will include database-specific
keywords intended to signify this column as an
"autoincrement" column, such as AUTO INCREMENT on MySQL,
SERIAL on Postgresql, and IDENTITY on MS-SQL. It does
*not* issue AUTOINCREMENT for SQLite since this is a
special SQLite flag that is not required for autoincrementing
behavior. See the SQLite dialect documentation for
information on SQLite's AUTOINCREMENT.
* The column will be considered to be available as
cursor.lastrowid or equivalent, for those dialects which
"post fetch" newly inserted identifiers after a row has
been inserted (SQLite, MySQL, MS-SQL). It does not have
any effect in this regard for databases that use sequences
to generate primary key identifiers (i.e. Firebird, Postgresql,
Oracle).
:param default: A scalar, Python callable, or
:class:`~sqlalchemy.sql.expression.ClauseElement` representing the
*default value* for this column, which will be invoked upon insert
if this column is otherwise not specified in the VALUES clause of
the insert. This is a shortcut to using :class:`ColumnDefault` as
a positional argument.
Contrast this argument to ``server_default`` which creates a
default generator on the database side.
:param doc: optional String that can be used by the ORM or similar
to document attributes. This attribute does not render SQL
comments (a future attribute 'comment' will achieve that).
:param key: An optional string identifier which will identify this
``Column`` object on the :class:`Table`. When a key is provided,
this is the only identifier referencing the ``Column`` within the
application, including ORM attribute mapping; the ``name`` field
is used only when rendering SQL.
:param index: When ``True``, indicates that the column is indexed.
This is a shortcut for using a :class:`Index` construct on the
table. To specify indexes with explicit names or indexes that
contain multiple columns, use the :class:`Index` construct
instead.
:param info: A dictionary which defaults to ``{}``. A space to store
application specific data. This must be a dictionary.
:param nullable: If set to the default of ``True``, indicates the
column will be rendered as allowing NULL, else it's rendered as
NOT NULL. This parameter is only used when issuing CREATE TABLE
statements.
:param onupdate: A scalar, Python callable, or
:class:`~sqlalchemy.sql.expression.ClauseElement` representing a
default value to be applied to the column within UPDATE
statements, which will be invoked upon update if this column is not
present in the SET clause of the update. This is a shortcut to
using :class:`ColumnDefault` as a positional argument with
``for_update=True``.
:param primary_key: If ``True``, marks this column as a primary key
column. Multiple columns can have this flag set to specify
composite primary keys. As an alternative, the primary key of a
:class:`Table` can be specified via an explicit
:class:`PrimaryKeyConstraint` object.
:param server_default: A :class:`FetchedValue` instance, str, Unicode
or :func:`~sqlalchemy.sql.expression.text` construct representing
the DDL DEFAULT value for the column.
String types will be emitted as-is, surrounded by single quotes::
Column('x', Text, server_default="val")
x TEXT DEFAULT 'val'
A :func:`~sqlalchemy.sql.expression.text` expression will be
rendered as-is, without quotes::
Column('y', DateTime, server_default=text('NOW()'))
y DATETIME DEFAULT NOW()
Strings and text() will be converted into a :class:`DefaultClause`
object upon initialization.
Use :class:`FetchedValue` to indicate that an already-existing
column will generate a default value on the database side which
will be available to SQLAlchemy for post-fetch after inserts. This
construct does not specify any DDL and the implementation is left
to the database, such as via a trigger.
:param server_onupdate: A :class:`FetchedValue` instance
representing a database-side default generation function. This
indicates to SQLAlchemy that a newly generated value will be
available after updates. This construct does not specify any DDL
and the implementation is left to the database, such as via a
trigger.
:param quote: Force quoting of this column's name on or off,
corresponding to ``True`` or ``False``. When left at its default
of ``None``, the column identifier will be quoted according to
whether the name is case sensitive (identifiers with at least one
upper case character are treated as case sensitive), or if it's a
reserved word. This flag is only needed to force quoting of a
reserved word which is not known by the SQLAlchemy dialect.
:param unique: When ``True``, indicates that this column contains a
unique constraint, or if ``index`` is ``True`` as well, indicates
that the :class:`Index` should be created with the unique flag.
To specify multiple columns in the constraint/index or to specify
an explicit name, use the :class:`UniqueConstraint` or
:class:`Index` constructs explicitly.
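As a combined sketch of several of the parameters above (the column
name and type here are purely illustrative)::
# a non-nullable string column backed by a unique index
Column('email', String(120), nullable=False, unique=True, index=True)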
"""
name = kwargs.pop('name', None)
type_ = kwargs.pop('type_', None)
args = list(args)
if args:
if isinstance(args[0], basestring):
if name is not None:
raise exc.ArgumentError(
"May not pass name positionally and as a keyword.")
name = args.pop(0)
if args:
coltype = args[0]
if (isinstance(coltype, types.AbstractType) or
(isinstance(coltype, type) and
issubclass(coltype, types.AbstractType))):
if type_ is not None:
raise exc.ArgumentError(
"May not pass type_ positionally and as a keyword.")
type_ = args.pop(0)
no_type = type_ is None
super(Column, self).__init__(name, None, type_)
self.key = kwargs.pop('key', name)
self.primary_key = kwargs.pop('primary_key', False)
self.nullable = kwargs.pop('nullable', not self.primary_key)
self.default = kwargs.pop('default', None)
self.server_default = kwargs.pop('server_default', None)
self.server_onupdate = kwargs.pop('server_onupdate', None)
self.index = kwargs.pop('index', None)
self.unique = kwargs.pop('unique', None)
self.quote = kwargs.pop('quote', None)
self.doc = kwargs.pop('doc', None)
self.onupdate = kwargs.pop('onupdate', None)
self.autoincrement = kwargs.pop('autoincrement', True)
self.constraints = set()
self.foreign_keys = util.OrderedSet()
self._table_events = set()
# check if this Column is proxying another column
if '_proxies' in kwargs:
self.proxies = kwargs.pop('_proxies')
# otherwise, add DDL-related events
elif isinstance(self.type, types.SchemaType):
self.type._set_parent(self)
if self.default is not None:
if isinstance(self.default, (ColumnDefault, Sequence)):
args.append(self.default)
else:
args.append(ColumnDefault(self.default))
if self.server_default is not None:
if isinstance(self.server_default, FetchedValue):
args.append(self.server_default)
else:
args.append(DefaultClause(self.server_default))
if self.onupdate is not None:
if isinstance(self.onupdate, (ColumnDefault, Sequence)):
args.append(self.onupdate)
else:
args.append(ColumnDefault(self.onupdate, for_update=True))
if self.server_onupdate is not None:
if isinstance(self.server_onupdate, FetchedValue):
args.append(self.server_onupdate)
else:
args.append(DefaultClause(self.server_onupdate,
for_update=True))
self._init_items(*args)
if not self.foreign_keys and no_type:
raise exc.ArgumentError("'type' is required on Column objects "
"which have no foreign keys.")
util.set_creation_order(self)
if 'info' in kwargs:
self.info = kwargs.pop('info')
if kwargs:
raise exc.ArgumentError(
"Unknown arguments passed to Column: " + repr(kwargs.keys()))
def __str__(self):
if self.name is None:
return "(no name)"
elif self.table is not None:
if self.table.named_with_column:
return (self.table.description + "." + self.description)
else:
return self.description
else:
return self.description
def references(self, column):
"""Return True if this Column references the given column via foreign
key."""
for fk in self.foreign_keys:
if fk.references(column.table):
return True
else:
return False
def append_foreign_key(self, fk):
fk._set_parent(self)
def __repr__(self):
kwarg = []
if self.key != self.name:
kwarg.append('key')
if self.primary_key:
kwarg.append('primary_key')
if not self.nullable:
kwarg.append('nullable')
if self.onupdate:
kwarg.append('onupdate')
if self.default:
kwarg.append('default')
if self.server_default:
kwarg.append('server_default')
return "Column(%s)" % ', '.join(
[repr(self.name)] + [repr(self.type)] +
[repr(x) for x in self.foreign_keys if x is not None] +
[repr(x) for x in self.constraints] +
[(self.table is not None and "table=<%s>" %
self.table.description or "")] +
["%s=%s" % (k, repr(getattr(self, k))) for k in kwarg])
def _set_parent(self, table):
if self.name is None:
raise exc.ArgumentError(
"Column must be constructed with a name or assign .name "
"before adding to a Table.")
if self.key is None:
self.key = self.name
if getattr(self, 'table', None) is not None:
raise exc.ArgumentError("this Column already has a table!")
if self.key in table._columns:
col = table._columns.get(self.key)
for fk in col.foreign_keys:
col.foreign_keys.remove(fk)
table.foreign_keys.remove(fk)
if fk.constraint in table.constraints:
# this might have been removed
# already, if it's a composite constraint
# and more than one col being replaced
table.constraints.remove(fk.constraint)
table._columns.replace(self)
if self.primary_key:
table.primary_key._replace(self)
elif self.key in table.primary_key:
raise exc.ArgumentError(
"Trying to redefine primary-key column '%s' as a "
"non-primary-key column on table '%s'" % (
self.key, table.fullname))
self.table = table
if self.index:
if isinstance(self.index, basestring):
raise exc.ArgumentError(
"The 'index' keyword argument on Column is boolean only. "
"To create indexes with a specific name, create an "
"explicit Index object external to the Table.")
Index(expression._generated_label('ix_%s' % self._label), self, unique=self.unique)
elif self.unique:
if isinstance(self.unique, basestring):
raise exc.ArgumentError(
"The 'unique' keyword argument on Column is boolean "
"only. To create unique constraints or indexes with a "
"specific name, append an explicit UniqueConstraint to "
"the Table's list of elements, or create an explicit "
"Index object external to the Table.")
table.append_constraint(UniqueConstraint(self.key))
for fn in self._table_events:
fn(table, self)
del self._table_events
def _on_table_attach(self, fn):
if self.table is not None:
fn(self.table, self)
else:
self._table_events.add(fn)
def copy(self, **kw):
"""Create a copy of this ``Column``, unitialized.
This is used in ``Table.tometadata``.
"""
# Constraint objects plus non-constraint-bound ForeignKey objects
args = \
[c.copy(**kw) for c in self.constraints] + \
[c.copy(**kw) for c in self.foreign_keys if not c.constraint]
c = Column(
name=self.name,
type_=self.type,
key = self.key,
primary_key = self.primary_key,
nullable = self.nullable,
unique = self.unique,
quote=self.quote,
index=self.index,
autoincrement=self.autoincrement,
default=self.default,
server_default=self.server_default,
onupdate=self.onupdate,
server_onupdate=self.server_onupdate,
info=self.info,
*args
)
if hasattr(self, '_table_events'):
c._table_events = list(self._table_events)
return c
def _make_proxy(self, selectable, name=None):
"""Create a *proxy* for this column.
This is a copy of this ``Column`` referenced by a different parent
(such as an alias or select statement). The column should
be used only in select scenarios, as its full DDL/default
information is not transferred.
"""
fk = [ForeignKey(f.column) for f in self.foreign_keys]
if name is None and self.name is None:
raise exc.InvalidRequestError("Cannot initialize a sub-selectable"
" with this Column object until it's 'name' has "
"been assigned.")
c = self._constructor(
name or self.name,
self.type,
key = name or self.key,
primary_key = self.primary_key,
nullable = self.nullable,
quote=self.quote, _proxies=[self], *fk)
c.table = selectable
selectable.columns.add(c)
if self.primary_key:
selectable.primary_key.add(c)
for fn in c._table_events:
fn(selectable, c)
del c._table_events
return c
def get_children(self, schema_visitor=False, **kwargs):
if schema_visitor:
return [x for x in (self.default, self.onupdate)
if x is not None] + \
list(self.foreign_keys) + list(self.constraints)
else:
return expression.ColumnClause.get_children(self, **kwargs)
class ForeignKey(SchemaItem):
"""Defines a dependency between two columns.
``ForeignKey`` is specified as an argument to a :class:`Column` object,
e.g.::
t = Table("remote_table", metadata,
Column("remote_id", ForeignKey("main_table.id"))
)
Note that ``ForeignKey`` is only a marker object that defines
a dependency between two columns. The actual constraint
is in all cases represented by the :class:`ForeignKeyConstraint`
object. This object will be generated automatically when
a ``ForeignKey`` is associated with a :class:`Column` which
in turn is associated with a :class:`Table`. Conversely,
when :class:`ForeignKeyConstraint` is applied to a :class:`Table`,
``ForeignKey`` markers are automatically generated to be
present on each associated :class:`Column`, which are also
associated with the constraint object.
Note that you cannot define a "composite" foreign key constraint,
that is a constraint between a grouping of multiple parent/child
columns, using ``ForeignKey`` objects. To define this grouping,
the :class:`ForeignKeyConstraint` object must be used, and applied
to the :class:`Table`. The associated ``ForeignKey`` objects
are created automatically.
The ``ForeignKey`` objects associated with an individual
:class:`Column` object are available in the `foreign_keys` collection
of that column.
Further examples of foreign key configuration are in
:ref:`metadata_foreignkeys`.
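As a sketch of the composite case described above, declared at the
table level (table and column names are invented for illustration)::
ForeignKeyConstraint(
['invoice_id', 'ref_num'],
['invoice.invoice_id', 'invoice.ref_num'],
table=invoice_item_table)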
"""
__visit_name__ = 'foreign_key'
def __init__(self, column, _constraint=None, use_alter=False, name=None,
onupdate=None, ondelete=None, deferrable=None,
initially=None, link_to_name=False):
"""
Construct a column-level FOREIGN KEY.
The :class:`ForeignKey` object when constructed generates a
:class:`ForeignKeyConstraint` which is associated with the parent
:class:`Table` object's collection of constraints.
:param column: A single target column for the key relationship. A
:class:`Column` object or a column name as a string:
``tablename.columnkey`` or ``schema.tablename.columnkey``.
``columnkey`` is the ``key`` which has been assigned to the column
(defaults to the column name itself), unless ``link_to_name`` is
``True`` in which case the rendered name of the column is used.
:param name: Optional string. An in-database name for the key if
`constraint` is not provided.
:param onupdate: Optional string. If set, emit ON UPDATE <value> when
issuing DDL for this constraint. Typical values include CASCADE,
DELETE and RESTRICT.
:param ondelete: Optional string. If set, emit ON DELETE <value> when
issuing DDL for this constraint. Typical values include CASCADE,
DELETE and RESTRICT.
:param deferrable: Optional bool. If set, emit DEFERRABLE or NOT
DEFERRABLE when issuing DDL for this constraint.
:param initially: Optional string. If set, emit INITIALLY <value> when
issuing DDL for this constraint.
:param link_to_name: if True, the string name given in ``column`` is
the rendered name of the referenced column, not its locally
assigned ``key``.
:param use_alter: passed to the underlying
:class:`ForeignKeyConstraint` to indicate the constraint should be
generated/dropped externally from the CREATE TABLE / DROP TABLE
statement. See that class's constructor for details.
"""
self._colspec = column
# the linked ForeignKeyConstraint.
# ForeignKey will create this when parent Column
# is attached to a Table, *or* ForeignKeyConstraint
# object passes itself in when creating ForeignKey
# markers.
self.constraint = _constraint
self.use_alter = use_alter
self.name = name
self.onupdate = onupdate
self.ondelete = ondelete
self.deferrable = deferrable
self.initially = initially
self.link_to_name = link_to_name
def __repr__(self):
return "ForeignKey(%r)" % self._get_colspec()
def copy(self, schema=None):
"""Produce a copy of this :class:`ForeignKey` object.
The new :class:`ForeignKey` will not be bound
to any :class:`Column`.
This method is usually used by the internal
copy procedures of :class:`Column`, :class:`Table`,
and :class:`MetaData`.
:param schema: The returned :class:`ForeignKey` will
reference the original table and column name, qualified
by the given string schema name.
"""
return ForeignKey(
self._get_colspec(schema=schema),
use_alter=self.use_alter,
name=self.name,
onupdate=self.onupdate,
ondelete=self.ondelete,
deferrable=self.deferrable,
initially=self.initially,
link_to_name=self.link_to_name
)
def _get_colspec(self, schema=None):
"""Return a string based 'column specification' for this :class:`ForeignKey`.
This is usually the equivalent of the string-based "tablename.colname"
argument first passed to the object's constructor.
"""
if schema:
return schema + "." + self.column.table.name + \
"." + self.column.key
elif isinstance(self._colspec, basestring):
return self._colspec
elif hasattr(self._colspec, '__clause_element__'):
_column = self._colspec.__clause_element__()
else:
_column = self._colspec
return "%s.%s" % (_column.table.fullname, _column.key)
target_fullname = property(_get_colspec)
def references(self, table):
"""Return True if the given :class:`Table` is referenced by this :class:`ForeignKey`."""
return table.corresponding_column(self.column) is not None
def get_referent(self, table):
"""Return the :class:`.Column` in the given :class:`.Table`
referenced by this :class:`ForeignKey`.
Returns None if this :class:`ForeignKey` does not reference the given
:class:`Table`.
"""
return table.corresponding_column(self.column)
@util.memoized_property
def column(self):
"""Return the target :class:`.Column` referenced by this :class:`.ForeignKey`.
If this :class:`ForeignKey` was created using a
string-based target column specification, this
attribute will on first access initiate a resolution
process to locate the referenced remote
:class:`.Column`. The resolution process traverses
to the parent :class:`.Column`, :class:`.Table`, and
:class:`.MetaData` to proceed - if any of these aren't
yet present, an error is raised.
"""
# ForeignKey inits its remote column as late as possible, so tables
# can be defined without dependencies
if isinstance(self._colspec, basestring):
# locate the parent table this foreign key is attached to. we
# use the "original" column which our parent column represents
# (it's a list of columns/other ColumnElements if the parent
# table is a UNION)
for c in self.parent.base_columns:
if isinstance(c, Column):
parenttable = c.table
break
else:
raise exc.ArgumentError(
"Parent column '%s' does not descend from a "
"table-attached Column" % str(self.parent))
m = self._colspec.split('.')
if m is None:
raise exc.ArgumentError(
"Invalid foreign key column specification: %s" %
self._colspec)
# A FK between column 'bar' and table 'foo' can be
# specified as 'foo', 'foo.bar', 'dbo.foo.bar',
# 'otherdb.dbo.foo.bar'. Once we have the column name and
# the table name, treat everything else as the schema
# name. Some databases (e.g. Sybase) support
# inter-database foreign keys. See ticket #1341 and --
# indirectly related -- Ticket #594. This assumes that '.'
# will never appear *within* any component of the FK.
(schema, tname, colname) = (None, None, None)
if (len(m) == 1):
tname = m.pop()
else:
colname = m.pop()
tname = m.pop()
if (len(m) > 0):
schema = '.'.join(m)
if _get_table_key(tname, schema) not in parenttable.metadata:
raise exc.NoReferencedTableError(
"Foreign key assocated with column '%s' could not find "
"table '%s' with which to generate a "
"foreign key to target column '%s'" % (self.parent, tname, colname))
table = Table(tname, parenttable.metadata,
mustexist=True, schema=schema)
_column = None
if colname is None:
# colname is None in the case that ForeignKey argument
# was specified as table name only, in which case we
# match the column name to the same column on the
# parent.
key = self.parent
_column = table.c.get(self.parent.key, None)
elif self.link_to_name:
key = colname
for c in table.c:
if c.name == colname:
_column = c
else:
key = colname
_column = table.c.get(colname, None)
if _column is None:
raise exc.NoReferencedColumnError(
"Could not create ForeignKey '%s' on table '%s': "
"table '%s' has no column named '%s'" % (
self._colspec, parenttable.name, table.name, key))
elif hasattr(self._colspec, '__clause_element__'):
_column = self._colspec.__clause_element__()
else:
_column = self._colspec
# propagate TypeEngine to parent if it didn't have one
if isinstance(self.parent.type, types.NullType):
self.parent.type = _column.type
return _column
def _set_parent(self, column):
if hasattr(self, 'parent'):
if self.parent is column:
return
raise exc.InvalidRequestError(
"This ForeignKey already has a parent !")
self.parent = column
self.parent.foreign_keys.add(self)
self.parent._on_table_attach(self._set_table)
def _set_table(self, table, column):
# standalone ForeignKey - create ForeignKeyConstraint
# on the hosting Table when attached to the Table.
if self.constraint is None and isinstance(table, Table):
self.constraint = ForeignKeyConstraint(
[], [], use_alter=self.use_alter, name=self.name,
onupdate=self.onupdate, ondelete=self.ondelete,
deferrable=self.deferrable, initially=self.initially,
)
self.constraint._elements[self.parent] = self
self.constraint._set_parent(table)
table.foreign_keys.add(self)
class DefaultGenerator(SchemaItem):
"""Base class for column *default* values."""
__visit_name__ = 'default_generator'
is_sequence = False
def __init__(self, for_update=False):
self.for_update = for_update
def _set_parent(self, column):
self.column = column
if self.for_update:
self.column.onupdate = self
else:
self.column.default = self
def execute(self, bind=None, **kwargs):
if bind is None:
bind = _bind_or_error(self)
return bind._execute_default(self, **kwargs)
@property
def bind(self):
"""Return the connectable associated with this default."""
if getattr(self, 'column', None) is not None:
return self.column.table.bind
else:
return None
def __repr__(self):
return "DefaultGenerator()"
class ColumnDefault(DefaultGenerator):
"""A plain default value on a column.
This could correspond to a constant, a callable function,
or a SQL clause.
:class:`.ColumnDefault` is generated automatically
whenever the ``default``, ``onupdate`` arguments of
:class:`.Column` are used. A :class:`.ColumnDefault`
can be passed positionally as well.
For example, the following::
Column('foo', Integer, default=50)
Is equivalent to::
Column('foo', Integer, ColumnDefault(50))
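A Python callable may be given as well; as a sketch, a zero-argument
callable evaluated at INSERT time (assumes ``import datetime``)::
Column('created_at', DateTime, default=datetime.datetime.utcnow)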
"""
def __init__(self, arg, **kwargs):
super(ColumnDefault, self).__init__(**kwargs)
if isinstance(arg, FetchedValue):
raise exc.ArgumentError(
"ColumnDefault may not be a server-side default type.")
if util.callable(arg):
arg = self._maybe_wrap_callable(arg)
self.arg = arg
@util.memoized_property
def is_callable(self):
return util.callable(self.arg)
@util.memoized_property
def is_clause_element(self):
return isinstance(self.arg, expression.ClauseElement)
@util.memoized_property
def is_scalar(self):
return not self.is_callable and \
not self.is_clause_element and \
not self.is_sequence
def _maybe_wrap_callable(self, fn):
"""Backward compat: Wrap callables that don't accept a context."""
if inspect.isfunction(fn):
inspectable = fn
elif inspect.isclass(fn):
inspectable = fn.__init__
elif hasattr(fn, '__call__'):
inspectable = fn.__call__
else:
# probably not inspectable; try anyway.
inspectable = fn
try:
argspec = inspect.getargspec(inspectable)
except TypeError:
return lambda ctx: fn()
positionals = len(argspec[0])
# Py3K compat - no unbound methods
if inspect.ismethod(inspectable) or inspect.isclass(fn):
positionals -= 1
if positionals == 0:
return lambda ctx: fn()
defaulted = argspec[3] is not None and len(argspec[3]) or 0
if positionals - defaulted > 1:
raise exc.ArgumentError(
"ColumnDefault Python function takes zero or one "
"positional arguments")
return fn
def _visit_name(self):
if self.for_update:
return "column_onupdate"
else:
return "column_default"
__visit_name__ = property(_visit_name)
def __repr__(self):
return "ColumnDefault(%r)" % self.arg
class Sequence(DefaultGenerator):
"""Represents a named database sequence."""
__visit_name__ = 'sequence'
is_sequence = True
def __init__(self, name, start=None, increment=None, schema=None,
optional=False, quote=None, metadata=None, for_update=False):
super(Sequence, self).__init__(for_update=for_update)
self.name = name
self.start = start
self.increment = increment
self.optional = optional
self.quote = quote
self.schema = schema
self.metadata = metadata
@util.memoized_property
def is_callable(self):
return False
@util.memoized_property
def is_clause_element(self):
return False
def __repr__(self):
return "Sequence(%s)" % ', '.join(
[repr(self.name)] +
["%s=%s" % (k, repr(getattr(self, k)))
for k in ['start', 'increment', 'optional']])
def _set_parent(self, column):
super(Sequence, self)._set_parent(column)
column._on_table_attach(self._set_table)
def _set_table(self, table, column):
self.metadata = table.metadata
@property
def bind(self):
if self.metadata:
return self.metadata.bind
else:
return None
def create(self, bind=None, checkfirst=True):
"""Creates this sequence in the database."""
if bind is None:
bind = _bind_or_error(self)
bind.create(self, checkfirst=checkfirst)
def drop(self, bind=None, checkfirst=True):
"""Drops this sequence from the database."""
if bind is None:
bind = _bind_or_error(self)
bind.drop(self, checkfirst=checkfirst)
class FetchedValue(object):
"""A marker for a transparent database-side default.
Use :class:`.FetchedValue` when the database is configured
to provide some automatic default for a column.
E.g.::
Column('foo', Integer, FetchedValue())
Would indicate that some trigger or default generator
will create a new value for the ``foo`` column during an
INSERT.
"""
def __init__(self, for_update=False):
self.for_update = for_update
def _set_parent(self, column):
self.column = column
if self.for_update:
self.column.server_onupdate = self
else:
self.column.server_default = self
def __repr__(self):
return 'FetchedValue(for_update=%r)' % self.for_update
class DefaultClause(FetchedValue):
"""A DDL-specified DEFAULT column value.
:class:`.DefaultClause` is a :class:`.FetchedValue`
that also generates a "DEFAULT" clause when
"CREATE TABLE" is emitted.
:class:`.DefaultClause` is generated automatically
whenever the ``server_default``, ``server_onupdate`` arguments of
:class:`.Column` are used. A :class:`.DefaultClause`
can be passed positionally as well.
For example, the following::
Column('foo', Integer, server_default="50")
Is equivalent to::
Column('foo', Integer, DefaultClause("50"))
"""
def __init__(self, arg, for_update=False):
util.assert_arg_type(arg, (basestring,
expression.ClauseElement,
expression._TextClause), 'arg')
super(DefaultClause, self).__init__(for_update)
self.arg = arg
def __repr__(self):
return "DefaultClause(%r, for_update=%r)" % \
(self.arg, self.for_update)
class PassiveDefault(DefaultClause):
"""A DDL-specified DEFAULT column value.
.. deprecated:: 0.6 :class:`.PassiveDefault` is deprecated.
Use :class:`.DefaultClause`.
"""
@util.deprecated("0.6",
":class:`.PassiveDefault` is deprecated. "
"Use :class:`.DefaultClause`.",
False)
def __init__(self, *arg, **kw):
DefaultClause.__init__(self, *arg, **kw)
class Constraint(SchemaItem):
"""A table-level SQL constraint."""
__visit_name__ = 'constraint'
def __init__(self, name=None, deferrable=None, initially=None,
_create_rule=None):
"""Create a SQL constraint.
:param name:
Optional, the in-database name of this ``Constraint``.
:param deferrable:
Optional bool. If set, emit DEFERRABLE or NOT DEFERRABLE when
issuing DDL for this constraint.
:param initially:
Optional string. If set, emit INITIALLY <value> when issuing DDL
for this constraint.
:param _create_rule:
a callable which is passed the DDLCompiler object during
compilation. Returns True or False to signal inline generation of
this Constraint.
The AddConstraint and DropConstraint DDL constructs provide
DDLElement's more comprehensive "conditional DDL" approach that is
passed a database connection when DDL is being issued. _create_rule
is instead called during any CREATE TABLE compilation, where there
may not be any transaction/connection in progress. However, it
allows conditional compilation of the constraint even for backends
which do not support addition of constraints through ALTER TABLE,
which currently includes SQLite.
_create_rule is used by some types to create constraints.
Currently, its call signature is subject to change at any time.
"""
self.name = name
self.deferrable = deferrable
self.initially = initially
self._create_rule = _create_rule
@property
def table(self):
try:
if isinstance(self.parent, Table):
return self.parent
except AttributeError:
pass
raise exc.InvalidRequestError(
"This constraint is not bound to a table. Did you "
"mean to call table.add_constraint(constraint) ?")
def _set_parent(self, parent):
self.parent = parent
parent.constraints.add(self)
def copy(self, **kw):
raise NotImplementedError()
class ColumnCollectionConstraint(Constraint):
"""A constraint that proxies a ColumnCollection."""
def __init__(self, *columns, **kw):
"""
:param \*columns:
A sequence of column names or Column objects.
:param name:
Optional, the in-database name of this constraint.
:param deferrable:
Optional bool. If set, emit DEFERRABLE or NOT DEFERRABLE when
issuing DDL for this constraint.
:param initially:
Optional string. If set, emit INITIALLY <value> when issuing DDL
for this constraint.
"""
super(ColumnCollectionConstraint, self).__init__(**kw)
self.columns = expression.ColumnCollection()
self._pending_colargs = [_to_schema_column_or_string(c)
for c in columns]
if self._pending_colargs and \
isinstance(self._pending_colargs[0], Column) and \
self._pending_colargs[0].table is not None:
self._set_parent(self._pending_colargs[0].table)
def _set_parent(self, table):
super(ColumnCollectionConstraint, self)._set_parent(table)
for col in self._pending_colargs:
if isinstance(col, basestring):
col = table.c[col]
self.columns.add(col)
def __contains__(self, x):
return x in self.columns
def copy(self, **kw):
return self.__class__(name=self.name, deferrable=self.deferrable,
initially=self.initially, *self.columns.keys())
def contains_column(self, col):
return self.columns.contains_column(col)
def __iter__(self):
return iter(self.columns)
def __len__(self):
return len(self.columns)
class CheckConstraint(Constraint):
"""A table- or column-level CHECK constraint.
Can be included in the definition of a Table or Column.
"""
def __init__(self, sqltext, name=None, deferrable=None,
initially=None, table=None, _create_rule=None):
"""Construct a CHECK constraint.
:param sqltext:
A string containing the constraint definition, which will be used
verbatim, or a SQL expression construct.
:param name:
Optional, the in-database name of the constraint.
:param deferrable:
Optional bool. If set, emit DEFERRABLE or NOT DEFERRABLE when
issuing DDL for this constraint.
:param initially:
Optional string. If set, emit INITIALLY <value> when issuing DDL
for this constraint.
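E.g., a minimal sketch using a string expression (the column and
constraint names are illustrative)::
CheckConstraint('num_units > 0', name='ck_positive_units')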
"""
super(CheckConstraint, self).\
__init__(name, deferrable, initially, _create_rule)
self.sqltext = expression._literal_as_text(sqltext)
if table is not None:
self._set_parent(table)
def __visit_name__(self):
if isinstance(self.parent, Table):
return "check_constraint"
else:
return "column_check_constraint"
__visit_name__ = property(__visit_name__)
def copy(self, **kw):
return CheckConstraint(self.sqltext,
name=self.name,
initially=self.initially,
deferrable=self.deferrable,
_create_rule=self._create_rule)
class ForeignKeyConstraint(Constraint):
"""A table-level FOREIGN KEY constraint.
Defines a single column or composite FOREIGN KEY ... REFERENCES
constraint. For a no-frills, single column foreign key, adding a
:class:`ForeignKey` to the definition of a :class:`Column` is a shorthand
equivalent for an unnamed, single column :class:`ForeignKeyConstraint`.
Examples of foreign key configuration are in :ref:`metadata_foreignkeys`.
"""
__visit_name__ = 'foreign_key_constraint'
def __init__(self, columns, refcolumns, name=None, onupdate=None,
ondelete=None, deferrable=None, initially=None, use_alter=False,
link_to_name=False, table=None):
"""Construct a composite-capable FOREIGN KEY.
:param columns: A sequence of local column names. The named columns
must be defined and present in the parent Table. The names should
match the ``key`` given to each column (defaults to the name) unless
``link_to_name`` is True.
:param refcolumns: A sequence of foreign column names or Column
objects. The columns must all be located within the same Table.
:param name: Optional, the in-database name of the key.
:param onupdate: Optional string. If set, emit ON UPDATE <value> when
issuing DDL for this constraint. Typical values include CASCADE,
DELETE and RESTRICT.
:param ondelete: Optional string. If set, emit ON DELETE <value> when
issuing DDL for this constraint. Typical values include CASCADE,
DELETE and RESTRICT.
:param deferrable: Optional bool. If set, emit DEFERRABLE or NOT
DEFERRABLE when issuing DDL for this constraint.
:param initially: Optional string. If set, emit INITIALLY <value> when
issuing DDL for this constraint.
:param link_to_name: if True, the string name given in ``column`` is
the rendered name of the referenced column, not its locally assigned
``key``.
:param use_alter: If True, do not emit the DDL for this constraint as
part of the CREATE TABLE definition. Instead, generate it via an
ALTER TABLE statement issued after the full collection of tables
have been created, and drop it via an ALTER TABLE statement before
the full collection of tables are dropped. This is shorthand for the
usage of :class:`AddConstraint` and :class:`DropConstraint` applied
as "after-create" and "before-drop" events on the MetaData object.
This is normally used to generate/drop constraints on objects that
are mutually dependent on each other.
"""
super(ForeignKeyConstraint, self).\
__init__(name, deferrable, initially)
self.onupdate = onupdate
self.ondelete = ondelete
self.link_to_name = link_to_name
if self.name is None and use_alter:
raise exc.ArgumentError("Alterable Constraint requires a name")
self.use_alter = use_alter
self._elements = util.OrderedDict()
# standalone ForeignKeyConstraint - create
# associated ForeignKey objects which will be applied to hosted
# Column objects (in col.foreign_keys), either now or when attached
# to the Table for string-specified names
for col, refcol in zip(columns, refcolumns):
self._elements[col] = ForeignKey(
refcol,
_constraint=self,
name=self.name,
onupdate=self.onupdate,
ondelete=self.ondelete,
use_alter=self.use_alter,
link_to_name=self.link_to_name
)
if table is not None:
self._set_parent(table)
@property
def columns(self):
return self._elements.keys()
@property
def elements(self):
return self._elements.values()
def _set_parent(self, table):
super(ForeignKeyConstraint, self)._set_parent(table)
for col, fk in self._elements.iteritems():
# string-specified column names now get
# resolved to Column objects
if isinstance(col, basestring):
col = table.c[col]
fk._set_parent(col)
if self.use_alter:
def supports_alter(ddl, event, schema_item, bind, **kw):
return table in set(kw['tables']) and \
bind.dialect.supports_alter
AddConstraint(self, on=supports_alter).\
execute_at('after-create', table.metadata)
DropConstraint(self, on=supports_alter).\
execute_at('before-drop', table.metadata)
def copy(self, **kw):
return ForeignKeyConstraint(
[x.parent.name for x in self._elements.values()],
[x._get_colspec(**kw) for x in self._elements.values()],
name=self.name,
onupdate=self.onupdate,
ondelete=self.ondelete,
use_alter=self.use_alter,
deferrable=self.deferrable,
initially=self.initially,
link_to_name=self.link_to_name
)
class PrimaryKeyConstraint(ColumnCollectionConstraint):
"""A table-level PRIMARY KEY constraint.
Defines a single column or composite PRIMARY KEY constraint. For a
no-frills primary key, adding ``primary_key=True`` to one or more
``Column`` definitions is a shorthand equivalent for an unnamed single- or
multiple-column PrimaryKeyConstraint.
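E.g., a sketch of a composite key declared at the table level (the
column names are illustrative)::
PrimaryKeyConstraint('id', 'version')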
"""
__visit_name__ = 'primary_key_constraint'
def _set_parent(self, table):
super(PrimaryKeyConstraint, self)._set_parent(table)
table._set_primary_key(self)
def _replace(self, col):
self.columns.replace(col)
class UniqueConstraint(ColumnCollectionConstraint):
"""A table-level UNIQUE constraint.
Defines a single column or composite UNIQUE constraint. For a no-frills,
single column constraint, adding ``unique=True`` to the ``Column``
definition is a shorthand equivalent for an unnamed, single column
UniqueConstraint.
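E.g., a sketch of a named, composite constraint (the column and
constraint names are illustrative)::
UniqueConstraint('first_name', 'last_name', name='uq_full_name')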
"""
__visit_name__ = 'unique_constraint'
class Index(SchemaItem):
"""A table-level INDEX.
Defines a composite (one or more column) INDEX. For a no-frills, single
column index, adding ``index=True`` to the ``Column`` definition is
a shorthand equivalent for an unnamed, single column Index.
"""
__visit_name__ = 'index'
def __init__(self, name, *columns, **kwargs):
"""Construct an index object.
:param name:
The name of the index
:param \*columns:
Columns to include in the index. All columns must belong to the same
table.
:param unique:
Defaults to False; if True, create a unique index.
:param \**kw:
Other keyword arguments may be interpreted by specific dialects.
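E.g., a sketch of a named unique index (the table and column names
are illustrative)::
Index('ix_user_name', user_table.c.name, unique=True)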
"""
self.name = name
self.columns = expression.ColumnCollection()
self.table = None
self.unique = kwargs.pop('unique', False)
self.kwargs = kwargs
for column in columns:
column = _to_schema_column(column)
if self.table is None:
self._set_parent(column.table)
elif column.table != self.table:
# all columns must be from the same table
raise exc.ArgumentError(
"All index columns must be from same table. "
"%s is from %s not %s" %
(column, column.table, self.table))
self.columns.add(column)
def _set_parent(self, table):
self.table = table
table.indexes.add(self)
@property
def bind(self):
"""Return the connectable associated with this Index."""
return self.table.bind
def create(self, bind=None):
if bind is None:
bind = _bind_or_error(self)
bind.create(self)
return self
def drop(self, bind=None):
if bind is None:
bind = _bind_or_error(self)
bind.drop(self)
def __repr__(self):
return 'Index("%s", %s%s)' % (
self.name,
', '.join(repr(c) for c in self.columns),
(self.unique and ', unique=True') or '')
class MetaData(SchemaItem):
"""A collection of Tables and their associated schema constructs.
Holds a collection of Tables and an optional binding to an ``Engine`` or
``Connection``. If bound, the :class:`~sqlalchemy.schema.Table` objects
in the collection and their columns may participate in implicit SQL
execution.
The `Table` objects themselves are stored in the `metadata.tables`
dictionary.
The ``bind`` property may be assigned to dynamically. A common pattern is
to start unbound and then bind later when an engine is available::
metadata = MetaData()
# define tables
Table('mytable', metadata, ...)
# connect to an engine later, perhaps after loading a URL from a
# configuration file
metadata.bind = an_engine
MetaData is a thread-safe object after tables have been explicitly defined
or loaded via reflection.
.. index::
single: thread safety; MetaData
"""
__visit_name__ = 'metadata'
ddl_events = ('before-create', 'after-create',
'before-drop', 'after-drop')
def __init__(self, bind=None, reflect=False):
"""Create a new MetaData object.
:param bind:
An Engine or Connection to bind to. May also be a string or URL
instance, these are passed to create_engine() and this MetaData will
be bound to the resulting engine.
:param reflect:
Optional, automatically load all tables from the bound database.
Defaults to False. ``bind`` is required when this option is set.
For finer control over loaded tables, use the ``reflect`` method of
``MetaData``.
"""
self.tables = {}
self.bind = bind
self.metadata = self
self.ddl_listeners = util.defaultdict(list)
if reflect:
if not bind:
raise exc.ArgumentError(
"A bind must be supplied in conjunction "
"with reflect=True")
self.reflect()
def __repr__(self):
return 'MetaData(%r)' % self.bind
def __contains__(self, table_or_key):
if not isinstance(table_or_key, basestring):
table_or_key = table_or_key.key
return table_or_key in self.tables
def __getstate__(self):
return {'tables': self.tables}
def __setstate__(self, state):
self.tables = state['tables']
self._bind = None
def is_bound(self):
"""True if this MetaData is bound to an Engine or Connection."""
return self._bind is not None
def bind(self):
"""An Engine or Connection to which this MetaData is bound.
This property may be assigned an ``Engine`` or ``Connection``, or
assigned a string or URL to automatically create a basic ``Engine``
for this bind with ``create_engine()``.
"""
return self._bind
def _bind_to(self, bind):
"""Bind this MetaData to an Engine, Connection, string or URL."""
if isinstance(bind, (basestring, url.URL)):
from sqlalchemy import create_engine
self._bind = create_engine(bind)
else:
self._bind = bind
bind = property(bind, _bind_to)
def clear(self):
"""Clear all Table objects from this MetaData."""
# TODO: why have clear()/remove() but not all
# other accessors/mutators for the tables dict?
self.tables.clear()
def remove(self, table):
"""Remove the given Table object from this MetaData."""
# TODO: scan all other tables and remove FK _column
del self.tables[table.key]
@property
def sorted_tables(self):
"""Returns a list of ``Table`` objects sorted in order of
dependency.
"""
return sqlutil.sort_tables(self.tables.itervalues())
def reflect(self, bind=None, schema=None, views=False, only=None):
"""Load all available table definitions from the database.
Automatically creates ``Table`` entries in this ``MetaData`` for any
table available in the database but not yet present in the
``MetaData``. May be called multiple times to pick up tables recently
added to the database, however no special action is taken if a table
in this ``MetaData`` no longer exists in the database.
:param bind:
A :class:`~sqlalchemy.engine.base.Connectable` used to access the
database; if None, uses the existing bind on this ``MetaData``, if
any.
:param schema:
Optional, query and reflect tables from an alternate schema.
:param views:
If True, also reflect views.
:param only:
Optional. Load only a sub-set of available named tables. May be
specified as a sequence of names or a callable.
If a sequence of names is provided, only those tables will be
reflected. An error is raised if a table is requested but not
available. Named tables already present in this ``MetaData`` are
ignored.
If a callable is provided, it will be used as a boolean predicate to
filter the list of potential table names. The callable is called
with a table name and this ``MetaData`` instance as positional
arguments and should return a true value for any table to reflect.
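E.g., a sketch reflecting only tables matching a name prefix (the
prefix and engine are illustrative)::
meta.reflect(bind=engine,
only=lambda name, meta: name.startswith('report_'))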
"""
reflect_opts = {'autoload': True}
if bind is None:
bind = _bind_or_error(self)
conn = None
else:
reflect_opts['autoload_with'] = bind
conn = bind.contextual_connect()
if schema is not None:
reflect_opts['schema'] = schema
available = util.OrderedSet(bind.engine.table_names(schema,
connection=conn))
if views:
available.update(
bind.dialect.get_view_names(conn or bind, schema)
)
current = set(self.tables.iterkeys())
if only is None:
load = [name for name in available if name not in current]
elif util.callable(only):
load = [name for name in available
if name not in current and only(name, self)]
else:
missing = [name for name in only if name not in available]
if missing:
s = schema and (" schema '%s'" % schema) or ''
raise exc.InvalidRequestError(
'Could not reflect: requested table(s) not available '
'in %s%s: (%s)' %
(bind.engine.url, s, ', '.join(missing)))
load = [name for name in only if name not in current]
for name in load:
Table(name, self, **reflect_opts)
def append_ddl_listener(self, event, listener):
"""Append a DDL event listener to this ``MetaData``.
The ``listener`` callable will be triggered when this ``MetaData`` is
involved in DDL creates or drops, and will be invoked either before
all Table-related actions or after.
:param event:
One of ``MetaData.ddl_events``; 'before-create', 'after-create',
'before-drop' or 'after-drop'.
:param listener:
A callable, invoked with three positional arguments:
:event:
The event currently being handled
:target:
The ``MetaData`` object being operated upon
:bind:
The ``Connection`` being used for DDL execution.
Listeners are added to the MetaData's ``ddl_listeners`` attribute.
Note: MetaData listeners are invoked even when ``Tables`` are created
in isolation. This may change in a future release. I.e.::
# triggers all MetaData and Table listeners:
metadata.create_all()
# triggers MetaData listeners too:
some.table.create()
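A minimal sketch of a listener matching this signature (the function
name is illustrative)::
def log_ddl(event, target, bind):
    print 'DDL %s for %s' % (event, target)
metadata.append_ddl_listener('before-create', log_ddl)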
"""
if event not in self.ddl_events:
raise LookupError(event)
self.ddl_listeners[event].append(listener)
def create_all(self, bind=None, tables=None, checkfirst=True):
"""Create all tables stored in this metadata.
Conditional by default, will not attempt to recreate tables already
present in the target database.
:param bind:
A :class:`~sqlalchemy.engine.base.Connectable` used to access the
database; if None, uses the existing bind on this ``MetaData``, if
any.
:param tables:
Optional list of ``Table`` objects, which is a subset of the total
tables in the ``MetaData`` (others are ignored).
:param checkfirst:
Defaults to True, don't issue CREATEs for tables already present
in the target database.
"""
if bind is None:
bind = _bind_or_error(self)
bind.create(self, checkfirst=checkfirst, tables=tables)
def drop_all(self, bind=None, tables=None, checkfirst=True):
"""Drop all tables stored in this metadata.
Conditional by default, will not attempt to drop tables not present in
the target database.
:param bind:
A :class:`~sqlalchemy.engine.base.Connectable` used to access the
database; if None, uses the existing bind on this ``MetaData``, if
any.
:param tables:
Optional list of ``Table`` objects, which is a subset of the
total tables in the ``MetaData`` (others are ignored).
:param checkfirst:
Defaults to True, only issue DROPs for tables confirmed to be
present in the target database.
"""
if bind is None:
bind = _bind_or_error(self)
bind.drop(self, checkfirst=checkfirst, tables=tables)
class ThreadLocalMetaData(MetaData):
"""A MetaData variant that presents a different ``bind`` in every thread.
Makes the ``bind`` property of the MetaData a thread-local value, allowing
this collection of tables to be bound to different ``Engine``
implementations or connections in each thread.
The ThreadLocalMetaData starts off bound to None in each thread. Binds
must be made explicitly by assigning to the ``bind`` property or using
``connect()``. You can also re-bind dynamically multiple times per
thread, just like a regular ``MetaData``.
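E.g., a sketch of per-thread binding (the URL is illustrative)::
meta = ThreadLocalMetaData()
meta.bind = 'sqlite:///app.db'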
"""
__visit_name__ = 'metadata'
def __init__(self):
"""Construct a ThreadLocalMetaData."""
self.context = util.threading.local()
self.__engines = {}
super(ThreadLocalMetaData, self).__init__()
def bind(self):
"""The bound Engine or Connection for this thread.
This property may be assigned an Engine or Connection, or assigned a
string or URL to automatically create a basic Engine for this bind
with ``create_engine()``."""
return getattr(self.context, '_engine', None)
def _bind_to(self, bind):
"""Bind to a Connectable in the caller's thread."""
if isinstance(bind, (basestring, url.URL)):
try:
self.context._engine = self.__engines[bind]
except KeyError:
from sqlalchemy import create_engine
e = create_engine(bind)
self.__engines[bind] = e
self.context._engine = e
else:
# TODO: this is squirrelly. we shouldn't have to hold onto engines
# in a case like this
if bind not in self.__engines:
self.__engines[bind] = bind
self.context._engine = bind
bind = property(bind, _bind_to)
def is_bound(self):
"""True if there is a bind for this thread."""
return (hasattr(self.context, '_engine') and
self.context._engine is not None)
def dispose(self):
"""Dispose all bound engines, in all thread contexts."""
for e in self.__engines.itervalues():
if hasattr(e, 'dispose'):
e.dispose()
class SchemaVisitor(visitors.ClauseVisitor):
"""Define the visiting for ``SchemaItem`` objects."""
__traverse_options__ = {'schema_visitor':True}
class DDLElement(expression.Executable, expression.ClauseElement):
"""Base class for DDL expression constructs."""
_execution_options = expression.Executable.\
_execution_options.union({'autocommit':True})
target = None
on = None
def execute(self, bind=None, target=None):
"""Execute this DDL immediately.
Executes the DDL statement in isolation using the supplied
:class:`~sqlalchemy.engine.base.Connectable`, or the
:class:`~sqlalchemy.engine.base.Connectable` assigned to the ``.bind``
property if none is supplied. If the DDL has a conditional ``on``
criteria, it will be invoked with None as the event.
:param bind:
Optional, an ``Engine`` or ``Connection``. If not supplied, a valid
:class:`~sqlalchemy.engine.base.Connectable` must be present in the
``.bind`` property.
:param target:
Optional, defaults to None. The target SchemaItem for the
execute call. Will be passed to the ``on`` callable if any,
and may also provide string expansion data for the
statement. See ``execute_at`` for more information.
"""
if bind is None:
bind = _bind_or_error(self)
if self._should_execute(None, target, bind):
return bind.execute(self.against(target))
else:
bind.engine.logger.info(
"DDL execution skipped, criteria not met.")
def execute_at(self, event, target):
"""Link execution of this DDL to the DDL lifecycle of a SchemaItem.
Links this ``DDLElement`` to a ``Table`` or ``MetaData`` instance,
executing it when that schema item is created or dropped. The DDL
statement will be executed using the same Connection and transactional
context as the Table create/drop itself. The ``.bind`` property of
this statement is ignored.
:param event:
One of the events defined in the schema item's ``.ddl_events``;
e.g. 'before-create', 'after-create', 'before-drop' or 'after-drop'
:param target:
The Table or MetaData instance with which this DDLElement will
be associated.
A DDLElement instance can be linked to any number of schema items.
``execute_at`` builds on the ``append_ddl_listener`` interface of
:class:`MetaData` and :class:`Table` objects.
Caveat: Creating or dropping a Table in isolation will also trigger
any DDL set to ``execute_at`` that Table's MetaData. This may change
in a future release.
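As a sketch, attaching a one-off statement to a table's create (the
statement and table are illustrative)::
DDL('GRANT SELECT ON users TO PUBLIC').execute_at('after-create', users)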
"""
if not hasattr(target, 'ddl_listeners'):
raise exc.ArgumentError(
"%s does not support DDL events" % type(target).__name__)
if event not in target.ddl_events:
raise exc.ArgumentError(
"Unknown event, expected one of (%s), got '%r'" %
(', '.join(target.ddl_events), event))
target.ddl_listeners[event].append(self)
return self
@expression._generative
def against(self, target):
"""Return a copy of this DDL against a specific schema item."""
self.target = target
def __call__(self, event, target, bind, **kw):
"""Execute the DDL as a ddl_listener."""
if self._should_execute(event, target, bind, **kw):
return bind.execute(self.against(target))
def _check_ddl_on(self, on):
if (on is not None and
(not isinstance(on, (basestring, tuple, list, set)) and
not util.callable(on))):
raise exc.ArgumentError(
"Expected the name of a database dialect, a tuple "
"of names, or a callable for "
"'on' criteria, got type '%s'." % type(on).__name__)
def _should_execute(self, event, target, bind, **kw):
if self.on is None:
return True
elif isinstance(self.on, basestring):
return self.on == bind.engine.name
elif isinstance(self.on, (tuple, list, set)):
return bind.engine.name in self.on
else:
return self.on(self, event, target, bind, **kw)
def bind(self):
if self._bind:
return self._bind
def _set_bind(self, bind):
self._bind = bind
bind = property(bind, _set_bind)
def _generate(self):
s = self.__class__.__new__(self.__class__)
s.__dict__ = self.__dict__.copy()
return s
def _compiler(self, dialect, **kw):
"""Return a compiler appropriate for this ClauseElement, given a
Dialect."""
return dialect.ddl_compiler(dialect, self, **kw)
class DDL(DDLElement):
"""A literal DDL statement.
Specifies literal SQL DDL to be executed by the database. DDL objects can
be attached to ``Tables`` or ``MetaData`` instances, conditionally
executing SQL as part of the DDL lifecycle of those schema items. Basic
templating support allows a single DDL instance to handle repetitive tasks
for multiple tables.
Examples::
tbl = Table('users', metadata, Column('uid', Integer)) # ...
DDL('DROP TRIGGER users_trigger').execute_at('before-create', tbl)
spow = DDL('ALTER TABLE %(table)s SET secretpowers TRUE', on='somedb')
spow.execute_at('after-create', tbl)
drop_spow = DDL('ALTER TABLE users SET secretpowers FALSE')
connection.execute(drop_spow)
When operating on Table events, the following ``statement``
string substitutions are available::
%(table)s - the Table name, with any required quoting applied
%(schema)s - the schema name, with any required quoting applied
%(fullname)s - the Table name including schema, quoted if needed
The DDL's ``context``, if any, will be combined with the standard
substitutions noted above. Keys present in the context will override
the standard substitutions.
"""
__visit_name__ = "ddl"
def __init__(self, statement, on=None, context=None, bind=None):
"""Create a DDL statement.
:param statement:
A string or unicode string to be executed. Statements will be
processed with Python's string formatting operator. See the
``context`` argument and the ``execute_at`` method.
A literal '%' in a statement must be escaped as '%%'.
SQL bind parameters are not available in DDL statements.
:param on:
Optional filtering criteria. May be a string, tuple or a callable
predicate. If a string, it will be compared to the name of the
executing database dialect::
DDL('something', on='postgresql')
If a tuple, specifies multiple dialect names::
DDL('something', on=('postgresql', 'mysql'))
If a callable, it will be invoked with four positional arguments
as well as optional keyword arguments:
:ddl:
This DDL element.
:event:
The name of the event that has triggered this DDL, such as
'after-create'. Will be None if the DDL is executed explicitly.
:target:
The ``Table`` or ``MetaData`` object which is the target of
this event. May be None if the DDL is executed explicitly.
:connection:
The ``Connection`` being used for DDL execution
:tables:
Optional keyword argument - a list of Table objects which are to
be created/dropped within a MetaData.create_all() or drop_all()
method call.
If the callable returns a true value, the DDL statement will be
executed.
:param context:
Optional dictionary, defaults to None. These values will be
available for use in string substitutions on the DDL statement.
:param bind:
Optional. A :class:`~sqlalchemy.engine.base.Connectable`, used by
default when ``execute()`` is invoked without a bind argument.
"""
if not isinstance(statement, basestring):
raise exc.ArgumentError(
"Expected a string or unicode SQL statement, got '%r'" %
statement)
self.statement = statement
self.context = context or {}
self._check_ddl_on(on)
self.on = on
self._bind = bind
def __repr__(self):
return '<%s@%s; %s>' % (
type(self).__name__, id(self),
', '.join([repr(self.statement)] +
['%s=%r' % (key, getattr(self, key))
for key in ('on', 'context')
if getattr(self, key)]))
def _to_schema_column(element):
if hasattr(element, '__clause_element__'):
element = element.__clause_element__()
if not isinstance(element, Column):
raise exc.ArgumentError("schema.Column object expected")
return element
def _to_schema_column_or_string(element):
if hasattr(element, '__clause_element__'):
element = element.__clause_element__()
return element
class _CreateDropBase(DDLElement):
"""Base class for DDL constucts that represent CREATE and DROP or
equivalents.
The common theme of _CreateDropBase is a single
``element`` attribute which refers to the element
to be created or dropped.
"""
def __init__(self, element, on=None, bind=None):
self.element = element
self._check_ddl_on(on)
self.on = on
self.bind = bind
def _create_rule_disable(self, compiler):
"""Allow disable of _create_rule using a callable.
Pass to _create_rule using
util.portable_instancemethod(self._create_rule_disable)
to retain serializability.
"""
return False
class CreateTable(_CreateDropBase):
"""Represent a CREATE TABLE statement."""
__visit_name__ = "create_table"
class DropTable(_CreateDropBase):
"""Represent a DROP TABLE statement."""
__visit_name__ = "drop_table"
class CreateSequence(_CreateDropBase):
"""Represent a CREATE SEQUENCE statement."""
__visit_name__ = "create_sequence"
class DropSequence(_CreateDropBase):
"""Represent a DROP SEQUENCE statement."""
__visit_name__ = "drop_sequence"
class CreateIndex(_CreateDropBase):
"""Represent a CREATE INDEX statement."""
__visit_name__ = "create_index"
class DropIndex(_CreateDropBase):
"""Represent a DROP INDEX statement."""
__visit_name__ = "drop_index"
class AddConstraint(_CreateDropBase):
"""Represent an ALTER TABLE ADD CONSTRAINT statement."""
__visit_name__ = "add_constraint"
def __init__(self, element, *args, **kw):
super(AddConstraint, self).__init__(element, *args, **kw)
element._create_rule = util.portable_instancemethod(
self._create_rule_disable)
class DropConstraint(_CreateDropBase):
"""Represent an ALTER TABLE DROP CONSTRAINT statement."""
__visit_name__ = "drop_constraint"
def __init__(self, element, cascade=False, **kw):
self.cascade = cascade
super(DropConstraint, self).__init__(element, **kw)
element._create_rule = util.portable_instancemethod(
self._create_rule_disable)
def _bind_or_error(schemaitem, msg=None):
bind = schemaitem.bind
if not bind:
name = schemaitem.__class__.__name__
label = getattr(schemaitem, 'fullname',
getattr(schemaitem, 'name', None))
if label:
item = '%s %r' % (name, label)
else:
item = name
if isinstance(schemaitem, (MetaData, DDL)):
bindable = "the %s's .bind" % name
else:
bindable = "this %s's .metadata.bind" % name
if msg is None:
msg = "The %s is not bound to an Engine or Connection. "\
"Execution can not proceed without a database to execute "\
"against. Either execute with an explicit connection or "\
"assign %s to enable implicit execution." % \
(item, bindable)
raise exc.UnboundExecutionError(msg)
return bind
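# Illustrative sketch (not part of the original module): the callable
# predicate form of ``on`` documented in DDL.__init__ above. The table
# name in the commented invocation is hypothetical.
def _example_only_postgresql(ddl, event, target, connection, **kw):
    """Example predicate: let the DDL run only against PostgreSQL."""
    return connection.engine.name == 'postgresql'
# DDL('ALTER TABLE users SET secretpowers TRUE',
#     on=_example_only_postgresql).execute_at('after-create', users_table)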
|
# Always prefer setuptools over distutils
from setuptools import setup, find_packages
# To use a consistent encoding
from codecs import open
from os import path
here = path.abspath(path.dirname(__file__))
# Get the long description from the relevant file
with open(path.join(here, 'README.rst'), encoding='utf-8') as f:
long_description = f.read()
setup(
name='pyjsend',
version='0.1.0',
description='A python jsend library',
long_description=long_description,
url='https://github.com/onceaweeq/py-jsend',
author='Juncheol Cho',
author_email='[email protected]',
# Choose your license
license='MIT',
# See https://pypi.python.org/pypi?%3Aaction=list_classifiers
classifiers=[
# How mature is this project? Common values are
# 3 - Alpha
# 4 - Beta
# 5 - Production/Stable
'Development Status :: 3 - Alpha',
# Indicate who your project is intended for
'Intended Audience :: Developers',
'Topic :: Software Development :: Build Tools',
# Pick your license as you wish (should match "license" above)
'License :: OSI Approved :: MIT License',
# Specify the Python versions you support here. In particular, ensure
# that you indicate whether you support Python 2, Python 3 or both.
'Programming Language :: Python :: 3',
'Programming Language :: Python :: 3.2',
'Programming Language :: Python :: 3.3',
'Programming Language :: Python :: 3.4',
'Topic :: Internet :: WWW/HTTP :: HTTP Servers',
'Topic :: Internet :: WWW/HTTP :: Dynamic Content',
],
# What does your project relate to?
keywords='python jsend',
# You can just specify the packages manually here if your project is
# simple. Or you can use find_packages().
packages=find_packages(exclude=['contrib', 'docs', 'tests*']),
# List run-time dependencies here. These will be installed by pip when
# your project is installed. For an analysis of "install_requires" vs pip's
# requirements files see:
# https://packaging.python.org/en/latest/requirements.html
install_requires=[],
# List additional groups of dependencies here (e.g. development
# dependencies). You can install these using the following syntax,
# for example:
# $ pip install -e .[dev,test]
extras_require={},
# If there are data files included in your packages that need to be
# installed, specify them here. If using Python 2.6 or less, then these
# have to be included in MANIFEST.in as well.
package_data={},
    # Although 'package_data' is the preferred approach, in some cases you may
# need to place data files outside of your packages. See:
# http://docs.python.org/3.4/distutils/setupscript.html#installing-additional-files # noqa
    # In this case, 'data_files' will be installed into '<sys.prefix>/my_data'
data_files=[],
# To provide executable scripts, use entry points in preference to the
# "scripts" keyword. Entry points provide cross-platform support and allow
# pip to create the appropriate form of executable for the target platform.
entry_points={},
)
|
# -*- coding:utf-8 -*-
from ..test_stub import *
class EIP(MINI):
def __init__(self, uri=None, initialized=False):
self.eip_name = None
self.eip_list = []
if initialized:
# if initialized is True, uri should not be None
self.uri = uri
return
super(EIP, self).__init__()
def create_eip(self, name=None, dsc=None, network=None, required_ip=None, view='card'):
self.eip_name = name if name else 'EIP-' + get_time_postfix()
self.eip_list.append(self.eip_name)
network = network if network else os.getenv('l3PublicNetworkName')
test_util.test_logger('Create EIP[%s]' % self.eip_name)
priority_dict = {'l3NetworkUuid': network}
eip_dict = {'name': self.eip_name,
'description': dsc,
'requiredIp': required_ip}
eip_elem = self.create(eip_dict, "eip", view=view, priority_dict=priority_dict)
check_list = [self.eip_name]
if required_ip is not None:
check_list.append(required_ip)
checker = MINICHECKER(self, eip_elem)
checker.eip_check(check_list)
def delete_eip(self, eip_name=None, view='card', corner_btn=True, details_page=False):
eip_name = eip_name if eip_name else self.eip_list
self.delete(eip_name, 'eip', view=view, corner_btn=corner_btn, details_page=details_page)
def eip_binding(self, eip_name, vm_name):
test_util.test_logger("Bind %s to %s" % (eip_name, vm_name))
vm_inv = get_inv(vm_name, "vm")
vm_nic_ip = vm_inv.vmNics[0].ip
self.navigate('eip')
        self.more_operate(u'绑定', eip_name)  # u'绑定' == "Bind"
self.input('resourceUuid', vm_name)
self.input('vmNicUuid', vm_nic_ip)
self.click_ok()
eip_elem = self.get_res_element(eip_name)
checker = MINICHECKER(self, eip_elem)
checker.eip_check([vm_nic_ip])
def eip_unbinding(self, eip_name):
test_util.test_logger("Unbind %s" % eip_name)
self.navigate('eip')
        self.more_operate(u'解绑', eip_name)  # u'解绑' == "Unbind"
self.click_ok()
eip_elem = self.get_res_element(eip_name)
        assert self.get_detail_info(eip_name, 'eip', u'私网IP:') == '-'  # u'私网IP:' == "Private IP:"
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""Updates marks for modified results"""
from __future__ import with_statement
import os
import time
from . import paths
from . import repo_walker
from . import submissions
from . import penalty
from . import vmlogging
from .paths import VmcheckerPaths
from .config import StorerCourseConfig
from .coursedb import opening_course_db
from .courselist import CourseList
_logger = vmlogging.create_module_logger('update_db')
def compute_late_penalty(assignment, account, vmcfg):
"""Returns the late submission penalty for this submission
Computes the time penalty for the account, obtains the other
penalties and bonuses from the grade_filename file
and computes the final grade.
"""
# The weights and limit are specific for each assignment
# because you can have different weights and limit per
# assignment
weights = [float(x) for x in vmcfg.assignments().get(assignment, 'PenaltyWeights').split()]
limit = int(vmcfg.assignments().get(assignment, 'PenaltyLimit'))
sss = submissions.Submissions(VmcheckerPaths(vmcfg.root_path()))
upload_time = sss.get_upload_time_struct(assignment, account)
deadline = time.strptime(vmcfg.assignments().get(assignment, 'Deadline'),
penalty.DATE_FORMAT)
holidays = int(vmcfg.get('vmchecker', 'Holidays'))
if holidays != 0:
        holiday_start = vmcfg.get('vmchecker', 'HolidayStart').split(' , ')
holiday_finish = vmcfg.get('vmchecker', 'HolidayFinish').split(' , ')
penalty_value = penalty.compute_penalty(upload_time, deadline, 1,
weights, limit, holiday_start, holiday_finish)[0]
else:
penalty_value = penalty.compute_penalty(upload_time, deadline, 1,
weights, limit)[0]
return (-penalty_value)
def compute_TA_penalty(grade_filename):
"""Compute the penalty assigned by the teaching assistant
The grade_filename file can have any structure.
The only rule is the following: any line that starts with a number
with '-' or '+' is taken into account when computing the grade.
An example for the file:
+0.1 very good comments
-0.2 possible leak of memory on line 234
+0.1 treats exceptions
-0.2 use of magic numbers
"""
if not os.path.exists(grade_filename):
return 0
acc = 0
with open(grade_filename) as handler:
for line in handler.readlines():
words = line.split()
if len(words) == 0:
continue
fst_word = words[0].strip()
try:
# The first line may be of the form: '-1.0: my comment'.
                # This makes the first word '-1.0:', which does not
# parse as a float.
acc += float(fst_word.split(':')[0])
except ValueError:
pass
return acc
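# Illustrative example (hypothetical file, not used by vmchecker): a grade
# file containing the two lines
#     +0.1 very good comments
#     -0.2: possible leak of memory on line 234
# makes compute_TA_penalty(path) return -0.1, because '+0.1' parses
# directly and '-0.2:' parses once the ':' suffix is split off.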
def compute_grade(assignment, user, grade_filename, vmcfg):
"""Returns the grade value after applying penalties and bonuses."""
    # If the file only contains one word (the evaluation status), there's nothing to compute
with open(grade_filename) as f:
lines = f.readlines()
if len(lines) == 1 and len(lines[0].split()) == 1:
# only one word in the file!
return lines[0].split()[0]
# Some courses don't grade on a 10 scale, so read the total number
# of points for this assignment
grade = float(vmcfg.assignments().get(assignment, 'TotalPoints'))
grade += compute_TA_penalty(grade_filename)
grade += compute_late_penalty(assignment, user, vmcfg)
    # At this point, grade is <= 0 if the homework didn't compile
if grade <= 0:
grade = 0
return grade
def db_save_grade(vmcfg, assignment, account, submission_root,
course_db, ignore_timestamp = False):
"""Updates grade for the account's submission of assignment.
Reads the grade's value only if the file containing the
value was modified since the last update of the DB for this
submission.
"""
grade_filename = paths.submission_results_grade(submission_root)
assignment_id = course_db.get_assignment_id(assignment)
if assignment_id is None:
assignment_id = course_db.add_assignment(assignment)
user_id = None
team_id = None
isTeamAccount = False
# First check if this is a team's mutual account
vmpaths = paths.VmcheckerPaths(vmcfg.root_path())
sss = submissions.Submissions(vmpaths)
submitting_user = sss.get_submitting_user(assignment, account)
    # Then check whether the account is registered as a team
team_id = course_db.get_team_id(account)
if team_id is not None:
isTeamAccount = True
if submitting_user is not None:
# If there is a separate submitting user, then this is a team account
isTeamAccount = True
if team_id is None:
team_id = course_db.add_team(account, True)
submitting_user_id = course_db.get_user_id(submitting_user)
if submitting_user_id is None:
submitting_user_id = course_db.add_user(submitting_user)
course_db.add_team_member(submitting_user_id, team_id)
course_db.activate_team_for_assignment(team_id, assignment_id)
if not isTeamAccount:
user_id = course_db.get_user_id(account)
if user_id is None:
user_id = course_db.add_user(account)
db_mtime = course_db.get_grade_mtime(assignment_id, user_id = user_id, team_id = team_id)
if os.path.exists(grade_filename):
# we have the evaluation results for this homework
mtime = os.path.getmtime(grade_filename)
grade = compute_grade(assignment, account, grade_filename, vmcfg)
elif os.path.exists(submission_root):
# we don't have evaluation results, but the homework exists.
# it must be in the tester's queue waiting to be evaluated.
ignore_timestamp = True
grade = submissions.STATUS_QUEUED
mtime = 0
else:
# not evaluated and not even submitted. The student did not
# send anything for this homework, so we don't fill this entry
# in the grade table.
return
# only update grades for newer submissions than those already checked
# or when forced to do so
if db_mtime != mtime or ignore_timestamp:
_logger.debug('Updating %s, %s (%s)', assignment, account, grade_filename)
if not isTeamAccount:
course_db.save_user_grade(assignment_id, user_id, grade, mtime)
else:
course_db.save_team_grade(assignment_id, team_id, grade, mtime)
_logger.info('Updated %s, %s (%s) -- grade=%s', assignment, account, grade_filename, str(grade))
else:
_logger.info('SKIP (no tstamp change) %s, %s (%s)', assignment, account, grade_filename)
def update_grades(course_id, account = None, assignment = None,
ignore_timestamp = False, simulate = False):
"""Update grades based on the given parameters.
@account and @assignment can be used to narrow the search:
* account==None, assignment==None -- compute all grades
* account==None, assignment!=None -- all submissions for the assignment
* account!=None, assignment==None -- all submissions from the account
* account!=None, assignment!=None -- the account's last submission for the assignment
"""
vmcfg = StorerCourseConfig(CourseList().course_config(course_id))
vmpaths = paths.VmcheckerPaths(vmcfg.root_path())
walker = repo_walker.RepoWalker(vmcfg, simulate)
db_file = vmpaths.db_file()
with opening_course_db(db_file, isolation_level="EXCLUSIVE") as course_db:
walker.walk(account, assignment, func=db_save_grade,
args=(course_db, ignore_timestamp))
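# Illustrative usage (hypothetical course id, account and assignment names):
#   update_grades('os', account='john.doe', assignment='1-minishell')
# recomputes the grade of that account's last submission for the assignment.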
|
# Copyright (C) 2020 Christopher Gearhart
# [email protected]
# http://bblanimation.com/
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
# System imports
import os
import json
from zipfile import ZipFile
# Blender imports
import bpy
from bpy.props import *
from bpy.types import Operator
# Module imports
from ..functions import *
# ui list item actions
class BRICKER_OT_bool_list_action(Operator):
bl_idname = "bricker.bool_list_action"
bl_label = "Boolean List Action"
bl_options = {"REGISTER", "UNDO"}
################################################
# Blender Operator methods
# @classmethod
# def poll(self, context):
# scn = context.scene
# for cm in scn.cmlist:
# if cm.animated:
# return False
# return True
def execute(self, context):
try:
scn, cm, _ = get_active_context_info(context)
idx = cm.boolean_index
try:
item = cm.booleans[idx]
except IndexError:
                item = None  # keep 'item' bound even when idx is out of range
if self.action == "REMOVE" and len(cm.booleans) > 0 and idx >= 0:
self.remove_item(context, idx)
elif self.action == "ADD":
self.add_item(context)
elif self.action == "DOWN" and idx < len(cm.booleans) - 1:
self.move_down(context, item)
elif self.action == "UP" and idx >= 1:
self.move_up(context, item)
except:
bricker_handle_exception()
return{"FINISHED"}
###################################################
# class variables
action = EnumProperty(
name="Action",
items=(
("UP", "Up", ""),
("DOWN", "Down", ""),
("REMOVE", "Remove", ""),
("ADD", "Add", ""),
),
default="ADD",
)
#############################################
# class methods
@staticmethod
def add_item(context):
# scn = context.scene
# active_object = context.active_object
# if active_object:
# # if active object isn't on visible layer, don't set it as default source for new model
# if not is_obj_visible_in_viewport(active_object):
# active_object = None
# # if active object is already the source for another model, don't set it as default source for new model
# elif any([cm.source_obj is active_object for cm in scn.cmlist]):
# active_object = None
scn, cm, _ = get_active_context_info(context)
item = cm.booleans.add()
# switch to new cmlist item
cm.boolean_index = len(cm.booleans) - 1
# set item properties
item.idx = cm.boolean_index
item.name = f"Boolean {item.idx}"
        item.id = max(b.id for b in cm.booleans) + 1
def remove_item(self, context, idx):
scn, cm, _ = get_active_context_info(context)
        if len(cm.booleans) - 1 == cm.boolean_index:
cm.boolean_index -= 1
cm.booleans.remove(idx)
if cm.boolean_index == -1 and len(cm.booleans) > 0:
cm.boolean_index = 0
# else:
# # run update function of the property
# cm.boolean_index = cm.boolean_index
self.update_idxs(cm.booleans)
def move_down(self, context, item):
        scn, cm, _ = get_active_context_info(context)
cm.booleans.move(cm.boolean_index, cm.boolean_index + 1)
cm.boolean_index += 1
self.update_idxs(cm.booleans)
def move_up(self, context, item):
        scn, cm, _ = get_active_context_info(context)
cm.booleans.move(cm.boolean_index, cm.boolean_index - 1)
cm.boolean_index -= 1
self.update_idxs(cm.booleans)
@staticmethod
def update_idxs(list):
for i, item in enumerate(list):
item.idx = i
#############################################
|
#!/usr/bin/env python3
# GraviTrax Sound Trigger
# By Mike Cook September 2019
import time
import pygame
import os
import RPi.GPIO as io
pygame.init()
pygame.display.set_caption("GraviTrax Sound Trigger")
os.environ['SDL_VIDEO_WINDOW_POS'] = 'center'
pygame.mixer.quit()
pygame.mixer.init(frequency=22050, size=-16, channels=2, buffer=512)  # size=-16: signed 16-bit samples
pygame.event.set_allowed(None)
pygame.event.set_allowed([pygame.KEYDOWN, pygame.QUIT, pygame.MOUSEBUTTONDOWN,
pygame.MOUSEBUTTONUP]
)
textHeight=18
font = pygame.font.Font(None, textHeight)
backCol = (160, 160, 160) ; lineCol = (128, 128, 0)
hiCol = (0, 255, 255)
def main():
global screen, lastIn, rows
initIO()
rows = len(inPins)
screen = pygame.display.set_mode([390, 34 + 40*rows], 0, 32)
init() ; pendPlay = [0]*rows
nowIn = [0]*rows; pendTime = [0.0]*rows
drawScreen()
while True: # repeat forever
checkForEvent()
for i in range(0, rows):
nowIn[i] = io.input(inPins[inPin[i]])
if lastIn[i] != nowIn[i]:
lastIn[i] = nowIn[i]
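                # trigNum decodes as: 0 = trigger off, 1 = fire when the
                # input goes low, 2 = fire when it goes high, 3 = fire on
                # any change (tmatch is forced to the new input value)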
tmatch = trigNum[i]-1 # match trigger
if tmatch == 2:
tmatch = nowIn[i]
if trigNum[i] != 0 and nowIn[i] == tmatch:
pendPlay[i] = soundFX[soundNumber[i]]
pendTime[i] = time.time() + delayTime[i]
for i in range(0, rows): # check what to play now
if pendTime[i] > 0.0 and time.time()>=pendTime[i]:
pendPlay[i].play() ; pendTime[i] = 0.0
def init():
    global incRect, decRect, icon, voiceRect
global inPin, soundNumber, delayTime, triggerRect
global lastIn, trigNum, trigIcon
lastIn = [0]*rows
loadResources()
icon=[pygame.image.load("icons/"+str(i)+".png").convert_alpha()
for i in range(0,2)
]
incRect = [pygame.Rect((0,0),(15,15))]*rows*3
decRect = [pygame.Rect((0,0),(15,15))]*rows*3
for j in range(0,3):
for i in range(0, rows):
incRect[i+j*rows] = pygame.Rect((76 + j*80, 30 + i*40),(15, 15))
decRect[i+j*rows] = pygame.Rect((76 + j*80, 50 + i*40),(15, 15))
triggerRect = [pygame.Rect((0, 0), (20, 20))]*rows
trigNum = [0]*rows
trigIcon = [pygame.image.load("icons/trig"+str(i)+".png").convert_alpha()
for i in range(0,4)
]
voiceRect = [pygame.Rect((0,0), (15,15))]*rows
for i in range(0, rows):
triggerRect[i] = pygame.Rect((10, 36 + 40*i,20, 20))
voiceRect[i] = pygame.Rect((268, 39 + i*40),(100, 20))
sounds = rows + len(soundNames)
inPin = [1]*rows ; soundNumber = [0]*sounds
for i in range(0, rows):
inPin[i] = i
for i in range(0, len(soundNames)):
soundNumber[i] = i
delayTime = [0.0]*rows
def initIO():
global inPins
inPins = [24, 23, 22, 27, 17, 4, 15, 14]
io.setmode(io.BCM); io.setwarnings(False)
io.setup(inPins, io.IN, pull_up_down = io.PUD_UP)
def loadResources():
global soundFX, soundNames
soundNames = ["owl", "Breaking Glass", "ComputerBeeps1",
"CymbalCrash", "Fairydust", "Dog1", "Zoop", "Ya", "Pop"
]
soundFX = [pygame.mixer.Sound("sounds/"+ soundNames[effect]+".wav")
for effect in range(0,len(soundNames))
]
def drawScreen():
screen.fill(backCol)
for i in range(0,len(incRect)): # increment / decrement icons
screen.blit(icon[0], (incRect[i].left,incRect[i].top))
pygame.draw.rect(screen, lineCol, incRect[i],1)
screen.blit(icon[1], (decRect[i].left, decRect[i].top))
pygame.draw.rect(screen, lineCol, decRect[i], 1)
for i in range(0,rows): # draw all triggers
screen.blit(trigIcon[trigNum[i]], (triggerRect[i].left,
triggerRect[i].top)
)
drawWords("Trigger", 5, 8, (0, 0, 0), backCol)
drawWords("GPIO", 70, 8, (0, 0, 0), backCol)
drawWords("Delay", 138, 8, (0, 0, 0), backCol)
drawWords("Sound", 218, 8, (0, 0, 0), backCol)
updateValues()
def updateValues():
for i in range(0,rows):
drawWords(str(inPins[inPin[i]]) + " ", 48, 39 + i*40, (0, 0, 0),
backCol
)
drawWords(" " + str(round(delayTime[i], 1)) + " ", 112, 39 + i*40,
(0, 0, 0), backCol
)
pygame.draw.rect(screen, backCol, voiceRect[i], 0)
drawWords(str(soundNames[soundNumber[i]]), 270, 39 + i*40, (0, 0, 0),
backCol
)
pygame.display.update()
def drawWords(words, x, y, col, backCol) :
textSurface = font.render(words, True, col, backCol)
textRect = textSurface.get_rect()
    textRect.left = x  # use textRect.right = x instead to right-align
textRect.top = y
screen.blit(textSurface, textRect)
return textRect
def handleMouse(pos): # look at mouse down
global pramClick, pramInc, trigClick
#print(pos)
trigClick = -1
for i in range(0, rows):
if triggerRect[i].collidepoint(pos) :
trigClick = i
pygame.draw.rect(screen, hiCol, triggerRect[i], 0)
pygame.display.update()
pramClick = -1
pramInc = 0
for i in range(0, len(incRect)):
if incRect[i].collidepoint(pos):
pramClick = i ; pramInc = 1
pygame.draw.rect(screen, hiCol, incRect[pramClick], 1)
pygame.display.update()
for i in range(0, len(decRect)):
if decRect[i].collidepoint(pos):
pramClick = i ; pramInc = -1
pygame.draw.rect(screen, hiCol, decRect[pramClick], 1)
pygame.display.update()
def handleMouseUp(pos): # look at mouse up
global soundNumber, delayTime, inPin
if trigClick != -1:
trigNum[trigClick] += 1
if trigNum[trigClick] > 3:
trigNum[trigClick] = 0
pygame.draw.rect(screen, backCol, triggerRect[trigClick], 0)
screen.blit(trigIcon[trigNum[trigClick]], (triggerRect[trigClick].left,
triggerRect[trigClick].top))
updateValues()
if pramClick != -1:
        if pramClick < rows:  # GPIO column
inPin[pramClick] += pramInc
inPin[pramClick] = constrain(inPin[pramClick], 0, rows-1)
        elif pramClick < rows*2:  # Delay column
delayTime[pramClick-rows] += (pramInc / 10)
delayTime[pramClick-rows] = constrain(delayTime[pramClick - rows],
0, 5
)
if delayTime[pramClick - rows] < 0.01:
delayTime[pramClick - rows] = 0
        elif pramClick < rows*3:  # Sound column
soundNumber[pramClick - rows*2] += pramInc
soundNumber[pramClick - rows*2] = constrain(soundNumber[pramClick
- rows*2], 0, len(soundNames)-1)
if pramInc !=0:
if pramInc < 0:
screen.blit(icon[1], (decRect[pramClick].left,
decRect[pramClick].top))
pygame.draw.rect(screen, lineCol, decRect[pramClick],1)
else:
screen.blit(icon[0], (incRect[pramClick].left,
incRect[pramClick].top))
pygame.draw.rect(screen, lineCol, incRect[pramClick], 1)
updateValues()
def constrain(val, min_val, max_val):
return min(max_val, max(min_val, val))
def terminate(): # close down the program
pygame.mixer.quit()
pygame.quit() # close pygame
os._exit(1)
def checkForEvent(): # see if we need to quit
event = pygame.event.poll()
if event.type == pygame.QUIT :
terminate()
if event.type == pygame.KEYDOWN :
if event.key == pygame.K_ESCAPE :
terminate()
if event.type == pygame.MOUSEBUTTONDOWN :
handleMouse(pygame.mouse.get_pos())
if event.type == pygame.MOUSEBUTTONUP :
handleMouseUp(pygame.mouse.get_pos())
if __name__ == '__main__':
main()
|
# This Source Code Form is subject to the terms of the Mozilla Public
# License, v. 2.0. If a copy of the MPL was not distributed with this
# file, You can obtain one at https://mozilla.org/MPL/2.0/.
import warnings
from datetime import datetime
import numpy as np
import scipy as sp
from os.path import isfile
import itertools as itools
from ..sile import add_sile, get_sile_class, sile_fh_open, sile_raise_write, SileError
from .sile import SileSiesta
from .._help import *
from sisl._internal import set_module
from sisl import constant
from sisl.unit.siesta import units
import sisl._array as _a
from sisl._indices import indices_only
from sisl.utils.ranges import list2str
from sisl.messages import SislError, info, warn
from sisl.utils.mathematics import fnorm
from .binaries import tshsSileSiesta, tsdeSileSiesta
from .binaries import dmSileSiesta, hsxSileSiesta, onlysSileSiesta
from .eig import eigSileSiesta
from .fc import fcSileSiesta
from .fa import faSileSiesta
from .siesta_grid import gridncSileSiesta
from .siesta_nc import ncSileSiesta
from .basis import ionxmlSileSiesta, ionncSileSiesta
from .orb_indx import orbindxSileSiesta
from .struct import structSileSiesta
from .xv import xvSileSiesta
from sisl import Orbital, SphericalOrbital, Atom, AtomGhost, Atoms
from sisl import Geometry, SuperCell, DynamicalMatrix
from sisl.utils.cmd import default_ArgumentParser, default_namespace
from sisl.utils.misc import merge_instances
from sisl.unit.siesta import unit_convert, unit_default, unit_group
__all__ = ['fdfSileSiesta']
_LOGICAL_TRUE = ['.true.', 'true', 'yes', 'y', 't']
_LOGICAL_FALSE = ['.false.', 'false', 'no', 'n', 'f']
_LOGICAL = _LOGICAL_FALSE + _LOGICAL_TRUE
Bohr2Ang = unit_convert('Bohr', 'Ang')
def _listify_str(arg):
if isinstance(arg, str):
return [arg]
return arg
def _track(method, msg):
if method.__self__.track:
info(f"{method.__self__.__class__.__name__}.{method.__name__}: {msg}")
def _track_file(method, f, msg=None):
if msg is None:
if f.is_file():
msg = f"reading file {f}"
else:
msg = f"could not find file {f}"
if method.__self__.track:
info(f"{method.__self__.__class__.__name__}.{method.__name__}: {msg}")
@set_module("sisl.io.siesta")
class fdfSileSiesta(SileSiesta):
""" FDF-input file
By supplying base you can reference files in other directories.
By default the ``base`` is the directory given in the file name.
Parameters
----------
filename: str
fdf file
mode : str, optional
opening mode, default to read-only
base : str, optional
base-directory to read output files from.
Examples
--------
>>> fdf = fdfSileSiesta('tmp/RUN.fdf') # reads output files in 'tmp/' folder
>>> fdf = fdfSileSiesta('tmp/RUN.fdf', base='.') # reads output files in './' folder
"""
def _setup(self, *args, **kwargs):
""" Setup the `fdfSileSiesta` after initialization """
self._comment = ['#', '!', ';']
# List of parent file-handles used while reading
# This is because fdf enables inclusion of other files
self._parent_fh = []
# Public key for printing information about where stuff comes from
self.track = kwargs.get("track", False)
def _pushfile(self, f):
if self.dir_file(f).is_file():
self._parent_fh.append(self.fh)
self.fh = self.dir_file(f).open(self._mode)
else:
warn(str(self) + f' is trying to include file: {f} but the file seems not to exist? Will disregard file!')
def _popfile(self):
if len(self._parent_fh) > 0:
self.fh.close()
self.fh = self._parent_fh.pop()
return True
return False
def _seek(self):
""" Closes all files, and starts over from beginning """
try:
while self._popfile():
pass
self.fh.seek(0)
except:
pass
@sile_fh_open()
def includes(self):
""" Return a list of all files that are *included* or otherwise necessary for reading the fdf file """
self._seek()
        # In FDF files, %include pulls other files into the input,
        # forming a tree structure of inputs
def add(f):
f = self.dir_file(f)
if f not in includes:
includes.append(f)
# List of includes
includes = []
l = self.readline()
while l != '':
ls = l.split()
if '%include' == ls[0].lower():
add(ls[1])
self._pushfile(ls[1])
elif '<' in ls:
# TODO, in principle the < could contain
# include if this line is not a %block.
add(ls[ls.index('<')+1])
l = self.readline()
while l == '':
# last line of file
if self._popfile():
l = self.readline()
else:
break
return includes
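    # Example (illustrative): for an fdf containing '%include sub.fdf' and
    # 'Atoms < atoms.fdf', includes() returns both files, resolved relative
    # to this file's directory.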
@sile_fh_open()
def _read_label(self, label):
""" Try and read the first occurence of a key
This will take care of blocks, labels and piped in labels
Parameters
----------
label : str
label to find in the fdf file
"""
self._seek()
def tolabel(label):
return label.lower().replace('_', '').replace('-', '').replace('.', '')
labell = tolabel(label)
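        # e.g. tolabel('MD.TypeOfRun') == tolabel('md_type-of-run') ==
        # 'mdtypeofrun', so fdf keys match case- and separator-insensitively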
def valid_line(line):
ls = line.strip()
if len(ls) == 0:
return False
return not (ls[0] in self._comment)
def process_line(line):
# Split line by spaces
ls = line.split()
if len(ls) == 0:
return None
# Make a lower equivalent of ls
lsl = list(map(tolabel, ls))
# Check if there is a pipe in the line
if '<' in lsl:
idx = lsl.index('<')
# Now there are two cases
# 1. It is a block, in which case
# the full block is piped into the label
# %block Label < file
if lsl[0] == '%block' and lsl[1] == labell:
# Correct line found
# Read the file content, removing any empty and/or comment lines
                    with self.dir_file(ls[3]).open('r') as bfh:
                        lines = bfh.readlines()
                    return [l.strip() for l in lines if valid_line(l)]
# 2. There are labels that should be read from a subsequent file
# Label1 Label2 < other.fdf
if labell in lsl[:idx]:
# Valid line, read key from other.fdf
return fdfSileSiesta(self.dir_file(ls[idx+1]), base=self._directory)._read_label(label)
# It is not in this line, either key is
# on the RHS of <, or the key could be "block". Say.
return None
# The last case is if the label is the first word on the line
# In that case we have found what we are looking for
if lsl[0] == labell:
return (' '.join(ls[1:])).strip()
elif lsl[0] == '%block':
if lsl[1] == labell:
# Read in the block content
lines = []
# Now read lines
l = self.readline().strip()
while not tolabel(l).startswith('%endblock'):
if len(l) > 0:
lines.append(l)
l = self.readline().strip()
return lines
elif lsl[0] == '%include':
# We have to open a new file
self._pushfile(ls[1])
return None
# Perform actual reading of line
l = self.readline().split('#')[0]
if len(l) == 0:
return None
l = process_line(l)
while l is None:
l = self.readline().split('#')[0]
if len(l) == 0:
if not self._popfile():
return None
l = process_line(l)
return l
@classmethod
def _type(cls, value):
""" Determine the type by the value
Parameters
----------
value : str or list or numpy.ndarray
the value to check for fdf-type
"""
if value is None:
return None
if isinstance(value, list):
# A block, %block ...
return 'B'
if isinstance(value, np.ndarray):
# A list, Label [...]
return 'a'
        # Grab the entire line (besides the key)
values = value.split()
if len(values) == 1:
fdf = values[0].lower()
if fdf in _LOGICAL:
# logical
return 'b'
try:
float(fdf)
if '.' in fdf:
# a real number (otherwise an integer)
return 'r'
return 'i'
except:
pass
            # fall through and return 'n' (a plain name)
elif len(values) == 2:
# possibly a physical value
try:
float(values[0])
return 'p'
except:
pass
return 'n'
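    # Type codes returned above: 'B' = block, 'a' = array/list value,
    # 'b' = logical, 'r' = real, 'i' = integer, 'p' = physical quantity
    # (value + unit), 'n' = plain name/string.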
@sile_fh_open()
def type(self, label):
""" Return the type of the fdf-keyword
Parameters
----------
label : str
the label to look-up
"""
self._seek()
return self._type(self._read_label(label))
@sile_fh_open()
def get(self, label, default=None, unit=None, with_unit=False):
""" Retrieve fdf-keyword from the file
Parameters
----------
label : str
the fdf-label to search for
default : optional
if the label is not found, this will be the returned value (default to ``None``)
unit : str, optional
unit of the physical quantity to return
with_unit : bool, optional
whether the physical quantity gets returned with the found unit in the fdf file.
Returns
-------
value : the value of the fdf-label. If the label is a block, a `list` is returned, for
a real value a `float` (or if the default is of `float`), for an integer, an
`int` is returned.
unit : if `with_unit` is true this will contain the associated unit if it is specified
Examples
--------
>>> print(open(...).readlines())
LabeleV 1. eV
LabelRy 1. Ry
Label name
FakeInt 1
%block Hello
line 1
line2
%endblock
>>> fdf.get('LabeleV') == 1. # default unit is eV
>>> fdf.get('LabelRy') == unit.siesta.unit_convert('Ry', 'eV')
>>> fdf.get('LabelRy', unit='Ry') == 1.
>>> fdf.get('LabelRy', with_unit=True) == (1., 'Ry')
>>> fdf.get('FakeInt', '0') == '1'
>>> fdf.get('LabeleV', with_unit=True) == (1., 'eV')
>>> fdf.get('Label', with_unit=True) == 'name' # no unit present on line
>>> fdf.get('Hello') == ['line 1', 'line2']
"""
# Try and read a line
value = self._read_label(label)
# Simply return the default value if not found
if value is None:
return default
# Figure out what it is
t = self._type(value)
# We will only do something if it is a real, int, or physical.
# Else we simply return, as-is
if t == 'r':
if default is None:
return float(value)
t = type(default)
return t(value)
elif t == 'i':
if default is None:
return int(value)
t = type(default)
return t(value)
elif t == 'p':
value = value.split()
if with_unit:
# Simply return, as is. Let the user do whatever.
return float(value[0]), value[1]
if unit is None:
default = unit_default(unit_group(value[1]))
else:
if unit_group(value[1]) != unit_group(unit):
raise ValueError(f"Requested unit for {label} is not the same type. "
"Found/Requested {value[1]}/{unit}'")
default = unit
return float(value[0]) * unit_convert(value[1], default)
elif t == 'b':
return value.lower() in _LOGICAL_TRUE
return value
def set(self, key, value, keep=True):
""" Add the key and value to the FDF file
Parameters
----------
key : str
the fdf-key value to be set in the fdf file
value : str or list of str
the value of the string. If a `str` is passed a regular
fdf-key is used, if a `list` it will be a %block.
keep : bool, optional
whether old flags will be kept in the fdf file. In this case
a time-stamp will be written to show when the key was overwritten.
"""
# To set a key we first need to figure out if it is
# already present, if so, we will add the new key, just above
# the already present key.
top_file = str(self.file)
# 1. find the old value, and thus the file in which it is found
with self:
try:
self.get(key)
# Get the file of the containing data
top_file = str(self.fh.name)
except:
pass
# Ensure that all files are closed
self._seek()
# Now we should re-read and edit the file
        with open(top_file, 'r') as rfh:
            lines = rfh.readlines()
def write(fh, value):
if value is None:
return
fh.write(self.print(key, value))
if isinstance(value, str) and '\n' not in value:
fh.write('\n')
# Now loop, write and edit
do_write = True
lkey = key.lower()
with open(top_file, 'w') as fh:
for line in lines:
if self.line_has_key(line, lkey, case=False) and do_write:
write(fh, value)
if keep:
fh.write('# Old value ({})\n'.format(datetime.today().strftime('%Y-%m-%d %H:%M')))
fh.write(f'{line}')
do_write = False
else:
fh.write(line)
if do_write:
write(fh, value)
@staticmethod
def print(key, value):
""" Return a string which is pretty-printing the key+value """
if isinstance(value, list):
s = f'%block {key}'
# if the value has any new-values
has_nl = False
for v in value:
if '\n' in v:
has_nl = True
break
if has_nl:
# copy list, we are going to change it
value = value[:]
# do not skip to next line in next segment
value[-1] = value[-1].replace('\n', '')
s += '\n{}\n'.format(''.join(value))
else:
s += '\n{}\n'.format('\n'.join(value))
s += f'%endblock {key}'
else:
s = f'{key} {value}'
return s
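    # Example (illustrative): print('LatticeConstant', '10. Ang') returns
    # 'LatticeConstant 10. Ang'; a list value is rendered as a
    # '%block ... %endblock' section instead.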
@sile_fh_open()
def write_supercell(self, sc, fmt='.8f', *args, **kwargs):
""" Writes the supercell
Parameters
----------
sc : SuperCell
supercell object to write
fmt : str, optional
precision used to store the lattice vectors
unit : {'Ang', 'Bohr'}
the unit used when writing the data.
"""
sile_raise_write(self)
fmt_str = ' {{0:{0}}} {{1:{0}}} {{2:{0}}}\n'.format(fmt)
unit = kwargs.get('unit', 'Ang').capitalize()
conv = 1.
if unit in ['Ang', 'Bohr']:
conv = unit_convert('Ang', unit)
else:
unit = 'Ang'
# Write out the cell
self._write(f'LatticeConstant 1.0 {unit}\n')
self._write('%block LatticeVectors\n')
self._write(fmt_str.format(*sc.cell[0, :] * conv))
self._write(fmt_str.format(*sc.cell[1, :] * conv))
self._write(fmt_str.format(*sc.cell[2, :] * conv))
self._write('%endblock LatticeVectors\n')
@sile_fh_open()
def write_geometry(self, geometry, fmt='.8f', *args, **kwargs):
""" Writes the geometry
Parameters
----------
geometry : Geometry
geometry object to write
fmt : str, optional
precision used to store the atomic coordinates
unit : {'Ang', 'Bohr', 'fractional', 'frac'}
the unit used when writing the data.
fractional and frac are the same.
"""
sile_raise_write(self)
self.write_supercell(geometry.sc, fmt, *args, **kwargs)
self._write('\n')
self._write(f'NumberOfAtoms {geometry.na}\n')
unit = kwargs.get('unit', 'Ang').capitalize()
is_fractional = unit in ('Frac', 'Fractional')
if is_fractional:
self._write('AtomicCoordinatesFormat Fractional\n')
else:
conv = unit_convert('Ang', unit)
self._write(f'AtomicCoordinatesFormat {unit}\n')
self._write('%block AtomicCoordinatesAndAtomicSpecies\n')
n_species = len(geometry.atoms.atom)
# Count for the species
if is_fractional:
xyz = geometry.fxyz
else:
xyz = geometry.xyz * conv
if fmt[0] == '.':
# Correct for a "same" length of all coordinates
c_max = len(str((f'{{:{fmt}}}').format(xyz.max())))
c_min = len(str((f'{{:{fmt}}}').format(xyz.min())))
fmt = str(max(c_min, c_max)) + fmt
fmt_str = ' {{3:{0}}} {{4:{0}}} {{5:{0}}} {{0}} # {{1:{1}d}}: {{2}}\n'.format(fmt, len(str(len(geometry))))
for ia, a, isp in geometry.iter_species():
self._write(fmt_str.format(isp + 1, ia + 1, a.tag, *xyz[ia, :]))
self._write('%endblock AtomicCoordinatesAndAtomicSpecies\n\n')
# Write out species
# First swap key and value
self._write(f'NumberOfSpecies {n_species}\n')
self._write('%block ChemicalSpeciesLabel\n')
for i, a in enumerate(geometry.atoms.atom):
if isinstance(a, AtomGhost):
self._write(' {} {} {}\n'.format(i + 1, -a.Z, a.tag))
else:
self._write(' {} {} {}\n'.format(i + 1, a.Z, a.tag))
self._write('%endblock ChemicalSpeciesLabel\n')
_write_block = True
def write_block(atoms, append, write_block):
if write_block:
self._write('\n# Constraints\n%block Geometry.Constraints\n')
write_block = False
self._write(f' atom [{atoms}]{append}\n')
return write_block
for d in range(4):
append = {0: '', 1: ' 1. 0. 0.', 2: ' 0. 1. 0.', 3: ' 0. 0. 1.'}.get(d)
n = 'CONSTRAIN' + {0: '', 1: '-x', 2: '-y', 3: '-z'}.get(d)
if n in geometry.names:
idx = list2str(geometry.names[n] + 1).replace('-', ' -- ')
if len(idx) > 200:
info(f"{str(self)}.write_geometry will not write the constraints for {n} (too long line).")
else:
_write_block = write_block(idx, append, _write_block)
if not _write_block:
self._write('%endblock\n')
@staticmethod
def _SpGeom_replace_geom(spgeom, geometry):
""" Replace all atoms in spgeom with the atom in geometry while retaining the number of orbitals
Currently we need some way of figuring out whether the number of atoms and orbitals are
consistent.
Parameters
----------
spgeom : SparseGeometry
the sparse object with attached geometry
geometry : Geometry
geometry to grab atoms from

        Notes
        -----
        The full geometry is replaced when ``spgeom.na != geometry.na`` and
        ``spgeom.no == geometry.no``; this is required when `spgeom` does not
        contain information about atoms.
"""
if spgeom.na != geometry.na and spgeom.no == geometry.no:
            # In this case we cannot compare the per-atom number of orbitals.
# I.e. we suspect the incoming geometry to be correct.
spgeom._geometry = geometry
return True
elif spgeom.na != geometry.na:
warn('cannot replace geometry due to insufficient information regarding number of '
'atoms and orbitals, ensuring correct geometry failed...')
no_no = spgeom.no == geometry.no
# Loop and make sure the number of orbitals is consistent
for a, idx in geometry.atoms.iter(True):
if len(idx) == 0:
continue
Sa = spgeom.geometry.atoms[idx[0]]
if Sa.no != a.no:
# Make sure the atom we replace with retains the same information
# *except* the number of orbitals.
a = a.__class__(a.Z, Sa.orbital, mass=a.mass, tag=a.tag)
spgeom.geometry.atoms.replace(idx, a)
spgeom.geometry.reduce()
return no_no
def read_supercell_nsc(self, *args, **kwargs):
""" Read supercell size using any method available
Raises
------
SislWarning if none of the files can be read
"""
order = _listify_str(kwargs.pop('order', ['nc', 'ORB_INDX']))
for f in order:
v = getattr(self, '_r_supercell_nsc_{}'.format(f.lower()))(*args, **kwargs)
if v is not None:
_track(self.read_supercell_nsc, f"found file {f}")
return v
warn('number of supercells could not be read from output files. Assuming molecule cell '
'(no supercell connections)')
return _a.onesi(3)
def _r_supercell_nsc_nc(self, *args, **kwargs):
f = self.dir_file(self.get('SystemLabel', default='siesta') + '.nc')
_track_file(self._r_supercell_nsc_nc, f)
if f.is_file():
return ncSileSiesta(f).read_supercell_nsc()
return None
def _r_supercell_nsc_orb_indx(self, *args, **kwargs):
f = self.dir_file(self.get('SystemLabel', default='siesta') + '.ORB_INDX')
_track_file(self._r_supercell_nsc_orb_indx, f)
if f.is_file():
return orbindxSileSiesta(f).read_supercell_nsc()
return None
def read_supercell(self, output=False, *args, **kwargs):
""" Returns SuperCell object by reading fdf or Siesta output related files.
One can limit the tried files to only one file by passing
only a single file ending.
Parameters
----------
output: bool, optional
whether to read supercell from output files (default to read from
the fdf file).
order: list of str, optional
the order of which to try and read the supercell.
By default this is ``['XV', 'nc', 'fdf']`` if `output` is true.
If `order` is present `output` is disregarded.
Examples
--------
>>> fdf = get_sile('RUN.fdf')
>>> fdf.read_supercell() # read from fdf
>>> fdf.read_supercell(True) # read from [XV, nc, fdf]
>>> fdf.read_supercell(order=['nc']) # read from [nc]
>>> fdf.read_supercell(True, order=['nc']) # read from [nc]
"""
if output:
order = _listify_str(kwargs.pop('order', ['XV', 'nc', 'fdf']))
else:
order = _listify_str(kwargs.pop('order', ['fdf']))
for f in order:
v = getattr(self, '_r_supercell_{}'.format(f.lower()))(*args, **kwargs)
if v is not None:
_track(self.read_supercell, f"found file {f}")
return v
return None
def _r_supercell_fdf(self, *args, **kwargs):
""" Returns `SuperCell` object from the FDF file """
s = self.get('LatticeConstant', unit='Ang')
if s is None:
raise SileError('Could not find LatticeConstant in file')
# Read in cell
cell = _a.emptyd([3, 3])
lc = self.get('LatticeVectors')
if lc:
for i in range(3):
cell[i, :] = [float(k) for k in lc[i].split()[:3]]
else:
lc = self.get('LatticeParameters')
if lc:
tmp = [float(k) for k in lc[0].split()[:6]]
cell = SuperCell.tocell(*tmp)
if lc is None:
            # the fdf file contains neither the LatticeVectors nor the LatticeParameters block
raise SileError('Could not find LatticeVectors or LatticeParameters block in file')
cell *= s
# When reading from the fdf, the warning should be suppressed
with warnings.catch_warnings():
warnings.simplefilter("ignore")
nsc = self.read_supercell_nsc()
return SuperCell(cell, nsc=nsc)
def _r_supercell_nc(self):
# Read supercell from <>.nc file
f = self.dir_file(self.get('SystemLabel', default='siesta') + '.nc')
_track_file(self._r_supercell_nc, f)
if f.is_file():
return ncSileSiesta(f).read_supercell()
return None
def _r_supercell_xv(self, *args, **kwargs):
""" Returns `SuperCell` object from the XV file """
f = self.dir_file(self.get('SystemLabel', default='siesta') + '.XV')
_track_file(self._r_supercell_xv, f)
if f.is_file():
nsc = self.read_supercell_nsc()
sc = xvSileSiesta(f).read_supercell()
sc.set_nsc(nsc)
return sc
return None
def _r_supercell_struct(self, *args, **kwargs):
""" Returns `SuperCell` object from the STRUCT files """
sc = None
for end in ['STRUCT_NEXT_ITER', 'STRUCT_OUT', 'STRUCT_IN']:
f = self.dir_file(self.get('SystemLabel', default='siesta') + f'.{end}')
_track_file(self._r_supercell_struct, f)
if f.is_file():
nsc = self.read_supercell_nsc()
sc = structSileSiesta(f).read_supercell()
sc.set_nsc(nsc)
break
return sc
def _r_supercell_tshs(self, *args, **kwargs):
f = self.dir_file(self.get('SystemLabel', default='siesta') + '.TSHS')
_track_file(self._r_supercell_tshs, f)
if f.is_file():
return tshsSileSiesta(f).read_supercell()
return None
def _r_supercell_onlys(self, *args, **kwargs):
f = self.dir_file(self.get('SystemLabel', default='siesta') + '.onlyS')
_track_file(self._r_supercell_onlys, f)
if f.is_file():
return onlysSileSiesta(f).read_supercell()
return None
def read_force(self, *args, **kwargs):
""" Read forces from the output of the calculation (forces are not defined in the input)
Parameters
----------
order : list of str, optional
the order of the forces we are trying to read, default to ``['FA', 'nc']``
Returns
-------
numpy.ndarray : vector with forces for each of the atoms, along each Cartesian direction
"""
order = _listify_str(kwargs.pop('order', ['FA', 'nc']))
for f in order:
v = getattr(self, '_r_force_{}'.format(f.lower()))(*args, **kwargs)
if v is not None:
if self.track:
info(f"{self.file}(read_force) found in file={f}")
return v
return None
def _r_force_fa(self, *args, **kwargs):
""" Read forces from the FA file """
f = self.dir_file(self.get('SystemLabel', default='siesta') + '.FA')
_track_file(self._r_force_fa, f)
if f.is_file():
return faSileSiesta(f).read_force()
return None
def _r_force_fac(self, *args, **kwargs):
""" Read forces from the FAC file """
f = self.dir_file(self.get('SystemLabel', default='siesta') + '.FAC')
_track_file(self._r_force_fac, f)
if f.is_file():
return faSileSiesta(f).read_force()
return None
def _r_force_tsfa(self, *args, **kwargs):
""" Read forces from the TSFA file """
f = self.dir_file(self.get('SystemLabel', default='siesta') + '.TSFA')
_track_file(self._r_force_tsfa, f)
if f.is_file():
return faSileSiesta(f).read_force()
return None
def _r_force_tsfac(self, *args, **kwargs):
""" Read forces from the TSFAC file """
f = self.dir_file(self.get('SystemLabel', default='siesta') + '.TSFAC')
_track_file(self._r_force_tsfac, f)
if f.is_file():
return faSileSiesta(f).read_force()
return None
def _r_force_nc(self, *args, **kwargs):
""" Read forces from the nc file """
f = self.dir_file(self.get('SystemLabel', default='siesta') + '.nc')
_track_file(self._r_force_nc, f)
if f.is_file():
return ncSileSiesta(f).read_force()
return None
def read_force_constant(self, *args, **kwargs):
""" Read force constant from the output of the calculation
Returns
-------
force_constant : numpy.ndarray
vector ``[*, 3, 2, *, 3]`` with force constant element for each of the atomic displacements
"""
order = _listify_str(kwargs.pop('order', ['nc', 'FC']))
for f in order:
v = getattr(self, '_r_force_constant_{}'.format(f.lower()))(*args, **kwargs)
if v is not None:
if self.track:
info(f"{self.file}(read_force_constant) found in file={f}")
return v
return None
def _r_force_constant_nc(self, *args, **kwargs):
f = self.dir_file(self.get('SystemLabel', default='siesta') + '.nc')
_track_file(self._r_force_constant_nc, f)
if f.is_file():
            nc = ncSileSiesta(f)
            if 'FC' not in nc.groups:
                return None
            return nc.read_force_constant()
return None
def _r_force_constant_fc(self, *args, **kwargs):
f = self.dir_file(self.get('SystemLabel', default='siesta') + '.FC')
_track_file(self._r_force_constant_fc, f)
if f.is_file():
na = self.get('NumberOfAtoms', default=None)
return fcSileSiesta(f).read_force_constant(na=na)
return None
def read_fermi_level(self, *args, **kwargs):
""" Read fermi-level from output of the calculation
Parameters
----------
order: list of str, optional
the order of which to try and read the fermi-level.
By default this is ``['nc', 'TSDE', 'TSHS', 'EIG']``.
Returns
-------
Ef : float
fermi-level
"""
order = _listify_str(kwargs.pop('order', ['nc', 'TSDE', 'TSHS', 'EIG']))
for f in order:
v = getattr(self, '_r_fermi_level_{}'.format(f.lower()))(*args, **kwargs)
if v is not None:
if self.track:
info(f"{self.file}(read_fermi_level) found in file={f}")
return v
return None
def _r_fermi_level_nc(self):
f = self.dir_file(self.get('SystemLabel', default='siesta') + '.nc')
_track_file(self._r_fermi_level_nc, f)
if isfile(f):
return ncSileSiesta(f).read_fermi_level()
return None
def _r_fermi_level_tsde(self):
f = self.dir_file(self.get('SystemLabel', default='siesta') + '.TSDE')
_track_file(self._r_fermi_level_tsde, f)
if isfile(f):
return tsdeSileSiesta(f).read_fermi_level()
return None
def _r_fermi_level_tshs(self):
f = self.dir_file(self.get('SystemLabel', default='siesta') + '.TSHS')
_track_file(self._r_fermi_level_tshs, f)
if isfile(f):
return tshsSileSiesta(f).read_fermi_level()
return None
def _r_fermi_level_eig(self):
f = self.dir_file(self.get('SystemLabel', default='siesta') + '.EIG')
_track_file(self._r_fermi_level_eig, f)
if isfile(f):
return eigSileSiesta(f).read_fermi_level()
return None
def read_dynamical_matrix(self, *args, **kwargs):
""" Read dynamical matrix from output of the calculation
Generally the mass is stored in the basis information output,
but for dynamical matrices it makes sense to let the user control this,
e.g. through the fdf file.
By default the mass will be read from the AtomicMass key in the fdf file
and _not_ from the basis set information.
Parameters
----------
order: list of str, optional
the order of which to try and read the dynamical matrix.
By default this is ``['nc', 'FC']``.
cutoff_dist : float, optional
cutoff value for the distance of the force-constants (everything farther than
`cutoff_dist` will be set to 0 Ang). Default, no cutoff.
cutoff : float, optional
absolute values below the cutoff are considered 0. Defaults to 0. eV/Ang**2.
trans_inv : bool, optional
if true (default), the force-constant matrix will be fixed so that translational
invariance will be enforced
sum0 : bool, optional
if true (default), the sum of forces on atoms for each displacement will be
forced to 0.
hermitian: bool, optional
if true (default), the returned dynamical matrix will be hermitian
Returns
-------
dynamic_matrix : DynamicalMatrix
the dynamical matrix
"""
order = _listify_str(kwargs.pop('order', ['nc', 'FC']))
for f in order:
v = getattr(self, '_r_dynamical_matrix_{}'.format(f.lower()))(*args, **kwargs)
if v is not None:
if self.track:
info(f"{self.file}(read_dynamical_matrix) found in file={f}")
return v
return None
def _r_dynamical_matrix_fc(self, *args, **kwargs):
FC = self.read_force_constant(*args, order="FC", **kwargs)
if FC is None:
return None
geom = self.read_geometry()
basis_fdf = self.read_basis(order="fdf")
for i, atom in enumerate(basis_fdf):
geom.atoms.replace(i, atom)
# Get list of FC atoms
FC_atoms = _a.arangei(self.get('MD.FCFirst', default=1) - 1, self.get('MD.FCLast', default=geom.na))
return self._dynamical_matrix_from_fc(geom, FC, FC_atoms, *args, **kwargs)
def _r_dynamical_matrix_nc(self, *args, **kwargs):
FC = self.read_force_constant(*args, order=['nc'], **kwargs)
if FC is None:
return None
geom = self.read_geometry(order=['nc'])
basis_fdf = self.read_basis(order="fdf")
for i, atom in enumerate(basis_fdf):
geom.atoms.replace(i, atom)
# Get list of FC atoms
# TODO change to read in from the NetCDF file
FC_atoms = _a.arangei(self.get('MD.FCFirst', default=1) - 1, self.get('MD.FCLast', default=geom.na))
return self._dynamical_matrix_from_fc(geom, FC, FC_atoms, *args, **kwargs)
def _dynamical_matrix_from_fc(self, geom, FC, FC_atoms, *args, **kwargs):
# We have the force constant matrix.
# Now handle it...
# FC(OLD) = (n_displ, 3, 2, na, 3)
# FC(NEW) = (n_displ, 3, na, 3)
# In fact, after averaging this becomes the Hessian
FC = FC.sum(axis=2) * 0.5
na_full = FC.shape[2]
hermitian = kwargs.get("hermitian", True)
# Figure out the "original" periodic directions
periodic = geom.nsc > 1
# Create conversion from eV/Ang^2 to correct units
# Further down we are multiplying with [1 / amu]
scale = constant.hbar / units('Ang', 'm') / units('eV amu', 'J kg') ** 0.5
# Cut-off too small values
fc_cut = kwargs.get('cutoff', 0.)
FC = np.where(np.fabs(FC) > fc_cut, FC, 0.)
# Convert the force constant such that a diagonalization returns eV ^ 2
# FC is in [eV / Ang^2]
# Convert the geometry to contain 3 orbitals per atom (x, y, z)
R = kwargs.get('cutoff_dist', -2.)
orbs = [Orbital(R / 2, tag=tag) for tag in 'xyz']
with warnings.catch_warnings():
warnings.simplefilter("ignore")
for atom, _ in geom.atoms.iter(True):
new_atom = atom.__class__(atom.Z, orbs, mass=atom.mass, tag=atom.tag)
geom.atoms.replace(atom, new_atom)
# Figure out the supercell indices
# if the displaced atoms equals the length of the geometry
# it means we are not using a supercell.
supercell = kwargs.get('supercell', len(geom) != len(FC_atoms))
if supercell is False:
supercell = [1] * 3
elif supercell is True:
_, supercell = geom.as_primary(FC.shape[0], ret_super=True)
info("{}.read_dynamical_matrix(FC) guessed on a [{}, {}, {}] "
"supercell calculation.".format(str(self), *supercell))
# Convert to integer array
supercell = _a.asarrayi(supercell)
# Reshape to supercell
FC.shape = (FC.shape[0], 3, *supercell, -1, 3)
na_fc = len(FC_atoms)
assert FC.shape[0] == len(FC_atoms)
assert FC.shape[5] == len(geom) // np.prod(supercell)
# Now we are in a problem since the tiling of the geometry
# is not necessarily in x, y, z order.
# Say for users who did:
# geom.tile(*, 2).tile(*, 1).tile(*, 0).write(...)
# then we need to pivot the data to be consistent with the
# supercell information
if np.any(supercell > 1):
# Re-arange FC before we use _fc_correct
# Now we need to figure out how the atoms are laid out.
# It *MUST* either be repeated or tiled (preferentially tiled).
# We have an actual supercell. Lets try and fix it.
# First lets recreate the smallest geometry
sc = geom.sc.cell.copy()
sc[0, :] /= supercell[0]
sc[1, :] /= supercell[1]
sc[2, :] /= supercell[2]
# Ensure nsc is at least an odd number, later down we will symmetrize the FC matrix
nsc = supercell + (supercell + 1) % 2
if R > 0:
# Correct for the optional radius
sc_norm = fnorm(sc)
# R is already "twice" the "orbital" range
nsc_R = 1 + 2 * np.ceil(R / sc_norm).astype(np.int32)
for i in range(3):
nsc[i] = min(nsc[i], nsc_R[i])
del nsc_R
# Construct the minimal unit-cell geometry
sc = SuperCell(sc, nsc=nsc)
# TODO check that the coordinates are in the cell
geom_small = Geometry(geom.xyz[FC_atoms], geom.atoms[FC_atoms], sc)
# Convert the big geometry's coordinates to fractional coordinates of the small unit-cell.
isc_xyz = (geom.xyz.dot(geom_small.sc.icell.T) -
                       np.tile(geom_small.fxyz, (np.prod(supercell), 1)))
axis_tiling = []
offset = len(geom_small)
for _ in (supercell > 1).nonzero()[0]:
first_isc = (np.around(isc_xyz[FC_atoms + offset, :]) == 1.).sum(0)
axis_tiling.append(np.argmax(first_isc))
# Fix the offset and wrap-around
offset = (offset * supercell[axis_tiling[-1]]) % na_full
for i in range(3):
                if i not in axis_tiling:
axis_tiling.append(i)
# Now we have the tiling operation, check it sort of matches
geom_tile = geom_small.copy()
for axis in axis_tiling:
geom_tile = geom_tile.tile(supercell[axis], axis)
# Proximity check of 0.01 Ang (TODO add this as an argument)
for ax in range(3):
daxis = geom_tile.xyz[:, ax] - geom.xyz[:, ax]
if not np.allclose(daxis, daxis[0], rtol=0., atol=0.01):
raise SislError(f"{str(self)}.read_dynamical_matrix(FC) could "
"not figure out the tiling method for the supercell")
# Convert the FC matrix to a "rollable" matrix
# This will make it easier to symmetrize
# 0. displaced atoms
# 1. x, y, z (displacements)
# 2. tile-axis_tiling[0]
# 3. tile-axis_tiling[1]
# 4. tile-axis_tiling[2]
# 5. na
# 6. x, y, z (force components)
            # the memory order of FC is the reverse of axis_tiling
            # (contiguous arrays), so reverse it
axis_tiling.reverse()
FC.shape = (na_fc, 3, *supercell[axis_tiling], -1, 3)
# now ensure we have the correct order of the supercell
# If the input supercell is
# [-2] [-1] [0] [1] [2]
# we need to convert it to
# [0] [1] [2] [3] [4] [5]
isc_xyz.shape = (*supercell[axis_tiling], na_fc, 3)
for axis in axis_tiling:
nroll = isc_xyz[..., axis].min()
inroll = int(round(nroll))
if inroll != 0:
# offset axis by 2 due to (na_fc, 3, ...)
FC = np.roll(FC, inroll, axis=axis + 2)
FC_atoms -= FC_atoms.min()
# Now swap the [2, 3, 4] dimensions so that we get in order of lattice vectors
# x, y, z
FC = np.transpose(FC, (0, 1, *(axis_tiling.index(i)+2 for i in range(3)), 5, 6))
del axis_tiling
# Now FC is sorted according to the supercell tiling
# TODO this will probably fail if: FC_atoms.size != FC.shape[5]
from ._help import _fc_correct
FC = _fc_correct(FC, trans_inv=kwargs.get("trans_inv", True),
sum0=kwargs.get("sum0", True),
hermitian=hermitian)
# Remove ghost-atoms or atoms with 0 mass!
# TODO check if ghost-atoms should be taken into account in _fc_correct
idx = (geom.atoms.mass == 0.).nonzero()[0]
if len(idx) > 0:
FC = np.delete(FC, idx, axis=5)
geom = geom.remove(idx)
geom.set_nsc([1] * 3)
raise NotImplementedError(f"{self}.read_dynamical_matrix could not reduce geometry "
"since there are atoms with 0 mass.")
# Now we can build the dynamical matrix (it will always be real)
na = len(geom)
if np.all(supercell <= 1):
# also catches supercell == 0
D = sp.sparse.lil_matrix((geom.no, geom.no), dtype=np.float64)
FC = np.squeeze(FC, axis=(2, 3, 4))
# Instead of doing the sqrt in all D = FC (below) we do it here
m = scale / geom.atoms.mass ** 0.5
FC *= m[FC_atoms].reshape(-1, 1, 1, 1) * m.reshape(1, 1, -1, 1)
j_FC_atoms = FC_atoms
idx = _a.arangei(len(FC_atoms))
for ia, fia in enumerate(FC_atoms):
if R > 0:
# find distances between the other atoms to cut-off the distance
idx = geom.close(fia, R=R, atoms=FC_atoms)
idx = indices_only(FC_atoms, idx)
j_FC_atoms = FC_atoms[idx]
for ja, fja in zip(idx, j_FC_atoms):
D[ia*3:(ia+1)*3, ja*3:(ja+1)*3] = FC[ia, :, fja, :]
else:
geom = geom_small
if np.any(np.diff(FC_atoms) != 1):
raise SislError(f"{self}.read_dynamical_matrix(FC) requires the FC atoms to be consecutive!")
# Re-order FC matrix so the FC-atoms are first
if FC.shape[0] != FC.shape[5]:
ordered = _a.arangei(FC.shape[5])
ordered = np.concatenate((FC_atoms, np.delete(ordered, FC_atoms)))
FC = FC[:, :, :, :, :, ordered, :]
FC_atoms = _a.arangei(len(FC_atoms))
if FC_atoms[0] != 0:
# TODO we could roll the axis such that the displaced atoms moves into the
# first elements
raise SislError(f"{self}.read_dynamical_matrix(FC) requires the displaced atoms to start from 1!")
# After having done this we can easily mass scale all FC components
m = scale / geom.atoms.mass ** 0.5
FC *= m.reshape(-1, 1, 1, 1, 1, 1, 1) * m.reshape(1, 1, 1, 1, 1, -1, 1)
# Check whether we need to "halve" the equivalent supercell
# This will be present in calculations done on an even number of supercells.
# I.e. for 4 supercells
# [0] [1] [2] [3]
# where in the supercell approach:
# *[2] [3] [0] [1] *[2]
# I.e. since we are double counting [2] we will halve it.
# This is not *exactly* true because, depending on the range, one should do the symmetry operations.
# However the FC does not contain such symmetry considerations.
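# Example: supercell[i] == 4 gives halve_idx == 2; the cell at index 2 is
# reached from both the +2 and -2 lattice vectors and is therefore scaled by 0.5.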
for i in range(3):
if supercell[i] % 2 == 1:
# We don't need to do anything
continue
# Figure out the supercell to halve
halve_idx = supercell[i] // 2
if i == 0:
FC[:, :, halve_idx, :, :, :, :] *= 0.5
elif i == 1:
FC[:, :, :, halve_idx, :, :, :] *= 0.5
else:
FC[:, :, :, :, halve_idx, :, :] *= 0.5
# Now create the dynamical matrix
# Currently this will be in lil_matrix (changed in the end)
D = sp.sparse.lil_matrix((geom.no, geom.no_s), dtype=np.float64)
# When x, y, z are negative we simply look-up from the back of the array
# which is exactly what is required
isc_off = geom.sc.isc_off
nxyz, na = geom.no, geom.na
dist = geom.rij
# Now take all positive supercell connections (including inner cell)
nsc = geom.nsc // 2
list_nsc = [range(-x, x + 1) for x in nsc]
iter_FC_atoms = _a.arangei(len(FC_atoms))
iter_j_FC_atoms = iter_FC_atoms
for x, y, z in itools.product(*list_nsc):
isc = isc_off[x, y, z]
aoff = isc * na
joff = isc * nxyz
for ia in iter_FC_atoms:
# Reduce second loop based on radius cutoff
if R > 0:
iter_j_FC_atoms = iter_FC_atoms[dist(ia, aoff + iter_FC_atoms) <= R]
for ja in iter_j_FC_atoms:
D[ia*3:(ia+1)*3, joff+ja*3:joff+(ja+1)*3] += FC[ia, :, x, y, z, ja, :]
D = D.tocsr()
# Remove all zeros
D.eliminate_zeros()
D = DynamicalMatrix.fromsp(geom, D)
if hermitian:
D.finalize()
D = (D + D.transpose()) * 0.5
return D
def read_geometry(self, output=False, *args, **kwargs):
""" Returns Geometry object by reading fdf or Siesta output related files.
One can limit the tried files to only one file by passing
only a single file ending.
Parameters
----------
output: bool, optional
whether to read geometry from output files (default to read from
the fdf file).
order: list of str, optional
the order of which to try and read the geometry.
By default this is ``['XV', 'nc', 'fdf', 'TSHS', 'STRUCT']`` if `output` is true.
If `order` is present, `output` is disregarded.
Examples
--------
>>> fdf = get_sile('RUN.fdf')
>>> fdf.read_geometry() # read from fdf
>>> fdf.read_geometry(True) # read from [XV, nc, fdf]
>>> fdf.read_geometry(order=['nc']) # read from [nc]
>>> fdf.read_geometry(True, order=['nc']) # read from [nc]
"""
##
# NOTE
# When adding more capabilities please check the read_geometry(True, order=...) in this
# code to correct.
##
if output:
order = _listify_str(kwargs.pop('order', ['XV', 'nc', 'fdf', 'TSHS', 'STRUCT']))
else:
order = _listify_str(kwargs.pop('order', ['fdf']))
for f in order:
v = getattr(self, '_r_geometry_{}'.format(f.lower()))(*args, **kwargs)
if v is not None:
if self.track:
info(f"{self.file}(read_geometry) found in file={f}")
return v
return None
def _r_geometry_xv(self, *args, **kwargs):
""" Returns `Geometry` object from the XV file """
geom = None
f = self.dir_file(self.get('SystemLabel', default='siesta') + '.XV')
_track_file(self._r_geometry_xv, f)
if f.is_file():
basis = self.read_basis()
if basis is None:
geom = xvSileSiesta(f).read_geometry(species_Z=False)
else:
geom = xvSileSiesta(f).read_geometry(species_Z=True)
with warnings.catch_warnings():
warnings.simplefilter('ignore')
for atom, _ in geom.atoms.iter(True):
geom.atoms.replace(atom, basis[atom.Z-1])
geom.reduce()
nsc = self.read_supercell_nsc()
geom.set_nsc(nsc)
return geom
def _r_geometry_struct(self, *args, **kwargs):
""" Returns `Geometry` object from the STRUCT_* files """
geom = None
for end in ['STRUCT_NEXT_ITER', 'STRUCT_OUT', 'STRUCT_IN']:
f = self.dir_file(self.get('SystemLabel', default='siesta') + f'.{end}')
_track_file(self._r_geometry_struct, f)
if f.is_file():
basis = self.read_basis()
if basis is None:
geom = structSileSiesta(f).read_geometry(species_Z=False)
else:
geom = structSileSiesta(f).read_geometry(species_Z=True)
with warnings.catch_warnings():
warnings.simplefilter('ignore')
for atom, _ in geom.atoms.iter(True):
geom.atoms.replace(atom, basis[atom.Z-1])
geom.reduce()
nsc = self.read_supercell_nsc()
geom.set_nsc(nsc)
break
return geom
def _r_geometry_nc(self):
# Read geometry from <>.nc file
f = self.dir_file(self.get('SystemLabel', default='siesta') + '.nc')
_track_file(self._r_geometry_nc, f)
if f.is_file():
return ncSileSiesta(f).read_geometry()
return None
def _r_geometry_tshs(self):
# Read geometry from <>.TSHS file
f = self.dir_file(self.get('SystemLabel', default='siesta') + '.TSHS')
_track_file(self._r_geometry_tshs, f)
if f.is_file():
# Default to a geometry with the correct atomic numbers etc.
return tshsSileSiesta(f).read_geometry(geometry=self.read_geometry(False))
return None
def _r_geometry_fdf(self, *args, **kwargs):
""" Returns Geometry object from the FDF file
NOTE: The interaction range of the atoms is currently not read.
"""
sc = self.read_supercell(order='fdf')
# No fractional coordinates
is_frac = False
# Read atom scaling
lc = self.get('AtomicCoordinatesFormat', default='Bohr').lower()
if 'ang' in lc or 'notscaledcartesianang' in lc:
s = 1.
elif 'bohr' in lc or 'notscaledcartesianbohr' in lc:
s = Bohr2Ang
elif 'scaledcartesian' in lc:
# the same scaling as the lattice-vectors
s = self.get('LatticeConstant', unit='Ang')
elif 'fractional' in lc or 'scaledbylatticevectors' in lc:
# no scaling of coordinates as that is entirely
# done by the latticevectors
s = 1.
is_frac = True
# If the user requests a shifted geometry
# we correct for this
origo = _a.zerosd([3])
lor = self.get('AtomicCoordinatesOrigin')
if lor:
if kwargs.get('origin', True):
if isinstance(lor, str):
origo = lor.lower()
else:
origo = _a.asarrayd(list(map(float, lor[0].split()[:3]))) * s
# Origo cannot be interpreted with fractional coordinates;
# hence, it is not transformed.
# Read atom block
atms = self.get('AtomicCoordinatesAndAtomicSpecies')
if atms is None:
raise SileError('AtomicCoordinatesAndAtomicSpecies block could not be found')
# Read number of atoms and block
# We default to the number of elements in the
# AtomicCoordinatesAndAtomicSpecies block
na = self.get('NumberOfAtoms', default=len(atms))
# Reduce space if number of atoms specified
if na < len(atms):
# align number of atoms and atms array
atms = atms[:na]
elif na > len(atms):
raise SileError('NumberOfAtoms is larger than the atoms defined in the blocks')
elif na == 0:
raise SileError('NumberOfAtoms has been determined to be zero, no atoms.')
# Create array
xyz = _a.emptyd([na, 3])
species = _a.emptyi([na])
for ia in range(na):
l = atms[ia].split()
xyz[ia, :] = [float(k) for k in l[:3]]
species[ia] = int(l[3]) - 1
if is_frac:
xyz = np.dot(xyz, sc.cell)
xyz *= s
# Read the block (not strictly needed, if so we simply set all atoms to H)
atoms = self.read_basis()
if atoms is None:
warn('Block ChemicalSpeciesLabel does not exist, cannot determine the basis (all Hydrogen).')
# Default atom (hydrogen)
atoms = Atom(1)
else:
atoms = [atoms[i] for i in species]
atoms = Atoms(atoms, na=len(xyz))
if isinstance(origo, str):
opt = origo
if opt.startswith('cop'):
origo = sc.cell.sum(0) * 0.5 - np.average(xyz, 0)
elif opt.startswith('com'):
# TODO for ghost atoms its mass should not be used
w = atoms.mass
w /= w.sum()
origo = sc.cell.sum(0) * 0.5 - np.average(xyz, 0, weights=w)
elif opt.startswith('min'):
origo = - np.amin(xyz, 0)
if len(opt) > 4:
opt = opt[4:]
if opt == 'x':
origo[1:] = 0.
elif opt == 'y':
origo[[0, 2]] = 0.
elif opt == 'z':
origo[:2] = 0.
elif opt == 'xy' or opt == 'yx':
origo[2] = 0.
elif opt == 'xz' or opt == 'zx':
origo[1] = 0.
elif opt == 'yz' or opt == 'zy':
origo[0] = 0.
# create geometry
xyz += origo
geom = Geometry(xyz, atoms, sc=sc)
# and finally check for supercell constructs
supercell = self.get('SuperCell')
if supercell is not None:
# we need to expand
# check that we are only dealing with an orthogonal supercell
supercell = np.array([[int(x) for x in line.split()]
for line in supercell])
assert supercell.shape == (3, 3)
# Check it is diagonal
diag = np.diag(supercell)
if not np.allclose(supercell - np.diag(diag), 0):
raise SileError('SuperCell input is not diagonal, currently not implemented in sisl')
# now tile it
for axis, nt in enumerate(diag):
geom = geom.tile(nt, axis)
return geom
def read_grid(self, name, *args, **kwargs):
""" Read grid related information from any of the output files
The order of the read data is shown below.
One can limit the tried files to only one file by passing
only a single file ending.
Parameters
----------
name : str
name of data to read. The list of names correspond to the
Siesta output manual (Rho, TotalPotential, etc.), the strings are
case insensitive.
order: list of str, optional
the order of which to try and read the grid.
By default this is ``['nc', 'grid.nc', 'bin']`` (bin refers to the binary files)
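Examples
--------
>>> fdf = get_sile('RUN.fdf')
>>> rho = fdf.read_grid('Rho')  # tries the nc, grid.nc and binary files, in that order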
"""
order = _listify_str(kwargs.pop('order', ['nc', 'grid.nc', 'bin']))
for f in order:
v = getattr(self, '_r_grid_{}'.format(f.lower().replace('.', '_')))(name, *args, **kwargs)
if v is not None:
if self.track:
info(f"{self.file}(read_grid) found in file={f}")
return v
return None
def _r_grid_nc(self, name, *args, **kwargs):
# Read grid from the <>.nc file
f = self.dir_file(self.get('SystemLabel', default='siesta') + '.nc')
_track_file(self._r_grid_nc, f)
if f.is_file():
# Capitalize correctly
name = {'rho': 'Rho',
'rhoinit': 'RhoInit',
'vna': 'Vna',
'ioch': 'Chlocal',
'chlocal': 'Chlocal',
'toch': 'RhoTot',
'totalcharge': 'RhoTot',
'rhotot': 'RhoTot',
'drho': 'RhoDelta',
'deltarho': 'RhoDelta',
'rhodelta': 'RhoDelta',
'vh': 'Vh',
'electrostaticpotential': 'Vh',
'rhoxc': 'RhoXC',
'vt': 'Vt',
'totalpotential': 'Vt',
'bader': 'RhoBader',
'baderrho': 'RhoBader',
'rhobader': 'RhoBader'
}.get(name.lower())
return ncSileSiesta(f).read_grid(name, **kwargs)
return None
def _r_grid_grid_nc(self, name, *args, **kwargs):
# Read grid from the <>.nc file
name = {'rho': 'Rho',
'rhoinit': 'RhoInit',
'vna': 'Vna',
'ioch': 'Chlocal',
'chlocal': 'Chlocal',
'toch': 'TotalCharge',
'totalcharge': 'TotalCharge',
'rhotot': 'TotalCharge',
'drho': 'DeltaRho',
'deltarho': 'DeltaRho',
'rhodelta': 'DeltaRho',
'vh': 'ElectrostaticPotential',
'electrostaticpotential': 'ElectrostaticPotential',
'rhoxc': 'RhoXC',
'vt': 'TotalPotential',
'totalpotential': 'TotalPotential',
'bader': 'BaderCharge',
'baderrho': 'BaderCharge',
'rhobader': 'BaderCharge'
}.get(name.lower()) + '.grid.nc'
f = self.dir_file(name)
_track_file(self._r_grid_grid_nc, f)
if f.is_file():
grid = gridncSileSiesta(f).read_grid(*args, **kwargs)
grid.set_geometry(self.read_geometry(True))
return grid
return None
def _r_grid_bin(self, name, *args, **kwargs):
# Read grid from the <>.VT/... file
name = {'rho': '.RHO',
'rhoinit': '.RHOINIT',
'vna': '.VNA',
'ioch': '.IOCH',
'chlocal': '.IOCH',
'toch': '.TOCH',
'totalcharge': '.TOCH',
'rhotot': '.TOCH',
'drho': '.DRHO',
'deltarho': '.DRHO',
'rhodelta': '.DRHO',
'vh': '.VH',
'electrostaticpotential': '.VH',
'rhoxc': '.RHOXC',
'vt': '.VT',
'totalpotential': '.VT',
'bader': '.BADER',
'baderrho': '.BADER',
'rhobader': '.BADER'
}.get(name.lower())
f = self.dir_file(self.get('SystemLabel', default='siesta') + name)
_track_file(self._r_grid_bin, f)
if f.is_file():
grid = get_sile_class(f)(f).read_grid(*args, **kwargs)
grid.set_geometry(self.read_geometry(True))
return grid
return None
def read_basis(self, *args, **kwargs):
""" Read the atomic species and figure out the number of atomic orbitals in their basis
The order of the read is shown below.
One can limit the tried files to only one file by passing
only a single file ending.
Parameters
----------
order: list of str, optional
the order of which to try and read the basis information.
By default this is ``['nc', 'ion', 'ORB_INDX', 'fdf']``
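Examples
--------
>>> fdf = get_sile('RUN.fdf')
>>> atoms = fdf.read_basis()  # None if no basis information could be found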
"""
order = _listify_str(kwargs.pop('order', ['nc', 'ion', 'ORB_INDX', 'fdf']))
for f in order:
v = getattr(self, '_r_basis_{}'.format(f.lower()))(*args, **kwargs)
if v is not None:
if self.track:
info(f"{self.file}(read_basis) found in file={f}")
return v
return None
def _r_basis_nc(self):
# Read basis from <>.nc file
f = self.dir_file(self.get('SystemLabel', default='siesta') + '.nc')
_track_file(self._r_basis_nc, f)
if f.is_file():
return ncSileSiesta(f).read_basis()
return None
def _r_basis_ion(self):
# Read basis from <>.ion.nc file or <>.ion.xml
spcs = self.get('ChemicalSpeciesLabel')
if spcs is None:
# We haven't found the chemical and species label
# so return nothing
return None
# Now spcs contains the block of the chemicalspecieslabel
atoms = [None] * len(spcs)
found_one = False
found_all = True
for spc in spcs:
idx, Z, lbl = spc.split()[:3]
idx = int(idx) - 1 # F-indexing
Z = int(Z)
lbl = lbl.strip()
f = self.dir_file(lbl + ".ext")
# now try and read the basis
if f.with_suffix('.ion.nc').is_file():
atoms[idx] = ionncSileSiesta(f.with_suffix('.ion.nc')).read_basis()
found_one = True
elif f.with_suffix('.ion.xml').is_file():
atoms[idx] = ionxmlSileSiesta(f.with_suffix('.ion.xml')).read_basis()
found_one = True
else:
# default the atom to not have a range, and no associated orbitals
atoms[idx] = Atom(Z=Z, tag=lbl)
found_all = False
if found_one and not found_all:
warn("Siesta basis information could not read all ion.nc/ion.xml files. "
"Only a subset of the basis information is accessible.")
elif not found_one:
return None
return atoms
def _r_basis_orb_indx(self):
f = self.dir_file(self.get('SystemLabel', default='siesta') + '.ORB_INDX')
_track_file(self._r_basis_orb_indx, f)
if f.is_file():
info(f"Siesta basis information is read from {f}, the radial functions are not accessible.")
return orbindxSileSiesta(f).read_basis(atoms=self._r_basis_fdf())
return None
def _r_basis_fdf(self):
# Read basis from fdf file
spcs = self.get('ChemicalSpeciesLabel')
if spcs is None:
# We haven't found the chemical and species label
# so return nothing
return None
# We create a dictionary with the different atomic species
# and create defaults with another dictionary.
atoms = [{} for _ in spcs]
pao_basis = self.get("PAO.Basis", default=[])
all_mass = self.get('AtomicMass', default=[])
# default mass
mass = None
# Now spcs contains the block of the chemicalspecieslabel
for spc in spcs:
idx, Z, lbl = spc.split()[:3]
idx = int(idx) - 1 # F-indexing
Z = int(Z)
lbl = lbl.strip()
if len(all_mass) > 0:
for mass_line in all_mass:
s, mass = mass_line.split()
if int(s) - 1 == idx:
mass = float(mass)
break
else:
mass = None
atoms[idx]["Z"] = Z
atoms[idx]["mass"] = mass
atoms[idx]["tag"] = lbl
try:
# Only in some cases can we parse the PAO.Basis block.
# There are many corner cases where we can't parse it
# And then we just don't do anything...
# We don't even warn the user...
atoms[idx]["orbitals"] = self._parse_pao_basis(pao_basis, lbl)
except Exception:
pass
# Now check if we can find the orbitals
return [Atom(**atom) for atom in atoms]
@classmethod
def _parse_pao_basis(cls, block, specie=None):
""" Parse the full PAO.Basis block with *optionally* only a single specie
Notes
-----
This parsing of the basis set is not complete, in any sense.
Especially if users request filtered orbitals.
Parameters
----------
block : list of str or str
the entire PAO.Basis block as read by ``self.get("PAO.Basis")``
specie : str, optional
which specie to parse
Returns
-------
orbitals : list of AtomicOrbital
only if requested `specie` is not None
tag_orbitals : dict
if `specie` is None then a dictionary is returned
"""
if isinstance(block, str):
block = block.splitlines()
if len(block) == 0:
if specie is None:
return []
return {}
# make a copy
block = list(block)
def blockline():
nonlocal block
out = ""
while len(out) == 0:
if len(block) == 0:
return out
out = block.pop(0).split('#')[0].strip(" \n\r\t")
return out
def parse_next():
nonlocal blockline
line = blockline()
if len(line) == 0:
return None
# In this basis parser we don't care about the options for
# the specifications
tag, nl, *_ = line.split()
# now loop orbitals
orbs = []
# we just use a non-physical number to signal it didn't get added
# in siesta it can automatically determine this, we can't... (yet!)
n = 0
for _ in range(int(nl)):
# we have 2 or 3 lines
nl_line = blockline()
rc_line = blockline()
# check if we have contraction in the line
# This is not perfect, but should grab
# contraction lines rather than the next orbital line.
# This is because the first n=<integer> should never
# contain a ".", whereas the contraction *should*.
if len(block) > 0:
if '.' in block[0].split()[0]:
contract_line = blockline()
# remove n=
nl_line = nl_line.replace("n=", "").split()
# first 3|2: are n?, l, Nzeta
first = int(nl_line.pop(0))
second = int(nl_line.pop(0))
try:
int(nl_line[0])
n = first
l = second
nzeta = int(nl_line.pop(0))
except (ValueError, IndexError):
l = first
nzeta = second
# Number of polarizations
npol = 0
while len(nl_line) > 0:
opt = nl_line.pop(0)
if opt == "P":
try:
npol = int(nl_line[0])
nl_line.pop(0)
except (ValueError, IndexError):
npol = 1
# now we have everything to build the orbitals etc.
first_zeta = None
for izeta, rc in enumerate(map(float, rc_line.split()), 1):
if rc > 0:
rc *= Bohr2Ang
elif rc == 0:
rc = orbs[-1].R
else:
rc *= -orbs[-1].R
orb = SphericalOrbital(l, None, R=rc)
orbs.extend(orb.toAtomicOrbital(n=n, zeta=izeta))
if izeta == 1:
first_zeta = orb
nzeta -= 1
# In case the final orbitals haven't been defined.
# They really should be defined in this one, but sometimes it may be
# useful to leave the rc's definitions out.
orb = orbs[-1]
rc = orb.R
for izeta in range(orb.zeta, orb.zeta + nzeta):
orb = SphericalOrbital(l, None, R=rc)
orbs.extend(orb.toAtomicOrbital(n=n, zeta=izeta))
for ipol in range(1, npol+1):
orb = SphericalOrbital(l+1, None, R=first_zeta.R)
orbs.extend(orb.toAtomicOrbital(n=n, zeta=ipol, P=True))
return tag, orbs
atoms = {}
ret = parse_next()
while ret is not None:
atoms[ret[0]] = ret[1]
ret = parse_next()
if specie is None:
return atoms
return atoms[specie]
def _r_add_overlap(self, parent_call, M):
""" Internal routine to ensure that the overlap matrix is read and added to the matrix `M` """
try:
S = self.read_overlap()
# Check for the same sparsity pattern
if np.all(M._csr.col == S._csr.col):
M._csr._D[:, -1] = S._csr._D[:, 0]
else:
raise ValueError
except Exception:
warn(str(self) + f' could not successfully read the overlap matrix in {parent_call}.')
def read_density_matrix(self, *args, **kwargs):
""" Try and read density matrix by reading the <>.nc, <>.TSDE files, <>.DM (in that order)
One can limit the tried files to only one file by passing
only a single file ending.
Parameters
----------
order: list of str, optional
the order of which to try and read the density matrix
By default this is ``['nc', 'TSDE', 'DM']``.
"""
order = _listify_str(kwargs.pop('order', ['nc', 'TSDE', 'DM']))
for f in order:
DM = getattr(self, '_r_density_matrix_{}'.format(f.lower()))(*args, **kwargs)
if DM is not None:
_track(self.read_density_matrix, f"found file {f}")
return DM
return None
def _r_density_matrix_nc(self, *args, **kwargs):
""" Try and read the density matrix by reading the <>.nc """
f = self.dir_file(self.get('SystemLabel', default='siesta') + '.nc')
_track_file(self._r_density_matrix_nc, f)
DM = None
if f.is_file():
# this *should* also contain the overlap matrix
DM = ncSileSiesta(f).read_density_matrix(*args, **kwargs)
return DM
def _r_density_matrix_tsde(self, *args, **kwargs):
""" Read density matrix from the TSDE file """
f = self.dir_file(self.get('SystemLabel', default='siesta') + '.TSDE')
_track_file(self._r_density_matrix_tsde, f)
DM = None
if f.is_file():
if 'geometry' not in kwargs:
# to ensure we get the correct orbital count
kwargs['geometry'] = self.read_geometry(True, order=['nc', 'TSHS', 'fdf'])
DM = tsdeSileSiesta(f).read_density_matrix(*args, **kwargs)
self._r_add_overlap('_r_density_matrix_tsde', DM)
return DM
def _r_density_matrix_dm(self, *args, **kwargs):
""" Read density matrix from the DM file """
f = self.dir_file(self.get('SystemLabel', default='siesta') + '.DM')
_track_file(self._r_density_matrix_dm, f)
DM = None
if f.is_file():
if 'geometry' not in kwargs:
# to ensure we get the correct orbital count
kwargs['geometry'] = self.read_geometry(True, order=['nc', 'TSHS', 'fdf'])
DM = dmSileSiesta(f).read_density_matrix(*args, **kwargs)
self._r_add_overlap('_r_density_matrix_dm', DM)
return DM
def read_energy_density_matrix(self, *args, **kwargs):
""" Try and read energy density matrix by reading the <>.nc or <>.TSDE files (in that order)
One can limit the tried files to only one file by passing
only a single file ending.
Parameters
----------
order: list of str, optional
the order of which to try and read the density matrix
By default this is ``['nc', 'TSDE']``.
"""
order = _listify_str(kwargs.pop('order', ['nc', 'TSDE']))
for f in order:
EDM = getattr(self, '_r_energy_density_matrix_{}'.format(f.lower()))(*args, **kwargs)
if EDM is not None:
_track(self.read_energy_density_matrix, f"found file {f}")
return EDM
return None
def _r_energy_density_matrix_nc(self, *args, **kwargs):
""" Read energy density matrix by reading the <>.nc """
f = self.dir_file(self.get('SystemLabel', default='siesta') + '.nc')
_track_file(self._r_energy_density_matrix_nc, f)
if f.is_file():
return ncSileSiesta(f).read_energy_density_matrix(*args, **kwargs)
return None
def _r_energy_density_matrix_tsde(self, *args, **kwargs):
""" Read energy density matrix from the TSDE file """
f = self.dir_file(self.get('SystemLabel', default='siesta') + '.TSDE')
_track_file(self._r_energy_density_matrix_tsde, f)
EDM = None
if f.is_file():
if 'geometry' not in kwargs:
# to ensure we get the correct orbital count
kwargs['geometry'] = self.read_geometry(True, order=['nc', 'TSHS'])
EDM = tsdeSileSiesta(f).read_energy_density_matrix(*args, **kwargs)
self._r_add_overlap('_r_energy_density_matrix_tsde', EDM)
return EDM
def read_overlap(self, *args, **kwargs):
""" Try and read the overlap matrix by reading the <>.nc, <>.TSHS files, <>.HSX, <>.onlyS (in that order)
One can limit the tried files to only one file by passing
only a single file ending.
Parameters
----------
order: list of str, optional
the order of which to try and read the overlap matrix
By default this is ``['nc', 'TSHS', 'HSX', 'onlyS']``.
"""
order = _listify_str(kwargs.pop('order', ['nc', 'TSHS', 'HSX', 'onlyS']))
for f in order:
v = getattr(self, '_r_overlap_{}'.format(f.lower()))(*args, **kwargs)
if v is not None:
_track(self.read_overlap, f"found file {f}")
return v
return None
def _r_overlap_nc(self, *args, **kwargs):
""" Read overlap from the nc file """
f = self.dir_file(self.get('SystemLabel', default='siesta') + '.nc')
_track_file(self._r_overlap_nc, f)
if f.is_file():
return ncSileSiesta(f).read_overlap(*args, **kwargs)
return None
def _r_overlap_tshs(self, *args, **kwargs):
""" Read overlap from the TSHS file """
f = self.dir_file(self.get('SystemLabel', default='siesta') + '.TSHS')
_track_file(self._r_overlap_tshs, f)
S = None
if f.is_file():
if 'geometry' not in kwargs:
# to ensure we get the correct orbital count
kwargs['geometry'] = self.read_geometry(True, order=['nc', 'TSHS'])
S = tshsSileSiesta(f).read_overlap(*args, **kwargs)
return S
def _r_overlap_hsx(self, *args, **kwargs):
""" Read overlap from the HSX file """
f = self.dir_file(self.get('SystemLabel', default='siesta') + '.HSX')
_track_file(self._r_overlap_hsx, f)
S = None
if f.is_file():
if 'geometry' not in kwargs:
# to ensure we get the correct orbital count
kwargs['geometry'] = self.read_geometry(True, order=['nc', 'TSHS', 'fdf'])
S = hsxSileSiesta(f).read_overlap(*args, **kwargs)
return S
def _r_overlap_onlys(self, *args, **kwargs):
""" Read overlap from the onlyS file """
f = self.dir_file(self.get('SystemLabel', default='siesta') + '.onlyS')
_track_file(self._r_overlap_onlys, f)
S = None
if f.is_file():
if 'geometry' not in kwargs:
# to ensure we get the correct orbital count
kwargs['geometry'] = self.read_geometry(True, order=['nc', 'TSHS', 'fdf'])
S = onlysSileSiesta(f).read_overlap(*args, **kwargs)
return S
def read_hamiltonian(self, *args, **kwargs):
""" Try and read the Hamiltonian by reading the <>.nc, <>.TSHS files, <>.HSX (in that order)
One can limit the tried files to only one file by passing
only a single file ending.
Parameters
----------
order: list of str, optional
the order of which to try and read the Hamiltonian.
By default this is ``['nc', 'TSHS', 'HSX']``.
"""
order = _listify_str(kwargs.pop('order', ['nc', 'TSHS', 'HSX']))
for f in order:
H = getattr(self, '_r_hamiltonian_{}'.format(f.lower()))(*args, **kwargs)
if H is not None:
_track(self.read_hamiltonian, f"found file {f}")
return H
return None
def _r_hamiltonian_nc(self, *args, **kwargs):
""" Read Hamiltonian from the nc file """
f = self.dir_file(self.get('SystemLabel', default='siesta') + '.nc')
_track_file(self._r_hamiltonian_nc, f)
if f.is_file():
return ncSileSiesta(f).read_hamiltonian(*args, **kwargs)
return None
def _r_hamiltonian_tshs(self, *args, **kwargs):
""" Read Hamiltonian from the TSHS file """
f = self.dir_file(self.get('SystemLabel', default='siesta') + '.TSHS')
_track_file(self._r_hamiltonian_tshs, f)
H = None
if f.is_file():
if 'geometry' not in kwargs:
# to ensure we get the correct orbital count
kwargs['geometry'] = self.read_geometry(True, order=['nc', 'TSHS'])
H = tshsSileSiesta(f).read_hamiltonian(*args, **kwargs)
return H
def _r_hamiltonian_hsx(self, *args, **kwargs):
""" Read Hamiltonian from the HSX file """
f = self.dir_file(self.get('SystemLabel', default='siesta') + '.HSX')
_track_file(self._r_hamiltonian_hsx, f)
H = None
if f.is_file():
if 'geometry' not in kwargs:
# to ensure we get the correct orbital count
kwargs['geometry'] = self.read_geometry(True, order=['nc', 'TSHS', 'fdf'])
H = hsxSileSiesta(f).read_hamiltonian(*args, **kwargs)
Ef = self.read_fermi_level()
if Ef is None:
info(f"{str(self)}.read_hamiltonian from HSX file failed shifting to the Fermi-level.")
else:
H.shift(-Ef)
return H
@default_ArgumentParser(description="Manipulate a FDF file.")
def ArgumentParser(self, p=None, *args, **kwargs):
""" Returns the arguments that is available for this Sile """
import argparse
# We must by-pass this fdf-file for importing
import sisl.io.siesta as sis
# The fdf parser is more complicated
# It is based on different settings depending on the contents of the fdf file.
sp = p.add_subparsers(help="Determine which part of the fdf-file should be processed.")
# Get the label which retains all the sub-modules
label = self.get('SystemLabel', default='siesta')
f_label = label + ".ext"
def label_file(suffix):
return self.dir_file(f_label).with_suffix(suffix)
# The default on all sub-parsers are the retrieval and setting
d = {
'_fdf': self,
'_fdf_first': True,
}
namespace = default_namespace(**d)
ep = sp.add_parser('edit',
help='Change or read and print data from the fdf file')
# As the fdf may provide additional stuff, we do not add EVERYTHING from
# the Geometry class.
class FDFAdd(argparse.Action):
def __call__(self, parser, ns, values, option_string=None):
key = values[0]
val = values[1]
if ns._fdf_first:
# Append to the end of the file
with ns._fdf as fd:
fd.write('\n\n# SISL added keywords\n')
setattr(ns, '_fdf_first', False)
ns._fdf.set(key, val)
ep.add_argument('--set', '-s', nargs=2, metavar=('KEY', 'VALUE'),
action=FDFAdd,
help='Add a key to the FDF file. If it already exists it will be overwritten')
class FDFGet(argparse.Action):
def __call__(self, parser, ns, value, option_string=None):
# Retrieve the value in standard units
# Currently, we write out the unit "as-is"
val = ns._fdf.get(value[0], with_unit=True)
if val is None:
print(f'# {value[0]} is currently not in the FDF file ')
return
if isinstance(val, tuple):
print(ns._fdf.print(value[0], '{} {}'.format(*val)))
else:
print(ns._fdf.print(value[0], val))
ep.add_argument('--get', '-g', nargs=1, metavar='KEY',
action=FDFGet,
help='Print (to stdout) the value of the key in the FDF file.')
# If the XV file exists, it takes precedence
# over the geometry contained in the fdf file
# (we will issue a warning in that case)
f = label_file('.XV')
try:
geom = self.read_geometry(True)
tmp_p = sp.add_parser('geom',
help="Edit the contained geometry in the file")
tmp_p, tmp_ns = geom.ArgumentParser(tmp_p, *args, **kwargs)
namespace = merge_instances(namespace, tmp_ns)
except:
# Allowed pass due to pythonic reading
pass
f = label_file('.bands')
if f.is_file():
tmp_p = sp.add_parser('band',
help="Manipulate bands file from the Siesta simulation")
tmp_p, tmp_ns = sis.bandsSileSiesta(f).ArgumentParser(tmp_p, *args, **kwargs)
namespace = merge_instances(namespace, tmp_ns)
f = label_file('.PDOS.xml')
if f.is_file():
tmp_p = sp.add_parser('pdos',
help="Manipulate PDOS.xml file from the Siesta simulation")
tmp_p, tmp_ns = sis.pdosSileSiesta(f).ArgumentParser(tmp_p, *args, **kwargs)
namespace = merge_instances(namespace, tmp_ns)
f = label_file('.EIG')
if f.is_file():
tmp_p = sp.add_parser('eig',
help="Manipulate EIG file from the Siesta simulation")
tmp_p, tmp_ns = sis.eigSileSiesta(f).ArgumentParser(tmp_p, *args, **kwargs)
namespace = merge_instances(namespace, tmp_ns)
#f = label + '.FA'
#if isfile(f):
# tmp_p = sp.add_parser('force',
# help="Manipulate FA file from the Siesta simulation")
# tmp_p, tmp_ns = sis.faSileSiesta(f).ArgumentParser(tmp_p, *args, **kwargs)
# namespace = merge_instances(namespace, tmp_ns)
f = label_file('.TBT.nc')
if f.is_file():
tmp_p = sp.add_parser('tbt',
help="Manipulate tbtrans output file")
tmp_p, tmp_ns = sis.tbtncSileSiesta(f).ArgumentParser(tmp_p, *args, **kwargs)
namespace = merge_instances(namespace, tmp_ns)
f = label_file('.TBT.Proj.nc')
if f.is_file():
tmp_p = sp.add_parser('tbt-proj',
help="Manipulate tbtrans projection output file")
tmp_p, tmp_ns = sis.tbtprojncSileSiesta(f).ArgumentParser(tmp_p, *args, **kwargs)
namespace = merge_instances(namespace, tmp_ns)
f = label_file('.PHT.nc')
if f.is_file():
tmp_p = sp.add_parser('pht',
help="Manipulate the phtrans output file")
tmp_p, tmp_ns = sis.phtncSileSiesta(f).ArgumentParser(tmp_p, *args, **kwargs)
namespace = merge_instances(namespace, tmp_ns)
f = label_file('.PHT.Proj.nc')
if f.is_file():
tmp_p = sp.add_parser('pht-proj',
help="Manipulate phtrans projection output file")
tmp_p, tmp_ns = sis.phtprojncSileSiesta(f).ArgumentParser(tmp_p, *args, **kwargs)
namespace = merge_instances(namespace, tmp_ns)
f = label_file('.nc')
if f.is_file():
tmp_p = sp.add_parser('nc',
help="Manipulate Siesta NetCDF output file")
tmp_p, tmp_ns = sis.ncSileSiesta(f).ArgumentParser(tmp_p, *args, **kwargs)
namespace = merge_instances(namespace, tmp_ns)
return p, namespace
add_sile('fdf', fdfSileSiesta, case=False, gzip=True)
|
# -*- coding: utf-8 -*-
import copy
import marshal
import struct
import traceback
from .._compat import PY2, exists, copyreg, integer_types, implements_bool, \
iterkeys, itervalues, iteritems
from .serializers import serializers
long = integer_types[-1]
@implements_bool
class BasicStorage(object):
def __init__(self, *args, **kwargs):
return self.__dict__.__init__(*args, **kwargs)
def __getitem__(self, key):
return self.__dict__.__getitem__(str(key))
__setitem__ = object.__setattr__
def __delitem__(self, key):
try:
delattr(self, key)
except AttributeError:
raise KeyError(key)
def __bool__(self):
return len(self.__dict__) > 0
__iter__ = lambda self: self.__dict__.__iter__()
__str__ = lambda self: self.__dict__.__str__()
__repr__ = lambda self: self.__dict__.__repr__()
has_key = __contains__ = lambda self, key: key in self.__dict__
def get(self, key, default=None):
return self.__dict__.get(key, default)
def update(self, *args, **kwargs):
return self.__dict__.update(*args, **kwargs)
def keys(self):
return self.__dict__.keys()
def iterkeys(self):
return iterkeys(self.__dict__)
def values(self):
return self.__dict__.values()
def itervalues(self):
return itervalues(self.__dict__)
def items(self):
return self.__dict__.items()
def iteritems(self):
return iteritems(self.__dict__)
pop = lambda self, *args, **kwargs: self.__dict__.pop(*args, **kwargs)
clear = lambda self, *args, **kwargs: self.__dict__.clear(*args, **kwargs)
copy = lambda self, *args, **kwargs: self.__dict__.copy(*args, **kwargs)
def pickle_basicstorage(s):
return BasicStorage, (dict(s),)
copyreg.pickle(BasicStorage, pickle_basicstorage)
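# Minimal usage sketch (not part of the library): attribute and item access
# both operate on the instance __dict__, so the two are interchangeable:
#   s = BasicStorage(a=1)
#   s.b = 2
#   assert s['a'] == s.a == 1 and s.get('b') == 2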
class Serializable(object):
def as_dict(self, flat=False, sanitize=True):
return self.__dict__
def as_xml(self, sanitize=True):
return serializers.xml(self.as_dict(flat=True, sanitize=sanitize))
def as_json(self, sanitize=True):
return serializers.json(self.as_dict(flat=True, sanitize=sanitize))
def as_yaml(self, sanitize=True):
return serializers.yaml(self.as_dict(flat=True, sanitize=sanitize))
class Reference(long):
def __allocate(self):
if not self._record:
self._record = self._table[long(self)]
if not self._record:
raise RuntimeError(
"Using a recursive select but encountered a broken " +
"reference: %s %d" % (self._table, long(self))
)
def __getattr__(self, key, default=None):
if key == 'id':
return long(self)
if key in self._table:
self.__allocate()
if self._record:
# to deal with case self.update_record()
return self._record.get(key, default)
else:
return default
def get(self, key, default=None):
return self.__getattr__(key, default)
def __setattr__(self, key, value):
if key.startswith('_'):
long.__setattr__(self, key, value)
return
self.__allocate()
self._record[key] = value
def __getitem__(self, key):
if key == 'id':
return long(self)
self.__allocate()
return self._record.get(key, None)
def __setitem__(self, key, value):
self.__allocate()
self._record[key] = value
def Reference_unpickler(data):
return marshal.loads(data)
def Reference_pickler(data):
try:
marshal_dump = marshal.dumps(long(data))
except AttributeError:
marshal_dump = 'i%s' % struct.pack('<i', long(data))
return (Reference_unpickler, (marshal_dump,))
copyreg.pickle(Reference, Reference_pickler, Reference_unpickler)
class SQLCallableList(list):
def __call__(self):
return copy.copy(self)
class SQLALL(object):
"""
Helper class providing a comma-separated string having all the field names
(prefixed by table name and '.')
normally only called from within gluon.dal
"""
def __init__(self, table):
self._table = table
def __str__(self):
return ', '.join([str(field) for field in self._table])
class SQLCustomType(object):
"""
Allows defining custom SQL types
Args:
type: the web2py type (default = 'string')
native: the backend type
encoder: how to encode the value to store it in the backend
decoder: how to decode the value retrieved from the backend
validator: what validators to use ( default = None, will use the
default validator for type)
Example::
Define as:
decimal = SQLCustomType(
type ='double',
native ='integer',
encoder =(lambda x: int(float(x) * 100)),
decoder = (lambda x: Decimal("0.00") + Decimal(str(float(x)/100)) )
)
db.define_table(
'example',
Field('value', type=decimal)
)
"""
def __init__(self, type='string', native=None, encoder=None, decoder=None,
validator=None, _class=None, widget=None, represent=None):
self.type = type
self.native = native
self.encoder = encoder or (lambda x: x)
self.decoder = decoder or (lambda x: x)
self.validator = validator
self._class = _class or type
self.widget = widget
self.represent = represent
def startswith(self, text=None):
try:
return self.type.startswith(text)
except TypeError:
return False
def endswith(self, text=None):
try:
return self.type.endswith(text)
except TypeError:
return False
def __getslice__(self, a=0, b=100):
return None
def __getitem__(self, i):
return None
def __str__(self):
return self._class
class RecordUpdater(object):
def __init__(self, colset, table, id):
self.colset, self.db, self.tablename, self.id = \
colset, table._db, table._tablename, id
def __call__(self, **fields):
colset, db, tablename, id = self.colset, self.db, self.tablename, \
self.id
table = db[tablename]
newfields = fields or dict(colset)
for fieldname in list(newfields.keys()):
if fieldname not in table.fields or table[fieldname].type == 'id':
del newfields[fieldname]
table._db(table._id == id, ignore_common_filters=True).update(
**newfields
)
colset.update(newfields)
return colset
class RecordDeleter(object):
def __init__(self, table, id):
self.db, self.tablename, self.id = table._db, table._tablename, id
def __call__(self):
return self.db(self.db[self.tablename]._id == self.id).delete()
class MethodAdder(object):
def __init__(self, table):
self.table = table
def __call__(self):
return self.register()
def __getattr__(self, method_name):
return self.register(method_name)
def register(self, method_name=None):
def _decorated(f):
instance = self.table
import types
if PY2:
method = types.MethodType(f, instance, instance.__class__)
else:
method = types.MethodType(f, instance)
name = method_name or f.__name__
setattr(instance, name, method)
return f
return _decorated
class FakeCursor(object):
'''
The Python Database API Specification has a cursor() method, which
NoSql drivers generally don't support. If the exception in this
function is raised, it likely means that some piece of
functionality has not yet been implemented in the driver,
and something is using the cursor.
https://www.python.org/dev/peps/pep-0249/
'''
def warn_bad_usage(self, attr):
raise Exception("FakeCursor.%s is not implemented" % attr)
def __getattr__(self, attr):
self.warn_bad_usage(attr)
def __setattr__(self, attr, value):
self.warn_bad_usage(attr)
class NullCursor(FakeCursor):
lastrowid = 1
def __getattr__(self, attr):
return lambda *a, **b: []
class FakeDriver(BasicStorage):
def __init__(self, *args, **kwargs):
super(FakeDriver, self).__init__(*args, **kwargs)
self._build_cursor_()
def _build_cursor_(self):
self._fake_cursor_ = FakeCursor()
def cursor(self):
return self._fake_cursor_
def close(self):
return None
def commit(self):
return None
def __str__(self):
state = ["%s=%r" % (attribute, value)
for (attribute, value) in self.items()]
return '\n'.join(state)
class NullDriver(FakeDriver):
def _build_cursor_(self):
self._fake_cursor_ = NullCursor()
class DatabaseStoredFile:
web2py_filesystems = set()
def escape(self, obj):
return self.db._adapter.escape(obj)
@staticmethod
def try_create_web2py_filesystem(db):
if db._uri not in DatabaseStoredFile.web2py_filesystems:
if db._adapter.dbengine == 'mysql':
sql = "CREATE TABLE IF NOT EXISTS web2py_filesystem (path VARCHAR(255), content LONGTEXT, PRIMARY KEY(path) ) ENGINE=InnoDB;"
elif db._adapter.dbengine in ('postgres', 'sqlite'):
sql = "CREATE TABLE IF NOT EXISTS web2py_filesystem (path VARCHAR(255), content TEXT, PRIMARY KEY(path));"
db.executesql(sql)
DatabaseStoredFile.web2py_filesystems.add(db._uri)
def __init__(self, db, filename, mode):
if db._adapter.dbengine not in ('mysql', 'postgres', 'sqlite'):
raise RuntimeError(
"only MySQL/Postgres/SQLite can store metadata .table files" +
" in database for now")
self.db = db
self.filename = filename
self.mode = mode
DatabaseStoredFile.try_create_web2py_filesystem(db)
self.p = 0
self.data = ''
if mode in ('r', 'rw', 'rb', 'a'):
query = "SELECT content FROM web2py_filesystem WHERE path='%s'" \
% filename
rows = self.db.executesql(query)
if rows:
self.data = rows[0][0]
elif exists(filename):
datafile = open(filename, 'r')
try:
self.data = datafile.read()
finally:
datafile.close()
elif mode in ('r', 'rw'):
raise RuntimeError("File %s does not exist" % filename)
def read(self, bytes=None):
if bytes is None:
bytes = len(self.data)
data = self.data[self.p:self.p+bytes]
self.p += len(data)
return data
def readline(self):
i = self.data.find('\n', self.p)+1
if i > 0:
data, self.p = self.data[self.p:i], i
else:
data, self.p = self.data[self.p:], len(self.data)
return data
def write(self, data):
self.data += data
def close_connection(self):
if self.db is not None:
self.db.executesql(
"DELETE FROM web2py_filesystem WHERE path='%s'" %
self.filename
)
query = "INSERT INTO web2py_filesystem(path,content) VALUES ('%s','%s')"\
% (self.filename, self.data.replace("'", "''"))
self.db.executesql(query)
self.db.commit()
self.db = None
def close(self):
self.close_connection()
@staticmethod
def exists(db, filename):
if exists(filename):
return True
DatabaseStoredFile.try_create_web2py_filesystem(db)
query = "SELECT path FROM web2py_filesystem WHERE path='%s'" % filename
try:
if db.executesql(query):
return True
except Exception as e:
if not (db._adapter.isOperationalError(e) or
db._adapter.isProgrammingError(e)):
raise
# no web2py_filesystem found?
tb = traceback.format_exc()
db.logger.error("Could not retrieve %s\n%s" % (filename, tb))
return False
class UseDatabaseStoredFile:
def file_exists(self, filename):
return DatabaseStoredFile.exists(self.db, filename)
def file_open(self, filename, mode='rb', lock=True):
return DatabaseStoredFile(self.db, filename, mode)
def file_close(self, fileobj):
fileobj.close_connection()
def file_delete(self, filename):
query = "DELETE FROM web2py_filesystem WHERE path='%s'" % filename
self.db.executesql(query)
self.db.commit()
|
"""
Custom Authenticator to use Bitbucket OAuth with JupyterHub
"""
import json
import urllib
from tornado.auth import OAuth2Mixin
from tornado import gen, web
from tornado.httputil import url_concat
from tornado.httpclient import HTTPRequest, AsyncHTTPClient
from jupyterhub.auth import LocalAuthenticator
from traitlets import Set
from .oauth2 import OAuthLoginHandler, OAuthenticator
class BitbucketMixin(OAuth2Mixin):
_OAUTH_AUTHORIZE_URL = "https://bitbucket.org/site/oauth2/authorize"
_OAUTH_ACCESS_TOKEN_URL = "https://bitbucket.org/site/oauth2/access_token"
class BitbucketLoginHandler(OAuthLoginHandler, BitbucketMixin):
pass
class BitbucketOAuthenticator(OAuthenticator):
login_service = "Bitbucket"
client_id_env = 'BITBUCKET_CLIENT_ID'
client_secret_env = 'BITBUCKET_CLIENT_SECRET'
login_handler = BitbucketLoginHandler
team_whitelist = Set(
config=True,
help="Automatically whitelist members of selected teams",
)
headers = {"Accept": "application/json",
"User-Agent": "JupyterHub",
"Authorization": "Bearer {}"
}
@gen.coroutine
def authenticate(self, handler, data=None):
code = handler.get_argument("code", False)
if not code:
raise web.HTTPError(400, "oauth callback made without a token")
# TODO: Configure the curl_httpclient for tornado
http_client = AsyncHTTPClient()
params = dict(
client_id=self.client_id,
client_secret=self.client_secret,
grant_type="authorization_code",
code=code,
redirect_uri=self.oauth_callback_url
)
url = url_concat(
"https://bitbucket.org/site/oauth2/access_token", params)
self.log.info(url)
bb_header = {"Content-Type":
"application/x-www-form-urlencoded;charset=utf-8"}
req = HTTPRequest(url,
method="POST",
auth_username=self.client_id,
auth_password=self.client_secret,
body=urllib.parse.urlencode(params).encode('utf-8'),
headers=bb_header
)
resp = yield http_client.fetch(req)
resp_json = json.loads(resp.body.decode('utf8', 'replace'))
access_token = resp_json['access_token']
self.headers["Authorization"] = self.headers["Authorization"].format(access_token)
# Determine who the logged in user is
req = HTTPRequest("https://api.bitbucket.org/2.0/user",
method="GET",
headers=self.headers
)
resp = yield http_client.fetch(req)
resp_json = json.loads(resp.body.decode('utf8', 'replace'))
return resp_json["username"]
def check_whitelist(self, username, headers=None):
headers = headers if headers else self.headers
if self.team_whitelist:
return self._check_group_whitelist(username, headers)
else:
return self._check_user_whitelist(username)
@gen.coroutine
def _check_user_whitelist(self, user):
return (not self.whitelist) or (user in self.whitelist)
@gen.coroutine
def _check_group_whitelist(self, username, headers=None):
http_client = AsyncHTTPClient()
# We verify the team membership by calling teams endpoint.
# Re-use the headers, change the request.
headers = headers if headers else self.headers
next_page = url_concat("https://api.bitbucket.org/2.0/teams",
{'role': 'member'})
user_teams = set()
while next_page:
req = HTTPRequest(next_page, method="GET", headers=headers)
resp = yield http_client.fetch(req)
resp_json = json.loads(resp.body.decode('utf8', 'replace'))
next_page = resp_json.get('next', None)
user_teams |= \
set([entry["username"] for entry in resp_json["values"]])
return len(self.team_whitelist & user_teams) > 0
class LocalBitbucketOAuthenticator(LocalAuthenticator,
BitbucketOAuthenticator):
"""A version that mixes in local system user creation"""
pass
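# Minimal jupyterhub_config.py sketch (all values are placeholders):
#   c.JupyterHub.authenticator_class = BitbucketOAuthenticator
#   c.BitbucketOAuthenticator.client_id = '<client-id>'
#   c.BitbucketOAuthenticator.client_secret = '<client-secret>'
#   c.BitbucketOAuthenticator.oauth_callback_url = 'https://example.com/hub/oauth_callback'
#   c.BitbucketOAuthenticator.team_whitelist = {'my-team'}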
|
# -*- coding: utf-8 -*-
# Generated by Django 1.9.5 on 2016-04-27 15:37
from __future__ import unicode_literals
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
initial = True
dependencies = [
migrations.swappable_dependency(settings.AUTH_USER_MODEL),
]
operations = [
migrations.CreateModel(
name='Compound',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('created_on', models.DateTimeField(auto_now_add=True, null=True)),
('modified_on', models.DateTimeField(auto_now=True, null=True)),
('signed_off_date', models.DateTimeField(blank=True, null=True)),
('name', models.CharField(max_length=255, unique=True)),
('smiles', models.CharField(max_length=5000)),
('drugbank_id', models.CharField(blank=True, default=None, max_length=255, null=True, unique=True)),
('pubchem_id', models.CharField(blank=True, default=None, max_length=255, null=True, unique=True)),
('upddi_id', models.CharField(blank=True, default=None, max_length=255, null=True, unique=True)),
('external_id', models.CharField(blank=True, default='', max_length=255)),
('external_id_source', models.CharField(blank=True, default='', max_length=255)),
('vendor', models.CharField(blank=True, default='', max_length=255)),
('catalog_number', models.CharField(blank=True, default='', max_length=255)),
('method', models.CharField(blank=True, choices=[('s', 's'), ('l', 'l')], default='', max_length=50)),
('drug_target', models.CharField(blank=True, default='', max_length=3000)),
('canonical_method_of_action', models.CharField(blank=True, default='', max_length=5000)),
('created_by', models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, related_name='compound_created_by', to=settings.AUTH_USER_MODEL)),
('modified_by', models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, related_name='compound_modified_by', to=settings.AUTH_USER_MODEL)),
('signed_off_by', models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, related_name='compound_signed_off_by', to=settings.AUTH_USER_MODEL)),
],
options={
'abstract': False,
},
),
]
|
#! /usr/bin/env python
from __future__ import print_function
from openturns import *
TESTPREAMBLE()
try:
# Create an instance
myFunc = NumericalMathFunction(["t", "x"], ["x + t^2"])
myTemporalFunc = TemporalFunction(myFunc)
print("myTemporalFunc=", myTemporalFunc)
# Get the input and output description
print("myTemporalFunc input description=",
myTemporalFunc.getInputDescription())
print("myTemporalFunc output description=",
myTemporalFunc.getOutputDescription())
# Get the input and output dimension, based on description
print("myTemporalFunc input dimension=",
myTemporalFunc.getInputDimension())
print("myTemporalFunc output dimension=",
myTemporalFunc.getOutputDimension())
# Create a TimeSeries
tg = RegularGrid(0.0, 0.2, 6)
data = NumericalSample(tg.getN(), myFunc.getInputDimension() - 1)
for i in range(data.getSize()):
for j in range(data.getDimension()):
data[i, j] = i * data.getDimension() + j
ts = TimeSeries(tg, data)
print("input time series=", ts)
print("output time series=", myTemporalFunc(ts))
# Get the number of calls
print("called ", myTemporalFunc.getCallsNumber(), " times")
except:
import sys
print("t_TemporalFunction_std.py", sys.exc_info()[0], sys.exc_info()[1])
|
# -*- coding: utf-8 -*-
"""
taxmap.taxmap
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
Provides Flask based template rendering and web service support for
taxmap. Its responsibilities for web service calls include...
* Handling incoming HTTP requests by parsing arguments
* Call the appropriate service module for data
* Create a response from the data
:license: MIT, see LICENSE for more details.
"""
import argparse
import traceback
from flask import abort
from flask import Flask
from flask import jsonify
from flask import make_response
from flask import render_template
from flask import request
from service import region_service
app = Flask(__name__)
verbose = False
@app.route("/")
def main_page():
return render_template("index.html")
@app.route("/determine_tax_map")
def determine_tax_map():
# Read and validate request arguments
try:
zip = request.args.get("zip", "").strip().lower()
fips_code = region_service.get_fips_code_from_zip(zip)
prgm_name = request.args.get("prgm_name", "").strip()
prgm_cost = int(request.args.get("prgm_cost", 0))
# Ensure the request parameters are valid, otherwise return a 400
if not fips_code or prgm_cost < 1:
abort(400)
if verbose:
print ">>>>>>>>>>>>>>>>>>>>>> determine_tax_map"
print " * ZIP: %s" % zip
print " * Program Name: %s" % prgm_name
print " * Program Cost: %d" % prgm_cost
except Exception, e:
# If there are problems reading the request arguments, then
# the request is bad. Return a 400 HTTP Status Code - Bad
# Request
if verbose:
print " %s" % str(e)
traceback.print_exc()
abort(400)
# Determine the tax map for this zip code
response = region_service.determine_tax_map(zip, prgm_name, prgm_cost)
if verbose:
print response
print "<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<"
return make_response(jsonify(response))
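# Example request (hypothetical values; zip must resolve to a FIPS code and
# prgm_cost must be a positive integer, otherwise a 400 is returned):
#   GET /determine_tax_map?zip=15213&prgm_name=Example&prgm_cost=100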
def parse_args():
""" Parse the command line arguments
"""
global verbose
parser = argparse.ArgumentParser(description="taxmap web service")
parser.add_argument("-v", "--verbose", action="store_true",
help="Make the operation talkative")
args = parser.parse_args()
verbose = args.verbose
return args
if __name__ == "__main__":
args = parse_args()
print "-----------------------------------------< taxmap web service >----"
app.run(debug=True) # If running directly from the CLI, run in debug mode.
|
"""
The model - a 2D lattice where agents live and have an opinion
"""
import random
from collections import Counter
from mesa import Model, Agent
from mesa.time import SimultaneousActivation
from mesa.space import Grid
class ColorCell(Agent):
'''
Represents a cell's opinion (visualized by a color)
'''
OPINIONS = [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15]
def __init__(self, pos, model, initial_state):
'''
Create a cell, in the given state, at the given row, col position.
'''
super().__init__(pos, model)
self._row = pos[0]
self._col = pos[1]
self._state = initial_state
self._next_state = None
def get_col(self):
'''Return the col location of this cell.'''
return self._col
def get_row(self):
'''Return the row location of this cell.'''
return self._row
def get_state(self):
'''Return the current state (OPINION) of this cell.'''
return self._state
def step(self):
'''
Determines the agent opinion for the next step by polling its neighbors
The opinion is determined by the majority of the 8 neighbors' opinions
A choice is made at random in case of a tie
The next state is stored until all cells have been polled
'''
neighbor_iter_ = self.model.grid.neighbor_iter((self._row, self._col), True)
neighbors_opinion = Counter(n.get_state() for n in neighbor_iter_)
# Following is a tuple (attribute, occurrences)
polled_opinions = neighbors_opinion.most_common()
tied_opinions = []
for neighbor in polled_opinions:
if neighbor[1] == polled_opinions[0][1]:
tied_opinions.append(neighbor)
self._next_state = random.choice(tied_opinions)[0]
def advance(self):
'''
Set the state of the agent to the next state
'''
self._state = self._next_state
class ColorPatchModel(Model):
'''
represents a 2D lattice where agents live
'''
def __init__(self, width, height):
'''
Create a 2D lattice with strict borders where agents live
The agents' next state is first determined before updating the grid
'''
self._grid = Grid(width, height, torus=False)
self._schedule = SimultaneousActivation(self)
# self._grid.coord_iter()
# --> should really not return content + col + row
# --> but only col & row
# for (contents, col, row) in self._grid.coord_iter():
# replaced content with _ to appease linter
for (_, row, col) in self._grid.coord_iter():
cell = ColorCell((row, col), self,
ColorCell.OPINIONS[random.randrange(0, 16)])
self._grid.place_agent(cell, (row, col))
self._schedule.add(cell)
self.running = True
def step(self):
'''
Advance the model one step.
'''
self._schedule.step()
# the following is a temporary fix for the framework classes accessing
# model attributes directly
# I don't think it should
# --> it imposes upon the model builder to use the attribute names that
# the framework expects.
#
# Traceback included in docstrings
@property
def grid(self):
"""
/mesa/visualization/modules/CanvasGridVisualization.py
is directly accessing Model.grid
76 def render(self, model):
77 grid_state = defaultdict(list)
---> 78 for y in range(model.grid.height):
79 for x in range(model.grid.width):
80 cell_objects = model.grid.get_cell_list_contents([(x, y)])
AttributeError: 'ColorPatchModel' object has no attribute 'grid'
"""
return self._grid
@property
def schedule(self):
"""
mesa_ABM/examples_ABM/color_patches/mesa/visualization/ModularVisualization.py",
line 278, in run_model
while self.model.schedule.steps < self.max_steps and self.model.running:
AttributeError: 'NoneType' object has no attribute 'steps'
"""
return self._schedule
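# Minimal usage sketch (without the visualization framework):
#   model = ColorPatchModel(20, 20)
#   for _ in range(10):
#       model.step()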
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import migrations, models
import django.utils.timezone
from django.utils.timezone import utc
import model_utils.fields
import datetime
class Migration(migrations.Migration):
dependencies = [
('employee', '0006_auto_20151104_2159'),
]
operations = [
migrations.CreateModel(
name='CoachingSession',
fields=[
('id', models.AutoField(primary_key=True, verbose_name='ID', serialize=False, auto_created=True)),
('created', model_utils.fields.AutoCreatedField(verbose_name='created', editable=False, default=django.utils.timezone.now)),
('modified', model_utils.fields.AutoLastModifiedField(verbose_name='modified', editable=False, default=django.utils.timezone.now)),
('start_date_time', models.DateTimeField(default=datetime.datetime(2015, 11, 17, 18, 23, 54, 910572, tzinfo=utc))),
('end_date_time', models.DateTimeField(null=True, blank=True)),
('comments', models.TextField()),
('coach', models.ForeignKey(to='employee.Employee', related_name='coached_sessions')),
('employee', models.ForeignKey(to='employee.Employee', related_name='coaching_sessions')),
],
options={
'abstract': False,
},
),
]
|
# ===============================================================================
# Copyright (C) 2010 Diego Duclos
#
# This file is part of eos.
#
# eos is free software: you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License as published by
# the Free Software Foundation, either version 2 of the License, or
# (at your option) any later version.
#
# eos is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public License
# along with eos. If not, see <http://www.gnu.org/licenses/>.
# ===============================================================================
import hashlib
import random
import string
from sqlalchemy.orm import validates
class User:
    def __init__(self, username, password=None, admin=False):
        self.username = username
        # ensure the attribute always exists so isPasswordValid() can test it
        self.password = None
        if password is not None:
            self.encodeAndSetPassword(password)
        self.admin = admin
def encodeAndSetPassword(self, pw):
h = hashlib.new("sha256")
        salt = "".join([random.choice(string.ascii_letters) for _ in range(32)])
h.update(pw)
h.update(salt)
self.password = ("%s%s" % (h.hexdigest(), salt))
def isPasswordValid(self, pw):
if self.password is None:
return False
salt = self.password[-32:]
h = hashlib.new("sha256")
h.update(pw)
h.update(salt)
return self.password == ("%s%s" % (h.hexdigest(), salt))
@validates("ID", "username", "password", "admin")
def validator(self, key, val):
        validators = {
            "ID"      : lambda _val: isinstance(_val, int),
            "username": lambda _val: isinstance(_val, str),
            "password": lambda _val: isinstance(_val, str) and len(_val) == 96,
            "admin"   : lambda _val: isinstance(_val, bool)
        }
        if not validators[key](val):
raise ValueError(str(val) + " is not a valid value for " + key)
else:
return val
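# A small, hypothetical round-trip check (not part of eos itself); it assumes
# the Python 2 string semantics this module was written for.
if __name__ == "__main__":
    u = User("alice", password="s3cret")
    assert u.isPasswordValid("s3cret")
    assert not u.isPasswordValid("wrong")
    print("password round-trip OK")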
|
#!/usr/bin/python3
from parents.cls_textbank import ClassTextBank
class TextBankPython(ClassTextBank):
def __init__(self):
        self.cls = None                 # current class
        self.cls_string = ""            # filled by wrapUpEntity()
        self.static_attributes = ""
        self.attributes = ""
        self.methods = ""               # definitions of methods with empty bodies
        self.param_comments = ""        # helper variable to store parameter comments
        self.abstract_flag = False
        self.init_present_flag = False  # is the __init__ method present in the diagram?
        self.assoc_index = 1            # incremented to ensure unique names of
                                        # association variables
#--------- FORMAT STRING PATTERNS -----------
# INDENT - set indent for this bank is four spaces
self.indent = " "
# CLASS PATTERN - main pattern to wrap up the whole class definition
# - "{[import]}\n
# class {class_name}:\n{comment}{static_attributes}{method_definitions}\n\n"
self.class_format = "{}\nclass {}:\n{}{}{}\n\n"
# DERIVED CLASS PATTERN -pattern to wrap up derived classes (inherits or implements an interface)
# - "{[import]}\n
# {indent}class {class_name}({parent_class}):\n{comment}{static_attributes}{method_definitions}\n\n"
self.derived_format = "{}\nclass {}({}):\n{}{}{}\n\n"
# ATTRIBUTE PATTERN - for attributes, default value always present, None if not provided
# - "{indent}self.{attr_name} = {value}{line_comment}\n"
self.attribute_format = "{}self.{} = {}{}\n"
# STATIC ATTRIBUTE PATTERN - static for attributes defined in class body but not in any method
# - "{indent}{attr_name} = {value}{line_comment}\n"
self.static_attribute_format = "{}{} = {}{}\n"
# __init__ PATTERN - for defining a method inside class body
# - "\n{indent}def __init__(self):\n\n{object_attributes}\n\n"
self.init_format = "{}def __init__(self):\n\n{}\n\n"
# METHOD PATTERN - for defining a method inside class body
# - "{indent}def {method_name}({parameters}):\n{[multiline_comment]}{body}\n\n"
self.mtd_format = "{}def {}({}):\n{}{}\n\n"
# STATIC METHOD PATTERN - for defining a static method with a decorator
# - "{indent}@staticmethod\n
# - {indent}def {method_name}({parameters}):\n{[multiline_comment]}{body}\n\n"
self.static_mtd_format = "{}@staticmethod\n{}def {}({}):\n{}{}\n\n"
# ABSTRACT METHOD PATTERN - for declaring abstract methods with a decorator
# - "{indent}@abstractmethod\n
# {indent}def {method_name}({parameters}):\n{multiline_comment}\n{indent*2}pass\n"
self.abstract_mtd_format = "{}@abstractmethod\n{}def {}({}):\n{}{}pass\n"
# PARAMETER PATTERN - method parameter without default value
# - "{name},"
self.param_format = "{},"
# DEFAULT PARAMETER VALUE PATTERN - to set a default value for a method parameter
# - "{name} = {default_value},"
self.default_param_format = "{} = {},"
# ABSTRACT MODULE IMPORT - in case abstract methods are present we need to import this
self.abstract_import = "from abc import ABCMeta, abstractmethod\n"
# ABSTRACT STATIC VARIABLE - needs to be present in abstract class as static variable
# - "{indent}__metaclass__ = ABCMeta\n"
self.abstract_var = "{}__metaclass__ = ABCMeta\n\n"
# LIST PATTERN - for defining a list
# - {indent}self.{name} = []\n
self.list_format = "{}self.{} = []\n"
# INSERT CODE COMMENT - to be put inside empty method definition body
# - "\n{indent}# YOUR CODE HERE\n\n"
self.your_code_here = "\n{}# YOUR CODE HERE\n\n"
#LINE COMMENT - hash and the text
# - "#{comment}"
self.line_comment = "#{}"
#MULTI-LINE COMMENT - for class comments, possibly method comments if used in definition part
# - "{indent}\"\"\"\n{comment}\n{indent}\"\"\"\n\n"
self.multiline_comment = "{}\"\"\"\n{}\n{}\"\"\"\n"
#PARAMETER COMMENT - for individual method parameter comments so they can be added in method description.
# - "{indent}{name} - {comment}\n"
self.parameter_comment = "{}{} - {}\n"
def startEntity(self,cls):
"""
Method that prepares this instance attributes to work on a new
class. Sets variable strings to empty strings again.
Args:
cls (cls_class.Class): Class instance to work with.
"""
self.cls = cls
self.cls_string = ""
self.static_attributes = ""
self.attributes = ""
self.methods = ""
self.param_comments = ""
        self.abstract_flag = False
        self.init_present_flag = False  # reset per class, like the other flags
        self.assoc_index = 1
return
def addAttribute(self,attr):
"""Parses the info of given Attribute instance so all neccessary info
is included in the final string in the way that this language
requests. Creates needed strings and appends them to their respective
string variables.
Args:
attr (cls_attribute.Attribute): Attribute instance to parse into text.
"""
s = ""
value = None
#VALUE
#determining attribute value, if not present, default is None
if not attr.value == "":
value = attr.value
#COMMENT
        #formatting the comment if present and adding two indents
if not attr.comment == "":
comment = self.line_comment.format(attr.comment)
comment = "{}{}".format(self.indent * 2,comment)
else:
comment = ""
#STATIC ATTRIBUTE
        #static variables are kept separate
if attr.static_flag:
#creates the attr string
s = self.static_attribute_format.format(self.indent,attr.name,
value,comment)
#concatenates it to the rest of static attributes
self.static_attributes = "{}{}".format(self.static_attributes,s)
#CLASSIC ATTRIBUTE
        #will go into __init__
else:
indent = self.indent * 2
#creates the attr string
s = self.attribute_format.format(indent,attr.name,value,comment)
#concatenates it to the rest of classic attributes
self.attributes = "{}{}".format(self.attributes,s)
return
def addMethod(self,mtd):
"""Using parameters of given method, creates a string
with this method definition with empty body."
Args:
mtd (cls_method.Method): Method instance to parse into text.
"""
#PARAMETERS
#first we generate string with all parameters
param_str = self.getParameters(mtd)
#COMMENT
        #formatting the comment if present, adding two indents; uses the line comment format
        #an abstract method just takes the classic comment
comment = ""
#classic method comment
if not mtd.comment == "":
comment = "{}{}\n".format(self.indent * 2,mtd.comment)
#parameter comments
if not self.param_comments == "":
comment = "{}{}".format(comment,self.param_comments)
#if there is any comment of the two above
if not comment == "":
indent = self.indent * 2
comment = self.multiline_comment.format(indent,comment,indent)
#YOUR CODE HERE
your_code_here = self.your_code_here.format(self.indent * 2)
#ABSTRACT
if mtd.abstract_flag:
s = self.abstract_mtd_format.format(self.indent,self.indent,mtd.name,
param_str,comment,self.indent*2)
self.abstract_flag = True
#formats the abstract static var and adds it to the other static vars
abstract_var = self.abstract_var.format(self.indent)
self.static_attributes = "{}{}".format(self.static_attributes,abstract_var)
#STATIC
elif mtd.static_flag:
s = self.static_mtd_format.format(self.indent,mtd.name,param_str,
comment,your_code_here)
        #__INIT__ METHOD - will be filled with instance attributes
elif mtd.name == "__init__":
combined = "{}{}".format(self.attributes,your_code_here)
s = self.mtd_format.format(self.indent,mtd.name,param_str,
comment,combined)
self.init_present_flag = True
#CLASSIC
else:
s = self.mtd_format.format(self.indent,mtd.name,param_str,
comment,your_code_here)
#adds to the rest
self.methods = "{}{}".format(self.methods,s)
return
def addAssociation(self,assoc):
"""Gets an instance of Association and turns it into a proper
attribute (single value if multiplicity is 1, list for
variable count of values). Adds the attribute to other class
attributes.
Args:
assoc (cls_association.Association) - Association to parse.
"""
#first we determine which member of the association is this class
#and which member is the other class
member = assoc.whichMemberIs(self.cls)
member_dict = None
other = None
other_dict = None
if member == "A":
member_dict = assoc.A_dict
other_dict = assoc.B_dict
other = "B"
else:
member_dict = assoc.B_dict
other_dict = assoc.A_dict
other = "A"
#NEW ATTRIBUTE NAME
name = None
#using the other table's role if it was named in the association
if not other_dict["role"] == "": #format: "rolename_othername_association"
role = other_dict["role"]
            role = role.replace(" ","_") #in case white spaces are present, replace them with _
name = "{}_{}_association".format(role,other_dict["class"].name)
else: #we must manage with format: "othername_association"
name = "{}_association{}".format(other_dict["class"].name,self.assoc_index)
self.assoc_index = self.assoc_index + 1 #increasing the counter, this number is taken
#this class is the "member" class and it will have attribute referencing the "other" class
#thus the multiplicity of the other class matters
s = ""
if assoc.isSingleMultiplicity(other):
s = self.attribute_format.format(self.indent * 2,name,
"None","") #value set as None and no comment
else: #multiple or variable amount of values => list
s = self.list_format.format(self.indent * 2, name)
#adding to the rest
self.attributes = "{}{}".format(self.attributes,s)
return
def getParentString(self,cls):
"""Goes through all classes this class inherits from and puts them
in one string that is to be inserted in the class header.
Returns:
String with all parents and their access modifiers.
"""
parent_string = ""
#first going through classic inheritance
for i in cls.inherits_list:
parent_string = "{}{},".format(parent_string,i.name)
#then the same for interfaces that are realized by this class
for i in cls.realizes_list:
parent_string = "{}{},".format(parent_string,i.name)
#removing the last comma
parent_string = parent_string[:-1]
return parent_string
def getParameters(self,mtd):
"""Generates a string of all parameters of method mtd so they can be
used in that method's signature. Also stores comments of individual
parameters in self.param_comments allowing those to be later added
in the method comment.
Args:
mtd (cls_method.Method) - method whose parameters should be parsed here.
Returns:
A string containing all parameters of the given method separated with comma.
"""
param_str = "" #self is always present
default_param_str = ""
self.param_comments = ""
for param in mtd.param_list:
            if param.value is None:
s = self.param_format.format(param.name)
param_str = "{}{}".format(param_str,s)
else:
#the string with default value is saved in default_param_string
s = self.default_param_format.format(param.name, param.value)
default_param_str = "{}{}".format(default_param_str,s)
if not param.comment == "":
c = self.parameter_comment.format(self.indent * 2, param.name,
param.comment)
self.param_comments = "{}{}".format(self.param_comments,c)
#putting the two together
param_str = "{}{}".format(param_str,default_param_str)
#removing the final comma
param_str = param_str[:-1]
#removing the last newline from comments
self.param_comments = self.param_comments[:-1]
return param_str
def wrapUpEntity(self):
"""Puts together all stored strings to create a complete class declaration.
Saves the string in self.class_string and returns it.
Returns:
Final Class string.
"""
#COMMENT (no indent)
if not self.cls.comment == "":
cls_comment = "{}{}".format(self.indent,self.cls.comment)
comment = self.multiline_comment.format(self.indent,cls_comment,self.indent)
else:
comment = ""
#INIT METHOD
if not self.attributes == "" and not self.init_present_flag:
init = self.init_format.format(self.indent, self.attributes)
self.methods = "{}{}".format(init,self.methods)
#IMPORT - if we have abstract methods, we need to import required module
if self.abstract_flag:
imp = self.abstract_import
else:
imp = ""
        #DERIVED CLASS
if self.cls.inherits_flag or self.cls.realizes_flag:
#string of classes the current class inherits from/realizes an interface
parent_string = self.getParentString(self.cls)
self.cls_string = self.derived_format.format(imp, self.cls.name, parent_string, comment,
self.static_attributes,self.methods)
#NORMAL CLASS
else:
self.cls_string = self.class_format.format(imp, self.cls.name, comment,
self.static_attributes,self.methods)
return self.cls_string
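# A tiny smoke test (hypothetical): the real cls_attribute.Attribute lives
# elsewhere, so a stand-in with just the fields addAttribute() reads is used.
if __name__ == "__main__":
    from collections import namedtuple
    FakeAttr = namedtuple("FakeAttr", "name value comment static_flag")
    bank = TextBankPython()
    bank.addAttribute(FakeAttr("count", "0", "number of items", False))
    print(bank.attributes)  # roughly: "        self.count = 0        #number of items"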
|
# coding: utf-8
import logging
import flask
import werkzeug
from api import helpers
import config
from main import app
@app.errorhandler(400) # Bad Request
@app.errorhandler(401) # Unauthorized
@app.errorhandler(403) # Forbidden
@app.errorhandler(404) # Not Found
@app.errorhandler(405) # Method Not Allowed
@app.errorhandler(409) # Conflict
@app.errorhandler(410) # Gone
@app.errorhandler(418) # I'm a Teapot
@app.errorhandler(422) # Unprocessable Entity
@app.errorhandler(500) # Internal Server Error
def error_handler(e):
code = getattr(e, 'code', 500)
error_name = getattr(e, 'name', 'Internal Server Error')
logging.error('%d - %s: %s', code, error_name, flask.request.url)
if code != 404:
logging.exception(e)
if flask.request.path.startswith('/api/'):
return helpers.handle_error(e)
return flask.render_template(
'error.html',
title='Error %d (%s)!!1' % (code, error_name),
html_class='error-page',
error=e,
), code
if config.PRODUCTION:
@app.errorhandler(Exception)
def production_error_handler(e):
if isinstance(e, werkzeug.exceptions.HTTPException) and e.code in (301, 302):
return e
return error_handler(e)
|
#!/usr/bin/env python3
# ver 0.1 - coding python by Hyuntae Jung on 12/04/2017
import argparse
parser = argparse.ArgumentParser(
formatter_class=argparse.ArgumentDefaultsHelpFormatter,
description='fitting density profile with tanh and erf function')
## args
parser.add_argument('-i', '--input', default='traj.massf.align.avg', nargs='?',
help='mass fraction profile (npy file format, exclude .npy)')
parser.add_argument('-g', '--guess', default='CENTER', nargs='?',
help='initial guess in center value or highest values (CENTER/any)')
parser.add_argument('-symm', '--symmetry', default='YES', nargs='?',
help='Use symmetry or no symmetry of coexistent mole fractions (YES/any)')
parser.add_argument('-show', '--show', default='YES', nargs='?',
help='Save plotting (YES/any)')
parser.add_argument('-o', '--output', default='.fit', nargs='?',
	help='output suffix for fitting result')
parser.add_argument('args', nargs=argparse.REMAINDER)
parser.add_argument('-v', '--version', action='version', version='%(prog)s 0.1')
## read args
args = parser.parse_args()
## Check arguments for log
print(" input arguments: {0}".format(args))
## import modules
import hjung
from hjung import *
import numpy as np
from scipy.special import erf
from scipy.optimize import curve_fit
import matplotlib
matplotlib.use('Agg') # avoid showing figures when running from a shell script
import matplotlib.pyplot as plt
# default for args
args.input = args.input + '.npy'
args.output = args.input + args.output
args.output_png = args.output + '.png'
## timer
start_proc, start_prof = hjung.time.init()
## load data files
massfrac_1d = np.load(args.input)
massfrac_1d = np.transpose(massfrac_1d)
massfrac_1d_avg = massfrac_1d[0]
massfrac_1d_std = massfrac_1d[1]
#print(massfrac_1d_avg)
#print(massfrac_1d_std)
curve_fit_std_off = False
# note: len(np.nonzero(a)) is the number of dimensions, not the number of
# nonzero elements, so count_nonzero is the correct test here
if np.count_nonzero(massfrac_1d_std) != len(massfrac_1d_std):
	print("mass fraction std elements have zeros. Turned off curve_fit using std.")
	curve_fit_std_off = True
nbin = len(massfrac_1d_avg)
## fitting functional form
# wr: mole fraction in A-rich phase
# wp: mole fraction in A-poor phase
# b: center of A-rich phase
# 2c: half-width of A-rich phase
# 2lamda: half-width of interface
def tanh_symm(x, wr, b, c, lamda):
return 1.0-wr+0.50*(2.0*wr-1.0)*(np.tanh((x-b+c)/lamda)-np.tanh((x-b-c)/lamda))
def erf_symm(x, wr, b, c, lamda):
return 1.0-wr+0.50*(2.0*wr-1.0)*(erf((x-b+c)/lamda)-erf((x-b-c)/lamda))
def tanh_nosymm(x, wr, wp, b, c, lamda):
return wp+0.50*(wr-wp)*(np.tanh((x-b+c)/lamda)-np.tanh((x-b-c)/lamda))
def erf_nosymm(x, wr, wp, b, c, lamda):
return wp+0.50*(wr-wp)*(erf((x-b+c)/lamda)-erf((x-b-c)/lamda))
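# quick sanity check of the fit functions (illustrative numbers only): at the
# center of the A-rich phase the symmetric profiles should approach wr
assert abs(tanh_symm(50.0, 0.9, 50.0, 20.0, 2.0) - 0.9) < 1e-6
assert abs(erf_symm(50.0, 0.9, 50.0, 20.0, 2.0) - 0.9) < 1e-6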
## initial guess
if 'CENTER' in args.guess:
b = int(nbin/2 - 1)
wr = massfrac_1d_avg[b]
wp = massfrac_1d_avg[0]
print("center wr (avg,std) = {} +- {}".format(wr,massfrac_1d_std[b]))
print("center wp (avg,std) = {} +- {}".format(wp,massfrac_1d_std[0]))
else:
b = np.argmax(massfrac_1d_avg)
wr = np.max(massfrac_1d_avg)
wp = np.min(massfrac_1d_avg)
c = int(nbin/4)
lamda = int(nbin/10)
## curve fit
x_data = np.linspace(1, nbin, num=nbin, endpoint=True)
if 'YES' in args.symmetry:
	if curve_fit_std_off:
		tanh_opt, tanh_cov = curve_fit(tanh_symm,x_data,massfrac_1d_avg,p0=[wr,b,c,lamda],bounds=(0,[1., nbin, nbin/2., nbin/2.]))
		erf_opt, erf_cov = curve_fit(erf_symm,x_data,massfrac_1d_avg,p0=[wr,b,c,lamda],bounds=(0,[1., nbin, nbin/2., nbin/2.]))
	else:
		tanh_opt, tanh_cov = curve_fit(tanh_symm,x_data,massfrac_1d_avg,p0=[wr,b,c,lamda],sigma=massfrac_1d_std,bounds=(0,[1., nbin, nbin/2., nbin/2.]))
		erf_opt, erf_cov = curve_fit(erf_symm,x_data,massfrac_1d_avg,p0=[wr,b,c,lamda],sigma=massfrac_1d_std,bounds=(0,[1., nbin, nbin/2., nbin/2.]))
else:
	if curve_fit_std_off:
		tanh_opt, tanh_cov = curve_fit(tanh_nosymm,x_data,massfrac_1d_avg,p0=[wr,wp,b,c,lamda],bounds=(0,[1., 1., nbin, nbin/2., nbin/2.]))
		erf_opt, erf_cov = curve_fit(erf_nosymm,x_data,massfrac_1d_avg,p0=[wr,wp,b,c,lamda],bounds=(0,[1., 1., nbin, nbin/2., nbin/2.]))
	else:
		tanh_opt, tanh_cov = curve_fit(tanh_nosymm,x_data,massfrac_1d_avg,p0=[wr,wp,b,c,lamda],sigma=massfrac_1d_std,bounds=(0,[1., 1., nbin, nbin/2., nbin/2.]))
		erf_opt, erf_cov = curve_fit(erf_nosymm,x_data,massfrac_1d_avg,p0=[wr,wp,b,c,lamda],sigma=massfrac_1d_std,bounds=(0,[1., 1., nbin, nbin/2., nbin/2.]))
## plotting
if 'YES' in args.show:
plt.plot(x_data, massfrac_1d_avg, 'b-', label='data')
if 'YES' in args.symmetry:
plt.plot(x_data, tanh_symm(x_data,*tanh_opt), 'r--',label='fit:tanh_symm')
plt.plot(x_data, erf_symm(x_data,*erf_opt), 'g--',label='fit:erf_symm')
else:
plt.plot(x_data, tanh_nosymm(x_data,*tanh_opt), 'r--',label='fit:tanh_nosymm')
plt.plot(x_data, erf_nosymm(x_data,*erf_opt), 'g--',label='fit:erf_nosymm')
plt.legend()
#plt.show()
plt.savefig(args.output_png)
## display all information
# report each parameter with its 1-sigma uncertainty: the square root of the
# corresponding diagonal element of the covariance matrix (the raw diagonal
# is a variance, not a standard deviation)
if 'YES' in args.symmetry:
	param_names = ['wr', 'b', 'c', 'lamda']
else:
	param_names = ['wr', 'wp', 'b', 'c', 'lamda']
for ipar, name in enumerate(param_names):
	print("tanh {} = {} +- {}".format(name, tanh_opt[ipar], np.sqrt(tanh_cov[ipar][ipar])))
for ipar, name in enumerate(param_names):
	print("erf {} = {} +- {}".format(name, erf_opt[ipar], np.sqrt(erf_cov[ipar][ipar])))
## timer
hjung.time.end_print(start_proc, start_prof)
|
# Copyright (c) 2013, Burkhard Ritter
# This code is distributed under the two-clause BSD License.
import os
import ConfigParser
from .serialization import archive_exists
from .indexfile import IndexFile
DEFAULT_CONFIG_FILE='''\
[coma]
; experiment_file = experiment.${experiment_id}
; experiment_index = experiment.index
; measurement_file = measurement.${measurement_id}
; measurement_index = measurement.index
; archive_default_format = json
; archive_pretty_print = yes
; serializer_getstate = coma_getstate
; serializer_setstate = coma_setstate
'''
CONFIG_OPTIONS = [
('experiment_file', 'str'),
('experiment_index', 'str'),
('measurement_file', 'str'),
('measurement_index', 'str'),
('archive_default_format', 'str'),
('archive_pretty_print', 'bool'),
('serializer_getstate', 'str'),
('serializer_setstate', 'str')
]
CONFIG_DIR = '~/.config/coma'
def expand_path(f):
f = os.path.expanduser(f)
if os.path.exists(f) or archive_exists(f):
f = os.path.abspath(f)
else:
p = os.path.expanduser(CONFIG_DIR)
f = os.path.join(p, f)
return f
def load_config(configfile='preferences.conf'):
"""Load config from a file.
The configuration is returned as a dictionary. If no filename is specified
the default config file in "~/.config/coma/preferences.conf" is used.
"""
f = expand_path(configfile)
if not os.path.exists(f):
return {}
c = ConfigParser.RawConfigParser()
c.read(f)
d = {}
opts = CONFIG_OPTIONS
for o,t in opts:
if c.has_option('coma', o):
if t == 'str':
d[o] = c.get('coma', o)
elif t == 'bool':
d[o] = c.getboolean('coma', o)
return d
def create_config_file(configfile):
configfile = os.path.expanduser(configfile)
if os.path.exists(configfile):
print('Config file "{}" already exists'.format(configfile))
return
try:
print('Creating config file "{}"'.format(configfile))
f = open(configfile, 'w')
f.write(DEFAULT_CONFIG_FILE)
f.close()
except IOError:
print('Warning: Could not create config file "{}"'.format(configfile))
def create_default_config(create_experiment_index=True):
"""Create a default config file and experiment index file in ~/.config/coma."""
p = os.path.expanduser(CONFIG_DIR)
cfile = os.path.join(p, 'preferences.conf')
ifile = os.path.join(p, 'experiment.index')
if not os.path.exists(p):
print('Creating directory {}'.format(p))
os.mkdir(p)
create_config_file(cfile)
if create_experiment_index:
i = IndexFile(ifile, 'experiment')
if i.exists():
print('Experiment index "{}" already exists'.format(ifile))
else:
print('Creating experiment index "{}"'.format(ifile))
i.create()
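if __name__ == '__main__':
    # Hypothetical demo, not part of coma itself. Side effect: creates
    # ~/.config/coma/preferences.conf and the experiment index if missing.
    create_default_config()
    print(load_config())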
|
#! /usr/bin/python
#
# This script filters a VCF file: all variants that are physically less than
# 4 bp apart are dropped, since those might represent small inversions or,
# more generally, variants with bad alignment. A separate file containing the
# scaffold and position (bp) of each variant has to be supplied; it can be
# generated with something like:
#   grep -v '#' variants.vcf | cut -f1,2 -d$'\t' > snpids.txt
# Usage: ./dropCloseVars.py snpids.txt helico30f1.vcf > helico30f2.vcf
import sys
mindist = 4
scaf = list()
pos = list()
keep = list()
with open(sys.argv[1], 'rb') as snpids:
    for line in snpids:
        if not line.strip():
            continue
        scaf.append(line.split('\t')[0])
        pos.append(int(line.split('\t')[1]))
lc = len(pos) # the number of variants, taken from the file itself
for i in range(lc):
if i == lc - 1 :
if scaf[i] == scaf[i-1] and pos[i]-pos[i-1] < mindist:
keep.append(0)
#print i, scaf[i], pos[i], scaf[i-1], pos[i-1], keep[i], pos[i]-pos[i-1]
else:
keep.append(1)
#print i, scaf[i], pos[i], scaf[i-1], pos[i-1], keep[i], pos[i]-pos[i-1]
elif i == 0:
if scaf[i] == scaf[i+1] and pos[i+1]-pos[i] < mindist:
keep.append(0)
#print i, scaf[i], pos[i], scaf[i+1], pos[i+1], keep[i], pos[i+1]-pos[i]
else:
keep.append(1)
#print i, scaf[i], pos[i], scaf[i+1], pos[i+1], keep[i], pos[i+1]-pos[i]
else:
if scaf[i] == scaf[i-1] and pos[i]-pos[i-1] < mindist and scaf[i] == scaf[i+1] and pos[i+1]-pos[i] < mindist:
keep.append(0)
#print i, scaf[i], pos[i], scaf[i+1], pos[i+1], scaf[i-1], pos[i-1], keep[i]
else:
keep.append(1)
#print i, scaf[i], pos[i], scaf[i+1], pos[i+1], scaf[i-1], pos[i-1], keep[i]
#print len(keep), keep.count(0), keep.count(1)
with open(sys.argv[2], 'rb') as vcfile:
    j = 0 # index into keep[], advanced only on variant (non-header) lines
    for line in vcfile:
        if line[0] == '#':
            print line.split('\n')[0]
        else:
            if keep[j] == 1:
                print line.split('\n')[0]
            j += 1
#nCommentLines = 0
#for i, line in enumerate(vcfile):
# if line[0] == '#':
# #print line.split('\n')[0]
# nCommentLines += 1
# else:
# indx = i + 2 #+ nCommentLines
# #"head -n829 helico30.vcf | tail -n2"
# cmd = "head -n%i %s | tail -n2" % (indx, sys.argv[1])
# #var = os.system(cmd)
# varStr = subprocess.check_output([cmd], shell=True)
# posList = varStr.split('\n')
# scaf1, pos1 = posList[0].split('\t')[0:2]
# scaf2, pos2 = posList[1].split('\t')[0:2]
# print line.split('\t')[0], line.split('\t')[1], scaf1, pos1, scaf2, pos2, nCommentLines
# for i, line in enumerate(vcfile):
# vcfile.tell()
# if line[0] == '#':
# continue
# else:
# lastline = line
# scaf1, pos1 = lastline.split('\t')[0:2]
# newline = next(vcfile)
# #newline = line
# scaf2, pos2 = newline.split('\t')[0:2]
# print scaf1, pos1, scaf2, pos2 #lastline, newline
# else:
# print scaf1, pos1
|
# -*- coding: utf-8 -*-
"""
@author: Rinze de Laat
Copyright © 2012 Rinze de Laat, Delmic
This file is part of Odemis.
Odemis is free software: you can redistribute it and/or modify it under the terms of the GNU
General Public License version 2 as published by the Free Software Foundation.
Odemis is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even
the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General
Public License for more details.
You should have received a copy of the GNU General Public License along with Odemis. If not,
see http://www.gnu.org/licenses/. """
from __future__ import division, print_function
import collections
import logging
from logging.handlers import RotatingFileHandler
from odemis.gui import FG_COLOUR_ERROR, FG_COLOUR_WARNING, FG_COLOUR_DIS, FG_COLOUR_MAIN
from odemis.gui.util import wxlimit_invocation, get_home_folder
from odemis.gui.comp.popup import show_message
from odemis.model import ST_RUNNING, HwError
import os.path
import sys
import threading
import wx
LOG_FILE = "odemis-gui.log"
LOG_LINES = 500 # maximum lines in the GUI logger
log = logging.getLogger() # for compatibility only
def logging_remote_exception(msg, *args):
""" Same as logging.exception, but also display remote exception info from Pyro """
    logging.error(msg, *args, exc_info=1)
try:
ex_type, ex_value, ex_tb = sys.exc_info()
remote_tb = ex_value._pyroTraceback
logging.error("Remote exception %s", "".join(remote_tb))
except AttributeError:
pass
# monkey patching
logging.exception = logging_remote_exception
def init_logger(level=logging.DEBUG, log_file=None):
"""
Initializes the logger to some nice defaults
To be called only once, at the initialisation
"""
if level <= logging.INFO:
pyrolog = logging.getLogger("Pyro4")
pyrolog.setLevel(min(pyrolog.getEffectiveLevel(), level))
logging.basicConfig(format=" - %(levelname)s \t%(message)s")
l = logging.getLogger()
l.setLevel(level)
frm = "%(asctime)s\t%(levelname)s\t%(module)s:%(lineno)d:\t%(message)s"
l.handlers[0].setFormatter(logging.Formatter(frm))
# Create file handler
# Path to the log file
logfile_path = log_file or os.path.join(get_home_folder(), LOG_FILE)
file_format = logging.Formatter(frm)
# Max 5 log files of 10Mb
file_handler = RotatingFileHandler(logfile_path, maxBytes=10 * (2 ** 20), backupCount=5)
file_handler.setFormatter(file_format)
log.addHandler(file_handler)
def create_gui_logger(log_field, debug_va=None, level_va=None):
"""
Connect the log output to the text field instead of the standard output
log_field (wx text field)
debug_va (Boolean VigilantAttribute)
"""
# Create gui handler
frm = "%(asctime)s %(levelname)-7s %(module)-15s: %(message)s"
gui_format = logging.Formatter(frm, '%H:%M:%S')
text_field_handler = TextFieldHandler()
text_field_handler.setTextField(log_field)
if debug_va is not None:
text_field_handler.setDebugVA(debug_va)
if level_va is not None:
text_field_handler.setLevelVA(level_va)
text_field_handler.setFormatter(gui_format)
logging.debug("Switching to GUI logger")
# remove standard output handler if still there
for handler in log.handlers:
if isinstance(handler, logging.StreamHandler):
log.removeHandler(handler)
try:
log.addHandler(text_field_handler)
except:
# Use print here because log probably doesn't work
print("Failed to set-up logging handlers")
logging.exception("Failed to set-up logging handlers")
raise
def stop_gui_logger():
"""
Stop the logger from displaying logs to the GUI.
Use just before ending the GUI.
"""
# remove whatever handler was already there
for handler in log.handlers:
if isinstance(handler, TextFieldHandler):
log.removeHandler(handler)
class TextFieldHandler(logging.Handler):
""" Custom log handler, used to output log entries to a text field. """
TEXT_STYLES = (
wx.TextAttr(FG_COLOUR_ERROR, None),
wx.TextAttr(FG_COLOUR_WARNING, None),
wx.TextAttr(FG_COLOUR_MAIN, None),
wx.TextAttr(FG_COLOUR_DIS, None),
)
def __init__(self):
""" Call the parent constructor and initialize the handler """
logging.Handler.__init__(self)
self.textfield = None
self.debug_va = None
self.level_va = None
# queue of tuple (str, TextAttr) = text, style
self._to_print = collections.deque(maxlen=LOG_LINES)
self._print_lock = threading.Lock()
def setTextField(self, textfield):
self.textfield = textfield
self.textfield.Clear()
def setDebugVA(self, debug_va):
self.debug_va = debug_va
def setLevelVA(self, level_va):
self.level_va = level_va
def emit(self, record):
""" Write a record, in colour, to a text field. """
if self.textfield is not None:
if record.levelno >= logging.ERROR:
text_style = self.TEXT_STYLES[0]
elif record.levelno == logging.WARNING:
text_style = self.TEXT_STYLES[1]
elif record.levelno == logging.INFO:
text_style = self.TEXT_STYLES[2]
else:
text_style = self.TEXT_STYLES[3]
if self.level_va and record.levelno > self.level_va.value:
self.level_va.value = record.levelno
# Do the actual writing in a rate-limited thread, so logging won't
# interfere with the GUI drawing process.
# Note: we need to do the formatting now, otherwise it could end-up
# showing the content of a variable delayed by 0.2s.
self._to_print.append((self.format(record), text_style))
self.write_to_field()
@wxlimit_invocation(0.2)
def write_to_field(self):
with self._print_lock:
# Process the latest messages
try:
prev_style = None
while True:
txt, text_style = self._to_print.popleft()
if prev_style != text_style:
self.textfield.SetDefaultStyle(text_style)
prev_style = text_style
self.textfield.AppendText(txt + "\n")
except IndexError:
pass # end of the queue
# Removes the characters from position 0 up to and including the Nth line break
nb_lines = self.textfield.GetNumberOfLines()
nb_old = nb_lines - LOG_LINES
if nb_old > 0:
first_new = 0
txt = self.textfield.Value
for i in range(nb_old):
first_new = txt.find('\n', first_new) + 1
self.textfield.Remove(0, first_new)
self.textfield.Refresh()
# List for passing component.name to the function stage_change_pop_up
state_subscribers = []
# List with reference to components observed for a state change
observed_components = []
def observe_comp_state(comps):
    '''
    Produces a warning/information pop-up in the OS if an error occurs in a
    component or if a component recovers from an error.
    :param comps: list with all the components and their data
    '''
global observed_components
observed_components = comps
for component in comps:
def state_change_pop_up(component_state_value, component_name=component.name):
if component_state_value == ST_RUNNING:
show_message(wx.GetApp().main_frame, 'Recovered ' + component_name,
'Functionality of the "' + component_name + '" is recovered successfully.',
timeout=3.0, level=logging.INFO)
elif isinstance(component_state_value, HwError):
show_message(wx.GetApp().main_frame, 'Error in ' + component_name, str(component_state_value),
timeout=5.0, level=logging.WARNING)
# Keep a reference to each subscriber function so they won't get dereferenced (because VA's use weakrefs)
state_subscribers.append(state_change_pop_up)
component.state.subscribe(state_change_pop_up)
|
import json
try:
from django.urls import reverse
except ImportError:
from django.core.urlresolvers import reverse
from django.http import HttpResponse
from django.shortcuts import render
from crits.exploits.exploit import Exploit
from crits.core.class_mapper import class_from_id
from crits.core.crits_mongoengine import EmbeddedCampaign, json_handler
from crits.core.crits_mongoengine import EmbeddedSource
from crits.core.crits_mongoengine import create_embedded_source
from crits.core.handlers import build_jtable, jtable_ajax_list
from crits.core.handlers import jtable_ajax_delete
from crits.core.handlers import csv_export
from crits.core.user_tools import is_user_subscribed, user_sources
from crits.core.user_tools import is_user_favorite
from crits.notifications.handlers import remove_user_from_notification
from crits.services.handlers import run_triage, get_supported_services
from crits.vocabulary.relationships import RelationshipTypes
from crits.vocabulary.acls import ExploitACL
def generate_exploit_csv(request):
"""
Generate a CSV file of the Exploit information
:param request: The request for this CSV.
:type request: :class:`django.http.HttpRequest`
:returns: :class:`django.http.HttpResponse`
"""
response = csv_export(request, Exploit)
return response
def generate_exploit_jtable(request, option):
"""
Generate the jtable data for rendering in the list template.
:param request: The request for this jtable.
:type request: :class:`django.http.HttpRequest`
:param option: Action to take.
:type option: str of either 'jtlist', 'jtdelete', or 'inline'.
:returns: :class:`django.http.HttpResponse`
"""
obj_type = Exploit
type_ = "exploit"
mapper = obj_type._meta['jtable_opts']
if option == "jtlist":
# Sets display url
details_url = mapper['details_url']
details_url_key = mapper['details_url_key']
fields = mapper['fields']
response = jtable_ajax_list(obj_type,
details_url,
details_url_key,
request,
includes=fields)
return HttpResponse(json.dumps(response,
default=json_handler),
content_type="application/json")
if option == "jtdelete":
response = {"Result": "ERROR"}
if jtable_ajax_delete(obj_type, request):
response = {"Result": "OK"}
return HttpResponse(json.dumps(response,
default=json_handler),
content_type="application/json")
jtopts = {
'title': "Exploits",
'default_sort': mapper['default_sort'],
'listurl': reverse('crits-%ss-views-%ss_listing' %
(type_, type_), args=('jtlist',)),
'deleteurl': reverse('crits-%ss-views-%ss_listing' %
(type_, type_), args=('jtdelete',)),
'searchurl': reverse(mapper['searchurl']),
'fields': mapper['jtopts_fields'],
'hidden_fields': mapper['hidden_fields'],
'linked_fields': mapper['linked_fields'],
'details_link': mapper['details_link'],
'no_sort': mapper['no_sort']
}
jtable = build_jtable(jtopts, request)
jtable['toolbar'] = [
{
'tooltip': "'Add Exploit'",
'text': "'Add Exploit'",
'click': "function () {$('#new-exploit').click()}",
},
]
if option == "inline":
return render(request, "jtable.html",
{'jtable': jtable,
'jtid': '%s_listing' % type_,
'button': '%ss_tab' % type_},
)
else:
return render(request, "%s_listing.html" % type_,
{'jtable': jtable,
'jtid': '%s_listing' % type_},
)
def get_exploit_details(id_, user):
"""
Generate the data to render the Exploit details template.
:param id_: The Exploit ObjectId to get details for.
:type id_: str
:param user: The user requesting this information.
:type user: CRITsUser
:returns: template (str), arguments (dict)
"""
allowed_sources = user_sources(user)
exploit = Exploit.objects(id=id_, source__name__in=allowed_sources).first()
template = None
args = {}
    if exploit and not user.check_source_tlp(exploit):
        exploit = None
if not exploit:
template = "error.html"
error = ('Either no data exists for this Exploit or you do not have'
' permission to view it.')
args = {'error': error}
else:
exploit.sanitize("%s" % user)
# remove pending notifications for user
remove_user_from_notification("%s" % user, exploit.id, 'Exploit')
# subscription
subscription = {
'type': 'Exploit',
'id': exploit.id,
'subscribed': is_user_subscribed("%s" % user,
'Exploit',
exploit.id),
}
#objects
objects = exploit.sort_objects()
#relationships
relationships = exploit.sort_relationships("%s" % user, meta=True)
# relationship
relationship = {
'type': 'Exploit',
'value': exploit.id
}
#comments
comments = {'comments': exploit.get_comments(),
'url_key': exploit.id}
#screenshots
screenshots = exploit.get_screenshots(user)
# favorites
favorite = is_user_favorite("%s" % user, 'Exploit', exploit.id)
# services
service_list = get_supported_services('Exploit')
# analysis results
service_results = exploit.get_analysis_results()
args = {'objects': objects,
'relationships': relationships,
'relationship': relationship,
'subscription': subscription,
'favorite': favorite,
'service_list': service_list,
'service_results': service_results,
'screenshots': screenshots,
'exploit': exploit,
'exploit_id': id_,
'comments': comments,
'ExploitACL': ExploitACL}
return template, args
def add_new_exploit(name, cve=None, description=None, source=None,
source_method=None, source_reference=None, source_tlp=None,
campaign=None, confidence=None, user=None,
                    bucket_list=None, ticket=None, related_id=None,
related_type=None, relationship_type=None):
"""
Add an Exploit to CRITs.
:param name: The name of the exploit.
:type name: str
:param cve: CVE for the exploit.
:type cve: str
:param description: Description of the exploit.
:type description: str
:param source: Name of the source which provided this information.
:type source: str
:param source_method: Method of acquiring this data.
:type source_method: str
:param source_reference: A reference to this data.
:type source_reference: str
    :param source_tlp: TLP value for this source.
    :type source_tlp: str
:param campaign: A campaign to attribute to this exploit.
:type campaign: str
:param confidence: Confidence level in the campaign attribution.
:type confidence: str ("low", "medium", "high")
:param user: The user adding this exploit.
:type user: str
:param bucket_list: Buckets to assign to this exploit.
:type bucket_list: str
:param ticket: Ticket to assign to this exploit.
:type ticket: str
:param related_id: ID of object to create relationship with
:type related_id: str
:param related_type: Type of object to create relationship with
:type related_type: str
:param relationship_type: Type of relationship to create.
:type relationship_type: str
:returns: dict with keys:
"success" (boolean),
"message" (str),
"object" (if successful) :class:`crits.exploits.exploit.Exploit`
"""
is_item_new = False
retVal = {}
exploit = Exploit.objects(name=name).first()
if not exploit:
exploit = Exploit()
exploit.name = name
if description:
exploit.description = description.strip()
is_item_new = True
username = user.username
if isinstance(source, basestring):
if user.check_source_write(source):
source = [create_embedded_source(source,
reference=source_reference,
method=source_method,
tlp=source_tlp,
analyst=username)]
else:
return {"success": False,
"message": "User does not have permission to add object \
using source %s." % source}
elif isinstance(source, EmbeddedSource):
source = [source]
if isinstance(campaign, basestring):
c = EmbeddedCampaign(name=campaign, confidence=confidence, analyst=username)
campaign = [c]
if campaign:
for camp in campaign:
exploit.add_campaign(camp)
if source:
for s in source:
exploit.add_source(s)
else:
return {"success" : False, "message" : "Missing source information."}
    if cve:
        exploit.cve = cve.strip()
if bucket_list:
exploit.add_bucket_list(bucket_list, user)
if ticket:
exploit.add_ticket(ticket, user)
related_obj = None
if related_id:
related_obj = class_from_id(related_type, related_id)
if not related_obj:
retVal['success'] = False
retVal['message'] = 'Related Object not found.'
return retVal
exploit.save(username=username)
if related_obj and exploit and relationship_type:
relationship_type=RelationshipTypes.inverse(relationship=relationship_type)
exploit.add_relationship(related_obj,
relationship_type,
analyst=user,
get_rels=False)
exploit.save(username=username)
# run exploit triage
if is_item_new:
exploit.reload()
run_triage(exploit, username)
resp_url = reverse('crits-exploits-views-exploit_detail', args=[exploit.id])
retVal['message'] = ('Success! Click here to view the new Exploit: '
'<a href="%s">%s</a>' % (resp_url, exploit.name))
retVal['success'] = True
retVal['object'] = exploit
retVal['id'] = str(exploit.id)
return retVal
def exploit_remove(id_, username):
"""
Remove an Exploit from CRITs.
:param id_: The ObjectId of the Exploit to remove.
:type id_: str
:param username: The user removing this Exploit.
:type username: str
:returns: dict with keys "success" (boolean) and "message" (str) if failed.
"""
exploit = Exploit.objects(id=id_).first()
if exploit:
exploit.delete(username=username)
return {'success': True}
else:
return {'success': False, 'message': 'Could not find Exploit.'}
def set_exploit_name(id_, name, user, **kwargs):
"""
Set a Exploit name.
:param id_: Exploit ObjectId.
:type id_: str
:param name: The new name.
:type name: str
:param user: The user updating the name.
:type user: str
:returns: dict with keys:
"success" (boolean),
"message" (str),
"""
sources = user_sources(user)
exploit = Exploit.objects(id=id_, source__name__in=sources).first()
if not exploit:
return {'success': False,
'message': "Could not find exploit"}
exploit.name = name.strip()
exploit.save(username=user)
return {'success': True}
def update_exploit_cve(id_, cve, user, **kwargs):
"""
Update CVE for a Exploit.
:param id_: The ObjectId of the Exploit to update.
:type id_: str
:param cve: The CVE we are setting.
    :type cve: str
:param user: The user updating the CVE.
:type user: str
:returns: dict
"""
sources = user_sources(user)
exploit = Exploit.objects(id=id_, source__name__in=sources).first()
if not exploit:
return {'success': False,
'message': 'No exploit could be found.'}
else:
exploit.cve = cve
exploit.save(username=user)
return {'success': True}
|
# Copyright 2008-2015 Nokia Solutions and Networks
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import sys
import inspect
from robot.errors import DataError
from .encoding import decode_from_system
from .error import get_error_details
from .platform import JYTHON, IRONPYTHON
from .robotpath import abspath, normpath
from .robottypes import type_name, is_unicode
if JYTHON:
from java.lang.System import getProperty
class Importer(object):
def __init__(self, type=None, logger=None):
if not logger:
from robot.output import LOGGER as logger
self._type = type or ''
self._logger = logger
self._importers = (ByPathImporter(logger),
NonDottedImporter(logger),
DottedImporter(logger))
self._by_path_importer = self._importers[0]
def import_class_or_module(self, name, instantiate_with_args=None,
return_source=False):
"""Imports Python class/module or Java class with given name.
Class can either live in a module/package or be standalone Java class.
In the former case the name is something like 'MyClass' and in the
latter it could be 'your.package.YourLibrary'. Python classes always
live in a module, but if the module name is exactly same as the class
name then simple 'MyLibrary' will import a class.
Python modules can be imported both using format 'MyModule' and
'mymodule.submodule'.
`name` can also be a path to the imported file/directory. In that case
importing is done using `import_class_or_module_by_path` method.
If `instantiate_with_args` is not None, imported classes are
instantiated with the specified arguments automatically.
"""
try:
imported, source = self._import_class_or_module(name)
self._log_import_succeeded(imported, name, source)
imported = self._instantiate_if_needed(imported, instantiate_with_args)
except DataError as err:
self._raise_import_failed(name, err)
return (imported, source) if return_source else imported
def _import_class_or_module(self, name):
for importer in self._importers:
if importer.handles(name):
return importer.import_(name)
def import_class_or_module_by_path(self, path, instantiate_with_args=None):
"""Import a Python module or Java class using a file system path.
When importing a Python file, the path must end with '.py' and the
actual file must also exist. When importing Java classes, the path
must end with '.java' or '.class'. The class file must exist in both
cases and in the former case also the source file must exist.
If `instantiate_with_args` is not None, imported classes are
instantiated with the specified arguments automatically.
"""
try:
imported, source = self._by_path_importer.import_(path)
self._log_import_succeeded(imported, imported.__name__, source)
return self._instantiate_if_needed(imported, instantiate_with_args)
except DataError as err:
self._raise_import_failed(path, err)
def _raise_import_failed(self, name, error):
import_type = '%s ' % self._type if self._type else ''
msg = "Importing %s'%s' failed: %s" % (import_type, name, error.message)
if not error.details:
raise DataError(msg)
msg = [msg, error.details]
msg.extend(self._get_items_in('PYTHONPATH', sys.path))
if JYTHON:
classpath = getProperty('java.class.path').split(os.path.pathsep)
msg.extend(self._get_items_in('CLASSPATH', classpath))
raise DataError('\n'.join(msg))
def _get_items_in(self, type, items):
yield '%s:' % type
for item in items:
if item:
yield ' %s' % (item if is_unicode(item)
else decode_from_system(item))
def _instantiate_if_needed(self, imported, args):
if args is None:
return imported
if inspect.isclass(imported):
return self._instantiate_class(imported, args)
if args:
raise DataError("Modules do not take arguments.")
return imported
def _instantiate_class(self, imported, args):
try:
return imported(*args)
except:
raise DataError('Creating instance failed: %s\n%s' % get_error_details())
def _log_import_succeeded(self, item, name, source):
import_type = '%s ' % self._type if self._type else ''
item_type = 'module' if inspect.ismodule(item) else 'class'
location = ("'%s'" % source) if source else 'unknown location'
self._logger.info("Imported %s%s '%s' from %s."
% (import_type, item_type, name, location))
class _Importer(object):
def __init__(self, logger):
self._logger = logger
def _import(self, name, fromlist=None, retry=True):
try:
try:
return __import__(name, fromlist=fromlist)
except ImportError:
# Hack to support standalone Jython. For more information, see:
# https://github.com/robotframework/robotframework/issues/515
# http://bugs.jython.org/issue1778514
if JYTHON and fromlist and retry:
__import__('%s.%s' % (name, fromlist[0]))
return self._import(name, fromlist, retry=False)
# IronPython loses traceback when using plain raise.
# https://github.com/IronLanguages/main/issues/989
if IRONPYTHON:
exec('raise sys.exc_type, sys.exc_value, sys.exc_traceback')
raise
except:
raise DataError(*get_error_details())
def _verify_type(self, imported):
if inspect.isclass(imported) or inspect.ismodule(imported):
return imported
raise DataError('Expected class or module, got %s.'
% type_name(imported))
def _get_class_from_module(self, module, name=None):
klass = getattr(module, name or module.__name__, None)
return klass if inspect.isclass(klass) else None
def _get_source(self, imported):
try:
return abspath(inspect.getfile(imported))
except TypeError:
return None
class ByPathImporter(_Importer):
_valid_import_extensions = ('.py', '.java', '.class', '')
def handles(self, path):
return os.path.isabs(path)
def import_(self, path):
self._verify_import_path(path)
self._remove_wrong_module_from_sys_modules(path)
module = self._import_by_path(path)
imported = self._get_class_from_module(module) or module
return self._verify_type(imported), path
def _verify_import_path(self, path):
if not os.path.exists(path):
raise DataError('File or directory does not exist.')
if not os.path.isabs(path):
raise DataError('Import path must be absolute.')
if not os.path.splitext(path)[1] in self._valid_import_extensions:
raise DataError('Not a valid file or directory to import.')
def _remove_wrong_module_from_sys_modules(self, path):
importing_from, name = self._split_path_to_module(path)
importing_package = os.path.splitext(path)[1] == ''
if self._wrong_module_imported(name, importing_from, importing_package):
del sys.modules[name]
self._logger.info("Removed module '%s' from sys.modules to import "
"fresh module." % name)
def _split_path_to_module(self, path):
module_dir, module_file = os.path.split(abspath(path))
module_name = os.path.splitext(module_file)[0]
if module_name.endswith('$py'):
module_name = module_name[:-3]
return module_dir, module_name
def _wrong_module_imported(self, name, importing_from, importing_package):
if name not in sys.modules:
return False
source = getattr(sys.modules[name], '__file__', None)
if not source: # play safe (occurs at least with java based modules)
return True
imported_from, imported_package = self._get_import_information(source)
return (normpath(importing_from, case_normalize=True) !=
normpath(imported_from, case_normalize=True) or
importing_package != imported_package)
def _get_import_information(self, source):
imported_from, imported_file = self._split_path_to_module(source)
imported_package = imported_file == '__init__'
if imported_package:
imported_from = os.path.dirname(imported_from)
return imported_from, imported_package
def _import_by_path(self, path):
module_dir, module_name = self._split_path_to_module(path)
sys.path.insert(0, module_dir)
try:
return self._import(module_name)
finally:
sys.path.pop(0)
class NonDottedImporter(_Importer):
def handles(self, name):
return '.' not in name
def import_(self, name):
module = self._import(name)
imported = self._get_class_from_module(module) or module
return self._verify_type(imported), self._get_source(imported)
class DottedImporter(_Importer):
def handles(self, name):
return '.' in name
def import_(self, name):
parent_name, lib_name = name.rsplit('.', 1)
parent = self._import(parent_name, fromlist=[str(lib_name)])
try:
imported = getattr(parent, lib_name)
except AttributeError:
raise DataError("Module '%s' does not contain '%s'."
% (parent_name, lib_name))
imported = self._get_class_from_module(imported, lib_name) or imported
return self._verify_type(imported), self._get_source(imported)
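# A minimal usage sketch (assumes Robot Framework itself is importable, since
# this module depends on robot.errors and robot.output):
if __name__ == '__main__':
    importer = Importer(type='test library')
    os_module = importer.import_class_or_module('os')
    print(os_module is os)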
|
# -*- test-case-name: imaginary -*-
"""
This module implements a highly abstract graph-traversal system for actions and
events to locate the objects which can respond to them. The top-level
entry-point to this system is L{Idea.obtain}.
It also implements several basic retrievers related to visibility and physical
reachability.
"""
import attr
from zope.interface import implements
from imaginary.iimaginary import (
INameable, ILitLink, IThing, IObstruction, IElectromagneticMedium,
IDistance, IRetriever, IExit)
@attr.s
class Link(object):
"""
A L{Link} is a connection between two L{Idea}s in a L{Path}.
@ivar source: the idea that this L{Link} originated from.
@type source: L{Idea}
@ivar target: the idea that this L{Link} refers to.
@type target: L{Idea}
@ivar annotations: The domain-specific simulation annotations that apply to
this link.
@type annotations: L{list}
"""
source = attr.ib()
target = attr.ib()
annotations = attr.ib(default=attr.Factory(list))
def annotate(self, annotations):
"""
Annotate this link with a list of annotations.
"""
self.annotations.extend(annotations)
def of(self, interface):
"""
Yield all annotations on this link which provide the given interface.
"""
for annotation in self.annotations:
provider = interface(annotation, None)
if provider is not None:
yield provider
@attr.s(repr=False)
class Path(object):
"""
A list of L{Link}s.
@ivar links: A L{list} of L{Link}s describing a path through the simulation
graph. The order is significant. The target of each link is the
source of the subsequent link.
@type links: L{list} of L{Link}s
"""
links = attr.ib()
def of(self, interface):
"""
@return: an iterator of providers of interfaces, adapted from each link
in this path.
"""
for link in self.links:
for annotation in link.of(interface):
yield annotation
def eachSubPath(self):
"""
Iterate over each path which is a prefix of this path.
@return: A generator which yields L{Path} instances. The first
instance yielded is a L{Path} with only the first L{Link} of this
path. The second instance yielded has the first and second
L{Link}s of this path. This pattern continues until a L{Path} with
            the same L{Link}s as this L{Path} is yielded.
"""
for x in range(1, len(self.links) + 1):
yield Path(links=self.links[:x])
def eachTargetAs(self, interface):
"""
@return: an iterable of all non-None results of each L{Link.targetAs}
method in this L{Path}'s C{links} attribute.
"""
for link in self.links:
provider = interface(link.target.delegate, None)
if provider is not None:
yield provider
def targetAs(self, interface):
"""
Retrieve the target of the last link of this path, its final
destination, as a given interface.
@param interface: the interface to retrieve.
@type interface: L{zope.interface.interfaces.IInterface}
@return: the last link's target, adapted to the given interface, or
C{None} if no appropriate adapter or component exists.
@rtype: C{interface} or C{NoneType}
"""
return interface(self.links[-1].target.delegate, None)
def isCyclic(self):
"""
Determine if this path is cyclic, to avoid descending down infinite
loops.
@return: a boolean indicating whether this L{Path} is cyclic or not,
i.e. whether the L{Idea} its last link points at is the source of
any of its links.
"""
if len(self.links) < 2:
return False
return (self.links[-1].target in (x.source for x in self.links))
def to(self, link):
"""
Create a new path, extending this one by one new link.
"""
return Path(links=self.links + [link])
def __repr__(self):
"""
@return: an expanded pretty-printed representation of this Path,
suitable for debugging.
"""
s = 'Path('
for link in self.links:
dlgt = link.target.delegate
src = link.source.delegate
s += "\n\t"
s += repr(getattr(src, 'name', src))
s += " => "
s += repr(getattr(dlgt, 'name', dlgt))
s += " "
s += repr(link.annotations)
s += ')'
return s
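# A quick illustrative check of Path cyclicity detection, using plain strings
# as stand-in nodes (real paths link L{Idea} instances):
if __name__ == "__main__":
    p = Path(links=[Link(source="a", target="b"), Link(source="b", target="c")])
    assert not p.isCyclic()
    assert p.to(Link(source="c", target="a")).to(Link(source="a", target="b")).isCyclic()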
@attr.s
class Idea(object):
"""
Consider a person's activities with the world around them as having two
layers. One is a physical layer, out in the world, composed of matter and
energy. The other is a cognitive layer, internal to the person, composed
of ideas about that matter and energy.
For example, when a person wants to sit in a wooden chair, they must first
visually locate the arrangement of wood in question, make the determination
of that it is a "chair" based on its properties, and then perform the
appropriate actions to sit upon it.
However, a person may also interact with symbolic abstractions rather than
physical objects. They may read a word, or point at a window on a computer
screen. An L{Idea} is a representation of the common unit that can be
referred to in this way.
Both physical and cognitive layers are present in Imaginary. The cognitive
layer is modeled by L{imaginary.idea}. The physical layer is modeled by a
rudimentary point-of-interest simulation in L{imaginary.objects}. An
L{imaginary.thing.Thing} is a physical object; an L{Idea} is a node in a
non-physical graph, related by links that are annotated to describe the
nature of the relationship between it and other L{Idea}s.
L{Idea} is the most abstract unit of simulation. It does not have any
behavior or simulation semantics of its own; it merely ties together
different related systems.
    An L{Idea} is composed of a C{delegate}, which is an object that implements
    simulation-defined interfaces; a list of L{ILinkContributor}s, which
    produce L{Link}s to other L{Idea}s; and a set of C{ILinkAnnotator}s, which
    apply annotations (which themselves implement simulation-defined
    link-annotation interfaces) to those links.
Each L{imaginary.thing.Thing} has a corresponding L{Idea} to represent it
in the simulation. The physical simulation defines only a few types of
links: objects have links to their containers, containers have links to
their contents, rooms have links to their exits, exits have links to their
destinations. Any L{imaginary.thing.Thing} can have a powerup applied to
it which adds to the list of linkers or annotators for its L{Idea},
however, which allows users to create arbitrary objects.
For example, the target of the "look" action must implement
L{imaginary.iimaginary.IVisible}, but need not be a
    L{imaginary.objects.Thing}. A simulation might want to provide a piece of
graffiti that you could look at, but would not be a physical object, in the
sense that you couldn't pick it up, weigh it, push it, etc. Such an object
could be implemented as a powerup for both
L{imaginary.iimaginary.IDescriptionContributor}, which would impart some
short flavor text to the room, and L{imaginary.iimaginary.IVisible}, which
would be an acceptable target of 'look'. The
L{imaginary.iimaginary.IVisible} implementation could even be an in-memory
object, not stored in the database at all; and there could be different
implementations for different observers, depending on their level of
knowledge about the in-world graffiti.
@ivar delegate: this object is the object which may be adaptable to a set
of interfaces. This L{Idea} delegates all adaptation to its delegate.
In many cases (when referring to a physical object), this will be an
L{imaginary.thing.Thing}, but not necessarily.
@ivar linkers: a L{list} of L{ILinkContributor}s which are used to gather
L{Link}s from this L{Idea} during L{Idea.obtain} traversal.
@ivar annotators: a L{list} of L{ILinkAnnotator}s which are used to annotate
L{Link}s gathered from this L{Idea} via the C{linkers} list.
"""
delegate = attr.ib()
linkers = attr.ib(default=attr.Factory(list))
annotators = attr.ib(default=attr.Factory(list))
def _allLinks(self):
"""
        Return an iterator of all L{Link}s away from this idea.
"""
for linker in self.linkers:
for link in linker.links():
yield link
def _applyAnnotators(self, linkiter):
"""
Apply my list of annotators to each link in the given iterable.
"""
for link in linkiter:
self._annotateOneLink(link)
yield link
def _annotateOneLink(self, link):
"""
Apply all L{ILinkAnnotator}s in this L{Idea}'s C{annotators} list.
"""
allAnnotations = []
for annotator in self.annotators:
# XXX important to test: annotators shouldn't mutate the links.
# The annotators show up in a non-deterministic order, so in order
# to facilitate a consistent view of the link in annotationsFor(),
# all annotations are applied at the end.
annotations = list(annotator.annotationsFor(link, self))
allAnnotations.extend(annotations)
link.annotate(allAnnotations)
def obtain(self, retriever):
"""
Traverse the graph of L{Idea}s, starting with C{self}, looking for
objects which the given L{IRetriever} can retrieve.
The graph will be traversed by looking at all the links generated by
this L{Idea}'s C{linkers}, only continuing down those links for which
the given L{IRetriever}'s C{shouldKeepGoing} returns L{True}.
@param retriever: an object which will be passed each L{Path} in turn,
discovered during traversal of the L{Idea} graph. If any
invocation of L{IRetriever.retrieve} on this parameter should
succeed, that will be yielded as a result from this method.
@type retriever: L{IRetriever}
@return: a generator which yields the results of C{retriever.retrieve}
which are not L{None}.
"""
return ObtainResult(self, retriever)
def _doObtain(self, retriever, path, reasonsWhyNot):
"""
A generator that implements the logic for obtain()
"""
if path is None:
# Special case: we only get a self->self link if we are the
# beginning _and_ the end.
path = Path(links=[])
selfLink = Link(source=self, target=self)
self._annotateOneLink(selfLink)
finalPath = path.to(selfLink)
else:
finalPath = Path(links=path.links[:])
self._annotateOneLink(finalPath.links[-1])
result = retriever.retrieve(finalPath)
objections = set(retriever.objectionsTo(finalPath, result))
reasonsWhyNot |= objections
if result is not None:
if not objections:
yield result
for link in self._applyAnnotators(self._allLinks()):
subpath = path.to(link)
if subpath.isCyclic():
continue
if retriever.shouldKeepGoing(subpath):
for obtained in link.target._doObtain(retriever, subpath, reasonsWhyNot):
yield obtained
@attr.s
class ObtainResult(object):
"""
The result of L{Idea.obtain}, this provides an iterable of results.
@ivar reasonsWhyNot: If this iterator has already been exhausted, this will
be a C{set} of L{IWhyNot} objects explaining possible reasons why there
were no results. For example, if the room where the player attempted
to obtain targets is dark, this may contain an L{IWhyNot} provider.
However, until this iterator has been exhausted, it will be C{None}.
@type reasonsWhyNot: C{set} of L{IWhyNot}, or C{NoneType}
@ivar idea: the L{Idea} that L{Idea.obtain} was invoked on.
@type idea: L{Idea}
@ivar retriever: The L{IRetriever} that L{Idea.obtain} was invoked with.
@type retriever: L{IRetriever}
"""
idea = attr.ib()
retriever = attr.ib()
reasonsWhyNot = attr.ib(default=None)
def __iter__(self):
"""
A generator which yields each result of the query, then sets
C{reasonsWhyNot}.
"""
reasonsWhyNot = set()
for result in self.idea._doObtain(self.retriever, None, reasonsWhyNot):
yield result
self.reasonsWhyNot = reasonsWhyNot
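# Consumption sketch (illustrative; `someIdea` and `someRetriever` stand in
# for any Idea and IRetriever): reasonsWhyNot is only populated once the
# iterator has been exhausted, so drain the result before inspecting it.
#
#     result = someIdea.obtain(someRetriever)
#     found = list(result)
#     if not found:
#         for whyNot in result.reasonsWhyNot:
#             ...  # e.g. explain to the player why nothing was found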
@attr.s
class DelegatingRetriever(object):
"""
A delegating retriever, so that retrievers can be easily composed.
See the various methods marked for overriding.
@ivar retriever: A retriever to delegate most operations to.
@type retriever: L{IRetriever}
"""
implements(IRetriever)
retriever = attr.ib()
def moreObjectionsTo(self, path, result):
"""
Override in subclasses to yield objections to add to this
L{DelegatingRetriever}'s C{retriever}'s C{objectionsTo}.
By default, offer no additional objections.
"""
return []
def objectionsTo(self, path, result):
"""
        Concatenate C{self.retriever.objectionsTo} with C{self.moreObjectionsTo}.
"""
for objection in self.retriever.objectionsTo(path, result):
yield objection
for objection in self.moreObjectionsTo(path, result):
yield objection
def shouldStillKeepGoing(self, path):
"""
        Override in subclasses to halt traversal by returning C{False}; note
        that C{shouldKeepGoing} consults this only after this
        L{DelegatingRetriever}'s C{retriever}'s C{shouldKeepGoing} has
        returned C{True}.
By default, return C{True} to keep going.
"""
return True
def shouldKeepGoing(self, path):
"""
If this L{DelegatingRetriever}'s C{retriever}'s C{shouldKeepGoing}
returns C{False} for the given path, return C{False} and stop
traversing. Otherwise, delegate to C{shouldStillKeepGoing}.
"""
return (self.retriever.shouldKeepGoing(path) and
self.shouldStillKeepGoing(path))
def resultRetrieved(self, path, retrievedResult):
"""
A result was retrieved. Post-process it if desired.
Override this in subclasses to modify (non-None) results returned from
this L{DelegatingRetriever}'s C{retriever}'s C{retrieve} method.
By default, simply return the result retrieved.
"""
return retrievedResult
def retrieve(self, path):
"""
Delegate to this L{DelegatingRetriever}'s C{retriever}'s C{retrieve}
method, then post-process it with C{resultRetrieved}.
"""
subResult = self.retriever.retrieve(path)
if subResult is None:
return None
return self.resultRetrieved(path, subResult)
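# A minimal sketch of the composition pattern above (hypothetical class, not
# used elsewhere in this module): a retriever that halts traversal after a
# fixed number of links, delegating everything else to the wrapped retriever.
#
#     class MaxDepth(DelegatingRetriever):
#         def __init__(self, depth, retriever):
#             DelegatingRetriever.__init__(self, retriever)
#             self.depth = depth
#
#         def shouldStillKeepGoing(self, path):
#             return len(path.links) <= self.depth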
class Proximity(DelegatingRetriever):
"""
    L{Proximity} is a retriever which will continue traversing any path
    shorter than its prescribed distance, and stop at any longer one.
@ivar distance: the distance, in meters, to query for.
@type distance: L{float}
"""
def __init__(self, distance, retriever):
DelegatingRetriever.__init__(self, retriever)
self.distance = distance
def shouldStillKeepGoing(self, path):
"""
        Stop traversal (via the C{shouldStillKeepGoing} hook) for paths whose
        sum of L{IDistance} annotations is greater than L{Proximity.distance}.
"""
dist = sum(vector.distance for vector in path.of(IDistance))
ok = (self.distance >= dist)
return ok
class Reachable(DelegatingRetriever):
"""
    L{Reachable} is a retriever which will object to any path with an
L{IObstruction} annotation on it.
"""
def moreObjectionsTo(self, path, result):
"""
Yield an objection from each L{IObstruction.whyNot} method annotating
the given path.
"""
if result is not None:
for obstruction in path.of(IObstruction):
yield obstruction.whyNot()
class Traversability(DelegatingRetriever):
"""
    A path is only traversable if it terminates in *one* exit. Once you've
gotten to an exit, you have to stop, because the player needs to go through
that exit to get to the next one.
"""
def shouldStillKeepGoing(self, path):
"""
Stop at the first exit that you find.
"""
for index, target in enumerate(path.eachTargetAs(IExit)):
if index > 0:
return False
return True
@attr.s
class Vector(object):
"""
A L{Vector} is a link annotation which remembers a distance and a
direction; for example, a link through a 'north' exit between rooms will
have a direction of 'north' and a distance specified by that
L{imaginary.objects.Exit} (defaulting to 1 meter).
"""
implements(IDistance)
distance = attr.ib()
direction = attr.ib()
@attr.s
class ProviderOf(object):
"""
L{ProviderOf} is a retriever which will retrieve the facet which provides
its C{interface}, if any exists at the terminus of the path.
@ivar interface: The interface which defines the type of values returned by
the C{retrieve} method.
@type interface: L{zope.interface.interfaces.IInterface}
"""
implements(IRetriever)
interface = attr.ib()
def retrieve(self, path):
"""
Retrieve the target of the path, as it provides the interface specified
by this L{ProviderOf}.
@return: the target of the path, adapted to this retriever's interface,
as defined by L{Path.targetAs}.
@rtype: L{ProviderOf.interface}
"""
return path.targetAs(self.interface)
def objectionsTo(self, path, result):
"""
Implement L{IRetriever.objectionsTo} to yield no objections.
"""
return []
def shouldKeepGoing(self, path):
"""
Implement L{IRetriever.shouldKeepGoing} to always return C{True}.
"""
return True
@attr.s
class AlsoKnownAs(object):
"""
L{AlsoKnownAs} is an annotation that indicates that the link it annotates
is known as a particular name.
@ivar name: The name that this L{AlsoKnownAs}'s link's target is also known
as.
@type name: C{unicode}
"""
implements(INameable)
name = attr.ib()
def knownTo(self, observer, name):
"""
An L{AlsoKnownAs} is known to all observers as its C{name} attribute.
"""
return (self.name == name)
class Named(DelegatingRetriever):
"""
A retriever which wraps another retriever, but yields only results known to
a particular observer by a particular name.
@ivar name: the name to search for.
@ivar observer: the observer who should identify the target by the name
this L{Named} is searching for.
@type observer: L{Thing}
"""
def __init__(self, name, retriever, observer):
DelegatingRetriever.__init__(self, retriever)
self.name = name
self.observer = observer
def resultRetrieved(self, path, subResult):
"""
Invoke C{retrieve} on the L{IRetriever} which we wrap, but only return
it if the L{INameable} target of the given path is known as this
L{Named}'s C{name}.
"""
if isKnownTo(self.observer, path, self.name):
return subResult
else:
return None
def isKnownTo(observer, path, name):
"""
Is the given path's target known to the given observer by the given name
    (as retrieved via the given path)?
For example: a room may be known as the name 'north' but only if you're
standing south of it, so the path via which you retrieved it (starting to
the south) is relevant.
"""
named = path.targetAs(INameable)
# TODO: don't look at the last link only. There should be a specific
# "alias" annotation which knows how to alias a specific target; we should
# give it targetAs(something) so the alias itself can compare.
# (Introducing additional links into traversal should never break things.)
allAliases = list(path.links[-1].of(INameable))
if named is not None:
allAliases += [named]
for alias in allAliases:
if alias.knownTo(observer, name):
return True
return False
class CanSee(DelegatingRetriever):
"""
Wrap a L{ProviderOf}, yielding the results that it would yield, but
    applying lighting to the ultimate target based on the last L{IThing} on
    the path.
@ivar retriever: The lowest-level retriever being wrapped.
@type retriever: L{ProviderOf} (Note: it might be a good idea to add an
'interface' attribute to L{IRetriever} so this no longer depends on a
more specific type than other L{DelegatingRetriever}s, to make the
order of composition more flexible.)
"""
def __init__(self, retriever, observer=None):
"""
@param observer: The L{Thing} which is trying to see things.
"""
DelegatingRetriever.__init__(self, retriever)
self.observer = observer
def resultRetrieved(self, path, subResult):
"""
Post-process retrieved results by determining if lighting applies to
them.
"""
litlinks = list(path.of(ILitLink))
if not litlinks:
return subResult
# XXX what if there aren't any IThings on the path?
litThing = list(path.eachTargetAs(IThing))[-1]
# you need to be able to look from a light room to a dark room, so only
# apply the most "recent" lighting properties.
return litlinks[-1].applyLighting(
litThing, subResult, self.retriever.interface)
def shouldStillKeepGoing(self, path):
"""
Don't keep going through links that are opaque to the observer.
"""
for opacity in path.of(IElectromagneticMedium):
if opacity.isOpaque(self.observer):
return False
return True
def moreObjectionsTo(self, path, result):
"""
Object to paths which have L{ILitLink} annotations which are not lit.
"""
for lighting in path.of(ILitLink):
if not lighting.isItLit(path):
tmwn = lighting.whyNotLit()
yield tmwn
def find(source, interface=IThing, distance=1.0, name=None, onlyReachable=True,
onlyVisible=True, observer=None):
"""
Find a provider of a given interface, within a given distance, known by the
given name, that may or may not be visible or reachable to the source.
@param source: The origin point for the search.
@type source: L{Idea}
@param interface: The type of interface to return.
@type interface: L{IInterface}
@param distance: The maximum distance, in meters, from C{source} to find
objects.
@type distance: L{float}
@param name: only return objects named C{name}.
@type name: L{unicode}
@param onlyReachable: Only return items that the source can reach.
@type onlyReachable: L{bool}
@param onlyVisible: Only return items that the source can see.
@type onlyVisible: L{bool}
@param observer: the observer from whose perspective to do the query; if
unspecified, the same as C{source}.
@type observer: L{Idea}
@return: a generator yielding providers of C{interface}
"""
if observer is None:
observer = source
retriever = ProviderOf(interface)
if onlyVisible:
retriever = CanSee(retriever, observer)
if distance is not None:
retriever = Proximity(distance, retriever)
if name is not None:
retriever = Named(name, retriever, observer)
if onlyReachable:
retriever = Reachable(retriever)
return source.obtain(retriever)
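# Usage sketch (illustrative; `someIdea` stands in for any L{Idea}, e.g. a
# Thing's C{idea} attribute). With the defaults above,
# find(someIdea, name=u"chair") builds and drains the same retriever stack as:
#
#     retriever = Reachable(
#         Named(u"chair",
#               Proximity(1.0, CanSee(ProviderOf(IThing), someIdea)),
#               someIdea))
#     results = someIdea.obtain(retriever)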
|
"""CallTips.py - An IDLE Extension to Jog Your Memory
Call Tips are floating windows which display function, class, and method
parameter and docstring information when you type an opening parenthesis, and
which disappear when you type a closing parenthesis.
"""
import __main__
import inspect
import re
import sys
import textwrap
import types
from idlelib import CallTipWindow
from idlelib.HyperParser import HyperParser
class CallTips:
menudefs = [
('edit', [
("Show call tip", "<<force-open-calltip>>"),
])
]
def __init__(self, editwin=None):
if editwin is None: # subprocess and test
self.editwin = None
else:
self.editwin = editwin
self.text = editwin.text
self.active_calltip = None
self._calltip_window = self._make_tk_calltip_window
def close(self):
self._calltip_window = None
def _make_tk_calltip_window(self):
# See __init__ for usage
return CallTipWindow.CallTip(self.text)
def _remove_calltip_window(self, event=None):
if self.active_calltip:
self.active_calltip.hidetip()
self.active_calltip = None
def force_open_calltip_event(self, event):
"The user selected the menu entry or hotkey, open the tip."
self.open_calltip(True)
def try_open_calltip_event(self, event):
"""Happens when it would be nice to open a CallTip, but not really
necessary, for example after an opening bracket, so function calls
won't be made.
"""
self.open_calltip(False)
def refresh_calltip_event(self, event):
if self.active_calltip and self.active_calltip.is_active():
self.open_calltip(False)
def open_calltip(self, evalfuncs):
self._remove_calltip_window()
hp = HyperParser(self.editwin, "insert")
sur_paren = hp.get_surrounding_brackets('(')
if not sur_paren:
return
hp.set_index(sur_paren[0])
expression = hp.get_expression()
if not expression:
return
if not evalfuncs and (expression.find('(') != -1):
return
argspec = self.fetch_tip(expression)
if not argspec:
return
self.active_calltip = self._calltip_window()
self.active_calltip.showtip(argspec, sur_paren[0], sur_paren[1])
def fetch_tip(self, expression):
"""Return the argument list and docstring of a function or class.
If there is a Python subprocess, get the calltip there. Otherwise,
either this fetch_tip() is running in the subprocess or it was
called in an IDLE running without the subprocess.
The subprocess environment is that of the most recently run script. If
two unrelated modules are being edited some calltips in the current
module may be inoperative if the module was not the last to run.
To find methods, fetch_tip must be fed a fully qualified name.
"""
try:
rpcclt = self.editwin.flist.pyshell.interp.rpcclt
except AttributeError:
rpcclt = None
if rpcclt:
return rpcclt.remotecall("exec", "get_the_calltip",
(expression,), {})
else:
return get_argspec(get_entity(expression))
def get_entity(expression):
"""Return the object corresponding to expression evaluated
    in a namespace spanning sys.modules and __main__.__dict__.
"""
if expression:
namespace = sys.modules.copy()
namespace.update(__main__.__dict__)
try:
return eval(expression, namespace)
except BaseException:
# An uncaught exception closes idle, and eval can raise any
# exception, especially if user classes are involved.
return None
# The following are used in get_argspec and some in tests
_MAX_COLS = 85
_MAX_LINES = 5 # enough for bytes
_INDENT = ' '*4 # for wrapped signatures
_first_param = re.compile(r'(?<=\()\w*\,?\s*')
_default_callable_argspec = "See source or doc"
def get_argspec(ob):
'''Return a string describing the signature of a callable object, or ''.
For Python-coded functions and methods, the first line is introspected.
Delete 'self' parameter for classes (.__init__) and bound methods.
The next lines are the first lines of the doc string up to the first
empty line or _MAX_LINES. For builtins, this typically includes
the arguments in addition to the return value.
'''
argspec = ""
try:
ob_call = ob.__call__
except BaseException:
return argspec
if isinstance(ob, type):
fob = ob.__init__
elif isinstance(ob_call, types.MethodType):
fob = ob_call
else:
fob = ob
if isinstance(fob, (types.FunctionType, types.MethodType)):
argspec = inspect.formatargspec(*inspect.getfullargspec(fob))
if (isinstance(ob, (type, types.MethodType)) or
isinstance(ob_call, types.MethodType)):
argspec = _first_param.sub("", argspec)
lines = (textwrap.wrap(argspec, _MAX_COLS, subsequent_indent=_INDENT)
if len(argspec) > _MAX_COLS else [argspec] if argspec else [])
if isinstance(ob_call, types.MethodType):
doc = ob_call.__doc__
else:
doc = getattr(ob, "__doc__", "")
if doc:
for line in doc.split('\n', _MAX_LINES)[:_MAX_LINES]:
line = line.strip()
if not line:
break
if len(line) > _MAX_COLS:
line = line[: _MAX_COLS - 3] + '...'
lines.append(line)
argspec = '\n'.join(lines)
if not argspec:
argspec = _default_callable_argspec
return argspec
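# For example (illustrative):
#
#     def f(a, b=1, *args, **kwds):
#         "Return stuff."
#
#     get_argspec(f)  # -> "(a, b=1, *args, **kwds)\nReturn stuff."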
if __name__ == '__main__':
from unittest import main
main('idlelib.idle_test.test_calltips', verbosity=2)
|
# Copyright (c) 2011, 2012 Free Software Foundation
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
# This project incorporates work covered by the following copyright and permission notice:
# Copyright (c) 2009, Julien Fache
# All rights reserved.
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
# are met:
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in
# the documentation and/or other materials provided with the
# distribution.
# * Neither the name of the author nor the names of other
# contributors may be used to endorse or promote products derived
# from this software without specific prior written permission.
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
# FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
# COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
# (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
# HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
# STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED
# OF THE POSSIBILITY OF SUCH DAMAGE.
# Copyright (c) 2011, 2012 Free Software Foundation
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
"""Test cases for Gstudio's url_shortener"""
from __future__ import with_statement
import warnings
from django.test import TestCase
from gstudio.url_shortener import get_url_shortener
from gstudio import url_shortener as us_settings
from gstudio.url_shortener.backends.default import backend as default_backend
class URLShortenerTestCase(TestCase):
"""Test cases for gstudio.url_shortener"""
def setUp(self):
self.original_backend = us_settings.URL_SHORTENER_BACKEND
def tearDown(self):
us_settings.URL_SHORTENER_BACKEND = self.original_backend
def test_get_url_shortener(self):
us_settings.URL_SHORTENER_BACKEND = 'mymodule.myclass'
try:
with warnings.catch_warnings(record=True) as w:
self.assertEquals(get_url_shortener(), default_backend)
                self.assertTrue(issubclass(w[-1].category, RuntimeWarning))
self.assertEquals(
str(w[-1].message),
'mymodule.myclass backend cannot be imported')
except AttributeError:
            # Fails under Python 2.5 because of 'warnings.catch_warnings'
pass
us_settings.URL_SHORTENER_BACKEND = 'gstudio.tests.custom_url_shortener'
try:
with warnings.catch_warnings(record=True) as w:
self.assertEquals(get_url_shortener(), default_backend)
                self.assertTrue(issubclass(w[-1].category, RuntimeWarning))
self.assertEquals(
str(w[-1].message),
'This backend only exists for testing')
except AttributeError:
            # Fails under Python 2.5 because of 'warnings.catch_warnings'
pass
us_settings.URL_SHORTENER_BACKEND = 'gstudio.url_shortener'\
'.backends.default'
self.assertEquals(get_url_shortener(), default_backend)
|
import threading
#Because eval(valenc) might require it
import datetime
from DIRAC import S_OK, S_ERROR, gLogger
from DIRAC.Core.Utilities import DEncode, List
from DIRAC.Core.Base.ExecutorModule import ExecutorModule
from DIRAC.WorkloadManagementSystem.Client.JobState.CachedJobState import CachedJobState
class OptimizerExecutor( ExecutorModule ):
class JobLog:
class LogWrap:
def __init__( self, log, jid, funcName ):
self.__log = log
self.__jid = jid
self.__funcName = funcName
def __call__( self, msg, varMsg = "" ):
try:
funcObj = getattr( self.__log, self.__funcName )
except AttributeError:
raise AttributeError( "Logger does not have %s method" % self.__funcName )
msg = "\n".join( "[JID %s] %s" % ( self.__jid, line ) for line in msg.split( "\n" ) )
funcObj( msg, varMsg )
def __init__( self, log, jid ):
self.__jid = jid
self.__log = log
def __nonzero__( self ):
return True
def __getattr__( self, name ):
return self.LogWrap( self.__log, self.__jid, name )
@classmethod
def initialize( cls ):
opName = cls.ex_getProperty( 'fullName' )
opName = "/".join( opName.split( "/" )[1:] )
if opName.find( "Agent" ) == len( opName ) - 5:
opName = opName[ :-5]
cls.__optimizerName = opName
maxTasks = cls.ex_getOption( 'Tasks', 1 )
cls.__jobData = threading.local()
cls.__jobData.jobState = None
cls.__jobData.jobLog = None
cls.ex_setProperty( 'optimizerName', cls.__optimizerName )
try:
result = cls.initializeOptimizer()
if not result[ 'OK' ]:
return result
except Exception, excp:
cls.log.exception( "Error while initializing optimizer" )
return S_ERROR( "Error initializing: %s" % str( excp ) )
cls.ex_setMind( "WorkloadManagement/OptimizationMind" )
return S_OK()
@classmethod
def ex_optimizerName( cls ):
return cls.__optimizerName
def initializeOptimizer( self ):
return S_OK()
def processTask( self, jid, jobState ):
self.__jobData.jobState = jobState
self.__jobData.jobLog = self.JobLog( self.log, jid )
try:
self.jobLog.info( "Processing" )
optResult = self.optimizeJob( jid, jobState )
#If the manifest is dirty, update it!
result = jobState.getManifest()
if not result[ 'OK' ]:
return result
manifest = result[ 'Value' ]
if manifest.isDirty():
jobState.setManifest( manifest )
#Did it go as expected? If not Failed!
if not optResult[ 'OK' ]:
self.jobLog.info( "Set to Failed/%s" % optResult[ 'Message' ] )
minorStatus = "%s optimizer" % self.ex_optimizerName()
return jobState.setStatus( "Failed", minorStatus, optResult[ 'Message' ], source = self.ex_optimizerName() )
return S_OK()
finally:
self.__jobData.jobState = None
self.__jobData.jobLog = None
def optimizeJob( self, jid, jobState ):
raise Exception( "You need to overwrite this method to optimize the job!" )
def setNextOptimizer( self, jobState = None ):
if not jobState:
jobState = self.__jobData.jobState
result = jobState.getOptParameter( 'OptimizerChain' )
if not result['OK']:
return result
opChain = List.fromChar( result[ 'Value' ], "," )
opName = self.__optimizerName
try:
opIndex = opChain.index( opName )
except ValueError:
return S_ERROR( "Optimizer %s is not in the chain!" % opName )
chainLength = len( opChain )
if chainLength - 1 == opIndex:
#This is the last optimizer in the chain!
result = jobState.setStatus( self.ex_getOption( 'WaitingStatus', 'Waiting' ),
minorStatus = self.ex_getOption( 'WaitingMinorStatus', 'Pilot Agent Submission' ),
appStatus = "Unknown",
source = opName )
if not result[ 'OK' ]:
return result
result = jobState.insertIntoTQ()
if not result[ 'OK' ]:
return result
return S_OK()
#Keep optimizing!
nextOp = opChain[ opIndex + 1 ]
self.jobLog.info( "Set to Checking/%s" % nextOp )
return jobState.setStatus( "Checking", nextOp, source = opName )
def storeOptimizerParam( self, name, value ):
if not self.__jobData.jobState:
return S_ERROR( "This function can only be called inside the optimizeJob function" )
valenc = DEncode.encode( value )
return self.__jobData.jobState.setOptParameter( name, valenc )
def retrieveOptimizerParam( self, name ):
if not self.__jobData.jobState:
return S_ERROR( "This function can only be called inside the optimizeJob function" )
result = self.__jobData.jobState.getOptParameter( name )
if not result[ 'OK' ]:
return result
valenc = result[ 'Value' ]
try:
value, encLength = DEncode.decode( valenc )
if encLength == len( valenc ):
return S_OK( value )
except Exception:
self.jobLog.warn( "Opt param %s doesn't seem to be dencoded %s" % ( name, valenc ) )
return S_OK( eval( valenc ) )
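  # Round-trip sketch (illustrative): parameters are stored DEncoded and
  # decoded on retrieval; the eval() above is only a legacy fallback for
  # parameters stored before DEncode was used.
  #
  #   valenc = DEncode.encode( { 'Site' : 'LCG.CERN.ch' } )
  #   value, encLength = DEncode.decode( valenc )
  #   assert encLength == len( valenc )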
@property
def jobLog( self ):
if not self.__jobData.jobLog:
raise RuntimeError( "jobLog can only be invoked inside the optimizeJob function" )
return self.__jobData.jobLog
def deserializeTask( self, taskStub ):
return CachedJobState.deserialize( taskStub )
def serializeTask( self, cjs ):
return S_OK( cjs.serialize() )
def fastTrackDispatch( self, jid, jobState ):
result = jobState.getStatus()
if not result[ 'OK' ]:
return S_ERROR( "Could not retrieve job status for %s: %s" % ( jid, result[ 'Message' ] ) )
status, minorStatus = result[ 'Value' ]
if status != "Checking":
self.log.info( "[JID %s] Not in checking state. Avoid fast track" % jid )
return S_OK()
result = jobState.getOptParameter( "OptimizerChain" )
if not result[ 'OK' ]:
return S_ERROR( "Could not retrieve OptimizerChain for job %s: %s" % ( jid, result[ 'Message' ] ) )
optChain = result[ 'Value' ]
if minorStatus not in optChain:
self.log.info( "[JID %s] End of chain for job" % jid )
return S_OK()
self.log.info( "[JID %s] Fast track possible to %s" % ( jid, minorStatus ) )
return S_OK( "WorkloadManagement/%s" % minorStatus )
|
"""
Parser for the .json translation format as used by the WebExtensions API:
https://developer.mozilla.org/en-US/Add-ons/WebExtensions/Internationalization
See also:
https://www.chromium.org/developers/design-documents/extensions/how-the-extension-system-works/i18n
"""
import codecs
import copy
import json
import logging
from collections import OrderedDict
from jsonschema import validate
from jsonschema.exceptions import ValidationError
from pontoon.sync.exceptions import ParseError, SyncError
from pontoon.sync.formats.base import ParsedResource
from pontoon.sync.utils import create_parent_directory
from pontoon.sync.vcs.models import VCSTranslation
log = logging.getLogger(__name__)
SCHEMA = {
"type": "object",
"additionalProperties": {
"type": "object",
"properties": {
"message": {"type": "string"},
"description": {"type": "string"},
"placeholders": {
"type": "object",
"additionalProperties": {
"type": "object",
"properties": {
"content": {"type": "string"},
"example": {"type": "string"},
},
"required": ["content"],
},
},
},
"required": ["message"],
},
}
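# Example of a messages.json document this schema accepts (illustrative):
#
# {
#     "hello": {
#         "message": "Hello $NAME$!",
#         "description": "Greeting shown at startup.",
#         "placeholders": {
#             "name": {"content": "$1", "example": "Maria"}
#         }
#     }
# }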
class JSONEntity(VCSTranslation):
"""
Represents an entity in a JSON file.
"""
def __init__(self, order, key, data):
self.key = key
self.data = data
self.order = order
self.strings = {None: self.source_string} if self.source_string else {}
@property
def source_string(self):
return self.data["message"]
@property
def source_string_plural(self):
return ""
@property
def comments(self):
return [self.data["description"]] if "description" in self.data else []
@property
def fuzzy(self):
return False
@fuzzy.setter
def fuzzy(self, fuzzy):
pass # We don't use fuzzy in JSON
@property
def source(self):
return self.data.get("placeholders", [])
class JSONResource(ParsedResource):
def __init__(self, path, source_resource=None):
self.path = path
self.entities = {}
self.source_resource = source_resource
# Copy entities from the source_resource if it's available.
if source_resource:
for key, entity in source_resource.entities.items():
data = copy.copy(entity.data)
data["message"] = None
self.entities[key] = JSONEntity(entity.order, entity.key, data,)
try:
with codecs.open(path, "r", "utf-8") as resource:
self.json_file = json.load(resource, object_pairs_hook=OrderedDict)
validate(self.json_file, SCHEMA)
except (OSError, ValueError, ValidationError) as err:
# If the file doesn't exist or cannot be decoded,
# but we have a source resource,
# we can keep going, we'll just not have any translations.
if source_resource:
return
else:
raise ParseError(err)
for order, (key, data) in enumerate(self.json_file.items()):
self.entities[key] = JSONEntity(order, key, data,)
@property
def translations(self):
return sorted(self.entities.values(), key=lambda e: e.order)
def save(self, locale):
"""
Load the source resource, modify it with changes made to this
Resource instance, and save it over the locale-specific
resource.
"""
if not self.source_resource:
raise SyncError(
"Cannot save JSON resource {}: No source resource given.".format(
self.path
)
)
with codecs.open(self.source_resource.path, "r", "utf-8") as resource:
json_file = json.load(resource, object_pairs_hook=OrderedDict)
try:
validate(json_file, SCHEMA)
except ValidationError as e:
raise ParseError(e)
# Iterate over a copy, leaving original free to modify
for key, value in json_file.copy().items():
entity = self.entities[key]
if entity.strings:
json_file[key]["message"] = entity.strings[None]
else:
del json_file[key]
create_parent_directory(self.path)
with codecs.open(self.path, "w+", "utf-8") as f:
log.debug("Saving file: %s", self.path)
f.write(
json.dumps(
json_file, ensure_ascii=False, indent=2, separators=(",", ": ")
)
)
f.write("\n") # Add newline
def parse(path, source_path=None, locale=None):
if source_path is not None:
source_resource = JSONResource(source_path)
else:
source_resource = None
return JSONResource(path, source_resource)
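# Usage sketch (paths illustrative): parse a locale file against its source
# resource, then inspect or save the translations.
#
#     res = parse("locales/de/messages.json",
#                 source_path="locales/en-US/messages.json")
#     for translation in res.translations:
#         print(translation.key, translation.strings)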
|
# Copyright 2012 OpenStack Foundation
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from tempest import auth
from tempest.common.rest_client import NegativeRestClient
from tempest import config
from tempest import exceptions
from tempest.openstack.common import log as logging
from tempest.services.baremetal.v1.client_json import BaremetalClientJSON
from tempest.services import botoclients
from tempest.services.compute.json.aggregates_client import \
AggregatesClientJSON
from tempest.services.compute.json.availability_zone_client import \
AvailabilityZoneClientJSON
from tempest.services.compute.json.certificates_client import \
CertificatesClientJSON
from tempest.services.compute.json.extensions_client import \
ExtensionsClientJSON
from tempest.services.compute.json.fixed_ips_client import FixedIPsClientJSON
from tempest.services.compute.json.flavors_client import FlavorsClientJSON
from tempest.services.compute.json.floating_ips_client import \
FloatingIPsClientJSON
from tempest.services.compute.json.hosts_client import HostsClientJSON
from tempest.services.compute.json.hypervisor_client import \
HypervisorClientJSON
from tempest.services.compute.json.images_client import ImagesClientJSON
from tempest.services.compute.json.instance_usage_audit_log_client import \
InstanceUsagesAuditLogClientJSON
from tempest.services.compute.json.interfaces_client import \
InterfacesClientJSON
from tempest.services.compute.json.keypairs_client import KeyPairsClientJSON
from tempest.services.compute.json.limits_client import LimitsClientJSON
from tempest.services.compute.json.quotas_client import QuotasClientJSON
from tempest.services.compute.json.security_groups_client import \
SecurityGroupsClientJSON
from tempest.services.compute.json.servers_client import ServersClientJSON
from tempest.services.compute.json.services_client import ServicesClientJSON
from tempest.services.compute.json.tenant_usages_client import \
TenantUsagesClientJSON
from tempest.services.compute.json.volumes_extensions_client import \
VolumesExtensionsClientJSON
from tempest.services.compute.v3.json.aggregates_client import \
AggregatesV3ClientJSON
from tempest.services.compute.v3.json.availability_zone_client import \
AvailabilityZoneV3ClientJSON
from tempest.services.compute.v3.json.certificates_client import \
CertificatesV3ClientJSON
from tempest.services.compute.v3.json.extensions_client import \
ExtensionsV3ClientJSON
from tempest.services.compute.v3.json.flavors_client import FlavorsV3ClientJSON
from tempest.services.compute.v3.json.hosts_client import HostsV3ClientJSON
from tempest.services.compute.v3.json.hypervisor_client import \
HypervisorV3ClientJSON
from tempest.services.compute.v3.json.instance_usage_audit_log_client import \
InstanceUsagesAuditLogV3ClientJSON
from tempest.services.compute.v3.json.interfaces_client import \
InterfacesV3ClientJSON
from tempest.services.compute.v3.json.keypairs_client import \
KeyPairsV3ClientJSON
from tempest.services.compute.v3.json.quotas_client import \
QuotasV3ClientJSON
from tempest.services.compute.v3.json.servers_client import \
ServersV3ClientJSON
from tempest.services.compute.v3.json.services_client import \
ServicesV3ClientJSON
from tempest.services.compute.v3.json.tenant_usages_client import \
TenantUsagesV3ClientJSON
from tempest.services.compute.v3.json.version_client import \
VersionV3ClientJSON
from tempest.services.compute.xml.aggregates_client import AggregatesClientXML
from tempest.services.compute.xml.availability_zone_client import \
AvailabilityZoneClientXML
from tempest.services.compute.xml.certificates_client import \
CertificatesClientXML
from tempest.services.compute.xml.extensions_client import ExtensionsClientXML
from tempest.services.compute.xml.fixed_ips_client import FixedIPsClientXML
from tempest.services.compute.xml.flavors_client import FlavorsClientXML
from tempest.services.compute.xml.floating_ips_client import \
FloatingIPsClientXML
from tempest.services.compute.xml.hosts_client import HostsClientXML
from tempest.services.compute.xml.hypervisor_client import HypervisorClientXML
from tempest.services.compute.xml.images_client import ImagesClientXML
from tempest.services.compute.xml.instance_usage_audit_log_client import \
InstanceUsagesAuditLogClientXML
from tempest.services.compute.xml.interfaces_client import \
InterfacesClientXML
from tempest.services.compute.xml.keypairs_client import KeyPairsClientXML
from tempest.services.compute.xml.limits_client import LimitsClientXML
from tempest.services.compute.xml.quotas_client import QuotasClientXML
from tempest.services.compute.xml.security_groups_client \
import SecurityGroupsClientXML
from tempest.services.compute.xml.servers_client import ServersClientXML
from tempest.services.compute.xml.services_client import ServicesClientXML
from tempest.services.compute.xml.tenant_usages_client import \
TenantUsagesClientXML
from tempest.services.compute.xml.volumes_extensions_client import \
VolumesExtensionsClientXML
from tempest.services.data_processing.v1_1.client import DataProcessingClient
from tempest.services.identity.json.identity_client import IdentityClientJSON
from tempest.services.identity.json.identity_client import TokenClientJSON
from tempest.services.identity.v3.json.credentials_client import \
CredentialsClientJSON
from tempest.services.identity.v3.json.endpoints_client import \
EndPointClientJSON
from tempest.services.identity.v3.json.identity_client import \
IdentityV3ClientJSON
from tempest.services.identity.v3.json.identity_client import V3TokenClientJSON
from tempest.services.identity.v3.json.policy_client import PolicyClientJSON
from tempest.services.identity.v3.json.service_client import \
ServiceClientJSON
from tempest.services.identity.v3.xml.credentials_client import \
CredentialsClientXML
from tempest.services.identity.v3.xml.endpoints_client import EndPointClientXML
from tempest.services.identity.v3.xml.identity_client import \
IdentityV3ClientXML
from tempest.services.identity.v3.xml.identity_client import V3TokenClientXML
from tempest.services.identity.v3.xml.policy_client import PolicyClientXML
from tempest.services.identity.v3.xml.service_client import \
ServiceClientXML
from tempest.services.identity.xml.identity_client import IdentityClientXML
from tempest.services.identity.xml.identity_client import TokenClientXML
from tempest.services.image.v1.json.image_client import ImageClientJSON
from tempest.services.image.v2.json.image_client import ImageClientV2JSON
from tempest.services.network.json.network_client import NetworkClientJSON
from tempest.services.network.xml.network_client import NetworkClientXML
from tempest.services.object_storage.account_client import AccountClient
from tempest.services.object_storage.account_client import \
AccountClientCustomizedHeader
from tempest.services.object_storage.container_client import ContainerClient
from tempest.services.object_storage.object_client import ObjectClient
from tempest.services.object_storage.object_client import \
ObjectClientCustomizedHeader
from tempest.services.orchestration.json.orchestration_client import \
OrchestrationClient
from tempest.services.telemetry.json.telemetry_client import \
TelemetryClientJSON
from tempest.services.telemetry.xml.telemetry_client import \
TelemetryClientXML
from tempest.services.volume.json.admin.volume_hosts_client import \
VolumeHostsClientJSON
from tempest.services.volume.json.admin.volume_types_client import \
VolumeTypesClientJSON
from tempest.services.volume.json.extensions_client import \
ExtensionsClientJSON as VolumeExtensionClientJSON
from tempest.services.volume.json.snapshots_client import SnapshotsClientJSON
from tempest.services.volume.json.volumes_client import VolumesClientJSON
from tempest.services.volume.xml.admin.volume_hosts_client import \
VolumeHostsClientXML
from tempest.services.volume.xml.admin.volume_types_client import \
VolumeTypesClientXML
from tempest.services.volume.xml.extensions_client import \
ExtensionsClientXML as VolumeExtensionClientXML
from tempest.services.volume.xml.snapshots_client import SnapshotsClientXML
from tempest.services.volume.xml.volumes_client import VolumesClientXML
CONF = config.CONF
LOG = logging.getLogger(__name__)
class Manager(object):
"""
Top level manager for OpenStack Compute clients
"""
def __init__(self, username=None, password=None, tenant_name=None,
interface='json', service=None):
"""
We allow overriding of the credentials used within the various
client classes managed by the Manager object. Left as None, the
standard username/password/tenant_name is used.
:param username: Override of the username
:param password: Override of the password
:param tenant_name: Override of the tenant name
"""
self.interface = interface
self.auth_version = CONF.identity.auth_version
# FIXME(andreaf) Change Manager __init__ to accept a credentials dict
if username is None or password is None:
# Tenant None is a valid use case
self.credentials = self.get_default_credentials()
else:
self.credentials = dict(username=username, password=password,
tenant_name=tenant_name)
if self.auth_version == 'v3':
self.credentials['domain_name'] = 'Default'
# Setup an auth provider
auth_provider = self.get_auth_provider(self.credentials)
if self.interface == 'xml':
self.certificates_client = CertificatesClientXML(
auth_provider)
self.servers_client = ServersClientXML(auth_provider)
self.limits_client = LimitsClientXML(auth_provider)
self.images_client = ImagesClientXML(auth_provider)
self.keypairs_client = KeyPairsClientXML(auth_provider)
self.quotas_client = QuotasClientXML(auth_provider)
self.flavors_client = FlavorsClientXML(auth_provider)
self.extensions_client = ExtensionsClientXML(auth_provider)
self.volumes_extensions_client = VolumesExtensionsClientXML(
auth_provider)
self.floating_ips_client = FloatingIPsClientXML(
auth_provider)
self.snapshots_client = SnapshotsClientXML(auth_provider)
self.volumes_client = VolumesClientXML(auth_provider)
self.volume_types_client = VolumeTypesClientXML(
auth_provider)
self.identity_client = IdentityClientXML(auth_provider)
self.identity_v3_client = IdentityV3ClientXML(
auth_provider)
self.security_groups_client = SecurityGroupsClientXML(
auth_provider)
self.interfaces_client = InterfacesClientXML(auth_provider)
self.endpoints_client = EndPointClientXML(auth_provider)
self.fixed_ips_client = FixedIPsClientXML(auth_provider)
self.availability_zone_client = AvailabilityZoneClientXML(
auth_provider)
self.service_client = ServiceClientXML(auth_provider)
self.aggregates_client = AggregatesClientXML(auth_provider)
self.services_client = ServicesClientXML(auth_provider)
self.tenant_usages_client = TenantUsagesClientXML(
auth_provider)
self.policy_client = PolicyClientXML(auth_provider)
self.hosts_client = HostsClientXML(auth_provider)
self.hypervisor_client = HypervisorClientXML(auth_provider)
self.network_client = NetworkClientXML(auth_provider)
self.credentials_client = CredentialsClientXML(
auth_provider)
self.instance_usages_audit_log_client = \
InstanceUsagesAuditLogClientXML(auth_provider)
self.volume_hosts_client = VolumeHostsClientXML(
auth_provider)
self.volumes_extension_client = VolumeExtensionClientXML(
auth_provider)
if CONF.service_available.ceilometer:
self.telemetry_client = TelemetryClientXML(
auth_provider)
self.token_client = TokenClientXML()
self.token_v3_client = V3TokenClientXML()
elif self.interface == 'json':
self.certificates_client = CertificatesClientJSON(
auth_provider)
self.certificates_v3_client = CertificatesV3ClientJSON(
auth_provider)
self.baremetal_client = BaremetalClientJSON(auth_provider)
self.servers_client = ServersClientJSON(auth_provider)
self.servers_v3_client = ServersV3ClientJSON(auth_provider)
self.limits_client = LimitsClientJSON(auth_provider)
self.images_client = ImagesClientJSON(auth_provider)
self.keypairs_v3_client = KeyPairsV3ClientJSON(
auth_provider)
self.keypairs_client = KeyPairsClientJSON(auth_provider)
self.quotas_client = QuotasClientJSON(auth_provider)
self.quotas_v3_client = QuotasV3ClientJSON(auth_provider)
self.flavors_client = FlavorsClientJSON(auth_provider)
self.flavors_v3_client = FlavorsV3ClientJSON(auth_provider)
self.extensions_v3_client = ExtensionsV3ClientJSON(
auth_provider)
self.extensions_client = ExtensionsClientJSON(
auth_provider)
self.volumes_extensions_client = VolumesExtensionsClientJSON(
auth_provider)
self.floating_ips_client = FloatingIPsClientJSON(
auth_provider)
self.snapshots_client = SnapshotsClientJSON(auth_provider)
self.volumes_client = VolumesClientJSON(auth_provider)
self.volume_types_client = VolumeTypesClientJSON(
auth_provider)
self.identity_client = IdentityClientJSON(auth_provider)
self.identity_v3_client = IdentityV3ClientJSON(
auth_provider)
self.security_groups_client = SecurityGroupsClientJSON(
auth_provider)
self.interfaces_v3_client = InterfacesV3ClientJSON(
auth_provider)
self.interfaces_client = InterfacesClientJSON(
auth_provider)
self.endpoints_client = EndPointClientJSON(auth_provider)
self.fixed_ips_client = FixedIPsClientJSON(auth_provider)
self.availability_zone_v3_client = AvailabilityZoneV3ClientJSON(
auth_provider)
self.availability_zone_client = AvailabilityZoneClientJSON(
auth_provider)
self.services_v3_client = ServicesV3ClientJSON(
auth_provider)
self.service_client = ServiceClientJSON(auth_provider)
self.aggregates_v3_client = AggregatesV3ClientJSON(
auth_provider)
self.aggregates_client = AggregatesClientJSON(
auth_provider)
self.services_client = ServicesClientJSON(auth_provider)
self.tenant_usages_v3_client = TenantUsagesV3ClientJSON(
auth_provider)
self.tenant_usages_client = TenantUsagesClientJSON(
auth_provider)
self.version_v3_client = VersionV3ClientJSON(auth_provider)
self.policy_client = PolicyClientJSON(auth_provider)
self.hosts_client = HostsClientJSON(auth_provider)
self.hypervisor_v3_client = HypervisorV3ClientJSON(
auth_provider)
self.hypervisor_client = HypervisorClientJSON(
auth_provider)
self.network_client = NetworkClientJSON(auth_provider)
self.credentials_client = CredentialsClientJSON(
auth_provider)
self.instance_usages_audit_log_client = \
InstanceUsagesAuditLogClientJSON(auth_provider)
self.instance_usages_audit_log_v3_client = \
InstanceUsagesAuditLogV3ClientJSON(auth_provider)
self.volume_hosts_client = VolumeHostsClientJSON(
auth_provider)
self.volumes_extension_client = VolumeExtensionClientJSON(
auth_provider)
self.hosts_v3_client = HostsV3ClientJSON(auth_provider)
if CONF.service_available.ceilometer:
self.telemetry_client = TelemetryClientJSON(
auth_provider)
self.token_client = TokenClientJSON()
self.token_v3_client = V3TokenClientJSON()
self.negative_client = NegativeRestClient(auth_provider)
self.negative_client.service = service
else:
msg = "Unsupported interface type `%s'" % interface
raise exceptions.InvalidConfiguration(msg)
# TODO(andreaf) EC2 client still do their auth, v2 only
ec2_client_args = (self.credentials.get('username'),
self.credentials.get('password'),
CONF.identity.uri,
self.credentials.get('tenant_name'))
# common clients
self.account_client = AccountClient(auth_provider)
if CONF.service_available.glance:
self.image_client = ImageClientJSON(auth_provider)
self.image_client_v2 = ImageClientV2JSON(auth_provider)
self.container_client = ContainerClient(auth_provider)
self.object_client = ObjectClient(auth_provider)
self.orchestration_client = OrchestrationClient(
auth_provider)
self.ec2api_client = botoclients.APIClientEC2(*ec2_client_args)
self.s3_client = botoclients.ObjectClientS3(*ec2_client_args)
self.custom_object_client = ObjectClientCustomizedHeader(
auth_provider)
self.custom_account_client = \
AccountClientCustomizedHeader(auth_provider)
self.data_processing_client = DataProcessingClient(
auth_provider)
@classmethod
def get_auth_provider_class(cls, auth_version):
if auth_version == 'v2':
return auth.KeystoneV2AuthProvider
else:
return auth.KeystoneV3AuthProvider
def get_default_credentials(self):
return dict(
username=CONF.identity.username,
password=CONF.identity.password,
tenant_name=CONF.identity.tenant_name
)
def get_auth_provider(self, credentials=None):
auth_params = dict(client_type='tempest',
interface=self.interface)
auth_provider_class = self.get_auth_provider_class(self.auth_version)
# If invalid / incomplete credentials are provided, use default ones
if credentials is None or \
not auth_provider_class.check_credentials(credentials):
credentials = self.credentials
auth_params['credentials'] = credentials
return auth_provider_class(**auth_params)
class AltManager(Manager):
"""
Manager object that uses the alt_XXX credentials for its
managed client objects
"""
def __init__(self, interface='json', service=None):
super(AltManager, self).__init__(CONF.identity.alt_username,
CONF.identity.alt_password,
CONF.identity.alt_tenant_name,
interface=interface,
service=service)
class AdminManager(Manager):
"""
Manager object that uses the admin credentials for its
managed client objects
"""
def __init__(self, interface='json', service=None):
super(AdminManager, self).__init__(CONF.identity.admin_username,
CONF.identity.admin_password,
CONF.identity.admin_tenant_name,
interface=interface,
service=service)
class ComputeAdminManager(Manager):
"""
Manager object that uses the compute_admin credentials for its
managed client objects
"""
def __init__(self, interface='json', service=None):
base = super(ComputeAdminManager, self)
base.__init__(CONF.compute_admin.username,
CONF.compute_admin.password,
CONF.compute_admin.tenant_name,
interface=interface,
service=service)
class OrchestrationManager(Manager):
"""
    Manager object that uses the admin credentials for its managed client
    objects, so that heat templates can create users
"""
def __init__(self, interface='json', service=None):
base = super(OrchestrationManager, self)
base.__init__(CONF.identity.admin_username,
CONF.identity.admin_password,
CONF.identity.tenant_name,
interface=interface,
service=service)
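# Construction sketch (illustrative): a plain Manager authenticates with the
# default credentials from CONF.identity and exposes one client per service.
#
#     mgr = Manager()                  # or AdminManager(interface='xml'), etc.
#     resp, servers = mgr.servers_client.list_servers()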
|
import os, shutil
import cairo
from gi.repository import Gtk as gtk
from gi.repository import Gdk as gdk
from gi.repository import GdkPixbuf
from lib import graphics
def install_font(font_filename):
fonts_dir = os.path.join(os.environ['HOME'], '.fonts')
if not os.path.exists(fonts_dir):
os.makedirs(fonts_dir)
font_path = os.path.join(fonts_dir, font_filename)
if not os.path.exists(font_path):
shutil.copyfile(os.path.join("assets", font_filename), font_path)
class override(object):
"""decorator that replaces do_render with the declared function and
stores the original _do_render in case we might want it bit later"""
def __init__(self, target_class):
self.target_class = target_class
def __call__(self, fn):
name = fn.__name__
# backup original
setattr(self.target_class, "_original_%s" % name, getattr(self.target_class, name))
# replace with the new one
setattr(self.target_class, name, fn)
images = {}
def get_image(path, left = None, right = None, top = None, bottom = None):
"""returns image sliced up in margins as specified by left, right, top, bottom.
The result is a Slice9 object below that has .render function for simplified
rendering"""
image = images.get((path, left, right, top, bottom))
if not image:
if any((left is not None, right is not None, top is not None, bottom is not None)):
image = Slice9(path, left, right, top, bottom)
else:
image = Image(path)
return image
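# e.g. (asset paths illustrative):
#     get_image("assets/button.png", left=5, right=5, top=5, bottom=5)  # Slice9
#     get_image("assets/icon.png")                                      # Image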
# TODO - figure if perhaps this belongs to graphics
def vertical_gradient(sprite, start_color, end_color, start_y, end_y):
linear = cairo.LinearGradient(0, start_y, 0, end_y)
linear.add_color_stop_rgb(0, *graphics.Colors.parse(start_color))
linear.add_color_stop_rgb(1, *graphics.Colors.parse(end_color))
sprite.graphics.set_source(linear)
class Image(object):
def __init__(self, image):
if image is None:
return
elif isinstance(image, basestring):
# in case of string we think it's a path - try opening it!
            if not os.path.exists(image):
return
if os.path.splitext(image)[1].lower() == ".png":
image = cairo.ImageSurface.create_from_png(image)
            else:
                # gdk.pixbuf_new_from_file is PyGTK-only; with GI bindings use GdkPixbuf
                image = GdkPixbuf.Pixbuf.new_from_file(image)
self.image_data, self.width, self.height = image, image.get_width(), image.get_height()
def render(self, graphics, width = None, height = None, x_offset = 0, y_offset = 0):
graphics.save_context( )
graphics.translate( x_offset, y_offset )
graphics.rectangle( 0, 0, width or self.width, height or self.height)
graphics.clip()
graphics.set_source_surface(self.image_data)
graphics.paint()
graphics.restore_context()
class Slice9(object):
def __init__(self, image, left=0, right=0, top=0, bottom=0,
stretch_w = True, stretch_h = True):
if isinstance(image, basestring):
image = get_image(image)
else:
image = Image(image)
self.width, self.height = image.width, image.height
self.left, self.right = left, right
self.top, self.bottom = top, bottom
self.slices = []
def get_slice(x, y, w, h):
# we are grabbing bigger area and when painting will crop out to
# just the actual needed pixels. This is done because otherwise when
# stretching border, it uses white pixels to blend in
x, y = x - 1, y - 1
img = cairo.ImageSurface(cairo.FORMAT_ARGB32, w+2, h+2)
ctx = cairo.Context(img)
if isinstance(image.image_data, GdkPixbuf.Pixbuf):
ctx.set_source_pixbuf(image.image_data, -x, -y)
else:
ctx.set_source_surface(image.image_data, -x, -y)
ctx.rectangle(0, 0, w+2, h+2)
ctx.clip()
ctx.paint()
return img, w, h
# run left-right, top-down and slice image into 9 pieces
exes = (0, left, image.width - right, image.width)
ys = (0, top, image.height - bottom, image.height)
for y1, y2 in zip(ys, ys[1:]):
for x1, x2 in zip(exes, exes[1:]):
self.slices.append(get_slice(x1, y1, x2 - x1, y2 - y1))
self.stretch_w, self.stretch_h = stretch_w, stretch_h
self.stretch_filter_mode = cairo.FILTER_BEST
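    # Slice layout (indices into self.slices):
    #
    #     0 | 1 | 2     corners (0, 2, 6, 8) keep their size; edges
    #     --+---+--     (1, 3, 5, 7) stretch or repeat along one axis;
    #     3 | 4 | 5     the center (4) stretches or repeats along both.
    #     --+---+--
    #     6 | 7 | 8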
def render(self, graphics, width, height, x_offset=0, y_offset=0):
"""renders the image in the given graphics context with the told width
and height"""
def put_pattern(image, x, y, w, h):
if w <= 0 or h <= 0:
return
graphics.save_context()
if not self.stretch_w or not self.stretch_h:
# if we repeat then we have to cut off the top-left margin
# that we put in there so that stretching does not borrow white
# pixels
img = cairo.ImageSurface(cairo.FORMAT_ARGB32, image[1], image[2])
ctx = cairo.Context(img)
ctx.set_source_surface(image[0],
0 if self.stretch_w else -1,
0 if self.stretch_h else -1)
ctx.rectangle(0, 0, image[1], image[2])
ctx.clip()
ctx.paint()
else:
img = image[0]
pattern = cairo.SurfacePattern(img)
pattern.set_extend(cairo.EXTEND_REPEAT)
pattern.set_matrix(cairo.Matrix(x0 = 1 if self.stretch_w else 0,
y0 = 1 if self.stretch_h else 0,
xx = (image[1]) / float(w) if self.stretch_w else 1,
yy = (image[2]) / float(h) if self.stretch_h else 1))
pattern.set_filter(self.stretch_filter_mode)
# truncate the offsets: filling on a half pixel would leave nasty gaps
graphics.translate(int(x + x_offset), int(y + y_offset))
graphics.set_source(pattern)
graphics.rectangle(0, 0, int(w), int(h))
graphics.clip()
graphics.paint()
graphics.restore_context()
graphics.save_context()
left, right = self.left, self.right
top, bottom = self.top, self.bottom
# top-left
put_pattern(self.slices[0], 0, 0, left, top)
# top center - repeat width
put_pattern(self.slices[1], left, 0, width - left - right, top)
# top-right
put_pattern(self.slices[2], width - right, 0, right, top)
# left - repeat height
put_pattern(self.slices[3], 0, top, left, height - top - bottom)
# center - repeat width and height
put_pattern(self.slices[4], left, top, width - left - right, height - top - bottom)
# right - repeat height
put_pattern(self.slices[5], width - right, top, right, height - top - bottom)
# bottom-left
put_pattern(self.slices[6], 0, height - bottom, left, bottom)
# bottom center - repeat width
put_pattern(self.slices[7], left, height - bottom, width - left - right, bottom)
# bottom-right
put_pattern(self.slices[8], width - right, height - bottom, right, bottom)
graphics.rectangle(x_offset, y_offset, width, height)
graphics.new_path()
graphics.restore_context()
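# Illustrative use of Slice9 (hypothetical path and sizes): a frame image
# with 5px borders stretched to an arbitrary size:
#
#     background = Slice9("themes/frame.png", 5, 5, 5, 5)
#     background.render(sprite.graphics, 200, 80)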
class SpriteSheetImage(graphics.Sprite):
def __init__(self, sheet, offset_x, offset_y, width, height, **kwargs):
graphics.Sprite.__init__(self, **kwargs)
#: Image or BitmapSprite object that has the graphics on it
self.sheet = sheet
self.offset_x = offset_x
self.offset_y = offset_y
self.width = width
self.height = height
def _draw(self, context, opacity = 1, parent_matrix = None):
if not getattr(self.sheet, "cache_surface", None):
# create a cache surface similar to the context and paint the image
# onto it once; the cache surface is essential for performance.
# This is somewhat upside down: ideally one would have a dedicated
# "cache surface" instruction instead
surface = context.get_target().create_similar(self.sheet.cache_mode,
self.sheet.width,
self.sheet.height)
local_context = cairo.Context(surface)
if isinstance(self.sheet.image_data, GdkPixbuf.Pixbuf):
local_context.set_source_pixbuf(self.sheet.image_data, 0, 0)
else:
local_context.set_source_surface(self.sheet.image_data)
local_context.paint()
self.sheet.cache_surface = surface
# add instructions with the resulting surface
if self._sprite_dirty:
self.graphics.save_context()
self.graphics.set_source_surface(self.sheet.cache_surface, -self.offset_x, -self.offset_y)
self.graphics.rectangle(0, 0, self.width, self.height)
self.graphics.clip()
self.graphics.paint()
self.graphics.restore_context()
graphics.Sprite._draw(self, context, opacity, parent_matrix)
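# Illustrative use of SpriteSheetImage (hypothetical sheet and frame
# coordinates): pick one 32x32 frame at (64, 0) from a sheet. cache_mode is
# consumed by _draw above and is assumed here to be a cairo content constant:
#
#     sheet = Image("themes/tiles.png")
#     sheet.cache_mode = cairo.CONTENT_COLOR_ALPHA
#     frame = SpriteSheetImage(sheet, 64, 0, 32, 32)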
"""
Read and write ZIP files.
"""
import struct, os, time, sys, shutil
import binascii, cStringIO, stat
try:
import zlib # We may need its compression method
crc32 = zlib.crc32
except ImportError:
zlib = None
crc32 = binascii.crc32
__all__ = ["BadZipfile", "error", "ZIP_STORED", "ZIP_DEFLATED", "is_zipfile",
"ZipInfo", "ZipFile", "PyZipFile", "LargeZipFile" ]
class BadZipfile(Exception):
pass
class LargeZipFile(Exception):
"""
Raised when writing a zipfile, the zipfile requires ZIP64 extensions
and those extensions are disabled.
"""
error = BadZipfile # The exception raised by this module
ZIP64_LIMIT = (1 << 31) - 1
ZIP_FILECOUNT_LIMIT = 1 << 16
ZIP_MAX_COMMENT = (1 << 16) - 1
# constants for Zip file compression methods
ZIP_STORED = 0
ZIP_DEFLATED = 8
# Other ZIP compression methods not supported
# Below are some formats and associated data for reading/writing headers using
# the struct module. The names and structures of headers/records are those used
# in the PKWARE description of the ZIP file format:
# http://www.pkware.com/documents/casestudies/APPNOTE.TXT
# (URL valid as of January 2008)
# The "end of central directory" structure, magic number, size, and indices
# (section V.I in the format document)
structEndArchive = "<4s4H2LH"
stringEndArchive = "PK\005\006"
sizeEndCentDir = struct.calcsize(structEndArchive)
_ECD_SIGNATURE = 0
_ECD_DISK_NUMBER = 1
_ECD_DISK_START = 2
_ECD_ENTRIES_THIS_DISK = 3
_ECD_ENTRIES_TOTAL = 4
_ECD_SIZE = 5
_ECD_OFFSET = 6
_ECD_COMMENT_SIZE = 7
# These last two indices are not part of the structure as defined in the
# spec, but they are used internally by this module as a convenience
_ECD_COMMENT = 8
_ECD_LOCATION = 9
# The "central directory" structure, magic number, size, and indices
# of entries in the structure (section V.F in the format document)
structCentralDir = "<4s4B4HL2L5H2L"
stringCentralDir = "PK\001\002"
sizeCentralDir = struct.calcsize(structCentralDir)
# indexes of entries in the central directory structure
_CD_SIGNATURE = 0
_CD_CREATE_VERSION = 1
_CD_CREATE_SYSTEM = 2
_CD_EXTRACT_VERSION = 3
_CD_EXTRACT_SYSTEM = 4
_CD_FLAG_BITS = 5
_CD_COMPRESS_TYPE = 6
_CD_TIME = 7
_CD_DATE = 8
_CD_CRC = 9
_CD_COMPRESSED_SIZE = 10
_CD_UNCOMPRESSED_SIZE = 11
_CD_FILENAME_LENGTH = 12
_CD_EXTRA_FIELD_LENGTH = 13
_CD_COMMENT_LENGTH = 14
_CD_DISK_NUMBER_START = 15
_CD_INTERNAL_FILE_ATTRIBUTES = 16
_CD_EXTERNAL_FILE_ATTRIBUTES = 17
_CD_LOCAL_HEADER_OFFSET = 18
# The "local file header" structure, magic number, size, and indices
# (section V.A in the format document)
structFileHeader = "<4s2B4HL2L2H"
stringFileHeader = "PK\003\004"
sizeFileHeader = struct.calcsize(structFileHeader)
_FH_SIGNATURE = 0
_FH_EXTRACT_VERSION = 1
_FH_EXTRACT_SYSTEM = 2
_FH_GENERAL_PURPOSE_FLAG_BITS = 3
_FH_COMPRESSION_METHOD = 4
_FH_LAST_MOD_TIME = 5
_FH_LAST_MOD_DATE = 6
_FH_CRC = 7
_FH_COMPRESSED_SIZE = 8
_FH_UNCOMPRESSED_SIZE = 9
_FH_FILENAME_LENGTH = 10
_FH_EXTRA_FIELD_LENGTH = 11
# The "Zip64 end of central directory locator" structure, magic number, and size
structEndArchive64Locator = "<4sLQL"
stringEndArchive64Locator = "PK\x06\x07"
sizeEndCentDir64Locator = struct.calcsize(structEndArchive64Locator)
# The "Zip64 end of central directory" record, magic number, size, and indices
# (section V.G in the format document)
structEndArchive64 = "<4sQ2H2L4Q"
stringEndArchive64 = "PK\x06\x06"
sizeEndCentDir64 = struct.calcsize(structEndArchive64)
_CD64_SIGNATURE = 0
_CD64_DIRECTORY_RECSIZE = 1
_CD64_CREATE_VERSION = 2
_CD64_EXTRACT_VERSION = 3
_CD64_DISK_NUMBER = 4
_CD64_DISK_NUMBER_START = 5
_CD64_NUMBER_ENTRIES_THIS_DISK = 6
_CD64_NUMBER_ENTRIES_TOTAL = 7
_CD64_DIRECTORY_SIZE = 8
_CD64_OFFSET_START_CENTDIR = 9
def is_zipfile(filename):
"""Quickly see if file is a ZIP file by checking the magic number."""
try:
fpin = open(filename, "rb")
endrec = _EndRecData(fpin)
fpin.close()
if endrec:
return True # file has correct magic number
except IOError:
pass
return False
def _EndRecData64(fpin, offset, endrec):
"""
Read the ZIP64 end-of-archive records and use that to update endrec
"""
fpin.seek(offset - sizeEndCentDir64Locator, 2)
data = fpin.read(sizeEndCentDir64Locator)
sig, diskno, reloff, disks = struct.unpack(structEndArchive64Locator, data)
if sig != stringEndArchive64Locator:
return endrec
if diskno != 0 or disks != 1:
raise BadZipfile("zipfiles that span multiple disks are not supported")
# Assume no 'zip64 extensible data'
fpin.seek(offset - sizeEndCentDir64Locator - sizeEndCentDir64, 2)
data = fpin.read(sizeEndCentDir64)
sig, sz, create_version, read_version, disk_num, disk_dir, \
dircount, dircount2, dirsize, diroffset = \
struct.unpack(structEndArchive64, data)
if sig != stringEndArchive64:
return endrec
# Update the original endrec using data from the ZIP64 record
endrec[_ECD_SIGNATURE] = sig
endrec[_ECD_DISK_NUMBER] = disk_num
endrec[_ECD_DISK_START] = disk_dir
endrec[_ECD_ENTRIES_THIS_DISK] = dircount
endrec[_ECD_ENTRIES_TOTAL] = dircount2
endrec[_ECD_SIZE] = dirsize
endrec[_ECD_OFFSET] = diroffset
return endrec
def _EndRecData(fpin):
"""Return data from the "End of Central Directory" record, or None.
The data is a list of the nine items in the ZIP "End of central dir"
record followed by a tenth item, the file seek offset of this record."""
# Determine file size
fpin.seek(0, 2)
filesize = fpin.tell()
# Check to see if this is a ZIP file with no archive comment (the
# "end of central directory" structure should be the last item in the
# file if this is the case).
try:
fpin.seek(-sizeEndCentDir, 2)
except IOError:
return None
data = fpin.read()
if data[0:4] == stringEndArchive and data[-2:] == "\000\000":
# the signature is correct and there's no comment, unpack structure
endrec = struct.unpack(structEndArchive, data)
endrec=list(endrec)
# Append a blank comment and record start offset
endrec.append("")
endrec.append(filesize - sizeEndCentDir)
# Try to read the "Zip64 end of central directory" structure
return _EndRecData64(fpin, -sizeEndCentDir, endrec)
# Either this is not a ZIP file, or it is a ZIP file with an archive
# comment. Search the end of the file for the "end of central directory"
# record signature. The comment is the last item in the ZIP file and may be
# up to 64K long. It is assumed that the "end of central directory" magic
# number does not appear in the comment.
maxCommentStart = max(filesize - (1 << 16) - sizeEndCentDir, 0)
fpin.seek(maxCommentStart, 0)
data = fpin.read()
start = data.rfind(stringEndArchive)
if start >= 0:
# found the magic number; attempt to unpack and interpret
recData = data[start:start+sizeEndCentDir]
endrec = list(struct.unpack(structEndArchive, recData))
comment = data[start+sizeEndCentDir:]
# check that comment length is correct
if endrec[_ECD_COMMENT_SIZE] == len(comment):
# Append the archive comment and start offset
endrec.append(comment)
endrec.append(maxCommentStart + start)
# Try to read the "Zip64 end of central directory" structure
return _EndRecData64(fpin, maxCommentStart + start - filesize,
endrec)
# Unable to find a valid end of central directory structure
return
class ZipInfo (object):
"""Class with attributes describing each file in the ZIP archive."""
__slots__ = (
'orig_filename',
'filename',
'date_time',
'compress_type',
'comment',
'extra',
'create_system',
'create_version',
'extract_version',
'reserved',
'flag_bits',
'volume',
'internal_attr',
'external_attr',
'header_offset',
'CRC',
'compress_size',
'file_size',
'_raw_time',
)
def __init__(self, filename="NoName", date_time=(1980,1,1,0,0,0)):
self.orig_filename = filename # Original file name in archive
# Terminate the file name at the first null byte. Null bytes in file
# names are used as tricks by viruses in archives.
null_byte = filename.find(chr(0))
if null_byte >= 0:
filename = filename[0:null_byte]
# This is used to ensure paths in generated ZIP files always use
# forward slashes as the directory separator, as required by the
# ZIP format specification.
if os.sep != "/" and os.sep in filename:
filename = filename.replace(os.sep, "/")
self.filename = filename # Normalized file name
self.date_time = date_time # year, month, day, hour, min, sec
# Standard values:
self.compress_type = ZIP_STORED # Type of compression for the file
self.comment = "" # Comment for each file
self.extra = "" # ZIP extra data
if sys.platform == 'win32':
self.create_system = 0 # System which created ZIP archive
else:
# Assume everything else is unix-y
self.create_system = 3 # System which created ZIP archive
self.create_version = 20 # Version which created ZIP archive
self.extract_version = 20 # Version needed to extract archive
self.reserved = 0 # Must be zero
self.flag_bits = 0 # ZIP flag bits
self.volume = 0 # Volume number of file header
self.internal_attr = 0 # Internal attributes
self.external_attr = 0 # External file attributes
# Other attributes are set by class ZipFile:
# header_offset Byte offset to the file header
# CRC CRC-32 of the uncompressed file
# compress_size Size of the compressed file
# file_size Size of the uncompressed file
def FileHeader(self):
"""Return the per-file header as a string."""
dt = self.date_time
dosdate = (dt[0] - 1980) << 9 | dt[1] << 5 | dt[2]
dostime = dt[3] << 11 | dt[4] << 5 | (dt[5] // 2)
if self.flag_bits & 0x08:
# Set these to zero because we write them after the file data
CRC = compress_size = file_size = 0
else:
CRC = self.CRC
compress_size = self.compress_size
file_size = self.file_size
extra = self.extra
if file_size > ZIP64_LIMIT or compress_size > ZIP64_LIMIT:
# File is larger than what fits into a 4 byte integer,
# fall back to the ZIP64 extension
fmt = '<HHQQ'
extra = extra + struct.pack(fmt,
1, struct.calcsize(fmt)-4, file_size, compress_size)
file_size = 0xffffffff
compress_size = 0xffffffff
self.extract_version = max(45, self.extract_version)
self.create_version = max(45, self.create_version)
filename, flag_bits = self._encodeFilenameFlags()
header = struct.pack(structFileHeader, stringFileHeader,
self.extract_version, self.reserved, flag_bits,
self.compress_type, dostime, dosdate, CRC,
compress_size, file_size,
len(filename), len(extra))
return header + filename + extra
def _encodeFilenameFlags(self):
if isinstance(self.filename, unicode):
try:
return self.filename.encode('ascii'), self.flag_bits
except UnicodeEncodeError:
return self.filename.encode('utf-8'), self.flag_bits | 0x800
else:
return self.filename, self.flag_bits
def _decodeFilename(self):
if self.flag_bits & 0x800:
return self.filename.decode('utf-8')
else:
return self.filename
def _decodeExtra(self):
# Try to decode the extra field.
extra = self.extra
unpack = struct.unpack
while extra:
tp, ln = unpack('<HH', extra[:4])
if tp == 1:
if ln >= 24:
counts = unpack('<QQQ', extra[4:28])
elif ln == 16:
counts = unpack('<QQ', extra[4:20])
elif ln == 8:
counts = unpack('<Q', extra[4:12])
elif ln == 0:
counts = ()
else:
raise RuntimeError, "Corrupt extra field %s"%(ln,)
idx = 0
# ZIP64 extension (large files and/or large archives)
if self.file_size in (0xffffffffffffffffL, 0xffffffffL):
self.file_size = counts[idx]
idx += 1
if self.compress_size == 0xFFFFFFFFL:
self.compress_size = counts[idx]
idx += 1
if self.header_offset == 0xffffffffL:
self.header_offset = counts[idx]
idx += 1
extra = extra[ln+4:]
class _ZipDecrypter:
"""Class to handle decryption of files stored within a ZIP archive.
ZIP supports a password-based form of encryption. Even though known
plaintext attacks have been found against it, it is still useful
to be able to get data out of such a file.
Usage:
zd = _ZipDecrypter(mypwd)
plain_char = zd(cypher_char)
plain_text = map(zd, cypher_text)
"""
def _GenerateCRCTable():
"""Generate a CRC-32 table.
ZIP encryption uses the CRC32 one-byte primitive for scrambling some
internal keys. We noticed that a direct implementation is faster than
relying on binascii.crc32().
"""
poly = 0xedb88320
table = [0] * 256
for i in range(256):
crc = i
for j in range(8):
if crc & 1:
crc = ((crc >> 1) & 0x7FFFFFFF) ^ poly
else:
crc = ((crc >> 1) & 0x7FFFFFFF)
table[i] = crc
return table
crctable = _GenerateCRCTable()
def _crc32(self, ch, crc):
"""Compute the CRC32 primitive on one byte."""
return ((crc >> 8) & 0xffffff) ^ self.crctable[(crc ^ ord(ch)) & 0xff]
def __init__(self, pwd):
self.key0 = 305419896
self.key1 = 591751049
self.key2 = 878082192
for p in pwd:
self._UpdateKeys(p)
def _UpdateKeys(self, c):
self.key0 = self._crc32(c, self.key0)
self.key1 = (self.key1 + (self.key0 & 255)) & 4294967295
self.key1 = (self.key1 * 134775813 + 1) & 4294967295
self.key2 = self._crc32(chr((self.key1 >> 24) & 255), self.key2)
def __call__(self, c):
"""Decrypt a single character."""
c = ord(c)
k = self.key2 | 2
c = c ^ (((k * (k^1)) >> 8) & 255)
c = chr(c)
self._UpdateKeys(c)
return c
class ZipExtFile:
"""File-like object for reading an archive member.
Is returned by ZipFile.open().
"""
def __init__(self, fileobj, zipinfo, decrypt=None):
self.fileobj = fileobj
self.decrypter = decrypt
self.bytes_read = 0L
self.rawbuffer = ''
self.readbuffer = ''
self.linebuffer = ''
self.eof = False
self.univ_newlines = False
self.nlSeps = ("\n", )
self.lastdiscard = ''
self.compress_type = zipinfo.compress_type
self.compress_size = zipinfo.compress_size
self.closed = False
self.mode = "r"
self.name = zipinfo.filename
# read from compressed files in 64k blocks
self.compreadsize = 64*1024
if self.compress_type == ZIP_DEFLATED:
self.dc = zlib.decompressobj(-15)
def set_univ_newlines(self, univ_newlines):
self.univ_newlines = univ_newlines
# pick line separator char(s) based on universal newlines flag
self.nlSeps = ("\n", )
if self.univ_newlines:
self.nlSeps = ("\r\n", "\r", "\n")
def __iter__(self):
return self
def next(self):
nextline = self.readline()
if not nextline:
raise StopIteration()
return nextline
def close(self):
self.closed = True
def _checkfornewline(self):
nl, nllen = -1, -1
if self.linebuffer:
# ugly check for cases where half of an \r\n pair was
# read on the last pass, and the \r was discarded. In this
# case we just throw away the \n at the start of the buffer.
if (self.lastdiscard, self.linebuffer[0]) == ('\r','\n'):
self.linebuffer = self.linebuffer[1:]
for sep in self.nlSeps:
nl = self.linebuffer.find(sep)
if nl >= 0:
nllen = len(sep)
return nl, nllen
return nl, nllen
def readline(self, size = -1):
"""Read a line with approx. size. If size is negative,
read a whole line.
"""
if size < 0:
size = sys.maxint
elif size == 0:
return ''
# check for a newline already in buffer
nl, nllen = self._checkfornewline()
if nl >= 0:
# the next line was already in the buffer
nl = min(nl, size)
else:
# no line break in buffer - try to read more
size -= len(self.linebuffer)
while nl < 0 and size > 0:
buf = self.read(min(size, 100))
if not buf:
break
self.linebuffer += buf
size -= len(buf)
# check for a newline in buffer
nl, nllen = self._checkfornewline()
# we either ran out of bytes in the file, or
# met the specified size limit without finding a newline,
# so return current buffer
if nl < 0:
s = self.linebuffer
self.linebuffer = ''
return s
buf = self.linebuffer[:nl]
self.lastdiscard = self.linebuffer[nl:nl + nllen]
self.linebuffer = self.linebuffer[nl + nllen:]
# line is always returned with \n as newline char (except possibly
# for a final incomplete line in the file, which is handled above).
return buf + "\n"
def readlines(self, sizehint = -1):
"""Return a list with all (following) lines. The sizehint parameter
is ignored in this implementation.
"""
result = []
while True:
line = self.readline()
if not line: break
result.append(line)
return result
def read(self, size = None):
# act like file() obj and return empty string if size is 0
if size == 0:
return ''
# determine read size
bytesToRead = self.compress_size - self.bytes_read
# adjust read size for encrypted files since the first 12 bytes
# are for the encryption/password information
if self.decrypter is not None:
bytesToRead -= 12
if size is not None and size >= 0:
if self.compress_type == ZIP_STORED:
lr = len(self.readbuffer)
bytesToRead = min(bytesToRead, size - lr)
elif self.compress_type == ZIP_DEFLATED:
if len(self.readbuffer) > size:
# the user has requested fewer bytes than we've already
# pulled through the decompressor; don't read any more
bytesToRead = 0
else:
# user will use up the buffer, so read some more
lr = len(self.rawbuffer)
bytesToRead = min(bytesToRead, self.compreadsize - lr)
# avoid reading past end of file contents
if bytesToRead + self.bytes_read > self.compress_size:
bytesToRead = self.compress_size - self.bytes_read
# try to read from file (if necessary)
if bytesToRead > 0:
bytes = self.fileobj.read(bytesToRead)
self.bytes_read += len(bytes)
self.rawbuffer += bytes
# handle contents of raw buffer
if self.rawbuffer:
newdata = self.rawbuffer
self.rawbuffer = ''
# decrypt new data if we were given an object to handle that
if newdata and self.decrypter is not None:
newdata = ''.join(map(self.decrypter, newdata))
# decompress newly read data if necessary
if newdata and self.compress_type == ZIP_DEFLATED:
newdata = self.dc.decompress(newdata)
self.rawbuffer = self.dc.unconsumed_tail
if self.eof and len(self.rawbuffer) == 0:
# we're out of raw bytes (both from the file and
# the local buffer); flush just to make sure the
# decompressor is done
newdata += self.dc.flush()
# prevent decompressor from being used again
self.dc = None
self.readbuffer += newdata
# return what the user asked for
if size is None or len(self.readbuffer) <= size:
bytes = self.readbuffer
self.readbuffer = ''
else:
bytes = self.readbuffer[:size]
self.readbuffer = self.readbuffer[size:]
return bytes
class ZipFile:
""" Class with methods to open, read, write, close, list zip files.
z = ZipFile(file, mode="r", compression=ZIP_STORED, allowZip64=False)
file: Either the path to the file, or a file-like object.
If it is a path, the file will be opened and closed by ZipFile.
mode: The mode can be either read "r", write "w" or append "a".
compression: ZIP_STORED (no compression) or ZIP_DEFLATED (requires zlib).
allowZip64: if True ZipFile will create files with ZIP64 extensions when
needed, otherwise it will raise an exception when this would
be necessary.
"""
fp = None # Set here since __del__ checks it
def __init__(self, file, mode="r", compression=ZIP_STORED, allowZip64=False):
"""Open the ZIP file with mode read "r", write "w" or append "a"."""
if mode not in ("r", "w", "a"):
raise RuntimeError('ZipFile() requires mode "r", "w", or "a"')
if compression == ZIP_STORED:
pass
elif compression == ZIP_DEFLATED:
if not zlib:
raise RuntimeError,\
"Compression requires the (missing) zlib module"
else:
raise RuntimeError, "That compression method is not supported"
self._allowZip64 = allowZip64
self._didModify = False
self.debug = 0 # Level of printing: 0 through 3
self.NameToInfo = {} # Find file info given name
self.filelist = [] # List of ZipInfo instances for archive
self.compression = compression # Method of compression
self.mode = key = mode.replace('b', '')[0]
self.pwd = None
self.comment = ''
# Check if we were passed a file-like object
if isinstance(file, basestring):
self._filePassed = 0
self.filename = file
modeDict = {'r' : 'rb', 'w': 'wb', 'a' : 'r+b'}
try:
self.fp = open(file, modeDict[mode])
except IOError:
if mode == 'a':
mode = key = 'w'
self.fp = open(file, modeDict[mode])
else:
raise
else:
self._filePassed = 1
self.fp = file
self.filename = getattr(file, 'name', None)
if key == 'r':
self._GetContents()
elif key == 'w':
pass
elif key == 'a':
try: # See if file is a zip file
self._RealGetContents()
# seek to start of directory and overwrite
self.fp.seek(self.start_dir, 0)
except BadZipfile: # file is not a zip file, just append
self.fp.seek(0, 2)
else:
if not self._filePassed:
self.fp.close()
self.fp = None
raise RuntimeError, 'Mode must be "r", "w" or "a"'
def _GetContents(self):
"""Read the directory, making sure we close the file if the format
is bad."""
try:
self._RealGetContents()
except BadZipfile:
if not self._filePassed:
self.fp.close()
self.fp = None
raise
def _RealGetContents(self):
"""Read in the table of contents for the ZIP file."""
fp = self.fp
endrec = _EndRecData(fp)
if not endrec:
raise BadZipfile, "File is not a zip file"
if self.debug > 1:
print endrec
size_cd = endrec[_ECD_SIZE] # bytes in central directory
offset_cd = endrec[_ECD_OFFSET] # offset of central directory
self.comment = endrec[_ECD_COMMENT] # archive comment
# "concat" is zero, unless zip was concatenated to another file
concat = endrec[_ECD_LOCATION] - size_cd - offset_cd
if endrec[_ECD_SIGNATURE] == stringEndArchive64:
# If Zip64 extension structures are present, account for them
concat -= (sizeEndCentDir64 + sizeEndCentDir64Locator)
if self.debug > 2:
inferred = concat + offset_cd
print "given, inferred, offset", offset_cd, inferred, concat
# self.start_dir: Position of start of central directory
self.start_dir = offset_cd + concat
fp.seek(self.start_dir, 0)
data = fp.read(size_cd)
fp = cStringIO.StringIO(data)
total = 0
while total < size_cd:
centdir = fp.read(sizeCentralDir)
if centdir[0:4] != stringCentralDir:
raise BadZipfile, "Bad magic number for central directory"
centdir = struct.unpack(structCentralDir, centdir)
if self.debug > 2:
print centdir
filename = fp.read(centdir[_CD_FILENAME_LENGTH])
# Create ZipInfo instance to store file information
x = ZipInfo(filename)
x.extra = fp.read(centdir[_CD_EXTRA_FIELD_LENGTH])
x.comment = fp.read(centdir[_CD_COMMENT_LENGTH])
x.header_offset = centdir[_CD_LOCAL_HEADER_OFFSET]
(x.create_version, x.create_system, x.extract_version, x.reserved,
x.flag_bits, x.compress_type, t, d,
x.CRC, x.compress_size, x.file_size) = centdir[1:12]
x.volume, x.internal_attr, x.external_attr = centdir[15:18]
# Convert date/time code to (year, month, day, hour, min, sec)
x._raw_time = t
x.date_time = ( (d>>9)+1980, (d>>5)&0xF, d&0x1F,
t>>11, (t>>5)&0x3F, (t&0x1F) * 2 )
x._decodeExtra()
x.header_offset = x.header_offset + concat
x.filename = x._decodeFilename()
self.filelist.append(x)
self.NameToInfo[x.filename] = x
# update total bytes read from central directory
total = (total + sizeCentralDir + centdir[_CD_FILENAME_LENGTH]
+ centdir[_CD_EXTRA_FIELD_LENGTH]
+ centdir[_CD_COMMENT_LENGTH])
if self.debug > 2:
print "total", total
def namelist(self):
"""Return a list of file names in the archive."""
l = []
for data in self.filelist:
l.append(data.filename)
return l
def infolist(self):
"""Return a list of class ZipInfo instances for files in the
archive."""
return self.filelist
def printdir(self):
"""Print a table of contents for the zip file."""
print "%-46s %19s %12s" % ("File Name", "Modified ", "Size")
for zinfo in self.filelist:
date = "%d-%02d-%02d %02d:%02d:%02d" % zinfo.date_time[:6]
print "%-46s %s %12d" % (zinfo.filename, date, zinfo.file_size)
def testzip(self):
"""Read all the files and check the CRC."""
chunk_size = 2 ** 20
for zinfo in self.filelist:
try:
# Read by chunks, to avoid an OverflowError or a
# MemoryError with very large embedded files.
f = self.open(zinfo.filename, "r")
while f.read(chunk_size): # Check CRC-32
pass
except BadZipfile:
return zinfo.filename
def getinfo(self, name):
"""Return the instance of ZipInfo given 'name'."""
info = self.NameToInfo.get(name)
if info is None:
raise KeyError(
'There is no item named %r in the archive' % name)
return info
def setpassword(self, pwd):
"""Set default password for encrypted files."""
self.pwd = pwd
def read(self, name, pwd=None):
"""Return file bytes (as a string) for name."""
return self.open(name, "r", pwd).read()
def open(self, name, mode="r", pwd=None):
"""Return file-like object for 'name'."""
if mode not in ("r", "U", "rU"):
raise RuntimeError, 'open() requires mode "r", "U", or "rU"'
if not self.fp:
raise RuntimeError, \
"Attempt to read ZIP archive that was already closed"
# Only open a new file for instances where we were not
# given a file object in the constructor
if self._filePassed:
zef_file = self.fp
else:
zef_file = open(self.filename, 'rb')
# Make sure we have an info object
if isinstance(name, ZipInfo):
# 'name' is already an info object
zinfo = name
else:
# Get info object for name
zinfo = self.getinfo(name)
zef_file.seek(zinfo.header_offset, 0)
# Skip the file header:
fheader = zef_file.read(sizeFileHeader)
if fheader[0:4] != stringFileHeader:
raise BadZipfile, "Bad magic number for file header"
fheader = struct.unpack(structFileHeader, fheader)
fname = zef_file.read(fheader[_FH_FILENAME_LENGTH])
if fheader[_FH_EXTRA_FIELD_LENGTH]:
zef_file.read(fheader[_FH_EXTRA_FIELD_LENGTH])
if fname != zinfo.orig_filename:
raise BadZipfile, \
'File name in directory "%s" and header "%s" differ.' % (
zinfo.orig_filename, fname)
# check for encrypted flag & handle password
is_encrypted = zinfo.flag_bits & 0x1
zd = None
if is_encrypted:
if not pwd:
pwd = self.pwd
if not pwd:
raise RuntimeError, "File %s is encrypted, " \
"password required for extraction" % name
zd = _ZipDecrypter(pwd)
# The first 12 bytes in the cypher stream are an encryption header
# used to strengthen the algorithm. The first 11 bytes are
# completely random, while the 12th contains the MSB of the CRC,
# or the MSB of the file time depending on the header type
# and is used to check the correctness of the password.
bytes = zef_file.read(12)
h = map(zd, bytes[0:12])
if zinfo.flag_bits & 0x8:
# compare against the file type from extended local headers
check_byte = (zinfo._raw_time >> 8) & 0xff
else:
# compare against the CRC otherwise
check_byte = (zinfo.CRC >> 24) & 0xff
if ord(h[11]) != check_byte:
raise RuntimeError("Bad password for file", name)
# build and return a ZipExtFile
if zd is None:
zef = ZipExtFile(zef_file, zinfo)
else:
zef = ZipExtFile(zef_file, zinfo, zd)
# set universal newlines on ZipExtFile if necessary
if "U" in mode:
zef.set_univ_newlines(True)
return zef
def extract(self, member, path=None, pwd=None):
"""Extract a member from the archive to the current working directory,
using its full name. Its file information is extracted as accurately
as possible. `member' may be a filename or a ZipInfo object. You can
specify a different directory using `path'.
"""
if not isinstance(member, ZipInfo):
member = self.getinfo(member)
if path is None:
path = os.getcwd()
return self._extract_member(member, path, pwd)
def extractall(self, path=None, members=None, pwd=None):
"""Extract all members from the archive to the current working
directory. `path' specifies a different directory to extract to.
`members' is optional and must be a subset of the list returned
by namelist().
"""
if members is None:
members = self.namelist()
for zipinfo in members:
self.extract(zipinfo, path, pwd)
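# Example: extract every member into a target directory (hypothetical
# paths; pwd is only needed for encrypted members):
#
#     zf = ZipFile("archive.zip")
#     zf.extractall("/tmp/unpacked")
#     zf.close()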
def _extract_member(self, member, targetpath, pwd):
"""Extract the ZipInfo object 'member' to a physical
file on the path targetpath.
"""
# build the destination pathname, replacing
# forward slashes to platform specific separators.
# Strip trailing path separator, unless it represents the root.
if (targetpath[-1:] in (os.path.sep, os.path.altsep)
and len(os.path.splitdrive(targetpath)[1]) > 1):
targetpath = targetpath[:-1]
# don't include leading "/" from file name if present
if member.filename[0] == '/':
targetpath = os.path.join(targetpath, member.filename[1:])
else:
targetpath = os.path.join(targetpath, member.filename)
targetpath = os.path.normpath(targetpath)
# Create all upper directories if necessary.
upperdirs = os.path.dirname(targetpath)
if upperdirs and not os.path.exists(upperdirs):
os.makedirs(upperdirs)
if member.filename[-1] == '/':
if not os.path.isdir(targetpath):
os.mkdir(targetpath)
return targetpath
source = self.open(member, pwd=pwd)
target = file(targetpath, "wb")
shutil.copyfileobj(source, target)
source.close()
target.close()
return targetpath
def _writecheck(self, zinfo):
"""Check for errors before writing a file to the archive."""
if zinfo.filename in self.NameToInfo:
if self.debug: # Warning for duplicate names
print "Duplicate name:", zinfo.filename
if self.mode not in ("w", "a"):
raise RuntimeError, 'write() requires mode "w" or "a"'
if not self.fp:
raise RuntimeError, \
"Attempt to write ZIP archive that was already closed"
if zinfo.compress_type == ZIP_DEFLATED and not zlib:
raise RuntimeError, \
"Compression requires the (missing) zlib module"
if zinfo.compress_type not in (ZIP_STORED, ZIP_DEFLATED):
raise RuntimeError, \
"That compression method is not supported"
if zinfo.file_size > ZIP64_LIMIT:
if not self._allowZip64:
raise LargeZipFile("Filesize would require ZIP64 extensions")
if zinfo.header_offset > ZIP64_LIMIT:
if not self._allowZip64:
raise LargeZipFile("Zipfile size would require ZIP64 extensions")
def write(self, filename, arcname=None, compress_type=None):
"""Put the bytes from filename into the archive under the name
arcname."""
if not self.fp:
raise RuntimeError(
"Attempt to write to ZIP archive that was already closed")
st = os.stat(filename)
isdir = stat.S_ISDIR(st.st_mode)
mtime = time.localtime(st.st_mtime)
date_time = mtime[0:6]
# Create ZipInfo instance to store file information
if arcname is None:
arcname = filename
arcname = os.path.normpath(os.path.splitdrive(arcname)[1])
while arcname[0] in (os.sep, os.altsep):
arcname = arcname[1:]
if isdir:
arcname += '/'
zinfo = ZipInfo(arcname, date_time)
zinfo.external_attr = (st[0] & 0xFFFF) << 16L # Unix attributes
if compress_type is None:
zinfo.compress_type = self.compression
else:
zinfo.compress_type = compress_type
zinfo.file_size = st.st_size
zinfo.flag_bits = 0x00
zinfo.header_offset = self.fp.tell() # Start of header bytes
self._writecheck(zinfo)
self._didModify = True
if isdir:
zinfo.file_size = 0
zinfo.compress_size = 0
zinfo.CRC = 0
self.filelist.append(zinfo)
self.NameToInfo[zinfo.filename] = zinfo
self.fp.write(zinfo.FileHeader())
return
fp = open(filename, "rb")
# Must overwrite CRC and sizes with correct data later
zinfo.CRC = CRC = 0
zinfo.compress_size = compress_size = 0
zinfo.file_size = file_size = 0
self.fp.write(zinfo.FileHeader())
if zinfo.compress_type == ZIP_DEFLATED:
cmpr = zlib.compressobj(zlib.Z_DEFAULT_COMPRESSION,
zlib.DEFLATED, -15)
else:
cmpr = None
while 1:
buf = fp.read(1024 * 8)
if not buf:
break
file_size = file_size + len(buf)
CRC = crc32(buf, CRC) & 0xffffffff
if cmpr:
buf = cmpr.compress(buf)
compress_size = compress_size + len(buf)
self.fp.write(buf)
fp.close()
if cmpr:
buf = cmpr.flush()
compress_size = compress_size + len(buf)
self.fp.write(buf)
zinfo.compress_size = compress_size
else:
zinfo.compress_size = file_size
zinfo.CRC = CRC
zinfo.file_size = file_size
# Seek backwards and write CRC and file sizes
position = self.fp.tell() # Preserve current position in file
self.fp.seek(zinfo.header_offset + 14, 0)
self.fp.write(struct.pack("<LLL", zinfo.CRC, zinfo.compress_size,
zinfo.file_size))
self.fp.seek(position, 0)
self.filelist.append(zinfo)
self.NameToInfo[zinfo.filename] = zinfo
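# Example: create an archive with one deflated member (hypothetical file
# names; ZIP_DEFLATED requires zlib):
#
#     zf = ZipFile("out.zip", "w", ZIP_DEFLATED)
#     zf.write("notes.txt", "docs/notes.txt")
#     zf.close()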
def writestr(self, zinfo_or_arcname, bytes):
"""Write a file into the archive. The contents is the string
'bytes'. 'zinfo_or_arcname' is either a ZipInfo instance or
the name of the file in the archive."""
if not isinstance(zinfo_or_arcname, ZipInfo):
zinfo = ZipInfo(filename=zinfo_or_arcname,
date_time=time.localtime(time.time())[:6])
zinfo.compress_type = self.compression
zinfo.external_attr = 0600 << 16
else:
zinfo = zinfo_or_arcname
if not self.fp:
raise RuntimeError(
"Attempt to write to ZIP archive that was already closed")
zinfo.file_size = len(bytes) # Uncompressed size
zinfo.header_offset = self.fp.tell() # Start of header bytes
self._writecheck(zinfo)
self._didModify = True
zinfo.CRC = crc32(bytes) & 0xffffffff # CRC-32 checksum
if zinfo.compress_type == ZIP_DEFLATED:
co = zlib.compressobj(zlib.Z_DEFAULT_COMPRESSION,
zlib.DEFLATED, -15)
bytes = co.compress(bytes) + co.flush()
zinfo.compress_size = len(bytes) # Compressed size
else:
zinfo.compress_size = zinfo.file_size
zinfo.header_offset = self.fp.tell() # Start of header bytes
self.fp.write(zinfo.FileHeader())
self.fp.write(bytes)
self.fp.flush()
if zinfo.flag_bits & 0x08:
# Write CRC and file sizes after the file data
self.fp.write(struct.pack("<LLL", zinfo.CRC, zinfo.compress_size,
zinfo.file_size))
self.filelist.append(zinfo)
self.NameToInfo[zinfo.filename] = zinfo
def __del__(self):
"""Call the "close()" method in case the user forgot."""
self.close()
def close(self):
"""Close the file, and for mode "w" and "a" write the ending
records."""
if self.fp is None:
return
if self.mode in ("w", "a") and self._didModify: # write ending records
count = 0
pos1 = self.fp.tell()
for zinfo in self.filelist: # write central directory
count = count + 1
dt = zinfo.date_time
dosdate = (dt[0] - 1980) << 9 | dt[1] << 5 | dt[2]
dostime = dt[3] << 11 | dt[4] << 5 | (dt[5] // 2)
extra = []
if zinfo.file_size > ZIP64_LIMIT \
or zinfo.compress_size > ZIP64_LIMIT:
extra.append(zinfo.file_size)
extra.append(zinfo.compress_size)
file_size = 0xffffffff
compress_size = 0xffffffff
else:
file_size = zinfo.file_size
compress_size = zinfo.compress_size
if zinfo.header_offset > ZIP64_LIMIT:
extra.append(zinfo.header_offset)
header_offset = 0xffffffffL
else:
header_offset = zinfo.header_offset
extra_data = zinfo.extra
if extra:
# Append a ZIP64 field to the extra's
extra_data = struct.pack(
'<HH' + 'Q'*len(extra),
1, 8*len(extra), *extra) + extra_data
extract_version = max(45, zinfo.extract_version)
create_version = max(45, zinfo.create_version)
else:
extract_version = zinfo.extract_version
create_version = zinfo.create_version
try:
filename, flag_bits = zinfo._encodeFilenameFlags()
centdir = struct.pack(structCentralDir,
stringCentralDir, create_version,
zinfo.create_system, extract_version, zinfo.reserved,
flag_bits, zinfo.compress_type, dostime, dosdate,
zinfo.CRC, compress_size, file_size,
len(filename), len(extra_data), len(zinfo.comment),
0, zinfo.internal_attr, zinfo.external_attr,
header_offset)
except DeprecationWarning:
print >>sys.stderr, (structCentralDir,
stringCentralDir, create_version,
zinfo.create_system, extract_version, zinfo.reserved,
zinfo.flag_bits, zinfo.compress_type, dostime, dosdate,
zinfo.CRC, compress_size, file_size,
len(zinfo.filename), len(extra_data), len(zinfo.comment),
0, zinfo.internal_attr, zinfo.external_attr,
header_offset)
raise
self.fp.write(centdir)
self.fp.write(filename)
self.fp.write(extra_data)
self.fp.write(zinfo.comment)
pos2 = self.fp.tell()
# Write end-of-zip-archive record
centDirCount = count
centDirSize = pos2 - pos1
centDirOffset = pos1
if (centDirCount >= ZIP_FILECOUNT_LIMIT or
centDirOffset > ZIP64_LIMIT or
centDirSize > ZIP64_LIMIT):
# Need to write the ZIP64 end-of-archive records
zip64endrec = struct.pack(
structEndArchive64, stringEndArchive64,
44, 45, 45, 0, 0, centDirCount, centDirCount,
centDirSize, centDirOffset)
self.fp.write(zip64endrec)
zip64locrec = struct.pack(
structEndArchive64Locator,
stringEndArchive64Locator, 0, pos2, 1)
self.fp.write(zip64locrec)
centDirCount = min(centDirCount, 0xFFFF)
centDirSize = min(centDirSize, 0xFFFFFFFF)
centDirOffset = min(centDirOffset, 0xFFFFFFFF)
# check for valid comment length
if len(self.comment) >= ZIP_MAX_COMMENT:
if self.debug > 0:
msg = 'Archive comment is too long; truncating to %d bytes' \
% ZIP_MAX_COMMENT
print msg
self.comment = self.comment[:ZIP_MAX_COMMENT]
endrec = struct.pack(structEndArchive, stringEndArchive,
0, 0, centDirCount, centDirCount,
centDirSize, centDirOffset, len(self.comment))
self.fp.write(endrec)
self.fp.write(self.comment)
self.fp.flush()
if not self._filePassed:
self.fp.close()
self.fp = None
class PyZipFile(ZipFile):
"""Class to create ZIP archives with Python library files and packages."""
def writepy(self, pathname, basename = ""):
"""Add all files from "pathname" to the ZIP archive.
If pathname is a package directory, search the directory and
all package subdirectories recursively for all *.py and enter
the modules into the archive. If pathname is a plain
directory, add all *.py files it contains as modules. Otherwise,
pathname must be a Python *.py file and that module will be put into
the archive. Added modules are always module.pyo or module.pyc.
This method will compile the module.py into module.pyc if
necessary.
"""
dir, name = os.path.split(pathname)
if os.path.isdir(pathname):
initname = os.path.join(pathname, "__init__.py")
if os.path.isfile(initname):
# This is a package directory, add it
if basename:
basename = "%s/%s" % (basename, name)
else:
basename = name
if self.debug:
print "Adding package in", pathname, "as", basename
fname, arcname = self._get_codename(initname[0:-3], basename)
if self.debug:
print "Adding", arcname
self.write(fname, arcname)
dirlist = os.listdir(pathname)
dirlist.remove("__init__.py")
# Add all *.py files and package subdirectories
for filename in dirlist:
path = os.path.join(pathname, filename)
root, ext = os.path.splitext(filename)
if os.path.isdir(path):
if os.path.isfile(os.path.join(path, "__init__.py")):
# This is a package directory, add it
self.writepy(path, basename) # Recursive call
elif ext == ".py":
fname, arcname = self._get_codename(path[0:-3],
basename)
if self.debug:
print "Adding", arcname
self.write(fname, arcname)
else:
# This is NOT a package directory, add its files at top level
if self.debug:
print "Adding files from directory", pathname
for filename in os.listdir(pathname):
path = os.path.join(pathname, filename)
root, ext = os.path.splitext(filename)
if ext == ".py":
fname, arcname = self._get_codename(path[0:-3],
basename)
if self.debug:
print "Adding", arcname
self.write(fname, arcname)
else:
if pathname[-3:] != ".py":
raise RuntimeError, \
'Files added with writepy() must end with ".py"'
fname, arcname = self._get_codename(pathname[0:-3], basename)
if self.debug:
print "Adding file", arcname
self.write(fname, arcname)
def _get_codename(self, pathname, basename):
"""Return (filename, archivename) for the path.
Given a module name path, return the correct file path and
archive name, compiling if necessary. For example, given
/python/lib/string, return (/python/lib/string.pyc, string).
"""
file_py = pathname + ".py"
file_pyc = pathname + ".pyc"
file_pyo = pathname + ".pyo"
if os.path.isfile(file_pyo) and \
os.stat(file_pyo).st_mtime >= os.stat(file_py).st_mtime:
fname = file_pyo # Use .pyo file
elif not os.path.isfile(file_pyc) or \
os.stat(file_pyc).st_mtime < os.stat(file_py).st_mtime:
import py_compile
if self.debug:
print "Compiling", file_py
try:
py_compile.compile(file_py, file_pyc, None, True)
except py_compile.PyCompileError,err:
print err.msg
fname = file_pyc
else:
fname = file_pyc
archivename = os.path.split(fname)[1]
if basename:
archivename = "%s/%s" % (basename, archivename)
return (fname, archivename)
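# Example: package a directory of Python modules into an importable ZIP
# (hypothetical package name):
#
#     pz = PyZipFile("lib.zip", "w")
#     pz.writepy("mypackage")
#     pz.close()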
def main(args = None):
import textwrap
USAGE=textwrap.dedent("""\
Usage:
zipfile.py -l zipfile.zip # Show listing of a zipfile
zipfile.py -t zipfile.zip # Test if a zipfile is valid
zipfile.py -e zipfile.zip target # Extract zipfile into target dir
zipfile.py -c zipfile.zip src ... # Create zipfile from sources
""")
if args is None:
args = sys.argv[1:]
if not args or args[0] not in ('-l', '-c', '-e', '-t'):
print USAGE
sys.exit(1)
if args[0] == '-l':
if len(args) != 2:
print USAGE
sys.exit(1)
zf = ZipFile(args[1], 'r')
zf.printdir()
zf.close()
elif args[0] == '-t':
if len(args) != 2:
print USAGE
sys.exit(1)
zf = ZipFile(args[1], 'r')
zf.testzip()
print "Done testing"
elif args[0] == '-e':
if len(args) != 3:
print USAGE
sys.exit(1)
zf = ZipFile(args[1], 'r')
out = args[2]
for path in zf.namelist():
if path.startswith('./'):
tgt = os.path.join(out, path[2:])
else:
tgt = os.path.join(out, path)
tgtdir = os.path.dirname(tgt)
if not os.path.exists(tgtdir):
os.makedirs(tgtdir)
fp = open(tgt, 'wb')
fp.write(zf.read(path))
fp.close()
zf.close()
elif args[0] == '-c':
if len(args) < 3:
print USAGE
sys.exit(1)
def addToZip(zf, path, zippath):
if os.path.isfile(path):
zf.write(path, zippath, ZIP_DEFLATED)
elif os.path.isdir(path):
for nm in os.listdir(path):
addToZip(zf,
os.path.join(path, nm), os.path.join(zippath, nm))
# else: ignore
zf = ZipFile(args[1], 'w', allowZip64=True)
for src in args[2:]:
addToZip(zf, src, os.path.basename(src))
zf.close()
if __name__ == "__main__":
main()
from __future__ import print_function, division
from random import randrange, choice
from math import log
from sympy.core import Basic
from sympy.core.compatibility import range
from sympy.combinatorics import Permutation
from sympy.combinatorics.permutations import (_af_commutes_with, _af_invert,
_af_rmul, _af_rmuln, _af_pow, Cycle)
from sympy.combinatorics.util import (_check_cycles_alt_sym,
_distribute_gens_by_base, _orbits_transversals_from_bsgs,
_handle_precomputed_bsgs, _base_ordering, _strong_gens_from_distr,
_strip, _strip_af)
from sympy.functions.combinatorial.factorials import factorial
from sympy.ntheory import sieve
from sympy.utilities.iterables import has_variety, is_sequence, uniq
from sympy.utilities.randtest import _randrange
rmul = Permutation.rmul_with_af
_af_new = Permutation._af_new
class PermutationGroup(Basic):
"""The class defining a Permutation group.
PermutationGroup([p1, p2, ..., pn]) returns the permutation group
generated by the list of permutations. This group can be supplied
to Polyhedron if one desires to decorate the elements to which the
indices of the permutation refer.
Examples
========
>>> from sympy.combinatorics import Permutation
>>> Permutation.print_cyclic = True
>>> from sympy.combinatorics.permutations import Cycle
>>> from sympy.combinatorics.polyhedron import Polyhedron
>>> from sympy.combinatorics.perm_groups import PermutationGroup
The permutations corresponding to motion of the front, right and
bottom face of a 2x2 Rubik's cube are defined:
>>> F = Permutation(2, 19, 21, 8)(3, 17, 20, 10)(4, 6, 7, 5)
>>> R = Permutation(1, 5, 21, 14)(3, 7, 23, 12)(8, 10, 11, 9)
>>> D = Permutation(6, 18, 14, 10)(7, 19, 15, 11)(20, 22, 23, 21)
These are passed as permutations to PermutationGroup:
>>> G = PermutationGroup(F, R, D)
>>> G.order()
3674160
The group can be supplied to a Polyhedron in order to track the
objects being moved. An example involving the 2x2 Rubik's cube is
given there, but here is a simple demonstration:
>>> a = Permutation(2, 1)
>>> b = Permutation(1, 0)
>>> G = PermutationGroup(a, b)
>>> P = Polyhedron(list('ABC'), pgroup=G)
>>> P.corners
(A, B, C)
>>> P.rotate(0) # apply permutation 0
>>> P.corners
(A, C, B)
>>> P.reset()
>>> P.corners
(A, B, C)
Or one can make a permutation as a product of selected permutations
and apply them to an iterable directly:
>>> P10 = G.make_perm([0, 1])
>>> P10('ABC')
['C', 'A', 'B']
See Also
========
sympy.combinatorics.polyhedron.Polyhedron,
sympy.combinatorics.permutations.Permutation
References
==========
[1] Holt, D., Eick, B., O'Brien, E.
"Handbook of Computational Group Theory"
[2] Seress, A.
"Permutation Group Algorithms"
[3] http://en.wikipedia.org/wiki/Schreier_vector
[4] http://en.wikipedia.org/wiki/Nielsen_transformation
#Product_replacement_algorithm
[5] Frank Celler, Charles R.Leedham-Green, Scott H.Murray,
Alice C.Niemeyer, and E.A.O'Brien. "Generating Random
Elements of a Finite Group"
[6] http://en.wikipedia.org/wiki/Block_%28permutation_group_theory%29
[7] http://www.algorithmist.com/index.php/Union_Find
[8] http://en.wikipedia.org/wiki/Multiply_transitive_group#Multiply_transitive_groups
[9] http://en.wikipedia.org/wiki/Center_%28group_theory%29
[10] http://en.wikipedia.org/wiki/Centralizer_and_normalizer
[11] http://groupprops.subwiki.org/wiki/Derived_subgroup
[12] http://en.wikipedia.org/wiki/Nilpotent_group
[13] http://www.math.colostate.edu/~hulpke/CGT/cgtnotes.pdf
"""
def __new__(cls, *args, **kwargs):
"""The default constructor. Accepts Cycle and Permutation forms.
Removes duplicates unless ``dups`` keyword is False.
"""
args = list(args[0] if is_sequence(args[0]) else args)
if not args:
raise ValueError('must supply one or more permutations '
'to define the group')
if any(isinstance(a, Cycle) for a in args):
args = [Permutation(a) for a in args]
if has_variety(a.size for a in args):
degree = kwargs.pop('degree', None)
if degree is None:
degree = max(a.size for a in args)
for i in range(len(args)):
if args[i].size != degree:
args[i] = Permutation(args[i], size=degree)
if kwargs.pop('dups', True):
args = list(uniq([_af_new(list(a)) for a in args]))
obj = Basic.__new__(cls, *args, **kwargs)
obj._generators = args
obj._order = None
obj._center = []
obj._is_abelian = None
obj._is_transitive = None
obj._is_sym = None
obj._is_alt = None
obj._is_primitive = None
obj._is_nilpotent = None
obj._is_solvable = None
obj._is_trivial = None
obj._transitivity_degree = None
obj._max_div = None
obj._r = len(obj._generators)
obj._degree = obj._generators[0].size
# these attributes are assigned after running schreier_sims
obj._base = []
obj._strong_gens = []
obj._basic_orbits = []
obj._transversals = []
# these attributes are assigned after running _random_pr_init
obj._random_gens = []
return obj
def __getitem__(self, i):
return self._generators[i]
def __len__(self):
return len(self._generators)
def __eq__(self, other):
"""Return True if self and other have the same generators.
Examples
========
>>> from sympy.combinatorics import Permutation
>>> from sympy.combinatorics.perm_groups import PermutationGroup
>>> p = Permutation(0, 1, 2, 3, 4, 5)
>>> G = PermutationGroup([p, p**2])
>>> H = PermutationGroup([p**2, p])
>>> G.generators == H.generators
False
>>> G == H
True
"""
if not isinstance(other, PermutationGroup):
return False
return set(self.generators) == set(other.generators)
def __hash__(self):
return super(PermutationGroup, self).__hash__()
def __mul__(self, other):
"""Return the direct product of two permutation groups as a permutation
group.
This implementation realizes the direct product by shifting
the index set for the generators of the second group: so if we have
G acting on n1 points and H acting on n2 points, G*H acts on n1 + n2
points.
Examples
========
>>> from sympy.combinatorics.perm_groups import PermutationGroup
>>> from sympy.combinatorics.named_groups import CyclicGroup
>>> G = CyclicGroup(5)
>>> H = G*G
>>> H
PermutationGroup([
(9)(0 1 2 3 4),
(5 6 7 8 9)])
>>> H.order()
25
"""
gens1 = [perm._array_form for perm in self.generators]
gens2 = [perm._array_form for perm in other.generators]
n1 = self._degree
n2 = other._degree
start = list(range(n1))
end = list(range(n1, n1 + n2))
for i in range(len(gens2)):
gens2[i] = [x + n1 for x in gens2[i]]
gens2 = [start + gen for gen in gens2]
gens1 = [gen + end for gen in gens1]
together = gens1 + gens2
gens = [_af_new(x) for x in together]
return PermutationGroup(gens)
def _random_pr_init(self, r, n, _random_prec_n=None):
r"""Initialize random generators for the product replacement algorithm.
The implementation uses a modification of the original product
replacement algorithm due to Leedham-Green, as described in [1],
pp. 69-71; also, see [2], pp. 27-29 for a detailed theoretical
analysis of the original product replacement algorithm, and [4].
The product replacement algorithm is used for producing random,
uniformly distributed elements of a group ``G`` with a set of generators
``S``. For the initialization ``_random_pr_init``, a list ``R`` of
``\max\{r, |S|\}`` group generators is created as the attribute
``G._random_gens``, repeating elements of ``S`` if necessary, and the
identity element of ``G`` is appended to ``R`` - we shall refer to this
last element as the accumulator. Then the function ``random_pr()``
is called ``n`` times, randomizing the list ``R`` while preserving
the generation of ``G`` by ``R``. The function ``random_pr()`` itself
takes two random elements ``g, h`` among all elements of ``R`` but
the accumulator and replaces ``g`` with a randomly chosen element
from ``\{gh, g(~h), hg, (~h)g\}``. Then the accumulator is multiplied
by whatever ``g`` was replaced by. The new value of the accumulator is
then returned by ``random_pr()``.
The elements returned will eventually (for ``n`` large enough) become
uniformly distributed across ``G`` ([5]). For practical purposes however,
the values ``n = 50, r = 11`` are suggested in [1].
Notes
=====
THIS FUNCTION HAS SIDE EFFECTS: it changes the attribute
self._random_gens
See Also
========
random_pr
"""
deg = self.degree
random_gens = [x._array_form for x in self.generators]
k = len(random_gens)
if k < r:
for i in range(k, r):
random_gens.append(random_gens[i - k])
acc = list(range(deg))
random_gens.append(acc)
self._random_gens = random_gens
# handle randomized input for testing purposes
if _random_prec_n is None:
for i in range(n):
self.random_pr()
else:
for i in range(n):
self.random_pr(_random_prec=_random_prec_n[i])
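# A stripped-down sketch of one product replacement step under the
# conventions above (R holds the generators followed by the accumulator);
# illustrative only - see random_pr for the real code:
#
#     def _pr_step(R):
#         i = randrange(len(R) - 1)
#         j = randrange(len(R) - 1)   # ideally distinct from i
#         R[i] = R[i] * choice([R[j], R[j]**-1])   # g <- g*h or g*(~h)
#         R[-1] = R[-1] * R[i]                     # update the accumulator
#         return R[-1]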
def _union_find_merge(self, first, second, ranks, parents, not_rep):
"""Merges two classes in a union-find data structure.
Used in the implementation of Atkinson's algorithm as suggested in [1],
pp. 83-87. The class merging process uses union by rank as an
optimization. ([7])
Notes
=====
THIS FUNCTION HAS SIDE EFFECTS: the list of class representatives,
``parents``, the list of class sizes, ``ranks``, and the list of
elements that are not representatives, ``not_rep``, are changed due to
class merging.
See Also
========
minimal_block, _union_find_rep
References
==========
[1] Holt, D., Eick, B., O'Brien, E.
"Handbook of computational group theory"
[7] http://www.algorithmist.com/index.php/Union_Find
"""
rep_first = self._union_find_rep(first, parents)
rep_second = self._union_find_rep(second, parents)
if rep_first != rep_second:
# union by rank
if ranks[rep_first] >= ranks[rep_second]:
new_1, new_2 = rep_first, rep_second
else:
new_1, new_2 = rep_second, rep_first
total_rank = ranks[new_1] + ranks[new_2]
if total_rank > self.max_div:
return -1
parents[new_2] = new_1
ranks[new_1] = total_rank
not_rep.append(new_2)
return 1
return 0
def _union_find_rep(self, num, parents):
"""Find representative of a class in a union-find data structure.
Used in the implementation of Atkinson's algorithm as suggested in [1],
pp. 83-87. After the representative of the class to which ``num``
belongs is found, path compression is performed as an optimization
([7]).
Notes
=====
THIS FUNCTION HAS SIDE EFFECTS: the list of class representatives,
``parents``, is altered due to path compression.
See Also
========
minimal_block, _union_find_merge
References
==========
[1] Holt, D., Eick, B., O'Brien, E.
"Handbook of computational group theory"
[7] http://www.algorithmist.com/index.php/Union_Find
"""
rep, parent = num, parents[num]
while parent != rep:
rep = parent
parent = parents[rep]
# path compression
temp, parent = num, parents[num]
while parent != rep:
parents[temp] = rep
temp = parent
parent = parents[temp]
return rep
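# The same idea on a plain array-based union-find, for illustration (find
# with path compression; roots satisfy parents[x] == x):
#
#     def find(x, parents):
#         root = x
#         while parents[root] != root:
#             root = parents[root]
#         while parents[x] != root:    # compress the path behind us
#             parents[x], x = root, parents[x]
#         return root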
@property
def base(self):
"""Return a base from the Schreier-Sims algorithm.
For a permutation group ``G``, a base is a sequence of points
``B = (b_1, b_2, ..., b_k)`` such that no element of ``G`` apart
from the identity fixes all the points in ``B``. The concepts of
a base and strong generating set and their applications are
discussed in depth in [1], pp. 87-89 and [2], pp. 55-57.
An alternative way to think of ``B`` is that it gives the
indices of the stabilizer cosets that contain more than the
identity permutation.
Examples
========
>>> from sympy.combinatorics import Permutation, PermutationGroup
>>> G = PermutationGroup([Permutation(0, 1, 3)(2, 4)])
>>> G.base
[0, 2]
See Also
========
strong_gens, basic_transversals, basic_orbits, basic_stabilizers
"""
if self._base == []:
self.schreier_sims()
return self._base
def baseswap(self, base, strong_gens, pos, randomized=False,
transversals=None, basic_orbits=None, strong_gens_distr=None):
r"""Swap two consecutive base points in base and strong generating set.
If a base for a group ``G`` is given by ``(b_1, b_2, ..., b_k)``, this
function returns a base ``(b_1, b_2, ..., b_{i+1}, b_i, ..., b_k)``,
where ``i`` is given by ``pos``, and a strong generating set relative
to that base. The original base and strong generating set are not
modified.
The randomized version (default) is of Las Vegas type.
Parameters
==========
base, strong_gens
The base and strong generating set.
pos
The position at which swapping is performed.
randomized
A switch between randomized and deterministic version.
transversals
The transversals for the basic orbits, if known.
basic_orbits
The basic orbits, if known.
strong_gens_distr
The strong generators distributed by basic stabilizers, if known.
Returns
=======
(base, strong_gens)
``base`` is the new base, and ``strong_gens`` is a generating set
relative to it.
Examples
========
>>> from sympy.combinatorics.named_groups import SymmetricGroup
>>> from sympy.combinatorics.testutil import _verify_bsgs
>>> from sympy.combinatorics.perm_groups import PermutationGroup
>>> S = SymmetricGroup(4)
>>> S.schreier_sims()
>>> S.base
[0, 1, 2]
>>> base, gens = S.baseswap(S.base, S.strong_gens, 1, randomized=False)
>>> base, gens
([0, 2, 1],
[(0 1 2 3), (3)(0 1), (1 3 2),
(2 3), (1 3)])
Check that base, gens is a BSGS:
>>> S1 = PermutationGroup(gens)
>>> _verify_bsgs(S1, base, gens)
True
See Also
========
schreier_sims
Notes
=====
The deterministic version of the algorithm is discussed in
[1], pp. 102-103; the randomized version is discussed in [1], p.103, and
[2], p.98. It is of Las Vegas type.
Notice that [1] contains a mistake in the pseudocode and
discussion of BASESWAP: on line 3 of the pseudocode,
``|\beta_{i+1}^{\left\langle T\right\rangle}|`` should be replaced by
``|\beta_{i}^{\left\langle T\right\rangle}|``, and the same for the
discussion of the algorithm.
"""
# construct the basic orbits, generators for the stabilizer chain
# and transversal elements from whatever was provided
transversals, basic_orbits, strong_gens_distr = \
_handle_precomputed_bsgs(base, strong_gens, transversals,
basic_orbits, strong_gens_distr)
base_len = len(base)
degree = self.degree
# size of orbit of base[pos] under the stabilizer we seek to insert
# in the stabilizer chain at position pos + 1
size = len(basic_orbits[pos])*len(basic_orbits[pos + 1]) \
//len(_orbit(degree, strong_gens_distr[pos], base[pos + 1]))
# initialize the wanted stabilizer by a subgroup
if pos + 2 > base_len - 1:
T = []
else:
T = strong_gens_distr[pos + 2][:]
# randomized version
if randomized is True:
stab_pos = PermutationGroup(strong_gens_distr[pos])
schreier_vector = stab_pos.schreier_vector(base[pos + 1])
# add random elements of the stabilizer until they generate it
while len(_orbit(degree, T, base[pos])) != size:
new = stab_pos.random_stab(base[pos + 1],
schreier_vector=schreier_vector)
T.append(new)
# deterministic version
else:
Gamma = set(basic_orbits[pos])
Gamma.remove(base[pos])
if base[pos + 1] in Gamma:
Gamma.remove(base[pos + 1])
# add elements of the stabilizer until they generate it by
# ruling out members of the basic orbit of base[pos] along the way
while len(_orbit(degree, T, base[pos])) != size:
gamma = next(iter(Gamma))
x = transversals[pos][gamma]
temp = x._array_form.index(base[pos + 1]) # (~x)(base[pos + 1])
if temp not in basic_orbits[pos + 1]:
Gamma = Gamma - _orbit(degree, T, gamma)
else:
y = transversals[pos + 1][temp]
el = rmul(x, y)
if el(base[pos]) not in _orbit(degree, T, base[pos]):
T.append(el)
Gamma = Gamma - _orbit(degree, T, base[pos])
# build the new base and strong generating set
strong_gens_new_distr = strong_gens_distr[:]
strong_gens_new_distr[pos + 1] = T
base_new = base[:]
base_new[pos], base_new[pos + 1] = base_new[pos + 1], base_new[pos]
strong_gens_new = _strong_gens_from_distr(strong_gens_new_distr)
for gen in T:
if gen not in strong_gens_new:
strong_gens_new.append(gen)
return base_new, strong_gens_new
@property
def basic_orbits(self):
"""
Return the basic orbits relative to a base and strong generating set.
If ``(b_1, b_2, ..., b_k)`` is a base for a group ``G``, and
``G^{(i)} = G_{b_1, b_2, ..., b_{i-1}}`` is the ``i``-th basic stabilizer
(so that ``G^{(1)} = G``), the ``i``-th basic orbit relative to this base
is the orbit of ``b_i`` under ``G^{(i)}``. See [1], pp. 87-89 for more
information.
Examples
========
>>> from sympy.combinatorics.named_groups import SymmetricGroup
>>> S = SymmetricGroup(4)
>>> S.basic_orbits
[[0, 1, 2, 3], [1, 2, 3], [2, 3]]
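The product of the basic orbit lengths gives the group order
(here ``4*3*2 = 24``):
>>> S.order()
24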
See Also
========
base, strong_gens, basic_transversals, basic_stabilizers
"""
if self._basic_orbits == []:
self.schreier_sims()
return self._basic_orbits
@property
def basic_stabilizers(self):
"""
Return a chain of stabilizers relative to a base and strong generating
set.
The ``i``-th basic stabilizer ``G^{(i)}`` relative to a base
``(b_1, b_2, ..., b_k)`` is ``G_{b_1, b_2, ..., b_{i-1}}``. For more
information, see [1], pp. 87-89.
Examples
========
>>> from sympy.combinatorics.named_groups import AlternatingGroup
>>> A = AlternatingGroup(4)
>>> A.schreier_sims()
>>> A.base
[0, 1]
>>> for g in A.basic_stabilizers:
... print(g)
...
PermutationGroup([
(3)(0 1 2),
(1 2 3)])
PermutationGroup([
(1 2 3)])
See Also
========
base, strong_gens, basic_orbits, basic_transversals
"""
if self._transversals == []:
self.schreier_sims()
strong_gens = self._strong_gens
base = self._base
strong_gens_distr = _distribute_gens_by_base(base, strong_gens)
basic_stabilizers = []
for gens in strong_gens_distr:
basic_stabilizers.append(PermutationGroup(gens))
return basic_stabilizers
@property
def basic_transversals(self):
"""
Return basic transversals relative to a base and strong generating set.
The basic transversals are transversals of the basic orbits. They
are provided as a list of dictionaries, each dictionary having
keys - the elements of one of the basic orbits, and values - the
corresponding transversal elements. See [1], pp. 87-89 for more
information.
Examples
========
>>> from sympy.combinatorics.named_groups import AlternatingGroup
>>> A = AlternatingGroup(4)
>>> A.basic_transversals
[{0: (3), 1: (3)(0 1 2), 2: (3)(0 2 1), 3: (0 3 1)}, {1: (3), 2: (1 2 3), 3: (1 3 2)}]
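The sizes of the transversals multiply to the group order (here
``4*3 = 12``):
>>> A.order()
12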
See Also
========
strong_gens, base, basic_orbits, basic_stabilizers
"""
if self._transversals == []:
self.schreier_sims()
return self._transversals
def center(self):
r"""
Return the center of a permutation group.
The center for a group ``G`` is defined as
``Z(G) = \{z\in G | \forall g\in G, zg = gz \}``,
the set of elements of ``G`` that commute with all elements of ``G``.
It is equal to the centralizer of ``G`` inside ``G``, and is naturally a
subgroup of ``G`` ([9]).
Examples
========
>>> from sympy.combinatorics.perm_groups import PermutationGroup
>>> from sympy.combinatorics.named_groups import DihedralGroup
>>> D = DihedralGroup(4)
>>> G = D.center()
>>> G.order()
2
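Being a subgroup of ``D``, it is contained in it:
>>> G.is_subgroup(D)
True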
See Also
========
centralizer
Notes
=====
This is a naive implementation that is a straightforward application
of ``.centralizer()``
"""
return self.centralizer(self)
def centralizer(self, other):
r"""
Return the centralizer of a group/set/element.
The centralizer of a set of permutations ``S`` inside
a group ``G`` is the set of elements of ``G`` that commute with all
elements of ``S``::
``C_G(S) = \{ g \in G | gs = sg \forall s \in S\}`` ([10])
Usually, ``S`` is a subset of ``G``, but if ``G`` is a proper subgroup of
the full symmetric group, we allow for ``S`` to have elements outside
``G``.
It is naturally a subgroup of ``G``; the centralizer of a permutation
group is equal to the centralizer of any set of generators for that
group, since any element commuting with the generators commutes with
any product of the generators.
Parameters
==========
other
a permutation group/list of permutations/single permutation
Examples
========
>>> from sympy.combinatorics.named_groups import (SymmetricGroup,
... CyclicGroup)
>>> S = SymmetricGroup(6)
>>> C = CyclicGroup(6)
>>> H = S.centralizer(C)
>>> H.is_subgroup(C)
True
See Also
========
subgroup_search
Notes
=====
The implementation is an application of ``.subgroup_search()`` with
tests using a specific base for the group ``G``.
"""
if hasattr(other, 'generators'):
if other.is_trivial or self.is_trivial:
return self
degree = self.degree
identity = _af_new(list(range(degree)))
orbits = other.orbits()
num_orbits = len(orbits)
orbits.sort(key=lambda x: -len(x))
long_base = []
orbit_reps = [None]*num_orbits
orbit_reps_indices = [None]*num_orbits
orbit_descr = [None]*degree
for i in range(num_orbits):
orbit = list(orbits[i])
orbit_reps[i] = orbit[0]
orbit_reps_indices[i] = len(long_base)
for point in orbit:
orbit_descr[point] = i
long_base = long_base + orbit
base, strong_gens = self.schreier_sims_incremental(base=long_base)
strong_gens_distr = _distribute_gens_by_base(base, strong_gens)
i = 0
for i in range(len(base)):
if strong_gens_distr[i] == [identity]:
break
base = base[:i]
base_len = i
for j in range(num_orbits):
if base[base_len - 1] in orbits[j]:
break
rel_orbits = orbits[: j + 1]
num_rel_orbits = len(rel_orbits)
transversals = [None]*num_rel_orbits
for j in range(num_rel_orbits):
rep = orbit_reps[j]
transversals[j] = dict(
other.orbit_transversal(rep, pairs=True))
trivial_test = lambda x: True
tests = [None]*base_len
for l in range(base_len):
if base[l] in orbit_reps:
tests[l] = trivial_test
else:
def test(computed_words, l=l):
g = computed_words[l]
rep_orb_index = orbit_descr[base[l]]
rep = orbit_reps[rep_orb_index]
im = g._array_form[base[l]]
im_rep = g._array_form[rep]
tr_el = transversals[rep_orb_index][base[l]]
# using the definition of transversal,
# base[l]^g = rep^(tr_el*g);
# if g belongs to the centralizer, then
# base[l]^g = (rep^g)^tr_el
return im == tr_el._array_form[im_rep]
tests[l] = test
def prop(g):
return [rmul(g, gen) for gen in other.generators] == \
[rmul(gen, g) for gen in other.generators]
return self.subgroup_search(prop, base=base,
strong_gens=strong_gens, tests=tests)
elif hasattr(other, '__getitem__'):
gens = list(other)
return self.centralizer(PermutationGroup(gens))
elif hasattr(other, 'array_form'):
return self.centralizer(PermutationGroup([other]))
def commutator(self, G, H):
"""
Return the commutator of two subgroups.
For a permutation group ``K`` and subgroups ``G``, ``H``, the
commutator of ``G`` and ``H`` is defined as the group generated
by all the commutators ``[g, h] = hgh^{-1}g^{-1}`` for ``g`` in ``G`` and
``h`` in ``H``. It is naturally a subgroup of ``K`` ([1], p.27).
Examples
========
>>> from sympy.combinatorics.named_groups import (SymmetricGroup,
... AlternatingGroup)
>>> S = SymmetricGroup(5)
>>> A = AlternatingGroup(5)
>>> G = S.commutator(S, A)
>>> G.is_subgroup(A)
True
See Also
========
derived_subgroup
Notes
=====
The commutator of two subgroups ``H, G`` is equal to the normal closure
of the commutators of all the generators, i.e. ``hgh^{-1}g^{-1}`` for ``h``
a generator of ``H`` and ``g`` a generator of ``G`` ([1], p.28)
"""
ggens = G.generators
hgens = H.generators
commutators = []
for ggen in ggens:
for hgen in hgens:
commutator = rmul(hgen, ggen, ~hgen, ~ggen)
if commutator not in commutators:
commutators.append(commutator)
res = self.normal_closure(commutators)
return res
def coset_factor(self, g, factor_index=False):
"""Return ``G``'s (self's) coset factorization of ``g``
If ``g`` is an element of ``G`` then it can be written as the product
of permutations drawn from the Schreier-Sims coset decomposition.
The permutations returned in ``f`` are those for which
the product gives ``g``: ``g = f[n]*...*f[1]*f[0]`` where ``n = len(B)``
and ``B = G.base``. ``f[i]`` is one of the permutations in
``self._basic_orbits[i]``.
If ``factor_index == True``,
returns a list ``[b[0], ..., b[n]]``, where ``b[i]``
belongs to ``self._basic_orbits[i]``
Examples
========
>>> from sympy.combinatorics import Permutation, PermutationGroup
>>> Permutation.print_cyclic = True
>>> a = Permutation(0, 1, 3, 7, 6, 4)(2, 5)
>>> b = Permutation(0, 1, 3, 2)(4, 5, 7, 6)
>>> G = PermutationGroup([a, b])
Define g:
>>> g = Permutation(7)(1, 2, 4)(3, 6, 5)
Confirm that it is an element of G:
>>> G.contains(g)
True
Thus, it can be written as a product of factors (up to
3) drawn from u. See below that a factor from u1 and u2
and the Identity permutation have been used:
>>> f = G.coset_factor(g)
>>> f[2]*f[1]*f[0] == g
True
>>> f1 = G.coset_factor(g, True); f1
[0, 4, 4]
>>> tr = G.basic_transversals
>>> f[0] == tr[0][f1[0]]
True
If g is not an element of G then [] is returned:
>>> c = Permutation(5, 6, 7)
>>> G.coset_factor(c)
[]
See ``util._strip``.
"""
if isinstance(g, (Cycle, Permutation)):
g = g.list()
if len(g) != self._degree:
# this could either adjust the size or return [] immediately
# but we don't choose between the two and just signal a possible
# error
raise ValueError('g should be the same size as permutations of G')
I = list(range(self._degree))
basic_orbits = self.basic_orbits
transversals = self._transversals
factors = []
base = self.base
h = g
for i in range(len(base)):
beta = h[base[i]]
if beta == base[i]:
factors.append(beta)
continue
if beta not in basic_orbits[i]:
return []
u = transversals[i][beta]._array_form
h = _af_rmul(_af_invert(u), h)
factors.append(beta)
if h != I:
return []
if factor_index:
return factors
tr = self.basic_transversals
factors = [tr[i][factors[i]] for i in range(len(base))]
return factors
def coset_rank(self, g):
"""rank using Schreier-Sims representation
The coset rank of ``g`` is the ordering number in which
it appears in the lexicographic listing according to the
coset decomposition
The ordering is the same as in G.generate(method='coset').
If ``g`` does not belong to the group it returns None.
Examples
========
>>> from sympy.combinatorics import Permutation
>>> Permutation.print_cyclic = True
>>> from sympy.combinatorics.perm_groups import PermutationGroup
>>> a = Permutation(0, 1, 3, 7, 6, 4)(2, 5)
>>> b = Permutation(0, 1, 3, 2)(4, 5, 7, 6)
>>> G = PermutationGroup([a, b])
>>> c = Permutation(7)(2, 4)(3, 5)
>>> G.coset_rank(c)
16
>>> G.coset_unrank(16)
(7)(2 4)(3 5)
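``coset_rank`` and ``coset_unrank`` are mutually inverse on
``range(G.order())``:
>>> G.coset_rank(G.coset_unrank(16)) == 16
True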
See Also
========
coset_factor
"""
factors = self.coset_factor(g, True)
if not factors:
return None
rank = 0
b = 1
transversals = self._transversals
base = self._base
basic_orbits = self._basic_orbits
for i in range(len(base)):
k = factors[i]
j = basic_orbits[i].index(k)
rank += b*j
b = b*len(transversals[i])
return rank
def coset_unrank(self, rank, af=False):
"""unrank using Schreier-Sims representation
coset_unrank is the inverse operation of coset_rank
if 0 <= rank < order; otherwise it returns None.
"""
if rank < 0 or rank >= self.order():
return None
base = self._base
transversals = self._transversals
basic_orbits = self._basic_orbits
m = len(base)
v = [0]*m
for i in range(m):
rank, c = divmod(rank, len(transversals[i]))
v[i] = basic_orbits[i][c]
a = [transversals[i][v[i]]._array_form for i in range(m)]
h = _af_rmuln(*a)
if af:
return h
else:
return _af_new(h)
@property
def degree(self):
"""Returns the size of the permutations in the group.
The number of permutations comprising the group is given by
len(group); the number of permutations that can be generated
by the group is given by group.order().
Examples
========
>>> from sympy.combinatorics import Permutation
>>> Permutation.print_cyclic = True
>>> from sympy.combinatorics.perm_groups import PermutationGroup
>>> a = Permutation([1, 0, 2])
>>> G = PermutationGroup([a])
>>> G.degree
3
>>> len(G)
1
>>> G.order()
2
>>> list(G.generate())
[(2), (2)(0 1)]
See Also
========
order
"""
return self._degree
def derived_series(self):
r"""Return the derived series for the group.
The derived series for a group ``G`` is defined as
``G = G_0 > G_1 > G_2 > \ldots`` where ``G_i = [G_{i-1}, G_{i-1}]``,
i.e. ``G_i`` is the derived subgroup of ``G_{i-1}``, for
``i\in\mathbb{N}``. When we have ``G_k = G_{k-1}`` for some
``k\in\mathbb{N}``, the series terminates.
Returns
=======
A list of permutation groups containing the members of the derived
series in the order ``G = G_0, G_1, G_2, \ldots``.
Examples
========
>>> from sympy.combinatorics.named_groups import (SymmetricGroup,
... AlternatingGroup, DihedralGroup)
>>> A = AlternatingGroup(5)
>>> len(A.derived_series())
1
>>> S = SymmetricGroup(4)
>>> len(S.derived_series())
4
>>> S.derived_series()[1].is_subgroup(AlternatingGroup(4))
True
>>> S.derived_series()[2].is_subgroup(DihedralGroup(2))
True
See Also
========
derived_subgroup
"""
res = [self]
current = self
next = self.derived_subgroup()
while not current.is_subgroup(next):
res.append(next)
current = next
next = next.derived_subgroup()
return res
def derived_subgroup(self):
"""Compute the derived subgroup.
The derived subgroup, or commutator subgroup is the subgroup generated
by all commutators ``[g, h] = hgh^{-1}g^{-1}`` for ``g, h\in G`` ; it is
equal to the normal closure of the set of commutators of the generators
([1], p.28, [11]).
Examples
========
>>> from sympy.combinatorics import Permutation
>>> Permutation.print_cyclic = True
>>> from sympy.combinatorics.perm_groups import PermutationGroup
>>> a = Permutation([1, 0, 2, 4, 3])
>>> b = Permutation([0, 1, 3, 2, 4])
>>> G = PermutationGroup([a, b])
>>> C = G.derived_subgroup()
>>> list(C.generate(af=True))
[[0, 1, 2, 3, 4], [0, 1, 3, 4, 2], [0, 1, 4, 2, 3]]
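The derived subgroup here is cyclic of order 3:
>>> C.order()
3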
See Also
========
derived_series
"""
r = self._r
gens = [p._array_form for p in self.generators]
gens_inv = [_af_invert(p) for p in gens]
set_commutators = set()
degree = self._degree
rng = list(range(degree))
for i in range(r):
for j in range(r):
p1 = gens[i]
p2 = gens[j]
c = list(range(degree))
for k in rng:
c[p2[p1[k]]] = p1[p2[k]]
ct = tuple(c)
if ct not in set_commutators:
set_commutators.add(ct)
cms = [_af_new(p) for p in set_commutators]
G2 = self.normal_closure(cms)
return G2
def generate(self, method="coset", af=False):
"""Return iterator to generate the elements of the group
Iteration is done with one of these methods::
method='coset' using the Schreier-Sims coset representation
method='dimino' using the Dimino method
If af = True it yields the array form of the permutations
Examples
========
>>> from sympy.combinatorics import Permutation
>>> Permutation.print_cyclic = True
>>> from sympy.combinatorics import PermutationGroup
>>> from sympy.combinatorics.polyhedron import tetrahedron
The permutation group given in the tetrahedron object is not a
true group:
>>> G = tetrahedron.pgroup
>>> G.is_group()
False
But the group generated by the permutations in the tetrahedron
pgroup -- even the first two -- is a proper group:
>>> H = PermutationGroup(G[0], G[1])
>>> J = PermutationGroup(list(H.generate())); J
PermutationGroup([
(0 1)(2 3),
(3),
(1 2 3),
(1 3 2),
(0 3 1),
(0 2 3),
(0 3)(1 2),
(0 1 3),
(3)(0 2 1),
(0 3 2),
(3)(0 1 2),
(0 2)(1 3)])
>>> _.is_group()
True
"""
if method == "coset":
return self.generate_schreier_sims(af)
elif method == "dimino":
return self.generate_dimino(af)
else:
raise NotImplementedError('No generation defined for %s' % method)
def generate_dimino(self, af=False):
"""Yield group elements using Dimino's algorithm
If af == True it yields the array form of the permutations
References
==========
[1] The Implementation of Various Algorithms for Permutation Groups in
the Computer Algebra System: AXIOM, N.J. Doye, M.Sc. Thesis
Examples
========
>>> from sympy.combinatorics import Permutation
>>> Permutation.print_cyclic = True
>>> from sympy.combinatorics.perm_groups import PermutationGroup
>>> a = Permutation([0, 2, 1, 3])
>>> b = Permutation([0, 2, 3, 1])
>>> g = PermutationGroup([a, b])
>>> list(g.generate_dimino(af=True))
[[0, 1, 2, 3], [0, 2, 1, 3], [0, 2, 3, 1],
[0, 1, 3, 2], [0, 3, 2, 1], [0, 3, 1, 2]]
"""
idn = list(range(self.degree))
order = 0
element_list = [idn]
set_element_list = set([tuple(idn)])
if af:
yield idn
else:
yield _af_new(idn)
gens = [p._array_form for p in self.generators]
for i in range(len(gens)):
# D elements of the subgroup G_i generated by gens[:i]
D = element_list[:]
N = [idn]
while N:
A = N
N = []
for a in A:
for g in gens[:i + 1]:
ag = _af_rmul(a, g)
if tuple(ag) not in set_element_list:
# produce G_i*g
for d in D:
order += 1
ap = _af_rmul(d, ag)
if af:
yield ap
else:
p = _af_new(ap)
yield p
element_list.append(ap)
set_element_list.add(tuple(ap))
N.append(ap)
self._order = len(element_list)
def generate_schreier_sims(self, af=False):
"""Yield group elements using the Schreier-Sims representation
in coset_rank order
If af = True it yields the array form of the permutations
Examples
========
>>> from sympy.combinatorics import Permutation
>>> Permutation.print_cyclic = True
>>> from sympy.combinatorics.perm_groups import PermutationGroup
>>> a = Permutation([0, 2, 1, 3])
>>> b = Permutation([0, 2, 3, 1])
>>> g = PermutationGroup([a, b])
>>> list(g.generate_schreier_sims(af=True))
[[0, 1, 2, 3], [0, 2, 1, 3], [0, 3, 2, 1],
[0, 1, 3, 2], [0, 2, 3, 1], [0, 3, 1, 2]]
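Both methods generate the same elements, only in a different order:
>>> sorted(g.generate_dimino(af=True)) == sorted(g.generate_schreier_sims(af=True))
True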
"""
n = self._degree
u = self.basic_transversals
basic_orbits = self._basic_orbits
if len(u) == 0:
for x in self.generators:
if af:
yield x._array_form
else:
yield x
return  # end the generator; raising StopIteration is an error (PEP 479)
if len(u) == 1:
for i in basic_orbits[0]:
if af:
yield u[0][i]._array_form
else:
yield u[0][i]
return
u = list(reversed(u))
basic_orbits = basic_orbits[::-1]
# stg stack of group elements
stg = [list(range(n))]
posmax = [len(x) for x in u]
n1 = len(posmax) - 1
pos = [0]*n1
h = 0
while 1:
# backtrack when finished iterating over coset
if pos[h] >= posmax[h]:
if h == 0:
return
pos[h] = 0
h -= 1
stg.pop()
continue
p = _af_rmul(u[h][basic_orbits[h][pos[h]]]._array_form, stg[-1])
pos[h] += 1
stg.append(p)
h += 1
if h == n1:
if af:
for i in basic_orbits[-1]:
p = _af_rmul(u[-1][i]._array_form, stg[-1])
yield p
else:
for i in basic_orbits[-1]:
p = _af_rmul(u[-1][i]._array_form, stg[-1])
p1 = _af_new(p)
yield p1
stg.pop()
h -= 1
@property
def generators(self):
"""Returns the generators of the group.
Examples
========
>>> from sympy.combinatorics import Permutation
>>> Permutation.print_cyclic = True
>>> from sympy.combinatorics.perm_groups import PermutationGroup
>>> a = Permutation([0, 2, 1])
>>> b = Permutation([1, 0, 2])
>>> G = PermutationGroup([a, b])
>>> G.generators
[(1 2), (2)(0 1)]
"""
return self._generators
def contains(self, g, strict=True):
"""Test if permutation ``g`` belong to self, ``G``.
If ``g`` is an element of ``G`` it can be written as a product
of factors drawn from the cosets of ``G``'s stabilizers. To see
if ``g`` is one of the actual generators defining the group use
``G.has(g)``.
If ``strict`` is not True, ``g`` will be resized, if necessary,
to match the size of permutations in ``self``.
Examples
========
>>> from sympy.combinatorics import Permutation
>>> Permutation.print_cyclic = True
>>> from sympy.combinatorics.perm_groups import PermutationGroup
>>> a = Permutation(1, 2)
>>> b = Permutation(2, 3, 1)
>>> G = PermutationGroup(a, b, degree=5)
>>> G.contains(G[0]) # trivial check
True
>>> elem = Permutation([[2, 3]], size=5)
>>> G.contains(elem)
True
>>> G.contains(Permutation(4)(0, 1, 2, 3))
False
If strict is False, a permutation will be resized, if
necessary:
>>> H = PermutationGroup(Permutation(5))
>>> H.contains(Permutation(3))
False
>>> H.contains(Permutation(3), strict=False)
True
To test if a given permutation is present in the group:
>>> elem in G.generators
False
>>> G.has(elem)
False
See Also
========
coset_factor, has, in
"""
if not isinstance(g, Permutation):
return False
if g.size != self.degree:
if strict:
return False
g = Permutation(g, size=self.degree)
if g in self.generators:
return True
return bool(self.coset_factor(g.array_form, True))
@property
def is_abelian(self):
"""Test if the group is Abelian.
Examples
========
>>> from sympy.combinatorics import Permutation
>>> Permutation.print_cyclic = True
>>> from sympy.combinatorics.perm_groups import PermutationGroup
>>> a = Permutation([0, 2, 1])
>>> b = Permutation([1, 0, 2])
>>> G = PermutationGroup([a, b])
>>> G.is_abelian
False
>>> a = Permutation([0, 2, 1])
>>> G = PermutationGroup([a])
>>> G.is_abelian
True
"""
if self._is_abelian is not None:
return self._is_abelian
self._is_abelian = True
gens = [p._array_form for p in self.generators]
for x in gens:
for y in gens:
if y <= x:
continue
if not _af_commutes_with(x, y):
self._is_abelian = False
return False
return True
def is_alt_sym(self, eps=0.05, _random_prec=None):
r"""Monte Carlo test for the symmetric/alternating group for degrees
>= 8.
More specifically, it is one-sided Monte Carlo with the
answer True (i.e., G is symmetric/alternating) guaranteed to be
correct, and the answer False being incorrect with probability at most ``eps``.
Notes
=====
The algorithm itself uses some nontrivial results from group theory and
number theory:
1) If a transitive group ``G`` of degree ``n`` contains an element
with a cycle of length ``n/2 < p < n-2`` for ``p`` a prime, ``G`` is the
symmetric or alternating group ([1], pp. 81-82)
2) The proportion of elements in the symmetric/alternating group having
the property described in 1) is approximately ``\log(2)/\log(n)``
([1], p.82; [2], pp. 226-227).
The helper function ``_check_cycles_alt_sym`` is used to
go over the cycles in a permutation and look for ones satisfying 1).
Examples
========
>>> from sympy.combinatorics.perm_groups import PermutationGroup
>>> from sympy.combinatorics.named_groups import DihedralGroup
>>> D = DihedralGroup(10)
>>> D.is_alt_sym()
False
See Also
========
_check_cycles_alt_sym
"""
if _random_prec is None:
n = self.degree
if n < 8:
return False
if not self.is_transitive():
return False
if n < 17:
c_n = 0.34
else:
c_n = 0.57
d_n = (c_n*log(2))/log(n)
N_eps = int(-log(eps)/d_n)
for i in range(N_eps):
perm = self.random_pr()
if _check_cycles_alt_sym(perm):
return True
return False
else:
for i in range(_random_prec['N_eps']):
perm = _random_prec[i]
if _check_cycles_alt_sym(perm):
return True
return False
@property
def is_nilpotent(self):
"""Test if the group is nilpotent.
A group ``G`` is nilpotent if it has a central series of finite length.
Alternatively, ``G`` is nilpotent if its lower central series terminates
with the trivial group. Every nilpotent group is also solvable
([1], p.29, [12]).
Examples
========
>>> from sympy.combinatorics.named_groups import (SymmetricGroup,
... CyclicGroup)
>>> C = CyclicGroup(6)
>>> C.is_nilpotent
True
>>> S = SymmetricGroup(5)
>>> S.is_nilpotent
False
See Also
========
lower_central_series, is_solvable
"""
if self._is_nilpotent is None:
lcs = self.lower_central_series()
terminator = lcs[len(lcs) - 1]
gens = terminator.generators
degree = self.degree
identity = _af_new(list(range(degree)))
if all(g == identity for g in gens):
self._is_solvable = True
self._is_nilpotent = True
return True
else:
self._is_nilpotent = False
return False
else:
return self._is_nilpotent
def is_normal(self, gr):
"""Test if G=self is a normal subgroup of gr.
G is normal in gr if
for each ``g2`` in ``G``, ``g1`` in ``gr``, ``g = g1*g2*g1**-1`` belongs to ``G``.
It is sufficient to check this for each ``g1`` in ``gr.generators`` and
``g2`` in ``G.generators``.
Examples
========
>>> from sympy.combinatorics import Permutation
>>> Permutation.print_cyclic = True
>>> from sympy.combinatorics.perm_groups import PermutationGroup
>>> a = Permutation([1, 2, 0])
>>> b = Permutation([1, 0, 2])
>>> G = PermutationGroup([a, b])
>>> G1 = PermutationGroup([a, Permutation([2, 0, 1])])
>>> G1.is_normal(G)
True
"""
gens2 = [p._array_form for p in self.generators]
gens1 = [p._array_form for p in gr.generators]
for g1 in gens1:
for g2 in gens2:
p = _af_rmuln(g1, g2, _af_invert(g1))
if not self.coset_factor(p, True):
return False
return True
def is_primitive(self, randomized=True):
"""Test if a group is primitive.
A permutation group ``G`` acting on a set ``S`` is called primitive if
``S`` contains no nontrivial block under the action of ``G``
(a block is nontrivial if its cardinality is more than ``1``).
Notes
=====
The algorithm is described in [1], p.83, and uses the function
minimal_block to search for blocks of the form ``\{0, k\}`` for ``k``
ranging over representatives for the orbits of ``G_0``, the stabilizer of
``0``. This algorithm has complexity ``O(n^2)`` where ``n`` is the degree
of the group, and will perform badly if ``G_0`` is small.
There are two implementations offered: one finds ``G_0``
deterministically using the function ``stabilizer``, and the other
(default) produces random elements of ``G_0`` using ``random_stab``,
hoping that they generate a subgroup of ``G_0`` with not too many more
orbits than G_0 (this is suggested in [1], p.83). Behavior is changed
by the ``randomized`` flag.
Examples
========
>>> from sympy.combinatorics.perm_groups import PermutationGroup
>>> from sympy.combinatorics.named_groups import DihedralGroup
>>> D = DihedralGroup(10)
>>> D.is_primitive()
False
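A transitive group of prime degree, by contrast, is always primitive:
>>> from sympy.combinatorics.named_groups import CyclicGroup
>>> CyclicGroup(7).is_primitive()
True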
See Also
========
minimal_block, random_stab
"""
if self._is_primitive is not None:
return self._is_primitive
n = self.degree
if randomized:
random_stab_gens = []
v = self.schreier_vector(0)
for i in range(len(self)):
random_stab_gens.append(self.random_stab(0, v))
stab = PermutationGroup(random_stab_gens)
else:
stab = self.stabilizer(0)
orbits = stab.orbits()
for orb in orbits:
x = orb.pop()
if x != 0 and self.minimal_block([0, x]) != [0]*n:
self._is_primitive = False
return False
self._is_primitive = True
return True
@property
def is_solvable(self):
"""Test if the group is solvable.
``G`` is solvable if its derived series terminates with the trivial
group ([1], p.29).
Examples
========
>>> from sympy.combinatorics.named_groups import SymmetricGroup
>>> S = SymmetricGroup(3)
>>> S.is_solvable
True
See Also
========
is_nilpotent, derived_series
"""
if self._is_solvable is None:
ds = self.derived_series()
terminator = ds[len(ds) - 1]
gens = terminator.generators
degree = self.degree
identity = _af_new(list(range(degree)))
if all(g == identity for g in gens):
self._is_solvable = True
return True
else:
self._is_solvable = False
return False
else:
return self._is_solvable
def is_subgroup(self, G, strict=True):
"""Return True if all elements of self belong to G.
If ``strict`` is False and ``self``'s degree is smaller
than ``G``'s, the elements will be resized to have the same degree.
Examples
========
>>> from sympy.combinatorics import Permutation, PermutationGroup
>>> from sympy.combinatorics.named_groups import (SymmetricGroup,
... CyclicGroup)
Testing is strict by default: the degree of each group must be the
same:
>>> p = Permutation(0, 1, 2, 3, 4, 5)
>>> G1 = PermutationGroup([Permutation(0, 1, 2), Permutation(0, 1)])
>>> G2 = PermutationGroup([Permutation(0, 2), Permutation(0, 1, 2)])
>>> G3 = PermutationGroup([p, p**2])
>>> assert G1.order() == G2.order() == G3.order() == 6
>>> G1.is_subgroup(G2)
True
>>> G1.is_subgroup(G3)
False
>>> G3.is_subgroup(PermutationGroup(G3[1]))
False
>>> G3.is_subgroup(PermutationGroup(G3[0]))
True
To ignore the size, set ``strict`` to False:
>>> S3 = SymmetricGroup(3)
>>> S5 = SymmetricGroup(5)
>>> S3.is_subgroup(S5, strict=False)
True
>>> C7 = CyclicGroup(7)
>>> G = S5*C7
>>> S5.is_subgroup(G, False)
True
>>> C7.is_subgroup(G, 0)
False
"""
if not isinstance(G, PermutationGroup):
return False
if self == G:
return True
if G.order() % self.order() != 0:
return False
if self.degree == G.degree or \
(self.degree < G.degree and not strict):
gens = self.generators
else:
return False
return all(G.contains(g, strict=strict) for g in gens)
def is_transitive(self, strict=True):
"""Test if the group is transitive.
A group is transitive if it has a single orbit.
If ``strict`` is False the group is transitive if it has
a single orbit of length different from 1.
Examples
========
>>> from sympy.combinatorics.permutations import Permutation
>>> from sympy.combinatorics.perm_groups import PermutationGroup
>>> a = Permutation([0, 2, 1, 3])
>>> b = Permutation([2, 0, 1, 3])
>>> G1 = PermutationGroup([a, b])
>>> G1.is_transitive()
False
>>> G1.is_transitive(strict=False)
True
>>> c = Permutation([2, 3, 0, 1])
>>> G2 = PermutationGroup([a, c])
>>> G2.is_transitive()
True
>>> d = Permutation([1, 0, 2, 3])
>>> e = Permutation([0, 1, 3, 2])
>>> G3 = PermutationGroup([d, e])
>>> G3.is_transitive() or G3.is_transitive(strict=False)
False
"""
if self._is_transitive: # strict or not, if True then True
return self._is_transitive
if strict:
if self._is_transitive is not None: # we only store strict=True
return self._is_transitive
ans = len(self.orbit(0)) == self.degree
self._is_transitive = ans
return ans
got_orb = False
for x in self.orbits():
if len(x) > 1:
if got_orb:
return False
got_orb = True
return got_orb
@property
def is_trivial(self):
"""Test if the group is the trivial group.
This is true if the group contains only the identity permutation.
Examples
========
>>> from sympy.combinatorics import Permutation
>>> from sympy.combinatorics.perm_groups import PermutationGroup
>>> G = PermutationGroup([Permutation([0, 1, 2])])
>>> G.is_trivial
True
"""
if self._is_trivial is None:
self._is_trivial = len(self) == 1 and self[0].is_Identity
return self._is_trivial
def lower_central_series(self):
r"""Return the lower central series for the group.
The lower central series for a group ``G`` is the series
``G = G_0 > G_1 > G_2 > \ldots`` where
``G_k = [G, G_{k-1}]``, i.e. every term after the first is equal to the
commutator of ``G`` and the previous term in the series ([1], p.29).
Returns
=======
A list of permutation groups in the order
``G = G_0, G_1, G_2, \ldots``
Examples
========
>>> from sympy.combinatorics.named_groups import (AlternatingGroup,
... DihedralGroup)
>>> A = AlternatingGroup(4)
>>> len(A.lower_central_series())
2
>>> A.lower_central_series()[1].is_subgroup(DihedralGroup(2))
True
See Also
========
commutator, derived_series
"""
res = [self]
current = self
next = self.commutator(self, current)
while not current.is_subgroup(next):
res.append(next)
current = next
next = self.commutator(self, current)
return res
@property
def max_div(self):
"""Maximum proper divisor of the degree of a permutation group.
Notes
=====
Obviously, this is the degree divided by its minimal proper divisor
(larger than ``1``, if one exists). As it is guaranteed to be prime,
the ``sieve`` from ``sympy.ntheory`` is used.
This function is also used as an optimization tool for the functions
``minimal_block`` and ``_union_find_merge``.
Examples
========
>>> from sympy.combinatorics import Permutation
>>> from sympy.combinatorics.perm_groups import PermutationGroup
>>> G = PermutationGroup([Permutation([0, 2, 1, 3])])
>>> G.max_div
2
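Since it is a proper divisor, it divides the degree exactly:
>>> G.degree % G.max_div
0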
See Also
========
minimal_block, _union_find_merge
"""
if self._max_div is not None:
return self._max_div
n = self.degree
if n == 1:
return 1
for x in sieve:
if n % x == 0:
d = n//x
self._max_div = d
return d
def minimal_block(self, points):
r"""For a transitive group, finds the block system generated by
``points``.
If a group ``G`` acts on a set ``S``, a nonempty subset ``B`` of ``S``
is called a block under the action of ``G`` if for all ``g`` in ``G``
we have ``gB = B`` (``g`` fixes ``B``) or ``gB`` and ``B`` have no
common points (``g`` moves ``B`` entirely). ([1], p.23; [6]).
The distinct translates ``gB`` of a block ``B`` for ``g`` in ``G``
partition the set ``S`` and this set of translates is known as a block
system. Moreover, we obviously have that all blocks in the partition
have the same size, hence the block size divides ``|S|`` ([1], p.23).
A ``G``-congruence is an equivalence relation ``~`` on the set ``S``
such that ``a ~ b`` implies ``g(a) ~ g(b)`` for all ``g`` in ``G``.
For a transitive group, the equivalence classes of a ``G``-congruence
and the blocks of a block system are the same thing ([1], p.23).
The algorithm below checks the group for transitivity, and then finds
the ``G``-congruence generated by the pairs ``(p_0, p_1), (p_0, p_2),
..., (p_0,p_{k-1})`` which is the same as finding the maximal block
system (i.e., the one with minimum block size) such that
``p_0, ..., p_{k-1}`` are in the same block ([1], p.83).
It is an implementation of Atkinson's algorithm, as suggested in [1],
and manipulates an equivalence relation on the set ``S`` using a
union-find data structure. The running time is just above
``O(|points||S|)``. ([1], pp. 83-87; [7]).
Examples
========
>>> from sympy.combinatorics.perm_groups import PermutationGroup
>>> from sympy.combinatorics.named_groups import DihedralGroup
>>> D = DihedralGroup(10)
>>> D.minimal_block([0, 5])
[0, 6, 2, 8, 4, 0, 6, 2, 8, 4]
>>> D.minimal_block([0, 1])
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0]
See Also
========
_union_find_rep, _union_find_merge, is_transitive, is_primitive
"""
if not self.is_transitive():
return False
n = self.degree
gens = self.generators
# initialize the list of equivalence class representatives
parents = list(range(n))
ranks = [1]*n
not_rep = []
k = len(points)
# the block size must divide the degree of the group
if k > self.max_div:
return [0]*n
for i in range(k - 1):
parents[points[i + 1]] = points[0]
not_rep.append(points[i + 1])
ranks[points[0]] = k
i = 0
len_not_rep = k - 1
while i < len_not_rep:
temp = not_rep[i]
i += 1
for gen in gens:
# find has side effects: performs path compression on the list
# of representatives
delta = self._union_find_rep(temp, parents)
# union has side effects: performs union by rank on the list
# of representatives
temp = self._union_find_merge(gen(temp), gen(delta), ranks,
parents, not_rep)
if temp == -1:
return [0]*n
len_not_rep += temp
for i in range(n):
# force path compression to get the final state of the equivalence
# relation
self._union_find_rep(i, parents)
return parents
def normal_closure(self, other, k=10):
r"""Return the normal closure of a subgroup/set of permutations.
If ``S`` is a subset of a group ``G``, the normal closure of ``S`` in ``G``
is defined as the intersection of all normal subgroups of ``G`` that
contain ``S`` ([1], p.14). Alternatively, it is the group generated by
the conjugates ``x^{-1}yx`` for ``x`` a generator of ``G`` and ``y`` a
generator of the subgroup ``\left\langle S\right\rangle`` generated by
``S`` (for some chosen generating set for ``\left\langle S\right\rangle``)
([1], p.73).
Parameters
==========
other
a subgroup/list of permutations/single permutation
k
an implementation-specific parameter that determines the number
of conjugates that are adjoined to ``other`` at once
Examples
========
>>> from sympy.combinatorics.named_groups import (SymmetricGroup,
... CyclicGroup, AlternatingGroup)
>>> S = SymmetricGroup(5)
>>> C = CyclicGroup(5)
>>> G = S.normal_closure(C)
>>> G.order()
60
>>> G.is_subgroup(AlternatingGroup(5))
True
See Also
========
commutator, derived_subgroup, random_pr
Notes
=====
The algorithm is described in [1], pp. 73-74; it makes use of the
generation of random elements for permutation groups by the product
replacement algorithm.
"""
if hasattr(other, 'generators'):
degree = self.degree
identity = _af_new(list(range(degree)))
if all(g == identity for g in other.generators):
return other
Z = PermutationGroup(other.generators[:])
base, strong_gens = Z.schreier_sims_incremental()
strong_gens_distr = _distribute_gens_by_base(base, strong_gens)
basic_orbits, basic_transversals = \
_orbits_transversals_from_bsgs(base, strong_gens_distr)
self._random_pr_init(r=10, n=20)
_loop = True
while _loop:
Z._random_pr_init(r=10, n=10)
for i in range(k):
g = self.random_pr()
h = Z.random_pr()
conj = h^g
res = _strip(conj, base, basic_orbits, basic_transversals)
if res[0] != identity or res[1] != len(base) + 1:
gens = Z.generators
gens.append(conj)
Z = PermutationGroup(gens)
strong_gens.append(conj)
temp_base, temp_strong_gens = \
Z.schreier_sims_incremental(base, strong_gens)
base, strong_gens = temp_base, temp_strong_gens
strong_gens_distr = \
_distribute_gens_by_base(base, strong_gens)
basic_orbits, basic_transversals = \
_orbits_transversals_from_bsgs(base,
strong_gens_distr)
_loop = False
for g in self.generators:
for h in Z.generators:
conj = h^g
res = _strip(conj, base, basic_orbits,
basic_transversals)
if res[0] != identity or res[1] != len(base) + 1:
_loop = True
break
if _loop:
break
return Z
elif hasattr(other, '__getitem__'):
return self.normal_closure(PermutationGroup(other))
elif hasattr(other, 'array_form'):
return self.normal_closure(PermutationGroup([other]))
def orbit(self, alpha, action='tuples'):
r"""Compute the orbit of alpha ``\{g(\alpha) | g \in G\}`` as a set.
The time complexity of the algorithm used here is ``O(|Orb|*r)`` where
``|Orb|`` is the size of the orbit and ``r`` is the number of generators of
the group. For a more detailed analysis, see [1], p.78, [2], pp. 19-21.
Here alpha can be a single point, or a list of points.
If alpha is a single point, the ordinary orbit is computed.
If alpha is a list of points, there are three available options:
'union' - computes the union of the orbits of the points in the list
'tuples' - computes the orbit of the list interpreted as an ordered
tuple under the group action ( i.e., g((1,2,3)) = (g(1), g(2), g(3)) )
'sets' - computes the orbit of the list interpreted as a set
Examples
========
>>> from sympy.combinatorics import Permutation
>>> from sympy.combinatorics.perm_groups import PermutationGroup
>>> a = Permutation([1, 2, 0, 4, 5, 6, 3])
>>> G = PermutationGroup([a])
>>> G.orbit(0)
set([0, 1, 2])
>>> G.orbit([0, 4], 'union')
set([0, 1, 2, 3, 4, 5, 6])
See Also
========
orbit_transversal
"""
return _orbit(self.degree, self.generators, alpha, action)
def orbit_rep(self, alpha, beta, schreier_vector=None):
"""Return a group element which sends ``alpha`` to ``beta``.
If ``beta`` is not in the orbit of ``alpha``, the function returns
``False``. This implementation makes use of the schreier vector.
For a proof of correctness, see [1], p.80
Examples
========
>>> from sympy.combinatorics import Permutation
>>> Permutation.print_cyclic = True
>>> from sympy.combinatorics.perm_groups import PermutationGroup
>>> from sympy.combinatorics.named_groups import AlternatingGroup
>>> G = AlternatingGroup(5)
>>> G.orbit_rep(0, 4)
(0 4 1 2 3)
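By construction, the element returned maps ``alpha`` to ``beta``:
>>> G.orbit_rep(0, 4)(0)
4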
See Also
========
schreier_vector
"""
if schreier_vector is None:
schreier_vector = self.schreier_vector(alpha)
if schreier_vector[beta] is None:
return False
k = schreier_vector[beta]
gens = [x._array_form for x in self.generators]
a = []
while k != -1:
a.append(gens[k])
beta = gens[k].index(beta) # beta = (~gens[k])(beta)
k = schreier_vector[beta]
if a:
return _af_new(_af_rmuln(*a))
else:
return _af_new(list(range(self._degree)))
def orbit_transversal(self, alpha, pairs=False):
r"""Computes a transversal for the orbit of ``alpha`` as a set.
For a permutation group ``G``, a transversal for the orbit
``Orb = \{g(\alpha) | g \in G\}`` is a set
``\{g_\beta | g_\beta(\alpha) = \beta\}`` for ``\beta \in Orb``.
Note that there may be more than one possible transversal.
If ``pairs`` is set to ``True``, it returns the list of pairs
``(\beta, g_\beta)``. For a proof of correctness, see [1], p.79
Examples
========
>>> from sympy.combinatorics import Permutation
>>> Permutation.print_cyclic = True
>>> from sympy.combinatorics.perm_groups import PermutationGroup
>>> from sympy.combinatorics.named_groups import DihedralGroup
>>> G = DihedralGroup(6)
>>> G.orbit_transversal(0)
[(5), (0 1 2 3 4 5), (0 5)(1 4)(2 3), (0 2 4)(1 3 5), (5)(0 4)(1 3), (0 3)(1 4)(2 5)]
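With ``pairs=True``, each element is paired with the point to which it
sends ``alpha``:
>>> all(g(0) == beta for beta, g in G.orbit_transversal(0, pairs=True))
True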
See Also
========
orbit
"""
return _orbit_transversal(self._degree, self.generators, alpha, pairs)
def orbits(self, rep=False):
"""Return the orbits of self, ordered according to lowest element
in each orbit.
Examples
========
>>> from sympy.combinatorics.permutations import Permutation
>>> from sympy.combinatorics.perm_groups import PermutationGroup
>>> a = Permutation(1, 5)(2, 3)(4, 0, 6)
>>> b = Permutation(1, 5)(3, 4)(2, 6, 0)
>>> G = PermutationGroup([a, b])
>>> G.orbits()
[set([0, 2, 3, 4, 6]), set([1, 5])]
"""
return _orbits(self._degree, self._generators)
def order(self):
"""Return the order of the group: the number of permutations that
can be generated from elements of the group.
The number of permutations comprising the group is given by
len(group); the length of each permutation in the group is
given by group.size.
Examples
========
>>> from sympy.combinatorics.permutations import Permutation
>>> from sympy.combinatorics.perm_groups import PermutationGroup
>>> a = Permutation([1, 0, 2])
>>> G = PermutationGroup([a])
>>> G.degree
3
>>> len(G)
1
>>> G.order()
2
>>> list(G.generate())
[(2), (2)(0 1)]
>>> a = Permutation([0, 2, 1])
>>> b = Permutation([1, 0, 2])
>>> G = PermutationGroup([a, b])
>>> G.order()
6
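The order coincides with the number of elements generated:
>>> G.order() == len(list(G.generate()))
True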
See Also
========
degree
"""
if self._order is not None:
return self._order
if self._is_sym:
n = self._degree
self._order = factorial(n)
return self._order
if self._is_alt:
n = self._degree
self._order = factorial(n)/2
return self._order
basic_transversals = self.basic_transversals
m = 1
for x in basic_transversals:
m *= len(x)
self._order = m
return m
def pointwise_stabilizer(self, points, incremental=True):
r"""Return the pointwise stabilizer for a set of points.
For a permutation group ``G`` and a set of points
``\{p_1, p_2,\ldots, p_k\}``, the pointwise stabilizer of
``p_1, p_2, \ldots, p_k`` is defined as
``G_{p_1,\ldots, p_k} =
\{g\in G | g(p_i) = p_i \forall i\in\{1, 2,\ldots,k\}\}`` ([1], p.20).
It is a subgroup of ``G``.
Examples
========
>>> from sympy.combinatorics.named_groups import SymmetricGroup
>>> S = SymmetricGroup(7)
>>> Stab = S.pointwise_stabilizer([2, 3, 5])
>>> Stab.is_subgroup(S.stabilizer(2).stabilizer(3).stabilizer(5))
True
See Also
========
stabilizer, schreier_sims_incremental
Notes
=====
When ``incremental == True``,
rather than the obvious implementation using successive calls to
``.stabilizer()``, this uses the incremental Schreier-Sims algorithm
to obtain a base whose starting segment is the given points.
"""
if incremental:
base, strong_gens = self.schreier_sims_incremental(base=points)
stab_gens = []
degree = self.degree
for gen in strong_gens:
if [gen(point) for point in points] == points:
stab_gens.append(gen)
if not stab_gens:
stab_gens = _af_new(list(range(degree)))
return PermutationGroup(stab_gens)
else:
gens = self._generators
degree = self.degree
for x in points:
gens = _stabilizer(degree, gens, x)
return PermutationGroup(gens)
def make_perm(self, n, seed=None):
"""
Multiply ``n`` randomly selected permutations from
pgroup together, starting with the identity
permutation. If ``n`` is a list of integers, those
integers will be used to select the permutations and they
will be applied in L to R order: make_perm((A, B, C)) will
give CBA(I) where I is the identity permutation.
``seed`` is used to set the seed for the random selection
of permutations from pgroup. If this is a list of integers,
the corresponding permutations from pgroup will be selected
in the order given. This is mainly used for testing purposes.
Examples
========
>>> from sympy.combinatorics import Permutation
>>> Permutation.print_cyclic = True
>>> from sympy.combinatorics.perm_groups import PermutationGroup
>>> a, b = [Permutation([1, 0, 3, 2]), Permutation([1, 3, 0, 2])]
>>> G = PermutationGroup([a, b])
>>> G.make_perm(1, [0])
(0 1)(2 3)
>>> G.make_perm(3, [0, 1, 0])
(0 2 3 1)
>>> G.make_perm([0, 1, 0])
(0 2 3 1)
See Also
========
random
"""
if is_sequence(n):
if seed is not None:
raise ValueError('If n is a sequence, seed should be None')
n, seed = len(n), n
else:
try:
n = int(n)
except TypeError:
raise ValueError('n must be an integer or a sequence.')
randrange = _randrange(seed)
# start with the identity permutation
result = Permutation(list(range(self.degree)))
m = len(self)
for i in range(n):
p = self[randrange(m)]
result = rmul(result, p)
return result
def random(self, af=False):
"""Return a random group element
"""
rank = randrange(self.order())
return self.coset_unrank(rank, af)
def random_pr(self, gen_count=11, iterations=50, _random_prec=None):
"""Return a random group element using product replacement.
For the details of the product replacement algorithm, see
``_random_pr_init``. In ``random_pr`` the actual 'product replacement'
is performed. Notice that if the attribute ``_random_gens``
is empty, it needs to be initialized by ``_random_pr_init``.
See Also
========
_random_pr_init
"""
if self._random_gens == []:
self._random_pr_init(gen_count, iterations)
random_gens = self._random_gens
r = len(random_gens) - 1
# handle randomized input for testing purposes
if _random_prec is None:
s = randrange(r)
t = randrange(r - 1)
if t == s:
t = r - 1
x = choice([1, 2])
e = choice([-1, 1])
else:
s = _random_prec['s']
t = _random_prec['t']
if t == s:
t = r - 1
x = _random_prec['x']
e = _random_prec['e']
if x == 1:
random_gens[s] = _af_rmul(random_gens[s], _af_pow(random_gens[t], e))
random_gens[r] = _af_rmul(random_gens[r], random_gens[s])
else:
random_gens[s] = _af_rmul(_af_pow(random_gens[t], e), random_gens[s])
random_gens[r] = _af_rmul(random_gens[s], random_gens[r])
return _af_new(random_gens[r])
def random_stab(self, alpha, schreier_vector=None, _random_prec=None):
"""Random element from the stabilizer of ``alpha``.
The schreier vector for ``alpha`` is an optional argument used
for speeding up repeated calls. The algorithm is described in [1], p.81
See Also
========
random_pr, orbit_rep
"""
if schreier_vector is None:
schreier_vector = self.schreier_vector(alpha)
if _random_prec is None:
rand = self.random_pr()
else:
rand = _random_prec['rand']
beta = rand(alpha)
h = self.orbit_rep(alpha, beta, schreier_vector)
return rmul(~h, rand)
def schreier_sims(self):
"""Schreier-Sims algorithm.
It computes the generators of the chain of stabilizers
``G > G_{b_1} > ... > G_{b_1,...,b_r} > 1``
in which ``G_{b_1,...,b_i}`` stabilizes ``b_1,...,b_i``,
and the corresponding ``s`` cosets.
An element of the group can be written as the product
h_1*..*h_s.
We use the incremental Schreier-Sims algorithm.
Examples
========
>>> from sympy.combinatorics.permutations import Permutation
>>> from sympy.combinatorics.perm_groups import PermutationGroup
>>> a = Permutation([0, 2, 1])
>>> b = Permutation([1, 0, 2])
>>> G = PermutationGroup([a, b])
>>> G.schreier_sims()
>>> G.basic_transversals
[{0: (2)(0 1), 1: (2), 2: (1 2)},
{0: (2), 2: (0 2)}]
"""
if self._transversals:
return
base, strong_gens = self.schreier_sims_incremental()
self._base = base
self._strong_gens = strong_gens
if not base:
self._transversals = []
self._basic_orbits = []
return
strong_gens_distr = _distribute_gens_by_base(base, strong_gens)
basic_orbits, transversals = _orbits_transversals_from_bsgs(base,\
strong_gens_distr)
self._transversals = transversals
self._basic_orbits = [sorted(x) for x in basic_orbits]
def schreier_sims_incremental(self, base=None, gens=None):
"""Extend a sequence of points and generating set to a base and strong
generating set.
Parameters
==========
base
The sequence of points to be extended to a base. Optional
parameter with default value ``[]``.
gens
The generating set to be extended to a strong generating set
relative to the base obtained. Optional parameter with default
value ``self.generators``.
Returns
=======
(base, strong_gens)
``base`` is the base obtained, and ``strong_gens`` is the strong
generating set relative to it. The original parameters ``base``,
``gens`` remain unchanged.
Examples
========
>>> from sympy.combinatorics.named_groups import AlternatingGroup
>>> from sympy.combinatorics.perm_groups import PermutationGroup
>>> from sympy.combinatorics.testutil import _verify_bsgs
>>> A = AlternatingGroup(7)
>>> base = [2, 3]
>>> seq = [2, 3]
>>> base, strong_gens = A.schreier_sims_incremental(base=seq)
>>> _verify_bsgs(A, base, strong_gens)
True
>>> base[:2]
[2, 3]
Notes
=====
This version of the Schreier-Sims algorithm runs in polynomial time.
There are certain assumptions in the implementation - if the trivial
group is provided, ``base`` and ``gens`` are returned immediately,
as any sequence of points is a base for the trivial group. If the
identity is present in the generators ``gens``, it is removed as
it is a redundant generator.
The implementation is described in [1], pp. 90-93.
See Also
========
schreier_sims, schreier_sims_random
"""
if base is None:
base = []
if gens is None:
gens = self.generators[:]
degree = self.degree
id_af = list(range(degree))
# handle the trivial group
if len(gens) == 1 and gens[0].is_Identity:
return base, gens
# prevent side effects
_base, _gens = base[:], gens[:]
# remove the identity as a generator
_gens = [x for x in _gens if not x.is_Identity]
# make sure no generator fixes all base points
for gen in _gens:
if all(x == gen._array_form[x] for x in _base):
for new in id_af:
if gen._array_form[new] != new:
break
else:
assert None # can this ever happen?
_base.append(new)
# distribute generators according to basic stabilizers
strong_gens_distr = _distribute_gens_by_base(_base, _gens)
# initialize the basic stabilizers, basic orbits and basic transversals
orbs = {}
transversals = {}
base_len = len(_base)
for i in range(base_len):
transversals[i] = dict(_orbit_transversal(degree, strong_gens_distr[i],
_base[i], pairs=True, af=True))
orbs[i] = list(transversals[i].keys())
# main loop: amend the stabilizer chain until we have generators
# for all stabilizers
i = base_len - 1
while i >= 0:
# this flag is used to continue with the main loop from inside
# a nested loop
continue_i = False
# test the generators for being a strong generating set
db = {}
for beta, u_beta in list(transversals[i].items()):
for gen in strong_gens_distr[i]:
gb = gen._array_form[beta]
u1 = transversals[i][gb]
g1 = _af_rmul(gen._array_form, u_beta)
if g1 != u1:
# test if the schreier generator is in the i+1-th
# would-be basic stabilizer
y = True
try:
u1_inv = db[gb]
except KeyError:
u1_inv = db[gb] = _af_invert(u1)
schreier_gen = _af_rmul(u1_inv, g1)
h, j = _strip_af(schreier_gen, _base, orbs, transversals, i)
if j <= base_len:
# new strong generator h at level j
y = False
elif h:
# h fixes all base points
y = False
moved = 0
while h[moved] == moved:
moved += 1
_base.append(moved)
base_len += 1
strong_gens_distr.append([])
if y is False:
# if a new strong generator is found, update the
# data structures and start over
h = _af_new(h)
for l in range(i + 1, j):
strong_gens_distr[l].append(h)
transversals[l] =\
dict(_orbit_transversal(degree, strong_gens_distr[l],
_base[l], pairs=True, af=True))
orbs[l] = list(transversals[l].keys())
i = j - 1
# continue main loop using the flag
continue_i = True
if continue_i is True:
break
if continue_i is True:
break
if continue_i is True:
continue
i -= 1
# build the strong generating set
strong_gens = list(uniq(i for gens in strong_gens_distr for i in gens))
return _base, strong_gens
def schreier_sims_random(self, base=None, gens=None, consec_succ=10,
_random_prec=None):
r"""Randomized Schreier-Sims algorithm.
The randomized Schreier-Sims algorithm takes the sequence ``base``
and the generating set ``gens``, and extends ``base`` to a base, and
``gens`` to a strong generating set relative to that base with
probability of a wrong answer at most ``2^{-consec\_succ}``,
provided the random generators are sufficiently random.
Parameters
==========
base
The sequence to be extended to a base.
gens
The generating set to be extended to a strong generating set.
consec_succ
The parameter defining the probability of a wrong answer.
_random_prec
An internal parameter used for testing purposes.
Returns
=======
(base, strong_gens)
``base`` is the base and ``strong_gens`` is the strong generating
set relative to it.
Examples
========
>>> from sympy.combinatorics.perm_groups import PermutationGroup
>>> from sympy.combinatorics.testutil import _verify_bsgs
>>> from sympy.combinatorics.named_groups import SymmetricGroup
>>> S = SymmetricGroup(5)
>>> base, strong_gens = S.schreier_sims_random(consec_succ=5)
>>> _verify_bsgs(S, base, strong_gens) #doctest: +SKIP
True
Notes
=====
The algorithm is described in detail in [1], pp. 97-98. It extends
the orbits ``orbs`` and the permutation groups ``stabs`` to
basic orbits and basic stabilizers for the base and strong generating
set produced in the end.
The idea of the extension process
is to "sift" random group elements through the stabilizer chain
and amend the stabilizers/orbits along the way when a sift
is not successful.
The helper function ``_strip`` is used to attempt
to decompose a random group element according to the current
state of the stabilizer chain and report whether the element was
fully decomposed (successful sift) or not (unsuccessful sift). In
the latter case, the level at which the sift failed is reported and
used to amend ``stabs``, ``base``, ``gens`` and ``orbs`` accordingly.
The halting condition is for ``consec_succ`` consecutive successful
sifts to pass. This makes sure that the current ``base`` and ``gens``
form a BSGS with probability at least ``1 - 1/\text{consec\_succ}``.
See Also
========
schreier_sims
"""
if base is None:
base = []
if gens is None:
gens = self.generators
base_len = len(base)
n = self.degree
# make sure no generator fixes all base points
for gen in gens:
if all(gen(x) == x for x in base):
new = 0
while gen._array_form[new] == new:
new += 1
base.append(new)
base_len += 1
# distribute generators according to basic stabilizers
strong_gens_distr = _distribute_gens_by_base(base, gens)
# initialize the basic stabilizers, basic transversals and basic orbits
transversals = {}
orbs = {}
for i in range(base_len):
transversals[i] = dict(_orbit_transversal(n, strong_gens_distr[i],
base[i], pairs=True))
orbs[i] = list(transversals[i].keys())
# initialize the number of consecutive elements sifted
c = 0
# start sifting random elements while the number of consecutive sifts
# is less than consec_succ
while c < consec_succ:
if _random_prec is None:
g = self.random_pr()
else:
g = _random_prec['g'].pop()
h, j = _strip(g, base, orbs, transversals)
y = True
# determine whether a new base point is needed
if j <= base_len:
y = False
elif not h.is_Identity:
y = False
moved = 0
while h(moved) == moved:
moved += 1
base.append(moved)
base_len += 1
strong_gens_distr.append([])
# if the element doesn't sift, amend the strong generators and
# associated stabilizers and orbits
if y is False:
for l in range(1, j):
strong_gens_distr[l].append(h)
transversals[l] = dict(_orbit_transversal(n,
strong_gens_distr[l], base[l], pairs=True))
orbs[l] = list(transversals[l].keys())
c = 0
else:
c += 1
# build the strong generating set
strong_gens = strong_gens_distr[0][:]
for gen in strong_gens_distr[1]:
if gen not in strong_gens:
strong_gens.append(gen)
return base, strong_gens
def schreier_vector(self, alpha):
"""Computes the schreier vector for ``alpha``.
The Schreier vector efficiently stores information
about the orbit of ``alpha``. It can later be used to quickly obtain
elements of the group that send ``alpha`` to a particular element
in the orbit. Notice that the Schreier vector depends on the order
in which the group generators are listed. For a definition, see [3].
Since list indices start from zero, we adopt the convention to use
"None" instead of 0 to signify that an element doesn't belong
to the orbit.
For the algorithm and its correctness, see [2], pp.78-80.
Examples
========
>>> from sympy.combinatorics.perm_groups import PermutationGroup
>>> from sympy.combinatorics.permutations import Permutation
>>> a = Permutation([2, 4, 6, 3, 1, 5, 0])
>>> b = Permutation([0, 1, 3, 5, 4, 6, 2])
>>> G = PermutationGroup([a, b])
>>> G.schreier_vector(0)
[-1, None, 0, 1, None, 1, 0]
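The points with a non-``None`` entry are exactly the orbit of ``alpha``:
>>> G.orbit(0) == {i for i, r in enumerate(G.schreier_vector(0)) if r is not None}
True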
See Also
========
orbit
"""
n = self.degree
v = [None]*n
v[alpha] = -1
orb = [alpha]
used = [False]*n
used[alpha] = True
gens = self.generators
r = len(gens)
for b in orb:
for i in range(r):
temp = gens[i]._array_form[b]
if used[temp] is False:
orb.append(temp)
used[temp] = True
v[temp] = i
return v
def stabilizer(self, alpha):
r"""Return the stabilizer subgroup of ``alpha``.
The stabilizer of ``\alpha`` is the group ``G_\alpha =
\{g \in G | g(\alpha) = \alpha\}``.
For a proof of correctness, see [1], p.79.
Examples
========
>>> from sympy.combinatorics import Permutation
>>> Permutation.print_cyclic = True
>>> from sympy.combinatorics.perm_groups import PermutationGroup
>>> from sympy.combinatorics.named_groups import DihedralGroup
>>> G = DihedralGroup(6)
>>> G.stabilizer(5)
PermutationGroup([
(5)(0 4)(1 3),
(5)])
See Also
========
orbit
"""
return PermGroup(_stabilizer(self._degree, self._generators, alpha))
@property
def strong_gens(self):
"""Return a strong generating set from the Schreier-Sims algorithm.
A generating set ``S = \{g_1, g_2, ..., g_t\}`` for a permutation group
``G`` is a strong generating set relative to the sequence of points
(referred to as a "base") ``(b_1, b_2, ..., b_k)`` if, for
``1 \leq i \leq k`` we have that the intersection of the pointwise
stabilizer ``G^{(i+1)} := G_{b_1, b_2, ..., b_i}`` with ``S`` generates
the pointwise stabilizer ``G^{(i+1)}``. The concepts of a base and
strong generating set and their applications are discussed in depth
in [1], pp. 87-89 and [2], pp. 55-57.
Examples
========
>>> from sympy.combinatorics.named_groups import DihedralGroup
>>> D = DihedralGroup(4)
>>> D.strong_gens
[(0 1 2 3), (0 3)(1 2), (1 3)]
>>> D.base
[0, 1]
See Also
========
base, basic_transversals, basic_orbits, basic_stabilizers
"""
if self._strong_gens == []:
self.schreier_sims()
return self._strong_gens
def subgroup_search(self, prop, base=None, strong_gens=None, tests=None,
init_subgroup=None):
"""Find the subgroup of all elements satisfying the property ``prop``.
This is done by a depth-first search with respect to base images that
uses several tests to prune the search tree.
Parameters
==========
prop
The property to be used. Has to be callable on group elements
and always return ``True`` or ``False``. It is assumed that
all group elements satisfying ``prop`` indeed form a subgroup.
base
A base for the supergroup.
strong_gens
A strong generating set for the supergroup.
tests
A list of callables of length equal to the length of ``base``.
These are used to rule out group elements by partial base images,
so that ``tests[l](g)`` returns False if the element ``g`` is known
not to satisfy ``prop`` based on where ``g`` sends the first ``l + 1`` base
points.
init_subgroup
if a subgroup of the sought group is
known in advance, it can be passed to the function as this
parameter.
Returns
=======
res
The subgroup of all elements satisfying ``prop``. The generating
set for this group is guaranteed to be a strong generating set
relative to the base ``base``.
Examples
========
>>> from sympy.combinatorics.named_groups import (SymmetricGroup,
... AlternatingGroup)
>>> from sympy.combinatorics.perm_groups import PermutationGroup
>>> from sympy.combinatorics.testutil import _verify_bsgs
>>> S = SymmetricGroup(7)
>>> prop_even = lambda x: x.is_even
>>> base, strong_gens = S.schreier_sims_incremental()
>>> G = S.subgroup_search(prop_even, base=base, strong_gens=strong_gens)
>>> G.is_subgroup(AlternatingGroup(7))
True
>>> _verify_bsgs(G, base, G.generators)
True
Notes
=====
This function is extremely lengthy and complicated and will require
some careful attention. The implementation is described in
[1], pp. 114-117, and the comments for the code here follow the lines
of the pseudocode in the book for clarity.
The complexity is exponential in general, since the search process by
itself visits all members of the supergroup. However, there are a lot
of tests which are used to prune the search tree, and users can define
their own tests via the ``tests`` parameter, so in practice the search
can be acceptably fast for many computations.
A crucial part in the procedure is the frequent base change performed
(this is line 11 in the pseudocode) in order to obtain a new basic
stabilizer. The book mentions that this can be done by using
``.baseswap(...)``; however, the current implementation uses a more
straightforward way to find the next basic stabilizer - calling the
function ``.stabilizer(...)`` on the previous basic stabilizer.
"""
# initialize BSGS and basic group properties
def get_reps(orbits):
# get the minimal element in the base ordering
return [min(orbit, key = lambda x: base_ordering[x]) \
for orbit in orbits]
def update_nu(l):
temp_index = len(basic_orbits[l]) + 1 -\
len(res_basic_orbits_init_base[l])
# this corresponds to the element larger than all points
if temp_index >= len(sorted_orbits[l]):
nu[l] = base_ordering[degree]
else:
nu[l] = sorted_orbits[l][temp_index]
if base is None:
base, strong_gens = self.schreier_sims_incremental()
base_len = len(base)
degree = self.degree
identity = _af_new(list(range(degree)))
base_ordering = _base_ordering(base, degree)
# add an element larger than all points
base_ordering.append(degree)
# add an element smaller than all points
base_ordering.append(-1)
# compute BSGS-related structures
strong_gens_distr = _distribute_gens_by_base(base, strong_gens)
basic_orbits, transversals = _orbits_transversals_from_bsgs(base,
strong_gens_distr)
# handle subgroup initialization and tests
if init_subgroup is None:
init_subgroup = PermutationGroup([identity])
if tests is None:
trivial_test = lambda x: True
tests = []
for i in range(base_len):
tests.append(trivial_test)
# line 1: more initializations.
res = init_subgroup
f = base_len - 1
l = base_len - 1
# line 2: set the base for K to the base for G
res_base = base[:]
# line 3: compute BSGS and related structures for K
res_base, res_strong_gens = res.schreier_sims_incremental(
base=res_base)
res_strong_gens_distr = _distribute_gens_by_base(res_base,
res_strong_gens)
res_generators = res.generators
res_basic_orbits_init_base = \
[_orbit(degree, res_strong_gens_distr[i], res_base[i])\
for i in range(base_len)]
# initialize orbit representatives
orbit_reps = [None]*base_len
# line 4: orbit representatives for f-th basic stabilizer of K
orbits = _orbits(degree, res_strong_gens_distr[f])
orbit_reps[f] = get_reps(orbits)
# line 5: remove the base point from the representatives to avoid
# getting the identity element as a generator for K
orbit_reps[f].remove(base[f])
# line 6: more initializations
c = [0]*base_len
u = [identity]*base_len
sorted_orbits = [None]*base_len
for i in range(base_len):
sorted_orbits[i] = basic_orbits[i][:]
sorted_orbits[i].sort(key=lambda point: base_ordering[point])
# line 7: initializations
mu = [None]*base_len
nu = [None]*base_len
# this corresponds to the element smaller than all points
mu[l] = degree + 1
update_nu(l)
# initialize computed words
computed_words = [identity]*base_len
# line 8: main loop
while True:
# apply all the tests
while l < base_len - 1 and \
computed_words[l](base[l]) in orbit_reps[l] and \
base_ordering[mu[l]] < \
base_ordering[computed_words[l](base[l])] < \
base_ordering[nu[l]] and \
tests[l](computed_words):
# line 11: change the (partial) base of K
new_point = computed_words[l](base[l])
res_base[l] = new_point
new_stab_gens = _stabilizer(degree, res_strong_gens_distr[l],
new_point)
res_strong_gens_distr[l + 1] = new_stab_gens
# line 12: calculate minimal orbit representatives for the
# l+1-th basic stabilizer
orbits = _orbits(degree, new_stab_gens)
orbit_reps[l + 1] = get_reps(orbits)
# line 13: amend sorted orbits
l += 1
temp_orbit = [computed_words[l - 1](point) for point
in basic_orbits[l]]
temp_orbit.sort(key=lambda point: base_ordering[point])
sorted_orbits[l] = temp_orbit
# lines 14 and 15: update variables used in the minimality tests
new_mu = degree + 1
for i in range(l):
if base[l] in res_basic_orbits_init_base[i]:
candidate = computed_words[i](base[i])
if base_ordering[candidate] > base_ordering[new_mu]:
new_mu = candidate
mu[l] = new_mu
update_nu(l)
# line 16: determine the new transversal element
c[l] = 0
temp_point = sorted_orbits[l][c[l]]
gamma = computed_words[l - 1]._array_form.index(temp_point)
u[l] = transversals[l][gamma]
# update computed words
computed_words[l] = rmul(computed_words[l - 1], u[l])
# lines 17 & 18: apply the tests to the group element found
g = computed_words[l]
temp_point = g(base[l])
if l == base_len - 1 and \
base_ordering[mu[l]] < \
base_ordering[temp_point] < base_ordering[nu[l]] and \
temp_point in orbit_reps[l] and \
tests[l](computed_words) and \
prop(g):
# line 19: reset the base of K
res_generators.append(g)
res_base = base[:]
# line 20: recalculate basic orbits (and transversals)
res_strong_gens.append(g)
res_strong_gens_distr = _distribute_gens_by_base(res_base,
res_strong_gens)
res_basic_orbits_init_base = \
[_orbit(degree, res_strong_gens_distr[i], res_base[i]) \
for i in range(base_len)]
# line 21: recalculate orbit representatives
# line 22: reset the search depth
orbit_reps[f] = get_reps(orbits)
l = f
# line 23: go up the tree until reaching the first branch that is
# not fully searched
while l >= 0 and c[l] == len(basic_orbits[l]) - 1:
l = l - 1
# line 24: if the entire tree is traversed, return K
if l == -1:
return PermutationGroup(res_generators)
# lines 25-27: update orbit representatives
if l < f:
# line 26
f = l
c[l] = 0
# line 27
temp_orbits = _orbits(degree, res_strong_gens_distr[f])
orbit_reps[f] = get_reps(temp_orbits)
# line 28: update variables used for minimality testing
mu[l] = degree + 1
temp_index = len(basic_orbits[l]) + 1 - \
len(res_basic_orbits_init_base[l])
if temp_index >= len(sorted_orbits[l]):
nu[l] = base_ordering[degree]
else:
nu[l] = sorted_orbits[l][temp_index]
# line 29: set the next element from the current branch and update
# accordingly
c[l] += 1
if l == 0:
gamma = sorted_orbits[l][c[l]]
else:
gamma = computed_words[l - 1]._array_form.index(sorted_orbits[l][c[l]])
u[l] = transversals[l][gamma]
if l == 0:
computed_words[l] = u[l]
else:
computed_words[l] = rmul(computed_words[l - 1], u[l])
@property
def transitivity_degree(self):
"""Compute the degree of transitivity of the group.
A permutation group ``G`` acting on ``\Omega = \{0, 1, ..., n-1\}`` is
``k``-fold transitive, if, for any k points
``(a_1, a_2, ..., a_k)\in\Omega`` and any k points
``(b_1, b_2, ..., b_k)\in\Omega`` there exists ``g\in G`` such that
``g(a_1)=b_1, g(a_2)=b_2, ..., g(a_k)=b_k``
The degree of transitivity of ``G`` is the maximum ``k`` such that
``G`` is ``k``-fold transitive. ([8])
Examples
========
>>> from sympy.combinatorics.perm_groups import PermutationGroup
>>> from sympy.combinatorics.permutations import Permutation
>>> a = Permutation([1, 2, 0])
>>> b = Permutation([1, 0, 2])
>>> G = PermutationGroup([a, b])
>>> G.transitivity_degree
3
See Also
========
is_transitive, orbit
"""
if self._transitivity_degree is None:
n = self.degree
G = self
# if G is k-transitive, a tuple (a_0,..,a_k)
# can be brought to (b_0,...,b_(k-1), b_k)
# where b_0,...,b_(k-1) are fixed points;
# consider the group G_k which stabilizes b_0,...,b_(k-1)
# if G_k is transitive on the subset excluding b_0,...,b_(k-1)
# then G is (k+1)-transitive
for i in range(n):
orb = G.orbit(i)
if len(orb) != n - i:
self._transitivity_degree = i
return i
G = G.stabilizer(i)
self._transitivity_degree = n
return n
else:
return self._transitivity_degree
def is_group(self):
"""Return True if the group meets three criteria: identity is present,
the inverse of every element is also an element, and the product of
any two elements is also an element. If any of the tests fail, False
is returned.
Examples
========
>>> from sympy.combinatorics import Permutation
>>> Permutation.print_cyclic = True
>>> from sympy.combinatorics import PermutationGroup
>>> from sympy.combinatorics.polyhedron import tetrahedron
The permutation group given in the tetrahedron object is not
a true group:
>>> G = tetrahedron.pgroup
>>> G.is_group()
False
But the group generated by the permutations in the tetrahedron
pgroup is a proper group:
>>> H = PermutationGroup(list(G.generate()))
>>> H.is_group()
True
The identity permutation is present:
>>> H.has(Permutation(G.degree - 1))
True
The product of any two elements from the group is also in the group:
>>> from sympy import TableForm
>>> g = list(H)
>>> n = len(g)
>>> m = []
>>> for i in g:
... m.append([g.index(i*H) for H in g])
...
>>> TableForm(m, headings=[range(n), range(n)], wipe_zeros=False)
| 0 1 2 3 4 5 6 7 8 9 10 11
----------------------------------------
0 | 11 0 8 10 6 2 7 4 5 3 9 1
1 | 0 1 2 3 4 5 6 7 8 9 10 11
2 | 6 2 7 4 5 3 9 1 11 0 8 10
3 | 5 3 9 1 11 0 8 10 6 2 7 4
4 | 3 4 0 2 10 6 11 8 9 7 1 5
5 | 4 5 6 7 8 9 10 11 0 1 2 3
6 | 10 6 11 8 9 7 1 5 3 4 0 2
7 | 9 7 1 5 3 4 0 2 10 6 11 8
8 | 7 8 4 6 2 10 3 0 1 11 5 9
9 | 8 9 10 11 0 1 2 3 4 5 6 7
10 | 2 10 3 0 1 11 5 9 7 8 4 6
11 | 1 11 5 9 7 8 4 6 2 10 3 0
>>>
The entries in the table give the element in the group corresponding
to the product of a given column element and row element:
>>> g[3]*g[2] == g[9]
True
The inverse of every element is also in the group:
>>> TableForm([[g.index(~gi) for gi in g]], headings=[[], range(n)],
... wipe_zeros=False)
0 1 2 3 4 5 6 7 8 9 10 11
---------------------------
11 1 7 3 10 9 6 2 8 5 4 0
So we see that g[1] and g[3] are their own inverses, while
g[7] == ~g[2].
"""
# identity present
I = Permutation(size=self.degree)
for g in self:
if g == I:
break
else:
return False
# associativity already holds: a*(b*c) == (a*b)*c for permutations
# inverse of each is present
if not all(self.has(~a) for a in self):
return False
# closure
for a in self:
for b in self:
if not self.has(a*b):
return False
return True
def _orbit(degree, generators, alpha, action='tuples'):
r"""Compute the orbit of alpha ``\{g(\alpha) | g \in G\}`` as a set.
The time complexity of the algorithm used here is ``O(|Orb|*r)`` where
``|Orb|`` is the size of the orbit and ``r`` is the number of generators of
the group. For a more detailed analysis, see [1], p.78, [2], pp. 19-21.
Here alpha can be a single point, or a list of points.
If alpha is a single point, the ordinary orbit is computed.
If alpha is a list of points, there are three available options:
'union' - computes the union of the orbits of the points in the list
'tuples' - computes the orbit of the list interpreted as an ordered
tuple under the group action (i.e., g((1, 2, 3)) = (g(1), g(2), g(3)))
'sets' - computes the orbit of the list interpreted as a set
Examples
========
>>> from sympy.combinatorics import Permutation
>>> from sympy.combinatorics.perm_groups import PermutationGroup, _orbit
>>> a = Permutation([1, 2, 0, 4, 5, 6, 3])
>>> G = PermutationGroup([a])
>>> _orbit(G.degree, G.generators, 0)
set([0, 1, 2])
>>> _orbit(G.degree, G.generators, [0, 4], 'union')
set([0, 1, 2, 3, 4, 5, 6])
See Also
========
orbit, orbit_transversal
"""
if not hasattr(alpha, '__getitem__'):
alpha = [alpha]
gens = [x._array_form for x in generators]
if len(alpha) == 1 or action == 'union':
orb = alpha
used = [False]*degree
for el in alpha:
used[el] = True
for b in orb:
for gen in gens:
temp = gen[b]
if used[temp] == False:
orb.append(temp)
used[temp] = True
return set(orb)
elif action == 'tuples':
alpha = tuple(alpha)
orb = [alpha]
used = set([alpha])
for b in orb:
for gen in gens:
temp = tuple([gen[x] for x in b])
if temp not in used:
orb.append(temp)
used.add(temp)
return set(orb)
elif action == 'sets':
alpha = frozenset(alpha)
orb = [alpha]
used = set([alpha])
for b in orb:
for gen in gens:
temp = frozenset([gen[x] for x in b])
if temp not in used:
orb.append(temp)
used.add(temp)
return set([tuple(x) for x in orb])
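# Sketch (not in the original file): the 'tuples' action on an ordered pair,
# reusing the generator ``a`` from the docstring above (0->1, 1->2, 2->0).
#   _orbit(G.degree, G.generators, [0, 1], 'tuples')
# returns the set {(0, 1), (1, 2), (2, 0)}.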
def _orbits(degree, generators):
"""Compute the orbits of G.
Returns a list of the orbits, each given as a set of points.
Examples
========
>>> from sympy.combinatorics.permutations import Permutation
>>> from sympy.combinatorics.perm_groups import PermutationGroup, _orbits
>>> a = Permutation([0, 2, 1])
>>> b = Permutation([1, 0, 2])
>>> _orbits(a.size, [a, b])
[set([0, 1, 2])]
"""
seen = set() # elements that have already appeared in orbits
orbs = []
sorted_I = list(range(degree))
I = set(sorted_I)
while I:
i = sorted_I[0]
orb = _orbit(degree, generators, i)
orbs.append(orb)
# remove all indices that are in this orbit
I -= orb
sorted_I = [i for i in sorted_I if i not in orb]
return orbs
def _orbit_transversal(degree, generators, alpha, pairs, af=False):
r"""Computes a transversal for the orbit of ``alpha`` as a set.
generators generators of the group ``G``
For a permutation group ``G``, a transversal for the orbit
``Orb = \{g(\alpha) | g \in G\}`` is a set
``\{g_\beta | g_\beta(\alpha) = \beta\}`` for ``\beta \in Orb``.
Note that there may be more than one possible transversal.
If ``pairs`` is set to ``True``, it returns the list of pairs
``(\beta, g_\beta)``. For a proof of correctness, see [1], p.79.
If ``af`` is ``True``, the transversal elements are given in array form.
Examples
========
>>> from sympy.combinatorics import Permutation
>>> Permutation.print_cyclic = True
>>> from sympy.combinatorics.named_groups import DihedralGroup
>>> from sympy.combinatorics.perm_groups import _orbit_transversal
>>> G = DihedralGroup(6)
>>> _orbit_transversal(G.degree, G.generators, 0, False)
[(5), (0 1 2 3 4 5), (0 5)(1 4)(2 3), (0 2 4)(1 3 5), (5)(0 4)(1 3), (0 3)(1 4)(2 5)]
"""
tr = [(alpha, list(range(degree)))]
used = [False]*degree
used[alpha] = True
gens = [x._array_form for x in generators]
for x, px in tr:
for gen in gens:
temp = gen[x]
if used[temp] == False:
tr.append((temp, _af_rmul(gen, px)))
used[temp] = True
if pairs:
if not af:
tr = [(x, _af_new(y)) for x, y in tr]
return tr
if af:
return [y for _, y in tr]
return [_af_new(y) for _, y in tr]
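# Sketch (not in the original file): with pairs=True the result feeds the
# {orbit point: transversal element} dicts used by the BSGS machinery above.
#   transversals = dict(_orbit_transversal(G.degree, G.generators, 0, pairs=True))
#   transversals[3](0)  # -> 3, since each g_beta maps alpha to beta by definition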
def _stabilizer(degree, generators, alpha):
r"""Return the stabilizer subgroup of ``alpha``.
The stabilizer of ``\alpha`` is the group ``G_\alpha =
\{g \in G | g(\alpha) = \alpha\}``.
For a proof of correctness, see [1], p.79.
degree degree of G
generators generators of G
Examples
========
>>> from sympy.combinatorics import Permutation
>>> Permutation.print_cyclic = True
>>> from sympy.combinatorics.perm_groups import _stabilizer
>>> from sympy.combinatorics.named_groups import DihedralGroup
>>> G = DihedralGroup(6)
>>> _stabilizer(G.degree, G.generators, 5)
[(5)(0 4)(1 3), (5)]
See Also
========
orbit
"""
orb = [alpha]
table = {alpha: list(range(degree))}
table_inv = {alpha: list(range(degree))}
used = [False]*degree
used[alpha] = True
gens = [x._array_form for x in generators]
stab_gens = []
for b in orb:
for gen in gens:
temp = gen[b]
if used[temp] is False:
gen_temp = _af_rmul(gen, table[b])
orb.append(temp)
table[temp] = gen_temp
table_inv[temp] = _af_invert(gen_temp)
used[temp] = True
else:
schreier_gen = _af_rmuln(table_inv[temp], gen, table[b])
if schreier_gen not in stab_gens:
stab_gens.append(schreier_gen)
return [_af_new(x) for x in stab_gens]
PermGroup = PermutationGroup
|
# Copyright 2014 Mirantis, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
class Grub(object):
def __init__(self, version=None, kernel_params='',
kernel_name=None, kernel_regexp=None,
initrd_name=None, initrd_regexp=None):
self.version = version
self.kernel_params = kernel_params
self.kernel_name = kernel_name
self.initrd_name = initrd_name
self.kernel_regexp = kernel_regexp
self.initrd_regexp = initrd_regexp
def append_kernel_params(self, *kernel_params):
for kp in kernel_params:
self.kernel_params = '{0} {1}'.format(self.kernel_params, kp)
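# Illustrative usage (a sketch, not part of the original module):
#   grub = Grub(version=2, kernel_params='quiet')
#   grub.append_kernel_params('splash', 'nomodeset')
#   grub.kernel_params  # -> 'quiet splash nomodeset'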
|
# Copyright 2014 IBM Corp.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import mock
from oslo_serialization import jsonutils
import webob
from cinder import context
from cinder import db
from cinder import objects
from cinder import test
from cinder.tests.unit.api import fakes
from cinder.tests.unit import utils
class VolumeUnmanageTest(test.TestCase):
"""Test cases for cinder/api/contrib/volume_unmanage.py
The API extension adds an action to volumes, "os-unmanage", which will
effectively issue a delete operation on the volume, but with a flag set
that means that a different method will be invoked on the driver, so that
the volume is not actually deleted in the storage backend.
In this set of test cases, we are ensuring that the code correctly parses
the request structure, raises the appropriate exceptions when the request
is invalid, and calls down into cinder.volume.api.API.delete with the
correct arguments.
"""
def setUp(self):
super(VolumeUnmanageTest, self).setUp()
self.ctxt = context.RequestContext('admin', 'fake_project', True)
api = fakes.router.APIRouter()
self.app = fakes.urlmap.URLMap()
self.app['/v2'] = api
def _get_resp(self, volume_id):
"""Helper to build an os-unmanage req for the specified volume_id."""
req = webob.Request.blank('/v2/%s/volumes/%s/action' %
(self.ctxt.project_id, volume_id))
req.method = 'POST'
req.headers['Content-Type'] = 'application/json'
req.environ['cinder.context'] = self.ctxt
body = {'os-unmanage': ''}
req.body = jsonutils.dump_as_bytes(body)
res = req.get_response(self.app)
return res
@mock.patch('cinder.volume.rpcapi.VolumeAPI.delete_volume')
def test_unmanage_volume_ok(self, mock_rpcapi):
"""Return success for valid and unattached volume."""
vol = utils.create_volume(self.ctxt)
res = self._get_resp(vol.id)
self.assertEqual(202, res.status_int, res)
mock_rpcapi.assert_called_once_with(self.ctxt, mock.ANY, True)
vol = objects.volume.Volume.get_by_id(self.ctxt, vol.id)
self.assertEqual('deleting', vol.status)
db.volume_destroy(self.ctxt, vol.id)
def test_unmanage_volume_bad_volume_id(self):
"""Return 404 if the volume does not exist."""
res = self._get_resp('nonexistent-volume-id')
self.assertEqual(404, res.status_int, res)
def test_unmanage_volume_attached(self):
"""Return 400 if the volume exists but is attached."""
vol = utils.create_volume(self.ctxt, status='in-use',
attach_status='attached')
res = self._get_resp(vol.id)
self.assertEqual(400, res.status_int, res)
db.volume_destroy(self.ctxt, vol.id)
def test_unmanage_volume_with_snapshots(self):
"""Return 400 if the volume exists but has snapshots."""
vol = utils.create_volume(self.ctxt)
snap = utils.create_snapshot(self.ctxt, vol.id)
res = self._get_resp(vol.id)
self.assertEqual(400, res.status_int, res)
db.volume_destroy(self.ctxt, vol.id)
db.snapshot_destroy(self.ctxt, snap.id)
|
#!/usr/bin/python
# Copyright (c) 2009 Las Cumbres Observatory (www.lcogt.net)
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
'''protobuf/controller.py - Socket implementation of Google's Protocol Buffers
RPC service interface.
This package contains classes providing a socket implementation of the
RPCController abstract class.
Authors: Martin Norbury ([email protected])
Eric Saunders ([email protected])
May 2009
'''
# Third-party imports
import google.protobuf.service as service
# Module imports
from protobuf import logger
class SocketRpcController(service.RpcController):
''' RpcController implementation to be used by the SocketRpcChannel class.
The RpcController is used to mediate a single method call.
'''
def __init__(self):
'''Constructor which initializes the controller's state.'''
self.fail = False
self.error = None
self.reason = None
def handleError(self, error_code, message):
'''Log and set the controller state.'''
self.fail = True
self.reason = error_code
self.error = message
def reset(self):
'''Resets the controller i.e. clears the error state.'''
self.fail = False
self.error = None
self.reason = None
def failed(self):
'''Returns True if the controller is in a failed state.'''
return self.fail
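# Illustrative usage (a sketch, not part of the original module; ``channel``,
# ``method``, ``request`` and ``callback`` stand in for the usual protobuf
# RPC machinery and are assumptions here):
#   controller = SocketRpcController()
#   channel.CallMethod(method, controller, request, response_class, callback)
#   if controller.failed():
#       logger.error('RPC failed: %s (%s)' % (controller.error, controller.reason))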
|
# encoding: utf-8
"""
Test suite for the docx.api module
"""
from __future__ import (
absolute_import, division, print_function, unicode_literals
)
import pytest
from docx.api import Document
from docx.enum.text import WD_BREAK
from docx.opc.constants import CONTENT_TYPE as CT, RELATIONSHIP_TYPE as RT
from docx.opc.coreprops import CoreProperties
from docx.package import Package
from docx.parts.document import DocumentPart, InlineShapes
from docx.parts.numbering import NumberingPart
from docx.parts.styles import StylesPart
from docx.section import Section
from docx.shape import InlineShape
from docx.table import Table
from docx.text import Paragraph, Run
from .unitutil.mock import (
instance_mock, class_mock, method_mock, property_mock, var_mock
)
class DescribeDocument(object):
def it_opens_a_docx_on_construction(self, init_fixture):
docx_, open_ = init_fixture
document = Document(docx_)
open_.assert_called_once_with(docx_)
assert isinstance(document, Document)
def it_can_open_a_docx_file(self, open_fixture):
docx_, Package_, package_, document_part_ = open_fixture
document_part, package = Document._open(docx_)
Package_.open.assert_called_once_with(docx_)
assert document_part is document_part_
assert package is package_
def it_opens_default_template_if_no_file_provided(
self, Package_, default_docx_):
Document._open(None)
Package_.open.assert_called_once_with(default_docx_)
def it_should_raise_if_not_a_Word_file(self, Package_, package_, docx_):
package_.main_document.content_type = 'foobar'
with pytest.raises(ValueError):
Document._open(docx_)
def it_can_add_a_heading(self, add_heading_fixture):
document, add_paragraph_, paragraph_, text, level, style = (
add_heading_fixture
)
paragraph = document.add_heading(text, level)
add_paragraph_.assert_called_once_with(text, style)
assert paragraph is paragraph_
def it_should_raise_on_heading_level_out_of_range(self, document):
with pytest.raises(ValueError):
document.add_heading(level=-1)
with pytest.raises(ValueError):
document.add_heading(level=10)
def it_can_add_a_paragraph(self, add_paragraph_fixture):
document, document_part_, text, style, paragraph_ = (
add_paragraph_fixture
)
paragraph = document.add_paragraph(text, style)
document_part_.add_paragraph.assert_called_once_with(text, style)
assert paragraph is paragraph_
def it_can_add_a_page_break(self, add_page_break_fixture):
document, document_part_, paragraph_, run_ = add_page_break_fixture
paragraph = document.add_page_break()
document_part_.add_paragraph.assert_called_once_with()
paragraph_.add_run.assert_called_once_with()
run_.add_break.assert_called_once_with(WD_BREAK.PAGE)
assert paragraph is paragraph_
def it_can_add_a_picture(self, add_picture_fixture):
document, image_path_, width, height, run_, picture_ = (
add_picture_fixture
)
picture = document.add_picture(image_path_, width, height)
run_.add_picture.assert_called_once_with(image_path_, width, height)
assert picture is picture_
def it_can_add_a_section(self, add_section_fixture):
document, start_type_, section_ = add_section_fixture
section = document.add_section(start_type_)
document._document_part.add_section.assert_called_once_with(
start_type_
)
assert section is section_
def it_can_add_a_table(self, add_table_fixture):
document, rows, cols, style, document_part_, expected_style, table_ = (
add_table_fixture
)
table = document.add_table(rows, cols, style)
document_part_.add_table.assert_called_once_with(rows, cols)
assert table.style == expected_style
assert table == table_
def it_provides_access_to_the_document_inline_shapes(self, document):
body = document.inline_shapes
assert body is document._document_part.inline_shapes
def it_provides_access_to_the_document_paragraphs(
self, paragraphs_fixture):
document, paragraphs_ = paragraphs_fixture
paragraphs = document.paragraphs
assert paragraphs is paragraphs_
def it_provides_access_to_the_document_sections(self, document):
body = document.sections
assert body is document._document_part.sections
def it_provides_access_to_the_document_tables(self, tables_fixture):
document, tables_ = tables_fixture
tables = document.tables
assert tables is tables_
def it_can_save_the_package(self, save_fixture):
document, package_, file_ = save_fixture
document.save(file_)
package_.save.assert_called_once_with(file_)
def it_provides_access_to_the_core_properties(self, core_props_fixture):
document, core_properties_ = core_props_fixture
core_properties = document.core_properties
assert core_properties is core_properties_
def it_provides_access_to_the_numbering_part(self, num_part_get_fixture):
document, document_part_, numbering_part_ = num_part_get_fixture
numbering_part = document.numbering_part
document_part_.part_related_by.assert_called_once_with(RT.NUMBERING)
assert numbering_part is numbering_part_
def it_creates_numbering_part_on_first_access_if_not_present(
self, num_part_create_fixture):
document, NumberingPart_, document_part_, numbering_part_ = (
num_part_create_fixture
)
numbering_part = document.numbering_part
NumberingPart_.new.assert_called_once_with()
document_part_.relate_to.assert_called_once_with(
numbering_part_, RT.NUMBERING
)
assert numbering_part is numbering_part_
def it_provides_access_to_the_styles_part(self, styles_part_get_fixture):
document, document_part_, styles_part_ = styles_part_get_fixture
styles_part = document.styles_part
document_part_.part_related_by.assert_called_once_with(RT.STYLES)
assert styles_part is styles_part_
def it_creates_styles_part_on_first_access_if_not_present(
self, styles_part_create_fixture):
document, StylesPart_, document_part_, styles_part_ = (
styles_part_create_fixture
)
styles_part = document.styles_part
StylesPart_.new.assert_called_once_with()
document_part_.relate_to.assert_called_once_with(
styles_part_, RT.STYLES
)
assert styles_part is styles_part_
# fixtures -------------------------------------------------------
@pytest.fixture(params=[
('', None),
('', 'Heading1'),
('foo\rbar', 'BodyText'),
])
def add_paragraph_fixture(
self, request, document, document_part_, paragraph_):
text, style = request.param
return document, document_part_, text, style, paragraph_
@pytest.fixture(params=[0, 1, 2, 5, 9])
def add_heading_fixture(
self, request, document, add_paragraph_, paragraph_):
level = request.param
text = 'Spam vs. Bacon'
style = 'Title' if level == 0 else 'Heading%d' % level
return document, add_paragraph_, paragraph_, text, level, style
@pytest.fixture
def add_page_break_fixture(
self, document, document_part_, paragraph_, run_):
return document, document_part_, paragraph_, run_
@pytest.fixture
def add_picture_fixture(self, request, run_, picture_):
document = Document()
image_path_ = instance_mock(request, str, name='image_path_')
width, height = 100, 200
class_mock(request, 'docx.text.Run', return_value=run_)
run_.add_picture.return_value = picture_
return (document, image_path_, width, height, run_, picture_)
@pytest.fixture
def add_section_fixture(self, document, start_type_, section_):
return document, start_type_, section_
@pytest.fixture(params=[None, 'LightShading-Accent1', 'foobar'])
def add_table_fixture(self, request, document, document_part_, table_):
rows, cols = 4, 2
style = expected_style = request.param
return (
document, rows, cols, style, document_part_, expected_style,
table_
)
@pytest.fixture
def core_props_fixture(self, document, core_properties_):
document._package.core_properties = core_properties_
return document, core_properties_
@pytest.fixture
def init_fixture(self, docx_, open_):
return docx_, open_
@pytest.fixture
def num_part_get_fixture(self, document, document_part_, numbering_part_):
document_part_.part_related_by.return_value = numbering_part_
return document, document_part_, numbering_part_
@pytest.fixture
def open_fixture(self, docx_, Package_, package_, document_part_):
return docx_, Package_, package_, document_part_
@pytest.fixture
def paragraphs_fixture(self, document, paragraphs_):
return document, paragraphs_
@pytest.fixture
def save_fixture(self, request, open_, package_):
file_ = instance_mock(request, str)
document = Document()
return document, package_, file_
@pytest.fixture
def tables_fixture(self, document, tables_):
return document, tables_
# fixture components ---------------------------------------------
@pytest.fixture
def add_paragraph_(self, request, paragraph_):
return method_mock(
request, Document, 'add_paragraph', return_value=paragraph_
)
@pytest.fixture
def core_properties_(self, request):
return instance_mock(request, CoreProperties)
@pytest.fixture
def default_docx_(self, request):
return var_mock(request, 'docx.api._default_docx_path')
@pytest.fixture
def Document_inline_shapes_(self, request, inline_shapes_):
return property_mock(
request, Document, 'inline_shapes', return_value=inline_shapes_
)
@pytest.fixture
def document(self, open_):
return Document()
@pytest.fixture
def document_part_(
self, request, paragraph_, paragraphs_, section_, table_,
tables_):
document_part_ = instance_mock(
request, DocumentPart, content_type=CT.WML_DOCUMENT_MAIN
)
document_part_.add_paragraph.return_value = paragraph_
document_part_.add_section.return_value = section_
document_part_.add_table.return_value = table_
document_part_.paragraphs = paragraphs_
document_part_.tables = tables_
return document_part_
@pytest.fixture
def docx_(self, request):
return instance_mock(request, str)
@pytest.fixture
def inline_shapes_(self, request):
return instance_mock(request, InlineShapes)
@pytest.fixture
def num_part_create_fixture(
self, document, NumberingPart_, document_part_, numbering_part_):
document_part_.part_related_by.side_effect = KeyError
return document, NumberingPart_, document_part_, numbering_part_
@pytest.fixture
def NumberingPart_(self, request, numbering_part_):
NumberingPart_ = class_mock(request, 'docx.api.NumberingPart')
NumberingPart_.new.return_value = numbering_part_
return NumberingPart_
@pytest.fixture
def numbering_part_(self, request):
return instance_mock(request, NumberingPart)
@pytest.fixture
def open_(self, request, document_part_, package_):
return method_mock(
request, Document, '_open',
return_value=(document_part_, package_)
)
@pytest.fixture
def Package_(self, request, package_):
Package_ = class_mock(request, 'docx.api.Package')
Package_.open.return_value = package_
return Package_
@pytest.fixture
def package_(self, request, document_part_):
package_ = instance_mock(request, Package)
package_.main_document = document_part_
return package_
@pytest.fixture
def paragraph_(self, request, run_):
paragraph_ = instance_mock(request, Paragraph)
paragraph_.add_run.return_value = run_
return paragraph_
@pytest.fixture
def paragraphs_(self, request):
return instance_mock(request, list)
@pytest.fixture
def picture_(self, request):
return instance_mock(request, InlineShape)
@pytest.fixture
def run_(self, request):
return instance_mock(request, Run)
@pytest.fixture
def section_(self, request):
return instance_mock(request, Section)
@pytest.fixture
def start_type_(self, request):
return instance_mock(request, int)
@pytest.fixture
def StylesPart_(self, request, styles_part_):
StylesPart_ = class_mock(request, 'docx.api.StylesPart')
StylesPart_.new.return_value = styles_part_
return StylesPart_
@pytest.fixture
def styles_part_(self, request):
return instance_mock(request, StylesPart)
@pytest.fixture
def styles_part_create_fixture(
self, document, StylesPart_, document_part_, styles_part_):
document_part_.part_related_by.side_effect = KeyError
return document, StylesPart_, document_part_, styles_part_
@pytest.fixture
def styles_part_get_fixture(self, document, document_part_, styles_part_):
document_part_.part_related_by.return_value = styles_part_
return document, document_part_, styles_part_
@pytest.fixture
def table_(self, request):
return instance_mock(request, Table, style=None)
@pytest.fixture
def tables_(self, request):
return instance_mock(request, list)
|
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------
from typing import Any, Callable, Dict, Generic, Optional, TypeVar
import warnings
from azure.core.exceptions import ClientAuthenticationError, HttpResponseError, ResourceExistsError, ResourceNotFoundError, map_error
from azure.core.pipeline import PipelineResponse
from azure.core.pipeline.transport import AsyncHttpResponse, HttpRequest
from azure.mgmt.core.exceptions import ARMErrorFormat
from ... import models as _models
T = TypeVar('T')
ClsType = Optional[Callable[[PipelineResponse[HttpRequest, AsyncHttpResponse], T, Dict[str, Any]], Any]]
class HostSettingsOperations:
"""HostSettingsOperations async operations.
You should not instantiate this class directly. Instead, you should create a Client instance that
instantiates it for you and attaches it as an attribute.
:ivar models: Alias to model classes used in this operation group.
:type models: ~azure.mgmt.botservice.models
:param client: Client for service requests.
:param config: Configuration of service client.
:param serializer: An object model serializer.
:param deserializer: An object model deserializer.
"""
models = _models
def __init__(self, client, config, serializer, deserializer) -> None:
self._client = client
self._serialize = serializer
self._deserialize = deserializer
self._config = config
async def get(
self,
**kwargs: Any
) -> "_models.HostSettingsResponse":
"""Get per subscription settings needed to host bot in compute resource such as Azure App Service.
:keyword callable cls: A custom type or function that will be passed the direct response
:return: HostSettingsResponse, or the result of cls(response)
:rtype: ~azure.mgmt.botservice.models.HostSettingsResponse
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.HostSettingsResponse"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2021-03-01"
accept = "application/json"
# Construct URL
url = self.get.metadata['url'] # type: ignore
path_format_arguments = {
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
request = self._client.get(url, query_parameters, header_parameters)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
error = self._deserialize.failsafe_deserialize(_models.Error, response)
raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
deserialized = self._deserialize('HostSettingsResponse', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
get.metadata = {'url': '/subscriptions/{subscriptionId}/providers/Microsoft.BotService/hostSettings'} # type: ignore
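# Illustrative usage (a sketch, not part of the generated code; the client
# class name ``AzureBotService`` is an assumption for this package):
#   client = AzureBotService(credential, subscription_id)
#   settings = await client.host_settings.get()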
|
# -*- coding: utf-8 -*-
import json
from django.contrib.admin.sites import site
from protoLib.getStuff import getUserProfile
from protoExt.utils.utilsWeb import JsonError
from protoExt.utils.utilsWeb import doReturn
class cAux:
"""
Class for passing parameters
"""
def __init__(self):
self.viewCode = ''
self.viewEntity = ''
self.userProfile = None
def validateRequest( request ):
cBase = cAux()
if request.method != 'POST':
return cBase, JsonError('invalid message')
if not request.user:
return cBase, JsonError('readOnly User')
if not request.user.is_authenticated():
return cBase, JsonError('readOnly User')
if not request.user.is_active:
return cBase, JsonError('readOnly User')
cBase.viewCode = request.POST.get('viewCode', '').strip()
if len( cBase.viewCode ) == 0:
return cBase, JsonError('no viewCode')
cBase.userProfile = getUserProfile( request.user )
# Strip a stray trailing dot that comes from the JS side
if cBase.viewCode[-1] == '.':
cBase.viewCode = cBase.viewCode[:-1]
# If a value is provided, use it as the default
cBase.viewEntity = request.POST.get('viewEntity', cBase.viewCode ).strip()
# Check whether this is a model view and obtain the base name
if cBase.viewEntity.count(".") >= 2:
app, model = cBase.viewEntity.split(".")[:2]
cBase.viewEntity = app + '.' + model
return cBase, None
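# Sketch (hypothetical view, not part of the original module): the intended
# call pattern from a protoExt view function.
#   def myProtoView(request):
#       cBase, error = validateRequest(request)
#       if error:
#           return error
#       # ... use cBase.viewCode, cBase.viewEntity and cBase.userProfile ...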
def getReturnMsg( cBase ):
from protoLib.getStuff import getAllModelPermissions
jsondict = {
'success':True,
'message': '',
'metaData':{
# The name of the property which contains the Array of row objects. ...
'root': 'rows',
# Name of the property within a row object that contains a record identifier value. ...
'idProperty': cBase.protoMeta.get( 'idProperty', 'id'),
# Name of the property from which to retrieve the total number of records in t
'totalProperty':'totalCount',
# Name of the property from which to retrieve the success attribute. ...
'successProperty':'success',
# The name of the property which contains a response message. (optional)
'messageProperty': 'message',
},
'protoMeta': cBase.protoMeta,
'permissions': getAllModelPermissions(cBase.userProfile.user, cBase.model),
'rows':[],
'totalCount': 0,
}
return json.dumps( jsondict )
|
'''
Copyright 2013 Dustin Frisch<[email protected]>
This file is part of netprov.
netprov is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
netprov is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with netprov. If not, see <http://www.gnu.org/licenses/>.
'''
from abc import ABCMeta, abstractproperty
from bitarray import bitarray
class Entry(object):
def __init__(self,
name,
usage,
ipaddr,
hwaddr):
self.__name = name
self.__usage = usage
if isinstance(ipaddr, Address):
self.__ipaddr = ipaddr
else:
self.__ipaddr = Address(ipaddr)
self.__hwaddr = hwaddr
@staticmethod
def fixed(name, ipaddr):
return Entry(name = name,
usage = 'fixed',
ipaddr = ipaddr,
hwaddr = None)
@staticmethod
def static(name, ipaddr, hwaddr):
return Entry(name = name,
usage = 'static',
ipaddr = ipaddr,
hwaddr = hwaddr)
@staticmethod
def dynamic(name, ipaddr):
return Entry(name = name,
usage = 'dynamic',
ipaddr = ipaddr,
hwaddr = None)
@property
def name(self):
return self.__name
@property
def usage(self):
return self.__usage
@property
def ipaddr(self):
return self.__ipaddr
@property
def hwaddr(self):
return self.__hwaddr
def __cmp__(self, other):
return self.__ipaddr.address_int - other.__ipaddr.address_int
class Address(object):
def __init__(self, address):
if isinstance(address, int) or isinstance(address, long):
self.__address = bytearray((address >> n * 8) & 0xFF
for n
in reversed(xrange(4)))
elif isinstance(address, bytearray):
self.__address = address
else:
self.__address = bytearray(int(part)
for part
in address.split('.'))
@property
def address(self):
return self.__address
@property
def address_str(self):
return '.'.join(str(b)
for b
in self.__address)
@property
def address_int(self):
return sum(x << (8 * (3 - n))
for n, x
in enumerate(self.__address))
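# Sketch (not part of the original module): Address accepts an int, a
# bytearray or a dotted string, and exposes all three views consistently.
#   a = Address('192.168.1.10')
#   a.address_int                        # -> 3232235786
#   Address(a.address_int).address_str   # -> '192.168.1.10'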
class Subnet(object):
def __init__(self,
network,
netmask,
**fields):
if isinstance(network, Address):
self.__network = network
else:
self.__network = Address(network)
if isinstance(netmask, int) or isinstance(netmask, long):
self.__suffix = netmask
else:
bits = bitarray()
bits.frombytes(str(bytearray(int(part)
for part
in netmask.split('.'))))
self.__suffix = bits.count(True)
self.__fields = fields
@property
def network(self):
return self.__network
@property
def suffix(self):
return self.__suffix
@property
def netmask(self):
bits = bitarray(32)
bits.setall(False)
bits[0:self.__suffix] = True
return '.'.join(str(ord(byte))
for byte
in bits.tobytes())
@property
def fields(self):
return self.__fields
def __cmp__(self, other):
return self.__network.address_int - other.__network.address_int
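# Sketch (not part of the original module): Subnet normalizes either netmask
# form to a prefix length, and can render the dotted form back from it.
#   Subnet('10.0.0.0', '255.255.255.0').suffix   # -> 24
#   Subnet('10.0.0.0', 24).netmask               # -> '255.255.255.0'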
class Source(object):
__metaclass__ = ABCMeta
@abstractproperty
def subnets(self):
pass
|
#!/usr/bin/env python
import random
import tempfile
from signal import SIGINT, SIGTERM, Signals, signal
from types import FrameType
from typing import ContextManager
from eth_typing import URI, HexStr
from eth_utils import keccak, remove_0x_prefix
from web3 import HTTPProvider, Web3
from raiden.tests.fixtures.constants import DEFAULT_BALANCE
from raiden.tests.utils.eth_node import (
AccountDescription,
EthNodeDescription,
GenesisDescription,
run_private_blockchain,
)
from raiden.utils.http import JSONRPCExecutor
from raiden.utils.keys import privatekey_to_address
from raiden.utils.typing import ChainID, List, Port, PrivateKey, TokenAmount
from raiden_contracts.constants import CHAINNAME_TO_ID
NUM_GETH_NODES = 3
NUM_RAIDEN_ACCOUNTS = 10
START_PORT = 30301
START_RPCPORT = 8101
DEFAULT_ACCOUNTS_SEEDS = [
"127.0.0.1:{}".format(START_PORT + i).encode() for i in range(NUM_RAIDEN_ACCOUNTS)
]
DEFAULT_ACCOUNTS_KEYS: List[PrivateKey] = [
PrivateKey(keccak(seed)) for seed in DEFAULT_ACCOUNTS_SEEDS
]
DEFAULT_ACCOUNTS = [
AccountDescription(privatekey_to_address(key), TokenAmount(DEFAULT_BALANCE))
for key in DEFAULT_ACCOUNTS_KEYS
]
def main() -> None:
tmpdir = tempfile.mkdtemp()
geth_nodes = []
for i in range(NUM_GETH_NODES):
is_miner = i == 0
node_key = PrivateKey(keccak(f"node:{i}".encode()))
p2p_port = Port(START_PORT + i)
rpc_port = Port(START_RPCPORT + i)
description = EthNodeDescription(
private_key=node_key,
rpc_port=rpc_port,
p2p_port=p2p_port,
miner=is_miner,
extra_config={},
)
geth_nodes.append(description)
rpc_endpoint = URI(f"http://127.0.0.1:{START_RPCPORT}")
web3 = Web3(HTTPProvider(rpc_endpoint))
random_marker = remove_0x_prefix(HexStr(hex(random.getrandbits(100))))
genesis_description = GenesisDescription(
prefunded_accounts=DEFAULT_ACCOUNTS,
random_marker=random_marker,
chain_id=ChainID(CHAINNAME_TO_ID["smoketest"]),
)
private_chain: ContextManager[List[JSONRPCExecutor]] = run_private_blockchain(
web3=web3,
eth_nodes=geth_nodes,
base_datadir=tmpdir,
log_dir=tmpdir,
verbosity="info",
genesis_description=genesis_description,
)
with private_chain:
from IPython import embed
embed()
def shutdown_handler(_signo: Signals, _stackframe: FrameType) -> None:
raise SystemExit
if __name__ == "__main__":
signal(SIGTERM, shutdown_handler)
signal(SIGINT, shutdown_handler)
main()
|
"""Support for AlarmDecoder sensors (Shows Panel Display)."""
import logging
from homeassistant.helpers.entity import Entity
from . import SIGNAL_PANEL_MESSAGE
_LOGGER = logging.getLogger(__name__)
def setup_platform(hass, config, add_entities, discovery_info=None):
"""Set up for AlarmDecoder sensor devices."""
_LOGGER.debug("AlarmDecoderSensor: setup_platform")
device = AlarmDecoderSensor(hass)
add_entities([device])
class AlarmDecoderSensor(Entity):
"""Representation of an AlarmDecoder keypad."""
def __init__(self, hass):
"""Initialize the alarm panel."""
self._display = ""
self._state = None
self._icon = 'mdi:alarm-check'
self._name = 'Alarm Panel Display'
async def async_added_to_hass(self):
"""Register callbacks."""
self.hass.helpers.dispatcher.async_dispatcher_connect(
SIGNAL_PANEL_MESSAGE, self._message_callback)
def _message_callback(self, message):
if self._display != message.text:
self._display = message.text
self.schedule_update_ha_state()
@property
def icon(self):
"""Return the icon if any."""
return self._icon
@property
def state(self):
"""Return the overall state."""
return self._display
@property
def name(self):
"""Return the name of the device."""
return self._name
@property
def should_poll(self):
"""No polling needed."""
return False
|
#!/usr/bin/env python
import os
from skimage._build import cython
base_path = os.path.abspath(os.path.dirname(__file__))
def configuration(parent_package='', top_path=None):
from numpy.distutils.misc_util import Configuration, get_numpy_include_dirs
config = Configuration('morphology', parent_package, top_path)
config.add_data_dir('tests')
cython(['ccomp.pyx'], working_path=base_path)
cython(['cmorph.pyx'], working_path=base_path)
cython(['_watershed.pyx'], working_path=base_path)
cython(['_skeletonize_cy.pyx'], working_path=base_path)
cython(['_pnpoly.pyx'], working_path=base_path)
cython(['_convex_hull.pyx'], working_path=base_path)
cython(['_greyreconstruct.pyx'], working_path=base_path)
config.add_extension('ccomp', sources=['ccomp.c'],
include_dirs=[get_numpy_include_dirs()])
config.add_extension('cmorph', sources=['cmorph.c'],
include_dirs=[get_numpy_include_dirs()])
config.add_extension('_watershed', sources=['_watershed.c'],
include_dirs=[get_numpy_include_dirs()])
config.add_extension('_skeletonize_cy', sources=['_skeletonize_cy.c'],
include_dirs=[get_numpy_include_dirs()])
config.add_extension('_pnpoly', sources=['_pnpoly.c'],
include_dirs=[get_numpy_include_dirs(), '../_shared'])
config.add_extension('_convex_hull', sources=['_convex_hull.c'],
include_dirs=[get_numpy_include_dirs()])
config.add_extension('_greyreconstruct', sources=['_greyreconstruct.c'],
include_dirs=[get_numpy_include_dirs()])
return config
if __name__ == '__main__':
from numpy.distutils.core import setup
setup(maintainer='scikit-image Developers',
author='Damian Eads',
maintainer_email='[email protected]',
description='Morphology Wrapper',
url='https://github.com/scikit-image/scikit-image',
license='SciPy License (BSD Style)',
**(configuration(top_path='').todict())
)
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import cherrypy
import getopt
import sys
import json
import logging
from datetime import datetime
HOST = '0.0.0.0'
PORT = 9902
THREAD_POOL = 1000
ENCODE_ON = True
ENCODING = 'utf-8'
"""
Elasticsearch persistor rest api server
"""
class Root:
@cherrypy.expose
def index(self):
return "Elasticsearch persistor is alive!!"
@cherrypy.expose
def datetime(self):
datestr = datetime.now().strftime("%Y%m%d%H%M%S")
return json.dumps({"datestr":datestr})
def usage(command):
print("""
%s [options]
options:
--help Show help
""" % command)
def main(argv):
# Command line arguments
try:
optlist , args = getopt.getopt(argv , 'p:b:' , ['help'])
for opt, value in optlist:
if opt == '--help':
usage(sys.argv[0])
sys.exit()
conf = {
'/': {
'request.dispatch': cherrypy.dispatch.MethodDispatcher(),
'tools.sessions.on': True,
'tools.response_headers.on': True,
'tools.response_headers.headers': [('Content-Type', 'text/plain')],
},
'global': {
'server.socket_host': HOST,
'server.socket_port': PORT,
'server.thread_pool': THREAD_POOL,
'tools.encode.on': ENCODE_ON,
'tools.encode.encoding': ENCODING
}
}
cherrypy.quickstart(Root(), config=conf)
except getopt.GetoptError as err:
logging.error(err)
usage(sys.argv[0])
sys.exit(2)
if __name__ == '__main__':
main(sys.argv[1:])
|
import sys
import os
from corpustools.freqalt.freq_of_alt import calc_freq_of_alt
from corpustools.contextmanagers import (CanonicalVariantContext,
MostFrequentVariantContext)
def test_freqalt(specified_test_corpus):
with CanonicalVariantContext(specified_test_corpus, 'transcription', 'type') as c:
result = calc_freq_of_alt(c,'s','ʃ','khorsi', min_rel = -15, phono_align=True)
assert(result==(8,3,0.375))
result = calc_freq_of_alt(c,'s','ʃ','khorsi', min_rel = -6, phono_align=True)
assert(result==(8,0,0))
result = calc_freq_of_alt(c,'s','ʃ','khorsi', min_rel = -6, phono_align=False)
assert(result==(8,2,0.25))
result = calc_freq_of_alt(c,'s','ʃ','khorsi', min_rel = -15, phono_align=False)
assert(result==(8,7,0.875))
result = calc_freq_of_alt(c,'s','ʃ','edit_distance', max_rel = 2, phono_align=True)
assert(result==(8,2,0.25))
result = calc_freq_of_alt(c,'s','ʃ','edit_distance', max_rel = 2, phono_align=False)
assert(result==(8,2,0.25))
result = calc_freq_of_alt(c,'s','ʃ','phono_edit_distance', max_rel = 6, phono_align=True)
assert(result==(8,2,0.25))
result = calc_freq_of_alt(c,'s','ʃ','phono_edit_distance', max_rel = 6, phono_align=False)
assert(result==(8,2,0.25))
with CanonicalVariantContext(specified_test_corpus, 'transcription', 'token') as c:
result = calc_freq_of_alt(c,'s','ʃ','khorsi', min_rel = -15, phono_align=True)
assert(result==(8,3,0.375))
result = calc_freq_of_alt(c,'s','ʃ','khorsi', min_rel = -6, phono_align=True)
assert(result==(8,2,0.25))
result = calc_freq_of_alt(c,'s','ʃ','khorsi', min_rel = -15, phono_align=False)
assert(result==(8,7,0.875))
result = calc_freq_of_alt(c,'s','ʃ','khorsi', min_rel = -6, phono_align=False)
assert(result==(8,3,0.375))
result = calc_freq_of_alt(c,'s','ʃ','edit_distance', max_rel = 4, phono_align=True)
assert(result==(8,3,0.375))
result = calc_freq_of_alt(c,'s','ʃ','edit_distance', max_rel = 4, phono_align=False)
assert(result==(8,6,0.75))
result = calc_freq_of_alt(c,'s','ʃ','phono_edit_distance', max_rel = 20, phono_align=True)
assert(result==(8,3,0.375))
result = calc_freq_of_alt(c,'s','ʃ','phono_edit_distance', max_rel = 20, phono_align=False)
assert(result==(8,6,0.75))
|
######################## BEGIN LICENSE BLOCK ########################
# The Original Code is Mozilla Communicator client code.
#
# The Initial Developer of the Original Code is
# Netscape Communications Corporation.
# Portions created by the Initial Developer are Copyright (C) 1998
# the Initial Developer. All Rights Reserved.
#
# Contributor(s):
# Mark Pilgrim - port to Python
#
# This library is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation; either
# version 2.1 of the License, or (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this library; if not, write to the Free Software
# Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
# 02110-1301 USA
######################### END LICENSE BLOCK #########################
# Sampling from about 20M text materials include literature and computer technology
# 128 --> 0.79
# 256 --> 0.92
# 512 --> 0.986
# 1024 --> 0.99944
# 2048 --> 0.99999
#
# Ideal Distribution Ratio = 0.98653 / (1-0.98653) = 73.24
# Random Distribution Ratio = 512 / (2350-512) = 0.279
#
# Typical Distribution Ratio
EUCKR_TYPICAL_DISTRIBUTION_RATIO = 6.0
EUCKR_TABLE_SIZE = 2352
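# A minimal illustrative sketch (our addition, not part of the original
# module) of how a typical-distribution ratio like the one above is turned
# into a detection confidence: the observed share of high-frequency
# characters is compared with the share expected for genuine EUC-KR text.
def _euckr_confidence_sketch(freq_chars, total_chars,
                             typical_ratio=EUCKR_TYPICAL_DISTRIBUTION_RATIO):
    """Map counts of frequent vs. all multi-byte chars to a confidence."""
    if total_chars <= 0:
        return 0.01  # no multi-byte characters seen yet: no evidence
    if total_chars == freq_chars:
        return 0.99  # cap confidence; never report certainty
    r = freq_chars / float((total_chars - freq_chars) * typical_ratio)
    return min(r, 0.99)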
# Char to FreqOrder table
EUCKRCharToFreqOrder = (
13, 130, 120, 1396, 481, 1719, 1720, 328, 609, 212, 1721, 707, 400, 299, 1722, 87,
1397, 1723, 104, 536, 1117, 1203, 1724, 1267, 685, 1268, 508, 1725, 1726, 1727, 1728, 1398,
1399, 1729, 1730, 1731, 141, 621, 326, 1057, 368, 1732, 267, 488, 20, 1733, 1269, 1734,
945, 1400, 1735, 47, 904, 1270, 1736, 1737, 773, 248, 1738, 409, 313, 786, 429, 1739,
116, 987, 813, 1401, 683, 75, 1204, 145, 1740, 1741, 1742, 1743, 16, 847, 667, 622,
708, 1744, 1745, 1746, 966, 787, 304, 129, 1747, 60, 820, 123, 676, 1748, 1749, 1750,
1751, 617, 1752, 626, 1753, 1754, 1755, 1756, 653, 1757, 1758, 1759, 1760, 1761, 1762, 856,
344, 1763, 1764, 1765, 1766, 89, 401, 418, 806, 905, 848, 1767, 1768, 1769, 946, 1205,
709, 1770, 1118, 1771, 241, 1772, 1773, 1774, 1271, 1775, 569, 1776, 999, 1777, 1778, 1779,
1780, 337, 751, 1058, 28, 628, 254, 1781, 177, 906, 270, 349, 891, 1079, 1782, 19,
1783, 379, 1784, 315, 1785, 629, 754, 1402, 559, 1786, 636, 203, 1206, 1787, 710, 567,
1788, 935, 814, 1789, 1790, 1207, 766, 528, 1791, 1792, 1208, 1793, 1794, 1795, 1796, 1797,
1403, 1798, 1799, 533, 1059, 1404, 1405, 1156, 1406, 936, 884, 1080, 1800, 351, 1801, 1802,
1803, 1804, 1805, 801, 1806, 1807, 1808, 1119, 1809, 1157, 714, 474, 1407, 1810, 298, 899,
885, 1811, 1120, 802, 1158, 1812, 892, 1813, 1814, 1408, 659, 1815, 1816, 1121, 1817, 1818,
1819, 1820, 1821, 1822, 319, 1823, 594, 545, 1824, 815, 937, 1209, 1825, 1826, 573, 1409,
1022, 1827, 1210, 1828, 1829, 1830, 1831, 1832, 1833, 556, 722, 807, 1122, 1060, 1834, 697,
1835, 900, 557, 715, 1836, 1410, 540, 1411, 752, 1159, 294, 597, 1211, 976, 803, 770,
1412, 1837, 1838, 39, 794, 1413, 358, 1839, 371, 925, 1840, 453, 661, 788, 531, 723,
544, 1023, 1081, 869, 91, 1841, 392, 430, 790, 602, 1414, 677, 1082, 457, 1415, 1416,
1842, 1843, 475, 327, 1024, 1417, 795, 121, 1844, 733, 403, 1418, 1845, 1846, 1847, 300,
119, 711, 1212, 627, 1848, 1272, 207, 1849, 1850, 796, 1213, 382, 1851, 519, 1852, 1083,
893, 1853, 1854, 1855, 367, 809, 487, 671, 1856, 663, 1857, 1858, 956, 471, 306, 857,
1859, 1860, 1160, 1084, 1861, 1862, 1863, 1864, 1865, 1061, 1866, 1867, 1868, 1869, 1870, 1871,
282, 96, 574, 1872, 502, 1085, 1873, 1214, 1874, 907, 1875, 1876, 827, 977, 1419, 1420,
1421, 268, 1877, 1422, 1878, 1879, 1880, 308, 1881, 2, 537, 1882, 1883, 1215, 1884, 1885,
127, 791, 1886, 1273, 1423, 1887, 34, 336, 404, 643, 1888, 571, 654, 894, 840, 1889,
0, 886, 1274, 122, 575, 260, 908, 938, 1890, 1275, 410, 316, 1891, 1892, 100, 1893,
1894, 1123, 48, 1161, 1124, 1025, 1895, 633, 901, 1276, 1896, 1897, 115, 816, 1898, 317,
1899, 694, 1900, 909, 734, 1424, 572, 866, 1425, 691, 85, 524, 1010, 543, 394, 841,
1901, 1902, 1903, 1026, 1904, 1905, 1906, 1907, 1908, 1909, 30, 451, 651, 988, 310, 1910,
1911, 1426, 810, 1216, 93, 1912, 1913, 1277, 1217, 1914, 858, 759, 45, 58, 181, 610,
269, 1915, 1916, 131, 1062, 551, 443, 1000, 821, 1427, 957, 895, 1086, 1917, 1918, 375,
1919, 359, 1920, 687, 1921, 822, 1922, 293, 1923, 1924, 40, 662, 118, 692, 29, 939,
887, 640, 482, 174, 1925, 69, 1162, 728, 1428, 910, 1926, 1278, 1218, 1279, 386, 870,
217, 854, 1163, 823, 1927, 1928, 1929, 1930, 834, 1931, 78, 1932, 859, 1933, 1063, 1934,
1935, 1936, 1937, 438, 1164, 208, 595, 1938, 1939, 1940, 1941, 1219, 1125, 1942, 280, 888,
1429, 1430, 1220, 1431, 1943, 1944, 1945, 1946, 1947, 1280, 150, 510, 1432, 1948, 1949, 1950,
1951, 1952, 1953, 1954, 1011, 1087, 1955, 1433, 1043, 1956, 881, 1957, 614, 958, 1064, 1065,
1221, 1958, 638, 1001, 860, 967, 896, 1434, 989, 492, 553, 1281, 1165, 1959, 1282, 1002,
1283, 1222, 1960, 1961, 1962, 1963, 36, 383, 228, 753, 247, 454, 1964, 876, 678, 1965,
1966, 1284, 126, 464, 490, 835, 136, 672, 529, 940, 1088, 1435, 473, 1967, 1968, 467,
50, 390, 227, 587, 279, 378, 598, 792, 968, 240, 151, 160, 849, 882, 1126, 1285,
639, 1044, 133, 140, 288, 360, 811, 563, 1027, 561, 142, 523, 1969, 1970, 1971, 7,
103, 296, 439, 407, 506, 634, 990, 1972, 1973, 1974, 1975, 645, 1976, 1977, 1978, 1979,
1980, 1981, 236, 1982, 1436, 1983, 1984, 1089, 192, 828, 618, 518, 1166, 333, 1127, 1985,
818, 1223, 1986, 1987, 1988, 1989, 1990, 1991, 1992, 1993, 342, 1128, 1286, 746, 842, 1994,
1995, 560, 223, 1287, 98, 8, 189, 650, 978, 1288, 1996, 1437, 1997, 17, 345, 250,
423, 277, 234, 512, 226, 97, 289, 42, 167, 1998, 201, 1999, 2000, 843, 836, 824,
532, 338, 783, 1090, 182, 576, 436, 1438, 1439, 527, 500, 2001, 947, 889, 2002, 2003,
2004, 2005, 262, 600, 314, 447, 2006, 547, 2007, 693, 738, 1129, 2008, 71, 1440, 745,
619, 688, 2009, 829, 2010, 2011, 147, 2012, 33, 948, 2013, 2014, 74, 224, 2015, 61,
191, 918, 399, 637, 2016, 1028, 1130, 257, 902, 2017, 2018, 2019, 2020, 2021, 2022, 2023,
2024, 2025, 2026, 837, 2027, 2028, 2029, 2030, 179, 874, 591, 52, 724, 246, 2031, 2032,
2033, 2034, 1167, 969, 2035, 1289, 630, 605, 911, 1091, 1168, 2036, 2037, 2038, 1441, 912,
2039, 623, 2040, 2041, 253, 1169, 1290, 2042, 1442, 146, 620, 611, 577, 433, 2043, 1224,
719, 1170, 959, 440, 437, 534, 84, 388, 480, 1131, 159, 220, 198, 679, 2044, 1012,
819, 1066, 1443, 113, 1225, 194, 318, 1003, 1029, 2045, 2046, 2047, 2048, 1067, 2049, 2050,
2051, 2052, 2053, 59, 913, 112, 2054, 632, 2055, 455, 144, 739, 1291, 2056, 273, 681,
499, 2057, 448, 2058, 2059, 760, 2060, 2061, 970, 384, 169, 245, 1132, 2062, 2063, 414,
1444, 2064, 2065, 41, 235, 2066, 157, 252, 877, 568, 919, 789, 580, 2067, 725, 2068,
2069, 1292, 2070, 2071, 1445, 2072, 1446, 2073, 2074, 55, 588, 66, 1447, 271, 1092, 2075,
1226, 2076, 960, 1013, 372, 2077, 2078, 2079, 2080, 2081, 1293, 2082, 2083, 2084, 2085, 850,
2086, 2087, 2088, 2089, 2090, 186, 2091, 1068, 180, 2092, 2093, 2094, 109, 1227, 522, 606,
2095, 867, 1448, 1093, 991, 1171, 926, 353, 1133, 2096, 581, 2097, 2098, 2099, 1294, 1449,
1450, 2100, 596, 1172, 1014, 1228, 2101, 1451, 1295, 1173, 1229, 2102, 2103, 1296, 1134, 1452,
949, 1135, 2104, 2105, 1094, 1453, 1454, 1455, 2106, 1095, 2107, 2108, 2109, 2110, 2111, 2112,
2113, 2114, 2115, 2116, 2117, 804, 2118, 2119, 1230, 1231, 805, 1456, 405, 1136, 2120, 2121,
2122, 2123, 2124, 720, 701, 1297, 992, 1457, 927, 1004, 2125, 2126, 2127, 2128, 2129, 2130,
22, 417, 2131, 303, 2132, 385, 2133, 971, 520, 513, 2134, 1174, 73, 1096, 231, 274,
962, 1458, 673, 2135, 1459, 2136, 152, 1137, 2137, 2138, 2139, 2140, 1005, 1138, 1460, 1139,
2141, 2142, 2143, 2144, 11, 374, 844, 2145, 154, 1232, 46, 1461, 2146, 838, 830, 721,
1233, 106, 2147, 90, 428, 462, 578, 566, 1175, 352, 2148, 2149, 538, 1234, 124, 1298,
2150, 1462, 761, 565, 2151, 686, 2152, 649, 2153, 72, 173, 2154, 460, 415, 2155, 1463,
2156, 1235, 305, 2157, 2158, 2159, 2160, 2161, 2162, 579, 2163, 2164, 2165, 2166, 2167, 747,
2168, 2169, 2170, 2171, 1464, 669, 2172, 2173, 2174, 2175, 2176, 1465, 2177, 23, 530, 285,
2178, 335, 729, 2179, 397, 2180, 2181, 2182, 1030, 2183, 2184, 698, 2185, 2186, 325, 2187,
2188, 369, 2189, 799, 1097, 1015, 348, 2190, 1069, 680, 2191, 851, 1466, 2192, 2193, 10,
2194, 613, 424, 2195, 979, 108, 449, 589, 27, 172, 81, 1031, 80, 774, 281, 350,
1032, 525, 301, 582, 1176, 2196, 674, 1045, 2197, 2198, 1467, 730, 762, 2199, 2200, 2201,
2202, 1468, 2203, 993, 2204, 2205, 266, 1070, 963, 1140, 2206, 2207, 2208, 664, 1098, 972,
2209, 2210, 2211, 1177, 1469, 1470, 871, 2212, 2213, 2214, 2215, 2216, 1471, 2217, 2218, 2219,
2220, 2221, 2222, 2223, 2224, 2225, 2226, 2227, 1472, 1236, 2228, 2229, 2230, 2231, 2232, 2233,
2234, 2235, 1299, 2236, 2237, 200, 2238, 477, 373, 2239, 2240, 731, 825, 777, 2241, 2242,
2243, 521, 486, 548, 2244, 2245, 2246, 1473, 1300, 53, 549, 137, 875, 76, 158, 2247,
1301, 1474, 469, 396, 1016, 278, 712, 2248, 321, 442, 503, 767, 744, 941, 1237, 1178,
1475, 2249, 82, 178, 1141, 1179, 973, 2250, 1302, 2251, 297, 2252, 2253, 570, 2254, 2255,
2256, 18, 450, 206, 2257, 290, 292, 1142, 2258, 511, 162, 99, 346, 164, 735, 2259,
1476, 1477, 4, 554, 343, 798, 1099, 2260, 1100, 2261, 43, 171, 1303, 139, 215, 2262,
2263, 717, 775, 2264, 1033, 322, 216, 2265, 831, 2266, 149, 2267, 1304, 2268, 2269, 702,
1238, 135, 845, 347, 309, 2270, 484, 2271, 878, 655, 238, 1006, 1478, 2272, 67, 2273,
295, 2274, 2275, 461, 2276, 478, 942, 412, 2277, 1034, 2278, 2279, 2280, 265, 2281, 541,
2282, 2283, 2284, 2285, 2286, 70, 852, 1071, 2287, 2288, 2289, 2290, 21, 56, 509, 117,
432, 2291, 2292, 331, 980, 552, 1101, 148, 284, 105, 393, 1180, 1239, 755, 2293, 187,
2294, 1046, 1479, 2295, 340, 2296, 63, 1047, 230, 2297, 2298, 1305, 763, 1306, 101, 800,
808, 494, 2299, 2300, 2301, 903, 2302, 37, 1072, 14, 5, 2303, 79, 675, 2304, 312,
2305, 2306, 2307, 2308, 2309, 1480, 6, 1307, 2310, 2311, 2312, 1, 470, 35, 24, 229,
2313, 695, 210, 86, 778, 15, 784, 592, 779, 32, 77, 855, 964, 2314, 259, 2315,
501, 380, 2316, 2317, 83, 981, 153, 689, 1308, 1481, 1482, 1483, 2318, 2319, 716, 1484,
2320, 2321, 2322, 2323, 2324, 2325, 1485, 2326, 2327, 128, 57, 68, 261, 1048, 211, 170,
1240, 31, 2328, 51, 435, 742, 2329, 2330, 2331, 635, 2332, 264, 456, 2333, 2334, 2335,
425, 2336, 1486, 143, 507, 263, 943, 2337, 363, 920, 1487, 256, 1488, 1102, 243, 601,
1489, 2338, 2339, 2340, 2341, 2342, 2343, 2344, 861, 2345, 2346, 2347, 2348, 2349, 2350, 395,
2351, 1490, 1491, 62, 535, 166, 225, 2352, 2353, 668, 419, 1241, 138, 604, 928, 2354,
1181, 2355, 1492, 1493, 2356, 2357, 2358, 1143, 2359, 696, 2360, 387, 307, 1309, 682, 476,
2361, 2362, 332, 12, 222, 156, 2363, 232, 2364, 641, 276, 656, 517, 1494, 1495, 1035,
416, 736, 1496, 2365, 1017, 586, 2366, 2367, 2368, 1497, 2369, 242, 2370, 2371, 2372, 1498,
2373, 965, 713, 2374, 2375, 2376, 2377, 740, 982, 1499, 944, 1500, 1007, 2378, 2379, 1310,
1501, 2380, 2381, 2382, 785, 329, 2383, 2384, 1502, 2385, 2386, 2387, 932, 2388, 1503, 2389,
2390, 2391, 2392, 1242, 2393, 2394, 2395, 2396, 2397, 994, 950, 2398, 2399, 2400, 2401, 1504,
1311, 2402, 2403, 2404, 2405, 1049, 749, 2406, 2407, 853, 718, 1144, 1312, 2408, 1182, 1505,
2409, 2410, 255, 516, 479, 564, 550, 214, 1506, 1507, 1313, 413, 239, 444, 339, 1145,
1036, 1508, 1509, 1314, 1037, 1510, 1315, 2411, 1511, 2412, 2413, 2414, 176, 703, 497, 624,
593, 921, 302, 2415, 341, 165, 1103, 1512, 2416, 1513, 2417, 2418, 2419, 376, 2420, 700,
2421, 2422, 2423, 258, 768, 1316, 2424, 1183, 2425, 995, 608, 2426, 2427, 2428, 2429, 221,
2430, 2431, 2432, 2433, 2434, 2435, 2436, 2437, 195, 323, 726, 188, 897, 983, 1317, 377,
644, 1050, 879, 2438, 452, 2439, 2440, 2441, 2442, 2443, 2444, 914, 2445, 2446, 2447, 2448,
915, 489, 2449, 1514, 1184, 2450, 2451, 515, 64, 427, 495, 2452, 583, 2453, 483, 485,
1038, 562, 213, 1515, 748, 666, 2454, 2455, 2456, 2457, 334, 2458, 780, 996, 1008, 705,
1243, 2459, 2460, 2461, 2462, 2463, 114, 2464, 493, 1146, 366, 163, 1516, 961, 1104, 2465,
291, 2466, 1318, 1105, 2467, 1517, 365, 2468, 355, 951, 1244, 2469, 1319, 2470, 631, 2471,
2472, 218, 1320, 364, 320, 756, 1518, 1519, 1321, 1520, 1322, 2473, 2474, 2475, 2476, 997,
2477, 2478, 2479, 2480, 665, 1185, 2481, 916, 1521, 2482, 2483, 2484, 584, 684, 2485, 2486,
797, 2487, 1051, 1186, 2488, 2489, 2490, 1522, 2491, 2492, 370, 2493, 1039, 1187, 65, 2494,
434, 205, 463, 1188, 2495, 125, 812, 391, 402, 826, 699, 286, 398, 155, 781, 771,
585, 2496, 590, 505, 1073, 2497, 599, 244, 219, 917, 1018, 952, 646, 1523, 2498, 1323,
2499, 2500, 49, 984, 354, 741, 2501, 625, 2502, 1324, 2503, 1019, 190, 357, 757, 491,
95, 782, 868, 2504, 2505, 2506, 2507, 2508, 2509, 134, 1524, 1074, 422, 1525, 898, 2510,
161, 2511, 2512, 2513, 2514, 769, 2515, 1526, 2516, 2517, 411, 1325, 2518, 472, 1527, 2519,
2520, 2521, 2522, 2523, 2524, 985, 2525, 2526, 2527, 2528, 2529, 2530, 764, 2531, 1245, 2532,
2533, 25, 204, 311, 2534, 496, 2535, 1052, 2536, 2537, 2538, 2539, 2540, 2541, 2542, 199,
704, 504, 468, 758, 657, 1528, 196, 44, 839, 1246, 272, 750, 2543, 765, 862, 2544,
2545, 1326, 2546, 132, 615, 933, 2547, 732, 2548, 2549, 2550, 1189, 1529, 2551, 283, 1247,
1053, 607, 929, 2552, 2553, 2554, 930, 183, 872, 616, 1040, 1147, 2555, 1148, 1020, 441,
249, 1075, 2556, 2557, 2558, 466, 743, 2559, 2560, 2561, 92, 514, 426, 420, 526, 2562,
2563, 2564, 2565, 2566, 2567, 2568, 185, 2569, 2570, 2571, 2572, 776, 1530, 658, 2573, 362,
2574, 361, 922, 1076, 793, 2575, 2576, 2577, 2578, 2579, 2580, 1531, 251, 2581, 2582, 2583,
2584, 1532, 54, 612, 237, 1327, 2585, 2586, 275, 408, 647, 111, 2587, 1533, 1106, 465,
3, 458, 9, 38, 2588, 107, 110, 890, 209, 26, 737, 498, 2589, 1534, 2590, 431,
202, 88, 1535, 356, 287, 1107, 660, 1149, 2591, 381, 1536, 986, 1150, 445, 1248, 1151,
974, 2592, 2593, 846, 2594, 446, 953, 184, 1249, 1250, 727, 2595, 923, 193, 883, 2596,
2597, 2598, 102, 324, 539, 817, 2599, 421, 1041, 2600, 832, 2601, 94, 175, 197, 406,
2602, 459, 2603, 2604, 2605, 2606, 2607, 330, 555, 2608, 2609, 2610, 706, 1108, 389, 2611,
2612, 2613, 2614, 233, 2615, 833, 558, 931, 954, 1251, 2616, 2617, 1537, 546, 2618, 2619,
1009, 2620, 2621, 2622, 1538, 690, 1328, 2623, 955, 2624, 1539, 2625, 2626, 772, 2627, 2628,
2629, 2630, 2631, 924, 648, 863, 603, 2632, 2633, 934, 1540, 864, 865, 2634, 642, 1042,
670, 1190, 2635, 2636, 2637, 2638, 168, 2639, 652, 873, 542, 1054, 1541, 2640, 2641, 2642, # 512, 256
# Everything below is of no interest for detection purposes
2643, 2644, 2645, 2646, 2647, 2648, 2649, 2650, 2651, 2652, 2653, 2654, 2655, 2656, 2657, 2658,
2659, 2660, 2661, 2662, 2663, 2664, 2665, 2666, 2667, 2668, 2669, 2670, 2671, 2672, 2673, 2674,
2675, 2676, 2677, 2678, 2679, 2680, 2681, 2682, 2683, 2684, 2685, 2686, 2687, 2688, 2689, 2690,
2691, 2692, 2693, 2694, 2695, 2696, 2697, 2698, 2699, 1542, 880, 2700, 2701, 2702, 2703, 2704,
2705, 2706, 2707, 2708, 2709, 2710, 2711, 2712, 2713, 2714, 2715, 2716, 2717, 2718, 2719, 2720,
2721, 2722, 2723, 2724, 2725, 1543, 2726, 2727, 2728, 2729, 2730, 2731, 2732, 1544, 2733, 2734,
2735, 2736, 2737, 2738, 2739, 2740, 2741, 2742, 2743, 2744, 2745, 2746, 2747, 2748, 2749, 2750,
2751, 2752, 2753, 2754, 1545, 2755, 2756, 2757, 2758, 2759, 2760, 2761, 2762, 2763, 2764, 2765,
2766, 1546, 2767, 1547, 2768, 2769, 2770, 2771, 2772, 2773, 2774, 2775, 2776, 2777, 2778, 2779,
2780, 2781, 2782, 2783, 2784, 2785, 2786, 1548, 2787, 2788, 2789, 1109, 2790, 2791, 2792, 2793,
2794, 2795, 2796, 2797, 2798, 2799, 2800, 2801, 2802, 2803, 2804, 2805, 2806, 2807, 2808, 2809,
2810, 2811, 2812, 1329, 2813, 2814, 2815, 2816, 2817, 2818, 2819, 2820, 2821, 2822, 2823, 2824,
2825, 2826, 2827, 2828, 2829, 2830, 2831, 2832, 2833, 2834, 2835, 2836, 2837, 2838, 2839, 2840,
2841, 2842, 2843, 2844, 2845, 2846, 2847, 2848, 2849, 2850, 2851, 2852, 2853, 2854, 2855, 2856,
1549, 2857, 2858, 2859, 2860, 1550, 2861, 2862, 1551, 2863, 2864, 2865, 2866, 2867, 2868, 2869,
2870, 2871, 2872, 2873, 2874, 1110, 1330, 2875, 2876, 2877, 2878, 2879, 2880, 2881, 2882, 2883,
2884, 2885, 2886, 2887, 2888, 2889, 2890, 2891, 2892, 2893, 2894, 2895, 2896, 2897, 2898, 2899,
2900, 2901, 2902, 2903, 2904, 2905, 2906, 2907, 2908, 2909, 2910, 2911, 2912, 2913, 2914, 2915,
2916, 2917, 2918, 2919, 2920, 2921, 2922, 2923, 2924, 2925, 2926, 2927, 2928, 2929, 2930, 1331,
2931, 2932, 2933, 2934, 2935, 2936, 2937, 2938, 2939, 2940, 2941, 2942, 2943, 1552, 2944, 2945,
2946, 2947, 2948, 2949, 2950, 2951, 2952, 2953, 2954, 2955, 2956, 2957, 2958, 2959, 2960, 2961,
2962, 2963, 2964, 1252, 2965, 2966, 2967, 2968, 2969, 2970, 2971, 2972, 2973, 2974, 2975, 2976,
2977, 2978, 2979, 2980, 2981, 2982, 2983, 2984, 2985, 2986, 2987, 2988, 2989, 2990, 2991, 2992,
2993, 2994, 2995, 2996, 2997, 2998, 2999, 3000, 3001, 3002, 3003, 3004, 3005, 3006, 3007, 3008,
3009, 3010, 3011, 3012, 1553, 3013, 3014, 3015, 3016, 3017, 1554, 3018, 1332, 3019, 3020, 3021,
3022, 3023, 3024, 3025, 3026, 3027, 3028, 3029, 3030, 3031, 3032, 3033, 3034, 3035, 3036, 3037,
3038, 3039, 3040, 3041, 3042, 3043, 3044, 3045, 3046, 3047, 3048, 3049, 3050, 1555, 3051, 3052,
3053, 1556, 1557, 3054, 3055, 3056, 3057, 3058, 3059, 3060, 3061, 3062, 3063, 3064, 3065, 3066,
3067, 1558, 3068, 3069, 3070, 3071, 3072, 3073, 3074, 3075, 3076, 1559, 3077, 3078, 3079, 3080,
3081, 3082, 3083, 1253, 3084, 3085, 3086, 3087, 3088, 3089, 3090, 3091, 3092, 3093, 3094, 3095,
3096, 3097, 3098, 3099, 3100, 3101, 3102, 3103, 3104, 3105, 3106, 3107, 3108, 1152, 3109, 3110,
3111, 3112, 3113, 1560, 3114, 3115, 3116, 3117, 1111, 3118, 3119, 3120, 3121, 3122, 3123, 3124,
3125, 3126, 3127, 3128, 3129, 3130, 3131, 3132, 3133, 3134, 3135, 3136, 3137, 3138, 3139, 3140,
3141, 3142, 3143, 3144, 3145, 3146, 3147, 3148, 3149, 3150, 3151, 3152, 3153, 3154, 3155, 3156,
3157, 3158, 3159, 3160, 3161, 3162, 3163, 3164, 3165, 3166, 3167, 3168, 3169, 3170, 3171, 3172,
3173, 3174, 3175, 3176, 1333, 3177, 3178, 3179, 3180, 3181, 3182, 3183, 3184, 3185, 3186, 3187,
3188, 3189, 1561, 3190, 3191, 1334, 3192, 3193, 3194, 3195, 3196, 3197, 3198, 3199, 3200, 3201,
3202, 3203, 3204, 3205, 3206, 3207, 3208, 3209, 3210, 3211, 3212, 3213, 3214, 3215, 3216, 3217,
3218, 3219, 3220, 3221, 3222, 3223, 3224, 3225, 3226, 3227, 3228, 3229, 3230, 3231, 3232, 3233,
3234, 1562, 3235, 3236, 3237, 3238, 3239, 3240, 3241, 3242, 3243, 3244, 3245, 3246, 3247, 3248,
3249, 3250, 3251, 3252, 3253, 3254, 3255, 3256, 3257, 3258, 3259, 3260, 3261, 3262, 3263, 3264,
3265, 3266, 3267, 3268, 3269, 3270, 3271, 3272, 3273, 3274, 3275, 3276, 3277, 1563, 3278, 3279,
3280, 3281, 3282, 3283, 3284, 3285, 3286, 3287, 3288, 3289, 3290, 3291, 3292, 3293, 3294, 3295,
3296, 3297, 3298, 3299, 3300, 3301, 3302, 3303, 3304, 3305, 3306, 3307, 3308, 3309, 3310, 3311,
3312, 3313, 3314, 3315, 3316, 3317, 3318, 3319, 3320, 3321, 3322, 3323, 3324, 3325, 3326, 3327,
3328, 3329, 3330, 3331, 3332, 3333, 3334, 3335, 3336, 3337, 3338, 3339, 3340, 3341, 3342, 3343,
3344, 3345, 3346, 3347, 3348, 3349, 3350, 3351, 3352, 3353, 3354, 3355, 3356, 3357, 3358, 3359,
3360, 3361, 3362, 3363, 3364, 1335, 3365, 3366, 3367, 3368, 3369, 3370, 3371, 3372, 3373, 3374,
3375, 3376, 3377, 3378, 3379, 3380, 3381, 3382, 3383, 3384, 3385, 3386, 3387, 1336, 3388, 3389,
3390, 3391, 3392, 3393, 3394, 3395, 3396, 3397, 3398, 3399, 3400, 3401, 3402, 3403, 3404, 3405,
3406, 3407, 3408, 3409, 3410, 3411, 3412, 3413, 3414, 1337, 3415, 3416, 3417, 3418, 3419, 1338,
3420, 3421, 3422, 1564, 1565, 3423, 3424, 3425, 3426, 3427, 3428, 3429, 3430, 3431, 1254, 3432,
3433, 3434, 1339, 3435, 3436, 3437, 3438, 3439, 1566, 3440, 3441, 3442, 3443, 3444, 3445, 3446,
3447, 3448, 3449, 3450, 3451, 3452, 3453, 3454, 1255, 3455, 3456, 3457, 3458, 3459, 1567, 1191,
3460, 1568, 1569, 3461, 3462, 3463, 1570, 3464, 3465, 3466, 3467, 3468, 1571, 3469, 3470, 3471,
3472, 3473, 1572, 3474, 3475, 3476, 3477, 3478, 3479, 3480, 3481, 3482, 3483, 3484, 3485, 3486,
1340, 3487, 3488, 3489, 3490, 3491, 3492, 1021, 3493, 3494, 3495, 3496, 3497, 3498, 1573, 3499,
1341, 3500, 3501, 3502, 3503, 3504, 3505, 3506, 3507, 3508, 3509, 3510, 3511, 1342, 3512, 3513,
3514, 3515, 3516, 1574, 1343, 3517, 3518, 3519, 1575, 3520, 1576, 3521, 3522, 3523, 3524, 3525,
3526, 3527, 3528, 3529, 3530, 3531, 3532, 3533, 3534, 3535, 3536, 3537, 3538, 3539, 3540, 3541,
3542, 3543, 3544, 3545, 3546, 3547, 3548, 3549, 3550, 3551, 3552, 3553, 3554, 3555, 3556, 3557,
3558, 3559, 3560, 3561, 3562, 3563, 3564, 3565, 3566, 3567, 3568, 3569, 3570, 3571, 3572, 3573,
3574, 3575, 3576, 3577, 3578, 3579, 3580, 1577, 3581, 3582, 1578, 3583, 3584, 3585, 3586, 3587,
3588, 3589, 3590, 3591, 3592, 3593, 3594, 3595, 3596, 3597, 3598, 3599, 3600, 3601, 3602, 3603,
3604, 1579, 3605, 3606, 3607, 3608, 3609, 3610, 3611, 3612, 3613, 3614, 3615, 3616, 3617, 3618,
3619, 3620, 3621, 3622, 3623, 3624, 3625, 3626, 3627, 3628, 3629, 1580, 3630, 3631, 1581, 3632,
3633, 3634, 3635, 3636, 3637, 3638, 3639, 3640, 3641, 3642, 3643, 3644, 3645, 3646, 3647, 3648,
3649, 3650, 3651, 3652, 3653, 3654, 3655, 3656, 1582, 3657, 3658, 3659, 3660, 3661, 3662, 3663,
3664, 3665, 3666, 3667, 3668, 3669, 3670, 3671, 3672, 3673, 3674, 3675, 3676, 3677, 3678, 3679,
3680, 3681, 3682, 3683, 3684, 3685, 3686, 3687, 3688, 3689, 3690, 3691, 3692, 3693, 3694, 3695,
3696, 3697, 3698, 3699, 3700, 1192, 3701, 3702, 3703, 3704, 1256, 3705, 3706, 3707, 3708, 1583,
1257, 3709, 3710, 3711, 3712, 3713, 3714, 3715, 3716, 1584, 3717, 3718, 3719, 3720, 3721, 3722,
3723, 3724, 3725, 3726, 3727, 3728, 3729, 3730, 3731, 3732, 3733, 3734, 3735, 3736, 3737, 3738,
3739, 3740, 3741, 3742, 3743, 3744, 3745, 1344, 3746, 3747, 3748, 3749, 3750, 3751, 3752, 3753,
3754, 3755, 3756, 1585, 3757, 3758, 3759, 3760, 3761, 3762, 3763, 3764, 3765, 3766, 1586, 3767,
3768, 3769, 3770, 3771, 3772, 3773, 3774, 3775, 3776, 3777, 3778, 1345, 3779, 3780, 3781, 3782,
3783, 3784, 3785, 3786, 3787, 3788, 3789, 3790, 3791, 3792, 3793, 3794, 3795, 1346, 1587, 3796,
3797, 1588, 3798, 3799, 3800, 3801, 3802, 3803, 3804, 3805, 3806, 1347, 3807, 3808, 3809, 3810,
3811, 1589, 3812, 3813, 3814, 3815, 3816, 3817, 3818, 3819, 3820, 3821, 1590, 3822, 3823, 1591,
1348, 3824, 3825, 3826, 3827, 3828, 3829, 3830, 1592, 3831, 3832, 1593, 3833, 3834, 3835, 3836,
3837, 3838, 3839, 3840, 3841, 3842, 3843, 3844, 1349, 3845, 3846, 3847, 3848, 3849, 3850, 3851,
3852, 3853, 3854, 3855, 3856, 3857, 3858, 1594, 3859, 3860, 3861, 3862, 3863, 3864, 3865, 3866,
3867, 3868, 3869, 1595, 3870, 3871, 3872, 3873, 1596, 3874, 3875, 3876, 3877, 3878, 3879, 3880,
3881, 3882, 3883, 3884, 3885, 3886, 1597, 3887, 3888, 3889, 3890, 3891, 3892, 3893, 3894, 3895,
1598, 3896, 3897, 3898, 1599, 1600, 3899, 1350, 3900, 1351, 3901, 3902, 1352, 3903, 3904, 3905,
3906, 3907, 3908, 3909, 3910, 3911, 3912, 3913, 3914, 3915, 3916, 3917, 3918, 3919, 3920, 3921,
3922, 3923, 3924, 1258, 3925, 3926, 3927, 3928, 3929, 3930, 3931, 1193, 3932, 1601, 3933, 3934,
3935, 3936, 3937, 3938, 3939, 3940, 3941, 3942, 3943, 1602, 3944, 3945, 3946, 3947, 3948, 1603,
3949, 3950, 3951, 3952, 3953, 3954, 3955, 3956, 3957, 3958, 3959, 3960, 3961, 3962, 3963, 3964,
3965, 1604, 3966, 3967, 3968, 3969, 3970, 3971, 3972, 3973, 3974, 3975, 3976, 3977, 1353, 3978,
3979, 3980, 3981, 3982, 3983, 3984, 3985, 3986, 3987, 3988, 3989, 3990, 3991, 1354, 3992, 3993,
3994, 3995, 3996, 3997, 3998, 3999, 4000, 4001, 4002, 4003, 4004, 4005, 4006, 4007, 4008, 4009,
4010, 4011, 4012, 4013, 4014, 4015, 4016, 4017, 4018, 4019, 4020, 4021, 4022, 4023, 1355, 4024,
4025, 4026, 4027, 4028, 4029, 4030, 4031, 4032, 4033, 4034, 4035, 4036, 4037, 4038, 4039, 4040,
1605, 4041, 4042, 4043, 4044, 4045, 4046, 4047, 4048, 4049, 4050, 4051, 4052, 4053, 4054, 4055,
4056, 4057, 4058, 4059, 4060, 1606, 4061, 4062, 4063, 4064, 1607, 4065, 4066, 4067, 4068, 4069,
4070, 4071, 4072, 4073, 4074, 4075, 4076, 1194, 4077, 4078, 1608, 4079, 4080, 4081, 4082, 4083,
4084, 4085, 4086, 4087, 1609, 4088, 4089, 4090, 4091, 4092, 4093, 4094, 4095, 4096, 4097, 4098,
4099, 4100, 4101, 4102, 4103, 4104, 4105, 4106, 4107, 4108, 1259, 4109, 4110, 4111, 4112, 4113,
4114, 4115, 4116, 4117, 4118, 4119, 4120, 4121, 4122, 4123, 4124, 1195, 4125, 4126, 4127, 1610,
4128, 4129, 4130, 4131, 4132, 4133, 4134, 4135, 4136, 4137, 1356, 4138, 4139, 4140, 4141, 4142,
4143, 4144, 1611, 4145, 4146, 4147, 4148, 4149, 4150, 4151, 4152, 4153, 4154, 4155, 4156, 4157,
4158, 4159, 4160, 4161, 4162, 4163, 4164, 4165, 4166, 4167, 4168, 4169, 4170, 4171, 4172, 4173,
4174, 4175, 4176, 4177, 4178, 4179, 4180, 4181, 4182, 4183, 4184, 4185, 4186, 4187, 4188, 4189,
4190, 4191, 4192, 4193, 4194, 4195, 4196, 4197, 4198, 4199, 4200, 4201, 4202, 4203, 4204, 4205,
4206, 4207, 4208, 4209, 4210, 4211, 4212, 4213, 4214, 4215, 4216, 4217, 4218, 4219, 1612, 4220,
4221, 4222, 4223, 4224, 4225, 4226, 4227, 1357, 4228, 1613, 4229, 4230, 4231, 4232, 4233, 4234,
4235, 4236, 4237, 4238, 4239, 4240, 4241, 4242, 4243, 1614, 4244, 4245, 4246, 4247, 4248, 4249,
4250, 4251, 4252, 4253, 4254, 4255, 4256, 4257, 4258, 4259, 4260, 4261, 4262, 4263, 4264, 4265,
4266, 4267, 4268, 4269, 4270, 1196, 1358, 4271, 4272, 4273, 4274, 4275, 4276, 4277, 4278, 4279,
4280, 4281, 4282, 4283, 4284, 4285, 4286, 4287, 1615, 4288, 4289, 4290, 4291, 4292, 4293, 4294,
4295, 4296, 4297, 4298, 4299, 4300, 4301, 4302, 4303, 4304, 4305, 4306, 4307, 4308, 4309, 4310,
4311, 4312, 4313, 4314, 4315, 4316, 4317, 4318, 4319, 4320, 4321, 4322, 4323, 4324, 4325, 4326,
4327, 4328, 4329, 4330, 4331, 4332, 4333, 4334, 1616, 4335, 4336, 4337, 4338, 4339, 4340, 4341,
4342, 4343, 4344, 4345, 4346, 4347, 4348, 4349, 4350, 4351, 4352, 4353, 4354, 4355, 4356, 4357,
4358, 4359, 4360, 1617, 4361, 4362, 4363, 4364, 4365, 1618, 4366, 4367, 4368, 4369, 4370, 4371,
4372, 4373, 4374, 4375, 4376, 4377, 4378, 4379, 4380, 4381, 4382, 4383, 4384, 4385, 4386, 4387,
4388, 4389, 4390, 4391, 4392, 4393, 4394, 4395, 4396, 4397, 4398, 4399, 4400, 4401, 4402, 4403,
4404, 4405, 4406, 4407, 4408, 4409, 4410, 4411, 4412, 4413, 4414, 4415, 4416, 1619, 4417, 4418,
4419, 4420, 4421, 4422, 4423, 4424, 4425, 1112, 4426, 4427, 4428, 4429, 4430, 1620, 4431, 4432,
4433, 4434, 4435, 4436, 4437, 4438, 4439, 4440, 4441, 4442, 1260, 1261, 4443, 4444, 4445, 4446,
4447, 4448, 4449, 4450, 4451, 4452, 4453, 4454, 4455, 1359, 4456, 4457, 4458, 4459, 4460, 4461,
4462, 4463, 4464, 4465, 1621, 4466, 4467, 4468, 4469, 4470, 4471, 4472, 4473, 4474, 4475, 4476,
4477, 4478, 4479, 4480, 4481, 4482, 4483, 4484, 4485, 4486, 4487, 4488, 4489, 1055, 4490, 4491,
4492, 4493, 4494, 4495, 4496, 4497, 4498, 4499, 4500, 4501, 4502, 4503, 4504, 4505, 4506, 4507,
4508, 4509, 4510, 4511, 4512, 4513, 4514, 4515, 4516, 4517, 4518, 1622, 4519, 4520, 4521, 1623,
4522, 4523, 4524, 4525, 4526, 4527, 4528, 4529, 4530, 4531, 4532, 4533, 4534, 4535, 1360, 4536,
4537, 4538, 4539, 4540, 4541, 4542, 4543, 975, 4544, 4545, 4546, 4547, 4548, 4549, 4550, 4551,
4552, 4553, 4554, 4555, 4556, 4557, 4558, 4559, 4560, 4561, 4562, 4563, 4564, 4565, 4566, 4567,
4568, 4569, 4570, 4571, 1624, 4572, 4573, 4574, 4575, 4576, 1625, 4577, 4578, 4579, 4580, 4581,
4582, 4583, 4584, 1626, 4585, 4586, 4587, 4588, 4589, 4590, 4591, 4592, 4593, 4594, 4595, 1627,
4596, 4597, 4598, 4599, 4600, 4601, 4602, 4603, 4604, 4605, 4606, 4607, 4608, 4609, 4610, 4611,
4612, 4613, 4614, 4615, 1628, 4616, 4617, 4618, 4619, 4620, 4621, 4622, 4623, 4624, 4625, 4626,
4627, 4628, 4629, 4630, 4631, 4632, 4633, 4634, 4635, 4636, 4637, 4638, 4639, 4640, 4641, 4642,
4643, 4644, 4645, 4646, 4647, 4648, 4649, 1361, 4650, 4651, 4652, 4653, 4654, 4655, 4656, 4657,
4658, 4659, 4660, 4661, 1362, 4662, 4663, 4664, 4665, 4666, 4667, 4668, 4669, 4670, 4671, 4672,
4673, 4674, 4675, 4676, 4677, 4678, 4679, 4680, 4681, 4682, 1629, 4683, 4684, 4685, 4686, 4687,
1630, 4688, 4689, 4690, 4691, 1153, 4692, 4693, 4694, 1113, 4695, 4696, 4697, 4698, 4699, 4700,
4701, 4702, 4703, 4704, 4705, 4706, 4707, 4708, 4709, 4710, 4711, 1197, 4712, 4713, 4714, 4715,
4716, 4717, 4718, 4719, 4720, 4721, 4722, 4723, 4724, 4725, 4726, 4727, 4728, 4729, 4730, 4731,
4732, 4733, 4734, 4735, 1631, 4736, 1632, 4737, 4738, 4739, 4740, 4741, 4742, 4743, 4744, 1633,
4745, 4746, 4747, 4748, 4749, 1262, 4750, 4751, 4752, 4753, 4754, 1363, 4755, 4756, 4757, 4758,
4759, 4760, 4761, 4762, 4763, 4764, 4765, 4766, 4767, 4768, 1634, 4769, 4770, 4771, 4772, 4773,
4774, 4775, 4776, 4777, 4778, 1635, 4779, 4780, 4781, 4782, 4783, 4784, 4785, 4786, 4787, 4788,
4789, 1636, 4790, 4791, 4792, 4793, 4794, 4795, 4796, 4797, 4798, 4799, 4800, 4801, 4802, 4803,
4804, 4805, 4806, 1637, 4807, 4808, 4809, 1638, 4810, 4811, 4812, 4813, 4814, 4815, 4816, 4817,
4818, 1639, 4819, 4820, 4821, 4822, 4823, 4824, 4825, 4826, 4827, 4828, 4829, 4830, 4831, 4832,
4833, 1077, 4834, 4835, 4836, 4837, 4838, 4839, 4840, 4841, 4842, 4843, 4844, 4845, 4846, 4847,
4848, 4849, 4850, 4851, 4852, 4853, 4854, 4855, 4856, 4857, 4858, 4859, 4860, 4861, 4862, 4863,
4864, 4865, 4866, 4867, 4868, 4869, 4870, 4871, 4872, 4873, 4874, 4875, 4876, 4877, 4878, 4879,
4880, 4881, 4882, 4883, 1640, 4884, 4885, 1641, 4886, 4887, 4888, 4889, 4890, 4891, 4892, 4893,
4894, 4895, 4896, 4897, 4898, 4899, 4900, 4901, 4902, 4903, 4904, 4905, 4906, 4907, 4908, 4909,
4910, 4911, 1642, 4912, 4913, 4914, 1364, 4915, 4916, 4917, 4918, 4919, 4920, 4921, 4922, 4923,
4924, 4925, 4926, 4927, 4928, 4929, 4930, 4931, 1643, 4932, 4933, 4934, 4935, 4936, 4937, 4938,
4939, 4940, 4941, 4942, 4943, 4944, 4945, 4946, 4947, 4948, 4949, 4950, 4951, 4952, 4953, 4954,
4955, 4956, 4957, 4958, 4959, 4960, 4961, 4962, 4963, 4964, 4965, 4966, 4967, 4968, 4969, 4970,
4971, 4972, 4973, 4974, 4975, 4976, 4977, 4978, 4979, 4980, 1644, 4981, 4982, 4983, 4984, 1645,
4985, 4986, 1646, 4987, 4988, 4989, 4990, 4991, 4992, 4993, 4994, 4995, 4996, 4997, 4998, 4999,
5000, 5001, 5002, 5003, 5004, 5005, 1647, 5006, 1648, 5007, 5008, 5009, 5010, 5011, 5012, 1078,
5013, 5014, 5015, 5016, 5017, 5018, 5019, 5020, 5021, 5022, 5023, 5024, 5025, 5026, 5027, 5028,
1365, 5029, 5030, 5031, 5032, 5033, 5034, 5035, 5036, 5037, 5038, 5039, 1649, 5040, 5041, 5042,
5043, 5044, 5045, 1366, 5046, 5047, 5048, 5049, 5050, 5051, 5052, 5053, 5054, 5055, 1650, 5056,
5057, 5058, 5059, 5060, 5061, 5062, 5063, 5064, 5065, 5066, 5067, 5068, 5069, 5070, 5071, 5072,
5073, 5074, 5075, 5076, 5077, 1651, 5078, 5079, 5080, 5081, 5082, 5083, 5084, 5085, 5086, 5087,
5088, 5089, 5090, 5091, 5092, 5093, 5094, 5095, 5096, 5097, 5098, 5099, 5100, 5101, 5102, 5103,
5104, 5105, 5106, 5107, 5108, 5109, 5110, 1652, 5111, 5112, 5113, 5114, 5115, 5116, 5117, 5118,
1367, 5119, 5120, 5121, 5122, 5123, 5124, 5125, 5126, 5127, 5128, 5129, 1653, 5130, 5131, 5132,
5133, 5134, 5135, 5136, 5137, 5138, 5139, 5140, 5141, 5142, 5143, 5144, 5145, 5146, 5147, 5148,
5149, 1368, 5150, 1654, 5151, 1369, 5152, 5153, 5154, 5155, 5156, 5157, 5158, 5159, 5160, 5161,
5162, 5163, 5164, 5165, 5166, 5167, 5168, 5169, 5170, 5171, 5172, 5173, 5174, 5175, 5176, 5177,
5178, 1370, 5179, 5180, 5181, 5182, 5183, 5184, 5185, 5186, 5187, 5188, 5189, 5190, 5191, 5192,
5193, 5194, 5195, 5196, 5197, 5198, 1655, 5199, 5200, 5201, 5202, 1656, 5203, 5204, 5205, 5206,
1371, 5207, 1372, 5208, 5209, 5210, 5211, 1373, 5212, 5213, 1374, 5214, 5215, 5216, 5217, 5218,
5219, 5220, 5221, 5222, 5223, 5224, 5225, 5226, 5227, 5228, 5229, 5230, 5231, 5232, 5233, 5234,
5235, 5236, 5237, 5238, 5239, 5240, 5241, 5242, 5243, 5244, 5245, 5246, 5247, 1657, 5248, 5249,
5250, 5251, 1658, 1263, 5252, 5253, 5254, 5255, 5256, 1375, 5257, 5258, 5259, 5260, 5261, 5262,
5263, 5264, 5265, 5266, 5267, 5268, 5269, 5270, 5271, 5272, 5273, 5274, 5275, 5276, 5277, 5278,
5279, 5280, 5281, 5282, 5283, 1659, 5284, 5285, 5286, 5287, 5288, 5289, 5290, 5291, 5292, 5293,
5294, 5295, 5296, 5297, 5298, 5299, 5300, 1660, 5301, 5302, 5303, 5304, 5305, 5306, 5307, 5308,
5309, 5310, 5311, 5312, 5313, 5314, 5315, 5316, 5317, 5318, 5319, 5320, 5321, 1376, 5322, 5323,
5324, 5325, 5326, 5327, 5328, 5329, 5330, 5331, 5332, 5333, 1198, 5334, 5335, 5336, 5337, 5338,
5339, 5340, 5341, 5342, 5343, 1661, 5344, 5345, 5346, 5347, 5348, 5349, 5350, 5351, 5352, 5353,
5354, 5355, 5356, 5357, 5358, 5359, 5360, 5361, 5362, 5363, 5364, 5365, 5366, 5367, 5368, 5369,
5370, 5371, 5372, 5373, 5374, 5375, 5376, 5377, 5378, 5379, 5380, 5381, 5382, 5383, 5384, 5385,
5386, 5387, 5388, 5389, 5390, 5391, 5392, 5393, 5394, 5395, 5396, 5397, 5398, 1264, 5399, 5400,
5401, 5402, 5403, 5404, 5405, 5406, 5407, 5408, 5409, 5410, 5411, 5412, 1662, 5413, 5414, 5415,
5416, 1663, 5417, 5418, 5419, 5420, 5421, 5422, 5423, 5424, 5425, 5426, 5427, 5428, 5429, 5430,
5431, 5432, 5433, 5434, 5435, 5436, 5437, 5438, 1664, 5439, 5440, 5441, 5442, 5443, 5444, 5445,
5446, 5447, 5448, 5449, 5450, 5451, 5452, 5453, 5454, 5455, 5456, 5457, 5458, 5459, 5460, 5461,
5462, 5463, 5464, 5465, 5466, 5467, 5468, 5469, 5470, 5471, 5472, 5473, 5474, 5475, 5476, 5477,
5478, 1154, 5479, 5480, 5481, 5482, 5483, 5484, 5485, 1665, 5486, 5487, 5488, 5489, 5490, 5491,
5492, 5493, 5494, 5495, 5496, 5497, 5498, 5499, 5500, 5501, 5502, 5503, 5504, 5505, 5506, 5507,
5508, 5509, 5510, 5511, 5512, 5513, 5514, 5515, 5516, 5517, 5518, 5519, 5520, 5521, 5522, 5523,
5524, 5525, 5526, 5527, 5528, 5529, 5530, 5531, 5532, 5533, 5534, 5535, 5536, 5537, 5538, 5539,
5540, 5541, 5542, 5543, 5544, 5545, 5546, 5547, 5548, 1377, 5549, 5550, 5551, 5552, 5553, 5554,
5555, 5556, 5557, 5558, 5559, 5560, 5561, 5562, 5563, 5564, 5565, 5566, 5567, 5568, 5569, 5570,
1114, 5571, 5572, 5573, 5574, 5575, 5576, 5577, 5578, 5579, 5580, 5581, 5582, 5583, 5584, 5585,
5586, 5587, 5588, 5589, 5590, 5591, 5592, 1378, 5593, 5594, 5595, 5596, 5597, 5598, 5599, 5600,
5601, 5602, 5603, 5604, 5605, 5606, 5607, 5608, 5609, 5610, 5611, 5612, 5613, 5614, 1379, 5615,
5616, 5617, 5618, 5619, 5620, 5621, 5622, 5623, 5624, 5625, 5626, 5627, 5628, 5629, 5630, 5631,
5632, 5633, 5634, 1380, 5635, 5636, 5637, 5638, 5639, 5640, 5641, 5642, 5643, 5644, 5645, 5646,
5647, 5648, 5649, 1381, 1056, 5650, 5651, 5652, 5653, 5654, 5655, 5656, 5657, 5658, 5659, 5660,
1666, 5661, 5662, 5663, 5664, 5665, 5666, 5667, 5668, 1667, 5669, 1668, 5670, 5671, 5672, 5673,
5674, 5675, 5676, 5677, 5678, 1155, 5679, 5680, 5681, 5682, 5683, 5684, 5685, 5686, 5687, 5688,
5689, 5690, 5691, 5692, 5693, 5694, 5695, 5696, 5697, 5698, 1669, 5699, 5700, 5701, 5702, 5703,
5704, 5705, 1670, 5706, 5707, 5708, 5709, 5710, 1671, 5711, 5712, 5713, 5714, 1382, 5715, 5716,
5717, 5718, 5719, 5720, 5721, 5722, 5723, 5724, 5725, 1672, 5726, 5727, 1673, 1674, 5728, 5729,
5730, 5731, 5732, 5733, 5734, 5735, 5736, 1675, 5737, 5738, 5739, 5740, 5741, 5742, 5743, 5744,
1676, 5745, 5746, 5747, 5748, 5749, 5750, 5751, 1383, 5752, 5753, 5754, 5755, 5756, 5757, 5758,
5759, 5760, 5761, 5762, 5763, 5764, 5765, 5766, 5767, 5768, 1677, 5769, 5770, 5771, 5772, 5773,
1678, 5774, 5775, 5776, 998, 5777, 5778, 5779, 5780, 5781, 5782, 5783, 5784, 5785, 1384, 5786,
5787, 5788, 5789, 5790, 5791, 5792, 5793, 5794, 5795, 5796, 5797, 5798, 5799, 5800, 1679, 5801,
5802, 5803, 1115, 1116, 5804, 5805, 5806, 5807, 5808, 5809, 5810, 5811, 5812, 5813, 5814, 5815,
5816, 5817, 5818, 5819, 5820, 5821, 5822, 5823, 5824, 5825, 5826, 5827, 5828, 5829, 5830, 5831,
5832, 5833, 5834, 5835, 5836, 5837, 5838, 5839, 5840, 5841, 5842, 5843, 5844, 5845, 5846, 5847,
5848, 5849, 5850, 5851, 5852, 5853, 5854, 5855, 1680, 5856, 5857, 5858, 5859, 5860, 5861, 5862,
5863, 5864, 1681, 5865, 5866, 5867, 1682, 5868, 5869, 5870, 5871, 5872, 5873, 5874, 5875, 5876,
5877, 5878, 5879, 1683, 5880, 1684, 5881, 5882, 5883, 5884, 1685, 5885, 5886, 5887, 5888, 5889,
5890, 5891, 5892, 5893, 5894, 5895, 5896, 5897, 5898, 5899, 5900, 5901, 5902, 5903, 5904, 5905,
5906, 5907, 1686, 5908, 5909, 5910, 5911, 5912, 5913, 5914, 5915, 5916, 5917, 5918, 5919, 5920,
5921, 5922, 5923, 5924, 5925, 5926, 5927, 5928, 5929, 5930, 5931, 5932, 5933, 5934, 5935, 1687,
5936, 5937, 5938, 5939, 5940, 5941, 5942, 5943, 5944, 5945, 5946, 5947, 5948, 5949, 5950, 5951,
5952, 1688, 1689, 5953, 1199, 5954, 5955, 5956, 5957, 5958, 5959, 5960, 5961, 1690, 5962, 5963,
5964, 5965, 5966, 5967, 5968, 5969, 5970, 5971, 5972, 5973, 5974, 5975, 5976, 5977, 5978, 5979,
5980, 5981, 1385, 5982, 1386, 5983, 5984, 5985, 5986, 5987, 5988, 5989, 5990, 5991, 5992, 5993,
5994, 5995, 5996, 5997, 5998, 5999, 6000, 6001, 6002, 6003, 6004, 6005, 6006, 6007, 6008, 6009,
6010, 6011, 6012, 6013, 6014, 6015, 6016, 6017, 6018, 6019, 6020, 6021, 6022, 6023, 6024, 6025,
6026, 6027, 1265, 6028, 6029, 1691, 6030, 6031, 6032, 6033, 6034, 6035, 6036, 6037, 6038, 6039,
6040, 6041, 6042, 6043, 6044, 6045, 6046, 6047, 6048, 6049, 6050, 6051, 6052, 6053, 6054, 6055,
6056, 6057, 6058, 6059, 6060, 6061, 6062, 6063, 6064, 6065, 6066, 6067, 6068, 6069, 6070, 6071,
6072, 6073, 6074, 6075, 6076, 6077, 6078, 6079, 6080, 6081, 6082, 6083, 6084, 1692, 6085, 6086,
6087, 6088, 6089, 6090, 6091, 6092, 6093, 6094, 6095, 6096, 6097, 6098, 6099, 6100, 6101, 6102,
6103, 6104, 6105, 6106, 6107, 6108, 6109, 6110, 6111, 6112, 6113, 6114, 6115, 6116, 6117, 6118,
6119, 6120, 6121, 6122, 6123, 6124, 6125, 6126, 6127, 6128, 6129, 6130, 6131, 1693, 6132, 6133,
6134, 6135, 6136, 1694, 6137, 6138, 6139, 6140, 6141, 1695, 6142, 6143, 6144, 6145, 6146, 6147,
6148, 6149, 6150, 6151, 6152, 6153, 6154, 6155, 6156, 6157, 6158, 6159, 6160, 6161, 6162, 6163,
6164, 6165, 6166, 6167, 6168, 6169, 6170, 6171, 6172, 6173, 6174, 6175, 6176, 6177, 6178, 6179,
6180, 6181, 6182, 6183, 6184, 6185, 1696, 6186, 6187, 6188, 6189, 6190, 6191, 6192, 6193, 6194,
6195, 6196, 6197, 6198, 6199, 6200, 6201, 6202, 6203, 6204, 6205, 6206, 6207, 6208, 6209, 6210,
6211, 6212, 6213, 6214, 6215, 6216, 6217, 6218, 6219, 1697, 6220, 6221, 6222, 6223, 6224, 6225,
6226, 6227, 6228, 6229, 6230, 6231, 6232, 6233, 6234, 6235, 6236, 6237, 6238, 6239, 6240, 6241,
6242, 6243, 6244, 6245, 6246, 6247, 6248, 6249, 6250, 6251, 6252, 6253, 1698, 6254, 6255, 6256,
6257, 6258, 6259, 6260, 6261, 6262, 6263, 1200, 6264, 6265, 6266, 6267, 6268, 6269, 6270, 6271, # 1024
6272, 6273, 6274, 6275, 6276, 6277, 6278, 6279, 6280, 6281, 6282, 6283, 6284, 6285, 6286, 6287,
6288, 6289, 6290, 6291, 6292, 6293, 6294, 6295, 6296, 6297, 6298, 6299, 6300, 6301, 6302, 1699,
6303, 6304, 1700, 6305, 6306, 6307, 6308, 6309, 6310, 6311, 6312, 6313, 6314, 6315, 6316, 6317,
6318, 6319, 6320, 6321, 6322, 6323, 6324, 6325, 6326, 6327, 6328, 6329, 6330, 6331, 6332, 6333,
6334, 6335, 6336, 6337, 6338, 6339, 1701, 6340, 6341, 6342, 6343, 6344, 1387, 6345, 6346, 6347,
6348, 6349, 6350, 6351, 6352, 6353, 6354, 6355, 6356, 6357, 6358, 6359, 6360, 6361, 6362, 6363,
6364, 6365, 6366, 6367, 6368, 6369, 6370, 6371, 6372, 6373, 6374, 6375, 6376, 6377, 6378, 6379,
6380, 6381, 6382, 6383, 6384, 6385, 6386, 6387, 6388, 6389, 6390, 6391, 6392, 6393, 6394, 6395,
6396, 6397, 6398, 6399, 6400, 6401, 6402, 6403, 6404, 6405, 6406, 6407, 6408, 6409, 6410, 6411,
6412, 6413, 1702, 6414, 6415, 6416, 6417, 6418, 6419, 6420, 6421, 6422, 1703, 6423, 6424, 6425,
6426, 6427, 6428, 6429, 6430, 6431, 6432, 6433, 6434, 6435, 6436, 6437, 6438, 1704, 6439, 6440,
6441, 6442, 6443, 6444, 6445, 6446, 6447, 6448, 6449, 6450, 6451, 6452, 6453, 6454, 6455, 6456,
6457, 6458, 6459, 6460, 6461, 6462, 6463, 6464, 6465, 6466, 6467, 6468, 6469, 6470, 6471, 6472,
6473, 6474, 6475, 6476, 6477, 6478, 6479, 6480, 6481, 6482, 6483, 6484, 6485, 6486, 6487, 6488,
6489, 6490, 6491, 6492, 6493, 6494, 6495, 6496, 6497, 6498, 6499, 6500, 6501, 6502, 6503, 1266,
6504, 6505, 6506, 6507, 6508, 6509, 6510, 6511, 6512, 6513, 6514, 6515, 6516, 6517, 6518, 6519,
6520, 6521, 6522, 6523, 6524, 6525, 6526, 6527, 6528, 6529, 6530, 6531, 6532, 6533, 6534, 6535,
6536, 6537, 6538, 6539, 6540, 6541, 6542, 6543, 6544, 6545, 6546, 6547, 6548, 6549, 6550, 6551,
1705, 1706, 6552, 6553, 6554, 6555, 6556, 6557, 6558, 6559, 6560, 6561, 6562, 6563, 6564, 6565,
6566, 6567, 6568, 6569, 6570, 6571, 6572, 6573, 6574, 6575, 6576, 6577, 6578, 6579, 6580, 6581,
6582, 6583, 6584, 6585, 6586, 6587, 6588, 6589, 6590, 6591, 6592, 6593, 6594, 6595, 6596, 6597,
6598, 6599, 6600, 6601, 6602, 6603, 6604, 6605, 6606, 6607, 6608, 6609, 6610, 6611, 6612, 6613,
6614, 6615, 6616, 6617, 6618, 6619, 6620, 6621, 6622, 6623, 6624, 6625, 6626, 6627, 6628, 6629,
6630, 6631, 6632, 6633, 6634, 6635, 6636, 6637, 1388, 6638, 6639, 6640, 6641, 6642, 6643, 6644,
1707, 6645, 6646, 6647, 6648, 6649, 6650, 6651, 6652, 6653, 6654, 6655, 6656, 6657, 6658, 6659,
6660, 6661, 6662, 6663, 1708, 6664, 6665, 6666, 6667, 6668, 6669, 6670, 6671, 6672, 6673, 6674,
1201, 6675, 6676, 6677, 6678, 6679, 6680, 6681, 6682, 6683, 6684, 6685, 6686, 6687, 6688, 6689,
6690, 6691, 6692, 6693, 6694, 6695, 6696, 6697, 6698, 6699, 6700, 6701, 6702, 6703, 6704, 6705,
6706, 6707, 6708, 6709, 6710, 6711, 6712, 6713, 6714, 6715, 6716, 6717, 6718, 6719, 6720, 6721,
6722, 6723, 6724, 6725, 1389, 6726, 6727, 6728, 6729, 6730, 6731, 6732, 6733, 6734, 6735, 6736,
1390, 1709, 6737, 6738, 6739, 6740, 6741, 6742, 1710, 6743, 6744, 6745, 6746, 1391, 6747, 6748,
6749, 6750, 6751, 6752, 6753, 6754, 6755, 6756, 6757, 1392, 6758, 6759, 6760, 6761, 6762, 6763,
6764, 6765, 6766, 6767, 6768, 6769, 6770, 6771, 6772, 6773, 6774, 6775, 6776, 6777, 6778, 6779,
6780, 1202, 6781, 6782, 6783, 6784, 6785, 6786, 6787, 6788, 6789, 6790, 6791, 6792, 6793, 6794,
6795, 6796, 6797, 6798, 6799, 6800, 6801, 6802, 6803, 6804, 6805, 6806, 6807, 6808, 6809, 1711,
6810, 6811, 6812, 6813, 6814, 6815, 6816, 6817, 6818, 6819, 6820, 6821, 6822, 6823, 6824, 6825,
6826, 6827, 6828, 6829, 6830, 6831, 6832, 6833, 6834, 6835, 6836, 1393, 6837, 6838, 6839, 6840,
6841, 6842, 6843, 6844, 6845, 6846, 6847, 6848, 6849, 6850, 6851, 6852, 6853, 6854, 6855, 6856,
6857, 6858, 6859, 6860, 6861, 6862, 6863, 6864, 6865, 6866, 6867, 6868, 6869, 6870, 6871, 6872,
6873, 6874, 6875, 6876, 6877, 6878, 6879, 6880, 6881, 6882, 6883, 6884, 6885, 6886, 6887, 6888,
6889, 6890, 6891, 6892, 6893, 6894, 6895, 6896, 6897, 6898, 6899, 6900, 6901, 6902, 1712, 6903,
6904, 6905, 6906, 6907, 6908, 6909, 6910, 1713, 6911, 6912, 6913, 6914, 6915, 6916, 6917, 6918,
6919, 6920, 6921, 6922, 6923, 6924, 6925, 6926, 6927, 6928, 6929, 6930, 6931, 6932, 6933, 6934,
6935, 6936, 6937, 6938, 6939, 6940, 6941, 6942, 6943, 6944, 6945, 6946, 6947, 6948, 6949, 6950,
6951, 6952, 6953, 6954, 6955, 6956, 6957, 6958, 6959, 6960, 6961, 6962, 6963, 6964, 6965, 6966,
6967, 6968, 6969, 6970, 6971, 6972, 6973, 6974, 1714, 6975, 6976, 6977, 6978, 6979, 6980, 6981,
6982, 6983, 6984, 6985, 6986, 6987, 6988, 1394, 6989, 6990, 6991, 6992, 6993, 6994, 6995, 6996,
6997, 6998, 6999, 7000, 1715, 7001, 7002, 7003, 7004, 7005, 7006, 7007, 7008, 7009, 7010, 7011,
7012, 7013, 7014, 7015, 7016, 7017, 7018, 7019, 7020, 7021, 7022, 7023, 7024, 7025, 7026, 7027,
7028, 1716, 7029, 7030, 7031, 7032, 7033, 7034, 7035, 7036, 7037, 7038, 7039, 7040, 7041, 7042,
7043, 7044, 7045, 7046, 7047, 7048, 7049, 7050, 7051, 7052, 7053, 7054, 7055, 7056, 7057, 7058,
7059, 7060, 7061, 7062, 7063, 7064, 7065, 7066, 7067, 7068, 7069, 7070, 7071, 7072, 7073, 7074,
7075, 7076, 7077, 7078, 7079, 7080, 7081, 7082, 7083, 7084, 7085, 7086, 7087, 7088, 7089, 7090,
7091, 7092, 7093, 7094, 7095, 7096, 7097, 7098, 7099, 7100, 7101, 7102, 7103, 7104, 7105, 7106,
7107, 7108, 7109, 7110, 7111, 7112, 7113, 7114, 7115, 7116, 7117, 7118, 7119, 7120, 7121, 7122,
7123, 7124, 7125, 7126, 7127, 7128, 7129, 7130, 7131, 7132, 7133, 7134, 7135, 7136, 7137, 7138,
7139, 7140, 7141, 7142, 7143, 7144, 7145, 7146, 7147, 7148, 7149, 7150, 7151, 7152, 7153, 7154,
7155, 7156, 7157, 7158, 7159, 7160, 7161, 7162, 7163, 7164, 7165, 7166, 7167, 7168, 7169, 7170,
7171, 7172, 7173, 7174, 7175, 7176, 7177, 7178, 7179, 7180, 7181, 7182, 7183, 7184, 7185, 7186,
7187, 7188, 7189, 7190, 7191, 7192, 7193, 7194, 7195, 7196, 7197, 7198, 7199, 7200, 7201, 7202,
7203, 7204, 7205, 7206, 7207, 1395, 7208, 7209, 7210, 7211, 7212, 7213, 1717, 7214, 7215, 7216,
7217, 7218, 7219, 7220, 7221, 7222, 7223, 7224, 7225, 7226, 7227, 7228, 7229, 7230, 7231, 7232,
7233, 7234, 7235, 7236, 7237, 7238, 7239, 7240, 7241, 7242, 7243, 7244, 7245, 7246, 7247, 7248,
7249, 7250, 7251, 7252, 7253, 7254, 7255, 7256, 7257, 7258, 7259, 7260, 7261, 7262, 7263, 7264,
7265, 7266, 7267, 7268, 7269, 7270, 7271, 7272, 7273, 7274, 7275, 7276, 7277, 7278, 7279, 7280,
7281, 7282, 7283, 7284, 7285, 7286, 7287, 7288, 7289, 7290, 7291, 7292, 7293, 7294, 7295, 7296,
7297, 7298, 7299, 7300, 7301, 7302, 7303, 7304, 7305, 7306, 7307, 7308, 7309, 7310, 7311, 7312,
7313, 1718, 7314, 7315, 7316, 7317, 7318, 7319, 7320, 7321, 7322, 7323, 7324, 7325, 7326, 7327,
7328, 7329, 7330, 7331, 7332, 7333, 7334, 7335, 7336, 7337, 7338, 7339, 7340, 7341, 7342, 7343,
7344, 7345, 7346, 7347, 7348, 7349, 7350, 7351, 7352, 7353, 7354, 7355, 7356, 7357, 7358, 7359,
7360, 7361, 7362, 7363, 7364, 7365, 7366, 7367, 7368, 7369, 7370, 7371, 7372, 7373, 7374, 7375,
7376, 7377, 7378, 7379, 7380, 7381, 7382, 7383, 7384, 7385, 7386, 7387, 7388, 7389, 7390, 7391,
7392, 7393, 7394, 7395, 7396, 7397, 7398, 7399, 7400, 7401, 7402, 7403, 7404, 7405, 7406, 7407,
7408, 7409, 7410, 7411, 7412, 7413, 7414, 7415, 7416, 7417, 7418, 7419, 7420, 7421, 7422, 7423,
7424, 7425, 7426, 7427, 7428, 7429, 7430, 7431, 7432, 7433, 7434, 7435, 7436, 7437, 7438, 7439,
7440, 7441, 7442, 7443, 7444, 7445, 7446, 7447, 7448, 7449, 7450, 7451, 7452, 7453, 7454, 7455,
7456, 7457, 7458, 7459, 7460, 7461, 7462, 7463, 7464, 7465, 7466, 7467, 7468, 7469, 7470, 7471,
7472, 7473, 7474, 7475, 7476, 7477, 7478, 7479, 7480, 7481, 7482, 7483, 7484, 7485, 7486, 7487,
7488, 7489, 7490, 7491, 7492, 7493, 7494, 7495, 7496, 7497, 7498, 7499, 7500, 7501, 7502, 7503,
7504, 7505, 7506, 7507, 7508, 7509, 7510, 7511, 7512, 7513, 7514, 7515, 7516, 7517, 7518, 7519,
7520, 7521, 7522, 7523, 7524, 7525, 7526, 7527, 7528, 7529, 7530, 7531, 7532, 7533, 7534, 7535,
7536, 7537, 7538, 7539, 7540, 7541, 7542, 7543, 7544, 7545, 7546, 7547, 7548, 7549, 7550, 7551,
7552, 7553, 7554, 7555, 7556, 7557, 7558, 7559, 7560, 7561, 7562, 7563, 7564, 7565, 7566, 7567,
7568, 7569, 7570, 7571, 7572, 7573, 7574, 7575, 7576, 7577, 7578, 7579, 7580, 7581, 7582, 7583,
7584, 7585, 7586, 7587, 7588, 7589, 7590, 7591, 7592, 7593, 7594, 7595, 7596, 7597, 7598, 7599,
7600, 7601, 7602, 7603, 7604, 7605, 7606, 7607, 7608, 7609, 7610, 7611, 7612, 7613, 7614, 7615,
7616, 7617, 7618, 7619, 7620, 7621, 7622, 7623, 7624, 7625, 7626, 7627, 7628, 7629, 7630, 7631,
7632, 7633, 7634, 7635, 7636, 7637, 7638, 7639, 7640, 7641, 7642, 7643, 7644, 7645, 7646, 7647,
7648, 7649, 7650, 7651, 7652, 7653, 7654, 7655, 7656, 7657, 7658, 7659, 7660, 7661, 7662, 7663,
7664, 7665, 7666, 7667, 7668, 7669, 7670, 7671, 7672, 7673, 7674, 7675, 7676, 7677, 7678, 7679,
7680, 7681, 7682, 7683, 7684, 7685, 7686, 7687, 7688, 7689, 7690, 7691, 7692, 7693, 7694, 7695,
7696, 7697, 7698, 7699, 7700, 7701, 7702, 7703, 7704, 7705, 7706, 7707, 7708, 7709, 7710, 7711,
7712, 7713, 7714, 7715, 7716, 7717, 7718, 7719, 7720, 7721, 7722, 7723, 7724, 7725, 7726, 7727,
7728, 7729, 7730, 7731, 7732, 7733, 7734, 7735, 7736, 7737, 7738, 7739, 7740, 7741, 7742, 7743,
7744, 7745, 7746, 7747, 7748, 7749, 7750, 7751, 7752, 7753, 7754, 7755, 7756, 7757, 7758, 7759,
7760, 7761, 7762, 7763, 7764, 7765, 7766, 7767, 7768, 7769, 7770, 7771, 7772, 7773, 7774, 7775,
7776, 7777, 7778, 7779, 7780, 7781, 7782, 7783, 7784, 7785, 7786, 7787, 7788, 7789, 7790, 7791,
7792, 7793, 7794, 7795, 7796, 7797, 7798, 7799, 7800, 7801, 7802, 7803, 7804, 7805, 7806, 7807,
7808, 7809, 7810, 7811, 7812, 7813, 7814, 7815, 7816, 7817, 7818, 7819, 7820, 7821, 7822, 7823,
7824, 7825, 7826, 7827, 7828, 7829, 7830, 7831, 7832, 7833, 7834, 7835, 7836, 7837, 7838, 7839,
7840, 7841, 7842, 7843, 7844, 7845, 7846, 7847, 7848, 7849, 7850, 7851, 7852, 7853, 7854, 7855,
7856, 7857, 7858, 7859, 7860, 7861, 7862, 7863, 7864, 7865, 7866, 7867, 7868, 7869, 7870, 7871,
7872, 7873, 7874, 7875, 7876, 7877, 7878, 7879, 7880, 7881, 7882, 7883, 7884, 7885, 7886, 7887,
7888, 7889, 7890, 7891, 7892, 7893, 7894, 7895, 7896, 7897, 7898, 7899, 7900, 7901, 7902, 7903,
7904, 7905, 7906, 7907, 7908, 7909, 7910, 7911, 7912, 7913, 7914, 7915, 7916, 7917, 7918, 7919,
7920, 7921, 7922, 7923, 7924, 7925, 7926, 7927, 7928, 7929, 7930, 7931, 7932, 7933, 7934, 7935,
7936, 7937, 7938, 7939, 7940, 7941, 7942, 7943, 7944, 7945, 7946, 7947, 7948, 7949, 7950, 7951,
7952, 7953, 7954, 7955, 7956, 7957, 7958, 7959, 7960, 7961, 7962, 7963, 7964, 7965, 7966, 7967,
7968, 7969, 7970, 7971, 7972, 7973, 7974, 7975, 7976, 7977, 7978, 7979, 7980, 7981, 7982, 7983,
7984, 7985, 7986, 7987, 7988, 7989, 7990, 7991, 7992, 7993, 7994, 7995, 7996, 7997, 7998, 7999,
8000, 8001, 8002, 8003, 8004, 8005, 8006, 8007, 8008, 8009, 8010, 8011, 8012, 8013, 8014, 8015,
8016, 8017, 8018, 8019, 8020, 8021, 8022, 8023, 8024, 8025, 8026, 8027, 8028, 8029, 8030, 8031,
8032, 8033, 8034, 8035, 8036, 8037, 8038, 8039, 8040, 8041, 8042, 8043, 8044, 8045, 8046, 8047,
8048, 8049, 8050, 8051, 8052, 8053, 8054, 8055, 8056, 8057, 8058, 8059, 8060, 8061, 8062, 8063,
8064, 8065, 8066, 8067, 8068, 8069, 8070, 8071, 8072, 8073, 8074, 8075, 8076, 8077, 8078, 8079,
8080, 8081, 8082, 8083, 8084, 8085, 8086, 8087, 8088, 8089, 8090, 8091, 8092, 8093, 8094, 8095,
8096, 8097, 8098, 8099, 8100, 8101, 8102, 8103, 8104, 8105, 8106, 8107, 8108, 8109, 8110, 8111,
8112, 8113, 8114, 8115, 8116, 8117, 8118, 8119, 8120, 8121, 8122, 8123, 8124, 8125, 8126, 8127,
8128, 8129, 8130, 8131, 8132, 8133, 8134, 8135, 8136, 8137, 8138, 8139, 8140, 8141, 8142, 8143,
8144, 8145, 8146, 8147, 8148, 8149, 8150, 8151, 8152, 8153, 8154, 8155, 8156, 8157, 8158, 8159,
8160, 8161, 8162, 8163, 8164, 8165, 8166, 8167, 8168, 8169, 8170, 8171, 8172, 8173, 8174, 8175,
8176, 8177, 8178, 8179, 8180, 8181, 8182, 8183, 8184, 8185, 8186, 8187, 8188, 8189, 8190, 8191,
8192, 8193, 8194, 8195, 8196, 8197, 8198, 8199, 8200, 8201, 8202, 8203, 8204, 8205, 8206, 8207,
8208, 8209, 8210, 8211, 8212, 8213, 8214, 8215, 8216, 8217, 8218, 8219, 8220, 8221, 8222, 8223,
8224, 8225, 8226, 8227, 8228, 8229, 8230, 8231, 8232, 8233, 8234, 8235, 8236, 8237, 8238, 8239,
8240, 8241, 8242, 8243, 8244, 8245, 8246, 8247, 8248, 8249, 8250, 8251, 8252, 8253, 8254, 8255,
8256, 8257, 8258, 8259, 8260, 8261, 8262, 8263, 8264, 8265, 8266, 8267, 8268, 8269, 8270, 8271,
8272, 8273, 8274, 8275, 8276, 8277, 8278, 8279, 8280, 8281, 8282, 8283, 8284, 8285, 8286, 8287,
8288, 8289, 8290, 8291, 8292, 8293, 8294, 8295, 8296, 8297, 8298, 8299, 8300, 8301, 8302, 8303,
8304, 8305, 8306, 8307, 8308, 8309, 8310, 8311, 8312, 8313, 8314, 8315, 8316, 8317, 8318, 8319,
8320, 8321, 8322, 8323, 8324, 8325, 8326, 8327, 8328, 8329, 8330, 8331, 8332, 8333, 8334, 8335,
8336, 8337, 8338, 8339, 8340, 8341, 8342, 8343, 8344, 8345, 8346, 8347, 8348, 8349, 8350, 8351,
8352, 8353, 8354, 8355, 8356, 8357, 8358, 8359, 8360, 8361, 8362, 8363, 8364, 8365, 8366, 8367,
8368, 8369, 8370, 8371, 8372, 8373, 8374, 8375, 8376, 8377, 8378, 8379, 8380, 8381, 8382, 8383,
8384, 8385, 8386, 8387, 8388, 8389, 8390, 8391, 8392, 8393, 8394, 8395, 8396, 8397, 8398, 8399,
8400, 8401, 8402, 8403, 8404, 8405, 8406, 8407, 8408, 8409, 8410, 8411, 8412, 8413, 8414, 8415,
8416, 8417, 8418, 8419, 8420, 8421, 8422, 8423, 8424, 8425, 8426, 8427, 8428, 8429, 8430, 8431,
8432, 8433, 8434, 8435, 8436, 8437, 8438, 8439, 8440, 8441, 8442, 8443, 8444, 8445, 8446, 8447,
8448, 8449, 8450, 8451, 8452, 8453, 8454, 8455, 8456, 8457, 8458, 8459, 8460, 8461, 8462, 8463,
8464, 8465, 8466, 8467, 8468, 8469, 8470, 8471, 8472, 8473, 8474, 8475, 8476, 8477, 8478, 8479,
8480, 8481, 8482, 8483, 8484, 8485, 8486, 8487, 8488, 8489, 8490, 8491, 8492, 8493, 8494, 8495,
8496, 8497, 8498, 8499, 8500, 8501, 8502, 8503, 8504, 8505, 8506, 8507, 8508, 8509, 8510, 8511,
8512, 8513, 8514, 8515, 8516, 8517, 8518, 8519, 8520, 8521, 8522, 8523, 8524, 8525, 8526, 8527,
8528, 8529, 8530, 8531, 8532, 8533, 8534, 8535, 8536, 8537, 8538, 8539, 8540, 8541, 8542, 8543,
8544, 8545, 8546, 8547, 8548, 8549, 8550, 8551, 8552, 8553, 8554, 8555, 8556, 8557, 8558, 8559,
8560, 8561, 8562, 8563, 8564, 8565, 8566, 8567, 8568, 8569, 8570, 8571, 8572, 8573, 8574, 8575,
8576, 8577, 8578, 8579, 8580, 8581, 8582, 8583, 8584, 8585, 8586, 8587, 8588, 8589, 8590, 8591,
8592, 8593, 8594, 8595, 8596, 8597, 8598, 8599, 8600, 8601, 8602, 8603, 8604, 8605, 8606, 8607,
8608, 8609, 8610, 8611, 8612, 8613, 8614, 8615, 8616, 8617, 8618, 8619, 8620, 8621, 8622, 8623,
8624, 8625, 8626, 8627, 8628, 8629, 8630, 8631, 8632, 8633, 8634, 8635, 8636, 8637, 8638, 8639,
8640, 8641, 8642, 8643, 8644, 8645, 8646, 8647, 8648, 8649, 8650, 8651, 8652, 8653, 8654, 8655,
8656, 8657, 8658, 8659, 8660, 8661, 8662, 8663, 8664, 8665, 8666, 8667, 8668, 8669, 8670, 8671,
8672, 8673, 8674, 8675, 8676, 8677, 8678, 8679, 8680, 8681, 8682, 8683, 8684, 8685, 8686, 8687,
8688, 8689, 8690, 8691, 8692, 8693, 8694, 8695, 8696, 8697, 8698, 8699, 8700, 8701, 8702, 8703,
8704, 8705, 8706, 8707, 8708, 8709, 8710, 8711, 8712, 8713, 8714, 8715, 8716, 8717, 8718, 8719,
8720, 8721, 8722, 8723, 8724, 8725, 8726, 8727, 8728, 8729, 8730, 8731, 8732, 8733, 8734, 8735,
8736, 8737, 8738, 8739, 8740, 8741)
|
# -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2010 Tiny SPRL (<http://tiny.be>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
from content_index import indexer, cntIndex
from subprocess import Popen, PIPE
import StringIO
import odt2txt
import sys, zipfile, xml.dom.minidom
import logging
_logger = logging.getLogger(__name__)
def _to_unicode(s):
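    # Best-effort conversion to unicode: try UTF-8, then Latin-1 ('latin'
    # is a Python codec alias for ISO-8859-1); as a last resort the value
    # is passed through an ASCII encode or returned unchanged.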
try:
return s.decode('utf-8')
except UnicodeError:
try:
return s.decode('latin')
except UnicodeError:
try:
return s.encode('ascii')
except UnicodeError:
return s
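# Recursively concatenate all text nodes under a DOM element.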
def textToString(element):
    buffer = u""
    for node in element.childNodes:
        if node.nodeType == xml.dom.Node.TEXT_NODE:
            buffer += node.nodeValue
        elif node.nodeType == xml.dom.Node.ELEMENT_NODE:
            buffer += textToString(node)
    return buffer
class TxtIndex(indexer):
def _getMimeTypes(self):
return ['text/plain','text/html','text/diff','text/xml', 'text/*',
'application/xml']
def _getExtensions(self):
return ['.txt', '.py']
def _doIndexContent(self, content):
return content
cntIndex.register(TxtIndex())
class PptxIndex(indexer):
def _getMimeTypes(self):
return [ 'application/vnd.openxmlformats-officedocument.presentationml.presentation']
def _getExtensions(self):
return ['.pptx']
    def _doIndexFile(self, fname):
        def toString():
            """ Converts the document to a string. """
            buffer = u""
            for val in ["a:t"]:
                for paragraph in content.getElementsByTagName(val):
                    buffer += textToString(paragraph) + "\n"
            return buffer
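        # Note: toString() closes over 'content', which is rebound in the
        # loop below before each call.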
data = []
zip = zipfile.ZipFile(fname)
files = filter(lambda x: x.startswith('ppt/slides/slide'), zip.namelist())
for i in range(1, len(files) + 1):
content = xml.dom.minidom.parseString(zip.read('ppt/slides/slide%s.xml' % str(i)))
res = toString().encode('ascii','replace')
data.append(res)
return _to_unicode('\n'.join(data))
cntIndex.register(PptxIndex())
class DocIndex(indexer):
def _getMimeTypes(self):
return [ 'application/ms-word']
def _getExtensions(self):
return ['.doc']
def _doIndexFile(self, fname):
try:
pop = Popen(['antiword', fname], shell=False, stdout=PIPE)
(data, _) = pop.communicate()
#return _to_unicode(data)
return data
except OSError:
_logger.warning("Failed attempt to execute antiword (MS Word reader). Antiword is necessary to index the file %s of MIME type %s. Detailed error available at DEBUG level.", fname, self._getMimeTypes()[0])
_logger.debug("Trace of the failed file indexing attempt.", exc_info=True)
return u''
cntIndex.register(DocIndex())
class DocxIndex(indexer):
def _getMimeTypes(self):
return [ 'application/vnd.openxmlformats-officedocument.wordprocessingml.document']
def _getExtensions(self):
return ['.docx']
    def _doIndexFile(self, fname):
        zip = zipfile.ZipFile(fname)
        content = xml.dom.minidom.parseString(zip.read("word/document.xml"))

        def toString():
            """ Converts the document to a string. """
            buffer = u""
            for val in ["w:p", "w:h", "text:list"]:
                for paragraph in content.getElementsByTagName(val):
                    buffer += textToString(paragraph) + "\n"
            return buffer

        return _to_unicode(toString())
cntIndex.register(DocxIndex())
class XlsxIndex(indexer):
def _getMimeTypes(self):
return [ 'application/vnd.openxmlformats-officedocument.spreadsheetml.sheet']
def _getExtensions(self):
return ['.xlsx']
    def _doIndexFile(self, fname):
        zip = zipfile.ZipFile(fname)
        content = xml.dom.minidom.parseString(zip.read("xl/sharedStrings.xml"))

        def toString():
            """ Converts the document to a string. """
            buffer = u""
            for val in ["t"]:
                for paragraph in content.getElementsByTagName(val):
                    buffer += textToString(paragraph) + "\n"
            return buffer

        return _to_unicode(toString())
cntIndex.register(XlsxIndex())
class PdfIndex(indexer):
def _getMimeTypes(self):
return [ 'application/pdf']
def _getExtensions(self):
return ['.pdf']
def _doIndexFile(self, fname):
try:
pop = Popen(['pdftotext', '-enc', 'UTF-8', '-nopgbrk', fname, '-'], shell=False, stdout=PIPE)
(data, _) = pop.communicate()
return _to_unicode(data)
except OSError:
_logger.warning("Failed attempt to execute pdftotext. This program is necessary to index the file %s of MIME type %s. Detailed error available at DEBUG level.", fname, self._getMimeTypes()[0])
_logger.debug("Trace of the failed file indexing attempt.", exc_info=True)
return u''
cntIndex.register(PdfIndex())
class ImageNoIndex(indexer):
def _getMimeTypes(self):
return [ 'image/*']
def _getExtensions(self):
#better return no extension, and let 'file' do its magic
return []
#return ['.png','.jpg','.gif','.jpeg','.bmp','.tiff']
def _doIndexContent(self, content):
return 'image'
cntIndex.register(ImageNoIndex())
# other opendocument formats:
# chart-template chart database
# formula-template formula graphics-template graphics
# image
# presentation-template presentation spreadsheet-template spreadsheet
class OpenDoc(indexer):
""" Index OpenDocument files.
    Q: is it really worth it to index spreadsheets, or do we only get a
    meaningless list of numbers (cell contents)?
"""
def _getMimeTypes(self):
otypes = [ 'text', 'text-web', 'text-template', 'text-master' ]
return map(lambda a: 'application/vnd.oasis.opendocument.'+a, otypes)
def _getExtensions(self):
return ['.odt', '.ott', ] # '.ods'
def _doIndexContent(self, content):
s = StringIO.StringIO(content)
o = odt2txt.OpenDocumentTextFile(s)
result = _to_unicode(o.toString())
s.close()
return result
cntIndex.register(OpenDoc())
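# Usage sketch (an assumption, not part of this module: in production the
# framework dispatches through cntIndex, but a single indexer can be exercised
# directly for testing, e.g. on a hypothetical file path):
#
#   idx = PdfIndex()
#   text = idx._doIndexFile('/tmp/example.pdf')  # extracted text as unicode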
#eof
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
|
#!/usr/bin/env python
import cPickle
import sys
from matplotlib import pyplot as plt
from matplotlib import rcParams
import matplotlib.gridspec as gridspec
import numpy as np
import math
import argparse
import copy
import random
import wifi_im
def plot(data, time, args):
xyr = [point for point in data["xyr"] if point[0] < time]
states = [state for state in data["state_log"] if state[0] < time]
fit_model = [state for state in states if state[1] == "fit_model"]
walls = data["walls"]
if args.model:
xx = np.linspace(-12, 2, 24)
yy = np.linspace(-15, 20, 70)
rssi_plot = []
try:
rssi_plot = [model[1] for model in data["model_log"] if model[0] < time][-1]
        except (KeyError, IndexError):  # no model logged before this time; fit one below
xyr_s = []
if len(fit_model) > 0:
xyr_s = [xyr[i] for i in np.random.choice(len(xyr), min(len(xyr), 5000), replace=False)]
# xyr_s = copy.deepcopy([point for point in xyr if point[0] <= fit_model[-1][0]])
# random.shuffle(xyr_s)
rssi_plot = np.zeros((len(xx), len(yy)))
if len(xyr_s) > 0:
model = wifi_im.ScaledModel(wifi_im.FNN([100, 20]))
model.fit([point[1:3] for point in xyr_s], [point[3] for point in xyr_s])
for i in range(len(xx)):
for j in range(len(yy)):
rssi_plot[i, j] = model.predict([[xx[i], yy[j]]])
gs = gridspec.GridSpec(2, 3)
plt.subplot(gs[:, 0])
plt.title("hexbin")
plt.hexbin([x for t, x, y, r in xyr], [y for t, x, y, r in xyr], [r for t, x, y, r in xyr], gridsize=20, cmap=plt.get_cmap("gnuplot2"), vmin=-80, vmax=-20, extent=(-10, 0, -15, 20))
plt.plot([x for x,y in walls], [y for x,y in walls], "k,")
plt.plot([x for t, x, y, r in xyr], [y for t, x, y, r in xyr], color="#39FF14")
plt.gca().set_xlim((-12, 2))
plt.gca().set_xlabel("x [m]")
plt.gca().set_ylim((-15, 20))
plt.gca().set_ylabel("y [m]")
cbar = plt.colorbar()
cbar.set_label("mean rssi [dB]")
if args.model:
plt.subplot(gs[:, 1])
plt.title("model")
plt.pcolormesh(xx, yy, rssi_plot.T, cmap=plt.get_cmap("gnuplot2"), vmin=-80, vmax=-20)
plt.plot([x for x,y in walls], [y for x,y in walls], "k,")
plt.plot([x for t, x, y, r in xyr], [y for t, x, y, r in xyr], color="#39FF14")
plt.gca().set_xlim((-12, 2))
plt.gca().set_xlabel("x [m]")
plt.gca().set_ylim((-15, 20))
plt.gca().set_ylabel("y [m]")
cbar = plt.colorbar()
cbar.set_label("mean rssi [dB]")
if args.model:
plt.subplot(gs[0, 2:])
else:
plt.subplot(gs[0, 1:])
plt.title("log by time")
plt.plot([rssi[0]-data["xyr"][0][0] for rssi in xyr], [rssi[3] for rssi in xyr], "b.")
plt.gca().set_xlabel("time [s]")
plt.gca().set_ylabel("rssi [dB]")
plt.gca().set_ylim((-80, -20))
if args.model:
plt.subplot(gs[1, 2:])
else:
plt.subplot(gs[1, 1:])
plt.title("log by distance")
trajectory = [xyr[i][1:3] for i in range(len(xyr))]
    lengths = [math.hypot(trajectory[i][0] - trajectory[i-1][0], trajectory[i][1] - trajectory[i-1][1]) for i in range(1, len(trajectory))]
dist = [0.0]
for length in lengths:
dist.append(dist[-1]+length)
plt.plot(dist, [rssi[3] for rssi in xyr], "b.")
plt.gca().set_xlabel("distance travelled [m]")
plt.gca().set_ylabel("rssi [dB]")
plt.gca().set_ylim((-80, -20))
if __name__ == "__main__":
parser = argparse.ArgumentParser(description="analyze pickle files generated by wifi_im")
parser.add_argument("-v", "--video", help="generate a video", action="store_true")
parser.add_argument("-m", "--model", help="also show the model", action="store_true")
parser.add_argument("filename", help="pickle file")
args = parser.parse_args()
data = cPickle.load(open(args.filename, "rb"))
time_start = data["xyr"][0][0]
time_end = data["xyr"][-1][0]
times = [time_end]
if args.video:
times = np.arange(time_start+10.0, time_end, 10.0)
rcParams["font.family"] = "serif"
rcParams["xtick.labelsize"] = 6
rcParams["ytick.labelsize"] = 6
rcParams["axes.labelsize"] = 6
rcParams["axes.titlesize"] = 6
for i in range(len(times)):
print i, "/", len(times)
plt.clf()
plot(data, times[i], args)
plt.suptitle("{:.2f}".format(times[i]))
plt.gcf().set_size_inches((12, 9))
plt.savefig("frame"+str(i).zfill(6)+".jpg", dpi=100)
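# Example invocations (assuming this script is saved as plot_wifi.py and the
# pickle was produced by wifi_im):
#   python plot_wifi.py run.pickle        # one figure at the final timestamp
#   python plot_wifi.py -m run.pickle     # additionally render the fitted RSSI model
#   python plot_wifi.py -m -v run.pickle  # one frame every 10 s, e.g. for a video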
|
# Copyright (c) Mathias Kaerlev 2012.
# This file is part of Anaconda.
# Anaconda is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
# Anaconda is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
# You should have received a copy of the GNU General Public License
# along with Anaconda. If not, see <http://www.gnu.org/licenses/>.
"""
kcriched.mfx
Rich Edit Object - Clickteam (http://www.clickteam.com)
Enhanced editable formatted text box. Can display, load, and save RTF
files.
Ported to Python by Mathias Kaerlev
"""
from mmfparser.player.extensions.common import UserExtension, HiddenObject
from mmfparser.player.event.actions.common import Action
from mmfparser.player.event.conditions.common import Condition
from mmfparser.player.event.expressions.common import Expression
# Actions
class Action0(Action):
"""
Files->Load ASCII text
Parameters:
0: Please enter pathname (EXPSTRING, ExpressionParameter)
"""
def execute(self, instance):
raise NotImplementedError('%s not implemented' % (
str(self)))
class Action1(Action):
"""
Files->Load RTF text
Parameters:
0: Please enter pathname (EXPSTRING, ExpressionParameter)
"""
def execute(self, instance):
raise NotImplementedError('%s not implemented' % (
str(self)))
class Action2(Action):
"""
Files->Save ASCII text
Parameters:
0: Please enter pathname (EXPSTRING, ExpressionParameter)
"""
def execute(self, instance):
raise NotImplementedError('%s not implemented' % (
str(self)))
class Action3(Action):
"""
Files->Save RTF text
Parameters:
0: Please enter pathname (EXPSTRING, ExpressionParameter)
"""
def execute(self, instance):
raise NotImplementedError('%s not implemented' % (
str(self)))
class Action4(Action):
"""
Files->Load ASCII text from selector
"""
def execute(self, instance):
raise NotImplementedError('%s not implemented' % (
str(self)))
class Action5(Action):
"""
Files->Load RTF text from selector
"""
def execute(self, instance):
raise NotImplementedError('%s not implemented' % (
str(self)))
class Action6(Action):
"""
Files->Save ASCII text from selector
"""
def execute(self, instance):
raise NotImplementedError('%s not implemented' % (
str(self)))
class Action7(Action):
"""
Files->Save RTF text from selector
"""
def execute(self, instance):
raise NotImplementedError('%s not implemented' % (
str(self)))
class Action8(Action):
"""
Block->Replace selection
Parameters:
0: Replace selection (EXPSTRING, ExpressionParameter)
"""
def execute(self, instance):
raise NotImplementedError('%s not implemented' % (
str(self)))
class Action9(Action):
"""
Block->Cut
"""
def execute(self, instance):
raise NotImplementedError('%s not implemented' % (
str(self)))
class Action10(Action):
"""
Block->Copy
"""
def execute(self, instance):
raise NotImplementedError('%s not implemented' % (
str(self)))
class Action11(Action):
"""
Block->Paste
"""
def execute(self, instance):
raise NotImplementedError('%s not implemented' % (
str(self)))
class Action12(Action):
"""
Block->Clear
"""
def execute(self, instance):
raise NotImplementedError('%s not implemented' % (
str(self)))
class Action13(Action):
"""
Size->Set width
Parameters:
0: Set width (EXPRESSION, ExpressionParameter)
"""
def execute(self, instance):
raise NotImplementedError('%s not implemented' % (
str(self)))
class Action14(Action):
"""
Size->Set height
Parameters:
0: Set height (EXPRESSION, ExpressionParameter)
"""
def execute(self, instance):
raise NotImplementedError('%s not implemented' % (
str(self)))
class Action15(Action):
"""
Visibility->Show object
"""
def execute(self, instance):
raise NotImplementedError('%s not implemented' % (
str(self)))
class Action16(Action):
"""
Visibility->Hide object
"""
def execute(self, instance):
raise NotImplementedError('%s not implemented' % (
str(self)))
class Action17(Action):
"""
Control->Read only->On
"""
def execute(self, instance):
raise NotImplementedError('%s not implemented' % (
str(self)))
class Action18(Action):
"""
Control->Read only->Off
"""
def execute(self, instance):
raise NotImplementedError('%s not implemented' % (
str(self)))
class Action19(Action):
"""
Control->Window->Enabled
"""
def execute(self, instance):
instance.objectPlayer.set_enabled(True)
class Action20(Action):
"""
Control->Window->Disabled
"""
def execute(self, instance):
instance.objectPlayer.set_enabled(False)
class Action21(Action):
"""
Control->Goto->Character number...
Parameters:
0: Character number... (EXPRESSION, ExpressionParameter)
"""
def execute(self, instance):
raise NotImplementedError('%s not implemented' % (
str(self)))
class Action22(Action):
"""
Control->Goto->Line number...
Parameters:
0: Line number... (EXPRESSION, ExpressionParameter)
"""
def execute(self, instance):
raise NotImplementedError('%s not implemented' % (
str(self)))
class Action23(Action):
"""
Control->Find->Find text
Parameters:
0: Enter the string to find* (EXPSTRING, ExpressionParameter)
1: 0 : no case, 1 : case sensitive, 2 : whole word, 3 : both (EXPRESSION, ExpressionParameter)
"""
def execute(self, instance):
raise NotImplementedError('%s not implemented' % (
str(self)))
class Action24(Action):
"""
Control->Find->Find next
"""
def execute(self, instance):
raise NotImplementedError('%s not implemented' % (
str(self)))
class Action25(Action):
"""
Control->Find->Find and select text
Parameters:
0: Enter the string to find* (EXPSTRING, ExpressionParameter)
1: 0 : no case, 1 : case sensitive, 2 : whole word, 3 : both (EXPRESSION, ExpressionParameter)
"""
def execute(self, instance):
raise NotImplementedError('%s not implemented' % (
str(self)))
class Action26(Action):
"""
Control->Find->Find and select next
"""
def execute(self, instance):
raise NotImplementedError('%s not implemented' % (
str(self)))
class Action27(Action):
"""
Control->Window->Set focus
"""
def execute(self, instance):
raise NotImplementedError('%s not implemented' % (
str(self)))
class Action28(Action):
"""
Block->Select text
Parameters:
0: Enter the first character to select (EXPRESSION, ExpressionParameter)
1: Enter the last character to select (EXPRESSION, ExpressionParameter)
"""
def execute(self, instance):
raise NotImplementedError('%s not implemented' % (
str(self)))
class Action29(Action):
"""
Control->Background->Set color from string
Parameters:
0: Please enter the color, use a string with "Red, Green, Blue" (EXPSTRING, ExpressionParameter)
"""
def execute(self, instance):
raise NotImplementedError('%s not implemented' % (
str(self)))
class Action30(Action):
"""
Control->Undo->Undo
"""
def execute(self, instance):
raise NotImplementedError('%s not implemented' % (
str(self)))
class Action31(Action):
"""
Control->Undo->Clear UNDO buffer
"""
def execute(self, instance):
raise NotImplementedError('%s not implemented' % (
str(self)))
class Action32(Action):
"""
Control->Format->Bold->On
"""
def execute(self, instance):
raise NotImplementedError('%s not implemented' % (
str(self)))
class Action33(Action):
"""
Control->Format->Bold->Off
"""
def execute(self, instance):
raise NotImplementedError('%s not implemented' % (
str(self)))
class Action34(Action):
"""
Control->Format->Italic->On
"""
def execute(self, instance):
raise NotImplementedError('%s not implemented' % (
str(self)))
class Action35(Action):
"""
Control->Format->Italic->Off
"""
def execute(self, instance):
raise NotImplementedError('%s not implemented' % (
str(self)))
class Action36(Action):
"""
Control->Format->Underline->On
"""
def execute(self, instance):
raise NotImplementedError('%s not implemented' % (
str(self)))
class Action37(Action):
"""
Control->Format->Underline->Off
"""
def execute(self, instance):
raise NotImplementedError('%s not implemented' % (
str(self)))
class Action38(Action):
"""
Control->Format->Strike->On
"""
def execute(self, instance):
raise NotImplementedError('%s not implemented' % (
str(self)))
class Action39(Action):
"""
Control->Format->Strike->Off
"""
def execute(self, instance):
raise NotImplementedError('%s not implemented' % (
str(self)))
class Action40(Action):
"""
Control->Format->Font->Set font from selector
"""
def execute(self, instance):
raise NotImplementedError('%s not implemented' % (
str(self)))
class Action41(Action):
"""
Control->Format->Font->Set font color from string
Parameters:
0: Please enter the color, use a string with "Red, Green, Blue" (EXPSTRING, ExpressionParameter)
"""
def execute(self, instance):
raise NotImplementedError('%s not implemented' % (
str(self)))
class Action42(Action):
"""
Control->Format->Font->Set font color from selector
"""
def execute(self, instance):
raise NotImplementedError('%s not implemented' % (
str(self)))
class Action43(Action):
"""
Control->Set Text
Parameters:
0: Set Text (EXPSTRING, ExpressionParameter)
"""
def execute(self, instance):
instance.objectPlayer.add_line(self.evaluate_expression(
self.get_parameter(0)))
class Action44(Action):
"""
Paragraph->Align->Left
"""
def execute(self, instance):
raise NotImplementedError('%s not implemented' % (
str(self)))
class Action45(Action):
"""
Paragraph->Align->Center
"""
def execute(self, instance):
raise NotImplementedError('%s not implemented' % (
str(self)))
class Action46(Action):
"""
Paragraph->Align->Right
"""
def execute(self, instance):
raise NotImplementedError('%s not implemented' % (
str(self)))
class Action47(Action):
"""
Paragraph->Bullets->Off
"""
def execute(self, instance):
raise NotImplementedError('%s not implemented' % (
str(self)))
class Action48(Action):
"""
Paragraph->Bullets->On
"""
def execute(self, instance):
raise NotImplementedError('%s not implemented' % (
str(self)))
class Action49(Action):
"""
Block->Select all
"""
def execute(self, instance):
raise NotImplementedError('%s not implemented' % (
str(self)))
class Action50(Action):
"""
Paragraph->Indent
Parameters:
0: Please enter the indentation of the first line (EXPRESSION, ExpressionParameter)
1: Please enter the indentation of the second line (EXPRESSION, ExpressionParameter)
"""
def execute(self, instance):
raise NotImplementedError('%s not implemented' % (
str(self)))
class Action51(Action):
"""
Control->Scrolling->Scroll up
"""
def execute(self, instance):
raise NotImplementedError('%s not implemented' % (
str(self)))
class Action52(Action):
"""
Control->Scrolling->Scroll down
"""
def execute(self, instance):
raise NotImplementedError('%s not implemented' % (
str(self)))
class Action53(Action):
"""
Control->Scrolling->Scroll left
"""
def execute(self, instance):
raise NotImplementedError('%s not implemented' % (
str(self)))
class Action54(Action):
"""
Control->Scrolling->Scroll right
"""
def execute(self, instance):
raise NotImplementedError('%s not implemented' % (
str(self)))
class Action55(Action):
"""
Control->Format->Font->Set font name
Parameters:
0: Set font name (EXPSTRING, ExpressionParameter)
"""
def execute(self, instance):
raise NotImplementedError('%s not implemented' % (
str(self)))
class Action56(Action):
"""
Control->Format->Font->Set font size
Parameters:
0: Set font size (EXPRESSION, ExpressionParameter)
"""
def execute(self, instance):
raise NotImplementedError('%s not implemented' % (
str(self)))
class Action57(Action):
"""
Control->Format->Font->Set font color from integer
Parameters:
0: Please enter the color in the form : BLUE*65536 + GREEN*256 + RED (EXPRESSION, ExpressionParameter)
"""
def execute(self, instance):
raise NotImplementedError('%s not implemented' % (
str(self)))
class Action58(Action):
"""
Control->Background->Set color from integer
Parameters:
0: Please enter the color in the form : BLUE*65536 + GREEN*256 + RED (EXPRESSION, ExpressionParameter)
"""
def execute(self, instance):
raise NotImplementedError('%s not implemented' % (
str(self)))
class Action59(Action):
"""
Block->Unselect block
"""
def execute(self, instance):
raise NotImplementedError('%s not implemented' % (
str(self)))
class Action60(Action):
"""
Control->Scrolling->Scroll To Top
"""
def execute(self, instance):
raise NotImplementedError('%s not implemented' % (
str(self)))
class Action61(Action):
"""
Control->Scrolling->Scroll To Line
Parameters:
0: Enter line number (EXPRESSION, ExpressionParameter)
"""
def execute(self, instance):
raise NotImplementedError('%s not implemented' % (
str(self)))
class Action62(Action):
"""
Control->Scrolling->Scroll To End
"""
def execute(self, instance):
raise NotImplementedError('%s not implemented' % (
str(self)))
class Action63(Action):
"""
Files->Print text
"""
def execute(self, instance):
raise NotImplementedError('%s not implemented' % (
str(self)))
class Action64(Action):
"""
Control->Format->Link (2.0)->On
"""
def execute(self, instance):
raise NotImplementedError('%s not implemented' % (
str(self)))
class Action65(Action):
"""
Control->Format->Link (2.0)->Off
"""
def execute(self, instance):
raise NotImplementedError('%s not implemented' % (
str(self)))
class Action66(Action):
"""
Control->Undo->&Redo (2.0)
"""
def execute(self, instance):
raise NotImplementedError('%s not implemented' % (
str(self)))
class Action67(Action):
"""
Control->Set RTF Text
Parameters:
0: Set RTF Text (EXPSTRING, ExpressionParameter)
"""
def execute(self, instance):
raise NotImplementedError('%s not implemented' % (
str(self)))
class Action68(Action):
"""
Control->Format->Protection->On
"""
def execute(self, instance):
raise NotImplementedError('%s not implemented' % (
str(self)))
class Action69(Action):
"""
Control->Format->Protection->Off
"""
def execute(self, instance):
raise NotImplementedError('%s not implemented' % (
str(self)))
class Action70(Action):
"""
Control->&Modified->&Yes
"""
def execute(self, instance):
raise NotImplementedError('%s not implemented' % (
str(self)))
class Action71(Action):
"""
Control->&Modified->&No
"""
def execute(self, instance):
raise NotImplementedError('%s not implemented' % (
str(self)))
class Action72(Action):
"""
Control->Auto &Focus->O&n
"""
def execute(self, instance):
raise NotImplementedError('%s not implemented' % (
str(self)))
class Action73(Action):
"""
Control->Auto &Focus->O&ff
"""
def execute(self, instance):
raise NotImplementedError('%s not implemented' % (
str(self)))
# Conditions
class Condition0(Condition):
"""
Is object visible?
"""
def check(self, instance):
raise NotImplementedError('%s not implemented' % (
str(self)))
class Condition1(Condition):
"""
Is object enabled?
"""
def check(self, instance):
raise NotImplementedError('%s not implemented' % (
str(self)))
class Condition2(Condition):
"""
Is object read-only?
"""
def check(self, instance):
raise NotImplementedError('%s not implemented' % (
str(self)))
class Condition3(Condition):
"""
Has the object the input focus?
"""
def check(self, instance):
return False
class Condition4(Condition):
"""
Has the text been modified?
"""
def check(self, instance):
raise NotImplementedError('%s not implemented' % (
str(self)))
class Condition5(Condition):
"""
Undo->Is UNDO available?
"""
def check(self, instance):
raise NotImplementedError('%s not implemented' % (
str(self)))
class Condition6(Condition):
"""
Is text available in the clipboard?
"""
def check(self, instance):
raise NotImplementedError('%s not implemented' % (
str(self)))
class Condition7(Condition):
"""
Character->Is bold?
"""
def check(self, instance):
raise NotImplementedError('%s not implemented' % (
str(self)))
class Condition8(Condition):
"""
Character->Is italic?
"""
def check(self, instance):
raise NotImplementedError('%s not implemented' % (
str(self)))
class Condition9(Condition):
"""
Character->Is underlined?
"""
def check(self, instance):
raise NotImplementedError('%s not implemented' % (
str(self)))
class Condition10(Condition):
"""
Character->Is striked out?
"""
def check(self, instance):
raise NotImplementedError('%s not implemented' % (
str(self)))
class Condition11(Condition):
"""
Paragraph->Is aligned on the left?
"""
def check(self, instance):
raise NotImplementedError('%s not implemented' % (
str(self)))
class Condition12(Condition):
"""
Paragraph->Is centered?
"""
def check(self, instance):
raise NotImplementedError('%s not implemented' % (
str(self)))
class Condition13(Condition):
"""
Paragraph->Is aligned on the right?
"""
def check(self, instance):
raise NotImplementedError('%s not implemented' % (
str(self)))
class Condition14(Condition):
"""
Paragraph->Bullet selected?
"""
def check(self, instance):
raise NotImplementedError('%s not implemented' % (
str(self)))
class Condition15(Condition):
"""
Mouse->Clicked with left button?
"""
def check(self, instance):
raise NotImplementedError('%s not implemented' % (
str(self)))
class Condition16(Condition):
"""
Mouse->Clicked with right button?
"""
def check(self, instance):
raise NotImplementedError('%s not implemented' % (
str(self)))
class Condition17(Condition):
"""
Mouse->Double-clicked?
"""
def check(self, instance):
raise NotImplementedError('%s not implemented' % (
str(self)))
class Condition18(Condition):
"""
Mouse->Link clicked? (2.0)
Parameters:
0: Link text (EXPSTRING, ExpressionParameter)
"""
def check(self, instance):
raise NotImplementedError('%s not implemented' % (
str(self)))
class Condition19(Condition):
"""
Mouse->Any link clicked? (2.0)
"""
def check(self, instance):
raise NotImplementedError('%s not implemented' % (
str(self)))
class Condition20(Condition):
"""
Undo->Is REDO available? (2.0)
"""
def check(self, instance):
raise NotImplementedError('%s not implemented' % (
str(self)))
class Condition21(Condition):
"""
Character->Is link? (2.0)
"""
def check(self, instance):
raise NotImplementedError('%s not implemented' % (
str(self)))
class Condition22(Condition):
"""
Character->Is protected?
"""
def check(self, instance):
raise NotImplementedError('%s not implemented' % (
str(self)))
class Condition23(Condition):
"""
Scrolling->Is line visible?
Parameters:
0: Enter line number (EXPRESSION, ExpressionParameter)
"""
def check(self, instance):
raise NotImplementedError('%s not implemented' % (
str(self)))
class Condition24(Condition):
"""
Scrolling->Is line entirely visible
Parameters:
0: Enter line number (EXPRESSION, ExpressionParameter)
"""
def check(self, instance):
raise NotImplementedError('%s not implemented' % (
str(self)))
# Expressions
class Expression0(Expression):
"""
Caret->Get caret position
Return type: Int
"""
def get(self, instance):
raise NotImplementedError('%s not implemented' % (
str(self)))
class Expression1(Expression):
"""
Caret->Get caret line
Return type: Int
"""
def get(self, instance):
raise NotImplementedError('%s not implemented' % (
str(self)))
class Expression2(Expression):
"""
Position->Get X Position
Return type: Int
"""
def get(self, instance):
raise NotImplementedError('%s not implemented' % (
str(self)))
class Expression3(Expression):
"""
Position->Get Y Position
Return type: Int
"""
def get(self, instance):
raise NotImplementedError('%s not implemented' % (
str(self)))
class Expression4(Expression):
"""
Size->Get width
Return type: Int
"""
def get(self, instance):
raise NotImplementedError('%s not implemented' % (
str(self)))
class Expression5(Expression):
"""
Size->Get height
Return type: Int
"""
def get(self, instance):
raise NotImplementedError('%s not implemented' % (
str(self)))
class Expression6(Expression):
"""
Text->Get text
Parameters:
0: First character number (Int)
1: Second character number (Int)
Return type: Int
"""
def get(self, instance):
raise NotImplementedError('%s not implemented' % (
str(self)))
class Expression7(Expression):
"""
Text->Get selection
Return type: Int
"""
def get(self, instance):
raise NotImplementedError('%s not implemented' % (
str(self)))
class Expression8(Expression):
"""
Font->Get font name
Return type: Int
"""
def get(self, instance):
raise NotImplementedError('%s not implemented' % (
str(self)))
class Expression9(Expression):
"""
Font->Get font size
Return type: Int
"""
def get(self, instance):
raise NotImplementedError('%s not implemented' % (
str(self)))
class Expression10(Expression):
"""
Font->Get font color
Return type: Int
"""
def get(self, instance):
raise NotImplementedError('%s not implemented' % (
str(self)))
class Expression11(Expression):
"""
Scrolling->Get first visible line
Return type: Int
"""
def get(self, instance):
raise NotImplementedError('%s not implemented' % (
str(self)))
class Expression12(Expression):
"""
Scrolling->Get line count
Return type: Int
"""
def get(self, instance):
raise NotImplementedError('%s not implemented' % (
str(self)))
class Expression13(Expression):
"""
Link (2.0)->Get link text
Return type: Int
"""
def get(self, instance):
raise NotImplementedError('%s not implemented' % (
str(self)))
class Expression14(Expression):
"""
Text->Get RTF text
Parameters:
0: First character number (Int)
1: Second character number (Int)
Return type: Int
"""
def get(self, instance):
raise NotImplementedError('%s not implemented' % (
str(self)))
class Expression15(Expression):
"""
Text->Get RTF selection
Return type: Int
"""
def get(self, instance):
raise NotImplementedError('%s not implemented' % (
str(self)))
class Expression16(Expression):
"""
Text->Get total number of characters
Return type: Int
"""
def get(self, instance):
raise NotImplementedError('%s not implemented' % (
str(self)))
class Expression17(Expression):
"""
Link (2.0)->Get link position
Return type: Int
"""
def get(self, instance):
raise NotImplementedError('%s not implemented' % (
str(self)))
class DefaultObject(HiddenObject):
enabled = True
def created(self, data):
# you should probably make your object ready here.
# data is your property data
pass
def set_enabled(self, value):
self.enabled = value
def add_line(self, value):
print 'add_line:', value
class kcriched(UserExtension):
objectPlayer = DefaultObject
actions = {
0 : Action0,
1 : Action1,
2 : Action2,
3 : Action3,
4 : Action4,
5 : Action5,
6 : Action6,
7 : Action7,
8 : Action8,
9 : Action9,
10 : Action10,
11 : Action11,
12 : Action12,
16 : Action13,
17 : Action14,
18 : Action15,
19 : Action16,
20 : Action17,
21 : Action18,
22 : Action19,
23 : Action20,
24 : Action21,
25 : Action22,
26 : Action23,
27 : Action24,
28 : Action25,
29 : Action26,
30 : Action27,
31 : Action28,
32 : Action29,
33 : Action30,
34 : Action31,
35 : Action32,
36 : Action33,
37 : Action34,
38 : Action35,
39 : Action36,
40 : Action37,
41 : Action38,
42 : Action39,
43 : Action40,
44 : Action41,
45 : Action42,
46 : Action43,
47 : Action44,
48 : Action45,
49 : Action46,
50 : Action47,
51 : Action48,
52 : Action49,
53 : Action50,
54 : Action51,
55 : Action52,
56 : Action53,
57 : Action54,
58 : Action55,
59 : Action56,
60 : Action57,
61 : Action58,
62 : Action59,
63 : Action60,
64 : Action61,
65 : Action62,
66 : Action63,
67 : Action64,
68 : Action65,
69 : Action66,
70 : Action67,
71 : Action68,
72 : Action69,
73 : Action70,
74 : Action71,
75 : Action72,
76 : Action73,
}
conditions = {
0 : Condition0,
1 : Condition1,
2 : Condition2,
3 : Condition3,
4 : Condition4,
5 : Condition5,
6 : Condition6,
7 : Condition7,
8 : Condition8,
9 : Condition9,
10 : Condition10,
11 : Condition11,
12 : Condition12,
13 : Condition13,
14 : Condition14,
15 : Condition15,
16 : Condition16,
17 : Condition17,
18 : Condition18,
19 : Condition19,
20 : Condition20,
21 : Condition21,
22 : Condition22,
23 : Condition23,
24 : Condition24,
}
expressions = {
0 : Expression0,
1 : Expression1,
2 : Expression2,
3 : Expression3,
4 : Expression4,
5 : Expression5,
6 : Expression6,
7 : Expression7,
8 : Expression8,
9 : Expression9,
10 : Expression10,
11 : Expression11,
12 : Expression12,
13 : Expression13,
14 : Expression14,
15 : Expression15,
16 : Expression16,
17 : Expression17,
}
extension = kcriched()
def get_extension():
return extension
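# Dispatch sketch (an assumption about the surrounding runtime, which lives in
# mmfparser.player rather than in this file): the player resolves handlers by
# numeric id through the tables above, roughly
#
#   handler_class = kcriched.actions[46]   # -> Action43, "Control->Set Text"
#   handler_class(...).execute(instance)   # appends the evaluated text line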
|
"""
Python model "Teacup.py"
Translated using PySD version 1.3.0
"""
from os import path
from pysd.py_backend.functions import Integ
from pysd import cache
_subscript_dict = {}
_namespace = {
"TIME": "time",
"Time": "time",
"Characteristic Time": "characteristic_time",
"Heat Loss to Room": "heat_loss_to_room",
"Room Temperature": "room_temperature",
"Teacup Temperature": "teacup_temperature",
"FINAL TIME": "final_time",
"INITIAL TIME": "initial_time",
"SAVEPER": "saveper",
"TIME STEP": "time_step",
}
__pysd_version__ = "1.3.0"
__data = {"scope": None, "time": lambda: 0}
_root = path.dirname(__file__)
def _init_outer_references(data):
for key in data:
__data[key] = data[key]
def time():
return __data["time"]()
@cache.run
def characteristic_time():
"""
Real Name: Characteristic Time
Original Eqn: 10
Units: Minutes
Limits: (None, None)
Type: constant
Subs: None
"""
return 10
@cache.step
def heat_loss_to_room():
"""
Real Name: Heat Loss to Room
Original Eqn: (Teacup Temperature - Room Temperature) / Characteristic Time
Units: Degrees/Minute
Limits: (None, None)
Type: component
Subs: None
This is the rate at which heat flows from the cup into the room. We can
ignore it at this point.
"""
return (teacup_temperature() - room_temperature()) / characteristic_time()
@cache.run
def room_temperature():
"""
Real Name: Room Temperature
Original Eqn: 70
Units:
Limits: (None, None)
Type: constant
Subs: None
"""
return 70
@cache.step
def teacup_temperature():
"""
Real Name: Teacup Temperature
Original Eqn: INTEG ( -Heat Loss to Room, 180)
Units: Degrees
Limits: (None, None)
Type: component
Subs: None
"""
return _integ_teacup_temperature()
@cache.run
def final_time():
"""
Real Name: FINAL TIME
Original Eqn: 30
Units: Minute
Limits: (None, None)
Type: constant
Subs: None
The final time for the simulation.
"""
return 30
@cache.run
def initial_time():
"""
Real Name: INITIAL TIME
Original Eqn: 0
Units: Minute
Limits: (None, None)
Type: constant
Subs: None
The initial time for the simulation.
"""
return 0
@cache.step
def saveper():
"""
Real Name: SAVEPER
Original Eqn: TIME STEP
Units: Minute
Limits: (0.0, None)
Type: component
Subs: None
The frequency with which output is stored.
"""
return time_step()
@cache.run
def time_step():
"""
Real Name: TIME STEP
Original Eqn: 0.125
Units: Minute
Limits: (0.0, None)
Type: constant
Subs: None
The time step for the simulation.
"""
return 0.125
_integ_teacup_temperature = Integ(lambda: -heat_loss_to_room(), lambda: 180)
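# Usage sketch (an assumption: this translated module is normally driven
# through PySD's public API rather than imported directly):
#
#   import pysd
#   model = pysd.load("Teacup.py")
#   results = model.run()   # integrates from INITIAL TIME to FINAL TIME
#   print(results["Teacup Temperature"].head())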
|
# -*- coding: utf-8 -*-
import datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
def forwards(self, orm):
# Adding field 'FeedEntry.author'
db.add_column('newspeak_feedentry', 'author',
self.gf('django.db.models.fields.CharField')(default='', max_length=255, blank=True),
keep_default=False)
def backwards(self, orm):
# Deleting field 'FeedEntry.author'
db.delete_column('newspeak_feedentry', 'author')
models = {
'newspeak.feed': {
'Meta': {'object_name': 'Feed'},
'active': ('django.db.models.fields.BooleanField', [], {'default': 'True', 'db_index': 'True'}),
'description': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
'error_date': ('django.db.models.fields.DateTimeField', [], {'null': 'True'}),
'error_description': ('django.db.models.fields.TextField', [], {}),
'error_state': ('django.db.models.fields.BooleanField', [], {'default': 'False', 'db_index': 'True'}),
'filters': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'to': "orm['newspeak.KeywordFilter']", 'null': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'link': ('django.db.models.fields.URLField', [], {'max_length': '200', 'blank': 'True'}),
'subtitle': ('django.db.models.fields.CharField', [], {'max_length': '1024', 'blank': 'True'}),
'title': ('django.db.models.fields.CharField', [], {'max_length': '1024', 'blank': 'True'}),
'updated': ('django.db.models.fields.DateTimeField', [], {'null': 'True'}),
'url': ('django.db.models.fields.URLField', [], {'max_length': '200'})
},
'newspeak.feedentry': {
'Meta': {'ordering': "('-published',)", 'object_name': 'FeedEntry'},
'author': ('django.db.models.fields.CharField', [], {'max_length': '255', 'blank': 'True'}),
'entry_id': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'db_index': 'True'}),
'feed': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'entries'", 'to': "orm['newspeak.Feed']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'link': ('django.db.models.fields.URLField', [], {'max_length': '255', 'db_index': 'True'}),
'published': ('django.db.models.fields.DateTimeField', [], {}),
'summary': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
'title': ('django.db.models.fields.CharField', [], {'max_length': '1024'}),
'updated': ('django.db.models.fields.DateTimeField', [], {'null': 'True'})
},
'newspeak.keywordfilter': {
'Meta': {'object_name': 'KeywordFilter'},
'filter_summary': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'filter_title': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'keywords': ('django.db.models.fields.TextField', [], {}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '255'})
}
}
complete_apps = ['newspeak']
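# Typical invocation (assuming South is installed and this file lives in
# newspeak/migrations/):
#   python manage.py migrate newspeak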
|