"""
[2014-11-26] Challenge #190 [Intermediate] Words inside of words
https://www.reddit.com/r/dailyprogrammer/comments/2nihz6/20141126_challenge_190_intermediate_words_inside/
#Description
This week's challenge is a short yet interesting one that should hopefully help you exercise elegant solutions to a
problem rather than brute-forcing a challenge.
#Challenge
Given the wordlist [enable1.txt](http://www.joereynoldsaudio.com/enable1.txt), you must find the word in that file
which also contains the greatest number of words within that word.
For example, the word 'grayson' has the following words in it
Grayson
Gray
Grays
Ray
Rays
Son
On
Here's another example, the word 'reports' has the following
reports
report
port
ports
rep
You're tasked with finding the word in that file that contains the most words.
NOTE : If you have a different wordlist you would like to use, you're free to do so.
#Restrictions
* To keep output slightly shorter, a word will only be considered a word if it is 2 or more letters in length
* The word you are using may not be permuted to get a different set of words (You can't change 'report' to 'repotr' so
that you can add more words to your list)
#Finally
Have a good challenge idea?
Consider submitting it to /r/dailyprogrammer_ideas
"""
def main():
pass
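# A minimal sketch of one possible solution (not part of the original
# submission): it assumes enable1.txt sits in the working directory and, for
# each word, counts the distinct dictionary words of length >= 2 that appear
# as contiguous substrings, per the rules above. A trie over the word list
# would speed this up considerably; this version favours clarity.
def find_wordiest_word(wordlist_path='enable1.txt'):
    with open(wordlist_path) as f:
        words = set(w.strip().lower() for w in f if len(w.strip()) >= 2)

    def count_subwords(word):
        found = set()
        for start in range(len(word)):
            for end in range(start + 2, len(word) + 1):
                if word[start:end] in words:
                    found.add(word[start:end])
        return len(found)

    return max(words, key=count_subwords)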
if __name__ == "__main__":
main()
|
import ConfigParser
import os
import sys
# Dealing with registering match and target modules
match_modules_registered = {}
target_modules_registered = {}
def register_target_module(name, func):
target_modules_registered[name.lower()] = func
def get_target_module_func(name):
if name.lower() in target_modules_registered.keys():
return target_modules_registered[name.lower()]
else:
return None # chain target
def register_match_module(name, func):
match_modules_registered[name.lower()] = func
def get_match_module_func(name):
return match_modules_registered[name.lower()]
def register_all():
# Open the modules.conf file and register all modules
configp = ConfigParser.ConfigParser()
configp.read("modules.conf")
match_modules = configp.get("Modules", "match").split(',')
target_modules = configp.get("Modules", "target").split(',')
for match_module in match_modules:
sys.path.append('modules/match')
module = __import__(match_module)
match_func = getattr(module, 'match_prepare')
register_match_module(match_module, match_func)
for target_module in target_modules:
sys.path.append('modules/target')
module = __import__(target_module)
target_func = getattr(module, 'target_prepare')
register_target_module(target_module, target_func)
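# For reference, a hypothetical modules.conf that register_all() above would
# parse (the section and option names come from the code; the module names are
# placeholders). Each listed module is expected to live under modules/match/
# or modules/target/ and expose match_prepare / target_prepare respectively.
#
#   [Modules]
#   match = regex_match,exact_match
#   target = log_target,drop_target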
|
#!/usr/bin/env python
'''
Created on March 20 2016
@author: popotvin
'''
import mqtt_zway_test
import mqtt_zway
import paho.mqtt.client as mqtt
import time
import traceback
date_time = mqtt_zway_test.date_time
# Main variables
mqtt_old_payload = []
mqtt_new_payload = []
payload = {}
publish_string = ""
# MQTT config
outgoing_topic = mqtt_zway_test.outgoing_topic
ongoing_topic = mqtt_zway_test.ongoing_topic
mqtt_ip = mqtt_zway_test.mqtt_ip
mqtt_port = mqtt_zway_test.mqtt_port
mqtt_client = mqtt_zway_test.mqtt_client
# ZWAY config
zway_ip = mqtt_zway_test.zway_ip
zway_port = mqtt_zway_test.zway_port
# list of connected devices on the zway server (device_id, device type, device level value)
zway_devList = mqtt_zway.zway_devList(zway_ip,zway_port)
# MQTT Client init
mqttc = mqtt.Client(str(mqtt_client))
mqttc.on_subscribe = mqtt_zway_test.on_subscribe
mqttc.on_message = mqtt_zway_test.on_message
mqttc.on_connect = mqtt_zway_test.on_connect
mqttc.connect(mqtt_ip, mqtt_port)
# Test zway and MQTT servers
zway_test = mqtt_zway.server_test(zway_ip, zway_port)
mqtt_test = mqtt_zway.server_test(mqtt_ip, mqtt_port)
# Main loop
if zway_test and mqtt_test:
print "ZWAY is running at: %s"% str(date_time)
print "MQTT is running at: %s"% str(date_time)
while True:
try:
mqttc.loop()
for key, value in zway_devList.dev_dict().iteritems():
for i,j in value.iteritems():
if i == "id":
dev_id = j
elif i == "type":
dev_type = j
zway_devList.dev_get(dev_id, dev_type)
payload["device_id"] = str(dev_id)
payload["type"] = str(dev_type)
payload["value"] = zway_devList.dev_value(dev_id, dev_type)
mqtt_new_payload.append(dict(payload))
time.sleep(0.1)
if mqtt_old_payload != mqtt_new_payload:
mqttc.publish(outgoing_topic, str(mqtt_new_payload))
#print "published to mQTT: %s" % mqtt_new_payload
mqtt_old_payload = mqtt_new_payload
mqtt_new_payload = []
time.sleep(0.5)
except Exception, e:
traceback.print_exc()
break
elif not zway_test:
print "ZWAY server is offline"
elif not mqtt_test:
print "MQTT server is Offline"
|
from __future__ import division
from itertools import product
from nltk.tokenize import RegexpTokenizer
from nltk.translate.bleu_score import sentence_bleu
from nltk.util import skipgrams
from nltk.corpus import wordnet
from treetagger import TreeTagger
import numpy as np
import text2int as t2i
import copy
import sys
import pickle
import word2vec
import math
import os
from munkres import Munkres
import scipy.spatial
def combinations(n, k):
f = math.factorial
return f(n) / (f(k) * f(n - k))
# Comprising sage advice from:
# http://www.kozareva.com/papers/fintalKozareva.pdf
# http://web.science.mq.edu.au/~rdale/publications/papers/2006/swan-final.pdf
def computeSimple(sentence1, sentence2):
features = [0] * 8  # feature 7 is assigned below, so 8 slots are needed
tokenizer = RegexpTokenizer(r'\w+')
words1 = tokenizer.tokenize(sentence1)
words2 = tokenizer.tokenize(sentence2)
n = len(words1)
m = len(words2)
# word overlap features
count = 0 # num of same words in sentence
for word1 in words1:
for word2 in words2:
if word1 == word2:
count += 1
features[0] = count / n # "precision"
features[1] = count / m # "recall"
features[2] = sentence_bleu([sentence1], sentence2)
features[3] = sentence_bleu([sentence2], sentence1)
# Obtain pairs of adjacent words
skipgrams1 = list(skipgrams(words1, 2, 0))
skipgrams2 = list(skipgrams(words2, 2, 0))  # materialise: the nested loop below re-iterates these
count = 0
for gram1 in skipgrams1:
for gram2 in skipgrams2:
if gram1 == gram2:
count += 1
features[4] = count / combinations(n, count)
features[5] = count / combinations(m, count)
"""if (n > m):
features[6] = m / n
else:
features[6] = n / m"""
if len(sentence1) > len(sentence2):
features[7] = len(sentence2) / len(sentence1)
else:
features[7] = len(sentence1) / len(sentence2)
return features
# Uses treetagger-python (Installation https://github.com/miotto/treetagger-python ; http://www.cis.uni-muenchen.de/~schmid/tools/TreeTagger/)
try:
semanticsimilarity_lookuptable = pickle.load(open('semanticsimilarity_lookuptable.pkl', 'rb'))
except Exception:
semanticsimilarity_lookuptable = {}
print "Build Word2Vec Corpus"
dir = os.path.dirname(os.path.abspath(__file__))
try:
# on OSX for some reason this does not work
word2vec.word2phrase(dir + '/text8', dir + '/text8-phrases', verbose=True)
word2vec.word2vec(dir + '/text8-phrases', dir + '/text8.bin', size=100, verbose=True)
except Exception as e:
print e
model = word2vec.load(dir + '/text8.bin')
print "Finish"
def computeSemantics(sentence1, sentence2):
def computeSemanticSimilarityFeatures(sentence1, sentence2):
features = [0] * 9
if (sentence1 + sentence2) not in semanticsimilarity_lookuptable:
def prepareSentence(sentence):
return sentence.replace('-', ' ').replace('$', ' ')
tt = TreeTagger(language='english')
tags1 = [a for a in tt.tag(prepareSentence(sentence1)) if len(a) > 1]
tags2 = [a for a in tt.tag(prepareSentence(sentence2)) if len(a) > 1]
semanticsimilarity_lookuptable[sentence1 + sentence2] = [tags1, tags2]
tags1 = copy.deepcopy(semanticsimilarity_lookuptable[sentence1 + sentence2][0])
tags2 = copy.deepcopy(semanticsimilarity_lookuptable[sentence1 + sentence2][1])
# Feature: noun/web semantic similarity
# Get Synonym set
def synSet(tags):
for word in tags:
# Only compare nouns ('N...') or verbs ('VV...'); the conditional
# expressions guard against tags shorter than the prefix being tested
if (word[1][0] != 'N' if len(word[1]) >= 1 else 1) and (word[1][:2] != 'VV' if len(word[1]) >= 2 else 1):
continue
word.append(wordnet.synsets(word[2]))
synSet(tags=tags1)
synSet(tags=tags2)
simsMaxNoun = []
simsAvgNoun = []
simsMaxVerb = []
simsAvgVerb = []
for word1, word2 in product(tags1, tags2):
type1 = word1[1]
type2 = word2[1]
if (type1[0] != 'N' and type1[:2] != 'VV') or type1 != type2:
continue
similarityMax = 0
similarityAvg = 0
if word1[2] == word2[2]:
similarityAvg = 1
similarityMax = 1
else:
for sense1, sense2 in product(word1[3], word2[3]):
sim = wordnet.wup_similarity(sense1, sense2)
similarityMax = max(similarityMax, sim)
similarityAvg += sim if sim is not None else 0
if type1[0] == 'N':  # noun pairs feed the noun features; verb pairs are handled below
simsMaxNoun.append(similarityMax)
simsAvgNoun.append(similarityAvg / (len(word1[3]) + len(word2[3])) if len(word1[3]) + len(word2[3]) > 0 else 0)
else:
simsMaxVerb.append(similarityMax)
simsAvgVerb.append(similarityAvg / (len(word1[3]) + len(word2[3])) if len(word1[3]) + len(word2[3]) > 0 else 0)
features[0] = np.sum(simsMaxNoun) / len(simsMaxNoun) if len(simsMaxNoun) > 0 else 0
features[1] = np.sum(simsAvgNoun) / len(simsAvgNoun) if len(simsAvgNoun) > 0 else 0
features[2] = np.sum(simsMaxVerb) / len(simsMaxVerb) if len(simsMaxVerb) > 0 else 0
features[3] = np.sum(simsAvgVerb) / len(simsAvgVerb) if len(simsAvgVerb) > 0 else 0
# Feature: Cardinal number similarity
def findCardinals(tags):
cardinals = []
for index, word1 in enumerate(tags):
if word1[1] == 'CD':
# is "more", "over" or "above" before?
before = [a[0] for a in tags[max(index-2, 0):index]]
try:
val = float(word1[0])
except ValueError:
val = t2i.text2int(word1[0])
maxValue = minValue = val
if ("more" in before) or ("over" in before) or ("above" in before) or ("greater" in before):
maxValue = sys.maxint
minValue += 1
elif ("less" in before) or ("under" in before) or ("below" in before) or ("smaller" in before):
minValue = -sys.maxint - 1
maxValue -= 1
cardinals.append([minValue, maxValue])
return cardinals
cardinals1 = findCardinals(tags=tags1)
cardinals2 = findCardinals(tags=tags2)
def countCDMatches(cardinals1, cardinals2):
count = 0
for cd1 in cardinals1:
for cd2 in cardinals2:
if cd1[0] == cd2[0] and cd1[1] == cd2[1]:
count += 1
break
return count
features[4] = (countCDMatches(cardinals1, cardinals2) + countCDMatches(cardinals2, cardinals1)) / (len(cardinals1) + len(cardinals2)) if len(cardinals1) + len(cardinals2) > 0 else 1
#features[2] = countCDMatches(cardinals1, cardinals2) / len(cardinals1) if len(cardinals1) > 0 else 1
#features[3] = countCDMatches(cardinals2, cardinals1) / len(cardinals2) if len(cardinals2) > 0 else 1
# Feature: Proper Name
def findProperNouns(tags):
nouns = []
for word in tags:
if word[1] == 'NPS':
nouns.append(word[0])
return nouns
def countNounMatches(nouns1, nouns2):
count = 0
for noun1 in nouns1:
for noun2 in nouns2:
if noun1 == noun2:
count += 1
break
return count
nouns1 = findProperNouns(tags1)
nouns2 = findProperNouns(tags2)
features[5] = (countNounMatches(nouns1, nouns2) + countNounMatches(nouns2, nouns1)) / (len(nouns1) + len(nouns2)) if len(nouns1) + len(nouns2) > 0 else 1
# features[4] = countNounMatches(nouns1, nouns2) / len(nouns1) if len(nouns1) > 0 else 1
# features[5] = countNounMatches(nouns2, nouns1) / len(nouns2) if len(nouns2) > 0 else 1
# Feature: Word2Vec (all)
meaning1 = np.zeros(model.vectors.shape[1])
for word in tags1:
if word[2] in model:
meaning1 += model[word[2]]
meaning2 = np.zeros(model.vectors.shape[1])
for word in tags2:
if word[2] in model:
meaning2 += model[word[2]]
diffMeaning = meaning1 - meaning2
features[6] = np.linalg.norm(diffMeaning)
features[7] = scipy.spatial.distance.cosine(meaning1, meaning2)
similarityMatrix = [0] * len(tags1)
for index1, word1 in enumerate(tags1):
row = [0]*len(tags2)
for index2, word2 in enumerate(tags2):
similarityMax = 0
if len(word1) > 3 and len(word2) > 3:
for sense1, sense2 in product(word1[3], word2[3]):
sim = wordnet.wup_similarity(sense1, sense2)
similarityMax = max(similarityMax, sim)
similarityMax = 1 - similarityMax
else:
similarityMax = 1
row[index2] = similarityMax
similarityMatrix[index1] = row
m = Munkres()
totalCost = 0
indices = m.compute(similarityMatrix)
for row, column in indices:
totalCost += similarityMatrix[row][column]
features[8] = totalCost / len(indices)
return features
|
#!/usr/bin/env python2
import re
import os
import sys
import time
import json
import zlib
import argparse
import requests
from dateutil.parser import parse
# Regular expression patterns
re_commit_author = re.compile(
r'^author (?P<name>.+) <(?P<email>.+)> (?P<time>\d+) (?P<tz>[+-]\d+)$',
re.MULTILINE
)
# Helpers
def fail(msg, retcode=1):
"""Show failure message and exit."""
print("Error: {0:s}".format(msg))
sys.exit(retcode)
def include_translations_in_manifest(app_name, manifest):
for i in os.listdir("locales"):
if not i.endswith("json"):
continue
if i == "en.json":
continue
current_lang = i.split(".")[0]
translations = json.load(open(os.path.join("locales", i), "r"))
key = "%s_manifest_description" % app_name
if key in translations and translations[key]:
manifest["description"][current_lang] = translations[key]
for category, questions in manifest["arguments"].items():
for question in questions:
key = "%s_manifest_arguments_%s_%s" % (app_name, category, question["name"])
# don't overwrite an already existing translation in manifests for now
if key in translations and translations[key] and current_lang not in question["ask"]:
print "[ask]", current_lang, key
question["ask"][current_lang] = translations[key]
key = "%s_manifest_arguments_%s_help_%s" % (app_name, category, question["name"])
# don't overwrite an already existing translation in manifests for now
if key in translations and translations[key] and current_lang not in question.get("help", []):
print "[help]", current_lang, key
question.setdefault("help", {})[current_lang] = translations[key]
return manifest
def get_json(url, verify=True):
try:
# Retrieve and load manifest
if ".github" in url:
r = requests.get(url, verify=verify, auth=token)
else:
r = requests.get(url, verify=verify)
r.raise_for_status()
return r.json()
except requests.exceptions.RequestException as e:
print("-> Error: unable to request %s, %s" % (url, e))
return None
except ValueError as e:
print("-> Error: unable to decode json from %s : %s" % (url, e))
return None
def get_zlib(url, verify=True):
try:
# Retrieve last commit information
r = requests.get(url, verify=verify)
r.raise_for_status()
return zlib.decompress(r.content).decode('utf-8').split('\x00')
except requests.exceptions.RequestException as e:
print("-> Error: unable to request %s, %s" % (obj_url, e))
return None
except zlib.error as e:
print("-> Error: unable to decompress object from %s : %s" % (url, e))
return None
# Main
# Create argument parser
parser = argparse.ArgumentParser(description='Process YunoHost application list.')
# Add arguments and options
parser.add_argument("input", help="Path to json input file")
parser.add_argument("-o", "--output", help="Path to result file. If not specified, '-build' suffix will be added to input filename.")
parser.add_argument("-g", "--github", help="Github token <username>:<password>")
# Parse args
args = parser.parse_args()
try:
# Retrieve apps list from json file
with open(args.input) as f:
apps_list = json.load(f)
except IOError as e:
fail("%s file not found" % args.input)
# Get list name from filename
list_name = os.path.splitext(os.path.basename(args.input))[0]
print(":: Building %s list..." % list_name)
# Args default
if not args.output:
args.output = '%s-build.json' % list_name
already_built_file = {}
if os.path.exists(args.output):
try:
already_built_file = json.load(open(args.output))
except Exception as e:
print("Error while trying to load already built file: %s" % e)
# GitHub credentials
if args.github:
token = (args.github.split(':')[0], args.github.split(':')[1])
else:
token = None
# Loop through every app
result_dict = {}
for app, info in apps_list.items():
print("---")
print("Processing '%s'..." % app)
app = app.lower()
# Store useful values
app_branch = info['branch']
app_url = info['url']
app_rev = info['revision']
app_state = info["state"]
app_level = info.get("level")
app_maintained = info.get("maintained", True)
forge_site = app_url.split('/')[2]
owner = app_url.split('/')[3]
repo = app_url.split('/')[4]
if forge_site == "github.com":
forge_type = "github"
elif forge_site == "framagit.org":
forge_type = "gitlab"
elif forge_site == "code.ffdn.org":
forge_type = "gogs"
else:
forge_type = "unknown"
previous_state = already_built_file.get(app, {}).get("state", {})
manifest = {}
timestamp = None
previous_rev = already_built_file.get(app, {}).get("git", {}).get("revision", None)
previous_url = already_built_file.get(app, {}).get("git", {}).get("url")
previous_level = already_built_file.get(app, {}).get("level")
previous_maintained = already_built_file.get(app, {}).get("maintained")
if forge_type == "github" and app_rev == "HEAD":
if previous_rev is None:
previous_rev = 'HEAD'
url = "https://api.github.com/repos/{}/{}/git/refs/heads/{}".format(owner, repo, app_branch)
head = get_json(url)
app_rev = head["object"]["sha"]
url = "https://api.github.com/repos/{}/{}/compare/{}...{}".format(owner, repo, previous_rev, app_branch)
diff = get_json(url)
if not diff["commits"]:
app_rev = previous_rev if previous_rev != 'HEAD' else app_rev
else:
# Only if those files got updated, do we want to update the
# commit (otherwise that would trigger an unnecessary upgrade)
ignore_files = [ "README.md", "LICENSE", ".gitignore", "check_process", ".travis.yml" ]
diff_files = [ f for f in diff["files"] if f["filename"] not in ignore_files ]
if diff_files:
print("This app points to HEAD and significant changes where found between HEAD and previous commit")
app_rev = diff["commits"][-1]["sha"]
else:
print("This app points to HEAD but no significant changes where found compared to HEAD, so keeping the previous commit")
app_rev = previous_rev if previous_rev != 'HEAD' else app_rev
print("Previous commit : %s" % previous_rev)
print("Current commit : %s" % app_rev)
if previous_rev == app_rev and previous_url == app_url:
print("Already up to date, ignoring")
result_dict[app] = already_built_file[app]
if previous_state != app_state:
result_dict[app]["state"] = app_state
print("... but has changed of state, updating it from '%s' to '%s'" % (previous_state, app_state))
if previous_level != app_level or app_level is None:
result_dict[app]["level"] = app_level
print("... but has changed of level, updating it from '%s' to '%s'" % (previous_level, app_level))
if previous_maintained != app_maintained:
result_dict[app]["maintained"] = app_maintained
print("... but maintained status changed, updating it from '%s' to '%s'" % (previous_maintained, app_maintained))
print "update translations but don't download anything"
result_dict[app]['manifest'] = include_translations_in_manifest(app, result_dict[app]['manifest'])
continue
print("Revision changed ! Updating...")
# Hosted on GitHub
if forge_type == "github":
raw_url = 'https://raw.githubusercontent.com/%s/%s/%s/manifest.json' % (
owner, repo, app_rev
)
manifest = get_json(raw_url)
if manifest is None:
continue
api_url = 'https://api.github.com/repos/%s/%s/commits/%s' % (
owner, repo, app_rev
)
info2 = get_json(api_url)
if info2 is None:
continue
commit_date = parse(info2['commit']['author']['date'])
timestamp = int(time.mktime(commit_date.timetuple()))
# Gitlab-type forge
elif forge_type == "gitlab":
raw_url = '%s/raw/%s/manifest.json' % (app_url, app_rev)
manifest = get_json(raw_url, verify=True)
if manifest is None:
continue
api_url = 'https://%s/api/v4/projects/%s%%2F%s/repository/commits/%s' % (forge_site, owner, repo, app_rev)
commit = get_json(api_url)
if commit is None:
continue
commit_date = parse(commit["authored_date"])
timestamp = int(time.mktime(commit_date.timetuple()))
# Gogs-type forge
elif forge_type == "gogs":
if not app_url.endswith('.git'):
app_url += ".git"
raw_url = '%s/raw/%s/manifest.json' % (app_url[:-4], app_rev)
manifest = get_json(raw_url, verify=False)
if manifest is None:
continue
obj_url = '%s/objects/%s/%s' % (
app_url, app_rev[0:2], app_rev[2:]
)
commit = get_zlib(obj_url, verify=False)
if commit is None or len(commit) < 2:
continue
else:
commit = commit[1]
# Extract author line and commit date
commit_author = re_commit_author.search(commit)
if not commit_author:
print("-> Error: author line in commit not found")
continue
# Construct UTC timestamp
timestamp = int(commit_author.group('time'))
tz = commit_author.group('tz')
if len(tz) != 5:
print("-> Error: unexpected timezone length in commit")
continue
elif tz != '+0000':
tdelta = (int(tz[1:3]) * 3600) + (int(tz[3:5]) * 60)
if tz[0] == '+':
timestamp -= tdelta
elif tz[0] == '-':
timestamp += tdelta
else:
print("-> Error: unexpected timezone format in commit")
continue
else:
print("-> Error: unsupported VCS and/or protocol")
continue
if manifest["id"] != app or manifest["id"] != repo.replace("_ynh", ""):
print("Warning: IDs different between community.json, manifest and repo name")
print(" Manifest id : %s" % manifest["id"])
print(" Name in community json : %s" % app)
print(" Repo name : %s" % repo.replace("_ynh", ""))
try:
result_dict[manifest['id']] = {
'git': {
'branch': info['branch'],
'revision': app_rev,
'url': app_url
},
'lastUpdate': timestamp,
'manifest': include_translations_in_manifest(manifest['id'], manifest),
'state': info['state'],
'level': info.get('level', '?'),
'maintained': app_maintained
}
except KeyError as e:
print("-> Error: invalid app info or manifest, %s" % e)
continue
# Write resulting file
with open(args.output, 'w') as f:
f.write(json.dumps(result_dict, sort_keys=True))
print("\nDone! Written in %s" % args.output)
|
from mpl_toolkits.mplot3d import axes3d
import numpy as np
import matplotlib.pyplot as plt
from matplotlib.animation import FuncAnimation
from socket import *
import time
# Declare all global variables
HOST = '192.168.0.76'
PORT = 21566
BUFSIZ = 512
ADDR = (HOST, PORT)
bad_packet = 0
good_packet = 0
# fig, ax = plt.subplots()
fig = plt.figure()
ax = fig.add_subplot(111, projection='3d')
# Socket (uncommented so the read loop below has a connection to use)
tcpCliSock = socket(AF_INET, SOCK_STREAM)
tcpCliSock.connect(ADDR)
# Enable interactive (non-blocking) plotting
plt.ion()
tstart = time.time()
# real-time plotting loop
X, Y, Z = [], [], []
while True:
try:
# read data from the network
data = tcpCliSock.recv(BUFSIZ)
if data:
print(len(X), data)
data = data.decode().split(',')
if len(data) == 9:
# print('Data received', data)
# tcpCliSock.send(b'Ok')
good_packet += 1
else:
bad_packet += 1
# use the first three decoded values as coordinates
X.append(float(data[0]))
Y.append(float(data[1]))
Z.append(float(data[2]))
frame = ax.scatter(X, Y, Z, c='b', marker='o')
# Remove old line collection before drawing
#if oldcol is not None:
# ax.collections.remove(oldcol)
plt.pause(0.001 / len(X))
except KeyboardInterrupt:
tcpCliSock.close()
print('FPS: %f' % (len(X) / (time.time() - tstart)))
break
|
import asyncio
import fcntl
import logging
import os
import sys
import threading
import time
import uvloop
import unittest
import weakref
from unittest import mock
from uvloop._testbase import UVTestCase, AIOTestCase
class _TestBase:
def test_close(self):
self.assertFalse(self.loop._closed)
self.assertFalse(self.loop.is_closed())
self.loop.close()
self.assertTrue(self.loop._closed)
self.assertTrue(self.loop.is_closed())
# it should be possible to call close() more than once
self.loop.close()
self.loop.close()
# operation blocked when the loop is closed
f = asyncio.Future(loop=self.loop)
self.assertRaises(RuntimeError, self.loop.run_forever)
self.assertRaises(RuntimeError, self.loop.run_until_complete, f)
def test_handle_weakref(self):
wd = weakref.WeakValueDictionary()
h = self.loop.call_soon(lambda: None)
wd['h'] = h # Would fail without __weakref__ slot.
def test_call_soon_1(self):
calls = []
def cb(inc):
calls.append(inc)
self.loop.stop()
self.loop.call_soon(cb, 10)
h = self.loop.call_soon(cb, 100)
self.assertIn('.cb', repr(h))
h.cancel()
self.assertIn('cancelled', repr(h))
self.loop.call_soon(cb, 1)
self.loop.run_forever()
self.assertEqual(calls, [10, 1])
def test_call_soon_2(self):
waiter = self.loop.create_future()
waiter_r = weakref.ref(waiter)
self.loop.call_soon(lambda f: f.set_result(None), waiter)
self.loop.run_until_complete(waiter)
del waiter
self.assertIsNone(waiter_r())
def test_call_soon_3(self):
waiter = self.loop.create_future()
waiter_r = weakref.ref(waiter)
self.loop.call_soon(lambda f=waiter: f.set_result(None))
self.loop.run_until_complete(waiter)
del waiter
self.assertIsNone(waiter_r())
def test_call_soon_base_exc(self):
def cb():
raise KeyboardInterrupt()
self.loop.call_soon(cb)
with self.assertRaises(KeyboardInterrupt):
self.loop.run_forever()
self.assertFalse(self.loop.is_closed())
def test_calls_debug_reporting(self):
def run_test(debug, meth, stack_adj):
context = None
def handler(loop, ctx):
nonlocal context
context = ctx
self.loop.set_debug(debug)
self.loop.set_exception_handler(handler)
def cb():
1 / 0
meth(cb)
self.assertIsNone(context)
self.loop.run_until_complete(asyncio.sleep(0.05, loop=self.loop))
self.assertIs(type(context['exception']), ZeroDivisionError)
self.assertTrue(context['message'].startswith(
'Exception in callback'))
if debug:
tb = context['source_traceback']
self.assertEqual(tb[-1 + stack_adj].name, 'run_test')
else:
self.assertFalse('source_traceback' in context)
del context
for debug in (True, False):
for meth_name, meth, stack_adj in (
('call_soon',
self.loop.call_soon, 0),
('call_later', # `-1` accounts for lambda
lambda *args: self.loop.call_later(0.01, *args), -1)
):
with self.subTest(debug=debug, meth_name=meth_name):
run_test(debug, meth, stack_adj)
def test_now_update(self):
async def run():
st = self.loop.time()
time.sleep(0.05)
return self.loop.time() - st
delta = self.loop.run_until_complete(run())
self.assertTrue(delta > 0.049 and delta < 0.6)
def test_call_later_1(self):
calls = []
def cb(inc=10, stop=False):
calls.append(inc)
self.assertTrue(self.loop.is_running())
if stop:
self.loop.call_soon(self.loop.stop)
self.loop.call_later(0.05, cb)
# canceled right away
h = self.loop.call_later(0.05, cb, 100, True)
self.assertIn('.cb', repr(h))
h.cancel()
self.assertIn('cancelled', repr(h))
self.loop.call_later(0.05, cb, 1, True)
self.loop.call_later(1000, cb, 1000) # shouldn't be called
started = time.monotonic()
self.loop.run_forever()
finished = time.monotonic()
self.assertEqual(calls, [10, 1])
self.assertFalse(self.loop.is_running())
self.assertLess(finished - started, 0.1)
self.assertGreater(finished - started, 0.04)
def test_call_later_2(self):
# Test that loop.call_later triggers an update of
# libuv cached time.
async def main():
await asyncio.sleep(0.001, loop=self.loop)
time.sleep(0.01)
await asyncio.sleep(0.01, loop=self.loop)
started = time.monotonic()
self.loop.run_until_complete(main())
delta = time.monotonic() - started
self.assertGreater(delta, 0.019)
def test_call_later_3(self):
# a memory leak regression test
waiter = self.loop.create_future()
waiter_r = weakref.ref(waiter)
self.loop.call_later(0.01, lambda f: f.set_result(None), waiter)
self.loop.run_until_complete(waiter)
del waiter
self.assertIsNone(waiter_r())
def test_call_later_4(self):
# a memory leak regression test
waiter = self.loop.create_future()
waiter_r = weakref.ref(waiter)
self.loop.call_later(0.01, lambda f=waiter: f.set_result(None))
self.loop.run_until_complete(waiter)
del waiter
self.assertIsNone(waiter_r())
def test_call_later_negative(self):
calls = []
def cb(arg):
calls.append(arg)
self.loop.stop()
self.loop.call_later(-1, cb, 'a')
self.loop.run_forever()
self.assertEqual(calls, ['a'])
def test_call_later_rounding(self):
# Refs #233, call_later() and call_at() shouldn't call cb early
def cb():
self.loop.stop()
for i in range(8):
self.loop.call_later(0.06 + 0.01, cb) # 0.06999999999999999
started = int(round(self.loop.time() * 1000))
self.loop.run_forever()
finished = int(round(self.loop.time() * 1000))
self.assertGreaterEqual(finished - started, 69)
def test_call_at(self):
if os.environ.get('TRAVIS_OS_NAME'):
# Time seems to be really unpredictable on Travis.
raise unittest.SkipTest('time is not monotonic on Travis')
i = 0
def cb(inc):
nonlocal i
i += inc
self.loop.stop()
at = self.loop.time() + 0.05
self.loop.call_at(at, cb, 100).cancel()
self.loop.call_at(at, cb, 10)
started = time.monotonic()
self.loop.run_forever()
finished = time.monotonic()
self.assertEqual(i, 10)
self.assertLess(finished - started, 0.07)
self.assertGreater(finished - started, 0.045)
def test_check_thread(self):
def check_thread(loop, debug):
def cb():
pass
loop.set_debug(debug)
if debug:
msg = ("Non-thread-safe operation invoked on an "
"event loop other than the current one")
with self.assertRaisesRegex(RuntimeError, msg):
loop.call_soon(cb)
with self.assertRaisesRegex(RuntimeError, msg):
loop.call_later(60, cb)
with self.assertRaisesRegex(RuntimeError, msg):
loop.call_at(loop.time() + 60, cb)
else:
loop.call_soon(cb)
loop.call_later(60, cb)
loop.call_at(loop.time() + 60, cb)
def check_in_thread(loop, event, debug, create_loop, fut):
# wait until the event loop is running
event.wait()
try:
if create_loop:
loop2 = self.new_loop()
try:
asyncio.set_event_loop(loop2)
check_thread(loop, debug)
finally:
asyncio.set_event_loop(None)
loop2.close()
else:
check_thread(loop, debug)
except Exception as exc:
loop.call_soon_threadsafe(fut.set_exception, exc)
else:
loop.call_soon_threadsafe(fut.set_result, None)
def test_thread(loop, debug, create_loop=False):
event = threading.Event()
fut = asyncio.Future(loop=loop)
loop.call_soon(event.set)
args = (loop, event, debug, create_loop, fut)
thread = threading.Thread(target=check_in_thread, args=args)
thread.start()
loop.run_until_complete(fut)
thread.join()
# raise RuntimeError if the thread has no event loop
test_thread(self.loop, True)
# check disabled if debug mode is disabled
test_thread(self.loop, False)
# raise RuntimeError if the event loop of the thread is not the called
# event loop
test_thread(self.loop, True, create_loop=True)
# check disabled if debug mode is disabled
test_thread(self.loop, False, create_loop=True)
def test_run_once_in_executor_plain(self):
called = []
def cb(arg):
called.append(arg)
async def runner():
await self.loop.run_in_executor(None, cb, 'a')
self.loop.run_until_complete(runner())
self.assertEqual(called, ['a'])
def test_set_debug(self):
self.loop.set_debug(True)
self.assertTrue(self.loop.get_debug())
self.loop.set_debug(False)
self.assertFalse(self.loop.get_debug())
def test_run_until_complete_type_error(self):
self.assertRaises(
TypeError, self.loop.run_until_complete, 'blah')
def test_run_until_complete_loop(self):
task = asyncio.Future(loop=self.loop)
other_loop = self.new_loop()
self.addCleanup(other_loop.close)
self.assertRaises(
ValueError, other_loop.run_until_complete, task)
def test_run_until_complete_error(self):
async def foo():
raise ValueError('aaa')
with self.assertRaisesRegex(ValueError, 'aaa'):
self.loop.run_until_complete(foo())
def test_run_until_complete_loop_orphan_future_close_loop(self):
if self.implementation == 'asyncio' and sys.version_info < (3, 6, 2):
raise unittest.SkipTest('unfixed asyncio')
class ShowStopper(BaseException):
pass
async def foo(delay):
await asyncio.sleep(delay, loop=self.loop)
def throw():
raise ShowStopper
self.loop.call_soon(throw)
try:
self.loop.run_until_complete(foo(0.1))
except ShowStopper:
pass
# This call fails if run_until_complete does not clean up
# done-callback for the previous future.
self.loop.run_until_complete(foo(0.2))
def test_debug_slow_callbacks(self):
logger = logging.getLogger('asyncio')
self.loop.set_debug(True)
self.loop.slow_callback_duration = 0.2
self.loop.call_soon(lambda: time.sleep(0.3))
with mock.patch.object(logger, 'warning') as log:
self.loop.run_until_complete(asyncio.sleep(0, loop=self.loop))
self.assertEqual(log.call_count, 1)
# format message
msg = log.call_args[0][0] % log.call_args[0][1:]
self.assertIn('Executing <Handle', msg)
self.assertIn('test_debug_slow_callbacks', msg)
def test_debug_slow_timer_callbacks(self):
logger = logging.getLogger('asyncio')
self.loop.set_debug(True)
self.loop.slow_callback_duration = 0.2
self.loop.call_later(0.01, lambda: time.sleep(0.3))
with mock.patch.object(logger, 'warning') as log:
self.loop.run_until_complete(asyncio.sleep(0.02, loop=self.loop))
self.assertEqual(log.call_count, 1)
# format message
msg = log.call_args[0][0] % log.call_args[0][1:]
self.assertIn('Executing <TimerHandle', msg)
self.assertIn('test_debug_slow_timer_callbacks', msg)
def test_debug_slow_task_callbacks(self):
logger = logging.getLogger('asyncio')
self.loop.set_debug(True)
self.loop.slow_callback_duration = 0.2
async def foo():
time.sleep(0.3)
with mock.patch.object(logger, 'warning') as log:
self.loop.run_until_complete(foo())
self.assertEqual(log.call_count, 1)
# format message
msg = log.call_args[0][0] % log.call_args[0][1:]
self.assertIn('Executing <Task finished', msg)
self.assertIn('test_debug_slow_task_callbacks', msg)
def test_default_exc_handler_callback(self):
self.loop.set_exception_handler(None)
self.loop._process_events = mock.Mock()
def zero_error(fut):
fut.set_result(True)
1 / 0
logger = logging.getLogger('asyncio')
# Test call_soon (events.Handle)
with mock.patch.object(logger, 'error') as log:
fut = asyncio.Future(loop=self.loop)
self.loop.call_soon(zero_error, fut)
fut.add_done_callback(lambda fut: self.loop.stop())
self.loop.run_forever()
log.assert_called_with(
self.mock_pattern('Exception in callback.*zero'),
exc_info=mock.ANY)
# Test call_later (events.TimerHandle)
with mock.patch.object(logger, 'error') as log:
fut = asyncio.Future(loop=self.loop)
self.loop.call_later(0.01, zero_error, fut)
fut.add_done_callback(lambda fut: self.loop.stop())
self.loop.run_forever()
log.assert_called_with(
self.mock_pattern('Exception in callback.*zero'),
exc_info=mock.ANY)
def test_set_exc_handler_custom(self):
self.loop.set_exception_handler(None)
logger = logging.getLogger('asyncio')
def run_loop():
def zero_error():
self.loop.stop()
1 / 0
self.loop.call_soon(zero_error)
self.loop.run_forever()
errors = []
def handler(loop, exc):
errors.append(exc)
self.loop.set_debug(True)
if hasattr(self.loop, 'get_exception_handler'):
# Available since Python 3.5.2
self.assertIsNone(self.loop.get_exception_handler())
self.loop.set_exception_handler(handler)
if hasattr(self.loop, 'get_exception_handler'):
self.assertIs(self.loop.get_exception_handler(), handler)
run_loop()
self.assertEqual(len(errors), 1)
self.assertRegex(errors[-1]['message'],
'Exception in callback.*zero_error')
self.loop.set_exception_handler(None)
with mock.patch.object(logger, 'error') as log:
run_loop()
log.assert_called_with(
self.mock_pattern('Exception in callback.*zero'),
exc_info=mock.ANY)
self.assertEqual(len(errors), 1)
def test_set_exc_handler_broken(self):
logger = logging.getLogger('asyncio')
def run_loop():
def zero_error():
self.loop.stop()
1 / 0
self.loop.call_soon(zero_error)
self.loop.run_forever()
def handler(loop, context):
raise AttributeError('spam')
self.loop._process_events = mock.Mock()
self.loop.set_exception_handler(handler)
with mock.patch.object(logger, 'error') as log:
run_loop()
log.assert_called_with(
self.mock_pattern('Unhandled error in exception handler'),
exc_info=mock.ANY)
def test_set_task_factory_invalid(self):
with self.assertRaisesRegex(
TypeError,
'task factory must be a callable or None'):
self.loop.set_task_factory(1)
self.assertIsNone(self.loop.get_task_factory())
def test_set_task_factory(self):
self.loop._process_events = mock.Mock()
class MyTask(asyncio.Task):
pass
@asyncio.coroutine
def coro():
pass
factory = lambda loop, coro: MyTask(coro, loop=loop)
self.assertIsNone(self.loop.get_task_factory())
self.loop.set_task_factory(factory)
self.assertIs(self.loop.get_task_factory(), factory)
task = self.loop.create_task(coro())
self.assertTrue(isinstance(task, MyTask))
self.loop.run_until_complete(task)
self.loop.set_task_factory(None)
self.assertIsNone(self.loop.get_task_factory())
task = self.loop.create_task(coro())
self.assertTrue(isinstance(task, asyncio.Task))
self.assertFalse(isinstance(task, MyTask))
self.loop.run_until_complete(task)
def _compile_agen(self, src):
try:
g = {}
exec(src, globals(), g)
except SyntaxError:
# Python < 3.6
raise unittest.SkipTest()
else:
return g['waiter']
def test_shutdown_asyncgens_01(self):
finalized = list()
if not hasattr(self.loop, 'shutdown_asyncgens'):
raise unittest.SkipTest()
waiter = self._compile_agen(
'''async def waiter(timeout, finalized, loop):
try:
await asyncio.sleep(timeout, loop=loop)
yield 1
finally:
await asyncio.sleep(0, loop=loop)
finalized.append(1)
''')
async def wait():
async for _ in waiter(1, finalized, self.loop):
pass
t1 = self.loop.create_task(wait())
t2 = self.loop.create_task(wait())
self.loop.run_until_complete(asyncio.sleep(0.1, loop=self.loop))
self.loop.run_until_complete(self.loop.shutdown_asyncgens())
self.assertEqual(finalized, [1, 1])
# Silence warnings
t1.cancel()
t2.cancel()
self.loop.run_until_complete(asyncio.sleep(0.1, loop=self.loop))
def test_shutdown_asyncgens_02(self):
if not hasattr(self.loop, 'shutdown_asyncgens'):
raise unittest.SkipTest()
logged = 0
def logger(loop, context):
nonlocal logged
self.assertIn('asyncgen', context)
expected = 'an error occurred during closing of asynchronous'
if expected in context['message']:
logged += 1
waiter = self._compile_agen('''async def waiter(timeout, loop):
try:
await asyncio.sleep(timeout, loop=loop)
yield 1
finally:
1 / 0
''')
async def wait():
async for _ in waiter(1, self.loop):
pass
t = self.loop.create_task(wait())
self.loop.run_until_complete(asyncio.sleep(0.1, loop=self.loop))
self.loop.set_exception_handler(logger)
self.loop.run_until_complete(self.loop.shutdown_asyncgens())
self.assertEqual(logged, 1)
# Silence warnings
t.cancel()
self.loop.run_until_complete(asyncio.sleep(0.1, loop=self.loop))
def test_shutdown_asyncgens_03(self):
if not hasattr(self.loop, 'shutdown_asyncgens'):
raise unittest.SkipTest()
waiter = self._compile_agen('''async def waiter():
yield 1
yield 2
''')
async def foo():
# We specifically want to hit _asyncgen_finalizer_hook
# method.
await waiter().asend(None)
self.loop.run_until_complete(foo())
self.loop.run_until_complete(asyncio.sleep(0.01, loop=self.loop))
def test_inf_wait_for(self):
async def foo():
await asyncio.sleep(0.1, loop=self.loop)
return 123
res = self.loop.run_until_complete(
asyncio.wait_for(foo(), timeout=float('inf'), loop=self.loop))
self.assertEqual(res, 123)
class TestBaseUV(_TestBase, UVTestCase):
def test_loop_create_future(self):
fut = self.loop.create_future()
self.assertTrue(isinstance(fut, asyncio.Future))
self.assertIs(fut._loop, self.loop)
fut.cancel()
def test_loop_call_soon_handle_cancelled(self):
cb = lambda: False # NoQA
handle = self.loop.call_soon(cb)
self.assertFalse(handle.cancelled())
handle.cancel()
self.assertTrue(handle.cancelled())
handle = self.loop.call_soon(cb)
self.assertFalse(handle.cancelled())
self.run_loop_briefly()
self.assertFalse(handle.cancelled())
def test_loop_call_later_handle_cancelled(self):
cb = lambda: False # NoQA
handle = self.loop.call_later(0.01, cb)
self.assertFalse(handle.cancelled())
handle.cancel()
self.assertTrue(handle.cancelled())
handle = self.loop.call_later(0.01, cb)
self.assertFalse(handle.cancelled())
self.run_loop_briefly(delay=0.05)
self.assertFalse(handle.cancelled())
def test_loop_std_files_cloexec(self):
# See https://github.com/MagicStack/uvloop/issues/40 for details.
for fd in {0, 1, 2}:
flags = fcntl.fcntl(fd, fcntl.F_GETFD)
self.assertFalse(flags & fcntl.FD_CLOEXEC)
def test_default_exc_handler_broken(self):
logger = logging.getLogger('asyncio')
_context = None
class Loop(uvloop.Loop):
_selector = mock.Mock()
_process_events = mock.Mock()
def default_exception_handler(self, context):
nonlocal _context
_context = context
# Simulates custom buggy "default_exception_handler"
raise ValueError('spam')
loop = Loop()
self.addCleanup(loop.close)
self.addCleanup(lambda: asyncio.set_event_loop(None))
asyncio.set_event_loop(loop)
def run_loop():
def zero_error():
loop.stop()
1 / 0
loop.call_soon(zero_error)
loop.run_forever()
with mock.patch.object(logger, 'error') as log:
run_loop()
log.assert_called_with(
'Exception in default exception handler',
exc_info=True)
def custom_handler(loop, context):
raise ValueError('ham')
_context = None
loop.set_exception_handler(custom_handler)
with mock.patch.object(logger, 'error') as log:
run_loop()
log.assert_called_with(
self.mock_pattern('Exception in default exception.*'
'while handling.*in custom'),
exc_info=True)
# Check that original context was passed to default
# exception handler.
self.assertIn('context', _context)
self.assertIs(type(_context['context']['exception']),
ZeroDivisionError)
class TestBaseAIO(_TestBase, AIOTestCase):
pass
class TestPolicy(unittest.TestCase):
def test_uvloop_policy(self):
try:
asyncio.set_event_loop_policy(uvloop.EventLoopPolicy())
loop = asyncio.new_event_loop()
try:
self.assertIsInstance(loop, uvloop.Loop)
finally:
loop.close()
finally:
asyncio.set_event_loop_policy(None)
@unittest.skipUnless(hasattr(asyncio, '_get_running_loop'),
'No asyncio._get_running_loop')
def test_running_loop_within_a_loop(self):
@asyncio.coroutine
def runner(loop):
loop.run_forever()
try:
asyncio.set_event_loop_policy(uvloop.EventLoopPolicy())
loop = asyncio.new_event_loop()
outer_loop = asyncio.new_event_loop()
try:
with self.assertRaisesRegex(RuntimeError,
'while another loop is running'):
outer_loop.run_until_complete(runner(loop))
finally:
loop.close()
outer_loop.close()
finally:
asyncio.set_event_loop_policy(None)
@unittest.skipUnless(hasattr(asyncio, '_get_running_loop'),
'No asyncio._get_running_loop')
def test_get_event_loop_returns_running_loop(self):
class Policy(asyncio.DefaultEventLoopPolicy):
def get_event_loop(self):
raise NotImplementedError
loop = None
old_policy = asyncio.get_event_loop_policy()
try:
asyncio.set_event_loop_policy(Policy())
loop = uvloop.new_event_loop()
self.assertIs(asyncio._get_running_loop(), None)
async def func():
self.assertIs(asyncio.get_event_loop(), loop)
self.assertIs(asyncio._get_running_loop(), loop)
loop.run_until_complete(func())
finally:
asyncio.set_event_loop_policy(old_policy)
if loop is not None:
loop.close()
self.assertIs(asyncio._get_running_loop(), None)
|
"""
Stochastic Gradient Descent.
TODO: write more documentation
"""
__docformat__ = 'restructuredtext en'
__authors__ = ("Razvan Pascanu "
"KyungHyun Cho "
"Caglar Gulcehre ")
__contact__ = "Razvan Pascanu <r.pascanu@gmail>"
import numpy
import time
import logging
import theano
import theano.tensor as TT
# from theano.sandbox.scan import scan
from theano.scan_module import scan
from theano.sandbox.rng_mrg import MRG_RandomStreams as RandomStreams
from groundhog.utils import print_time, print_mem, const
logger = logging.getLogger(__name__)
class SGD(object):
def __init__(self,
model,
state,
data):
"""
Parameters:
:param model:
Class describing the model used. It should provide the
computational graph to evaluate the model, and have a
similar structure to classes on the models folder
:param state:
Dictionary containing the current state of your job. This
includes configuration of the job, specifically the seed,
the starting damping factor, batch size, etc. See main.py
for details
:param data:
Class describing the dataset used by the model
"""
if 'adarho' not in state:
state['adarho'] = 0.96
if 'adaeps' not in state:
state['adaeps'] = 1e-6
#####################################
# Step 0. Constructs shared variables
#####################################
bs = state['bs']
self.model = model
self.rng = numpy.random.RandomState(state['seed'])
srng = RandomStreams(self.rng.randint(213))
self.gs = [theano.shared(numpy.zeros(p.get_value(borrow=True).shape,
dtype=theano.config.floatX),
name=p.name)
for p in model.params]
self.gnorm2 = [theano.shared(numpy.zeros(p.get_value(borrow=True).shape,
dtype=theano.config.floatX),
name=p.name+'_g2')
for p in model.params]
self.dnorm2 = [theano.shared(numpy.zeros(p.get_value(borrow=True).shape,
dtype=theano.config.floatX),
name=p.name+'_d2')
for p in model.params]
self.step = 0
self.bs = bs
self.state = state
self.data = data
self.step_timer = time.time()
self.gdata = [theano.shared(numpy.zeros( (2,)*x.ndim,
dtype=x.dtype),
name=x.name) for x in model.inputs]
if 'profile' not in self.state:
self.state['profile'] = 0
###################################
# Step 1. Compile training function
###################################
logger.debug('Constructing grad function')
loc_data = self.gdata
self.prop_exprs = [x[1] for x in model.properties]
self.prop_names = [x[0] for x in model.properties]
self.update_rules = [x[1] for x in model.updates]
rval = theano.clone(model.param_grads + self.update_rules + \
self.prop_exprs + [model.train_cost],
replace=list(zip(model.inputs, loc_data)))
nparams = len(model.params)
nouts = len(self.prop_exprs)
nrules = len(self.update_rules)
gs = rval[:nparams]
rules = rval[nparams:nparams + nrules]
outs = rval[nparams + nrules:]
norm_gs = TT.sqrt(sum(TT.sum(x**2)
for x,p in zip(gs, self.model.params) if p not in self.model.exclude_params_for_norm))
if 'cutoff' in state and state['cutoff'] > 0:
c = numpy.float32(state['cutoff'])
if state['cutoff_rescale_length']:
c = c * TT.cast(loc_data[0].shape[0], 'float32')
notfinite = TT.or_(TT.isnan(norm_gs), TT.isinf(norm_gs))
_gs = []
for g,p in zip(gs,self.model.params):
if p not in self.model.exclude_params_for_norm:
tmpg = TT.switch(TT.ge(norm_gs, c), g*c/norm_gs, g)
_gs.append(
TT.switch(notfinite, numpy.float32(.1)*p, tmpg))
else:
_gs.append(g)
gs = _gs
store_gs = [(s,g) for s,g in zip(self.gs, gs)]
updates = store_gs + [(s[0], r) for s,r in zip(model.updates, rules)]
rho = self.state['adarho']
eps = self.state['adaeps']
# grad2
gnorm2_up = [rho * gn2 + (1. - rho) * (g ** 2.) for gn2,g in zip(self.gnorm2, gs)]
updates = updates + list(zip(self.gnorm2, gnorm2_up))
logger.debug('Compiling grad function')
st = time.time()
self.train_fn = theano.function(
[], outs, name='train_function',
updates = updates,
givens = list(zip(model.inputs, loc_data)))
logger.debug('took {}'.format(time.time() - st))
self.lr = numpy.float32(1.)
new_params = [p - (TT.sqrt(dn2 + eps) / TT.sqrt(gn2 + eps)) * g
for p, g, gn2, dn2 in
zip(model.params, self.gs, self.gnorm2, self.dnorm2)]
updates = list(zip(model.params, new_params))
# d2
d2_up = [(dn2, rho * dn2 + (1. - rho) *
(((TT.sqrt(dn2 + eps) / TT.sqrt(gn2 + eps)) * g) ** 2.))
for dn2, gn2, g in zip(self.dnorm2, self.gnorm2, self.gs)]
updates = updates + d2_up
self.update_fn = theano.function(
[], [], name='update_function',
allow_input_downcast=True,
updates = updates)
self.old_cost = 1e20
self.schedules = model.get_schedules()
self.return_names = self.prop_names + \
['cost',
'error',
'time_step',
'whole_time', 'lr']
self.prev_batch = None
def __call__(self):
batch = next(self.data)
assert batch
# Perturb the data (! and the model)
if isinstance(batch, dict):
batch = self.model.perturb(**batch)
else:
batch = self.model.perturb(*batch)
# Load the dataset into GPU
# Note: not the most efficient approach in general, as it involves
# copying each batch individually to the GPU
if isinstance(batch, dict):
for gdata in self.gdata:
gdata.set_value(batch[gdata.name], borrow=True)
else:
for gdata, data in zip(self.gdata, batch):
gdata.set_value(data, borrow=True)
# Run the training function
g_st = time.time()
rvals = self.train_fn()
for schedule in self.schedules:
schedule(self, rvals[-1])
self.update_fn()
g_ed = time.time()
self.state['lr'] = float(self.lr)
cost = rvals[-1]
self.old_cost = cost
whole_time = time.time() - self.step_timer
if self.step % self.state['trainFreq'] == 0:
msg = '.. iter %4d cost %.3f'
vals = [self.step, cost]
for dx, prop in enumerate(self.prop_names):
msg += ' '+prop+' %.2e'
vals += [float(numpy.array(rvals[dx]))]
msg += ' step time %s whole time %s lr %.2e'
vals += [print_time(g_ed - g_st),
print_time(time.time() - self.step_timer),
float(self.lr)]
print(msg % tuple(vals))
self.step += 1
ret = dict([('cost', float(cost)),
('error', float(cost)),
('lr', float(self.lr)),
('time_step', float(g_ed - g_st)),
('whole_time', float(whole_time))]+list(zip(self.prop_names, rvals)))
return ret
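# A minimal usage sketch (not part of the original module). It assumes a
# Groundhog-style `model` object and a `data` iterator matching the interfaces
# described in the __init__ docstring; build_model / build_data_iterator are
# hypothetical helpers, and the state values are illustrative placeholders.
if __name__ == '__main__':
    state = {
        'seed': 123,                 # RNG seed (read in __init__)
        'bs': 64,                    # batch size
        'adarho': 0.96,              # AdaDelta decay (defaulted above if absent)
        'adaeps': 1e-6,              # AdaDelta epsilon
        'cutoff': 1.,                # gradient-norm clipping threshold
        'cutoff_rescale_length': 0,  # rescale cutoff by sequence length
        'profile': 0,
        'trainFreq': 10,             # print a progress line every 10 steps
    }
    model = build_model(state)         # hypothetical helper
    data = build_data_iterator(state)  # hypothetical helper
    sgd = SGD(model, state, data)
    for _ in range(100):
        ret = sgd()  # one gradient step; returns cost, error, lr and timings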
|
#!/usr/bin/python2.4 -tt
# Copyright 2010 Google Inc.
# Licensed under the Apache License, Version 2.0
# http://www.apache.org/licenses/LICENSE-2.0
# Google's Python Class
# http://code.google.com/edu/languages/google-python-class/
# Additional basic string exercises
# D. verbing
# Given a string, if its length is at least 3,
# add 'ing' to its end.
# Unless it already ends in 'ing', in which case
# add 'ly' instead.
# If the string length is less than 3, leave it unchanged.
# Return the resulting string.
import math
def verbing(s):
if len(s) >= 3:
if s[-3:] == 'ing':
s += 'ly'
else:
s += 'ing'
return s
# E. not_bad
# Given a string, find the first appearance of the
# substring 'not' and 'bad'. If the 'bad' follows
# the 'not', replace the whole 'not'...'bad' substring
# with 'good'.
# Return the resulting string.
# So 'This dinner is not that bad!' yields:
# This dinner is good!
def not_bad(s):
iNot = s.find('not')
iBad = s.find('bad')
if iNot != -1 and iBad != -1 and iBad > iNot:
s = s[:iNot] + 'good' + s[iBad+3:]
return s
# F. front_back
# Consider dividing a string into two halves.
# If the length is even, the front and back halves are the same length.
# If the length is odd, we'll say that the extra char goes in the front half.
# e.g. 'abcde', the front half is 'abc', the back half 'de'.
# Given 2 strings, a and b, return a string of the form
# a-front + b-front + a-back + b-back
def front_back(a, b):
aMid = int(math.ceil(len(a) / 2.))
bMid = int(math.ceil(len(b) / 2.))
# aMid = len(a) // 2
# bMid = len(b) // 2
return a[:aMid] + b[:bMid] + a[aMid:] + b[bMid:]
# Simple provided test() function used in main() to print
# what each function returns vs. what it's supposed to return.
def test(got, expected):
if got == expected:
prefix = ' OK '
else:
prefix = ' X '
print '%s got: %s expected: %s' % (prefix, repr(got), repr(expected))
# main() calls the above functions with interesting inputs,
# using the above test() to check if the result is correct or not.
def main():
print 'verbing'
test(verbing('hail'), 'hailing')
test(verbing('swiming'), 'swimingly')
test(verbing('do'), 'do')
print
print 'not_bad'
test(not_bad('This movie is not so bad'), 'This movie is good')
test(not_bad('This dinner is not that bad!'), 'This dinner is good!')
test(not_bad('This tea is not hot'), 'This tea is not hot')
test(not_bad("It's bad yet not"), "It's bad yet not")
print
print 'front_back'
test(front_back('abcd', 'xy'), 'abxcdy')
test(front_back('abcde', 'xyz'), 'abcxydez')
test(front_back('Kitten', 'Donut'), 'KitDontenut')
if __name__ == '__main__':
main()
|
import logging
from pele.potentials import LJ
from nestedbasinsampling import (
NoGUTSSampler, NestedOptimizerKalman, HardShellConstraint, random_structure,
RecordMinimization, CompareStructures, LOG_CONFIG, Database)
logger = logging.getLogger("LJ31.system")
logger = logging.getLogger("NBS.LJ_system")
default_sampler_kws = dict(
max_depth=7, remove_linear_momentum=True, remove_angular_momentum=True,
remove_initial_linear_momentum=False, remove_initial_angular_momentum=False)
default_nopt_kws = dict(
nsteps=2000, MC_steps=10, target_acc=0.4, nsave=30, tol=1e-2,
nwait=15, kalman_discount=100.)
default_struct_kws = dict(niter=100)
default_database_kws = dict()
class NBS_LJ(object):
"""
"""
def __init__(self, natoms, radius=None, stepsize=None,
sampler_kws=None, nopt_kws=None, stepsize_kw=None,
struct_kws=None, database_kws=None):
self.pot = LJ()
self.natoms = natoms
self.radius = float(natoms) ** (1. / 3) if radius is None else radius
self.constraint = HardShellConstraint(self.radius)
self.sampler_kws = default_sampler_kws.copy()
if sampler_kws is not None: self.sampler_kws.update(sampler_kws)
self.sampler = NoGUTSSampler(
self.pot, constraint=self.constraint, **self.sampler_kws)
self.nopt_kws = default_nopt_kws.copy()
if nopt_kws is not None: self.nopt_kws.update(nopt_kws)
self.struct_kws = default_struct_kws.copy()
if struct_kws is not None: self.struct_kws.update(struct_kws)
self.database_kws = default_database_kws.copy()
if database_kws is not None: self.database_kws.update(database_kws)
if 'compareMinima' not in self.database_kws:
self.database_kws['compareMinima'] = self.get_compare_structures()
if stepsize is None:
kws = {} if stepsize_kw is None else stepsize_kw
s = self.determine_stepsize(
target_acc=self.nopt_kws['target_acc'], **kws)
self.stepsize = s[-1]
else:
self.stepsize = stepsize
def determine_stepsize(self, coords=None, E=None, **kwargs):
if coords is None: coords = self.random_config()
if E is None: E = self.pot.getEnergy(coords)
s = self.sampler.determine_stepsize(coords, E, **kwargs)
return s
def random_config(self):
return random_structure(self.natoms, self.radius)
def nopt(self, coords=None, Ecut=None, stepsize=None):
if coords is None: coords = self.random_config()
if Ecut is None: Ecut = self.pot.getEnergy(coords)
if stepsize is None: stepsize = self.stepsize
opt = NestedOptimizerKalman(
coords, self.pot, sampler=self.sampler,
energy=Ecut, stepsize=stepsize, **self.nopt_kws)
return dict(opt.run())
def get_configuration(self):
coords = self.random_config()
Ecut = self.pot.getEnergy(coords)
stepsize = self.stepsize
return coords, Ecut, stepsize
def get_compare_structures(self):
return CompareStructures(**self.struct_kws)
def get_database(self, dbname=":memory:"):
db = Database(dbname, **self.database_kws)
db.add_property('sampler', self.sampler_kws, overwrite=False)
db.add_property('nopt', self.nopt_kws, overwrite=False)
db.add_property('struct', self.struct_kws, overwrite=False)
logger.info("Connecting to database: {:s}".format(dbname))
logger.info("params:\nsampler:\n{:s}\nnopt:\n{:s}".format(
str(self.sampler_kws), str(self.nopt_kws)))
return db
if __name__ == "__main__":
logging.basicConfig(level=logging.DEBUG, **LOG_CONFIG)
system = NBS_LJ(natoms=31, stepsize=0.1)
res = system.nopt()
|
"""Docblock manipulation utilities."""
from pprint import pformat
def append_to_docs(fn, text):
"""Append text to a functions existing docblock."""
if not text:
return
if fn.__doc__:
min_indent = _getindent(fn.__doc__)
fn.__doc__ = '%s\n\n%s' % (fn.__doc__, _indent(text, min_indent))
else:
fn.__doc__ = text
def append_var_to_docs(fn, label, value):
"""Append text & pformatted value to docblock."""
value_width = 76 - _getindent(fn.__doc__)
append_to_docs(
fn,
"%s:\n%s" % (
label,
_indent(pformat(value, width=value_width))
)
)
def include_docs_from(source_function):
"""Decorator copying documentation from one function onto another."""
def decorator(dest_function):
append_to_docs(dest_function, source_function.__doc__)
return dest_function
return decorator
def _indent(string, indent_level=4):
"""Indent each line by `indent_level` of spaces."""
return '\n'.join('%s%s' % (' '*indent_level, x) for x in
string.splitlines())
def _getindent(string):
try:
lines = string.splitlines()
# drop first line if it has no indent level
if _nspaces(lines[0]) == 0:
lines.pop(0)
indent_levels = (_nspaces(x) for x in lines if x)
return min(indent_levels) or 0
except (AttributeError, ValueError):
# Things that don't look like strings and strings with no
# indentation should report indentation of 0
return 0
def _nspaces(line):
for idx, char in enumerate(line):
if char != ' ':
return idx
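# A small usage sketch (illustrative, not part of the original module): it
# shows the decorator copying one function's docstring onto another and then
# appending a pformatted value, using only the public helpers defined above.
# The functions and DEFAULTS dict are made-up examples.
DEFAULTS = {'retries': 3, 'timeout': 30}

def fetch(url):
    """Fetch a URL.

    Retries on failure.
    """

@include_docs_from(fetch)
def fetch_json(url):
    """Fetch a URL and decode the body as JSON."""

append_var_to_docs(fetch_json, "Default settings", DEFAULTS)
# fetch_json.__doc__ now ends with the docstring of fetch() plus a
# "Default settings:" section rendered with pprint.pformat.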
|
import os
import re
import luigi
import luigi.hadoop
import luigi.hdfs
class InputText(luigi.ExternalTask):
path = luigi.Parameter()
def output(self):
return luigi.hdfs.HdfsTarget(self.path)
class Ngrams(luigi.hadoop.JobTask):
source = luigi.Parameter()
destination = luigi.Parameter()
# overrides superclass; gets set as jobconf:
n_reduce_tasks = luigi.IntParameter(default=10)
def requires(self):
tasks = []
paths = luigi.hdfs.HdfsClient().listdir(self.source, ignore_directories=True, recursive=True)
for path in paths:
tasks.append(InputText(path))
return tasks
def output(self):
return luigi.hdfs.HdfsTarget(self.destination)
def init_mapper(self):
try:
input_file = os.environ['map_input_file']
except KeyError:
input_file = os.environ['mapreduce_map_input_file']
self.expected_tokens = int(re.findall(r'([\d]+)gram', os.path.basename(input_file))[0])
def mapper(self, line):
data = line.split('\t')
if len(data) < 3:
return
# unpack data
ngram = data[0].split()
year = data[1]
count = int(data[2])
if len(ngram) != self.expected_tokens:
return
# generate key
pair = sorted([ngram[0], ngram[self.expected_tokens - 1]])
k = pair + [year]
yield (k, count)
def combiner(self, key, values):
yield (key, sum(values))
def reducer(self, key, values):
yield "%s\t%s\t%s" % tuple(key), str(sum(values))
if __name__ == '__main__':
luigi.run()
|
from __future__ import unicode_literals
from frappe import _
def get_data():
return [
{
"label": _("Form Customization"),
"icon": "fa fa-glass",
"items": [
{
"type": "doctype",
"name": "Customize Form",
"description": _("Change field properties (hide, readonly, permission etc.)")
},
{
"type": "doctype",
"name": "Custom Field",
"description": _("Add fields to forms.")
},
{
"type": "doctype",
"name": "Custom Script",
"description": _("Add custom javascript to forms.")
},
{
"type": "doctype",
"name": "DocType",
"description": _("Add custom forms.")
},
]
},
{
"label": _("Dashboards"),
"items": [
{
"type": "doctype",
"name": "Dashboard",
},
{
"type": "doctype",
"name": "Dashboard Chart",
},
{
"type": "doctype",
"name": "Dashboard Chart Source",
},
]
},
{
"label": _("Other"),
"items": [
{
"type": "doctype",
"label": _("Custom Translations"),
"name": "Translation",
"description": _("Add your own translations")
},
{
"type": "doctype",
"label": _("Package"),
"name": "Package",
"description": _("Import and Export Packages.")
}
]
}
]
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
0 place name
1 language code of the name
2 name of the language
3 place type code
4 place type description
5 kkj/pkj northing
6 kkj/pkj easting
7 kkj/ykj northing
8 kkj/ykj easting
9 etrs/tm35fin northing
10 etrs/tm35fin easting
11 municipality code
12 municipality name
13 sub-region (seutukunta) code
14 sub-region name
15 region (maakunta) code
16 region name
17 major region (suuralue) code
18 major region name
19 province (lääni) code
20 province name
21 5x5 map sheet identifier
22 rescue grid sheet identifier
23 etrs-tm35 sheet identifier
24 officiality code of the name's language
25 officiality description of the name's language
26 majority-status code of the name's language
27 majority-status description of the name's language
28 place name source code
29 place name source description
30 place id
31 place name id
Finnish-English glossary:
http://www.google.fi/url?sa=t&rct=j&q=&esrc=s&source=web&cd=18&ved=0CEUQFjAHOAo&url=http%3A%2F%2Fwww.pohjois-karjala.fi%2Fdman%2FDocument.phx%2F~maakuntaliitto%2FJulkiset%2FEUFUND%2FHankesanasto%3FfolderId%3D~maakuntaliitto%252FJulkiset%252FEUFUND%26cmd%3Ddownload&ei=-RKIUISCGMKA4gS9roHYCg&usg=AFQjCNEqVl4XU868FwPn8C-_qlnozH81Vw&cad=rja
"""
from __future__ import print_function
import sys
import codecs
import sqlite3
from coordinates import Translate, COORD_TYPE_WGS84, COORD_TYPE_ETRSTM35FIN
o8 = codecs.getwriter('utf-8')(sys.stdout)
e8 = codecs.getwriter('utf-8')(sys.stderr)
# Translate() arguments:
#   Input: a dictionary where ['type'] is the coordinate system type identifier,
#          ['N'] is the coordinate Northing / Lat and
#          ['E'] is the coordinate Easting / Lon,
#          plus the type identifier of the coordinate system to transform the
#          input coordinates to.
#   Output: a dictionary where ['type'] is the coordinate system type identifier,
#           ['N'] is the coordinate Northing / Lat and
#           ['E'] is the coordinate Easting / Lon.
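# Illustrative sketch of the conversion done in Place.__init__ below (the
# numeric coordinates are made-up example values):
#
#   wgs84 = Translate({'type': COORD_TYPE_ETRSTM35FIN,
#                      'N': 6715706.0, 'E': 385815.0}, COORD_TYPE_WGS84)
#   lat, lon = wgs84['N'], wgs84['E']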
class Place(object):
def __init__(self, lst):
self.name = lst[0]
wgs84_coords = Translate({'type': COORD_TYPE_ETRSTM35FIN,
'N': float(lst[9]), 'E': float(lst[10])}, COORD_TYPE_WGS84)
self.lat = wgs84_coords['N']
self.lon = wgs84_coords['E']
self.type_id = lst[3]
self.municipality_id = lst[11]
self.sub_region_id = lst[13]
self.NUTS3_region_id = lst[15]
self.NUTS2_region_id = lst[17]
self.id = lst[30]
def __repr__(self):
return "<Place %s %s>" % (self.id, str(self))
def __str__(self):
return unicode(self).encode('ASCII', 'backslashreplace')
def __unicode__(self):
return u"{0}, {1}; {2}, {3}".format(self.name, self.municipality_id, self.lat, self.lon)
def insert_stmt(self):
return (u"INSERT INTO places (name, municipality_id, id, lat, lon, type_id, sub_region_id, NUTS2_region_id, NUTS3_region_id) VALUES (?, ?, ?, ?, ?, ?, ?, ?, ?)",
(self.name,
self.municipality_id,
self.id,
self.lat,
self.lon,
self.type_id,
self.sub_region_id,
self.NUTS2_region_id,
self.NUTS3_region_id))
def insert_fts_stmt(self):
return (u"INSERT INTO places_fts (id, name) VALUES (?, ?)",
(self.id,
self.name))
|
# -*- coding: utf-8 -*-
#
# The MIT License (MIT)
#
# Copyright (c) 2014 Edward Mountjoy
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
#
from src.probabilisticSeqMatch import sequences_match_prob
from src.probabilisticSeqMatch import base_prob
from src.fastqparser import phred_score_dict
from src.fastqparser import fastqIterator
from src.fastqparser import Fastq
from src.fastqparser import fastqWriter
from src.progressbar import Bar
from operator import itemgetter
from datetime import timedelta
from shutil import rmtree
import glob
import gzip
import sys
import os
#import concurrent.futures as cf
def run(args):
print("Precomputing base probabilities...")
# Precompute string to phred scores dictionary
phred_dict = phred_score_dict(args.phredOffset)
# Precompute base probabilities for phredscores up to 50
base_prob_precompute = {}
for letter in phred_dict:
base_prob_precompute[letter] = base_prob(phred_dict[letter])
# Convert index qual argument to a qual character
args.indexQual = chr(args.indexQual + args.phredOffset)
print("Searching for fastqs...")
# Check that the multiplexed path exists
multiplexed_dir = os.path.join(args.inDir, "multiplexed")
if not os.path.exists(multiplexed_dir):
sys.exit("Directory '<inDir>/multiplexed' does not exist. Re-run with"
" different <inDir>")
# Create out directory
out_dir = "demultiplexed"
if args.uniqID != None:
out_dir += "_{0}".format(args.uniqID)
out_dir = os.path.join(args.inDir, out_dir)
create_folder(out_dir)
# Initiate multiplexed class
multiplexed = Multiplex(multiplexed_dir)
print("Loading index sequences...")
# Initiate sample sheet and read possible indexes
sampleSheet = SampleSheet(args.sampleSheet)
sampleSheet.parse(args.indexQual, base_prob_precompute)
# Check that there are the same number of indexes in sample sheet and
# multiplexed fastqs
if sampleSheet.is_dualindexed != multiplexed.is_dualindexed:
sys.exit("Error: Different number of indexes in sampleSheet and "
"multiplexed reads. Exiting!")
print("Initiating...")
# Open output class for each sample, and a not_assigned group
sample_out = {}
for sample in list(sampleSheet.sample_indexes.keys()) + ['not_assigned']:
sample_out[sample] = Sample(sample, out_dir, multiplexed.is_pairend,
multiplexed.is_dualindexed)
# Initiate progress bar
num_records = file_len(multiplexed.barcode_paths[0]) / 4
bar = Bar('Demultiplexing', max=int(num_records/10000),
suffix='%(percent)d%% %(eta)a secs')
c = 1
for variables in futures_iterate_reads(base_prob_precompute,
multiplexed, sampleSheet, args.minProb):
# Get output
output = futures_barcode_to_indexes(variables)
# Unpack output
((read_records, barcode_records), sample, prob, _) = output
# Write record to correct sample file
sample_out[sample].write(read_records, barcode_records)
# Update progress
if c % 10000 == 0:
bar.next()
c += 1
# Close progress bar
bar.finish()
# Close all sample handles
for sample_name in sample_out:
sample_out[sample_name].close_handles()
print("Finished!")
"""
# Send each read/barcode record to futures to match up to sample
with cf.ProcessPoolExecutor(max_workers=args.numCPU) as executor:
c = 1
# Map read/barcode records
for output in executor.map(futures_barcode_to_indexes,
futures_iterate_reads(multiplexed, sampleSheet,
base_prob_precompute, args.minProb)):
# Unpack output
((read_records, barcode_records), sample, prob, _) = output
# Write record to correct sample file
sample_out[sample].write(read_records, barcode_records)
# Update progress
if c % 1000 == 0:
print(c)
c += 1
"""
return 0
def futures_iterate_reads(base_prob_precompute, multiplexed, sampleSheet,
min_prob):
""" Returns an iterator that contains everything needed for futures.
"""
for combined_record in multiplexed.iterate(base_prob_precompute):
yield (combined_record, sampleSheet, min_prob)
def futures_barcode_to_indexes(variables):
""" Compares the reads barcodes to sample indexes and returns matching
sample name.
"""
# Unpack variables
(combined_record, sampleSheet, min_prob) = variables
# Get barcode records
_, barcode_records = combined_record
# Find sample
b1_header, sample, prob = match_barcode_to_indexes(barcode_records,
sampleSheet, min_prob)
if sample == None:
sample = 'not_assigned'
# Append probability to barcode1 header
b1_header = "{0} {1}".format(b1_header, prob)
# Change header
combined_record[1][0].id = b1_header
return combined_record, sample, prob, b1_header
def match_barcode_to_indexes(barcode_records, sampleSheet, min_prob):
""" For the barcode pair, caluclates probability of a match against each set
of indexes
"""
index_probs = {}
for sample_name in sampleSheet.sample_indexes:
index_records = sampleSheet.sample_indexes[sample_name]
# Calculate the match probability for barcode 1
b1_prob = sequences_match_prob(index_records[0].seq,
index_records[0].qual_prob,
barcode_records[0].seq,
barcode_records[0].qual_prob, 0)
# Do for second barcode if present
if sampleSheet.is_dualindexed:
# Skip if already below the threshold, else assign same prob as b1
if b1_prob >= min_prob:
b2_prob = sequences_match_prob(index_records[1].seq,
index_records[1].qual_prob,
barcode_records[1].seq,
barcode_records[1].qual_prob, 0)
else:
b2_prob = b1_prob
        # Calculate combined probability
if sampleSheet.is_dualindexed:
overall_prob = b1_prob * b2_prob
else:
overall_prob = b1_prob
# Save result
index_probs[sample_name] = overall_prob
# Sort the results by their probability
sorted_probs = sorted(index_probs.items(), key=itemgetter(1),
reverse=True)
# Return header, sample, prob
header = barcode_records[0].id
if sorted_probs[0][1] > min_prob:
return header, sorted_probs[0][0], sorted_probs[0][1]
else:
return header, None, sorted_probs[0][1]
class Sample:
# Class for each possible sample. 1) Holds the output directory for that
# sample. 2) Opens handles. 3) Writes record to sample.
def __init__(self, name, out_dir, is_pe, id_dual):
self.read_paths = []
self.barcode_paths = []
self.read_handles = None
self.barcode_handles = None
# Create directory for sample
name = name.replace(' ', '_')
self.sample_dir = os.path.join(out_dir, name)
create_folder(self.sample_dir)
# Create read paths
self.read_paths.append(os.path.join(self.sample_dir,
'{0}.R1.fastq.gz'.format(name)))
if is_pe:
self.read_paths.append(os.path.join(self.sample_dir,
'{0}.R2.fastq.gz'.format(name)))
# Create barcode paths
self.barcode_paths.append(os.path.join(self.sample_dir,
'{0}.barcode_1.fastq.gz'.format(name)))
if id_dual:
self.barcode_paths.append(os.path.join(self.sample_dir,
'{0}.barcode_2.fastq.gz'.format(name)))
def open_handles(self):
""" For the reads and barcodes, opens output handles.
"""
self.read_handles = [get_handle(read_path, 'w') for read_path
in self.read_paths]
self.barcode_handles = [get_handle(barcode_path, 'w') for barcode_path
in self.barcode_paths]
return 0
def write(self, read_records, barcode_records):
""" Writes the demultiplexed read and barcode records to sample file.
"""
# Open handles if not open
if self.read_handles == None:
self.open_handles()
# Write read records
for i in range(len(read_records)):
fastqWriter(read_records[i], self.read_handles[i])
# Write barcode records
for i in range(len(barcode_records)):
fastqWriter(barcode_records[i], self.barcode_handles[i])
return 0
def close_handles(self):
""" Closes any open handles.
"""
if self.read_handles != None:
for handle in self.read_handles + self.barcode_handles:
handle.close()
return 0
class SampleSheet:
# Class to hold the sample sheet and retrieve indexes from it.
def __init__(self, path):
self.path = path
def parse(self, index_qual, base_prob_precompute):
""" Parses the sample sheet to retrieve the indexes for each sample.
"""
sample_indexes = {}
with open(self.path, 'r') as in_h:
# Skip to line after [Data]
line = in_h.readline()
while not line.startswith('[Data]'):
line = in_h.readline()
# Get header
header = in_h.readline().rstrip().lower().split(',')
col_ind = dict(zip(header, range(len(header))))
# Save whether it is dual indexed
if "index2" in col_ind.keys():
self.is_dualindexed = True
else:
self.is_dualindexed = False
# Get indexes
for line in in_h:
# Break if EOF
if line.strip() == "":
break
# Get info
parts = line.rstrip().split(',')
sample_name = parts[col_ind['sample_name']]
# If sample_name is empty, take sample_id instead
if sample_name == "":
sample_name = parts[col_ind['sample_id']]
# Get first index
index1 = parts[col_ind['index']]
sample_indexes[sample_name] = [index1]
# Get second index
if self.is_dualindexed:
index2 = parts[col_ind['index2']]
sample_indexes[sample_name].append(index2)
# Convert indexes to seqIO seqRecords
self.sample_indexes = self.convert_index_to_fastqRecord(sample_indexes,
index_qual, base_prob_precompute)
return 0
def convert_index_to_fastqRecord(self, sample_indexes, index_qual,
base_prob_precompute):
""" Converts each index sequence to a seqIO seqRecord.
"""
# For each sample
for sample in sample_indexes:
# For each index
for i in range(len(sample_indexes[sample])):
raw_seq = sample_indexes[sample][i]
qual = [index_qual] * len(raw_seq)
# Convert to fastqRecord
record = Fastq(None, raw_seq, qual)
# Calculate base probabilities
record.qual_to_prob(base_prob_precompute)
# Save record
sample_indexes[sample][i] = record
return sample_indexes
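# Minimal sketch of the sample sheet layout that SampleSheet.parse() expects;
# the sample names and index sequences are illustrative, and the optional
# 'index2' column marks the run as dual-indexed:
#
#   [Data]
#   sample_id,sample_name,index,index2
#   S1,sample_one,ATCACGTT,AGGCTTAG
#   S2,sample_two,CGATGTTT,GCCTAATT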
class Multiplex:
# Class for the folder of multiplexed reads + barcodes
def __init__(self, folder):
""" Make list of read and barcode files.
"""
self.dir = folder
# Get list of read and barcode paths
self.read_paths = []
self.barcode_paths = []
for fastq in sorted(glob.glob(os.path.join(folder, "*.fastq*"))):
if "barcode_" in os.path.split(fastq)[1]:
self.barcode_paths.append(fastq)
else:
self.read_paths.append(fastq)
# Save whether pairend
if len(self.read_paths) == 1:
self.is_pairend = False
elif len(self.read_paths) == 2:
self.is_pairend = True
else:
sys.exit("There must be 1 or 2 input read fastqs, not {0}".format(
len(self.read_paths)))
# Save whether dualindex
if len(self.barcode_paths) == 1:
self.is_dualindexed = False
elif len(self.barcode_paths) == 2:
self.is_dualindexed = True
else:
sys.exit("There must be 1 or 2 input barcode fastqs, not"
" {0}".format(len(self.barcode_paths)))
return None
def open_handles(self):
""" Opens the file names for reading.
"""
read_handles = [get_handle(filen, 'r') for filen in self.read_paths]
barcode_handles = [get_handle(filen, 'r') for filen
in self.barcode_paths]
return read_handles, barcode_handles
def open_iterators(self, read_handles, barcode_handles):
""" Opens fastq iterators using biopythons SeqIO
"""
# Open iterators for each handle
read_iterators = [fastqIterator(handle) for handle
in read_handles]
barcode_iterators = [fastqIterator(handle) for handle
in barcode_handles]
return read_iterators, barcode_iterators
def iterate(self, base_prob_precompute):
""" Loads the reads and barcode fastqs and yields 1 set at a time.
"""
# Open handles
read_handles, barcode_handles = self.open_handles()
# Open iterators for each handle
read_iterators, barcode_iterators = self.open_iterators(
read_handles, barcode_handles)
# Iterate through records
for r1_record in read_iterators[0]:
# Get read records
read_records = [r1_record]
if self.is_pairend:
read_records.append(next(read_iterators[1]))
# Get barcode records
barcode_records = [next(barcode_iterators[0])]
if self.is_dualindexed:
barcode_records.append(next(barcode_iterators[1]))
# Check that they all have the same title
titles = [record.id.split(" ")[0] for record in read_records + barcode_records]
if len(set(titles)) > 1:
sys.exit('Reads and/or barcodes are not in sync\n'
'{0}'.format(titles))
# Calculate base probabilities for barcodes
for i in range(len(barcode_records)):
barcode_records[i].qual_to_prob(base_prob_precompute)
yield [read_records, barcode_records]
# Close handles
for handle in read_handles + barcode_handles:
handle.close()
def create_folder(folder):
""" Check out folder exists and create a new one.
"""
# Check if it exists
if os.path.exists(folder):
response = input('{0} exists. Would you like to overwrite it? [y/n] '.format(folder))
if response == 'y':
rmtree(folder)
else:
sys.exit()
os.makedirs(folder)
return folder
def get_handle(filen, rw):
""" Returns file handle using gzip if file ends in .gz
"""
if filen.split('.')[-1] == 'gz':
return gzip.open(filen, rw)
else:
return open(filen, rw)
def file_len(fname):
""" Count number of lines in a file.
"""
with get_handle(fname, 'r') as f:
for i, l in enumerate(f):
pass
return i + 1
|
# Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Helper functions to be used by the backend."""
from google.ads.googleads.client import GoogleAdsClient
def populate_adgroup_details(client, account, ag_id):
"""Gets an adgroup ID and returns an adgroup object including
adgroup id, adgroup name and campaign name."""
ga_service = client.get_service('GoogleAdsService', version='v7')
query = '''
SELECT
campaign.name,
ad_group.name,
ad_group.id
FROM
ad_group
WHERE
ad_group.id = %s
''' % (ag_id)
request = client.get_type("SearchGoogleAdsStreamRequest")
request.customer_id = account
request.query = query
response = ga_service.search_stream(request=request)
for batch in response:
for row in batch.results:
return {
'adgroup_id': row.ad_group.id,
'adgroup_name': row.ad_group.name,
'campaign_name': row.campaign.name
}
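# Illustrative usage sketch (the customer and ad group IDs are placeholders and
# 'googleads.yaml' is an assumed local credentials file):
#
#   client = GoogleAdsClient.load_from_storage('googleads.yaml')
#   details = populate_adgroup_details(client, '1234567890', '9876543210')
#   # details -> {'adgroup_id': ..., 'adgroup_name': ..., 'campaign_name': ...}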
|
"""
SALTS XBMC Addon
Copyright (C) 2015 tknorris
This program is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program. If not, see <http://www.gnu.org/licenses/>.
"""
import xbmcaddon
import xbmcplugin
import xbmcgui
import xbmc
import xbmcvfs
import urllib
import urlparse
import sys
import os
import re
addon = xbmcaddon.Addon()
ICON_PATH = os.path.join(addon.getAddonInfo('path'), 'icon.png')
get_setting = addon.getSetting
show_settings = addon.openSettings
def get_path():
return addon.getAddonInfo('path')
def get_profile():
return addon.getAddonInfo('profile')
def set_setting(id, value):
if not isinstance(value, basestring): value = str(value)
addon.setSetting(id, value)
def get_version():
return addon.getAddonInfo('version')
def get_id():
return addon.getAddonInfo('id')
def get_name():
return addon.getAddonInfo('name')
def get_plugin_url(queries):
try:
query = urllib.urlencode(queries)
except UnicodeEncodeError:
for k in queries:
if isinstance(queries[k], unicode):
queries[k] = queries[k].encode('utf-8')
query = urllib.urlencode(queries)
return sys.argv[0] + '?' + query
def end_of_directory(cache_to_disc=True):
xbmcplugin.endOfDirectory(int(sys.argv[1]), cacheToDisc=cache_to_disc)
def create_item(queries, label, thumb='', fanart='', is_folder=None, is_playable=None, total_items=0, menu_items=None, replace_menu=False):
list_item = xbmcgui.ListItem(label, iconImage=thumb, thumbnailImage=thumb)
add_item(queries, list_item, fanart, is_folder, is_playable, total_items, menu_items, replace_menu)
def add_item(queries, list_item, fanart='', is_folder=None, is_playable=None, total_items=0, menu_items=None, replace_menu=False):
if menu_items is None: menu_items = []
if is_folder is None:
is_folder = False if is_playable else True
if is_playable is None:
playable = 'false' if is_folder else 'true'
else:
playable = 'true' if is_playable else 'false'
liz_url = get_plugin_url(queries)
if fanart: list_item.setProperty('fanart_image', fanart)
list_item.setInfo('video', {'title': list_item.getLabel()})
list_item.setProperty('isPlayable', playable)
list_item.addContextMenuItems(menu_items, replaceItems=replace_menu)
xbmcplugin.addDirectoryItem(int(sys.argv[1]), liz_url, list_item, isFolder=is_folder, totalItems=total_items)
def parse_query(query):
q = {'mode': 'main'}
if query.startswith('?'): query = query[1:]
queries = urlparse.parse_qs(query)
for key in queries:
if len(queries[key]) == 1:
q[key] = queries[key][0]
else:
q[key] = queries[key]
return q
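# Illustrative round trip (values are made up): get_plugin_url({'mode': 'play', 'id': '42'})
# yields something like 'plugin://plugin.video.example/?mode=play&id=42', and
# parse_query('?mode=play&id=42') returns {'mode': 'play', 'id': '42'}.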
def notify(header=None, msg='', duration=2000, sound=None):
if header is None: header = get_name()
if sound is None: sound = get_setting('mute_notifications') == 'false'
xbmcgui.Dialog().notification(header, msg, ICON_PATH, duration, sound)
def get_current_view():
skinPath = xbmc.translatePath('special://skin/')
xml = os.path.join(skinPath, 'addon.xml')
f = xbmcvfs.File(xml)
read = f.read()
f.close()
try: src = re.search('defaultresolution="([^"]+)', read, re.DOTALL).group(1)
except: src = re.search('<res.+?folder="([^"]+)', read, re.DOTALL).group(1)
src = os.path.join(skinPath, src, 'MyVideoNav.xml')
f = xbmcvfs.File(src)
read = f.read()
f.close()
match = re.search('<views>([^<]+)', read, re.DOTALL)
if match:
views = match.group(1)
for view in views.split(','):
if xbmc.getInfoLabel('Control.GetLabel(%s)' % (view)): return view
|
import os
os.environ['CUDA_LAUNCH_BLOCKING'] = '1'
import pandas as pd
import matplotlib
import numpy as np
import math
import matplotlib.pyplot as plt
from sklearn.preprocessing import Imputer
from sklearn.cross_validation import train_test_split
from sklearn import preprocessing
import plotly.tools as tls
from sqlalchemy import create_engine # database connection
import datetime as dt
import io
import logging
import plotly.plotly as py # interactive graphing
from plotly.offline import download_plotlyjs, init_notebook_mode, plot, iplot
from plotly.graph_objs import Bar, Scatter, Marker, Layout
from heraspy.model import HeraModel
np.random.seed(1337)
import theano
import keras
from keras.preprocessing.sequence import pad_sequences
from keras.models import Model,model_from_yaml
from keras.layers import Input, Dense, GRU, LSTM, TimeDistributed, Masking,merge
from model import *
import argparse
import sys
import pickle  # used below to persist encoders and the model architecture
if __name__ == "__main__":
t_start = dt.datetime.now()
parser = argparse.ArgumentParser(prog='Weighted Model')
parser.add_argument('-t','--table',required=True)
args = parser.parse_args()
####################################DATA SOURCE################################
table = vars(args)['table']
# table = 'data_trim'
# rsl_file = './data/gs_results_trim.csv'
# rsl_file = './data/psql_data_trim.csv'
# table = 'data_little_enc'
# rsl_file = './data/gs_results_little.csv'
# table = 'data_more'
# rsl_file = './data/gs_results_more.csv'
# table = 'auth'
# rsl_file = './data/auth.csv'
events_tbl = 'event'
events_tbl = None
rsl_file = './data/psql_{table}.csv'.format(table=table)
################################################################################
print "Commencing..."
data_dir = './data/'
evt_name = 'Featurespace_events_output.csv'
auth_name = 'Featurespace_auths_output.csv'
db_name = 'c1_agg.db'
address = "postgresql+pg8000://script@localhost:5432/ccfd"
# disk_engine = create_engine('sqlite:///'+data_dir+db_name,convert_unicode=True)
# disk_engine.raw_connection().connection.text_factory = str
disk_engine = create_engine(address)
#######################Settings#############################################
samples_per_epoch = trans_num_table(table,disk_engine,mode='train',trans_mode='train')
# epoch_limit = 10000
# samples_per_epoch = epoch_limit
# user_sample_size = 8000
epoch_limit = samples_per_epoch
user_sample_size = None
nb_epoch = 300
fraud_w_list = [1000.]
##########ENCODERS CONF
tbl_src = 'auth'
# tbl_src = table
tbl_evnt = 'event'
##################################
batch_size = 300
batch_size_val = 1000
print "SAMPLES per epoch:",samples_per_epoch
print "User sample size:",user_sample_size
print 'sequence length size',batch_size
# samples_per_epoch = 1959
# table = 'data_trim'
# samples_per_epoch = 485
lbl_pad_val = 2
pad_val = 0
# dropout_W_list = [0.3]
dropout_W_list = [0.4,0.5,0.6,0.7]
# dropout_W_list = [0.15,0.3,0.4,0.8]
input_dim = 44
hid_dims = [320]
num_l = [7]
lr_s = [2.5e-4]
# lr_s = [1.25e-4,6e-5]
# lr_s = [1e-2,1e-3,1e-4]
# lr_s = [1e-1,1e-2,1e-3]
num_opt = 1
opts = lambda x,lr:[keras.optimizers.RMSprop(lr=lr, rho=0.9, epsilon=1e-08),
# keras.optimizers.Adam(lr=lr, beta_1=0.9, beta_2=0.999, epsilon=1e-08),
# keras.optimizers.Nadam(lr=lr, beta_1=0.9, beta_2=0.999, epsilon=1e-08, schedule_decay=0.004)
][x]
# add_info = str(int(seq_len_param))+'_class_w_'+str(fraud_w)
print 'Populating encoders'
path_encoders ='./data/encoders/{tbl_src}+{tbl_evnt}'.format(tbl_src=tbl_src,tbl_evnt=tbl_evnt)
if os.path.exists(path_encoders):
encoders = load_encoders(path_encoders)
else:
encoders = populate_encoders_scale(tbl_src,disk_engine,tbl_evnt)
with open(path_encoders, 'wb') as output:
pickle.dump(encoders, output, pickle.HIGHEST_PROTOCOL)
print 'ENCODERS SAVED to {path}!'.format(path=path_encoders)
# sys.exit()
gru_dict = {}
lstm_dict = {}
for fraud_w in fraud_w_list:
add_info = 'Mask=pad_class_w_'+str(fraud_w)+'ES-OFF'
class_weight = {0 : 1.,
1: fraud_w,
2: 0.}
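        # Assumed label semantics: 0 = genuine, 1 = fraud (up-weighted by
        # fraud_w) and 2 = the padding label (lbl_pad_val), which is masked out
        # with a zero weight.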
for dropout_W in dropout_W_list:
for hidden_dim in hid_dims:
# gru
for opt_id in range(num_opt):
for lr in lr_s:
optimizer = opts(opt_id,lr)
for num_layers in num_l:
for rnn in ['gru']:
short_title = 'bi_'+rnn.upper()+'_'+str(hidden_dim)+'_'+str(num_layers)+'_DO-'+str(dropout_W)+'_w'+str(class_weight[1])
title = 'Bidirectional_Class'+str(class_weight[1])+'_'+rnn.upper()+'_'+str(hidden_dim)+'_'+str(num_layers)+'_'+str(type(optimizer).__name__)+'_'+str(lr)+'_epochs_'+str(nb_epoch)+'_DO-'+str(dropout_W)
print title
input_layer = Input(shape=(int(seq_len_param), input_dim),name='main_input')
mask = Masking(mask_value=pad_val)(input_layer)
x = mask
for i in range(num_layers):
if rnn == 'gru':
prev_frw = GRU(hidden_dim,#input_length=50,
return_sequences=True,go_backwards=False,stateful=False,
unroll=False,consume_less='gpu',
init='glorot_uniform', inner_init='orthogonal', activation='tanh',
inner_activation='hard_sigmoid', W_regularizer=None, U_regularizer=None,
b_regularizer=None, dropout_W=dropout_W, dropout_U=0.0)(x)
prev_bck = GRU(hidden_dim,#input_length=50,
return_sequences=True,go_backwards=True,stateful=False,
unroll=False,consume_less='gpu',
init='glorot_uniform', inner_init='orthogonal', activation='tanh',
inner_activation='hard_sigmoid', W_regularizer=None, U_regularizer=None,
b_regularizer=None, dropout_W=dropout_W, dropout_U=0.0)(x)
else:
prev_frw = LSTM(hidden_dim, return_sequences=True,go_backwards=False,stateful=False,
init='glorot_uniform', inner_init='orthogonal',
forget_bias_init='one', activation='tanh', inner_activation='hard_sigmoid',
W_regularizer=None, U_regularizer=None, b_regularizer=None, dropout_W=dropout_W, dropout_U=0.0)(x)
prev_bck = LSTM(hidden_dim, return_sequences=True,go_backwards=True,stateful=False,
init='glorot_uniform', inner_init='orthogonal',
forget_bias_init='one', activation='tanh', inner_activation='hard_sigmoid',
W_regularizer=None, U_regularizer=None, b_regularizer=None, dropout_W=dropout_W, dropout_U=0.0)(x)
x = merge([prev_frw, prev_bck], mode='concat')
output_layer = TimeDistributed(Dense(3,activation='softmax'))(x)
model = Model(input=[input_layer],output=[output_layer])
model.compile(optimizer=optimizer,
loss='sparse_categorical_crossentropy',
metrics=['accuracy'],
sample_weight_mode="temporal")
########save architecture ######
arch_dir = './data/models/archs/'+short_title+'.yml'
yaml_string = model.to_yaml()
with open(arch_dir, 'wb') as output:
pickle.dump(yaml_string, output, pickle.HIGHEST_PROTOCOL)
print 'model saved!'
##############
user_mode = 'train'
trans_mode = 'train'
data_gen = data_generator(user_mode,trans_mode,disk_engine,encoders,table=table,
batch_size=batch_size,usr_ratio=80,class_weight=class_weight,lbl_pad_val = lbl_pad_val, pad_val = pad_val,
sub_sample=user_sample_size,epoch_size=epoch_limit,events_tbl=events_tbl)
# sub_sample=user_sample_size,epoch_size=samples_per_epoch)
########validation data
print 'Generating Validation set!'
user_mode = 'test'
trans_mode = 'test'
val_gen = data_generator(user_mode,trans_mode,disk_engine,encoders,table=table,
batch_size=batch_size_val,usr_ratio=80,class_weight=class_weight,lbl_pad_val = lbl_pad_val, pad_val = pad_val,
sub_sample=None,epoch_size=None,events_tbl=events_tbl)
validation_data = next(val_gen)
print '################GENERATED#######################'
###############CALLBACKS
patience = 30
early_Stop = keras.callbacks.EarlyStopping(monitor='val_loss', patience=patience, verbose=0, mode='auto')
save_path = './data/models/'+table+'/'
var_name = '.{epoch:02d}-{val_loss:.5f}.hdf5'
checkpoint = keras.callbacks.ModelCheckpoint(save_path+short_title+var_name, monitor='val_loss', verbose=1, save_best_only=True, mode='auto')
root_url = 'http://localhost:9000'
remote_log = keras.callbacks.RemoteMonitor(root=root_url)
# callbacks = [early_Stop,checkpoint]
callbacks = [early_Stop,checkpoint,remote_log]
callbacks = []
history = model.fit_generator(data_gen, samples_per_epoch, nb_epoch, verbose=1, callbacks=callbacks,validation_data=validation_data, nb_val_samples=None, class_weight=None, max_q_size=10000)
py.sign_in('bottydim', 'o1kuyms9zv')
auc_list = []
print '#########################TRAIN STATS################'
user_mode = 'train'
trans_mode = 'train'
val_samples = trans_num_table(table,disk_engine,mode=user_mode,trans_mode=trans_mode)
print '# samples',val_samples
plt_filename = './figures/GS/'+table+'/'+'ROC_'+user_mode+'_'+trans_mode+'_'+title+'_'+add_info+".png"
data_gen = data_generator(user_mode,trans_mode,disk_engine,encoders,table=table,
batch_size=batch_size,usr_ratio=80,class_weight=None,lbl_pad_val = lbl_pad_val, pad_val = pad_val,events_tbl=events_tbl)
eval_list = eval_auc_generator(model, data_gen, val_samples, max_q_size=10000,plt_filename=plt_filename)
auc_val = eval_list[0]
clc_report = eval_list[1]
acc = eval_list[2]
print "AUC:",auc_val
                                print 'Classification report'
print clc_report
print 'Accuracy'
print acc
auc_list.append(str(auc_val))
print '##################EVALUATION USERS#########################'
user_mode = 'test'
trans_mode = 'train'
val_samples = trans_num_table(table,disk_engine,mode=user_mode,trans_mode=trans_mode)
print '# samples',val_samples
plt_filename = './figures/GS/'+table+'/'+'ROC_'+user_mode+'_'+trans_mode+'_'+title+'_'+add_info+".png"
eval_gen = data_generator(user_mode,trans_mode,disk_engine,encoders,table=table,
batch_size=batch_size,usr_ratio=80,class_weight=None,lbl_pad_val = lbl_pad_val, pad_val = pad_val,events_tbl=events_tbl)
eval_list = eval_auc_generator(model, eval_gen, val_samples, max_q_size=10000,plt_filename=plt_filename)
auc_val = eval_list[0]
clc_report = eval_list[1]
acc = eval_list[2]
print "AUC:",auc_val
                                print 'Classification report'
print clc_report
print 'Accuracy'
print acc
auc_list.append(str(auc_val))
print '#####################################################'
print '##################EVALUATION Transactions#########################'
user_mode = 'train'
trans_mode = 'test'
val_samples = trans_num_table(table,disk_engine,mode=user_mode,trans_mode=trans_mode)
print '# samples',val_samples
plt_filename = './figures/GS/'+table+'/'+'ROC_'+user_mode+'_'+trans_mode+'_'+title+'_'+add_info+".png"
eval_gen = data_generator(user_mode,trans_mode,disk_engine,encoders,table=table,
batch_size=batch_size,usr_ratio=80,class_weight=None,lbl_pad_val = lbl_pad_val, pad_val = pad_val,events_tbl=events_tbl)
eval_list = eval_auc_generator(model, eval_gen, val_samples, max_q_size=10000,plt_filename=plt_filename)
auc_val = eval_list[0]
clc_report = eval_list[1]
acc = eval_list[2]
print "AUC:",auc_val
                                print 'Classification report'
print clc_report
print 'Accuracy'
print acc
auc_list.append(str(auc_val))
print '#####################################################'
print '##################EVALUATION Pure#########################'
user_mode = 'test'
trans_mode = 'test'
val_samples = trans_num_table(table,disk_engine,mode=user_mode,trans_mode=trans_mode)
print '# samples',val_samples
plt_filename = './figures/GS/'+table+'/'+'ROC_'+user_mode+'_'+trans_mode+'_'+title+'_'+add_info+".png"
eval_gen = data_generator(user_mode,trans_mode,disk_engine,encoders,table=table,
batch_size=batch_size,usr_ratio=80,class_weight=None,lbl_pad_val = lbl_pad_val, pad_val = pad_val,events_tbl=events_tbl)
eval_list = eval_auc_generator(model, eval_gen, val_samples, max_q_size=10000,plt_filename=plt_filename)
auc_val = eval_list[0]
clc_report = eval_list[1]
acc = eval_list[2]
print "AUC:",auc_val
                                print 'Classification report'
print clc_report
print 'Accuracy'
print acc
auc_list.append(str(auc_val))
print '#####################################################'
with io.open(rsl_file, 'a', encoding='utf-8') as file:
auc_string = ','.join(auc_list)
title_csv = title.replace('_',',')+','+str(history.history['acc'][-1])+','+str(history.history['loss'][-1])+','+str(auc_val)+','+str(acc)+','+auc_string+'\n'
file.write(unicode(title_csv))
print 'logged @ {file}'.format(file=rsl_file)
trim_point = -15
fig = {
'data': [Scatter(
x=history.epoch[trim_point:],
y=history.history['loss'][trim_point:])],
'layout': {'title': title}
}
py.image.save_as(fig,filename='./results/figures/'+table+'/'+short_title+'_'+'LOSS'+'_'+add_info+".png")
trim_point = 0
fig = {
'data': [Scatter(
x=history.epoch[trim_point:],
y=history.history['loss'][trim_point:])],
'layout': {'title': title}
}
py.image.save_as(fig,filename='./results/figures/'+table+'/'+short_title+'_'+'LOSS'+'_'+'FULL'+".png")
# iplot(fig,filename='figures/'+title,image='png')
# title = title.replace('Loss','Acc')
fig = {
'data': [Scatter(
x=history.epoch[trim_point:],
y=history.history['acc'][trim_point:])],
'layout': {'title': title}
}
filename_val='./results/figures/'+table+'/'+short_title+'_'+'ACC'+'_'+add_info+".png"
py.image.save_as(fig,filename=filename_val)
print 'exported @',filename_val
fig = {
'data': [Scatter(
x=history.epoch[trim_point:],
y=history.history['val_loss'][trim_point:])],
'layout': {'title': title}
}
py.image.save_as(fig,filename='./results/figures/'+table+'/'+short_title+'_'+'VAL LOSS'+'_'+add_info+".png")
print 'time taken: {time}'.format(time=days_hours_minutes_seconds(dt.datetime.now()-t_start))
|
# -*- coding: utf-8 -*-
from setuptools import setup, find_packages
from dnsimple_zoneimport import meta
f = open('requirements.txt', 'r')
lines = f.readlines()
requirements = [l.strip().strip('\n') for l in lines if l.strip() and not l.strip().startswith('#')]
readme = open('README.rst').read()
setup(name='dnsimple-zoneimport',
version=meta.version,
description=meta.description,
author=meta.author,
author_email=meta.author_email,
url='https://github.com/wbrp/dnsimple-zoneimport',
packages=find_packages(),
zip_safe=False,
include_package_data=True,
license=meta.license,
keywords='dnsimple dns "zone files" bind import api',
long_description=readme,
install_requires=requirements,
entry_points={
'console_scripts': [
'%s = dnsimple_zoneimport.importer:main' % meta.title.replace('-', '_'),
]
},
classifiers=[
'Development Status :: 4 - Beta',
'Environment :: Console',
'License :: OSI Approved :: MIT License',
'Natural Language :: English',
'Operating System :: MacOS',
'Operating System :: POSIX :: Linux',
'Programming Language :: Python :: 2.7',
'Topic :: Internet :: Name Service (DNS)',
'Topic :: Terminals',
],
)
|
# -*- coding: utf-8 -*-
# Copyright (C) 2012 Yahoo! Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from oslotest import base as test_base
import six
import testtools
from oslo_utils import reflection
if six.PY3:
RUNTIME_ERROR_CLASSES = ['RuntimeError', 'Exception',
'BaseException', 'object']
else:
RUNTIME_ERROR_CLASSES = ['RuntimeError', 'StandardError', 'Exception',
'BaseException', 'object']
def dummy_decorator(f):
@six.wraps(f)
def wrapper(*args, **kwargs):
return f(*args, **kwargs)
return wrapper
def mere_function(a, b):
pass
def function_with_defs(a, b, optional=None):
pass
def function_with_kwargs(a, b, **kwargs):
pass
class Class(object):
def method(self, c, d):
pass
@staticmethod
def static_method(e, f):
pass
@classmethod
def class_method(cls, g, h):
pass
class CallableClass(object):
def __call__(self, i, j):
pass
class ClassWithInit(object):
def __init__(self, k, l):
pass
class CallbackEqualityTest(test_base.BaseTestCase):
def test_different_simple_callbacks(self):
def a():
pass
def b():
pass
self.assertFalse(reflection.is_same_callback(a, b))
def test_static_instance_callbacks(self):
class A(object):
@staticmethod
def b(a, b, c):
pass
a = A()
b = A()
self.assertTrue(reflection.is_same_callback(a.b, b.b))
def test_different_instance_callbacks(self):
class A(object):
def b(self):
pass
def __eq__(self, other):
return True
b = A()
c = A()
self.assertFalse(reflection.is_same_callback(b.b, c.b))
self.assertTrue(reflection.is_same_callback(b.b, c.b, strict=False))
class GetCallableNameTest(test_base.BaseTestCase):
def test_mere_function(self):
name = reflection.get_callable_name(mere_function)
self.assertEqual('.'.join((__name__, 'mere_function')), name)
def test_method(self):
name = reflection.get_callable_name(Class.method)
self.assertEqual('.'.join((__name__, 'Class', 'method')), name)
def test_instance_method(self):
name = reflection.get_callable_name(Class().method)
self.assertEqual('.'.join((__name__, 'Class', 'method')), name)
def test_static_method(self):
name = reflection.get_callable_name(Class.static_method)
if six.PY3:
self.assertEqual('.'.join((__name__, 'Class', 'static_method')),
name)
else:
# NOTE(imelnikov): static method are just functions, class name
# is not recorded anywhere in them.
self.assertEqual('.'.join((__name__, 'static_method')), name)
def test_class_method(self):
name = reflection.get_callable_name(Class.class_method)
self.assertEqual('.'.join((__name__, 'Class', 'class_method')), name)
def test_constructor(self):
name = reflection.get_callable_name(Class)
self.assertEqual('.'.join((__name__, 'Class')), name)
def test_callable_class(self):
name = reflection.get_callable_name(CallableClass())
self.assertEqual('.'.join((__name__, 'CallableClass')), name)
def test_callable_class_call(self):
name = reflection.get_callable_name(CallableClass().__call__)
self.assertEqual('.'.join((__name__, 'CallableClass',
'__call__')), name)
# These extended/special case tests only work on python 3, due to python 2
# being broken/incorrect with regard to these special cases...
@testtools.skipIf(not six.PY3, 'python 3.x is not currently available')
class GetCallableNameTestExtended(test_base.BaseTestCase):
# Tests items in http://legacy.python.org/dev/peps/pep-3155/
class InnerCallableClass(object):
def __call__(self):
pass
def test_inner_callable_class(self):
obj = self.InnerCallableClass()
name = reflection.get_callable_name(obj.__call__)
expected_name = '.'.join((__name__, 'GetCallableNameTestExtended',
'InnerCallableClass', '__call__'))
self.assertEqual(expected_name, name)
def test_inner_callable_function(self):
def a():
def b():
pass
return b
name = reflection.get_callable_name(a())
expected_name = '.'.join((__name__, 'GetCallableNameTestExtended',
'test_inner_callable_function', '<locals>',
'a', '<locals>', 'b'))
self.assertEqual(expected_name, name)
def test_inner_class(self):
obj = self.InnerCallableClass()
name = reflection.get_callable_name(obj)
expected_name = '.'.join((__name__,
'GetCallableNameTestExtended',
'InnerCallableClass'))
self.assertEqual(expected_name, name)
class GetCallableArgsTest(test_base.BaseTestCase):
def test_mere_function(self):
result = reflection.get_callable_args(mere_function)
self.assertEqual(['a', 'b'], result)
def test_function_with_defaults(self):
result = reflection.get_callable_args(function_with_defs)
self.assertEqual(['a', 'b', 'optional'], result)
def test_required_only(self):
result = reflection.get_callable_args(function_with_defs,
required_only=True)
self.assertEqual(['a', 'b'], result)
def test_method(self):
result = reflection.get_callable_args(Class.method)
self.assertEqual(['self', 'c', 'd'], result)
def test_instance_method(self):
result = reflection.get_callable_args(Class().method)
self.assertEqual(['c', 'd'], result)
def test_class_method(self):
result = reflection.get_callable_args(Class.class_method)
self.assertEqual(['g', 'h'], result)
def test_class_constructor(self):
result = reflection.get_callable_args(ClassWithInit)
self.assertEqual(['k', 'l'], result)
def test_class_with_call(self):
result = reflection.get_callable_args(CallableClass())
self.assertEqual(['i', 'j'], result)
def test_decorators_work(self):
@dummy_decorator
def special_fun(x, y):
pass
result = reflection.get_callable_args(special_fun)
self.assertEqual(['x', 'y'], result)
class AcceptsKwargsTest(test_base.BaseTestCase):
def test_no_kwargs(self):
self.assertEqual(False, reflection.accepts_kwargs(mere_function))
def test_with_kwargs(self):
self.assertEqual(True, reflection.accepts_kwargs(function_with_kwargs))
class GetClassNameTest(test_base.BaseTestCase):
def test_std_exception(self):
name = reflection.get_class_name(RuntimeError)
self.assertEqual('RuntimeError', name)
def test_class(self):
name = reflection.get_class_name(Class)
self.assertEqual('.'.join((__name__, 'Class')), name)
def test_instance(self):
name = reflection.get_class_name(Class())
self.assertEqual('.'.join((__name__, 'Class')), name)
def test_int(self):
name = reflection.get_class_name(42)
self.assertEqual('int', name)
class GetAllClassNamesTest(test_base.BaseTestCase):
def test_std_class(self):
names = list(reflection.get_all_class_names(RuntimeError))
self.assertEqual(RUNTIME_ERROR_CLASSES, names)
def test_std_class_up_to(self):
names = list(reflection.get_all_class_names(RuntimeError,
up_to=Exception))
self.assertEqual(RUNTIME_ERROR_CLASSES[:-2], names)
|
#!/usr/bin/env python
"""Encoding and decoding of a question once for each codec.
Example execution:
$ ./question.py
ASN.1 specification:
-- A simple protocol taken from Wikipedia.
Foo DEFINITIONS ::= BEGIN
Question ::= SEQUENCE {
id INTEGER,
question IA5String
}
Answer ::= SEQUENCE {
id INTEGER,
answer BOOLEAN
}
END
Question to encode: {'id': 1, 'question': 'Is 1+1=3?'}
BER:
Encoded: 300e0201011609497320312b313d333f (16 bytes)
Decoded: {'id': 1, 'question': 'Is 1+1=3?'}
DER:
Encoded: 300e0201011609497320312b313d333f (16 bytes)
Decoded: {'id': 1, 'question': 'Is 1+1=3?'}
JER:
Encoded: 7b226964223a312c227175657374696f6e223a22497320312b313d333f227d (31 bytes)
Decoded: {'id': 1, 'question': 'Is 1+1=3?'}
OER:
Encoded: 010109497320312b313d333f (12 bytes)
Decoded: {'id': 1, 'question': 'Is 1+1=3?'}
PER:
Encoded: 010109497320312b313d333f (12 bytes)
Decoded: {'id': 1, 'question': 'Is 1+1=3?'}
UPER:
Encoded: 01010993cd03156c5eb37e (11 bytes)
Decoded: {'id': 1, 'question': 'Is 1+1=3?'}
XER:
Encoded: 3c5175657374696f6e3e3c69643e313c2f69643e3c7175657374696f6e3e497320312b313d333f3c2f7175657374696f6e3e3c2f5175657374696f6e3e (61 bytes)
Decoded: {'id': 1, 'question': 'Is 1+1=3?'}
Protocol Buffers:
Encoded: 08011209497320312b313d333f (13 bytes)
Decoded:
id: 1
question: "Is 1+1=3?"
$
"""
from __future__ import print_function
import os
from binascii import hexlify
import asn1tools
from foo_pb2 import Question
SCRIPT_DIR = os.path.dirname(os.path.realpath(__file__))
FOO_ASN_PATH = os.path.join(SCRIPT_DIR,
'..',
'..',
'..',
'tests',
'files',
'foo.asn')
# Print the specification.
print('ASN.1 specification:')
print()
with open(FOO_ASN_PATH) as fin:
print(fin.read())
# The question to encode.
question = {'id': 1, 'question': 'Is 1+1=3?'}
print("Question to encode:", question)
# Encode and decode the question once for each codec.
for codec in ['ber', 'der', 'jer', 'oer', 'per', 'uper', 'xer']:
foo = asn1tools.compile_files(FOO_ASN_PATH, codec)
encoded = foo.encode('Question', question)
decoded = foo.decode('Question', encoded)
print()
print('{}:'.format(codec.upper()))
print('Encoded: {} ({} bytes)'.format(hexlify(encoded).decode('ascii'),
len(encoded)))
print('Decoded:', decoded)
# Also encode using protocol buffers.
question = Question()
question.id = 1
question.question = 'Is 1+1=3?'
encoded = question.SerializeToString()
decoded = question
print()
print('Protocol Buffers:')
print('Encoded: {} ({} bytes)'.format(hexlify(encoded).decode('ascii'),
len(encoded)))
print('Decoded:')
print(decoded)
|
# -*- coding: utf-8 -*-
# Copyright (C) 2014 Johannes Baiter <[email protected]>
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
"""
Various utility functions and classes.
"""
from __future__ import division, unicode_literals, print_function
import abc
import glob
import json
import logging
import os
import pkg_resources
import platform
import re
import subprocess
from unicodedata import normalize
import blinker
import colorama
import psutil
import roman
from colorama import Fore, Back, Style
from spreads.vendor.pathlib import Path
class SpreadsException(Exception):
""" General exception """
pass
class DeviceException(SpreadsException):
""" Raised when a device-related error occured. """
pass
class MissingDependencyException(SpreadsException):
""" Raised when a dependency for a plugin is missing. """
pass
def get_version():
""" Get installed version via pkg_resources. """
return pkg_resources.require('spreads')[0].version
def find_in_path(name):
""" Find executable in $PATH.
:param name: name of the executable
:type name: unicode
:returns: Path to executable or None if not found
:rtype: unicode or None
"""
candidates = None
if is_os('windows'):
import _winreg
if name.startswith('scantailor'):
try:
cmd = _winreg.QueryValue(
_winreg.HKEY_CLASSES_ROOT,
'Scan Tailor Project\\shell\\open\\command')
bin_path = cmd.split('" "')[0][1:]
if name.endswith('-cli'):
bin_path = bin_path[:-4] + "-cli.exe"
return bin_path if os.path.exists(bin_path) else None
except OSError:
return None
else:
path_dirs = os.environ.get('PATH').split(';')
path_dirs.append(os.getcwd())
path_exts = os.environ.get('PATHEXT').split(';')
candidates = (os.path.join(p, name + e)
for p in path_dirs
for e in path_exts)
else:
candidates = (os.path.join(p, name)
for p in os.environ.get('PATH').split(':'))
return next((c for c in candidates if os.path.exists(c)), None)
def is_os(osname):
""" Check if the current operating system matches the expected.
:param osname: Operating system name as returned by
:py:func:`platform.system`
:returns: Whether the OS matches or not
:rtype: bool
"""
return platform.system().lower() == osname
def check_futures_exceptions(futures):
"""" Go through passed :py:class:`concurrent.futures._base.Future` objects
and re-raise the first Exception raised by any one of them.
:param futures: Iterable that contains the futures to be checked
:type futures: iterable with :py:class:`concurrent.futures._base.Future`
instances
"""
if any(x.exception() for x in futures):
raise next(x for x in futures if x.exception()).exception()
def get_free_space(path):
""" Return free space on file-system underlying the passed path.
:param path: Path on file-system the free space of which is desired.
    :type path: unicode
:return: Free space in bytes.
:rtype: int
"""
return psutil.disk_usage(unicode(path)).free
def get_subprocess(cmdline, **kwargs):
""" Get a :py:class:`subprocess.Popen` instance.
On Windows systems, the process will be ran in the background and won't
open a cmd-window or appear in the taskbar.
The function signature matches that of the :py:class:`subprocess.Popen`
initialization method.
"""
if subprocess.mswindows and 'startupinfo' not in kwargs:
su = subprocess.STARTUPINFO()
su.dwFlags |= subprocess.STARTF_USESHOWWINDOW
su.wShowWindow = subprocess.SW_HIDE
kwargs['startupinfo'] = su
return subprocess.Popen(cmdline, **kwargs)
def wildcardify(pathnames):
""" Try to generate a single path with wildcards that matches all
`pathnames`.
:param pathnames: List of pathnames to find a wildcard string for
    :type pathnames: List of str/unicode
:return: The wildcard string or None if none was found
:rtype: unicode or None
"""
wildcard_str = ""
for idx, char in enumerate(pathnames[0]):
if all(p[idx] == char for p in pathnames[1:]):
wildcard_str += char
elif not wildcard_str or wildcard_str[-1] != "*":
wildcard_str += "*"
matched_paths = glob.glob(wildcard_str)
if not sorted(pathnames) == sorted(matched_paths):
return None
return wildcard_str
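# Illustrative example (paths are made up): wildcardify(['/data/img_001.jpg',
# '/data/img_002.jpg']) would return something like '/data/img_00*.jpg',
# provided that pattern matches exactly those two files on disk; otherwise None.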
def diff_dicts(old, new):
""" Get the difference between two dictionaries.
:param old: Dictionary to base comparison on
:type old: dict
:param new: Dictionary to compare with
:type new: dict
:return: A (possibly nested) dictionary containing all items from `new`
that differ from the ones in `old`
:rtype: dict
"""
out = {}
for key, value in old.iteritems():
if new[key] != value:
out[key] = new[key]
elif isinstance(value, dict):
diff = diff_dicts(value, new[key])
if diff:
out[key] = diff
return out
def slugify(text, delimiter=u'-'):
"""Generates an ASCII-only slug.
    Code adapted from a Flask snippet by Armin Ronacher:
http://flask.pocoo.org/snippets/5/
:param text: Text to create slug for
:type text: unicode
:param delimiter: Delimiter to use in slug
:type delimiter: unicode
:return: The generated slug
:rtype: unicode
"""
punctuation_re = r'[\t !"#$%&\'()*\-/<=>?@\[\\\]^_`{|},.]+'
result = []
for word in re.split(punctuation_re, text.lower()):
word = normalize('NFKD', word).encode('ascii', 'ignore')
if word:
result.append(word)
return unicode(delimiter.join(result))
class _instancemethodwrapper(object): # noqa
def __init__(self, callable):
self.callable = callable
self.__dontcall__ = False
def __getattr__(self, key):
return getattr(self.callable, key)
def __call__(self, *args, **kwargs):
if self.__dontcall__:
raise TypeError('Attempted to call abstract method.')
return self.callable(*args, **kwargs)
class _classmethod(classmethod): # noqa
def __init__(self, func):
super(_classmethod, self).__init__(func)
isabstractmethod = getattr(func, '__isabstractmethod__', False)
if isabstractmethod:
self.__isabstractmethod__ = isabstractmethod
def __get__(self, instance, owner):
result = _instancemethodwrapper(super(_classmethod, self)
.__get__(instance, owner))
isabstractmethod = getattr(self, '__isabstractmethod__', False)
if isabstractmethod:
result.__isabstractmethod__ = isabstractmethod
abstractmethods = getattr(owner, '__abstractmethods__', None)
if abstractmethods and result.__name__ in abstractmethods:
result.__dontcall__ = True
return result
class abstractclassmethod(_classmethod): # noqa
""" New decorator class that implements the @abstractclassmethod decorator
added in Python 3.3 for Python 2.7.
Kudos to http://stackoverflow.com/a/13640018/487903
"""
def __init__(self, func):
func = abc.abstractmethod(func)
super(abstractclassmethod, self).__init__(func)
class ColourStreamHandler(logging.StreamHandler):
""" A colorized output StreamHandler
Kudos to Leigh MacDonald: http://goo.gl/Lpr6C5
"""
# Some basic colour scheme defaults
colours = {
'DEBUG': Fore.CYAN,
'INFO': Fore.GREEN,
'WARN': Fore.YELLOW,
'WARNING': Fore.YELLOW,
'ERROR': Fore.RED,
'CRIT': Back.RED + Fore.WHITE,
'CRITICAL': Back.RED + Fore.WHITE
}
@property
def is_tty(self):
""" Check if we are using a "real" TTY. If we are not using a TTY it
means that the colour output should be disabled.
:return: Using a TTY status
:rtype: bool
"""
try:
return getattr(self.stream, 'isatty', None)()
except:
return False
def emit(self, record):
try:
message = self.format(record)
if not self.is_tty:
self.stream.write(message)
else:
self.stream.write(self.colours[record.levelname] +
message + Style.RESET_ALL)
self.stream.write(getattr(self, 'terminator', '\n'))
self.flush()
except (KeyboardInterrupt, SystemExit):
raise
except:
self.handleError(record)
class EventHandler(logging.Handler):
""" Subclass of :py:class:`logging.Handler` that emits a
:py:class:`blinker.base.Signal` whenever a new record is emitted.
"""
signals = blinker.Namespace()
on_log_emit = signals.signal('logrecord', doc="""\
Sent when a log record was emitted.
:keyword :class:`logging.LogRecord` record: the LogRecord
""")
def emit(self, record):
self.on_log_emit.send(record=record)
def get_data_dir(create=False):
""" Return (and optionally create) the user's default data directory.
:param create: Create the data directory if it doesn't exist
:type create: bool
:return: Path to the default data directory
:rtype: unicode
"""
unix_dir_var = 'XDG_DATA_HOME'
unix_dir_fallback = '~/.config'
windows_dir_var = 'APPDATA'
windows_dir_fallback = '~\\AppData\\Roaming'
mac_dir = '~/Library/Application Support'
base_dir = None
if is_os('darwin'):
        if Path(unix_dir_fallback).exists():
base_dir = unix_dir_fallback
else:
base_dir = mac_dir
elif is_os('windows'):
if windows_dir_var in os.environ:
base_dir = os.environ[windows_dir_var]
else:
base_dir = windows_dir_fallback
else:
if unix_dir_var in os.environ:
base_dir = os.environ[unix_dir_var]
else:
base_dir = unix_dir_fallback
app_path = Path(base_dir)/'spreads'
if create and not app_path.exists():
app_path.mkdir()
return unicode(app_path)
def colorize(text, color):
""" Return text with a new ANSI foreground color.
:param text: Text to be wrapped
:param color: ANSI color to wrap text in
:type color: str (from `colorama.ansi <http://git.io/9qnt0Q>`)
:return: Colorized text
"""
return color + text + colorama.Fore.RESET
class RomanNumeral(object):
""" Number type that represents integers as Roman numerals and that
can be used in all arithmetic operations applicable to integers.
"""
@staticmethod
def is_roman(value):
""" Check if `value` is a valid Roman numeral.
:param value: Value to be checked
:type value: unicode
:returns: Whether the value is valid or not
:rtype: bool
"""
return bool(roman.romanNumeralPattern.match(value))
def __init__(self, value, case='upper'):
""" Create a new instance.
:param value: Value of the instance
:type value: int, unicode containing valid Roman numeral or
:py:class:`RomanNumeral`
"""
self._val = self._to_int(value)
self._case = case
if isinstance(value, basestring) and not self.is_roman(value):
self._case = 'lower'
elif isinstance(value, RomanNumeral):
self._case = value._case
def _to_int(self, value):
if isinstance(value, int):
return value
elif isinstance(value, basestring) and self.is_roman(value.upper()):
return roman.fromRoman(value.upper())
elif isinstance(value, RomanNumeral):
return value._val
else:
raise ValueError("Value must be a valid roman numeral, a string"
" representing one or an integer: '{0}'"
.format(value))
def __cmp__(self, other):
if self._val > self._to_int(other):
return 1
elif self._val == self._to_int(other):
return 0
elif self._val < self._to_int(other):
return -1
def __add__(self, other):
return RomanNumeral(self._val + self._to_int(other), self._case)
def __sub__(self, other):
return RomanNumeral(self._val - self._to_int(other), self._case)
def __int__(self):
return self._val
def __str__(self):
strval = roman.toRoman(self._val)
if self._case == 'lower':
return strval.lower()
else:
return strval
def __unicode__(self):
return unicode(str(self))
def __repr__(self):
return str(self)
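# Usage sketch (illustrative): Roman numerals behave like integers in
# arithmetic and keep the case they were created with.
#
#   page = RomanNumeral('xiv')        # lower case is preserved
#   print(page + 1)                   # -> xv
#   print(int(RomanNumeral('XIV')))   # -> 14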
class CustomJSONEncoder(json.JSONEncoder):
""" Custom :py:class:`json.JSONEncoder`.
Uses an object's `to_dict` method if present for serialization.
Serializes :py:class:`pathlib.Path` instances to the string
representation of their relative path to a BagIt-compliant directory or
their absolute path if not applicable.
"""
def default(self, obj):
if hasattr(obj, 'to_dict'):
return obj.to_dict()
if isinstance(obj, Path):
# Serialize paths that belong to a workflow as paths relative to
# its base directory
base = next((p for p in obj.parents if (p/'bagit.txt').exists()),
None)
if base:
return unicode(obj.relative_to(base))
else:
return unicode(obj.absolute())
return json.JSONEncoder.default(self, obj)
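# Usage sketch (illustrative): the encoder is passed to json.dumps via `cls`.
# The path below is an assumption for the example.
#
#   json.dumps({'page': Path('/data/book/data/raw/001.jpg')},
#              cls=CustomJSONEncoder)
#   # -> '{"page": "data/raw/001.jpg"}' if /data/book contains a bagit.txt,
#   #    otherwise the absolute path is serialized.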
|
from cumulusci.core.exceptions import TaskOptionsError
from cumulusci.tasks.metadata_etl import MetadataSingleEntityTransformTask
from cumulusci.utils.xml.metadata_tree import MetadataElement
class AddValueSetEntries(MetadataSingleEntityTransformTask):
entity = "StandardValueSet"
task_options = {
**MetadataSingleEntityTransformTask.task_options,
"entries": {
"description": "Array of standardValues to insert. "
"Each standardValue should contain the keys 'fullName', the API name of the entry, "
"and 'label', the user-facing label. OpportunityStage entries require the additional "
"keys 'closed', 'won', 'forecastCategory', and 'probability'; CaseStatus entries "
"require 'closed'.",
"required": True,
},
"api_names": {
"description": "List of API names of StandardValueSets to affect, "
"such as 'OpportunityStage', 'AccountType', 'CaseStatus'",
"required": True,
},
}
def _transform_entity(self, metadata: MetadataElement, api_name: str):
for entry in self.options.get("entries", []):
if "fullName" not in entry or "label" not in entry:
raise TaskOptionsError(
"Standard value set entries must contain the 'fullName' and 'label' keys."
)
# Check for extra metadata on CaseStatus and OpportunityStage
if api_name == "OpportunityStage":
if not all(
[
"closed" in entry,
"forecastCategory" in entry,
"probability" in entry,
"won" in entry,
]
):
raise TaskOptionsError(
"OpportunityStage standard value set entries require the keys "
"'closed', 'forecastCategory', 'probability', and 'won'"
)
if api_name == "CaseStatus":
if "closed" not in entry:
raise TaskOptionsError(
"CaseStatus standard value set entries require the key 'closed'"
)
existing_entry = metadata.findall(
"standardValue", fullName=entry["fullName"]
)
if not existing_entry:
# Entry doesn't exist. Insert it.
elem = metadata.append(tag="standardValue")
elem.append("fullName", text=entry["fullName"])
elem.append("label", text=entry["label"])
elem.append("default", text="false")
if api_name in ["OpportunityStage", "CaseStatus"]:
elem.append("closed", str(entry["closed"]).lower())
if api_name == "OpportunityStage":
elem.append("won", str(entry["won"]).lower())
elem.append("probability", str(entry["probability"]))
elem.append("forecastCategory", entry["forecastCategory"])
return metadata
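# Illustrative sketch (assumed example values, not part of the task itself):
# the shape of the options this task expects, expressed as the equivalent
# Python structure.
#
#   options = {
#       "api_names": ["OpportunityStage"],
#       "entries": [
#           {
#               "fullName": "Qualification_Review",
#               "label": "Qualification Review",
#               "closed": False,
#               "won": False,
#               "forecastCategory": "Pipeline",
#               "probability": 10,
#           }
#       ],
#   }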
|
#*************************************************************************************
# Copyright 2018 OSIsoft, LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# <http://www.apache.org/licenses/LICENSE-2.0>
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# NOTE: this script was designed using the v1.0
# version of the OMF specification, as outlined here:
# http://omf-docs.osisoft.com/en/v1.0
# For more info, see OMF Developer Companion Guide:
# http://omf-companion-docs.osisoft.com
#*************************************************************************************
# ************************************************************************
# Import necessary packages
# ************************************************************************
import json
import time
import datetime
import platform
import socket
import gzip
import random # Used to generate sample data; comment out this line if real data is used
import requests
# ************************************************************************
# Specify options for sending web requests to the target PI System
# ************************************************************************
# Specify the producer token
# For more information see PI Connector Administration Guide
PRODUCER_TOKEN = "uid=acd124d1-a571-4124-bda6-d3baf529b888&crt=20180510202247360&sig=hFEUTLI1HzxkJjKvORjEkFLjloFKZIlt2bMRHAVPpTY="
# Specify the address of the destination endpoint
# For more information see PI Connector Administration Guide
INGRESS_URL = "http://localhost:8118/ingress/messages"
# Specify whether to compress OMF message before
# sending it to ingress endpoint
USE_COMPRESSION = True
# If self-signed certificates are used (true by default),
# do not verify HTTPS SSL certificates; normally, leave this as is
VERIFY_SSL = False
# Specify the timeout, in seconds, for sending web requests
# (if it takes longer than this to send a message, an error will be thrown)
WEB_REQUEST_TIMEOUT_SECONDS = 30
# ************************************************************************
# Helper function: REQUIRED: wrapper function for sending an HTTPS message
# ************************************************************************
# Define a helper function to make it easy to send web request messages;
# this function can later be customized, for example when porting this script to another language.
# All it does is take in a message type and a data object and send an HTTP(S)
# request to the target OMF endpoint
def send_omf_message_to_endpoint(message_type, message_omf_json):
try:
# Compress json omf payload, if specified
compression = 'none'
if USE_COMPRESSION:
msg_body = gzip.compress(bytes(json.dumps(message_omf_json), 'utf-8'))
compression = 'gzip'
else:
msg_body = json.dumps(message_omf_json)
# Assemble headers
msg_headers = {
'producertoken': PRODUCER_TOKEN,
'messagetype': message_type,
'action': 'create',
'messageformat': 'JSON',
'omfversion': '1.0',
'compression': compression
}
# Send the request, and collect the response
response = requests.post(
INGRESS_URL,
headers = msg_headers,
data = msg_body,
verify = VERIFY_SSL,
timeout = WEB_REQUEST_TIMEOUT_SECONDS
)
# Print a debug message, if desired; note: you should receive a
# response code 204 if the request was successful!
print('Response from relay from the initial "{0}" message: {1} {2}'.format(message_type, response.status_code, response.text))
except Exception as e:
# Log any error, if it occurs
        print(str(datetime.datetime.now()) + " An error occurred during web request: " + str(e))
# ************************************************************************
# Turn off HTTPS warnings, if desired
# (if the default certificate configuration was used by the PI Connector)
# ************************************************************************
# Suppress insecure HTTPS warnings if an untrusted certificate is used by the target endpoint
# Remove this if you are targeting trusted endpoints
if not VERIFY_SSL:
requests.packages.urllib3.disable_warnings()
# ************************************************************************
# Send the type messages to define the types of streams that will be sent.
# These types are referenced in all later messages
# ************************************************************************
# The sample sends static and dynamic types in separate messages
# only for readability; you can send all the type definitions
# in one message, as long as its size stays below the maximum allowed - 192K
# ************************************************************************
# Send a JSON packet to define static types
send_omf_message_to_endpoint("type", [
{
"id": "FirstStaticType",
"name": "First static type",
"classification": "static",
"type": "object",
"description": "First static asset type",
"properties": {
"index": {
"type": "string",
"isindex": True,
"name": "not in use",
"description": "not in use"
},
"name": {
"type": "string",
"isname": True,
"name": "not in use",
"description": "not in use"
},
"StringProperty": {
"type": "string",
"name": "First configuration attribute",
"description": "First static asset type's configuration attribute"
}
}
},
{
"id": "SecondStaticType",
"name": "Second static type",
"classification": "static",
"type": "object",
"description": "Second static asset type",
"properties": {
"index": {
"type": "string",
"isindex": True,
"name": "not in use",
"description": "not in use"
},
"name": {
"type": "string",
"isname": True,
"name": "not in use",
"description": "not in use"
},
"StringProperty": {
"type": "string",
"name": "Second configuration attribute",
"description": "Second static asset type's configuration attribute"
}
}
}
])
# Send a JSON packet to define dynamic types
send_omf_message_to_endpoint("type", [
{
"id": "FirstDynamicType",
"name": "First dynamic type",
"classification": "dynamic",
"type": "object",
"description": "not in use",
"properties": {
"timestamp": {
"format": "date-time",
"type": "string",
"isindex": True,
"name": "not in use",
"description": "not in use"
},
"IntegerProperty": {
"type": "integer",
"name": "Integer attribute",
"description": "PI point data referenced integer attribute"
}
}
},
{
"id": "SecondDynamicType",
"name": "Second dynamic type",
"classification": "dynamic",
"type": "object",
"description": "not in use",
"properties": {
"timestamp": {
"format": "date-time",
"type": "string",
"isindex": True,
"name": "not in use",
"description": "not in use"
},
"NumberProperty1": {
"type": "number",
"name": "Number attribute 1",
"description": "PI point data referenced number attribute 1",
"format": "float64"
},
"NumberProperty2": {
"type": "number",
"name": "Number attribute 2",
"description": "PI point data referenced number attribute 2",
"format": "float64"
},
"StringEnum": {
"type": "string",
"enum": ["False", "True"],
"name": "String enumeration",
"description": "String enumeration to replace boolean type"
}
}
},
{
"id": "ThirdDynamicType",
"name": "Third dynamic type",
"classification": "dynamic",
"type": "object",
"description": "not in use",
"properties": {
"timestamp": {
"format": "date-time",
"type": "string",
"isindex": True,
"name": "not in use",
"description": "not in use"
},
"IntegerEnum": {
"type": "integer",
"format": "int16",
"enum": [0, 1],
"name": "Integer enumeration",
"description": "Integer enumeration to replace boolean type"
}
}
}
])
# ************************************************************************
# Send a JSON packet to define containerids and the type
# (using the types listed above) for each new data event container.
# This instantiates these particular containers.
# We can now start sending data to them directly using their ids.
# ************************************************************************
send_omf_message_to_endpoint("container", [
{
"id": "Container1",
"typeid": "FirstDynamicType"
},
{
"id": "Container2",
"typeid": "FirstDynamicType"
},
{
"id": "Container3",
"typeid": "SecondDynamicType"
},
{
"id": "Container4",
"typeid": "ThirdDynamicType"
}
])
# ************************************************************************
# Send the messages to create the PI AF asset structure
#
# The following packets can be sent in one data message; the example
# splits the data into several messages only for readability;
# you can send all of the following data in one message,
# as long as its size stays below the maximum allowed - 192K
# ************************************************************************
# Send a JSON packet to define assets
send_omf_message_to_endpoint("data", [
{
"typeid": "FirstStaticType",
"values": [
{
"index": "Asset1",
"name": "Parent element",
"StringProperty": "Parent element attribute value"
}
]
},
{
"typeid": "SecondStaticType",
"values": [
{
"index": "Asset2",
"name": "Child element",
"StringProperty": "Child element attribute value"
}
]
}
])
# Send a JSON packet to define links between assets
# to create AF Asset structure
send_omf_message_to_endpoint("data", [
{
"typeid": "__Link",
"values": [
{
"source": {
"typeid": "FirstStaticType",
"index": "_ROOT"
},
"target": {
"typeid": "FirstStaticType",
"index": "Asset1"
}
},
{
"source": {
"typeid": "FirstStaticType",
"index": "Asset1"
},
"target": {
"typeid": "SecondStaticType",
"index": "Asset2"
}
}
]
}
])
# Send a JSON packet to define links between assets and
# containerids to create attributes with PI point references
# from containerid properties
send_omf_message_to_endpoint("data", [
{
"typeid": "__Link",
"values": [
{
"source": {
"typeid": "FirstStaticType",
"index": "Asset1"
},
"target": {
"containerid": "Container1"
}
},
{
"source": {
"typeid": "SecondStaticType",
"index": "Asset2"
},
"target": {
"containerid": "Container2"
}
},
{
"source": {
"typeid": "SecondStaticType",
"index": "Asset2"
},
"target": {
"containerid": "Container3"
}
},
{
"source": {
"typeid": "SecondStaticType",
"index": "Asset2"
},
"target": {
"containerid": "Container4"
}
}
]
}
])
# ************************************************************************
# Helper functions: REQUIRED: create a JSON message that contains data values
# for all defined containerids
#
# Note: if you do not send one of the values for a container, the Relay
# will write the default value for the missing property - this is the default
# behavior of JSON serialization; it might lead to undesirable
# results, for example putting a value of zero into the referenced PI
# point
# ************************************************************************
def getCurrentTime():
return datetime.datetime.utcnow().isoformat() + 'Z'
# Creates a JSON packet containing data values for containers
# of type FirstDynamicType defined above
def create_data_values_for_first_dynamic_type(containerid):
return [
{
"containerid": containerid,
"values": [
{
"timestamp": getCurrentTime(),
"IntegerProperty": int(100*random.random())
}
]
}
]
string_boolean_value = "True"
# Creates a JSON packet containing data values for containers
# of type SecondDynamicType defined above
def create_data_values_for_second_dynamic_type(containerid):
global string_boolean_value
if string_boolean_value == "True":
string_boolean_value = "False"
else:
string_boolean_value = "True"
return [
{
"containerid": containerid,
"values": [
{
"timestamp": getCurrentTime(),
"NumberProperty1": 100*random.random(),
"NumberProperty2": 100*random.random(),
"StringEnum": string_boolean_value
}
]
}
]
# Creates a JSON packet containing data values for containers
# of type ThirdDynamicType defined above
integer_boolean_value = 0
def create_data_values_for_third_dynamic_type(containerid):
global integer_boolean_value
if integer_boolean_value == 0:
integer_boolean_value = 1
else:
integer_boolean_value = 0
return [
{
"containerid": containerid,
"values": [
{
"timestamp": getCurrentTime(),
"IntegerEnum": integer_boolean_value
}
]
}
]
# ************************************************************************
# Finally, loop indefinitely, sending random events
# conforming to the container types that we defined earlier
# Note: PI points will be created when the first data value message
# arrives for a given container
#
# Note: values for each containerid are sent as a batch; you can update
# different containerids at different times
# ************************************************************************
while True:
send_omf_message_to_endpoint("data", create_data_values_for_first_dynamic_type("container1"))
send_omf_message_to_endpoint("data", create_data_values_for_first_dynamic_type("container2"))
send_omf_message_to_endpoint("data", create_data_values_for_second_dynamic_type("container3"))
send_omf_message_to_endpoint("data", create_data_values_for_third_dynamic_type("container4"))
time.sleep(1)
|