Dataset columns:
repository_name: string, length 7 to 107
function_path: string, length 4 to 190
function_identifier: string, length 1 to 236
language: string, 1 value
function: string, length 9 to 647k
docstring: string, length 5 to 488k
function_url: string, length 71 to 285
context: string, length 0 to 2.51M
license: string, 5 values
spamexperts/orangeassassin
oa/context.py
GlobalContext.unload_plugin
python
def unload_plugin(self, name):
    if name not in self.plugins:
        raise oa.errors.PluginLoadError("Plugin %s not loaded." % name)
    plugin = self.plugins[name]
    for rule in plugin.eval_rules:
        self.eval_rules.pop(rule, None)
    for rule_type in plugin.cmds or ():
        self.cmds.pop(rule_type, None)
    self.pop_plugin_data(name)
    del self.plugins[name]
Unload the specified plugin and remove any data stored in this context.
https://github.com/spamexperts/orangeassassin/blob/9d0d4bd3ef7d09bf2da195280730f790f1229458/oa/context.py#L239-L253
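The unload path above mirrors everything load_plugin registers: eval rules, CMD rules, and per-plugin data are each popped before the plugin entry itself is deleted. A minimal self-contained sketch of the same bookkeeping, using hypothetical names (MiniContext, DemoPlugin) rather than orangeassassin's classes:

class DemoPlugin:
    # hypothetical stand-in for a loaded plugin
    eval_rules = ["check_demo"]
    cmds = {"demo_rule": object}

class MiniContext:
    # hypothetical registry mirroring GlobalContext's bookkeeping
    def __init__(self):
        self.plugins, self.eval_rules, self.cmds, self.plugin_data = {}, {}, {}, {}

    def unload_plugin(self, name):
        if name not in self.plugins:
            raise KeyError("Plugin %s not loaded." % name)
        plugin = self.plugins[name]
        for rule in plugin.eval_rules:
            self.eval_rules.pop(rule, None)   # drop registered eval rules
        for rule_type in plugin.cmds or ():
            self.cmds.pop(rule_type, None)    # drop registered CMD rules
        self.plugin_data.pop(name, None)      # drop per-plugin data
        del self.plugins[name]

ctx = MiniContext()
ctx.plugins["DemoPlugin"] = DemoPlugin()
ctx.eval_rules["check_demo"] = lambda: True
ctx.cmds["demo_rule"] = object
ctx.unload_plugin("DemoPlugin")
assert not ctx.plugins and not ctx.eval_rules and not ctx.cmds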
from builtins import dict from builtins import object import timeit import sys try: import importlib.machinery except ImportError: pass import re import os import imp import getpass import logging import functools import importlib import collections import oa.conf import oa.errors import oa.networks import oa.rules.base import oa.plugins.base import oa.plugins.pyzor import oa.dns_interface import oa.regex DSN_SERVER_RE = oa.regex.Regex(r""" ^\[? ([0-9.]+| # IPv4 [0-9a-f:]+) # IPv6 \]? (?: : # A port is following the address ([0-9]+) # The port )?$ """, re.I | re.S | re.M | re.X) class _Context(object): def __init__(self): self.plugin_data = collections.defaultdict(dict) self.log = logging.getLogger("oa-logger") def __getstate__(self): odict = self.__dict__.copy() if "RelayCountryPlugin" in odict["plugin_data"]: del odict["plugin_data"]["RelayCountryPlugin"]["ipv4"] del odict["plugin_data"]["RelayCountryPlugin"]["ipv6"] odict["plugins_to_import"] = [] for plugin_name, plugin in odict["plugins"].copy().items(): if plugin.path_to_plugin is None: continue del odict["plugins"][plugin_name] odict["plugins_to_import"].append(plugin.path_to_plugin) for rule in plugin.eval_rules: del odict["eval_rules"][rule] return odict def __setstate__(self, d): self.__dict__.update(d) for name, path in d.get("plugins_to_import", None) or (): self.load_plugin(name, path) def set_plugin_data(self, plugin_name, key, value): self.plugin_data[plugin_name][key] = value def get_plugin_data(self, plugin_name, key=None): if key is None: return self.plugin_data[plugin_name] return self.plugin_data[plugin_name][key] def del_plugin_data(self, plugin_name, key=None): if key is None: del self.plugin_data[plugin_name] else: del self.plugin_data[plugin_name][key] def pop_plugin_data(self, plugin_name, key=None): if key is None: return self.plugin_data.pop(plugin_name, None) return self.plugin_data[plugin_name].pop(key, None) def _callback_chain(func): @functools.wraps(func) def wrapped_func(*args, **kwargs): try: func(*args, **kwargs) except oa.errors.InhibitCallbacks: return True return False return wrapped_func class GlobalContext(_Context): def __init__(self, paranoid=False, ignore_unknown=True, lazy_mode=True): super(GlobalContext, self).__init__() self.plugins = dict() self.paranoid = paranoid self.lazy_mode = lazy_mode self.ignore_unknown = ignore_unknown self.eval_rules = dict() self.cmds = dict() self.dns = oa.dns_interface.DNSInterface() self.networks = oa.networks.NetworkList() self.conf = oa.conf.PADConf(self) self.username = getpass.getuser() def err(self, *args, **kwargs): if self.paranoid: self.log.error(*args, **kwargs) elif not self.ignore_unknown: self.log.warn(*args, **kwargs) else: self.log.debug(*args, **kwargs) def load_plugin(self, name, path=None): self.log.debug("Loading plugin %s from %s", name, path) class_name = name.rsplit(".", 1)[-1] if class_name in self.plugins: self.log.warning("Redefining plugin %s.", class_name) self.unload_plugin(class_name) if path is None: module_name, class_name = name.rsplit(".", 1) try: module = importlib.import_module(module_name) except ImportError as e: raise oa.errors.PluginLoadError("Unable to load %s: %s" % (module_name, e)) elif sys.version_info[0] == 3 and sys.version_info[1] > 2: module = self._load_module_py3(path) else: module = self._load_module_py2(path) plugin_class = getattr(module, class_name) if plugin_class is None: raise oa.errors.PluginLoadError("Missing plugin %s in %s" % (class_name, path)) if not issubclass(plugin_class, 
oa.plugins.base.BasePlugin): raise oa.errors.PluginLoadError("%s is not a subclass of " "BasePlugin" % class_name) plugin = plugin_class(self) self._load_cmds(plugin, class_name) self._load_eval_rules(plugin, class_name) self.log.info("Plugin %s loaded", name) if path is not None: plugin.path_to_plugin = (name, path) self.plugins[class_name] = plugin def _load_eval_rules(self, plugin, class_name): for rule in plugin.eval_rules: self.log.debug("Registering eval rule: %s.%s", class_name, rule) if rule in self.eval_rules: self.log.warning("Redefining eval rule: %s", rule) eval_rule = getattr(plugin, rule) if eval_rule is None: raise oa.errors.PluginLoadError("Undefined eval rule %s in " "%s" % (rule, class_name)) self.eval_rules[rule] = eval_rule def _load_cmds(self, plugin, class_name): if not plugin.cmds: return for rule_type, rule_class in plugin.cmds.items(): self.log.debug("Registering CMD rule: %s.%s", class_name, rule_type) if rule_type in self.cmds: self.log.warning("Redefining CMD rule: %s", rule_type) if not issubclass(rule_class, oa.rules.base.BaseRule): raise oa.errors.PluginLoadError("%s is not a subclass of " "BasePlugin" % class_name) self.cmds[rule_type] = rule_class
Apache License 2.0
schemathesis/schemathesis
src/schemathesis/specs/openapi/serialization.py
deep_object
python
def deep_object(item: Generated, name: str) -> None:
    generated = item.pop(name)
    if generated:
        item.update({f"{name}[{key}]": value for key, value in force_dict(generated).items()})
    else:
        item[name] = ""
Serialize an object with `deepObject` style. id={"role": "admin", "firstName": "Alex"} => id[role]=admin&id[firstName]=Alex
https://github.com/schemathesis/schemathesis/blob/b3d0faaaf484574006624c2f23759a612d1ffb5e/src/schemathesis/specs/openapi/serialization.py#L205-L214
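The docstring's deepObject example can be reproduced with a small standalone sketch of the same transformation; the helper below is illustrative only and does not call into schemathesis:

def deep_object_demo(item, name):
    # mimic the transformation described above: id={...} -> id[role]=..., id[firstName]=...
    generated = item.pop(name)
    if generated:
        item.update({f"{name}[{key}]": value for key, value in generated.items()})
    else:
        item[name] = ""

query = {"id": {"role": "admin", "firstName": "Alex"}}
deep_object_demo(query, "id")
assert query == {"id[role]": "admin", "id[firstName]": "Alex"}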
import json from typing import Any, Callable, Dict, Generator, List, Optional, Tuple, Union from ...utils import compose Generated = Dict[str, Any] Definition = Dict[str, Any] DefinitionList = List[Definition] MapFunction = Callable[[Generated], Generated] def make_serializer( func: Callable[[DefinitionList], Generator[Optional[Callable], None, None]] ) -> Callable[[DefinitionList], Optional[Callable]]: def _wrapper(definitions: DefinitionList) -> Optional[Callable]: conversions = list(func(definitions)) if conversions: return compose(*[conv for conv in conversions if conv is not None]) return None return _wrapper def _serialize_openapi3(definitions: DefinitionList) -> Generator[Optional[Callable], None, None]: for definition in definitions: name = definition["name"] if "content" in definition: options = iter(definition["content"].keys()) media_type = next(options, None) if media_type == "application/json": yield to_json(name) else: style = definition.get("style") explode = definition.get("explode") type_ = definition.get("schema", {}).get("type") if definition["in"] == "path": yield from _serialize_path_openapi3(name, type_, style, explode) elif definition["in"] == "query": yield from _serialize_query_openapi3(name, type_, style, explode) elif definition["in"] == "header": yield from _serialize_header_openapi3(name, type_, explode) elif definition["in"] == "cookie": yield from _serialize_cookie_openapi3(name, type_, explode) def _serialize_path_openapi3( name: str, type_: str, style: Optional[str], explode: Optional[bool] ) -> Generator[Optional[Callable], None, None]: if style == "simple": if type_ == "object": if explode is False: yield comma_delimited_object(name) if explode is True: yield delimited_object(name) if type_ == "array": yield delimited(name, delimiter=",") if style == "label": if type_ == "object": yield label_object(name, explode=explode) elif type_ == "array": yield label_array(name, explode=explode) else: yield label_primitive(name) if style == "matrix": if type_ == "object": yield matrix_object(name, explode=explode) elif type_ == "array": yield matrix_array(name, explode=explode) else: yield matrix_primitive(name) def _serialize_query_openapi3( name: str, type_: str, style: Optional[str], explode: Optional[bool] ) -> Generator[Optional[Callable], None, None]: if type_ == "object": if style == "deepObject": yield deep_object(name) if style is None or style == "form": if explode is False: yield comma_delimited_object(name) if explode is True: yield extracted_object(name) elif type_ == "array" and explode is False: if style == "pipeDelimited": yield delimited(name, delimiter="|") if style == "spaceDelimited": yield delimited(name, delimiter=" ") if style is None or style == "form": yield delimited(name, delimiter=",") def _serialize_header_openapi3( name: str, type_: str, explode: Optional[bool] ) -> Generator[Optional[Callable], None, None]: yield to_string(name) if type_ == "array": yield delimited(name, delimiter=",") if type_ == "object": if explode is False: yield comma_delimited_object(name) if explode is True: yield delimited_object(name) def _serialize_cookie_openapi3( name: str, type_: str, explode: Optional[bool] ) -> Generator[Optional[Callable], None, None]: yield to_string(name) if explode and type_ in ("array", "object"): yield nothing(name) if explode is False: if type_ == "array": yield delimited(name, delimiter=",") if type_ == "object": yield comma_delimited_object(name) def _serialize_swagger2(definitions: DefinitionList) -> 
Generator[Optional[Callable], None, None]: for definition in definitions: name = definition["name"] collection_format = definition.get("collectionFormat", "csv") type_ = definition.get("type") if definition["in"] == "header": yield to_string(name) if type_ in ("array", "object"): if collection_format == "csv": yield delimited(name, delimiter=",") if collection_format == "ssv": yield delimited(name, delimiter=" ") if collection_format == "tsv": yield delimited(name, delimiter="\t") if collection_format == "pipes": yield delimited(name, delimiter="|") serialize_openapi3_parameters = make_serializer(_serialize_openapi3) serialize_swagger2_parameters = make_serializer(_serialize_swagger2) def conversion(func: Callable[..., None]) -> Callable: def _wrapper(name: str, **kwargs: Any) -> MapFunction: def _map(item: Generated) -> Generated: if name in (item or {}): func(item, name, **kwargs) return item return _map return _wrapper def make_delimited(data: Optional[Dict[str, Any]], delimiter: str = ",") -> str: return delimiter.join(f"{key}={value}" for key, value in force_dict(data or {}).items()) def force_iterable(value: Any) -> Union[List, Tuple]: if isinstance(value, (tuple, list)): return value return [value] def force_dict(value: Any) -> Dict: if isinstance(value, dict): return value return {"": value} @conversion def to_json(item: Generated, name: str) -> None: item[name] = json.dumps(item[name]) @conversion def delimited(item: Generated, name: str, delimiter: str) -> None: item[name] = delimiter.join(map(str, force_iterable(item[name] or ()))) @conversion
MIT License
vowpalwabbit/coba
coba/encodings.py
StringEncoder.fit
python
def fit(self, values: Sequence[Any]) -> 'StringEncoder':
    if self.is_fit:
        raise Exception("This encoder has already been fit.")
    return StringEncoder(is_fit=True)
Determine how to encode from given training data.
Args:
    values: A collection of values to use for determining the encoding.
Returns:
    An Encoder that has been fit.
Remarks:
    See the base class for more information.
https://github.com/vowpalwabbit/coba/blob/ed5ba88eaf2a90f4113c37032e62c958d11350ab/coba/encodings.py#L96-L112
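Note the design: fit never mutates the encoder, it returns a new fitted instance, which is what lets the base class's fit_encode chain fit(values).encode(values). A hypothetical usage sketch, assuming the StringEncoder shown in the context column:

enc = StringEncoder(is_fit=False)
fitted = enc.fit(["a", "b", "c"])
assert fitted.is_fit and not enc.is_fit   # a new encoder comes back; the original is untouched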
import sys import json import collections from numbers import Number from itertools import product from abc import ABC, abstractmethod from typing import Iterator, Sequence, Generic, TypeVar, Any, Dict, Tuple, Union _T_out = TypeVar('_T_out', bound=Any, covariant=True) class Encoder(Generic[_T_out], ABC): @property @abstractmethod def is_fit(self) -> bool: ... @abstractmethod def fit(self, values: Sequence[Any]) -> 'Encoder': ... @abstractmethod def encode(self, values: Sequence[Any]) -> Sequence[_T_out]: ... def fit_encode(self, values: Sequence[Any]) -> Sequence[_T_out]: if self.is_fit: return self.encode(values) else: return self.fit(values).encode(values) class StringEncoder(Encoder[str]): def __init__(self, is_fit = True) -> None: self._is_fit = is_fit @property def is_fit(self) -> bool: return self._is_fit
BSD 3-Clause New or Revised License
thefoundryvisionmongers/nuke-ml-server
Models/baseModel.py
BaseModel.get_buttons
python
def get_buttons(self):
    btn = {}
    if hasattr(self, 'buttons'):
        for button in self.buttons:
            value = getattr(self, button)
            assert type(value) in [bool], 'Broadcasted buttons need to be bool.'
            btn[button] = value
    return btn
Return the defined buttons of the model. To expose buttons in Nuke, self.buttons has to be filled with attribute names.
https://github.com/thefoundryvisionmongers/nuke-ml-server/blob/5dd4d04cd673c60de8093c600d6c54016fca92d6/Models/baseModel.py#L68-L79
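A hypothetical subclass shows how the buttons mechanism is meant to be used: list attribute names in self.buttons and keep those attributes bool, since anything else trips the assert:

class BlurModel(BaseModel):
    # hypothetical model; BaseModel is the class shown in the context column
    def __init__(self):
        super(BlurModel, self).__init__()
        self.use_gpu = True
        self.buttons = ('use_gpu',)   # names of bool attributes to expose in Nuke

model = BlurModel()
assert model.get_buttons() == {'use_gpu': True}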
import sys if sys.version_info.major > 2: unicode = str import numpy as np class BaseModel(object): def __init__(self): self.name = 'Base model' self.options = () self.buttons = () self.inputs = {'input': 3} self.outputs = {'output': 3} pass def inference(self, *inputs): raise NotImplementedError def get_options(self): opt = {} if hasattr(self, 'options'): for option in self.options: value = getattr(self, option) if isinstance(value, unicode): value = str(value) assert type(value) in [bool, int, float, str], 'Broadcasted options need to be one of bool, int, float, str.' opt[option] = value return opt def set_options(self, optionsDict): for name, value in optionsDict.items(): setattr(self, name, value)
Apache License 2.0
perfectbark/latex2docx
plasTeX/ConfigManager/__init__.py
ConfigManager.get_opt
python
def get_opt(self, section, option):
    optionstring = self[section][option].strip()
    if (optionstring[0] == '\'' and optionstring[-1] == '\'') or (optionstring[0] == '\"' and optionstring[-1] == '\"'):
        optionstring = optionstring[1:-1]
    return optionstring
Return the option with leading and trailing quotes removed
https://github.com/perfectbark/latex2docx/blob/e32f9dcc59cce7bea4e7b114687b2300c623d8c0/plasTeX/ConfigManager/__init__.py#L900-L906
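The quote handling is symmetric-only: a value is unwrapped when it both starts and ends with the same kind of quote. A standalone sketch of the same stripping logic, not using plasTeX's ConfigManager:

def strip_quotes(optionstring):
    optionstring = optionstring.strip()
    if optionstring[0] == optionstring[-1] and optionstring[0] in ('"', "'"):
        return optionstring[1:-1]
    return optionstring

assert strip_quotes('  "hello" ') == 'hello'
assert strip_quotes("'world'") == 'world'
mixed = '"mismatched\''
assert strip_quotes(mixed) == mixed   # mismatched quotes are left alone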
import sys, string, re, urllib, copy, types, os from plasTeX.dictutils import ordereddict from UserList import UserList from UserDict import UserDict from textwrap import wrap __all__ = ['ConfigManager','BooleanOption','IntegerOption','CompoundOption', 'MultiOption','GenericOption','FloatOption','StringOption', 'InputFileOption','OutputFileOption','InputDirectoryOption', 'OutputDirectoryOption','CountedOption', 'BooleanArgument','IntegerArgument','CompoundArgument', 'MultiArgument','GenericArgument','FloatArgument','StringArgument', 'InputFileArgument','OutputFileArgument','InputDirectoryArgument', 'OutputDirectoryArgument','CountedArgument', 'BUILTIN','CODE','REGISTRY','CONFIG','CONFIGFILE','ENVIRON', 'ENVIRONMENT','COMMANDLINE','ALL','DEFAULTSECT', 'ON','OFF','TRUE','FALSE','YES','NO','CommandLineManager', 'GetoptError','ConfigError','NoOptionError'] DEFAULTSECT = "DEFAULT" MAX_INTERPOLATION_DEPTH = 10 ON = TRUE = YES = 1 OFF = FALSE = NO = 0 TERMINAL_WIDTH = 76 MAX_NAME_WIDTH_RATIO = 0.25 PREPAD = 2 GUTTER = 4 BUILTIN = 2 CODE = 4 REGISTRY = 8 CONFIG = CONFIGFILE = 16 ENVIRON = ENVIRONMENT = 32 COMMANDLINE = 64 ALL = 0xffffff class Error(Exception): def __init__(self, msg=''): self.msg = msg Exception.__init__(self, msg) def __str__(self): return self.msg __repr__ = __str__ class GetoptError(Error): def __init__(self, msg, opt): self.msg = msg self.opt = opt Exception.__init__(self, msg, opt) def __str__(self): return self.msg __repr__ = __str__ class RequiresArgument(GetoptError): class MandatoryOption(GetoptError): class UnspecifiedArgument(GetoptError): class UnrecognizedArgument(GetoptError): class NonUniquePrefix(GetoptError): class UnknownCompoundGroup(GetoptError): def __init__(self, msg=''): GetoptError.__init__(self, msg, '') class ConfigError(Error): class NoSectionError(ConfigError): def __init__(self, section): ConfigError.__init__(self, 'No section: %s' % section) self.section = section class DuplicateSectionError(ConfigError): def __init__(self, section): ConfigError.__init__(self, "Section %s already exists" % section) self.section = section class InvalidOptionError(GetoptError, ConfigError): def __init__(self, option, value, msg='', type=''): if type: type += ' ' if not msg: msg="Invalid value for %soption `%s'" % (type, option) ConfigError.__init__(self, msg+': %s' % value) self.option = option self.value = value class NoOptionError(ConfigError): def __init__(self, option, section): ConfigError.__init__(self, "No option `%s' in section: %s" % (option, section)) self.option = option self.section = section class InterpolationError(ConfigError): def __init__(self, reference, option, section, rawval): ConfigError.__init__(self, "Bad value substitution:\n" "\tsection: [%s]\n" "\toption : %s\n" "\tkey : %s\n" "\trawval : %s\n" % (section, option, reference, rawval)) self.reference = reference self.option = option self.section = section class InterpolationDepthError(ConfigError): def __init__(self, option, section, rawval): ConfigError.__init__(self, "Value interpolation too deeply recursive:\n" "\tsection: [%s]\n" "\toption : %s\n" "\trawval : %s\n" % (section, option, rawval)) self.option = option self.section = section class ParsingError(ConfigError): def __init__(self, filename): ConfigError.__init__(self, 'File contains parsing errors: %s' % filename) self.filename = filename self.errors = [] def append(self, lineno, line): self.errors.append((lineno, line)) self.msg = self.msg + '\n\t[line %2d]: %s' % (lineno, line) class TooFewValues(GetoptError): def __init__(self, 
msg): GetoptError.__init__(self, msg, '') class TooManyValues(GetoptError): def __init__(self, msg): GetoptError.__init__(self, msg, '') class MissingSectionHeaderError(ParsingError): def __init__(self, filename, lineno, line): ConfigError.__init__( self, 'File contains no section headers.\nfile: %s, line: %d\n%s' % (filename, lineno, line)) self.filename = filename self.lineno = lineno self.line = line class ConfigSection(UserDict, object): def __init__(self, name, data={}): UserDict.__init__(self, data) self.name = name self.parent = None def copy(self): newcopy = self.__class__(self.name) for key, value in vars(self).items(): if key == 'data': continue setattr(newcopy, key, value) for key, value in self.data.items(): newcopy.data[key] = value.copy() return newcopy def setParent(self, parent): self.parent = parent def defaults(self): return self.parent.defaults() def __getitem__(self, key): return self.get(key) def set(self, option, value, source=BUILTIN): typemap = {str:StringOption, int:IntegerOption, float:FloatOption, list:MultiOption, tuple:MultiOption} if self.data.has_key(option): if self.data[option].source <= source: self.data[option].source = source self.data[option].setValue(value) else: if isinstance(value, GenericOption): value.setParent(self) value.name = str(option) self.data[option] = value elif type(value) in typemap.keys(): for key, opttype in typemap.items(): if isinstance(value, key): if type(value) == str and str(value).lower().strip() in ['on','off','true','false','yes','no']: opttype = BooleanOption self.data[option] = opttype(name=option, source=source) self.data[option].setParent(self) self.data[option].name = str(option) self.data[option].setValue(value) break else: raise TypeError, 'Could not find valid option type for "%s"' % value def __setitem__(self, key, value): self.set(key, value, source=BUILTIN) def getint(self, option): return int(self[option]) def getfloat(self, option): return float(self[option]) def getboolean(self, option): v = self[option] val = int(v) if val not in (0, 1): raise ValueError, 'Not a boolean: %s' % v return val def get(self, option, raw=0, vars={}): value = self.getraw(option, vars) if raw or value == None: return value if type(value) in [list, tuple]: strings = [s for s in value if isinstance(s,str) and s.find('%(')+1] if not strings: return value elif not(isinstance(value,str)) or value.find("%(") < 0: return value var_dict = self.defaults().data.copy() var_dict.update(self.data) var_dict.update(vars) if type(value) in [list, tuple]: new_values = [] for i in value: new_values.append(self.interpolate(option, var_dict, i)) return new_values else: return self.interpolate(option, var_dict, value) def interpolate(self, option, vars, rawval): value = rawval depth = 0 while depth < MAX_INTERPOLATION_DEPTH: depth = depth + 1 if value.find("%(") >= 0: try: value = value % vars except KeyError, key: raise InterpolationError(key, option, self.name, rawval) else: break if value.find("%(") >= 0: raise InterpolationDepthError(option, self.name, rawval) return value def getraw(self, option, vars={}): if vars.has_key(option): return vars[option].getValue() if self.has_key(option): return self.data[option].getValue() defaults = self.defaults() if defaults.has_key(option): return defaults.data[option].getValue() raise NoOptionError(option, self.name) def to_string(self, source=COMMANDLINE): s = '' keys = self.keys() keys.sort() for key in keys: if source & self.data[key].source: raw = self.getraw(key) option = self.data[key] if isinstance(option, 
MultiOption) and raw == []: continue if raw == None: continue comment = '' if option.summary: comment = option.summary if option.description: comment = option.description if comment: comment = comment.strip() % option.names() comment = comment.split('\n') s += '\n; %s\n' % '\n; '.join(comment) value = str(option).replace('\n', '\n ') if value.find('\n') + 1: value = '\n ' + value s += "%s %s %s\n" % (key, ConfigManager.OPTIONSEP, value) return s def __str__(self): return self.to_string() def __repr__(self): return self.to_string(ALL) class ConfigManager(UserDict, object): SECTCRE = re.compile( r'\[' r'(?P<header>[^]]+)' r'\]' ) OPTCRE = re.compile( r'(?P<option>[]\-[\w_.*,(){}]+)' r'[ \t]*(?P<vi>[:=])[ \t]*' r'(?P<value>.*)$' ) OPTIONSEP = '=' short_prefix = '-' long_prefix = '--' def __init__(self, defaults={}): UserDict.__init__(self) self[DEFAULTSECT] = ConfigSection(DEFAULTSECT, defaults) self.strict = 1 self._categories = {} self.unrecognized = [] def copy(self): newcopy = self.__class__() for key, value in vars(self).items(): if key == 'data': continue setattr(newcopy, key, value) for key, value in self.data.items(): newcopy.data[key] = value.copy() return newcopy def set_prefixes(cls, arg1, arg2=None): if arg1 == arg2 == None: raise ValueError, 'Short and long prefixes cannot both be None.' if arg2 is None: cls.long_prefix = arg1 cls.short_prefix = None else: cls.long_prefix = arg2 cls.short_prefix = arg1 set_prefixes = classmethod(set_prefixes) def add_help_on_option(self, category=None): self[DEFAULTSECT]['__help_on__'] = MultiOption( """ Display help on listed option names """, options = '%shelp-on' % self.long_prefix[0], category = category, callback = self.usage_on, ) def remove_help_on_option(self): try: del self[DEFAULTSECT]['__help_on__'] except: pass def add_category(self, key, title): self._categories[key] = title return key def get_category(self, key): if type(key) not in [list, tuple]: key = [key] if not key: return '' return self._categories[key[0]] def categories(self): return self._categories def set_strict(self, bool=1): self.strict = not(not(bool)) def defaults(self): return self[DEFAULTSECT] def sections(self): return self.keys() def add_section(self, section): if self.has_key(section): return self[section] self[section] = ConfigSection(section) return self[section] def has_section(self, section): return section in self.keys() def options(self, section): if self.has_key(section): return self[section].keys() else: raise NoSectionError(section) def read(self, filenames): if type(filenames) in [type(''), type(u'')]: filenames = [filenames] for filename in filenames: try: if filename.startswith('~'): filename = os.path.expanduser(filename) fp = urllib.urlopen(filename) except (OSError, IOError): continue self.__read(fp, filename) fp.close() return self def readfp(self, fp, filename=None): if filename is None: try: filename = fp.name except AttributeError: filename = '<???>' self.__read(fp, filename) return self def get(self, section, option, raw=0, vars={}): return self[section].get(option, raw, vars) def set(self, section, option, value, source=BUILTIN): if not section or section == DEFAULTSECT: sectdict = self[DEFAULTSECT] else: try: sectdict = self[section] except KeyError: raise NoSectionError(section) sectdict.set(option, value, source) def __setitem__(self, key, value): if isinstance(value, ConfigSection): self.data[key] = value self.data[key].setParent(self) else: self.data[key] = ConfigSection(str(key)) self.data[key].setParent(self) def __getitem__(self, key): 
if self.data.has_key(key): return self.data[key] if self.data[DEFAULTSECT].has_key(key): return self.data[DEFAULTSECT][key] raise NoSectionError(key) def getint(self, section, option): return self[section].getint(option) def getfloat(self, section, option): return self[section].getfloat(option) def getboolean(self, section, option): return self[section].get(option) def getraw(self, section, option): return self[section].getraw(option) def has_option(self, section, option): if not section: section=DEFAULTSECT elif not self.has_key(section): return 0 else: return self[section].has_key(option) def write(self, fp): fp.write(str(self)) def __str__(self): return self.to_string() def __repr__(self): return self.to_string(source=COMMANDLINE|CONFIGFILE|CODE|BUILTIN|REGISTRY|ENVIRONMENT) def to_string(self, source=COMMANDLINE|CONFIGFILE): if source & BUILTIN: func = repr else: func = str s = '' keys = [x for x in self.keys() if x != DEFAULTSECT] keys.sort() if self[DEFAULTSECT]: keys.insert(0, DEFAULTSECT) for section in keys: content = func(self[section]).strip() if content: s += "[%s]\n%s\n\n" % (section, content) return s def remove_option(self, section, option): if not section or section == DEFAULTSECT: sectdict = self[DEFAULTSECT] else: try: sectdict = self[section] except KeyError: raise NoSectionError(section) try: del sectdict[option] return 1 except KeyError: return 0 def remove_section(self, section): if self.has_key(section): del self[section] return 1 else: return 0 def __read(self, fp, fpname): cursect = None optname = None lineno = 0 e = None while 1: line = fp.readline() if not line: break lineno = lineno + 1 if line.strip() == '' or line[0] in '#;': continue if line.split()[0].lower() == 'rem' and line[0] in "rR": continue if line[0] in ' \t' and cursect is not None and optname: value = line.strip() if value and cursect.data[optname].source == CONFIGFILE: cursect.data[optname] += "%s" % value else: mo = self.SECTCRE.match(line) if mo: sectname = mo.group('header') if self.has_key(sectname): cursect = self[sectname] else: cursect = ConfigSection(sectname) self[sectname] = cursect optname = None elif cursect is None: raise MissingSectionHeaderError(fpname, lineno, `line`) else: mo = self.OPTCRE.match(line) if mo: optname, vi, optval = mo.group('option', 'vi', 'value') if vi in ('=', ':') and ';' in optval: pos = optval.find(';') if pos and optval[pos-1] in string.whitespace: optval = optval[:pos] optval = optval.strip() if optval == '""': optval = '' try: cursect.set(optname, optval, source=CONFIGFILE) cursect.data[optname].file = fpname except: print "Problem occurred in section '%s' while reading file %s." % (cursect.name, fpname) raise else: if not e: e = ParsingError(fpname) e.append(lineno, `line`) if e: raise e def get_default_option(self, option): try: return self[DEFAULTSECT][option] except KeyError: raise NoOptionError(option, DEFAULTSECT)
MIT License
spiderclub/haipproxy
haipproxy/client/py_cli.py
ProxyFetcher._refresh_periodically
python
def _refresh_periodically(self):
    while True:
        if len(self.pool) < int(2 * self.min_pool_size):
            self.get_proxies()
        time.sleep(0.2)
Refresh self.pool periodically; with the 0.2-second sleep the pool is checked five times per second.
https://github.com/spiderclub/haipproxy/blob/ab30ccf4b1d78e9304c27830006cc5800fe41bb3/haipproxy/client/py_cli.py#L180-L185
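The loop above is meant to run on a daemon thread (the ProxyFetcher constructor in the context column starts one). A minimal self-contained sketch of the same polling pattern, with hypothetical names and a placeholder proxy source:

import threading
import time

class MiniPool:
    # hypothetical stand-in for ProxyFetcher's pool-refresh loop
    def __init__(self, min_pool_size=2):
        self.pool = []
        self.min_pool_size = min_pool_size
        t = threading.Thread(target=self._refresh_periodically, daemon=True)
        t.start()

    def get_proxies(self):
        self.pool.append("127.0.0.1:8080")   # placeholder "proxy"

    def _refresh_periodically(self):
        while True:
            if len(self.pool) < 2 * self.min_pool_size:
                self.get_proxies()
            time.sleep(0.2)

pool = MiniPool()
time.sleep(1)   # give the daemon thread a few cycles to top up the pool
assert len(pool.pool) >= pool.min_pool_size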
import time import threading from ..utils import get_redis_conn from ..config.rules import ( SCORE_MAPS, TTL_MAPS, SPEED_MAPS) from ..config.settings import ( TTL_VALIDATED_RESOURCE, LONGEST_RESPONSE_TIME, LOWEST_SCORE, LOWEST_TOTAL_PROXIES, DATA_ALL) from .core import IPFetcherMixin __all__ = ['ProxyFetcher'] lock = threading.RLock() class Strategy: strategy = None def check(self, strategy): return self.strategy == strategy def get_proxies_by_stragery(self, pool): raise NotImplementedError def process_feedback(self, pool, res, proxy, **kwargs): raise NotImplementedError class RobinStrategy(Strategy): def __init__(self): super().__init__() self.strategy = 'robin' def get_proxies_by_stragery(self, pool): if not pool: return None proxy = pool.pop(0) pool.append(proxy) return proxy def process_feedback(self, pool, res, proxy, **kwargs): if res == 'failure': if pool[-1] == proxy: with lock: if pool[-1] == proxy: pool.pop() return class GreedyStrategy(Strategy): def __init__(self): self.strategy = 'greedy' def get_proxies_by_stragery(self, pool): if not pool: return None return pool[0] def process_feedback(self, pool, res, proxy, **kwargs): if res == 'failure': if pool[0] == proxy: with lock: if pool[0] == proxy: pool.pop(0) return expected_time = kwargs.get('expected') real_time = kwargs.get('real') if expected_time * 1000 < real_time: pool.pop(0) pool.append(proxy) class ProxyFetcher(IPFetcherMixin): def __init__(self, usage, strategy='robin', fast_response=5, score_map=SCORE_MAPS, ttl_map=TTL_MAPS, speed_map=SPEED_MAPS, longest_response_time=LONGEST_RESPONSE_TIME, lowest_score=LOWEST_SCORE, ttl_validated_resource=TTL_VALIDATED_RESOURCE, min_pool_size=LOWEST_TOTAL_PROXIES, all_data=DATA_ALL, redis_args=None): if usage not in score_map.keys(): usage = 'https' score_queue = score_map.get(usage) ttl_queue = ttl_map.get(usage) speed_queue = speed_map.get(usage) super().__init__(score_queue, ttl_queue, speed_queue, longest_response_time, lowest_score, ttl_validated_resource, min_pool_size) self.strategy = strategy self.pool = list() self.min_pool_size = min_pool_size self.fast_response = fast_response self.all_data = all_data self.handlers = [RobinStrategy(), GreedyStrategy()] if isinstance(redis_args, dict): self.conn = get_redis_conn(**redis_args) else: self.conn = get_redis_conn() t = threading.Thread(target=self._refresh_periodically) t.setDaemon(True) t.start() def get_proxy(self): proxy = None self.refresh() for handler in self.handlers: if handler.strategy == self.strategy: proxy = handler.get_proxies_by_stragery(self.pool) return proxy def get_proxies(self): proxies = self.get_available_proxies(self.conn) print('{} proxies have been fetched'.format(len(proxies))) self.pool.extend(proxies) return self.pool def proxy_feedback(self, res, proxy, response_time=None): for handler in self.handlers: if handler.strategy == self.strategy: handler.process_feedback(self.pool, res, proxy, real=response_time, expected=self.fast_response) def refresh(self): if len(self.pool) < self.min_pool_size: self.get_proxies() def delete_proxy(self, proxy): pipe = self.conn.pipeline() pipe.srem(self.all_data, proxy) pipe.zrem(self.score_queue, proxy) pipe.zrem(self.speed_queue, proxy) pipe.zrem(self.ttl_queue, proxy) pipe.execute()
MIT License
williamsysu/textgan-pytorch
instructor/oracle_data/evogan_instructor.py
EvoGANInstructor.evolve_generator_population
python
def evolve_generator_population(self, evo_g_step):
    self.prepare_eval_real_data()
    best_score = np.zeros(cfg.n_parent)
    best_fit = []
    best_child = []
    best_child_opt = []
    best_fake_samples = []
    selected_mutation = []
    with torch.no_grad():
        real_samples = F.one_hot(self.oracle_data.random_batch()['target'], cfg.vocab_size).float()
        if cfg.CUDA:
            real_samples = real_samples.cuda()
        self.d_out_real = self.dis(real_samples)
    for i, (parent, parent_opt) in enumerate(zip(self.parents, self.parent_adv_opts)):
        self.load_gen(parent, parent_opt)
        self.prepare_eval_fake_data()
        Fq, Fd, score = self.evaluation(cfg.eval_type)
        best_score[i] = score
        best_fit.append([Fq, Fd, score])
        best_child.append(copy.deepcopy(self.gen.state_dict()))
        best_child_opt.append(copy.deepcopy(self.gen_adv_opt.state_dict()))
        best_fake_samples.append(self.eval_fake_samples)
    target_idx = random.randint(0, len(self.parents) - 1)
    for j, criterionG in enumerate(self.G_criterion):
        self.load_gen(self.parents[target_idx], self.parent_adv_opts[target_idx])
        self.variation(evo_g_step, criterionG)
        self.prepare_eval_fake_data()
        Fq, Fd, score = self.evaluation(cfg.eval_type)
        fit_com = score - best_score
        if max(fit_com) > 0:
            id_replace = np.where(fit_com == max(fit_com))[0][0]
            best_score[id_replace] = score
            best_fit[id_replace] = [Fq, Fd, score]
            best_child[id_replace] = copy.deepcopy(self.gen.state_dict())
            best_child_opt[id_replace] = copy.deepcopy(self.gen_adv_opt.state_dict())
            best_fake_samples[id_replace] = self.eval_fake_samples
            selected_mutation.append(criterionG.loss_mode)
    self.parents = copy.deepcopy(best_child)
    self.parent_adv_opts = copy.deepcopy(best_child_opt)
    self.best_fake_samples = torch.cat(best_fake_samples, dim=0)
    return best_score, np.array(best_fit), selected_mutation
1. randomly choose a parent from population; 2. variation; 3. evaluate all parents and child, choose the best
https://github.com/williamsysu/textgan-pytorch/blob/891635af6845edfee382de147faa4fc00c7e90eb/instructor/oracle_data/evogan_instructor.py#L293-L354
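The selection step at the end compares the mutated child's score against every current parent and replaces the parent it improves on most. A toy NumPy illustration of just that bookkeeping, with made-up scores:

import numpy as np

best_score = np.array([0.40, 0.55, 0.61])   # current parents' scores
child_score = 0.58                          # score of the newly mutated child
fit_com = child_score - best_score
if max(fit_com) > 0:
    id_replace = np.where(fit_com == max(fit_com))[0][0]
    best_score[id_replace] = child_score    # the parent the child beats by the widest margin is replaced
assert id_replace == 0 and best_score[0] == 0.58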
import copy import numpy as np import os import random import torch import torch.nn.functional as F import torch.optim as optim from tqdm import tqdm import config as cfg from instructor.oracle_data.instructor import BasicInstructor from metrics.nll import NLL from models.EvoGAN_D import EvoGAN_D from models.EvoGAN_G import EvoGAN_G from utils.data_loader import GenDataIter from utils.gan_loss import GANLoss from utils.helpers import get_fixed_temperature, get_losses, create_oracle class EvoGANInstructor(BasicInstructor): def __init__(self, opt): super(EvoGANInstructor, self).__init__(opt) self.gen = EvoGAN_G(cfg.mem_slots, cfg.num_heads, cfg.head_size, cfg.gen_embed_dim, cfg.gen_hidden_dim, cfg.vocab_size, cfg.max_seq_len, cfg.padding_idx, gpu=cfg.CUDA) self.parents = [EvoGAN_G(cfg.mem_slots, cfg.num_heads, cfg.head_size, cfg.gen_embed_dim, cfg.gen_hidden_dim, cfg.vocab_size, cfg.max_seq_len, cfg.padding_idx, gpu=cfg.CUDA).state_dict() for _ in range(cfg.n_parent)] self.dis = EvoGAN_D(cfg.dis_embed_dim, cfg.max_seq_len, cfg.num_rep, cfg.vocab_size, cfg.padding_idx, gpu=cfg.CUDA) self.init_model() self.gen_opt = optim.Adam(self.gen.parameters(), lr=cfg.gen_lr) self.gen_adv_opt = optim.Adam(self.gen.parameters(), lr=cfg.gen_adv_lr) self.dis_opt = optim.Adam(self.dis.parameters(), lr=cfg.dis_lr) self.parent_mle_opts = [copy.deepcopy(self.gen_opt.state_dict()) for _ in range(cfg.n_parent)] self.parent_adv_opts = [copy.deepcopy(self.gen_adv_opt.state_dict()) for _ in range(cfg.n_parent)] self.G_criterion = [GANLoss(loss_mode, 'G', cfg.d_type, CUDA=cfg.CUDA) for loss_mode in cfg.mu_type.split()] self.D_criterion = GANLoss(cfg.loss_type, 'D', cfg.d_type, CUDA=cfg.CUDA) def init_model(self): if cfg.oracle_pretrain: if not os.path.exists(cfg.oracle_state_dict_path): create_oracle() self.oracle.load_state_dict(torch.load(cfg.oracle_state_dict_path, map_location='cuda:%d' % cfg.device)) if cfg.dis_pretrain: self.log.info( 'Load pretrained discriminator: {}'.format(cfg.pretrained_dis_path)) self.dis.load_state_dict(torch.load(cfg.pretrained_dis_path, map_location='cuda:{}'.format(cfg.device))) if cfg.gen_pretrain: for i in range(cfg.n_parent): self.log.info('Load MLE pretrained generator gen: {}'.format(cfg.pretrained_gen_path + '%d' % i)) self.parents[i] = torch.load(cfg.pretrained_gen_path + '%d' % 0, map_location='cpu') if cfg.CUDA: self.oracle = self.oracle.cuda() self.gen = self.gen.cuda() if cfg.multi_gpu: self.dis = torch.nn.parallel.DataParallel(self.dis, device_ids=cfg.devices) self.dis = self.dis.cuda() def load_gen(self, parent, parent_opt, mle=False): self.gen.load_state_dict(copy.deepcopy(parent)) if mle: self.gen_opt.load_state_dict(copy.deepcopy(parent_opt)) self.gen_opt.zero_grad() else: self.gen_adv_opt.load_state_dict(copy.deepcopy(parent_opt)) self.gen_adv_opt.zero_grad() def _run(self): if not cfg.gen_pretrain: for i, (parent, parent_opt) in enumerate(zip(self.parents, self.parent_mle_opts)): self.log.info('Starting Generator-{} MLE Training...'.format(i)) self.load_gen(parent, parent_opt, mle=True) self.pretrain_generator(cfg.MLE_train_epoch) self.parents[i] = copy.deepcopy(self.gen.state_dict()) if cfg.if_save and not cfg.if_test: torch.save(self.gen.state_dict(), cfg.pretrained_gen_path + '%d' % i) self.log.info('Save pre-trained generator: {}'.format(cfg.pretrained_gen_path + '%d' % i)) self.log.info('Starting Adversarial Training...') progress = tqdm(range(cfg.ADV_train_epoch)) for adv_epoch in progress: if cfg.temperature == 1: score, fit_score, select_mu = 
self.evolve_generator(cfg.ADV_g_step) else: score, fit_score, select_mu = self.evolve_generator_with_temp(adv_epoch, cfg.ADV_g_step) d_loss = self.evolve_discriminator(cfg.ADV_d_step) best_id = int(np.argmax(score)) progress.set_description('mu: %s, d_loss = %.4f, temp = %.4f' % ( ' '.join(select_mu), d_loss, self.parents[best_id]['temperature'].item())) if adv_epoch % cfg.adv_log_step == 0 or adv_epoch == cfg.ADV_train_epoch - 1: best_id = int(np.argmax(score)) self.load_gen(self.parents[best_id], self.parent_adv_opts[best_id]) self.log.info('[ADV] epoch %d: temp = %.4f, d_loss = %.4f, %s' % ( adv_epoch, self.gen.temperature.item(), d_loss, self.cal_metrics(fmt_str=True))) if cfg.if_save and not cfg.if_test: self._save('ADV', adv_epoch) def _test(self): print('>>> Begin test...') self._run() pass def pretrain_generator(self, epochs): for epoch in range(epochs): self.sig.update() if self.sig.pre_sig: pre_loss = self.train_gen_epoch(self.gen, self.oracle_data.loader, self.mle_criterion, self.gen_opt) if epoch % cfg.pre_log_step == 0 or epoch == epochs - 1: self.log.info( '[MLE-GEN] epoch %d : pre_loss = %.4f, %s' % (epoch, pre_loss, self.cal_metrics(fmt_str=True))) if cfg.if_save and not cfg.if_test: self._save('MLE', epoch) else: self.log.info('>>> Stop by pre signal, skip to adversarial training...') break def evolve_generator(self, evo_g_step): self.prepare_eval_real_data() best_score = np.zeros(cfg.n_parent) best_fit = [] best_child = [] best_child_opt = [] best_fake_samples = [] selected_mutation = [] count = 0 with torch.no_grad(): real_samples = F.one_hot(self.oracle_data.random_batch()['target'], cfg.vocab_size).float() if cfg.CUDA: real_samples = real_samples.cuda() self.d_out_real = self.dis(real_samples) for i, (parent, parent_opt) in enumerate(zip(self.parents, self.parent_adv_opts)): for j, criterionG in enumerate(self.G_criterion): self.load_gen(parent, parent_opt) self.variation(evo_g_step, criterionG) self.prepare_eval_fake_data() Fq, Fd, score = self.evaluation(cfg.eval_type) if count < cfg.n_parent: best_score[count] = score best_fit.append([Fq, Fd, score]) best_child.append(copy.deepcopy(self.gen.state_dict())) best_child_opt.append(copy.deepcopy(self.gen_adv_opt.state_dict())) best_fake_samples.append(self.eval_fake_samples) selected_mutation.append(criterionG.loss_mode) else: fit_com = score - best_score if max(fit_com) > 0: id_replace = np.where(fit_com == max(fit_com))[0][0] best_score[id_replace] = score best_fit[id_replace] = [Fq, Fd, score] best_child[id_replace] = copy.deepcopy(self.gen.state_dict()) best_child_opt[id_replace] = copy.deepcopy(self.gen_adv_opt.state_dict()) best_fake_samples[id_replace] = self.eval_fake_samples selected_mutation[id_replace] = criterionG.loss_mode count += 1 self.parents = copy.deepcopy(best_child) self.parent_adv_opts = copy.deepcopy(best_child_opt) self.best_fake_samples = torch.cat(best_fake_samples, dim=0) return best_score, np.array(best_fit), selected_mutation def evolve_generator_with_temp(self, cur_adv_step, evo_g_step): self.prepare_eval_real_data() best_score = np.zeros(cfg.n_parent) best_fit = [] best_child = [] best_child_opt = [] best_fake_samples = [] selected_mutation = [] count = 0 with torch.no_grad(): real_samples = F.one_hot(self.oracle_data.random_batch()['target'], cfg.vocab_size).float() if cfg.CUDA: real_samples = real_samples.cuda() self.d_out_real = self.dis(real_samples) for i, (parent, parent_opt) in enumerate(zip(self.parents, self.parent_adv_opts)): for j, criterionG in enumerate(self.G_criterion): 
all_temp = self.get_evo_temp(cur_adv_step) temp_score = float('-inf') temp_fit = None temp_child = None temp_child_opt = None temp_fake_samples = None for temp in all_temp: self.load_gen(parent, parent_opt) self.gen.temperature.data = temp self.variation(evo_g_step, criterionG) self.prepare_eval_fake_data() _, _, t_score = self.evaluation('Ra') loss_Fq, loss_Fd, loss_score = self.evaluation(cfg.eval_type) if t_score > temp_score: temp_score = loss_score temp_fit = [loss_Fq, loss_Fd, loss_score] temp_child = copy.deepcopy(self.gen.state_dict()) temp_child_opt = copy.deepcopy(self.gen_adv_opt.state_dict()) temp_fake_samples = copy.deepcopy(self.eval_fake_samples) if count < cfg.n_parent: best_score[count] = temp_score best_fit.append(temp_fit) best_child.append(temp_child) best_child_opt.append(temp_child_opt) best_fake_samples.append(temp_fake_samples) selected_mutation.append(criterionG.loss_mode) else: fit_com = temp_score - best_score if max(fit_com) > 0: id_replace = np.where(fit_com == max(fit_com))[0][0] best_score[id_replace] = temp_score best_fit[id_replace] = temp_fit best_child[id_replace] = temp_child best_child_opt[id_replace] = temp_child_opt best_fake_samples[id_replace] = temp_fake_samples selected_mutation[id_replace] = criterionG.loss_mode count += 1 self.parents = copy.deepcopy(best_child) self.parent_adv_opts = copy.deepcopy(best_child_opt) self.best_fake_samples = torch.cat(best_fake_samples, dim=0) return best_score, np.array(best_fit), selected_mutation
MIT License
wind-river/crypto-detector
cryptodetector/rpm.py
_Stream._init_write_gz
python
def _init_write_gz(self):
    self.cmp = self.zlib.compressobj(9, self.zlib.DEFLATED, -self.zlib.MAX_WBITS,
                                     self.zlib.DEF_MEM_LEVEL, 0)
    timestamp = struct.pack("<L", int(time.time()))
    self.__write("\037\213\010\010%s\002\377" % timestamp)
    if self.name.endswith(".gz"):
        self.name = self.name[:-3]
    self.__write(self.name + NUL)
Initialize for writing with gzip compression.
https://github.com/wind-river/crypto-detector/blob/d6156a818f57826e520ca117cc3baed55f51483c/cryptodetector/rpm.py#L290-L301
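The bytes written above are an RFC 1952 gzip member header: the 1f 8b magic, deflate method, the FNAME flag, a little-endian timestamp, XFL and OS bytes, then the NUL-terminated file name. A standalone sketch that assembles a complete gzip member the same way and checks it round-trips with the standard gzip module (it does not use the _Stream class):

import struct, time, zlib, gzip

data = b"hello gzip"
name = b"demo.txt"
# RFC 1952 header: magic, CM=8 (deflate), FLG=0x08 (FNAME), mtime, XFL=2, OS=0xff
header = b"\x1f\x8b\x08\x08" + struct.pack("<L", int(time.time())) + b"\x02\xff" + name + b"\x00"
cmp = zlib.compressobj(9, zlib.DEFLATED, -zlib.MAX_WBITS, zlib.DEF_MEM_LEVEL, 0)
body = cmp.compress(data) + cmp.flush()
trailer = struct.pack("<L", zlib.crc32(data) & 0xFFFFFFFF) + struct.pack("<L", len(data) & 0xFFFFFFFF)
assert gzip.decompress(header + body + trailer) == data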
import sys import os import io import shutil import stat import errno import time import struct import copy import gzip import bz2 import lzma from functools import cmp_to_key from cryptodetector.exceptions import ExtractError, ReadError, CompressionError, StreamError, InvalidRPM, CryptoDetectorError try: import grp as GRP, pwd as PWD except ImportError: GRP = PWD = None MAGIC_NEWC = 0x070701 TRAILER_NAME = "TRAILER!!!" WORDSIZE = 4 NUL = b"\0" BLOCKSIZE = 512 HEADERSIZE_SVR4 = 110 S_IFLNK = 0o120000 S_IFREG = 0o100000 S_IFBLK = 0o060000 S_IFDIR = 0o040000 S_IFCHR = 0o020000 S_IFIFO = 0o010000 TSUID = 0o4000 TSGID = 0o2000 TSVTX = 0o1000 TUREAD = 0o400 TUWRITE = 0o200 TUEXEC = 0o100 TGREAD = 0o040 TGWRITE = 0o020 TGEXEC = 0o010 TOREAD = 0o004 TOWRITE = 0o002 TOEXEC = 0o001 def copyfileobj(src, dst, length=None): if length == 0: return if length is None: shutil.copyfileobj(src, dst) return bufsize = 16 * 1024 blocks, remainder = divmod(length, bufsize) for _ in range(blocks): buf = src.read(bufsize) if len(buf) < bufsize: raise IOError("end of file reached") dst.write(buf) if remainder != 0: buf = src.read(remainder) if len(buf) < remainder: raise IOError("end of file reached") dst.write(buf) return FILEMODE_TABLE = ( ((S_IFLNK, "l"), (S_IFREG, "-"), (S_IFBLK, "b"), (S_IFDIR, "d"), (S_IFCHR, "c"), (S_IFIFO, "p")), ((TUREAD, "r"),), ((TUWRITE, "w"),), ((TUEXEC|TSUID, "s"), (TSUID, "S"), (TUEXEC, "x")), ((TGREAD, "r"),), ((TGWRITE, "w"),), ((TGEXEC|TSGID, "s"), (TSGID, "S"), (TGEXEC, "x")), ((TOREAD, "r"),), ((TOWRITE, "w"),), ((TOEXEC|TSVTX, "t"), (TSVTX, "T"), (TOEXEC, "x")) ) def filemode(mode): perm = [] for table in FILEMODE_TABLE: for bit, char in table: if mode & bit == bit: perm.append(char) break else: perm.append("-") return "".join(perm) def normpath(path): if os.sep != "/": return os.path.normpath(path).replace(os.sep, "/") else: return os.path.normpath(path) class _LowLevelFile(object): def __init__(self, name, mode): mode = { "r": os.O_RDONLY, "w": os.O_WRONLY | os.O_CREAT | os.O_TRUNC, }[mode] if not hasattr(os, "O_BINARY"): os.O_BINARY = 0 mode |= os.O_BINARY self.fd = os.open(name, mode) def close(self): os.close(self.fd) def read(self, size): return os.read(self.fd, size) def write(self, s): os.write(self.fd, s) class _Stream(object): def __init__(self, name, mode, comptype, fileobj, bufsize): self._extfileobj = True if fileobj is None: fileobj = _LowLevelFile(name, mode) self._extfileobj = False if comptype == '*': fileobj = _StreamProxy(fileobj) comptype = fileobj.getcomptype() self.name = name or "" self.mode = mode self.comptype = comptype self.fileobj = fileobj self.bufsize = bufsize self.buf = "" self.pos = 0 self.closed = False if comptype == "gz": try: import zlib except ImportError: raise CompressionError("zlib module is not available") self.zlib = zlib self.crc = zlib.crc32("") if mode == "r": self._init_read_gz() else: self._init_write_gz() if comptype == "bz2": try: import bz2 except ImportError: raise CompressionError("bz2 module is not available") if mode == "r": self.dbuf = "" self.cmp = bz2.BZ2Decompressor() else: self.cmp = bz2.BZ2Compressor() if comptype == "xz": try: import lzma except ImportError: raise CompressionError("lzma module is not available") if mode == "r": self.dbuf = "" self.cmp = lzma.LZMADecompressor() else: self.cmp = lzma.LZMACompressor() def __del__(self): if hasattr(self, "closed") and not self.closed: self.close()
Apache License 2.0
eric3911/mini_ssd
object_detection/core/model.py
DetectionModel.postprocess
python
def postprocess(self, prediction_dict, true_image_shapes, **params):
    pass
Convert predicted output tensors to final detections.
Outputs adhere to the following conventions:
* Classes are integers in [0, num_classes); background classes are removed and the first non-background class is mapped to 0. If the model produces class-agnostic detections, then no output is produced for classes.
* Boxes are to be interpreted as being in [y_min, x_min, y_max, x_max] format and normalized relative to the image window.
* `num_detections` is provided for settings where detections are padded to a fixed number of boxes.
* We do not specifically assume any kind of probabilistic interpretation of the scores --- the only important thing is their relative ordering. Thus implementations of the postprocess function are free to output logits, probabilities, calibrated probabilities, or anything else.
Args:
    prediction_dict: a dictionary holding prediction tensors.
    true_image_shapes: int32 tensor of shape [batch, 3] where each row is of the form [height, width, channels] indicating the shapes of true images in the resized images, as resized images can be padded with zeros.
    **params: Additional keyword arguments for specific implementations of DetectionModel.
Returns:
    detections: a dictionary containing the following fields
        detection_boxes: [batch, max_detections, 4]
        detection_scores: [batch, max_detections]
        detection_classes: [batch, max_detections] (If a model is producing class-agnostic detections, this field may be missing)
        instance_masks: [batch, max_detections, image_height, image_width] (optional)
        keypoints: [batch, max_detections, num_keypoints, 2] (optional)
        num_detections: [batch]
https://github.com/eric3911/mini_ssd/blob/6fb6e1bce3ab6e4adb832b37e78325803c7424b6/object_detection/core/model.py#L176-L213
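The conventions above amount to a fixed-shape dictionary contract. A toy NumPy illustration of what an implementation might return for batch=1 and max_detections=3 (shapes only; a real postprocess works on TensorFlow tensors):

import numpy as np

batch, max_detections = 1, 3
detections = {
    'detection_boxes':   np.zeros((batch, max_detections, 4), dtype=np.float32),  # [ymin, xmin, ymax, xmax], normalized
    'detection_scores':  np.zeros((batch, max_detections), dtype=np.float32),
    'detection_classes': np.zeros((batch, max_detections), dtype=np.float32),     # 0-based, background removed
    'num_detections':    np.array([2], dtype=np.int32),                           # real boxes; the rest is padding
}
assert detections['detection_boxes'].shape == (1, 3, 4)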
from abc import ABCMeta from abc import abstractmethod from object_detection.core import standard_fields as fields class DetectionModel(object): __metaclass__ = ABCMeta def __init__(self, num_classes): self._num_classes = num_classes self._groundtruth_lists = {} @property def num_classes(self): return self._num_classes def groundtruth_lists(self, field): if field not in self._groundtruth_lists: raise RuntimeError('Groundtruth tensor %s has not been provided', field) return self._groundtruth_lists[field] def groundtruth_has_field(self, field): return field in self._groundtruth_lists @abstractmethod def preprocess(self, inputs): pass @abstractmethod def predict(self, preprocessed_inputs, true_image_shapes): pass @abstractmethod
MIT License
arx-game/arxcode
server/conf/lockfuncs.py
practitioner
python
def practitioner(accessing_obj, accessed_obj, *args, **kwargs):
    mage = Practitioner.practitioner_for_character(accessing_obj)
    if not mage:
        return False
    return mage.eyes_open
Checks if the accessing_obj has a magical Practitioner record.
https://github.com/arx-game/arxcode/blob/5299f1f75c4ee5ee19e1a26195aa24832f7ca817/server/conf/lockfuncs.py#L254-L262
from world.dominion.models import Organization, Member from world.magic.models import Practitioner def rank(accessing_obj, accessed_obj, *args, **kwargs): if not args: return False if accessing_obj.player_ob: accessing_obj = accessing_obj.player_ob if hasattr(accessing_obj, "dbobj"): accessing_obj = accessing_obj.dbobj try: rank_num = int(args[0]) except ValueError: if len(args) == 1: return organization(accessing_obj, accessed_obj, *args, **kwargs) args = args[::-1] try: rank_num = int(args[0]) except (ValueError, TypeError): print("Malformed lock 'rank' in %s." % accessed_obj) return False if len(args) == 1: org_obj = accessed_obj else: try: org_obj = Organization.objects.get(name__iexact=args[1]) except Organization.DoesNotExist: return False try: member = accessing_obj.Dominion.memberships.get( organization=org_obj, deguilded=False ) return member.rank <= rank_num except (AttributeError, Member.DoesNotExist): return False def organization(accessing_obj, accessed_obj, *args, **kwargs): if not args: return False try: if accessing_obj.player_ob: accessing_obj = accessing_obj.player_ob except AttributeError: pass if hasattr(accessing_obj, "dbobj"): accessing_obj = accessing_obj.dbobj try: org_obj = Organization.objects.get(name__iexact=args[0]) except Organization.DoesNotExist: return False try: accessing_obj.Dominion.memberships.get(organization=org_obj, deguilded=False) return True except (AttributeError, Member.DoesNotExist): return False org = organization def ability(accessing_obj, accessed_obj, *args, **kwargs): if not args: return False if len(args) == 1: if args[0] == "all": return True name = accessed_obj.ability val = int(args[0]) else: name = args[0] val = int(args[1]) if name == "all": from world.traits.models import Trait ability_list = Trait.get_valid_ability_names(Trait.CRAFTING) else: ability_list = name.split(",") for ability_name in ability_list: ability_name = ability_name.lower().strip() try: pab = accessing_obj.traits.get_ability_value(ability_name) except AttributeError: return False if pab >= val: return True return False def skill(accessing_obj, accessed_obj, *args, **kwargs): if not args: return False if len(args) == 1: if args[0] == "all": return True name = accessed_obj.skill val = int(args[0]) else: name = args[0] val = int(args[1]) if name == "all": from world.traits.models import Trait skill_list = Trait.get_valid_skill_names(Trait.CRAFTING) else: skill_list = name.split(",") if accessing_obj.char_ob: accessing_obj = accessing_obj.char_ob for skill_name in skill_list: skill_name = skill_name.lower().strip() try: pab = accessing_obj.traits.get_skill_value(skill_name) if pab >= val: return True except AttributeError: return False return False def roomkey(accessing_obj, accessed_obj, *args, **kwargs): if not args: return False roomid = int(args[0]) try: return accessing_obj.item_data.has_key_by_id(roomid) except AttributeError: return False def chestkey(accessing_obj, accessed_obj, *args, **kwargs): return roomkey(accessing_obj, accessed_obj, *args, **kwargs) def cattr(accessing_obj, accessed_obj, *args, **kwargs): from evennia.locks.lockfuncs import attr try: if accessing_obj.player_ob: return attr(accessing_obj, accessed_obj, *args, **kwargs) char_ob = accessing_obj.char_ob return attr(char_ob, accessed_obj, *args, **kwargs) except Exception: return False def decorator(accessing_obj, accessed_obj, *args, **kwargs): obj = accessed_obj.location or accessed_obj try: if accessing_obj in obj.homeowners: return True return accessing_obj in obj.decorators except 
(AttributeError, ValueError, TypeError): return False decorators = decorator
MIT License
z-x-yang/gct
TensorFlow/preprocessing.py
train_image
python
def train_image(image_buffer,
                height,
                width,
                bbox,
                batch_position,
                resize_method,
                distortions,
                scope=None,
                summary_verbosity=0,
                distort_color_in_yiq=False,
                fuse_decode_and_crop=False):
    with tf.name_scope(scope or 'distort_image'):
        sample_distorted_bounding_box = tf.image.sample_distorted_bounding_box(
            tf.image.extract_jpeg_shape(image_buffer),
            bounding_boxes=bbox,
            min_object_covered=0.1,
            aspect_ratio_range=[0.75, 1.33],
            area_range=[0.05, 1.0],
            max_attempts=100,
            use_image_if_no_bounding_boxes=True)
        bbox_begin, bbox_size, distort_bbox = sample_distorted_bounding_box
        if summary_verbosity >= 3:
            image = tf.image.decode_jpeg(image_buffer, channels=3,
                                         dct_method='INTEGER_FAST')
            image = tf.image.convert_image_dtype(image, dtype=tf.float32)
            image_with_distorted_box = tf.image.draw_bounding_boxes(
                tf.expand_dims(image, 0), distort_bbox)
            tf.summary.image(
                'images_with_distorted_bounding_box', image_with_distorted_box)
        if fuse_decode_and_crop:
            offset_y, offset_x, _ = tf.unstack(bbox_begin)
            target_height, target_width, _ = tf.unstack(bbox_size)
            crop_window = tf.stack([offset_y, offset_x, target_height, target_width])
            image = tf.image.decode_and_crop_jpeg(
                image_buffer, crop_window, channels=3)
        else:
            image = tf.image.decode_jpeg(image_buffer, channels=3,
                                         dct_method='INTEGER_FAST')
            image = tf.slice(image, bbox_begin, bbox_size)
        distorted_image = tf.image.random_flip_left_right(image)
        image_resize_method = get_image_resize_method(resize_method, batch_position)
        distorted_image = tf.image.resize_images(
            distorted_image, [height, width],
            image_resize_method,
            align_corners=False)
        distorted_image.set_shape([height, width, 3])
        if summary_verbosity >= 3:
            tf.summary.image('cropped_resized_maybe_flipped_image',
                             tf.expand_dims(distorted_image, 0))
        if distortions:
            distorted_image = tf.cast(distorted_image, dtype=tf.float32)
            distorted_image /= 255.
            distorted_image = distort_color(distorted_image, batch_position,
                                            distort_color_in_yiq=distort_color_in_yiq)
            distorted_image *= 255
        if summary_verbosity >= 3:
            tf.summary.image(
                'final_distorted_image', tf.expand_dims(distorted_image, 0))
        return distorted_image
Distort one image for training a network.
Distorting images provides a useful technique for augmenting the data set during training in order to make the network invariant to aspects of the image that do not affect the label.
Args:
    image_buffer: scalar string Tensor representing the raw JPEG image buffer.
    height: integer
    width: integer
    bbox: 3-D float Tensor of bounding boxes arranged [1, num_boxes, coords] where each coordinate is [0, 1) and the coordinates are arranged as [ymin, xmin, ymax, xmax].
    batch_position: position of the image in a batch, which affects how images are distorted and resized. NOTE: this argument can be an integer or a tensor
    resize_method: round_robin, nearest, bilinear, bicubic, or area.
    distortions: If true, apply full distortions for image colors.
    scope: Optional scope for op_scope.
    summary_verbosity: Verbosity level for summary ops. Pass 0 to disable both summaries and checkpoints.
    distort_color_in_yiq: distort color of input images in YIQ space.
    fuse_decode_and_crop: fuse the decode/crop operation.
Returns:
    3-D float Tensor of distorted image used for training.
https://github.com/z-x-yang/gct/blob/68983edd87f8cfbe709b1b51214c69eb9c81abd7/TensorFlow/preprocessing.py#L262-L371
import math from six.moves import xrange import tensorflow as tf from tensorflow.contrib.image.python.ops import distort_image_ops from tensorflow.python.layers import utils from tensorflow.python.ops import data_flow_ops import cnn_util import data_utils def parse_example_proto(example_serialized): feature_map = { 'image/encoded': tf.FixedLenFeature([], dtype=tf.string, default_value=''), 'image/class/label': tf.FixedLenFeature([1], dtype=tf.int64, default_value=-1), 'image/class/text': tf.FixedLenFeature([], dtype=tf.string, default_value=''), } sparse_float32 = tf.VarLenFeature(dtype=tf.float32) feature_map.update( {k: sparse_float32 for k in ['image/object/bbox/xmin', 'image/object/bbox/ymin', 'image/object/bbox/xmax', 'image/object/bbox/ymax']}) features = tf.parse_single_example(example_serialized, feature_map) label = tf.cast(features['image/class/label'], dtype=tf.int32) xmin = tf.expand_dims(features['image/object/bbox/xmin'].values, 0) ymin = tf.expand_dims(features['image/object/bbox/ymin'].values, 0) xmax = tf.expand_dims(features['image/object/bbox/xmax'].values, 0) ymax = tf.expand_dims(features['image/object/bbox/ymax'].values, 0) bbox = tf.concat([ymin, xmin, ymax, xmax], 0) bbox = tf.expand_dims(bbox, 0) bbox = tf.transpose(bbox, [0, 2, 1]) return features['image/encoded'], label, bbox, features['image/class/text'] _RESIZE_METHOD_MAP = { 'nearest': tf.image.ResizeMethod.NEAREST_NEIGHBOR, 'bilinear': tf.image.ResizeMethod.BILINEAR, 'bicubic': tf.image.ResizeMethod.BICUBIC, 'area': tf.image.ResizeMethod.AREA } def get_image_resize_method(resize_method, batch_position=0): if resize_method != 'round_robin': return _RESIZE_METHOD_MAP[resize_method] resize_methods = list(_RESIZE_METHOD_MAP.values()) def lookup(index): return resize_methods[index] def resize_method_0(): return utils.smart_cond(batch_position % len(resize_methods) == 0, lambda: lookup(0), resize_method_1) def resize_method_1(): return utils.smart_cond(batch_position % len(resize_methods) == 1, lambda: lookup(1), resize_method_2) def resize_method_2(): return utils.smart_cond(batch_position % len(resize_methods) == 2, lambda: lookup(2), lambda: lookup(3)) return resize_method_0() def decode_jpeg(image_buffer, scope=None): with tf.name_scope(scope or 'decode_jpeg'): image = tf.image.decode_jpeg(image_buffer, channels=3, fancy_upscaling=False, dct_method='INTEGER_FAST') return image def normalized_image(images): images = tf.multiply(images, 1. 
/ 127.5) return tf.subtract(images, 1.0) def eval_image(image, height, width, batch_position, resize_method, summary_verbosity=0): with tf.name_scope('eval_image'): if summary_verbosity >= 3: tf.summary.image( 'original_image', tf.expand_dims(image, 0)) shape = tf.shape(image) image_height = shape[0] image_width = shape[1] image_height_float = tf.cast(image_height, tf.float32) image_width_float = tf.cast(image_width, tf.float32) scale_factor = 1.15 max_ratio = tf.maximum(height / image_height_float, width / image_width_float) resize_height = tf.cast(image_height_float * max_ratio * scale_factor, tf.int32) resize_width = tf.cast(image_width_float * max_ratio * scale_factor, tf.int32) image_resize_method = get_image_resize_method(resize_method, batch_position) distorted_image = tf.image.resize_images(image, [resize_height, resize_width], image_resize_method, align_corners=False) total_crop_height = (resize_height - height) crop_top = total_crop_height // 2 total_crop_width = (resize_width - width) crop_left = total_crop_width // 2 distorted_image = tf.slice(distorted_image, [crop_top, crop_left, 0], [height, width, 3]) distorted_image.set_shape([height, width, 3]) if summary_verbosity >= 3: tf.summary.image( 'cropped_resized_image', tf.expand_dims(distorted_image, 0)) image = distorted_image return image
MIT License
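A minimal NumPy sketch of the normalization step performed by normalized_image in the context above; the sample pixel values are illustrative only.

import numpy as np

# normalized_image maps pixel values in [0, 255] into [-1.0, 1.0]:
# image * (1 / 127.5) - 1.0
pixels = np.array([0.0, 127.5, 255.0], dtype=np.float32)  # illustrative values
normalized = pixels * (1.0 / 127.5) - 1.0
print(normalized)  # -> [-1.  0.  1.]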
datadotworld/data.world-py
datadotworld/client/_swagger/models/paginated_metadata_resource_results.py
PaginatedMetadataResourceResults.__eq__
python
def __eq__(self, other): if not isinstance(other, PaginatedMetadataResourceResults): return False return self.__dict__ == other.__dict__
Returns true if both objects are equal
https://github.com/datadotworld/data.world-py/blob/7e5f474b655f4f0c88cc6862353e4d52c0e0bb31/datadotworld/client/_swagger/models/paginated_metadata_resource_results.py#L166-L173
from pprint import pformat from six import iteritems import re class PaginatedMetadataResourceResults(object): """ Attributes: swagger_types (dict): The key is attribute name and the value is attribute type. attribute_map (dict): The key is attribute name and the value is json key in definition. """ swagger_types = { 'count': 'int', 'records': 'list[MetadataResourceDto]', 'next_page_token': 'str' } attribute_map = { 'count': 'count', 'records': 'records', 'next_page_token': 'nextPageToken' } def __init__(self, count=None, records=None, next_page_token=None): self._count = None self._records = None self._next_page_token = None self.count = count self.records = records if next_page_token is not None: self.next_page_token = next_page_token @property def count(self): return self._count @count.setter def count(self, count): if count is None: raise ValueError("Invalid value for `count`, must not be `None`") if count is not None and count < 0: raise ValueError("Invalid value for `count`, must be a value greater than or equal to `0`") self._count = count @property def records(self): return self._records @records.setter def records(self, records): if records is None: raise ValueError("Invalid value for `records`, must not be `None`") self._records = records @property def next_page_token(self): return self._next_page_token @next_page_token.setter def next_page_token(self, next_page_token): self._next_page_token = next_page_token def to_dict(self): result = {} for attr, _ in iteritems(self.swagger_types): value = getattr(self, attr) if isinstance(value, list): result[attr] = list(map( lambda x: x.to_dict() if hasattr(x, "to_dict") else x, value )) elif hasattr(value, "to_dict"): result[attr] = value.to_dict() elif isinstance(value, dict): result[attr] = dict(map( lambda item: (item[0], item[1].to_dict()) if hasattr(item[1], "to_dict") else item, value.items() )) else: result[attr] = value return result def to_str(self): return pformat(self.to_dict()) def __repr__(self): return self.to_str()
Apache License 2.0
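A hedged usage sketch for PaginatedMetadataResourceResults above; the record payloads are placeholder strings, since MetadataResourceDto is not shown here.

page_a = PaginatedMetadataResourceResults(count=2, records=['r1', 'r2'], next_page_token='abc')
page_b = PaginatedMetadataResourceResults(count=2, records=['r1', 'r2'], next_page_token='abc')

print(page_a == page_b)   # True: __eq__ compares the two instances' __dict__ attributes
print(page_a.to_dict())   # roughly {'count': 2, 'records': ['r1', 'r2'], 'next_page_token': 'abc'}

# count is validated by its setter: None or negative values raise ValueError
try:
    PaginatedMetadataResourceResults(count=-1, records=[])
except ValueError as exc:
    print(exc)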
databand-ai/dbnd
modules/dbnd/src/dbnd/_vendor_package/google/protobuf/internal/test_util.py
ExpectAllFieldsAndExtensionsInOrder
python
def ExpectAllFieldsAndExtensionsInOrder(serialized): my_extension_int = unittest_pb2.my_extension_int my_extension_string = unittest_pb2.my_extension_string expected_strings = [] message = unittest_pb2.TestFieldOrderings() message.my_int = 1 expected_strings.append(message.SerializeToString()) message.Clear() message.Extensions[my_extension_int] = 23 expected_strings.append(message.SerializeToString()) message.Clear() message.my_string = 'foo' expected_strings.append(message.SerializeToString()) message.Clear() message.Extensions[my_extension_string] = 'bar' expected_strings.append(message.SerializeToString()) message.Clear() message.my_float = 1.0 expected_strings.append(message.SerializeToString()) message.Clear() expected = b''.join(expected_strings) if expected != serialized: raise ValueError('Expected %r, found %r' % (expected, serialized))
Ensures that serialized is the serialization we expect for a message filled with SetAllFieldsAndExtensions(). (Specifically, ensures that the serialization is in canonical, tag-number order).
https://github.com/databand-ai/dbnd/blob/ec0076f9a142b20e2f7afd886ed1a18683c553ec/modules/dbnd/src/dbnd/_vendor_package/google/protobuf/internal/test_util.py#L378-L405
__author__ = '[email protected] (Will Robinson)' import numbers import operator import os.path from google.protobuf import unittest_import_pb2 from google.protobuf import unittest_pb2 try: long except NameError: long = int def IsProto2(message): return message.DESCRIPTOR.syntax == "proto2" def SetAllNonLazyFields(message): message.optional_int32 = 101 message.optional_int64 = 102 message.optional_uint32 = 103 message.optional_uint64 = 104 message.optional_sint32 = 105 message.optional_sint64 = 106 message.optional_fixed32 = 107 message.optional_fixed64 = 108 message.optional_sfixed32 = 109 message.optional_sfixed64 = 110 message.optional_float = 111 message.optional_double = 112 message.optional_bool = True message.optional_string = u'115' message.optional_bytes = b'116' if IsProto2(message): message.optionalgroup.a = 117 message.optional_nested_message.bb = 118 message.optional_foreign_message.c = 119 message.optional_import_message.d = 120 message.optional_public_import_message.e = 126 message.optional_nested_enum = unittest_pb2.TestAllTypes.BAZ message.optional_foreign_enum = unittest_pb2.FOREIGN_BAZ if IsProto2(message): message.optional_import_enum = unittest_import_pb2.IMPORT_BAZ message.optional_string_piece = u'124' message.optional_cord = u'125' message.repeated_int32.append(201) message.repeated_int64.append(202) message.repeated_uint32.append(203) message.repeated_uint64.append(204) message.repeated_sint32.append(205) message.repeated_sint64.append(206) message.repeated_fixed32.append(207) message.repeated_fixed64.append(208) message.repeated_sfixed32.append(209) message.repeated_sfixed64.append(210) message.repeated_float.append(211) message.repeated_double.append(212) message.repeated_bool.append(True) message.repeated_string.append(u'215') message.repeated_bytes.append(b'216') if IsProto2(message): message.repeatedgroup.add().a = 217 message.repeated_nested_message.add().bb = 218 message.repeated_foreign_message.add().c = 219 message.repeated_import_message.add().d = 220 message.repeated_lazy_message.add().bb = 227 message.repeated_nested_enum.append(unittest_pb2.TestAllTypes.BAR) message.repeated_foreign_enum.append(unittest_pb2.FOREIGN_BAR) if IsProto2(message): message.repeated_import_enum.append(unittest_import_pb2.IMPORT_BAR) message.repeated_string_piece.append(u'224') message.repeated_cord.append(u'225') message.repeated_int32.append(0) message.repeated_int64.append(0) message.repeated_uint32.append(0) message.repeated_uint64.append(0) message.repeated_sint32.append(0) message.repeated_sint64.append(0) message.repeated_fixed32.append(0) message.repeated_fixed64.append(0) message.repeated_sfixed32.append(0) message.repeated_sfixed64.append(0) message.repeated_float.append(0) message.repeated_double.append(0) message.repeated_bool.append(True) message.repeated_string.append(u'0') message.repeated_bytes.append(b'0') message.repeated_int32[1] = 301 message.repeated_int64[1] = 302 message.repeated_uint32[1] = 303 message.repeated_uint64[1] = 304 message.repeated_sint32[1] = 305 message.repeated_sint64[1] = 306 message.repeated_fixed32[1] = 307 message.repeated_fixed64[1] = 308 message.repeated_sfixed32[1] = 309 message.repeated_sfixed64[1] = 310 message.repeated_float[1] = 311 message.repeated_double[1] = 312 message.repeated_bool[1] = False message.repeated_string[1] = u'315' message.repeated_bytes[1] = b'316' if IsProto2(message): message.repeatedgroup.add().a = 317 message.repeated_nested_message.add().bb = 318 message.repeated_foreign_message.add().c = 319 
message.repeated_import_message.add().d = 320 message.repeated_lazy_message.add().bb = 327 message.repeated_nested_enum.append(unittest_pb2.TestAllTypes.BAR) message.repeated_nested_enum[1] = unittest_pb2.TestAllTypes.BAZ message.repeated_foreign_enum.append(unittest_pb2.FOREIGN_BAZ) if IsProto2(message): message.repeated_import_enum.append(unittest_import_pb2.IMPORT_BAZ) message.repeated_string_piece.append(u'324') message.repeated_cord.append(u'325') if IsProto2(message): message.default_int32 = 401 message.default_int64 = 402 message.default_uint32 = 403 message.default_uint64 = 404 message.default_sint32 = 405 message.default_sint64 = 406 message.default_fixed32 = 407 message.default_fixed64 = 408 message.default_sfixed32 = 409 message.default_sfixed64 = 410 message.default_float = 411 message.default_double = 412 message.default_bool = False message.default_string = '415' message.default_bytes = b'416' message.default_nested_enum = unittest_pb2.TestAllTypes.FOO message.default_foreign_enum = unittest_pb2.FOREIGN_FOO message.default_import_enum = unittest_import_pb2.IMPORT_FOO message.default_string_piece = '424' message.default_cord = '425' message.oneof_uint32 = 601 message.oneof_nested_message.bb = 602 message.oneof_string = '603' message.oneof_bytes = b'604' def SetAllFields(message): SetAllNonLazyFields(message) message.optional_lazy_message.bb = 127 def SetAllExtensions(message): extensions = message.Extensions pb2 = unittest_pb2 import_pb2 = unittest_import_pb2 extensions[pb2.optional_int32_extension] = 101 extensions[pb2.optional_int64_extension] = 102 extensions[pb2.optional_uint32_extension] = 103 extensions[pb2.optional_uint64_extension] = 104 extensions[pb2.optional_sint32_extension] = 105 extensions[pb2.optional_sint64_extension] = 106 extensions[pb2.optional_fixed32_extension] = 107 extensions[pb2.optional_fixed64_extension] = 108 extensions[pb2.optional_sfixed32_extension] = 109 extensions[pb2.optional_sfixed64_extension] = 110 extensions[pb2.optional_float_extension] = 111 extensions[pb2.optional_double_extension] = 112 extensions[pb2.optional_bool_extension] = True extensions[pb2.optional_string_extension] = u'115' extensions[pb2.optional_bytes_extension] = b'116' extensions[pb2.optionalgroup_extension].a = 117 extensions[pb2.optional_nested_message_extension].bb = 118 extensions[pb2.optional_foreign_message_extension].c = 119 extensions[pb2.optional_import_message_extension].d = 120 extensions[pb2.optional_public_import_message_extension].e = 126 extensions[pb2.optional_lazy_message_extension].bb = 127 extensions[pb2.optional_nested_enum_extension] = pb2.TestAllTypes.BAZ extensions[pb2.optional_nested_enum_extension] = pb2.TestAllTypes.BAZ extensions[pb2.optional_foreign_enum_extension] = pb2.FOREIGN_BAZ extensions[pb2.optional_import_enum_extension] = import_pb2.IMPORT_BAZ extensions[pb2.optional_string_piece_extension] = u'124' extensions[pb2.optional_cord_extension] = u'125' extensions[pb2.repeated_int32_extension].append(201) extensions[pb2.repeated_int64_extension].append(202) extensions[pb2.repeated_uint32_extension].append(203) extensions[pb2.repeated_uint64_extension].append(204) extensions[pb2.repeated_sint32_extension].append(205) extensions[pb2.repeated_sint64_extension].append(206) extensions[pb2.repeated_fixed32_extension].append(207) extensions[pb2.repeated_fixed64_extension].append(208) extensions[pb2.repeated_sfixed32_extension].append(209) extensions[pb2.repeated_sfixed64_extension].append(210) extensions[pb2.repeated_float_extension].append(211) 
extensions[pb2.repeated_double_extension].append(212) extensions[pb2.repeated_bool_extension].append(True) extensions[pb2.repeated_string_extension].append(u'215') extensions[pb2.repeated_bytes_extension].append(b'216') extensions[pb2.repeatedgroup_extension].add().a = 217 extensions[pb2.repeated_nested_message_extension].add().bb = 218 extensions[pb2.repeated_foreign_message_extension].add().c = 219 extensions[pb2.repeated_import_message_extension].add().d = 220 extensions[pb2.repeated_lazy_message_extension].add().bb = 227 extensions[pb2.repeated_nested_enum_extension].append(pb2.TestAllTypes.BAR) extensions[pb2.repeated_foreign_enum_extension].append(pb2.FOREIGN_BAR) extensions[pb2.repeated_import_enum_extension].append(import_pb2.IMPORT_BAR) extensions[pb2.repeated_string_piece_extension].append(u'224') extensions[pb2.repeated_cord_extension].append(u'225') extensions[pb2.repeated_int32_extension].append(301) extensions[pb2.repeated_int64_extension].append(302) extensions[pb2.repeated_uint32_extension].append(303) extensions[pb2.repeated_uint64_extension].append(304) extensions[pb2.repeated_sint32_extension].append(305) extensions[pb2.repeated_sint64_extension].append(306) extensions[pb2.repeated_fixed32_extension].append(307) extensions[pb2.repeated_fixed64_extension].append(308) extensions[pb2.repeated_sfixed32_extension].append(309) extensions[pb2.repeated_sfixed64_extension].append(310) extensions[pb2.repeated_float_extension].append(311) extensions[pb2.repeated_double_extension].append(312) extensions[pb2.repeated_bool_extension].append(False) extensions[pb2.repeated_string_extension].append(u'315') extensions[pb2.repeated_bytes_extension].append(b'316') extensions[pb2.repeatedgroup_extension].add().a = 317 extensions[pb2.repeated_nested_message_extension].add().bb = 318 extensions[pb2.repeated_foreign_message_extension].add().c = 319 extensions[pb2.repeated_import_message_extension].add().d = 320 extensions[pb2.repeated_lazy_message_extension].add().bb = 327 extensions[pb2.repeated_nested_enum_extension].append(pb2.TestAllTypes.BAZ) extensions[pb2.repeated_foreign_enum_extension].append(pb2.FOREIGN_BAZ) extensions[pb2.repeated_import_enum_extension].append(import_pb2.IMPORT_BAZ) extensions[pb2.repeated_string_piece_extension].append(u'324') extensions[pb2.repeated_cord_extension].append(u'325') extensions[pb2.default_int32_extension] = 401 extensions[pb2.default_int64_extension] = 402 extensions[pb2.default_uint32_extension] = 403 extensions[pb2.default_uint64_extension] = 404 extensions[pb2.default_sint32_extension] = 405 extensions[pb2.default_sint64_extension] = 406 extensions[pb2.default_fixed32_extension] = 407 extensions[pb2.default_fixed64_extension] = 408 extensions[pb2.default_sfixed32_extension] = 409 extensions[pb2.default_sfixed64_extension] = 410 extensions[pb2.default_float_extension] = 411 extensions[pb2.default_double_extension] = 412 extensions[pb2.default_bool_extension] = False extensions[pb2.default_string_extension] = u'415' extensions[pb2.default_bytes_extension] = b'416' extensions[pb2.default_nested_enum_extension] = pb2.TestAllTypes.FOO extensions[pb2.default_foreign_enum_extension] = pb2.FOREIGN_FOO extensions[pb2.default_import_enum_extension] = import_pb2.IMPORT_FOO extensions[pb2.default_string_piece_extension] = u'424' extensions[pb2.default_cord_extension] = '425' extensions[pb2.oneof_uint32_extension] = 601 extensions[pb2.oneof_nested_message_extension].bb = 602 extensions[pb2.oneof_string_extension] = u'603' extensions[pb2.oneof_bytes_extension] = 
b'604' def SetAllFieldsAndExtensions(message): message.my_int = 1 message.my_string = 'foo' message.my_float = 1.0 message.Extensions[unittest_pb2.my_extension_int] = 23 message.Extensions[unittest_pb2.my_extension_string] = 'bar'
Apache License 2.0
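A hedged sketch of how ExpectAllFieldsAndExtensionsInOrder above pairs with SetAllFieldsAndExtensions from the same test_util module; it assumes the protobuf test protos (unittest_pb2) are importable, as they are for the vendored module.

from google.protobuf import unittest_pb2

message = unittest_pb2.TestFieldOrderings()
SetAllFieldsAndExtensions(message)        # fills my_int, my_string, my_float and both extensions
serialized = message.SerializeToString()

# Passes silently when the serialization is in canonical tag-number order;
# otherwise raises ValueError showing the expected and actual bytes.
ExpectAllFieldsAndExtensionsInOrder(serialized)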
bungnoid/gltools
utils/mathUtils.py
distributeValue
python
def distributeValue(samples,spacing=1.0,rangeStart=0.0,rangeEnd=1.0): vList = [rangeStart] vDist = abs(rangeEnd - rangeStart) unit = 1.0 factor = 1.0 for i in range(samples-2): unit += factor * spacing factor *= spacing unit = vDist/unit totalUnit = unit for i in range(samples-2): multFactor = totalUnit/vDist vList.append(rangeStart-((rangeStart - rangeEnd) * multFactor)) unit *= spacing totalUnit += unit vList.append(rangeEnd) return vList
Returns a list of values distributed between a start and end range. @param samples: Number of values to sample across the value range @type samples: int @param spacing: Ratio applied to each successive sample distance (1.0 gives even spacing) @type spacing: float @param rangeStart: Minimum value in the sample range @type rangeStart: float @param rangeEnd: Maximum value in the sample range @type rangeEnd: float
https://github.com/bungnoid/gltools/blob/8ff0899de43784a18bd4543285655e68e28fb5e5/utils/mathUtils.py#L144-L180
import maya.cmds as mc import maya.OpenMaya as OpenMaya def isEqual(x,y,tolerance=0.00001): return abs(x-y) < tolerance def mag(vector=(0,0,0)): return OpenMaya.MVector(vector[0],vector[1],vector[2]).length() def normalizeVector(vector=(0,0,0)): normal = OpenMaya.MVector(vector[0],vector[1],vector[2]).normal() return (normal.x,normal.y,normal.z) def dotProduct(vector1=(0.0,0.0,0.0),vector2=(0.0,0.0,0.0)): vec1 = OpenMaya.MVector(vector1[0],vector1[1],vector1[2]) vec2 = OpenMaya.MVector(vector2[0],vector2[1],vector2[2]) return vec1 * vec2 def distanceBetween(point1=[0.0,0.0,0.0],point2=[0.0,0.0,0.0]): pnt1 = OpenMaya.MPoint(point1[0],point1[1],point1[2],1.0) pnt2 = OpenMaya.MPoint(point2[0],point2[1],point2[2],1.0) return OpenMaya.MVector(pnt1-pnt2).length() def offsetVector(point1=(0.0,0.0,0.0),point2=(0.0,0.0,0.0)): pnt1 = OpenMaya.MPoint(point1[0],point1[1],point1[2],1.0) pnt2 = OpenMaya.MPoint(point2[0],point2[1],point2[2],1.0) vec = pnt2 - pnt1 return (vec.x,vec.y,vec.z) def crossProduct(vector1=(0.0,0.0,0.0),vector2=(0.0,0.0,0.0)): vec1 = OpenMaya.MVector(vector1[0],vector1[1],vector1[2]) vec2 = OpenMaya.MVector(vector2[0],vector2[1],vector2[2]) crossProduct = vec1 ^ vec2 return (crossProduct.x,crossProduct.y,crossProduct.z) def averagePosition(pos1=(0.0,0.0,0.0),pos2=(0.0,0.0,0.0),weight=0.5): return (pos1[0]+((pos2[0]-pos1[0])*weight),pos1[1]+((pos2[1]-pos1[1])*weight),pos1[2]+((pos2[2]-pos1[2])*weight)) def closestPointOnLine(pt,lineA,lineB,clampSegment=False): ptOffset = offsetVector(lineA,pt) lineOffset = offsetVector(lineA,lineB) dot = dotProduct(ptOffset,lineOffset) if clampSegment: if dot < 0.0: return lineA if dot > 1.0: return lineB return [lineA[0]+(lineOffset[0]*dot),lineA[1]+(lineOffset[1]*dot),lineA[2]+(lineOffset[2]*dot)] def smoothStep(value,rangeStart=0.0,rangeEnd=1.0,smooth=1.0): rangeVal = rangeEnd - rangeStart nValue = value / rangeVal sValue = pow(nValue,2) * (3-(nValue*2)) sValue = nValue + ((sValue-nValue)*smooth) value = rangeStart + (rangeVal * sValue) return value
MIT License
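A worked example of distributeValue above, written as a pure-Python re-derivation (traced from the code rather than run inside Maya): spacing is the ratio between successive gaps, so spacing=1.0 yields even spacing and spacing=2.0 doubles each gap.

# distributeValue(5)              -> [0.0, 0.25, 0.5, 0.75, 1.0]
# distributeValue(4, spacing=2.0) -> [0.0, 1/7, 3/7, 1.0] (gaps 1/7, 2/7, 4/7)

def distribute(samples, spacing=1.0, start=0.0, end=1.0):
    # Equivalent geometric-gap computation without the Maya imports.
    gaps = [spacing ** i for i in range(samples - 1)]
    scale = (end - start) / sum(gaps)
    values, current = [start], start
    for gap in gaps:
        current += gap * scale
        values.append(current)
    return values

print(distribute(4, spacing=2.0))  # [0.0, 0.142857..., 0.428571..., 1.0]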
openstack/vitrage
vitrage/tests/base.py
BaseTest.assert_timestamp_equal
python
def assert_timestamp_equal(self, first, second, msg=None): return self.assertAlmostEqual(timeutils.delta_seconds(first, second), 0.0, places=5, msg=msg)
Checks that two timestamps are equal. This relies on assertAlmostEqual to avoid rounding problems, and only checks equality down to roughly the microsecond level.
https://github.com/openstack/vitrage/blob/95b33dbf39b040e23915882a2879c87aec239ca9/vitrage/tests/base.py#L76-L85
import logging import os from oslo_config import cfg from oslo_config import fixture as config_fixture from oslo_utils import timeutils from oslotest import base import sys from testtools import matchers from testtools.matchers import HasLength from vitrage.common import config CONF = cfg.CONF IsEmpty = lambda: HasLength(0) class BaseTest(base.BaseTestCase): def conf_reregister_opts(self, opts, group=None): self.conf.reset() if group in self.conf: for opt in opts: self.conf.unregister_opt(opt, group=group) self.conf.register_opts(opts, group=group) def unregister_opts(): self.conf.reset() for opt in opts: self.conf.unregister_opt(opt, group=group) self.addCleanup(unregister_opts) def setUp(self): super(BaseTest, self).setUp() self.cfg_fixture = self.useFixture( config_fixture.Config(CONF)) config.parse_config([]) logging.disable(logging.CRITICAL) self.conf = self.cfg_fixture.conf def config(self, **kw): self.cfg_fixture.config(**kw) def assert_list_equal(self, l1, l2, message=None): if tuple(sys.version_info)[0:2] < (2, 7): self.assertEqual(l1, l2, message) else: super(BaseTest, self).assertListEqual(l1, l2, message) def assert_dict_equal(self, d1, d2, message=None): if tuple(sys.version_info)[0:2] < (2, 7): self.assertEqual(d1, d2) else: super(BaseTest, self).assertDictEqual(d1, d2, message)
Apache License 2.0
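A small sketch of the tolerance implied by assert_timestamp_equal above, assuming oslo_utils is installed: assertAlmostEqual with places=5 treats second deltas below a few microseconds as zero.

from datetime import datetime, timedelta
from oslo_utils import timeutils

first = datetime(2024, 1, 1, 12, 0, 0, 0)       # hypothetical timestamps
second = first + timedelta(microseconds=3)

delta = timeutils.delta_seconds(first, second)  # 3e-06 seconds
print(round(delta, 5) == 0.0)                   # True -> the assertion would pass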
ambitioninc/django-entity-event
entity_event/models.py
Unsubscription.__str__
python
def __str__(self): s = '{entity} from {source} by {medium}' entity = self.entity.__str__() source = self.source.__str__() medium = self.medium.__str__() return s.format(entity=entity, source=source, medium=medium)
Readable representation of ``Unsubscription`` objects.
https://github.com/ambitioninc/django-entity-event/blob/5b48bbe2fa3c80cbac01a62768df269635a1e36a/entity_event/models.py#L769-L777
from collections import defaultdict from datetime import datetime from operator import or_ from six.moves import reduce from cached_property import cached_property from django.contrib.postgres.fields import JSONField from django.core.serializers.json import DjangoJSONEncoder from django.db import models, transaction from django.db.models import Q from django.db.models.query import QuerySet from django.template.loader import render_to_string from django.template import Context, Template from entity.models import Entity, EntityRelationship from entity_event.context_serializer import DefaultContextSerializer class Medium(models.Model): name = models.CharField(max_length=256, unique=True) display_name = models.CharField(max_length=256) description = models.TextField() time_created = models.DateTimeField(auto_now_add=True) rendering_style = models.ForeignKey('entity_event.RenderingStyle', null=True, on_delete=models.CASCADE) additional_context = JSONField(null=True, default=None, encoder=DjangoJSONEncoder) def __str__(self): return self.display_name @transaction.atomic def events(self, **event_filters): events = self.get_filtered_events(**event_filters) subscriptions = Subscription.objects.cache_related().filter( medium=self ) subscription_q_objects = [ Q( eventactor__entity__in=self.followed_by(sub.subscribed_entities()), source_id=sub.source_id ) for sub in subscriptions if sub.only_following ] subscription_q_objects.append( Q(source_id__in=[ sub.source_id for sub in subscriptions if not sub.only_following ]) ) events = events.cache_related().filter(reduce(or_, subscription_q_objects)) return events @transaction.atomic def entity_events(self, entity, **event_filters): events = self.get_filtered_events(**event_filters) subscriptions = Subscription.objects.filter(medium=self) subscriptions = self.subset_subscriptions(subscriptions, entity) subscription_q_objects = [ Q( eventactor__entity__in=self.followed_by(entity), source_id=sub.source_id ) for sub in subscriptions if sub.only_following ] subscription_q_objects.append( Q(source_id__in=[sub.source_id for sub in subscriptions if not sub.only_following]) ) return [ event for event in events.filter(reduce(or_, subscription_q_objects)) if self.filter_source_targets_by_unsubscription(event.source_id, [entity]) ] @transaction.atomic def events_targets(self, entity_kind=None, **event_filters): events = self.get_filtered_events(**event_filters) subscriptions = Subscription.objects.filter(medium=self).select_related('entity') subscribed_cache = {} for sub in subscriptions: subscribed_cache[sub.id] = sub.subscribed_entities() event_pairs = [] for event in events: targets = [] for sub in subscriptions: if event.source_id != sub.source_id: continue subscribed = subscribed_cache[sub.id] if sub.only_following: potential_targets = self.followers_of( event.eventactor_set.values_list('entity__id', flat=True) ) subscription_targets = list(Entity.objects.filter( Q(id__in=subscribed), Q(id__in=potential_targets) )) else: subscription_targets = list(subscribed) targets.extend(subscription_targets) targets = self.filter_source_targets_by_unsubscription(event.source_id, targets) if entity_kind: targets = [t for t in targets if t.entity_kind == entity_kind] if targets: event_pairs.append((event, targets)) return event_pairs def subset_subscriptions(self, subscriptions, entity=None): if entity is None: return subscriptions super_entities = EntityRelationship.objects.filter( sub_entity=entity).values_list('super_entity') subscriptions = subscriptions.filter( 
Q(entity=entity, sub_entity_kind=None) | Q(entity__in=super_entities, sub_entity_kind=entity.entity_kind) ) return subscriptions @cached_property def unsubscriptions(self): unsubscriptions = defaultdict(list) for unsub in Unsubscription.objects.filter(medium=self).values('entity', 'source'): unsubscriptions[unsub['source']].append(unsub['entity']) return unsubscriptions def filter_source_targets_by_unsubscription(self, source_id, targets): unsubscriptions = self.unsubscriptions return [t for t in targets if t.id not in unsubscriptions[source_id]] def get_filtered_events_queryset(self, start_time, end_time, seen, include_expired, actor, queryset=None): if queryset is None: queryset = Event.objects queryset = queryset.annotate( event_seen_medium=models.FilteredRelation( 'eventseen', condition=Q(eventseen__medium=self) ) ) now = datetime.utcnow() filters = [] if start_time is not None: filters.append(Q(time__gte=start_time)) if end_time is not None: filters.append(Q(time__lte=end_time)) if not include_expired: filters.append(Q(time_expires__gte=now)) if seen is True: filters.append(Q(eventseen__medium=self)) elif seen is False: filters.append(Q(event_seen_medium__id__isnull=True)) if actor is not None: filters.append(Q(eventactor__entity=actor)) return queryset.filter(*filters) def get_filtered_events( self, start_time=None, end_time=None, seen=None, mark_seen=False, include_expired=False, actor=None ): events = self.get_filtered_events_queryset( start_time=start_time, end_time=end_time, seen=seen, include_expired=include_expired, actor=actor, queryset=Event.objects ) if seen is False and mark_seen: events = Event.objects.filter(id__in=list(events.values_list('id', flat=True))) events.mark_seen(self) return events def followed_by(self, entities): if isinstance(entities, Entity): entities = Entity.objects.filter(id=entities.id) super_entities = EntityRelationship.objects.filter( sub_entity__in=entities).values_list('super_entity') followed_by = Entity.objects.filter( Q(id__in=entities) | Q(id__in=super_entities)) return followed_by def followers_of(self, entities): if isinstance(entities, Entity): entities = Entity.objects.filter(id=entities.id) sub_entities = EntityRelationship.objects.filter( super_entity__in=entities).values_list('sub_entity') followers_of = Entity.objects.filter( Q(id__in=entities) | Q(id__in=sub_entities)) return followers_of def render(self, events): from entity_event import context_loader context_loader.load_contexts_and_renderers(events, [self]) return {e: e.render(self) for e in events} class Source(models.Model): name = models.CharField(max_length=256, unique=True) display_name = models.CharField(max_length=256) description = models.TextField() group = models.ForeignKey('entity_event.SourceGroup', on_delete=models.CASCADE) def __str__(self): return self.display_name class SourceGroup(models.Model): name = models.CharField(max_length=256, unique=True) display_name = models.CharField(max_length=256) description = models.TextField() def __str__(self): return self.display_name class Unsubscription(models.Model): entity = models.ForeignKey('entity.Entity', on_delete=models.CASCADE) medium = models.ForeignKey('entity_event.Medium', on_delete=models.CASCADE) source = models.ForeignKey('entity_event.Source', on_delete=models.CASCADE)
MIT License
cap-ntu/ml-model-ci
modelci/persistence/service_.py
save
python
def save(model_in: MLModel): if _collection.count_documents( filter=model_in.dict( use_enum_values=True, include={'architecture', 'framework', 'engine', 'version', 'task', 'dataset'} ), limit=1 ): raise ServiceException( f'Model with primary keys architecture={model_in.architecture}, ' f'framework={model_in.framework}, engine={model_in.engine}, version={model_in.version}, ' f'task={model_in.task}, and dataset={model_in.dataset} already exists.' ) weight_id = _fs.put(bytes(model_in.weight), filename=model_in.weight.filename) model = MLModel(**model_in.dict(exclude={'weight'}), weight=weight_id) model.id = _collection.insert_one(model.dict(exclude_none=True, by_alias=True, use_enum_values=True)).inserted_id return model
Register a model into ModelDB and GridFS. `model.id` should be set to `None`, otherwise the function will raise a `ValueError`. Args: model_in (MLModel): model object to be registered. Return: MLModel: Saved ML model object. Raises: BadRequestValueException: If `model.id` is not None. ServiceException: If a model with the same primary keys (architecture, framework, engine, version, task and dataset) already exists.
https://github.com/cap-ntu/ml-model-ci/blob/f77635e469477b640a5c2d9b7ad3fe13374ce59e/modelci/persistence/service_.py#L27-L59
from typing import List import gridfs from bson import ObjectId from fastapi.encoders import jsonable_encoder from modelci.config import db_settings from modelci.experimental.mongo_client import MongoClient from modelci.hub.profile_ import Profiler from modelci.persistence.exceptions import ServiceException from modelci.types.models.mlmodel import MLModel, ModelUpdateSchema _db = MongoClient()[db_settings.mongo_db] _collection = _db['model_d_o'] _fs = gridfs.GridFS(_db)
Apache License 2.0
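A hedged sketch of calling save above; the MLModel instance is assumed to be constructed elsewhere, since its required fields are not shown here.

from modelci.persistence.exceptions import ServiceException

def register(model):             # `model` is assumed to be a populated MLModel instance
    try:
        saved = save(model)      # weight bytes go to GridFS, the document to MongoDB
        return saved.id          # the MongoDB ObjectId assigned on insert
    except ServiceException:
        # A model with the same architecture/framework/engine/version/task/dataset already exists.
        return None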
nervanasystems/neon
neon/transforms/cost.py
MeanSquared.__init__
python
def __init__(self): self.func = lambda y, t: self.be.mean(self.be.square(y - t), axis=0) / 2. self.funcgrad = lambda y, t: (y - t) / y.shape[0]
Define the cost function and its gradient as lambda functions.
https://github.com/nervanasystems/neon/blob/8c3fb8a93b4a89303467b25817c60536542d08bd/neon/transforms/cost.py#L198-L203
from __future__ import division from builtins import str from neon import NervanaObject import numpy as np from collections import Counter from neon import logger as neon_logger class Cost(NervanaObject): def __call__(self, y, t): return self.func(y, t) def bprop(self, y, t): return self.funcgrad(y, t) class CrossEntropyBinary(Cost): def __init__(self, scale=1): self.scale = scale def __call__(self, y, t): assert y.shape == t.shape, "CrossEntropy requires network output shape to match targets" return self.be.sum(self.be.safelog(1 - y) * (t - 1) - self.be.safelog(y) * t, axis=0) def bprop(self, y, t): return self.scale * (y - t) class CrossEntropyMulti(Cost): def __init__(self, scale=1, usebits=False): super(CrossEntropyMulti, self).__init__() self.usebits = usebits self.scale = scale self.logscale = np.float(1. / np.log(2.0) if usebits else 1.) def __call__(self, y, t): if y.shape != t.shape: raise ValueError(( "CrossEntropy requires network output shape to match " "targets. Network output shape was {} and targets shape " "was {}" ).format(y.shape, t.shape)) return (self.be.sum(-t * self.logscale * self.be.safelog(y), axis=0)) def bprop(self, y, t): return self.logscale * self.scale * (y - t) class SumSquared(Cost): def __init__(self): self.func = lambda y, t: self.be.sum(self.be.square(y - t), axis=0) / 2. self.funcgrad = lambda y, t: (y - t) class MeanSquared(Cost):
Apache License 2.0
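A NumPy check of the MeanSquared expressions above: the cost halves the per-output mean squared error, and the gradient divides the residual by the number of outputs, which is the derivative of that cost.

import numpy as np

y = np.array([[0.2], [0.7], [0.1]])   # network output: 3 outputs, batch of 1
t = np.array([[0.0], [1.0], [0.0]])   # targets

cost = np.mean((y - t) ** 2, axis=0) / 2.0   # [0.02333...]
grad = (y - t) / y.shape[0]                  # [[0.0667], [-0.1], [0.0333]]

# The gradient matches d(cost)/dy element-wise: (y - t) / num_outputs
print(cost, grad.ravel())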
aspose-words-cloud/aspose-words-cloud-python
asposewordscloud/models/html_fixed_save_options_data.py
HtmlFixedSaveOptionsData.update_last_saved_time_property
python
def update_last_saved_time_property(self): return self._update_last_saved_time_property
Gets the update_last_saved_time_property of this HtmlFixedSaveOptionsData. # noqa: E501 Gets or sets a value indicating whether the Aspose.Words.Properties.BuiltInDocumentProperties.LastSavedTime property is updated before saving. # noqa: E501 :return: The update_last_saved_time_property of this HtmlFixedSaveOptionsData. # noqa: E501 :rtype: bool
https://github.com/aspose-words-cloud/aspose-words-cloud-python/blob/abf8fccfed40aa2b09c6cdcaf3f2723e1f412d85/asposewordscloud/models/html_fixed_save_options_data.py#L510-L518
import pprint import re import datetime import six import json class HtmlFixedSaveOptionsData(object): """ Attributes: swagger_types (dict): The key is attribute name and the value is attribute type. attribute_map (dict): The key is attribute name and the value is json key in definition. """ swagger_types = { 'allow_embedding_post_script_fonts': 'bool', 'custom_time_zone_info_data': 'TimeZoneInfoData', 'dml3_d_effects_rendering_mode': 'str', 'dml_effects_rendering_mode': 'str', 'dml_rendering_mode': 'str', 'file_name': 'str', 'flat_opc_xml_mapping_only': 'bool', 'iml_rendering_mode': 'str', 'save_format': 'str', 'update_created_time_property': 'bool', 'update_fields': 'bool', 'update_last_printed_property': 'bool', 'update_last_saved_time_property': 'bool', 'update_sdt_content': 'bool', 'zip_output': 'bool', 'color_mode': 'str', 'jpeg_quality': 'int', 'metafile_rendering_options': 'MetafileRenderingOptionsData', 'numeral_format': 'str', 'optimize_output': 'bool', 'page_count': 'int', 'page_index': 'int', 'css_class_names_prefix': 'str', 'encoding': 'str', 'export_embedded_css': 'bool', 'export_embedded_fonts': 'bool', 'export_embedded_images': 'bool', 'export_form_fields': 'bool', 'font_format': 'str', 'page_horizontal_alignment': 'str', 'page_margins': 'float', 'resources_folder': 'str', 'resources_folder_alias': 'str', 'save_font_face_css_separately': 'bool', 'show_page_border': 'bool', 'use_target_machine_fonts': 'bool' } attribute_map = { 'allow_embedding_post_script_fonts': 'AllowEmbeddingPostScriptFonts', 'custom_time_zone_info_data': 'CustomTimeZoneInfoData', 'dml3_d_effects_rendering_mode': 'Dml3DEffectsRenderingMode', 'dml_effects_rendering_mode': 'DmlEffectsRenderingMode', 'dml_rendering_mode': 'DmlRenderingMode', 'file_name': 'FileName', 'flat_opc_xml_mapping_only': 'FlatOpcXmlMappingOnly', 'iml_rendering_mode': 'ImlRenderingMode', 'save_format': 'SaveFormat', 'update_created_time_property': 'UpdateCreatedTimeProperty', 'update_fields': 'UpdateFields', 'update_last_printed_property': 'UpdateLastPrintedProperty', 'update_last_saved_time_property': 'UpdateLastSavedTimeProperty', 'update_sdt_content': 'UpdateSdtContent', 'zip_output': 'ZipOutput', 'color_mode': 'ColorMode', 'jpeg_quality': 'JpegQuality', 'metafile_rendering_options': 'MetafileRenderingOptions', 'numeral_format': 'NumeralFormat', 'optimize_output': 'OptimizeOutput', 'page_count': 'PageCount', 'page_index': 'PageIndex', 'css_class_names_prefix': 'CssClassNamesPrefix', 'encoding': 'Encoding', 'export_embedded_css': 'ExportEmbeddedCss', 'export_embedded_fonts': 'ExportEmbeddedFonts', 'export_embedded_images': 'ExportEmbeddedImages', 'export_form_fields': 'ExportFormFields', 'font_format': 'FontFormat', 'page_horizontal_alignment': 'PageHorizontalAlignment', 'page_margins': 'PageMargins', 'resources_folder': 'ResourcesFolder', 'resources_folder_alias': 'ResourcesFolderAlias', 'save_font_face_css_separately': 'SaveFontFaceCssSeparately', 'show_page_border': 'ShowPageBorder', 'use_target_machine_fonts': 'UseTargetMachineFonts' } def __init__(self, allow_embedding_post_script_fonts=None, custom_time_zone_info_data=None, dml3_d_effects_rendering_mode=None, dml_effects_rendering_mode=None, dml_rendering_mode=None, file_name=None, flat_opc_xml_mapping_only=None, iml_rendering_mode=None, save_format=None, update_created_time_property=None, update_fields=None, update_last_printed_property=None, update_last_saved_time_property=None, update_sdt_content=None, zip_output=None, color_mode=None, jpeg_quality=None, 
metafile_rendering_options=None, numeral_format=None, optimize_output=None, page_count=None, page_index=None, css_class_names_prefix=None, encoding=None, export_embedded_css=None, export_embedded_fonts=None, export_embedded_images=None, export_form_fields=None, font_format=None, page_horizontal_alignment=None, page_margins=None, resources_folder=None, resources_folder_alias=None, save_font_face_css_separately=None, show_page_border=None, use_target_machine_fonts=None): self._allow_embedding_post_script_fonts = None self._custom_time_zone_info_data = None self._dml3_d_effects_rendering_mode = None self._dml_effects_rendering_mode = None self._dml_rendering_mode = None self._file_name = None self._flat_opc_xml_mapping_only = None self._iml_rendering_mode = None self._save_format = None self._update_created_time_property = None self._update_fields = None self._update_last_printed_property = None self._update_last_saved_time_property = None self._update_sdt_content = None self._zip_output = None self._color_mode = None self._jpeg_quality = None self._metafile_rendering_options = None self._numeral_format = None self._optimize_output = None self._page_count = None self._page_index = None self._css_class_names_prefix = None self._encoding = None self._export_embedded_css = None self._export_embedded_fonts = None self._export_embedded_images = None self._export_form_fields = None self._font_format = None self._page_horizontal_alignment = None self._page_margins = None self._resources_folder = None self._resources_folder_alias = None self._save_font_face_css_separately = None self._show_page_border = None self._use_target_machine_fonts = None self.discriminator = None if allow_embedding_post_script_fonts is not None: self.allow_embedding_post_script_fonts = allow_embedding_post_script_fonts if custom_time_zone_info_data is not None: self.custom_time_zone_info_data = custom_time_zone_info_data if dml3_d_effects_rendering_mode is not None: self.dml3_d_effects_rendering_mode = dml3_d_effects_rendering_mode if dml_effects_rendering_mode is not None: self.dml_effects_rendering_mode = dml_effects_rendering_mode if dml_rendering_mode is not None: self.dml_rendering_mode = dml_rendering_mode if file_name is not None: self.file_name = file_name if flat_opc_xml_mapping_only is not None: self.flat_opc_xml_mapping_only = flat_opc_xml_mapping_only if iml_rendering_mode is not None: self.iml_rendering_mode = iml_rendering_mode if save_format is not None: self.save_format = save_format if update_created_time_property is not None: self.update_created_time_property = update_created_time_property if update_fields is not None: self.update_fields = update_fields if update_last_printed_property is not None: self.update_last_printed_property = update_last_printed_property if update_last_saved_time_property is not None: self.update_last_saved_time_property = update_last_saved_time_property if update_sdt_content is not None: self.update_sdt_content = update_sdt_content if zip_output is not None: self.zip_output = zip_output if color_mode is not None: self.color_mode = color_mode if jpeg_quality is not None: self.jpeg_quality = jpeg_quality if metafile_rendering_options is not None: self.metafile_rendering_options = metafile_rendering_options if numeral_format is not None: self.numeral_format = numeral_format if optimize_output is not None: self.optimize_output = optimize_output if page_count is not None: self.page_count = page_count if page_index is not None: self.page_index = page_index if css_class_names_prefix is not 
None: self.css_class_names_prefix = css_class_names_prefix if encoding is not None: self.encoding = encoding if export_embedded_css is not None: self.export_embedded_css = export_embedded_css if export_embedded_fonts is not None: self.export_embedded_fonts = export_embedded_fonts if export_embedded_images is not None: self.export_embedded_images = export_embedded_images if export_form_fields is not None: self.export_form_fields = export_form_fields if font_format is not None: self.font_format = font_format if page_horizontal_alignment is not None: self.page_horizontal_alignment = page_horizontal_alignment if page_margins is not None: self.page_margins = page_margins if resources_folder is not None: self.resources_folder = resources_folder if resources_folder_alias is not None: self.resources_folder_alias = resources_folder_alias if save_font_face_css_separately is not None: self.save_font_face_css_separately = save_font_face_css_separately if show_page_border is not None: self.show_page_border = show_page_border if use_target_machine_fonts is not None: self.use_target_machine_fonts = use_target_machine_fonts @property def allow_embedding_post_script_fonts(self): return self._allow_embedding_post_script_fonts @allow_embedding_post_script_fonts.setter def allow_embedding_post_script_fonts(self, allow_embedding_post_script_fonts): self._allow_embedding_post_script_fonts = allow_embedding_post_script_fonts @property def custom_time_zone_info_data(self): return self._custom_time_zone_info_data @custom_time_zone_info_data.setter def custom_time_zone_info_data(self, custom_time_zone_info_data): self._custom_time_zone_info_data = custom_time_zone_info_data @property def dml3_d_effects_rendering_mode(self): return self._dml3_d_effects_rendering_mode @dml3_d_effects_rendering_mode.setter def dml3_d_effects_rendering_mode(self, dml3_d_effects_rendering_mode): allowed_values = ["Basic", "Advanced"] if not dml3_d_effects_rendering_mode.isdigit(): if dml3_d_effects_rendering_mode not in allowed_values: raise ValueError( "Invalid value for `dml3_d_effects_rendering_mode` ({0}), must be one of {1}" .format(dml3_d_effects_rendering_mode, allowed_values)) self._dml3_d_effects_rendering_mode = dml3_d_effects_rendering_mode else: self._dml3_d_effects_rendering_mode = allowed_values[int(dml3_d_effects_rendering_mode) if six.PY3 else long(dml3_d_effects_rendering_mode)] @property def dml_effects_rendering_mode(self): return self._dml_effects_rendering_mode @dml_effects_rendering_mode.setter def dml_effects_rendering_mode(self, dml_effects_rendering_mode): self._dml_effects_rendering_mode = dml_effects_rendering_mode @property def dml_rendering_mode(self): return self._dml_rendering_mode @dml_rendering_mode.setter def dml_rendering_mode(self, dml_rendering_mode): self._dml_rendering_mode = dml_rendering_mode @property def file_name(self): return self._file_name @file_name.setter def file_name(self, file_name): self._file_name = file_name @property def flat_opc_xml_mapping_only(self): return self._flat_opc_xml_mapping_only @flat_opc_xml_mapping_only.setter def flat_opc_xml_mapping_only(self, flat_opc_xml_mapping_only): self._flat_opc_xml_mapping_only = flat_opc_xml_mapping_only @property def iml_rendering_mode(self): return self._iml_rendering_mode @iml_rendering_mode.setter def iml_rendering_mode(self, iml_rendering_mode): self._iml_rendering_mode = iml_rendering_mode @property def save_format(self): return self._save_format @save_format.setter def save_format(self, save_format): self._save_format = save_format 
@property def update_created_time_property(self): return self._update_created_time_property @update_created_time_property.setter def update_created_time_property(self, update_created_time_property): self._update_created_time_property = update_created_time_property @property def update_fields(self): return self._update_fields @update_fields.setter def update_fields(self, update_fields): self._update_fields = update_fields @property def update_last_printed_property(self): return self._update_last_printed_property @update_last_printed_property.setter def update_last_printed_property(self, update_last_printed_property): self._update_last_printed_property = update_last_printed_property @property
MIT License
diptochakrabarty/flask-online-store
venv/lib/python3.6/site-packages/alembic/ddl/impl.py
DefaultImpl.requires_recreate_in_batch
python
def requires_recreate_in_batch(self, batch_op): return False
Return True if the given :class:`.BatchOperationsImpl` would need the table to be recreated and copied in order to proceed. Normally, only returns True on SQLite when operations other than add_column are present.
https://github.com/diptochakrabarty/flask-online-store/blob/74db206565aee1920a508d009a4b866d9a848c6a/venv/lib/python3.6/site-packages/alembic/ddl/impl.py#L84-L93
from collections import namedtuple import re from sqlalchemy import cast from sqlalchemy import schema from sqlalchemy import text from . import base from .. import util from ..util import sqla_compat from ..util.compat import string_types from ..util.compat import text_type from ..util.compat import with_metaclass class ImplMeta(type): def __init__(cls, classname, bases, dict_): newtype = type.__init__(cls, classname, bases, dict_) if "__dialect__" in dict_: _impls[dict_["__dialect__"]] = cls return newtype _impls = {} Params = namedtuple("Params", ["token0", "tokens", "args", "kwargs"]) class DefaultImpl(with_metaclass(ImplMeta)): __dialect__ = "default" transactional_ddl = False command_terminator = ";" type_synonyms = ({"NUMERIC", "DECIMAL"},) type_arg_extract = () def __init__( self, dialect, connection, as_sql, transactional_ddl, output_buffer, context_opts, ): self.dialect = dialect self.connection = connection self.as_sql = as_sql self.literal_binds = context_opts.get("literal_binds", False) self.output_buffer = output_buffer self.memo = {} self.context_opts = context_opts if transactional_ddl is not None: self.transactional_ddl = transactional_ddl if self.literal_binds: if not self.as_sql: raise util.CommandError( "Can't use literal_binds setting without as_sql mode" ) @classmethod def get_by_dialect(cls, dialect): return _impls[dialect.name] def static_output(self, text): self.output_buffer.write(text_type(text + "\n\n")) self.output_buffer.flush()
MIT License
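A hedged sketch of where requires_recreate_in_batch above matters; the table and column names are hypothetical. Inside an Alembic batch migration, the SQLite implementation overrides this method to trigger the copy-and-recreate strategy, while the default shown here keeps in-place ALTERs.

from alembic import op
import sqlalchemy as sa

def upgrade():
    with op.batch_alter_table('accounts') as batch_op:
        # add_column alone leaves requires_recreate_in_batch() == False (even on SQLite,
        # per the docstring above), so the table is altered in place rather than copied.
        batch_op.add_column(sa.Column('last_login', sa.DateTime(), nullable=True))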
lyft/cartography
cartography/intel/azure/sql.py
_load_server_dns_aliases
python
def _load_server_dns_aliases( neo4j_session: neo4j.Session, dns_aliases: List[Dict], update_tag: int, ) -> None: ingest_dns_aliases = """ UNWIND {dns_aliases_list} as dns_alias MERGE (alias:AzureServerDNSAlias{id: dns_alias.id}) ON CREATE SET alias.firstseen = timestamp() SET alias.name = dns_alias.name, alias.dnsrecord = dns_alias.azure_dns_record, alias.lastupdated = {azure_update_tag} WITH alias, dns_alias MATCH (s:AzureSQLServer{id: dns_alias.server_id}) MERGE (s)-[r:USED_BY]->(alias) ON CREATE SET r.firstseen = timestamp() SET r.lastupdated = {azure_update_tag} """ neo4j_session.run( ingest_dns_aliases, dns_aliases_list=dns_aliases, azure_update_tag=update_tag, )
Ingest the DNS Alias details into neo4j.
https://github.com/lyft/cartography/blob/e45803027b209322db286f363540eb058a831d1e/cartography/intel/azure/sql.py#L385-L409
import logging from typing import Any from typing import Dict from typing import Generator from typing import List from typing import Tuple import neo4j from azure.core.exceptions import ClientAuthenticationError from azure.core.exceptions import HttpResponseError from azure.core.exceptions import ResourceNotFoundError from azure.mgmt.sql import SqlManagementClient from azure.mgmt.sql.models import SecurityAlertPolicyName from azure.mgmt.sql.models import TransparentDataEncryptionName from msrestazure.azure_exceptions import CloudError from .util.credentials import Credentials from cartography.util import run_cleanup_job from cartography.util import timeit logger = logging.getLogger(__name__) @timeit def get_client(credentials: Credentials, subscription_id: str) -> SqlManagementClient: client = SqlManagementClient(credentials, subscription_id) return client @timeit def get_server_list(credentials: Credentials, subscription_id: str) -> List[Dict]: try: client = get_client(credentials, subscription_id) server_list = list(map(lambda x: x.as_dict(), client.servers.list())) except ClientAuthenticationError as e: logger.warning(f"Client Authentication Error while retrieving servers - {e}") return [] except ResourceNotFoundError as e: logger.warning(f"Server resource not found error - {e}") return [] except HttpResponseError as e: logger.warning(f"Error while retrieving servers - {e}") return [] for server in server_list: x = server['id'].split('/') server['resourceGroup'] = x[x.index('resourceGroups') + 1] return server_list @timeit def load_server_data( neo4j_session: neo4j.Session, subscription_id: str, server_list: List[Dict], azure_update_tag: int, ) -> None: ingest_server = """ UNWIND {server_list} as server MERGE (s:AzureSQLServer{id: server.id}) ON CREATE SET s.firstseen = timestamp(), s.resourcegroup = server.resourceGroup, s.location = server.location SET s.lastupdated = {azure_update_tag}, s.name = server.name, s.kind = server.kind, s.state = server.state, s.version = server.version WITH s MATCH (owner:AzureSubscription{id: {AZURE_SUBSCRIPTION_ID}}) MERGE (owner)-[r:RESOURCE]->(s) ON CREATE SET r.firstseen = timestamp() SET r.lastupdated = {azure_update_tag} """ neo4j_session.run( ingest_server, server_list=server_list, AZURE_SUBSCRIPTION_ID=subscription_id, azure_update_tag=azure_update_tag, ) @timeit def sync_server_details( neo4j_session: neo4j.Session, credentials: Credentials, subscription_id: str, server_list: List[Dict], sync_tag: int, ) -> None: details = get_server_details(credentials, subscription_id, server_list) load_server_details(neo4j_session, credentials, subscription_id, details, sync_tag) @timeit def get_server_details( credentials: Credentials, subscription_id: str, server_list: List[Dict], ) -> Generator[Any, Any, Any]: for server in server_list: dns_alias = get_dns_aliases(credentials, subscription_id, server) ad_admins = get_ad_admins(credentials, subscription_id, server) r_databases = get_recoverable_databases(credentials, subscription_id, server) rd_databases = get_restorable_dropped_databases(credentials, subscription_id, server) fgs = get_failover_groups(credentials, subscription_id, server) elastic_pools = get_elastic_pools(credentials, subscription_id, server) databases = get_databases(credentials, subscription_id, server) yield server['id'], server['name'], server[ 'resourceGroup' ], dns_alias, ad_admins, r_databases, rd_databases, fgs, elastic_pools, databases @timeit def get_dns_aliases(credentials: Credentials, subscription_id: str, server: Dict) -> 
List[Dict]: try: client = get_client(credentials, subscription_id) dns_aliases = list( map( lambda x: x.as_dict(), client.server_dns_aliases.list_by_server(server['resourceGroup'], server['name']), ), ) except ClientAuthenticationError as e: logger.warning(f"Client Authentication Error while retrieving DNS Aliases - {e}") return [] except ResourceNotFoundError as e: logger.warning(f"DNS Alias resource not found error - {e}") return [] except HttpResponseError as e: logger.warning(f"Error while retrieving Azure Server DNS Aliases - {e}") return [] return dns_aliases @timeit def get_ad_admins(credentials: Credentials, subscription_id: str, server: Dict) -> List[Dict]: try: client = get_client(credentials, subscription_id) ad_admins = list( map( lambda x: x.as_dict(), client.server_azure_ad_administrators.list_by_server( server['resourceGroup'], server['name'], ), ), ) except ClientAuthenticationError as e: logger.warning(f"Client Authentication Error while retrieving Azure AD Administrators - {e}") return [] except ResourceNotFoundError as e: logger.warning(f"Azure AD Administrators resource not found error - {e}") return [] except HttpResponseError as e: logger.warning(f"Error while retrieving server azure AD Administrators - {e}") return [] return ad_admins @timeit def get_recoverable_databases(credentials: Credentials, subscription_id: str, server: Dict) -> List[Dict]: try: client = get_client(credentials, subscription_id) recoverable_databases = list( map( lambda x: x.as_dict(), client.recoverable_databases.list_by_server( server['resourceGroup'], server['name'], ), ), ) except CloudError: return [] except ClientAuthenticationError as e: logger.warning(f"Client Authentication Error while retrieving recoverable databases - {e}") return [] except ResourceNotFoundError as e: logger.warning(f"Recoverable databases resource not found error - {e}") return [] except HttpResponseError as e: logger.warning(f"Error while retrieving recoverable databases - {e}") return [] return recoverable_databases @timeit def get_restorable_dropped_databases(credentials: Credentials, subscription_id: str, server: Dict) -> List[Dict]: try: client = get_client(credentials, subscription_id) restorable_dropped_databases = list( map( lambda x: x.as_dict(), client.restorable_dropped_databases.list_by_server( server['resourceGroup'], server['name'], ), ), ) except ClientAuthenticationError as e: logger.warning(f"Client Authentication Error while retrieving Restorable Dropped Databases - {e}") return [] except ResourceNotFoundError as e: logger.warning(f"Restorable Dropped Databases resource not found error - {e}") return [] except HttpResponseError as e: logger.warning(f"Error while retrieving restorable dropped databases - {e}") return [] return restorable_dropped_databases @timeit def get_failover_groups(credentials: Credentials, subscription_id: str, server: Dict) -> List[Dict]: try: client = get_client(credentials, subscription_id) failover_groups = list( map(lambda x: x.as_dict(), client.failover_groups.list_by_server(server['resourceGroup'], server['name'])), ) except ClientAuthenticationError as e: logger.warning(f"Client Authentication Error while retrieving Failover groups - {e}") return [] except ResourceNotFoundError as e: logger.warning(f"Failover groups resource not found error - {e}") return [] except HttpResponseError as e: logger.warning(f"Error while retrieving failover groups - {e}") return [] return failover_groups @timeit def get_elastic_pools(credentials: Credentials, subscription_id: str, server: 
Dict) -> List[Dict]: try: client = get_client(credentials, subscription_id) elastic_pools = list( map(lambda x: x.as_dict(), client.elastic_pools.list_by_server(server['resourceGroup'], server['name'])), ) except ClientAuthenticationError as e: logger.warning(f"Client Authentication Error while retrieving Elastic Pools - {e}") return [] except ResourceNotFoundError as e: logger.warning(f"Elastic Pools resource not found error - {e}") return [] except HttpResponseError as e: logger.warning(f"Error while retrieving elastic pools - {e}") return [] return elastic_pools @timeit def get_databases(credentials: Credentials, subscription_id: str, server: Dict) -> List[Dict]: try: client = get_client(credentials, subscription_id) databases = list( map(lambda x: x.as_dict(), client.databases.list_by_server(server['resourceGroup'], server['name'])), ) except ClientAuthenticationError as e: logger.warning(f"Client Authentication Error while retrieving SQL databases - {e}") return [] except ResourceNotFoundError as e: logger.warning(f"SQL databases resource not found error - {e}") return [] except HttpResponseError as e: logger.warning(f"Error while retrieving databases - {e}") return [] return databases @timeit def load_server_details( neo4j_session: neo4j.Session, credentials: Credentials, subscription_id: str, details: List[Tuple[Any, Any, Any, Any, Any, Any, Any, Any, Any, Any]], update_tag: int, ) -> None: dns_aliases = [] ad_admins = [] recoverable_databases = [] restorable_dropped_databases = [] failover_groups = [] elastic_pools = [] databases = [] for server_id, name, rg, dns_alias, ad_admin, r_database, rd_database, fg, elastic_pool, database in details: if len(dns_alias) > 0: for alias in dns_alias: alias['server_name'] = name alias['server_id'] = server_id dns_aliases.append(alias) if len(ad_admin) > 0: for admin in ad_admin: admin['server_name'] = name admin['server_id'] = server_id ad_admins.append(admin) if len(r_database) > 0: for rdb in r_database: rdb['server_name'] = name rdb['server_id'] = server_id recoverable_databases.append(rdb) if len(rd_database) > 0: for rddb in rd_database: rddb['server_name'] = name rddb['server_id'] = server_id restorable_dropped_databases.append(rddb) if len(fg) > 0: for group in fg: group['server_name'] = name group['server_id'] = server_id failover_groups.append(group) if len(elastic_pool) > 0: for pool in elastic_pool: pool['server_name'] = name pool['server_id'] = server_id elastic_pools.append(pool) if len(database) > 0: for db in database: db['server_name'] = name db['server_id'] = server_id db['resource_group_name'] = rg databases.append(db) _load_server_dns_aliases(neo4j_session, dns_aliases, update_tag) _load_server_ad_admins(neo4j_session, ad_admins, update_tag) _load_recoverable_databases(neo4j_session, recoverable_databases, update_tag) _load_restorable_dropped_databases(neo4j_session, restorable_dropped_databases, update_tag) _load_failover_groups(neo4j_session, failover_groups, update_tag) _load_elastic_pools(neo4j_session, elastic_pools, update_tag) _load_databases(neo4j_session, databases, update_tag) sync_database_details(neo4j_session, credentials, subscription_id, databases, update_tag) @timeit
Apache License 2.0
onsdigital/eq-survey-runner
app/jinja_filters.py
format_date_range_no_repeated_month_year
python
def format_date_range_no_repeated_month_year(context, start_date, end_date, date_format='d MMMM yyyy'): start_datetime = convert_to_datetime(start_date) end_datetime = convert_to_datetime(end_date) first_date_format = date_format if start_datetime.year == end_datetime.year: first_date_format = date_format.replace('yyyy', '') if start_datetime.month == end_datetime.month: first_date_format = first_date_format.replace('MMMM', '') first_date_format = first_date_format.replace('  ', ' ').strip() if not first_date_format: first_date_format = date_format output = flask_babel.gettext('%(from_date)s to %(to_date)s', from_date=format_date_custom(context, start_date, first_date_format), to_date=format_date_custom(context, end_date, date_format)) return mark_safe(context, output)
Format a date range, ensuring months and years are not repeated. If the dates are in the same year, the first year (YYYY) will be removed. If the dates are in the same month and year, the first year (YYYY) and month will be removed. e.g. Friday 1 to Sunday 3 October or Thursday 30 September to Sunday 3 October Assumptions: - The date format uses space as a separator - The date format can have leading and trailing whitespace stripped :param (jinja2.nodes.EvalContext) context: Evaluation context. :param (str) start_date: The start date of the range, as a date string. :param (str) end_date: The end date of the range, as a date string. :param (str) date_format: The date format that should be used for output. MMMM, YYYY will be removed from the first date if necessary. :returns (str): The formatted range.
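A minimal usage sketch of the behaviour described above, assuming an active Flask/Babel app context with the default English locale and a Jinja evaluation context named context (both assumptions, not shown here); format_date_custom wraps each date in a <span class='date'> element, omitted below for readability.

# Same month and year: the first date keeps only its day.
format_date_range_no_repeated_month_year(context, '2018-10-01', '2018-10-03')
# -> "1 to 3 October 2018"

# Same year, different months: the first date drops only the year.
format_date_range_no_repeated_month_year(context, '2018-09-30', '2018-10-03')
# -> "30 September to 3 October 2018"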
https://github.com/onsdigital/eq-survey-runner/blob/17c2e6d09a7f23ec1891521c68477ed7a3043b4f/app/jinja_filters.py#L250-L292
import re import string from datetime import datetime import flask import flask_babel from babel import units, numbers from dateutil import relativedelta, tz from jinja2 import Markup, contextfunction, escape, evalcontextfilter, evalcontextfunction, Undefined from jinja2.exceptions import UndefinedError from app.questionnaire.rules import convert_to_datetime blueprint = flask.Blueprint('filters', __name__) @blueprint.app_template_filter() def format_number(value): if value or value == 0: return numbers.format_decimal(value, locale=flask_babel.get_locale()) return '' @evalcontextfunction def format_currency(context, value, currency='GBP'): currency_value = get_formatted_currency(value, currency) result = "<span class='date'>{currency_value}</span>".format(currency_value=currency_value) return mark_safe(context, result) def get_formatted_currency(value, currency='GBP'): if value or value == 0: return numbers.format_currency(number=value, currency=currency, locale=flask_babel.get_locale()) return '' @blueprint.app_template_filter() def get_currency_symbol(currency='GBP'): return numbers.get_currency_symbol(currency, locale=flask_babel.get_locale()) @blueprint.app_template_filter() def format_percentage(value): return '{}%'.format(value) @blueprint.app_template_filter() def format_address_list(first_address=None, second_address=None): if all(isinstance(field, Undefined) for field in first_address) or all(field == '' for field in first_address): address = second_address else: address = first_address address_list = concatenated_list(list_items=address, delimiter='<br />') if not address_list: raise Exception('No valid address passed to format_address_list filter') return address_list def format_unit(unit, value, length='short'): return units.format_unit(value=value, measurement_unit=unit, length=length, locale=flask_babel.get_locale()) def format_unit_input_label(unit, unit_length='short'): if unit_length == 'long': return units.format_unit(value=2, measurement_unit=unit, length=unit_length, locale=flask_babel.get_locale()).replace('2 ', '') return units.format_unit(value='', measurement_unit=unit, length=unit_length, locale=flask_babel.get_locale()).strip() def format_duration(value): parts = [] if 'years' in value and (value['years'] > 0 or len(value) == 1): parts.append(flask_babel.ngettext('%(num)s year', '%(num)s years', value['years'])) if 'months' in value and (value['months'] > 0 or len(value) == 1 or ('years' in value and value['years'] == 0)): parts.append(flask_babel.ngettext('%(num)s month', '%(num)s months', value['months'])) return ' '.join(parts) def as_london_tz(value): return value.replace(tzinfo=tz.gettz('UTC')).astimezone(tz.gettz('Europe/London')) @evalcontextfilter @blueprint.app_template_filter() def format_multilined_string(context, value): escaped_value = escape(value) new_line_regex = r'(?:\r\n|\r|\n)+' value_with_line_break_tag = re.sub(new_line_regex, '<br>', escaped_value) result = '{}'.format(value_with_line_break_tag) return mark_safe(context, result) @evalcontextfunction @blueprint.app_template_filter() def get_current_date(context): now = as_london_tz(datetime.utcnow()).strftime('%-d %B %Y') result = "<span class='date'>{date}</span>".format(date=now) return mark_safe(context, result) @evalcontextfilter @blueprint.app_template_filter() def format_date(context, value): value = value[0] if isinstance(value, list) else value if not isinstance(value, str): return value date_format = 'd MMMM yyyy' if value and re.match(r'\d{4}-\d{2}$', value): date_format = 'MMMM yyyy' 
if value and re.match(r'\d{4}$', value): date_format = 'yyyy' date_to_format = convert_to_datetime(value).date() result = "<span class='date'>{date}</span>".format( date=flask_babel.format_date(date_to_format, format=date_format)) return mark_safe(context, result) @evalcontextfilter @blueprint.app_template_filter() def format_date_custom(context, value, date_format='EEEE d MMMM yyyy'): london_date = datetime.strptime(value, '%Y-%m-%d') result = "<span class='date'>{date}</span>".format(date=flask_babel.format_datetime(london_date, format=date_format)) return mark_safe(context, result) @evalcontextfilter @blueprint.app_template_filter() def format_datetime(context, value): london_date_time = datetime.strptime(value, '%Y-%m-%dT%H:%M:%S.%f') london_date = london_date_time.date() formatted_date = flask_babel.format_date(london_date, format='d MMMM yyyy') formatted_time = flask_babel.format_time(london_date_time, format='HH:mm') result = "<span class='date'>{date}</span>".format( date=flask_babel.gettext('%(date)s at %(time)s', date=formatted_date, time=formatted_time), ) return mark_safe(context, result) @evalcontextfunction @blueprint.app_template_filter() def format_conditional_date(context, *dates): first_valid_date = _get_first_non_empty_item(dates) return format_date(context, first_valid_date) @blueprint.app_template_filter() def calculate_offset_from_weekday_in_last_whole_week(input_datetime, offset, day_of_week='MO'): if input_datetime: parsed_datetime = datetime.strptime(input_datetime.split('T')[0], '%Y-%m-%d') else: parsed_datetime = datetime.utcnow() offset_days = offset.get('days', 0) offset_weeks = offset.get('weeks', 0) offset_years = offset.get('years', 0) offset = relativedelta.relativedelta(days=offset_days, weeks=offset_weeks, years=offset_years) weekdays = ('MO', 'TU', 'WE', 'TH', 'FR', 'SA', 'SU') try: day_of_week_index = weekdays.index(day_of_week) except ValueError: raise Exception('Invalid day of week passed to calculate_offset_from_weekday_in_last_whole_week') day_of_week_offset = relativedelta.relativedelta(days=(-parsed_datetime.weekday() - (7 - day_of_week_index))) day_of_last_week = parsed_datetime + day_of_week_offset offset_output = day_of_last_week + offset return datetime.strftime(offset_output, '%Y-%m-%d') @blueprint.app_template_filter() def calculate_years_difference(from_str, to_str): if from_str is None or to_str is None: raise Exception('Valid date(s) not passed to calculate_years_difference filter') to_date = datetime.now() if to_str == 'now' else convert_to_datetime(to_str) from_date = convert_to_datetime(from_str) difference = relativedelta.relativedelta(to_date, from_date) year_string = flask_babel.ngettext('%(num)s year', '%(num)s years', difference.years) return year_string @evalcontextfunction def format_date_range(context, start_date, end_date=None): if end_date: result = flask_babel.gettext('%(from_date)s to %(to_date)s', from_date=format_date(context, start_date), to_date=format_date(context, end_date)) else: result = format_date(context, start_date) return mark_safe(context, result) @evalcontextfunction
MIT License
pypa/pipenv
pipenv/vendor/jinja2/bccache.py
BytecodeCache.get_source_checksum
python
def get_source_checksum(self, source: str) -> str: return sha1(source.encode("utf-8")).hexdigest()
Returns a checksum for the source.
https://github.com/pypa/pipenv/blob/9378cb515189d11841a4de49a5ac3c01fca509ec/pipenv/vendor/jinja2/bccache.py#L161-L163
import errno import fnmatch import marshal import os import pickle import stat import sys import tempfile import typing as t from hashlib import sha1 from io import BytesIO from types import CodeType if t.TYPE_CHECKING: import typing_extensions as te from .environment import Environment class _MemcachedClient(te.Protocol): def get(self, key: str) -> bytes: ... def set(self, key: str, value: bytes, timeout: t.Optional[int] = None) -> None: ... bc_version = 5 bc_magic = ( b"j2" + pickle.dumps(bc_version, 2) + pickle.dumps((sys.version_info[0] << 24) | sys.version_info[1], 2) ) class Bucket: def __init__(self, environment: "Environment", key: str, checksum: str) -> None: self.environment = environment self.key = key self.checksum = checksum self.reset() def reset(self) -> None: self.code: t.Optional[CodeType] = None def load_bytecode(self, f: t.BinaryIO) -> None: magic = f.read(len(bc_magic)) if magic != bc_magic: self.reset() return checksum = pickle.load(f) if self.checksum != checksum: self.reset() return try: self.code = marshal.load(f) except (EOFError, ValueError, TypeError): self.reset() return def write_bytecode(self, f: t.BinaryIO) -> None: if self.code is None: raise TypeError("can't write empty bucket") f.write(bc_magic) pickle.dump(self.checksum, f, 2) marshal.dump(self.code, f) def bytecode_from_string(self, string: bytes) -> None: self.load_bytecode(BytesIO(string)) def bytecode_to_string(self) -> bytes: out = BytesIO() self.write_bytecode(out) return out.getvalue() class BytecodeCache: def load_bytecode(self, bucket: Bucket) -> None: raise NotImplementedError() def dump_bytecode(self, bucket: Bucket) -> None: raise NotImplementedError() def clear(self) -> None: def get_cache_key( self, name: str, filename: t.Optional[t.Union[str]] = None ) -> str: hash = sha1(name.encode("utf-8")) if filename is not None: hash.update(f"|{filename}".encode("utf-8")) return hash.hexdigest()
MIT License
mrloghmani/rcfusion
code/imgaug/imgaug.py
imresize_single_image
python
def imresize_single_image(image, sizes, interpolation=None): grayscale = False if image.ndim == 2: grayscale = True image = image[:, :, np.newaxis] assert len(image.shape) == 3, image.shape rs = imresize_many_images(image[np.newaxis, :, :, :], sizes, interpolation=interpolation) if grayscale: return np.squeeze(rs[0, :, :, 0]) else: return rs[0, ...]
Resizes a single image. Parameters ---------- image : (H,W,C) ndarray or (H,W) ndarray Array of the image to resize. Expected to usually be of dtype uint8. sizes : iterable of two ints See `imresize_many_images()`. interpolation : None or string or int, optional(default=None) See `imresize_many_images()`. Returns ------- out : (H',W',C) ndarray or (H',W') ndarray The resized image.
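A short usage sketch of the shape handling described above; the arrays and target sizes are made up for illustration:

import numpy as np

gray = np.zeros((32, 32), dtype=np.uint8)       # (H, W) grayscale input
color = np.zeros((32, 32, 3), dtype=np.uint8)   # (H, W, C) color input

resized_gray = imresize_single_image(gray, (64, 64))
resized_color = imresize_single_image(color, (64, 64), interpolation="cubic")

print(resized_gray.shape)   # (64, 64)    -- the temporary channel axis is squeezed away again
print(resized_color.shape)  # (64, 64, 3)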
https://github.com/mrloghmani/rcfusion/blob/0849b7bc43c76d4ce6538ca4c3a8a7ec6f6c9c08/code/imgaug/imgaug.py#L525-L556
from __future__ import print_function, division, absolute_import from abc import ABCMeta, abstractmethod import random import numpy as np import copy import numbers import cv2 import math from scipy import misc, ndimage import multiprocessing import threading import sys import six import six.moves as sm import os from skimage import draw if sys.version_info[0] == 2: import cPickle as pickle from Queue import Empty as QueueEmpty elif sys.version_info[0] == 3: import pickle from queue import Empty as QueueEmpty xrange = range ALL = "ALL" QUOKKA_FP = os.path.join( os.path.dirname(os.path.abspath(__file__)), "quokka.jpg" ) DEFAULT_FONT_FP = os.path.join( os.path.dirname(os.path.abspath(__file__)), "DejaVuSans.ttf" ) CURRENT_RANDOM_STATE = np.random.RandomState(42) def is_np_array(val): return isinstance(val, np.ndarray) def is_single_integer(val): return isinstance(val, numbers.Integral) def is_single_float(val): return isinstance(val, numbers.Real) and not is_single_integer(val) def is_single_number(val): return is_single_integer(val) or is_single_float(val) def is_iterable(val): return isinstance(val, (tuple, list)) def is_string(val): return isinstance(val, six.string_types) def is_integer_array(val): return is_np_array(val) and issubclass(val.dtype.type, np.integer) def is_callable(val): if sys.version_info[0] == 3 and sys.version_info[1] <= 2: return hasattr(val, '__call__') else: return callable(val) def seed(seedval): CURRENT_RANDOM_STATE.seed(seedval) def current_random_state(): return CURRENT_RANDOM_STATE def new_random_state(seed=None, fully_random=False): if seed is None: if not fully_random: seed = CURRENT_RANDOM_STATE.randint(0, 10**6, 1)[0] return np.random.RandomState(seed) def dummy_random_state(): return np.random.RandomState(1) def copy_random_state(random_state, force_copy=False): if random_state == np.random and not force_copy: return random_state else: rs_copy = dummy_random_state() orig_state = random_state.get_state() rs_copy.set_state(orig_state) return rs_copy def derive_random_state(random_state): return derive_random_states(random_state, n=1)[0] def derive_random_states(random_state, n=1): seed = random_state.randint(0, 10**6, 1)[0] return [ia.new_random_state(seed+i) for i in sm.xrange(n)] def forward_random_state(random_state): random_state.uniform() def quokka(size=None): img = ndimage.imread(QUOKKA_FP, mode="RGB") if size is not None: img = misc.imresize(img, size) return img def quokka_square(size=None): img = ndimage.imread(QUOKKA_FP, mode="RGB") img = img[0:643, 0:643] if size is not None: img = misc.imresize(img, size) return img def angle_between_vectors(v1, v2): v1_u = v1 / np.linalg.norm(v1) v2_u = v2 / np.linalg.norm(v2) return np.arccos(np.clip(np.dot(v1_u, v2_u), -1.0, 1.0)) def draw_text(img, y, x, text, color=[0, 255, 0], size=25): from PIL import Image, ImageDraw, ImageFont assert img.dtype in [np.uint8, np.float32] input_dtype = img.dtype if img.dtype == np.float32: img = img.astype(np.uint8) is_float32 = False for i in range(len(color)): val = color[i] if isinstance(val, float): val = int(val * 255) val = np.clip(val, 0, 255) color[i] = val shape = img.shape img = Image.fromarray(img) font = ImageFont.truetype(DEFAULT_FONT_FP, size) context = ImageDraw.Draw(img) context.text((x, y), text, fill=tuple(color), font=font) img_np = np.asarray(img) img_np.setflags(write=True) if img_np.dtype != input_dtype: img_np = img_np.astype(input_dtype) return img_np def imresize_many_images(images, sizes=None, interpolation=None): s = images.shape assert len(s) == 
4, s nb_images = s[0] im_height, im_width = s[1], s[2] nb_channels = s[3] height, width = sizes[0], sizes[1] if height == im_height and width == im_width: return np.copy(images) ip = interpolation assert ip is None or ip in ["nearest", "linear", "area", "cubic", cv2.INTER_NEAREST, cv2.INTER_LINEAR, cv2.INTER_AREA, cv2.INTER_CUBIC] if ip is None: if height > im_height or width > im_width: ip = cv2.INTER_AREA else: ip = cv2.INTER_LINEAR elif ip in ["nearest", cv2.INTER_NEAREST]: ip = cv2.INTER_NEAREST elif ip in ["linear", cv2.INTER_LINEAR]: ip = cv2.INTER_LINEAR elif ip in ["area", cv2.INTER_AREA]: ip = cv2.INTER_AREA elif ip in ["cubic", cv2.INTER_CUBIC]: ip = cv2.INTER_CUBIC else: raise Exception("Invalid interpolation order") result = np.zeros((nb_images, height, width, nb_channels), dtype=np.uint8) for img_idx in sm.xrange(nb_images): result_img = cv2.resize(images[img_idx], (width, height), interpolation=ip) if len(result_img.shape) == 2: result_img = result_img[:, :, np.newaxis] result[img_idx] = result_img return result
MIT License
olitheolix/aiokubernetes
aiokubernetes/models/v1beta1_network_policy_ingress_rule.py
V1beta1NetworkPolicyIngressRule.ports
python
def ports(self, ports): self._ports = ports
Sets the ports of this V1beta1NetworkPolicyIngressRule. List of ports which should be made accessible on the pods selected for this rule. Each item in this list is combined using a logical OR. If this field is empty or missing, this rule matches all ports (traffic not restricted by port). If this field is present and contains at least one item, then this rule allows traffic only if the traffic matches at least one port in the list. # noqa: E501 :param ports: The ports of this V1beta1NetworkPolicyIngressRule. # noqa: E501 :type: list[V1beta1NetworkPolicyPort]
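A brief sketch of the semantics described above. It assumes the generated V1beta1NetworkPolicyPort constructor accepts port and protocol keyword arguments, as the upstream Kubernetes client models do; the port numbers are arbitrary examples.

# Traffic matches this rule if it matches ANY of the listed ports (logical OR).
rule = V1beta1NetworkPolicyIngressRule(
    ports=[
        V1beta1NetworkPolicyPort(port=80, protocol='TCP'),   # assumed constructor kwargs
        V1beta1NetworkPolicyPort(port=443, protocol='TCP'),
    ],
)

# An empty or missing ports list means the rule is not restricted by port.
match_all_ports = V1beta1NetworkPolicyIngressRule(ports=None)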
https://github.com/olitheolix/aiokubernetes/blob/266718b210dff2a9b2212183261ea89adf89115e/aiokubernetes/models/v1beta1_network_policy_ingress_rule.py#L92-L101
import pprint import re from aiokubernetes.models.v1beta1_network_policy_peer import V1beta1NetworkPolicyPeer from aiokubernetes.models.v1beta1_network_policy_port import V1beta1NetworkPolicyPort class V1beta1NetworkPolicyIngressRule(object): """ Attributes: swagger_types (dict): The key is attribute name and the value is attribute type. attribute_map (dict): The key is attribute name and the value is json key in definition. """ swagger_types = { '_from': 'list[V1beta1NetworkPolicyPeer]', 'ports': 'list[V1beta1NetworkPolicyPort]' } attribute_map = { '_from': 'from', 'ports': 'ports' } def __init__(self, _from=None, ports=None): self.__from = None self._ports = None self.discriminator = None if _from is not None: self._from = _from if ports is not None: self.ports = ports @property def _from(self): return self.__from @_from.setter def _from(self, _from): self.__from = _from @property def ports(self): return self._ports @ports.setter
Apache License 2.0
bigmlcom/bigmler
bigmler/command.py
get_log_reversed
python
def get_log_reversed(file_name, stack_level): lines_list = tail(open(file_name, "r"), window=(stack_level + 1)) return lines_list[0].decode(u.BIGML_SYS_ENCODING)
Reads the line of a log file at the chosen stack_level, counting backwards from the end of the file (stack_level 0 is the last line).
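A minimal usage sketch, assuming a bigmler log such as the COMMAND_LOG file defined in this module already exists:

# stack_level counts backwards from the end of the file:
# 0 reads the last line, 1 the line before it, and so on.
last_command = get_log_reversed(COMMAND_LOG, 0)
previous_command = get_log_reversed(COMMAND_LOG, 1)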
https://github.com/bigmlcom/bigmler/blob/91973ca1e752954302bf26bb22aa6874dc34ce69/bigmler/command.py#L76-L81
import os import shlex import shutil from bigml.multivote import PLURALITY import bigmler.processing.args as a import bigmler.utils as u from bigmler.defaults import DEFAULTS_FILE from bigmler.defaults import get_user_defaults from bigmler.prediction import MAX_MODELS from bigmler.parser import create_parser COMMAND_LOG = ".bigmler" DIRS_LOG = ".bigmler_dir_stack" SESSIONS_LOG = "bigmler_sessions" CONNECTION_OPTIONS = ["--username", "--api-key", "--org-project"] def tail(file_handler, window=1): bufsiz = 1024 file_handler.seek(0, 2) file_bytes = file_handler.tell() size = window + 1 block = -1 data = [] while size > 0 and file_bytes > 0: if (file_bytes - bufsiz) > 0: file_handler.seek(block * bufsiz, 2) new_data = [file_handler.read(bufsiz)] new_data.extend(data) data = new_data else: file_handler.seek(0, 0) data.append(file_handler.read(file_bytes)) lines_found = data[0].count('\n') size -= lines_found file_bytes -= bufsiz block -= 1 return ''.join(data).splitlines()[-window:]
Apache License 2.0
chaffelson/whoville
whoville/cloudbreak/models/stack_v2_request.py
StackV2Request.tags
python
def tags(self): return self._tags
Gets the tags of this StackV2Request. stack related tags :return: The tags of this StackV2Request. :rtype: Tags
https://github.com/chaffelson/whoville/blob/f71fda629c9fd50d0a482120165ea5abcc754522/whoville/cloudbreak/models/stack_v2_request.py#L316-L324
from pprint import pformat from six import iteritems import re class StackV2Request(object): """ Attributes: swagger_types (dict): The key is attribute name and the value is attribute type. attribute_map (dict): The key is attribute name and the value is json key in definition. """ swagger_types = { 'general': 'GeneralSettings', 'placement': 'PlacementSettings', 'platform_variant': 'str', 'ambari_version': 'str', 'hdp_version': 'str', 'parameters': 'dict(str, object)', 'inputs': 'dict(str, object)', 'custom_domain': 'CustomDomainSettings', 'tags': 'Tags', 'instance_groups': 'list[InstanceGroupsV2]', 'failure_policy': 'FailurePolicyRequest', 'stack_authentication': 'StackAuthentication', 'network': 'NetworkV2Request', 'image_settings': 'ImageSettings', 'flex_id': 'int', 'cluster': 'ClusterV2Request', 'gateway_port': 'int' } attribute_map = { 'general': 'general', 'placement': 'placement', 'platform_variant': 'platformVariant', 'ambari_version': 'ambariVersion', 'hdp_version': 'hdpVersion', 'parameters': 'parameters', 'inputs': 'inputs', 'custom_domain': 'customDomain', 'tags': 'tags', 'instance_groups': 'instanceGroups', 'failure_policy': 'failurePolicy', 'stack_authentication': 'stackAuthentication', 'network': 'network', 'image_settings': 'imageSettings', 'flex_id': 'flexId', 'cluster': 'cluster', 'gateway_port': 'gatewayPort' } def __init__(self, general=None, placement=None, platform_variant=None, ambari_version=None, hdp_version=None, parameters=None, inputs=None, custom_domain=None, tags=None, instance_groups=None, failure_policy=None, stack_authentication=None, network=None, image_settings=None, flex_id=None, cluster=None, gateway_port=None): self._general = None self._placement = None self._platform_variant = None self._ambari_version = None self._hdp_version = None self._parameters = None self._inputs = None self._custom_domain = None self._tags = None self._instance_groups = None self._failure_policy = None self._stack_authentication = None self._network = None self._image_settings = None self._flex_id = None self._cluster = None self._gateway_port = None self.general = general if placement is not None: self.placement = placement if platform_variant is not None: self.platform_variant = platform_variant if ambari_version is not None: self.ambari_version = ambari_version if hdp_version is not None: self.hdp_version = hdp_version if parameters is not None: self.parameters = parameters if inputs is not None: self.inputs = inputs if custom_domain is not None: self.custom_domain = custom_domain if tags is not None: self.tags = tags self.instance_groups = instance_groups if failure_policy is not None: self.failure_policy = failure_policy if stack_authentication is not None: self.stack_authentication = stack_authentication if network is not None: self.network = network if image_settings is not None: self.image_settings = image_settings if flex_id is not None: self.flex_id = flex_id if cluster is not None: self.cluster = cluster if gateway_port is not None: self.gateway_port = gateway_port @property def general(self): return self._general @general.setter def general(self, general): if general is None: raise ValueError("Invalid value for `general`, must not be `None`") self._general = general @property def placement(self): return self._placement @placement.setter def placement(self, placement): self._placement = placement @property def platform_variant(self): return self._platform_variant @platform_variant.setter def platform_variant(self, platform_variant): self._platform_variant = 
platform_variant @property def ambari_version(self): return self._ambari_version @ambari_version.setter def ambari_version(self, ambari_version): self._ambari_version = ambari_version @property def hdp_version(self): return self._hdp_version @hdp_version.setter def hdp_version(self, hdp_version): self._hdp_version = hdp_version @property def parameters(self): return self._parameters @parameters.setter def parameters(self, parameters): self._parameters = parameters @property def inputs(self): return self._inputs @inputs.setter def inputs(self, inputs): self._inputs = inputs @property def custom_domain(self): return self._custom_domain @custom_domain.setter def custom_domain(self, custom_domain): self._custom_domain = custom_domain @property
Apache License 2.0
samuel-buteau/universal-battery-database
cycling/models.py
Cycle.get_temperature
python
def get_temperature(self): return float(self.cycling_file.database_file.valid_metadata.temperature)
Important: this must only be called when the file is known to be valid.
https://github.com/samuel-buteau/universal-battery-database/blob/55e64db74eb05cd9f0541a243bb540c0deba7d60/cycling/models.py#L409-L413
from django.db import models import base64 import numpy import pickle import datetime from django.db.models import Q, Max, Min import matplotlib.pyplot as plt import filename_database.models from io import BytesIO from Key import Key import plot_constants CHARGE = 'chg' DISCHARGE = 'dchg' POLARITIES = [(CHARGE, 'CHARGE'), (DISCHARGE, 'DISCHARGE')] def id_dict_from_id_list(id_list): n = len(id_list) id_dict = {} for i in range(n): id_dict[id_list[i]] = i return id_dict def get_files_for_cell_id(cell_id): return CyclingFile.objects.filter( database_file__deprecated = False ).filter(database_file__valid_metadata__cell_id = cell_id) def clamp(a, x, b): x = min(x, b) x = max(x, a) return x def make_voltage_grid(min_v, max_v, n_samples, my_cell_ids): if n_samples < 2: n_samples = 2 all_cycs = Cycle.objects.filter( discharge_group__cell_id__in = my_cell_ids, valid_cycle = True, ) my_max = max( all_cycs.aggregate(Max("chg_maximum_voltage"))[ "chg_maximum_voltage__max" ], all_cycs.aggregate(Max("dchg_maximum_voltage"))[ "dchg_maximum_voltage__max" ] ) my_min = min( all_cycs.aggregate(Min("chg_minimum_voltage"))[ "chg_minimum_voltage__min" ], all_cycs.aggregate(Min("dchg_minimum_voltage"))[ "dchg_minimum_voltage__min" ] ) my_max = clamp(min_v, my_max, max_v) my_min = clamp(min_v, my_min, max_v) delta = (my_max - my_min) / float(n_samples - 1) return numpy.array([my_min + delta * float(i) for i in range(n_samples)]) def make_current_grid(min_c, max_c, n_samples, my_cell_ids): if n_samples < 2: n_samples = 2 all_cycs = Cycle.objects.filter( discharge_group__cell_id__in = my_cell_ids, valid_cycle = True, ) my_max = max( abs( all_cycs.aggregate(Max("chg_maximum_current"))[ "chg_maximum_current__max" ] ), abs( all_cycs.aggregate(Max("dchg_maximum_current"))[ "dchg_maximum_current__max" ] ) ) my_min = min( abs(all_cycs.aggregate(Min("chg_minimum_current"))[ "chg_minimum_current__min" ]), abs(all_cycs.aggregate(Min("dchg_minimum_current"))[ "dchg_minimum_current__min" ]) ) my_max = clamp(min_c, my_max, max_c) my_min = clamp(min_c, my_min, max_c) my_max = current_to_log_current(my_max) my_min = current_to_log_current(my_min) delta = (my_max - my_min) / float(n_samples - 1.) return numpy.array([my_min + delta * float(i) for i in range(n_samples)]) def current_to_log_current(current): return numpy.log(abs(current) + 1e-5) def make_sign_grid(): return numpy.array([1., -1.]) def make_temperature_grid(min_t, max_t, n_samples, my_cell_ids): if n_samples < 2: n_samples = 2 my_files = CyclingFile.objects.filter( database_file__deprecated = False ).filter(database_file__valid_metadata__cell_id__in = my_cell_ids) my_max = my_files.aggregate( Max("database_file__valid_metadata__temperature") )["database_file__valid_metadata__temperature__max"] my_min = my_files.aggregate( Min("database_file__valid_metadata__temperature") )["database_file__valid_metadata__temperature__min"] my_max = clamp(min_t, my_max, max_t) if my_max < 55.: my_max = 55. my_min = clamp(min_t, my_min, max_t) if my_min > 20.: my_min = 20. 
delta = (my_max - my_min) / float(n_samples - 1) return numpy.array([my_min + delta * float(i) for i in range(n_samples)]) def compute_from_database( cell_id, lower_cycle = None, upper_cycle = None, valid = True, ): files_cell_id = CyclingFile.objects.filter( database_file__deprecated = False, database_file__valid_metadata__cell_id = cell_id, ).order_by("database_file__last_modified") polarity = DISCHARGE groups = {} for cycle_group in get_discharge_groups_from_cell_id(cell_id): q_curves = [] for f in files_cell_id: offset_cycle = f.database_file.valid_metadata.start_cycle filters = Q(valid_cycle = valid) & Q(cycling_file = f) if not (lower_cycle is None and upper_cycle is None): filters = filters & Q( cycle_number__range = ( lower_cycle - offset_cycle, upper_cycle - offset_cycle, ), ) if polarity == DISCHARGE: filters = Q(discharge_group = cycle_group) & filters elif polarity == CHARGE: filters = Q(charge_group = cycle_group) & filters cycles = Cycle.objects.filter(filters) if cycles.exists(): q_curves += list([ ( float(cyc.cycle_number + offset_cycle), -cyc.dchg_total_capacity, ) for cyc in cycles.order_by("cycle_number") ]) if len(q_curves) > 0: groups[( cycle_group.constant_rate, cycle_group.end_rate_prev, cycle_group.end_rate, cycle_group.end_voltage, cycle_group.end_voltage_prev, cycle_group.polarity, )] = numpy.array( q_curves, dtype = [ (Key.N, 'f4'), ("last_cc_capacity", 'f4'), ], ) return groups def make_file_legends_and_vertical( ax, cell_id, lower_cycle = None, upper_cycle = None, show_invalid = False, vertical_barriers = None, list_all_options = None, leg1 = None, ): files_cell_id = CyclingFile.objects.filter( database_file__deprecated = False, database_file__valid_metadata__cell_id = cell_id, ).order_by("database_file__last_modified") file_leg = [] if len(files_cell_id) >= 1: for f_i, f in enumerate(files_cell_id): offset_cycle = f.database_file.valid_metadata.start_cycle if show_invalid: min_cycle = offset_cycle + Cycle.objects.filter( cycling_file = f ).aggregate(Min("cycle_number"))["cycle_number__min"] max_cycle = offset_cycle + Cycle.objects.filter( cycling_file = f ).aggregate(Max("cycle_number"))["cycle_number__max"] else: min_cycle = offset_cycle + Cycle.objects.filter( cycling_file = f, valid_cycle = True, ).aggregate(Min("cycle_number"))["cycle_number__min"] max_cycle = offset_cycle + Cycle.objects.filter( cycling_file = f, valid_cycle = True, ).aggregate(Max("cycle_number"))["cycle_number__max"] if lower_cycle is not None: if min_cycle < lower_cycle: min_cycle = lower_cycle - .5 if min_cycle > upper_cycle: continue if upper_cycle is not None: if max_cycle > upper_cycle: max_cycle = upper_cycle + .5 if max_cycle < lower_cycle: continue bla = plt.axvspan( min_cycle, max_cycle, ymin = .05 * (1 + f_i), ymax = .05 * (2 + f_i), facecolor = plot_constants.COLORS[f_i], alpha = 0.1 ) file_leg.append( ( bla, "File {} Last Modif: {}-{}-{}. 
Size: {}KB".format( f_i, f.database_file.last_modified.year, f.database_file.last_modified.month, f.database_file.last_modified.day, int(f.database_file.filesize / 1024), ), ) ) if vertical_barriers is not None: for index_set_i in range(len(vertical_barriers) + 1): col = ["1.", ".1"][index_set_i % 2] if index_set_i == 0 and len(vertical_barriers) > 0: min_x, max_x = (lower_cycle - 0.5, vertical_barriers[0]) elif index_set_i == 0 and len(vertical_barriers) == 0: min_x, max_x = (lower_cycle - 0.5, upper_cycle + 0.5) elif index_set_i == len(vertical_barriers): min_x, max_x = (vertical_barriers[-1], upper_cycle + 0.5) else: min_x, max_x = ( vertical_barriers[index_set_i - 1], vertical_barriers[index_set_i], ) print(min_x, max_x) ax.axvspan(min_x, max_x, facecolor = col, alpha = 0.1) plt.text( 0.9 * min_x + .1 * max_x, .99 * ax.get_ylim()[0] + .01 * ax.get_ylim()[1], list_all_options[index_set_i], size = 18, ) for index_set_i in range(len(list_all_options) - 1): plt.axvline( x = vertical_barriers[index_set_i], color = "k", linestyle = "--", ) ax.tick_params( direction = "in", length = 7, width = 2, labelsize = 11, bottom = True, top = True, left = True, right = True, ) if len(file_leg) > 0: if list_all_options is None: loc = "lower left" else: loc = "upper left" ax.legend([x[0] for x in file_leg], [x[1] for x in file_leg], loc = loc) ax.add_artist(leg1) def get_byte_image(fig, dpi): buf = BytesIO() plt.savefig(buf, format = "png", dpi = dpi) image_base64 = base64.b64encode( buf.getvalue() ).decode("utf-8").replace("\n", "") buf.close() plt.close(fig) return image_base64 def get_discharge_groups_from_cell_id(cell_id): return list( CycleGroup.objects.filter( cell_id = cell_id, polarity = DISCHARGE, ).order_by("constant_rate") ) class CyclingFile(models.Model): database_file = models.OneToOneField( filename_database.models.DatabaseFile, on_delete = models.CASCADE, ) import_time = models.DateTimeField(default = datetime.datetime(1970, 1, 1)) process_time = models.DateTimeField(default = datetime.datetime(1970, 1, 1)) def get_cycles_array(self, fil = Q()): return numpy.array( [ ( cyc.id, cyc.cycle_number, cyc.chg_total_capacity, cyc.chg_average_voltage, cyc.chg_minimum_voltage, cyc.chg_maximum_voltage, cyc.chg_average_current_by_capacity, cyc.chg_average_current_by_voltage, cyc.chg_minimum_current, cyc.chg_maximum_current, cyc.chg_duration, cyc.dchg_total_capacity, cyc.dchg_average_voltage, cyc.dchg_minimum_voltage, cyc.dchg_maximum_voltage, cyc.dchg_average_current_by_capacity, cyc.dchg_average_current_by_voltage, cyc.dchg_minimum_current, cyc.dchg_maximum_current, cyc.dchg_duration, ) for cyc in self.cycle_set.filter(fil).order_by("cycle_number") ], dtype = [ ("id", int), ("cycle_number", int), ("chg_total_capacity", float), ("chg_average_voltage", float), ("chg_minimum_voltage", float), ("chg_maximum_voltage", float), ("chg_average_current_by_capacity", float), ("chg_average_current_by_voltage", float), ("chg_minimum_current", float), ("chg_maximum_current", float), ("chg_duration", float), ("dchg_total_capacity", float), ("dchg_average_voltage", float), ("dchg_minimum_voltage", float), ("dchg_maximum_voltage", float), ("dchg_average_current_by_capacity", float), ("dchg_average_current_by_voltage", float), ("dchg_minimum_current", float), ("dchg_maximum_current", float), ("dchg_duration", float), ] ) class CycleGroup(models.Model): cell_id = models.IntegerField() constant_rate = models.FloatField() end_rate = models.FloatField() end_rate_prev = models.FloatField() end_voltage = 
models.FloatField() end_voltage_prev = models.FloatField() polarity = models.CharField( max_length = 4, choices = POLARITIES, blank = True, ) class Cycle(models.Model): cycling_file = models.ForeignKey(CyclingFile, on_delete = models.CASCADE) cycle_number = models.IntegerField() def get_offset_cycle(self): return self.cycle_number + float( self.cycling_file.database_file.valid_metadata.start_cycle )
Apache License 2.0
pantsbuild/pex
pex/third_party/__init__.py
install
python
def install(root=None, expose=None): VendorImporter.install_vendored(prefix=import_prefix(), root=root, expose=expose)
Installs the default :class:`VendorImporter` for PEX vendored code. Any distributions listed in ``expose`` will also be exposed for direct import; ie: ``install(expose=['setuptools'])`` would make both ``setuptools`` and ``wheel`` available for import via ``from pex.third_party import setuptools, wheel``, but only ``setuptools`` could be directly imported via ``import setuptools``. NB: Even when exposed, vendored code is not the same as the same un-vendored code and will properly fail type-tests against un-vendored types. For example, in an interpreter that has ``setuptools`` installed in its site-packages: >>> from pkg_resources import Requirement >>> orig_req = Requirement.parse('wheel==0.31.1') >>> from pex import third_party >>> third_party.install(expose=['setuptools']) >>> import sys >>> sys.modules.pop('pkg_resources') <module 'pkg_resources' from '/home/jsirois/dev/pantsbuild/jsirois-pex/.tox/py27-repl/lib/python2.7/site-packages/pkg_resources/__init__.pyc'> # noqa >>> from pkg_resources import Requirement >>> new_req = Requirement.parse('wheel==0.31.1') >>> new_req == orig_req False >>> new_req == Requirement.parse('wheel==0.31.1') True >>> type(orig_req) <class 'pkg_resources.Requirement'> >>> type(new_req) <class 'pex.vendor._vendored.setuptools.pkg_resources.Requirement'> >>> from pex.third_party.pkg_resources import Requirement as PrefixedRequirement >>> new_req == PrefixedRequirement.parse('wheel==0.31.1') True >>> sys.modules.pop('pkg_resources') <module 'pex.vendor._vendored.setuptools.pkg_resources' from 'pex/vendor/_vendored/setuptools/pkg_resources/__init__.pyc'> # noqa >>> sys.modules.pop('pex.third_party.pkg_resources') <module 'pex.vendor._vendored.setuptools.pkg_resources' from 'pex/vendor/_vendored/setuptools/pkg_resources/__init__.pyc'> # noqa >>> :param expose: A list of vendored distribution names to expose directly on the ``sys.path``. :type expose: list of str :raise: :class:`ValueError` if any distributions to expose cannot be found.
https://github.com/pantsbuild/pex/blob/a2eb72e4f627ce0630cd120b5411b2fda7974ce9/pex/third_party/__init__.py#L423-L465
from __future__ import absolute_import import contextlib import importlib import os import re import shutil import sys import zipfile from collections import OrderedDict, namedtuple from contextlib import closing def _tracer(): from pex.tracer import TRACER return TRACER class _Loader(namedtuple("_Loader", ["module_name", "vendor_module_name"])): def load_module(self, fullname): assert fullname in ( self.module_name, self.vendor_module_name, ), "{} got an unexpected module {}".format(self, fullname) vendored_module = importlib.import_module(self.vendor_module_name) sys.modules[fullname] = vendored_module _tracer().log("{} imported via {}".format(fullname, self), V=9) return vendored_module def unload(self): for mod in (self.module_name, self.vendor_module_name): if mod in sys.modules: sys.modules.pop(mod) _tracer().log("un-imported {}".format(mod), V=9) submod_prefix = mod + "." for submod in sorted(m for m in sys.modules.keys() if m.startswith(submod_prefix)): sys.modules.pop(submod) _tracer().log("un-imported {}".format(submod), V=9) class _Importable(namedtuple("_Importable", ["module", "is_pkg", "path", "prefix"])): _exposed = False def expose(self): self._exposed = True importlib.import_module(self.module) _tracer().log("Exposed {}".format(self), V=3) def loader_for(self, fullname): if fullname.startswith(self.prefix + "."): target = fullname[len(self.prefix + ".") :] else: if not self._exposed: return None target = fullname if target == self.module or self.is_pkg and target.startswith(self.module + "."): vendor_path = os.path.join(self.path, *target.split(".")) vendor_module_name = vendor_path.replace(os.sep, ".") return _Loader(fullname, vendor_module_name) class _DirIterator(namedtuple("_DirIterator", ["rootdir"])): def iter_root_modules(self, relpath): for entry in self._iter_root(relpath): if os.path.isfile(entry): name, ext = os.path.splitext(os.path.basename(entry)) if ext == ".py" and name != "__init__": yield name def iter_root_packages(self, relpath): for entry in self._iter_root(relpath): if os.path.isfile(os.path.join(entry, "__init__.py")): yield os.path.basename(entry) def _iter_root(self, relpath): root = os.path.join(self.rootdir, relpath) if not os.path.isdir(root): return for entry in os.listdir(root): yield os.path.join(root, entry) class _ZipIterator(namedtuple("_ZipIterator", ["zipfile_path", "prefix"])): @classmethod def containing(cls, root): prefix = "" path = root while path: if zipfile.is_zipfile(path): return cls(zipfile_path=path, prefix="{}/".format(prefix) if prefix else "") path_base = os.path.basename(path) prefix = "{}/{}".format(path_base, prefix) if prefix else path_base path = os.path.dirname(path) raise ValueError("Could not find the zip file housing {}".format(root)) def iter_root_modules(self, relpath): for package in self._filter_names(relpath, r"(?P<module>[^/]+)\.py", "module"): if package != "__init__": yield package def iter_root_packages(self, relpath): for package in self._filter_names(relpath, r"(?P<package>[^/]+)/__init__\.py", "package"): yield package def _filter_names(self, relpath, pattern, group): relpath_pat = "" if not relpath else "{}/".format(relpath.replace(os.sep, "/")) pat = re.compile(r"^{}{}{}$".format(self.prefix, relpath_pat, pattern)) with contextlib.closing(zipfile.ZipFile(self.zipfile_path)) as zf: for name in zf.namelist(): match = pat.match(name) if match: yield match.group(group) class VendorImporter(object): @staticmethod def _abs_root(root=None): from pex import vendor return os.path.abspath(root or 
vendor.VendorSpec.ROOT) @classmethod def _iter_importables(cls, root, path_items, prefix): module_iterator = ( _DirIterator(root) if os.path.isdir(root) else _ZipIterator.containing(root) ) for path_item in path_items: for module_name in module_iterator.iter_root_modules(path_item): yield _Importable(module=module_name, is_pkg=False, path=path_item, prefix=prefix) for package_name in module_iterator.iter_root_packages(path_item): yield _Importable(module=package_name, is_pkg=True, path=path_item, prefix=prefix) @classmethod def _iter_all_installed_vendor_importers(cls): for importer in sys.meta_path: if isinstance(importer, cls): yield importer @classmethod def _iter_installed_vendor_importers(cls, prefix, root, path_items): for importer in cls._iter_all_installed_vendor_importers(): if importer._importables and importer._importables[0].prefix == prefix: if importer._root == root: if {importable.path for importable in importer._importables} == set(path_items): yield importer @classmethod def install_vendored(cls, prefix, root=None, expose=None): from pex import vendor root = cls._abs_root(root) vendored_path_items = [spec.relpath for spec in vendor.iter_vendor_specs()] installed = list(cls._iter_installed_vendor_importers(prefix, root, vendored_path_items)) assert ( len(installed) <= 1 ), "Unexpected extra importers installed for vendored code:\n\t{}".format( "\n\t".join(map(str, installed)) ) if installed: vendor_importer = installed[0] else: vendor_importer = cls.install( uninstallable=True, prefix=prefix, path_items=vendored_path_items, root=root ) if expose: exposed_paths = [] for path in cls.expose(expose, root): sys.path.insert(0, path) exposed_paths.append(os.path.relpath(path, root)) vendor_importer._expose(exposed_paths) @classmethod def expose(cls, dists, root=None): from pex import vendor root = cls._abs_root(root) def iter_available(): yield "pex", root for spec in vendor.iter_vendor_specs(): yield spec.key, spec.relpath path_by_key = OrderedDict( (key, relpath) for key, relpath in iter_available() if key in dists ) unexposed = set(dists) - set(path_by_key.keys()) if unexposed: raise ValueError( "The following vendored dists are not available to expose: {}".format( ", ".join(sorted(unexposed)) ) ) exposed_paths = path_by_key.values() for exposed_path in exposed_paths: yield os.path.join(root, exposed_path) @classmethod def install(cls, uninstallable, prefix, path_items, root=None): root = cls._abs_root(root) importables = tuple(cls._iter_importables(root=root, path_items=path_items, prefix=prefix)) vendor_importer = cls(root=root, importables=importables, uninstallable=uninstallable) sys.meta_path.insert(0, vendor_importer) _tracer().log("Installed {}".format(vendor_importer), V=3) return vendor_importer @classmethod def uninstall_all(cls): for vendor_importer in cls._iter_all_installed_vendor_importers(): vendor_importer.uninstall() def __init__(self, root, importables, uninstallable=True): self._root = root self._importables = importables self._uninstallable = uninstallable self._loaders = [] def uninstall(self): if not self._uninstallable: _tracer().log("Not uninstalling {}".format(self), V=9) return if self in sys.meta_path: sys.meta_path.remove(self) maybe_exposed = frozenset( os.path.join(self._root, importable.path) for importable in self._importables ) sys.path[:] = [path_item for path_item in sys.path if path_item not in maybe_exposed] for loader in self._loaders: loader.unload() _tracer().log("Uninstalled {}".format(self), V=3) def find_module(self, fullname, 
path=None): for importable in self._importables: loader = importable.loader_for(fullname) if loader is not None: self._loaders.append(loader) return loader return None def _expose(self, paths): for importable in self._importables: if importable.path in paths: importable.expose() def __repr__(self): return "{classname}(root={root!r}, importables={importables!r})".format( classname=self.__class__.__name__, root=self._root, importables=self._importables ) class IsolationResult(namedtuple("IsolatedPex", ["pex_hash", "chroot_path"])): _ISOLATED = None def isolated(): global _ISOLATED if _ISOLATED is None: from pex import vendor from pex.common import atomic_directory, is_pyc_temporary_file from pex.util import CacheHelper from pex.variables import ENV from pex.third_party.pkg_resources import resource_isdir, resource_listdir, resource_stream module = "pex" vendor_lockfiles = tuple( os.path.join(os.path.relpath(vendor_spec.relpath, module), "constraints.txt") for vendor_spec in vendor.iter_vendor_specs() ) def recursive_copy(srcdir, dstdir): os.mkdir(dstdir) for entry_name in resource_listdir(module, srcdir): if not entry_name: continue src_entry = "{}/{}".format(srcdir, entry_name) if srcdir else entry_name dst_entry = os.path.join(dstdir, entry_name) if resource_isdir(module, src_entry): if os.path.basename(src_entry) == "__pycache__": continue recursive_copy(src_entry, dst_entry) elif ( not entry_name.endswith(".pyc") and not is_pyc_temporary_file(entry_name) and src_entry not in vendor_lockfiles ): with open(dst_entry, "wb") as fp: with closing(resource_stream(module, src_entry)) as resource: shutil.copyfileobj(resource, fp) pex_path = os.path.join(vendor.VendorSpec.ROOT, "pex") with _tracer().timed("Hashing pex"): assert os.path.isdir(pex_path), ( "Expected the `pex` module to be available via an installed distribution or " "else via an installed or loose PEX. Loaded the `pex` module from {} and argv0 is " "{}.".format(pex_path, sys.argv[0]) ) dir_hash = CacheHelper.dir_hash(pex_path) isolated_dir = os.path.join(ENV.PEX_ROOT, "isolated", dir_hash) with _tracer().timed("Isolating pex"): with atomic_directory(isolated_dir, exclusive=True) as chroot: if not chroot.is_finalized: with _tracer().timed("Extracting pex to {}".format(isolated_dir)): recursive_copy("", os.path.join(chroot.work_dir, "pex")) _ISOLATED = IsolationResult(pex_hash=dir_hash, chroot_path=isolated_dir) return _ISOLATED def uninstall(): VendorImporter.uninstall_all() def import_prefix(): return __name__
Apache License 2.0
ali5h/rules_pip
third_party/py/click/decorators.py
password_option
python
def password_option(*param_decls, **attrs): def decorator(f): attrs.setdefault("prompt", True) attrs.setdefault("confirmation_prompt", True) attrs.setdefault("hide_input", True) return option(*(param_decls or ("--password",)), **attrs)(f) return decorator
Shortcut for password prompts. This is equivalent to decorating a function with :func:`option` with the following parameters:: @click.command() @click.option('--password', prompt=True, confirmation_prompt=True, hide_input=True) def changeadmin(password): pass
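A short usage sketch of the shortcut itself, equivalent to the expanded form shown in the docstring; the command body is illustrative only:

import click

@click.command()
@click.password_option()
def changeadmin(password):
    # Prompts twice with hidden input; the confirmed value arrives as a plain string.
    click.echo('password length: {}'.format(len(password)))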
https://github.com/ali5h/rules_pip/blob/fb02cb7bf5c03bc8cd4269679e4aea2e1839b501/third_party/py/click/decorators.py#L231-L250
import inspect import sys from functools import update_wrapper from ._compat import iteritems from ._unicodefun import _check_for_unicode_literals from .core import Argument from .core import Command from .core import Group from .core import Option from .globals import get_current_context from .utils import echo def pass_context(f): def new_func(*args, **kwargs): return f(get_current_context(), *args, **kwargs) return update_wrapper(new_func, f) def pass_obj(f): def new_func(*args, **kwargs): return f(get_current_context().obj, *args, **kwargs) return update_wrapper(new_func, f) def make_pass_decorator(object_type, ensure=False): def decorator(f): def new_func(*args, **kwargs): ctx = get_current_context() if ensure: obj = ctx.ensure_object(object_type) else: obj = ctx.find_object(object_type) if obj is None: raise RuntimeError( "Managed to invoke callback without a context" " object of type '{}' existing".format(object_type.__name__) ) return ctx.invoke(f, obj, *args, **kwargs) return update_wrapper(new_func, f) return decorator def _make_command(f, name, attrs, cls): if isinstance(f, Command): raise TypeError("Attempted to convert a callback into a command twice.") try: params = f.__click_params__ params.reverse() del f.__click_params__ except AttributeError: params = [] help = attrs.get("help") if help is None: help = inspect.getdoc(f) if isinstance(help, bytes): help = help.decode("utf-8") else: help = inspect.cleandoc(help) attrs["help"] = help _check_for_unicode_literals() return cls( name=name or f.__name__.lower().replace("_", "-"), callback=f, params=params, **attrs ) def command(name=None, cls=None, **attrs): if cls is None: cls = Command def decorator(f): cmd = _make_command(f, name, attrs, cls) cmd.__doc__ = f.__doc__ return cmd return decorator def group(name=None, **attrs): attrs.setdefault("cls", Group) return command(name, **attrs) def _param_memo(f, param): if isinstance(f, Command): f.params.append(param) else: if not hasattr(f, "__click_params__"): f.__click_params__ = [] f.__click_params__.append(param) def argument(*param_decls, **attrs): def decorator(f): ArgumentClass = attrs.pop("cls", Argument) _param_memo(f, ArgumentClass(param_decls, **attrs)) return f return decorator def option(*param_decls, **attrs): def decorator(f): option_attrs = attrs.copy() if "help" in option_attrs: option_attrs["help"] = inspect.cleandoc(option_attrs["help"]) OptionClass = option_attrs.pop("cls", Option) _param_memo(f, OptionClass(param_decls, **option_attrs)) return f return decorator def confirmation_option(*param_decls, **attrs): def decorator(f): def callback(ctx, param, value): if not value: ctx.abort() attrs.setdefault("is_flag", True) attrs.setdefault("callback", callback) attrs.setdefault("expose_value", False) attrs.setdefault("prompt", "Do you want to continue?") attrs.setdefault("help", "Confirm the action without prompting.") return option(*(param_decls or ("--yes",)), **attrs)(f) return decorator
MIT License
suomichain/suomi-core
consensus/fastpow/core/suomi_poet/poet_consensus/poet_settings_view.py
PoetSettingsView.signup_commit_maximum_delay
python
def signup_commit_maximum_delay(self): if self._signup_commit_maximum_delay is None: self._signup_commit_maximum_delay = self._get_config_setting( name='suomi.poet.signup_commit_maximum_delay', value_type=int, default_value=PoetSettingsView. _SIGNUP_COMMIT_MAXIMUM_DELAY_, validate_function=lambda value: value >= 0) return self._signup_commit_maximum_delay
Return the signup commit maximum delay if the config setting exists and is valid, otherwise return the default. The signup commit maximum delay is the maximum allowed number of blocks between (a) the head of the blockchain at the time the signup information was created and its validator registry transaction was submitted and (b) the block in which that transaction was committed. For example, if the signup commit maximum delay is one and the signup information's validator registry transaction was created/submitted when the blockchain head was block number 100, then the transaction must have been committed either in block 101 (i.e., zero blocks between 100 and 101) or block 102 (i.e., one block between 100 and 102).
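A tiny sketch of the arithmetic in the docstring example; the helper name is hypothetical and not part of the PoET code:

def signup_commit_within_delay(head_block_at_signup, commit_block, maximum_delay):
    # Hypothetical helper: count the blocks strictly between the head at signup
    # time and the block that committed the validator registry transaction.
    blocks_between = commit_block - head_block_at_signup - 1
    return 0 <= blocks_between <= maximum_delay

# With a maximum delay of one and the head at block 100 (per the example above):
assert signup_commit_within_delay(100, 101, 1)      # zero blocks between -> allowed
assert signup_commit_within_delay(100, 102, 1)      # one block between  -> allowed
assert not signup_commit_within_delay(100, 103, 1)  # two blocks between -> rejected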
https://github.com/suomichain/suomi-core/blob/dada0499ddc2b4b4a5d9a975de5af63b87ded9d2/consensus/fastpow/core/suomi_poet/poet_consensus/poet_settings_view.py#L230-L254
import math import logging from suomi_validator.state.settings_view import SettingsView LOGGER = logging.getLogger(__name__) class PoetSettingsView(object): _BLOCK_CLAIM_DELAY_ = 1 _ENCLAVE_MODULE_NAME_ = 'suomi_poet_simulator.poet_enclave_simulator.poet_enclave_simulator' _INITIAL_WAIT_TIME_ = 3000.0 _KEY_BLOCK_CLAIM_LIMIT_ = 250 _POPULATION_ESTIMATE_SAMPLE_SIZE_ = 50 _REGISTRATION_RETRY_DELAY_ = 10 _SIGNUP_COMMIT_MAXIMUM_DELAY_ = 10 _TARGET_WAIT_TIME_ = 20.0 _ZTEST_MAXIMUM_WIN_DEVIATION_ = 3.075 _ZTEST_MINIMUM_WIN_COUNT_ = 3 def __init__(self, state_view): self._settings_view = SettingsView(state_view) self._block_claim_delay = None self._enclave_module_name = None self._initial_wait_time = None self._key_block_claim_limit = None self._population_estimate_sample_size = None self._registration_retry_delay = None self._signup_commit_maximum_delay = None self._target_wait_time = None self._ztest_maximum_win_deviation = None self._ztest_minimum_win_count = None def _get_config_setting(self, name, value_type, default_value, validate_function=None): try: value = self._settings_view.get_setting( key=name, default_value=default_value, value_type=value_type) if validate_function is not None: if not validate_function(value): raise ValueError( 'Value ({}) for {} is not valid'.format( value, name)) except ValueError: value = default_value return value @property def block_claim_delay(self): if self._block_claim_delay is None: self._block_claim_delay = self._get_config_setting( name='suomi.poet.block_claim_delay', value_type=int, default_value=PoetSettingsView._BLOCK_CLAIM_DELAY_, validate_function=lambda value: value >= 0) return self._block_claim_delay @property def enclave_module_name(self): if self._enclave_module_name is None: self._enclave_module_name = self._get_config_setting( name='suomi.poet.enclave_module_name', value_type=str, default_value=PoetSettingsView._ENCLAVE_MODULE_NAME_, validate_function=lambda value: value) return self._enclave_module_name @property def initial_wait_time(self): if self._initial_wait_time is None: self._initial_wait_time = self._get_config_setting( name='suomi.poet.initial_wait_time', value_type=float, default_value=PoetSettingsView._INITIAL_WAIT_TIME_, validate_function=lambda value: math.isfinite(value) and value >= 0) return self._initial_wait_time @property def key_block_claim_limit(self): if self._key_block_claim_limit is None: self._key_block_claim_limit = self._get_config_setting( name='suomi.poet.key_block_claim_limit', value_type=int, default_value=PoetSettingsView._KEY_BLOCK_CLAIM_LIMIT_, validate_function=lambda value: value > 0) return self._key_block_claim_limit @property def population_estimate_sample_size(self): if self._population_estimate_sample_size is None: self._population_estimate_sample_size = self._get_config_setting( name='suomi.poet.population_estimate_sample_size', value_type=int, default_value=PoetSettingsView. _POPULATION_ESTIMATE_SAMPLE_SIZE_, validate_function=lambda value: value > 0) return self._population_estimate_sample_size @property def registration_retry_delay(self): if self._registration_retry_delay is None: self._registration_retry_delay = self._get_config_setting( name='suomi.poet._registration_retry_delay', value_type=int, default_value=PoetSettingsView. _REGISTRATION_RETRY_DELAY_, validate_function=lambda value: value > 1) return self._registration_retry_delay @property
Apache License 2.0
morganstanley/testplan
testplan/parser.py
TestplanParser.process_args
python
def process_args(self, namespace): args = dict(**vars(namespace)) filter_args = filtering.parse_filter_args( parsed_args=args, arg_names=("tags", "tags_all", "patterns") ) if filter_args: args["test_filter"] = filter_args if args.get("shuffle"): args["test_sorter"] = ordering.ShuffleSorter( seed=args["shuffle_seed"], shuffle_type=args["shuffle"] ) if args["verbose"] or args["debug"]: args["stdout_style"] = styles.Style( styles.StyleEnum.ASSERTION_DETAIL, styles.StyleEnum.ASSERTION_DETAIL, ) if args["debug"]: args["logger_level"] = logger.DEBUG elif args["verbose"]: args["logger_level"] = logger.INFO if args["list"] and not args["test_lister"]: args["test_lister"] = listing.NameLister() return args
Override this method to add extra argument processing logic. Can be used for interdependent argument processing. Testplan will use the result dictionary to initialize the configuration.
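A minimal sketch of overriding this hook in a subclass; the --env option and the derived runpath are hypothetical, not part of Testplan:

class MyParser(TestplanParser):

    def add_arguments(self, parser):
        # Hypothetical extra command-line option.
        parser.add_argument("--env", default="dev")

    def process_args(self, namespace):
        # Start from the base processing, then derive interdependent values.
        args = super(MyParser, self).process_args(namespace)
        args["runpath"] = "{}/{}".format(args["runpath"], args["env"])
        return args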
https://github.com/morganstanley/testplan/blob/8cb6a0ed0682698b2d6af82382fbb66d8d9e3ff7/testplan/parser.py#L330-L371
import argparse import copy import sys from testplan.common.utils import logger from testplan import defaults from testplan.report.testing import styles, ReportTagsAction from testplan.testing import listing, filtering, ordering class HelpParser(argparse.ArgumentParser): def error(self, message): error_header = "=" * 30 + " ERROR " + "=" * 30 error_ctx = [ "\n", error_header, "\n", "\n", message, "\n", "=" * len(error_header), "\n", ] self.print_help() sys.stderr.writelines(error_ctx) sys.exit(2) class TestplanParser(object): def __init__(self, name, default_options): self.cmd_line = copy.copy(sys.argv) self.name = name self._default_options = default_options def add_arguments(self, parser): pass def generate_parser(self): epilog = "" parser = HelpParser( "Test Plan ({})".format(self.name), epilog, formatter_class=argparse.RawTextHelpFormatter, ) parser.add_argument( "--list", action="store_true", default=False, help="Shortcut for `--info name`.", ) parser.add_argument( "--info", dest="test_lister", metavar="TEST_INFO", **listing.listing_registry.to_arg().get_parser_context( default=self._default_options["test_lister"] ) ) general_group = parser.add_argument_group("General") general_group.add_argument( "--runpath", type=str, metavar="PATH", default=self._default_options["runpath"], help="Path under which all temp files and logs will be created.", ) general_group.add_argument( "--timeout", metavar="TIMEOUT", default=self._default_options["timeout"], type=int, help="Timeout value in seconds to kill Testplan and all child " "processes, default to 14400s(4h), set to 0 to disable.", ) general_group.add_argument( "-i", "--interactive", dest="interactive_port", nargs="?", default=self._default_options["interactive_port"], const=defaults.WEB_SERVER_PORT, type=int, help="Enable interactive mode. A port may be specified, otherwise " "the port defaults to {}.".format(defaults.WEB_SERVER_PORT), ) filter_group = parser.add_argument_group("Filtering") filter_group.add_argument( "--patterns", action=filtering.PatternAction, default=[], nargs="+", metavar="TEST_FILTER", type=str, help="""\ Test filter, supports glob notation & multiple arguments. --pattern <Multitest Name> --pattern <Multitest Name 1> <Multitest Name 2> --pattern <Multitest Name 1> --pattern <Multitest Name 2> --pattern <Multitest Name>:<Suite Name> --pattern <Multitest Name>:<Suite Name>:<Testcase name> --pattern <Multitest Name>:*:<Testcase name> --pattern *:<Suite Name>:<Testcase name>""", ) filter_group.add_argument( "--tags", action=filtering.TagsAction, default=[], nargs="+", metavar="TEST_FILTER", help="""\ Test filter, runs tests that match ANY of the given tags. --tags <tag_name_1> --tags <tag_name 2> --tags <tag_name_1> <tag_category_1>=<tag_name_2>""", ) filter_group.add_argument( "--tags-all", action=filtering.TagsAllAction, default=[], nargs="+", metavar="TEST_FILTER", help="""\ Test filter, runs tests that match ALL of the given tags. 
--tags-all <tag_name_1> --tags <tag_name 2> --tags-all <tag_name_1> <tag_category_1>=<tag_name_2>""", ) ordering_group = parser.add_argument_group("Ordering") ordering_group.add_argument( "--shuffle", nargs="+", type=str, default=self._default_options["shuffle"], choices=[enm.value for enm in ordering.SortType], help="Shuffle execution order", ) ordering_group.add_argument( "--shuffle-seed", metavar="SEED", type=float, default=self._default_options["shuffle_seed"], help="Seed shuffle with a specific value, useful to " "reproduce a particular order.", ) report_group = parser.add_argument_group("Reporting") report_group.add_argument( "--stdout-style", **styles.StyleArg.get_parser_context( default=self._default_options["stdout_style"] ) ) report_group.add_argument( "--pdf", dest="pdf_path", default=self._default_options["pdf_path"], metavar="PATH", help="Path for PDF report.", ) report_group.add_argument( "--json", dest="json_path", default=self._default_options["json_path"], metavar="PATH", help="Path for JSON report.", ) report_group.add_argument( "--xml", dest="xml_dir", default=self._default_options["xml_dir"], metavar="DIRECTORY", help="Directory path for XML reports.", ) report_group.add_argument( "--http", dest="http_url", default=self._default_options["http_url"], metavar="URL", help="Web URL for posting report.", ) report_group.add_argument( "--report-dir", default=self._default_options["report_dir"], metavar="PATH", help="Target directory for tag filtered report output.", ) report_group.add_argument( "--pdf-style", **styles.StyleArg.get_parser_context( default=self._default_options["pdf_style"] ) ) report_group.add_argument( "-v", "--verbose", action="store_true", default=self._default_options["verbose"], help="Enable verbose mode that will also set the stdout-style " 'option to "detailed".', ) report_group.add_argument( "-d", "--debug", action="store_true", default=self._default_options["debug"], help="Enable debug mode.", ) report_group.add_argument( "-b", "--browse", action="store_true", default=self._default_options["browse"], help="Automatically open report to browse. Must be specified " 'with "--ui" to open it locally, or upload it to a web server ' "with a customized exporter which has a `report_url`, or there " "will be nothing to open.", ) report_group.add_argument( "-u", "--ui", dest="ui_port", nargs="?", default=self._default_options["ui_port"], const=defaults.WEB_SERVER_PORT, type=int, help="Start the web server to view the Testplan UI. A port can be " "specified, otherwise defaults to {}. A JSON report will be " "saved locally.".format(self._default_options["ui_port"]), ) report_group.add_argument( "--report-tags", nargs="+", action=ReportTagsAction, default=self._default_options["report_tags"], metavar="REPORT_FILTER", help="""\ Report filter, generates a separate report (PDF by default) that match ANY of the given tags. --report-tags <tag_name_1> --report-tags <tag_name 2> --report-tags <tag_name_1> <tag_category_1>=<tag_name_2>""", ) report_group.add_argument( "--report-tags-all", nargs="+", action=ReportTagsAction, default=self._default_options["report_tags_all"], metavar="REPORT_FILTER", help="""\ Report filter, generates a separate report (PDF by default) that match ALL of the given tags. 
--report-tags-all <tag_name_1> --report-tags-all <tag_name 2> --report-tags-all <tag_name_1> <tag_category_1>=<tag_name_2>""", ) report_group.add_argument( "--file-log-level", choices=LogLevelAction.LEVELS.keys(), default=self._default_options["file_log_level"], action=LogLevelAction, help="Specify log level for file logs. Set to None to disable " "file logging.", ) report_group.add_argument( "--label", default=None, help="Label the test report with the given name, " 'useful to categorize or classify similar reports (aka "run-id").', ) self.add_arguments(parser) return parser def parse_args(self): return self.generate_parser().parse_args()
Apache License 2.0
theislab/scvelo
scvelo/core/_arithmetic.py
prod_sum
python
def prod_sum(
    a1: Union[ndarray, spmatrix], a2: Union[ndarray, spmatrix], axis: Optional[int]
) -> ndarray:
    if issparse(a1):
        return a1.multiply(a2).sum(axis=axis).A1
    elif axis == 0:
        return np.einsum("ij, ij -> j", a1, a2) if a1.ndim > 1 else (a1 * a2).sum()
    elif axis == 1:
        return np.einsum("ij, ij -> i", a1, a2) if a1.ndim > 1 else (a1 * a2).sum()
Take sum of product of two arrays along given axis.

Arguments
---------
a1
    First array.
a2
    Second array.
axis
    Axis along which to sum elements. If `None`, all elements will be
    summed. Defaults to `None`.

Returns
-------
ndarray
    Sum of product of arrays along given axis.
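A short usage sketch with dense and sparse inputs (expected results shown as comments):

import numpy as np
from scipy.sparse import csr_matrix

a = np.array([[1.0, 2.0], [3.0, 4.0]])
b = np.array([[5.0, 6.0], [7.0, 8.0]])

prod_sum(a, b, axis=0)              # array([26., 44.]) -- column-wise sum of a * b
prod_sum(a, b, axis=1)              # array([17., 53.]) -- row-wise sum of a * b
prod_sum(csr_matrix(a), b, axis=1)  # array([17., 53.]) -- sparse first argument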
https://github.com/theislab/scvelo/blob/1805ab4a72d3f34496f0ef246500a159f619d3a2/scvelo/core/_arithmetic.py#L52-L78
import warnings
from typing import Optional, Union

import numpy as np
from numpy import ndarray
from scipy.sparse import issparse, spmatrix


def clipped_log(x: ndarray, lb: float = 0, ub: float = 1, eps: float = 1e-6) -> ndarray:
    return np.log(np.clip(x, lb + eps, ub - eps))


def invert(x: ndarray) -> ndarray:
    with warnings.catch_warnings():
        warnings.simplefilter("ignore")
        x_inv = 1 / x * (x != 0)
    return x_inv
BSD 3-Clause New or Revised License
unofficial-memsource/memsource-cli-client
memsource_cli/models/workflow_step_reference.py
WorkflowStepReference.id
python
def id(self):
    return self._id
Gets the id of this WorkflowStepReference.  # noqa: E501

:return: The id of this WorkflowStepReference.  # noqa: E501
:rtype: str
https://github.com/unofficial-memsource/memsource-cli-client/blob/a6639506b74e95476da87f4375953448b76ea90c/memsource_cli/models/workflow_step_reference.py#L82-L89
import pprint import re import six class WorkflowStepReference(object): """ Attributes: swagger_types (dict): The key is attribute name and the value is attribute type. attribute_map (dict): The key is attribute name and the value is json key in definition. """ swagger_types = { 'name': 'str', 'id': 'str', 'order': 'int' } attribute_map = { 'name': 'name', 'id': 'id', 'order': 'order' } def __init__(self, name=None, id=None, order=None): self._name = None self._id = None self._order = None self.discriminator = None if name is not None: self.name = name if id is not None: self.id = id if order is not None: self.order = order @property def name(self): return self._name @name.setter def name(self, name): self._name = name @property
Apache License 2.0
lvapeab/nmt-keras
demo-web/sample_server.py
NMTSampler.generate_sample
python
def generate_sample(self, source_sentence, validated_prefix=None, max_N=5, isle_indices=None, filtered_idx2word=None, unk_indices=None, unk_words=None): logger.log(2, 'Beam size: %d' % (self.params_prediction['beam_size'])) generate_sample_start_time = time.time() if unk_indices is None: unk_indices = [] if unk_words is None: unk_words = [] tokenization_start_time = time.time() tokenized_input = self.general_tokenize_f(source_sentence, escape=False) tokenized_input = self.model_tokenize_f(tokenized_input) tokenization_end_time = time.time() logger.log(2, 'tokenization time: %.6f' % (tokenization_end_time - tokenization_start_time)) parse_input_start_time = time.time() src_seq = self.dataset.loadText([tokenized_input], vocabularies=self.dataset.vocabulary[self.params['INPUTS_IDS_DATASET'][0]], max_len=self.params['MAX_INPUT_TEXT_LEN'], offset=0, fill=self.dataset.fill_text[self.params['INPUTS_IDS_DATASET'][0]], pad_on_batch=self.dataset.pad_on_batch[self.params['INPUTS_IDS_DATASET'][0]], words_so_far=False, loading_X=True)[0][0] parse_input_end_time = time.time() logger.log(2, 'parse_input time: %.6f' % (parse_input_end_time - parse_input_start_time)) fixed_words_user = OrderedDict() unk_words_dict = OrderedDict() if validated_prefix is not None: next_correction = validated_prefix[-1] if next_correction == self.eos_symbol: return validated_prefix[:-1].decode('utf-8') prefix_tokenization_start_time = time.time() tokenized_validated_prefix = self.general_tokenize_f(validated_prefix, escape=False) tokenized_validated_prefix = self.model_tokenize_f(tokenized_validated_prefix) prefix_tokenization_end_time = time.time() logger.log(2, 'prefix_tokenization time: %.6f' % (prefix_tokenization_end_time - prefix_tokenization_start_time)) word_validation_start_time = time.time() for pos, word in enumerate(tokenized_validated_prefix.split()): fixed_words_user[pos] = self.word2index_y.get(word, self.unk_id) if self.word2index_y.get(word) is None: unk_words_dict[pos] = word word_validation_end_time = time.time() logger.log(2, 'word_validation time: %.6f' % (word_validation_end_time - word_validation_start_time)) constrain_search_start_time = time.time() last_user_word_pos = list(fixed_words_user.keys())[-1] if next_correction != u' ': last_user_word = tokenized_validated_prefix.split()[-1] filtered_idx2word = dict((self.word2index_y[candidate_word], candidate_word) for candidate_word in self.word2index_y if candidate_word[:len(last_user_word)] == last_user_word) if filtered_idx2word != dict(): del fixed_words_user[last_user_word_pos] if last_user_word_pos in list(unk_words_dict.keys()): del unk_words_dict[last_user_word_pos] else: filtered_idx2word = dict() constrain_search_end_time = time.time() logger.log(2, 'constrain_search_end_time time: %.6f' % (constrain_search_end_time - constrain_search_start_time)) sample_beam_search_start_time = time.time() trans_indices, costs, alphas = self.interactive_beam_searcher.sample_beam_search_interactive(src_seq, fixed_words=copy.copy(fixed_words_user), max_N=max_N, isles=isle_indices, valid_next_words=filtered_idx2word, idx2word=self.index2word_y) sample_beam_search_end_time = time.time() logger.log(2, 'sample_beam_search time: %.6f' % (sample_beam_search_end_time - sample_beam_search_start_time)) if self.params_prediction['pos_unk']: alphas = [alphas] sources = [tokenized_input] heuristic = self.params_prediction['heuristic'] else: alphas = None heuristic = None sources = None decoding_predictions_start_time = time.time() hypothesis = 
decode_predictions_beam_search([trans_indices], self.index2word_y, alphas=alphas, x_text=sources, heuristic=heuristic, mapping=self.mapping, pad_sequences=True, verbose=0)[0] decoding_predictions_end_time = time.time() logger.log(2, 'decoding_predictions time: %.6f' % (decoding_predictions_end_time - decoding_predictions_start_time)) unk_management_start_time = time.time() unk_indices = list(unk_words_dict) unk_words = list(unk_words_dict.values()) if len(unk_indices) > 0: hypothesis = hypothesis.split() if len(hypothesis) < len(unk_indices): for i, index in enumerate(range(0, len(hypothesis))): hypothesis[index] = unk_words[unk_indices[i]] for ii in range(i + 1, len(unk_words)): hypothesis.append(unk_words[ii]) else: for i, index in enumerate(unk_indices): if index < len(hypothesis): hypothesis[index] = unk_words[i] else: hypothesis.append(unk_words[i]) hypothesis = u' '.join(hypothesis) unk_management_end_time = time.time() logger.log(2, 'unk_management time: %.6f' % (unk_management_end_time - unk_management_start_time)) hypothesis_detokenization_start_time = time.time() hypothesis = self.model_detokenize_f(hypothesis) hypothesis = self.general_detokenize_f(hypothesis, unescape=False) hypothesis_detokenization_end_time = time.time() logger.log(2, 'hypothesis_detokenization time: %.6f' % (hypothesis_detokenization_end_time - hypothesis_detokenization_start_time)) generate_sample_end_time = time.time() logger.log(2, 'generate_sample time: %.6f' % (generate_sample_end_time - generate_sample_start_time)) return hypothesis
Generate sample via constrained search.

Options labeled with <<isles>> are untested and likely require some
modifications to work correctly.

:param source_sentence: Source sentence.
:param validated_prefix: Prefix to keep in the output.
:param max_N: Maximum number of words to generate between validated segments. <<isles>>
:param isle_indices: Indices of the validated segments. <<isles>>
:param filtered_idx2word: List of candidate words to be the next one to
    generate (after generating fixed_words).
:param unk_indices: Positions of the unknown words.
:param unk_words: Unknown words.
:return: The (detokenized) hypothesis produced by the constrained beam search.
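A hedged usage sketch, mirroring how the HTTP handler in this module drives the sampler; `sampler` stands for an already-constructed NMTSampler instance and the sentences are placeholders:

# Plain translation of a source sentence.
hypothesis = sampler.generate_sample(u'some source sentence')

# Interactive correction: keep the user-validated target prefix and let the
# constrained beam search complete the rest of the hypothesis.
hypothesis = sampler.generate_sample(u'some source sentence',
                                     validated_prefix=u'validated target prefix ')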
https://github.com/lvapeab/nmt-keras/blob/865613dd5d23eb87674b602e690f1df9d61cfee8/demo-web/sample_server.py#L207-L350
from __future__ import print_function import argparse import ast import logging import time import sys import os import copy from http.server import HTTPServer, BaseHTTPRequestHandler import urllib from collections import OrderedDict sys.path.append(os.path.dirname(os.path.realpath(__file__)) + '/../') from keras_wrapper.model_ensemble import InteractiveBeamSearchSampler from keras_wrapper.cnn_model import loadModel, updateModel from keras_wrapper.dataset import loadDataset from keras_wrapper.extra.isles_utils import * from keras_wrapper.extra.read_write import pkl2dict from keras_wrapper.online_trainer import OnlineTrainer from keras_wrapper.utils import decode_predictions_beam_search from nmt_keras.model_zoo import TranslationModel from utils.utils import update_parameters from config_online import load_parameters as load_parameters_online from config import load_parameters logger = logging.getLogger(__name__) class NMTHandler(BaseHTTPRequestHandler): def do_GET(self): do_GET_start_time = time.time() args = self.path.split('?')[1] args = args.split('&') source_sentence = None validated_prefix = None learn = False beam_size = 6 length_norm = 0. coverage_norm = 0. alpha_norm = 1. args_processing_start_time = time.time() print (args) for aa in args: cc = aa.split('=') if cc[0] == 'source': source_sentence = urllib.parse.unquote_plus(cc[1]) if cc[0] == 'prefix': validated_prefix = cc[1] validated_prefix = urllib.parse.unquote_plus(validated_prefix) if cc[0] == 'learn': learn = cc[1] learn = urllib.parse.unquote_plus(learn) learn = eval(learn) if cc[0] == 'beam_size': beam_size = cc[1] beam_size = urllib.parse.unquote_plus(beam_size) beam_size = int(beam_size) self.server.sampler.params_prediction['beam_size'] = beam_size if cc[0] == 'length_norm': length_norm = cc[1] length_norm = urllib.parse.unquote_plus(length_norm) length_norm = float(length_norm) self.server.sampler.params_prediction['length_norm_factor'] = length_norm if cc[0] == 'coverage_norm': coverage_norm = cc[1] coverage_norm = urllib.parse.unquote_plus(coverage_norm) coverage_norm = float(coverage_norm) self.server.sampler.params_prediction['coverage_norm_factor'] = coverage_norm if cc[0] == 'alpha_norm': alpha_norm = cc[1] alpha_norm = urllib.parse.unquote_plus(alpha_norm) alpha_norm = float(alpha_norm) self.server.sampler.params_prediction['alpha_factor'] = alpha_norm if source_sentence is None: self.send_response(400) return source_sentence = urllib.parse.unquote_plus(source_sentence) args_processing_end_time = time.time() logger.log(2, 'args_processing time: %.6f' % (args_processing_end_time - args_processing_start_time)) generate_sample_start_time = time.time() if learn and validated_prefix is not None and source_sentence is not None: self.server.sampler.learn_from_sample(source_sentence, validated_prefix) self.send_response(200) else: hypothesis = self.server.sampler.generate_sample(source_sentence, validated_prefix=validated_prefix) response = hypothesis + u'\n' generate_sample_end_time = time.time() logger.log(2, 'args_processing time: %.6f' % (generate_sample_end_time - generate_sample_start_time)) send_response_start_time = time.time() self.send_response(200) self.send_header("Content-type", "text/html") self.end_headers() self.wfile.write(response.encode('utf-8')) send_response_end_time = time.time() logger.log(2, 'send_response time: %.6f' % (send_response_end_time - send_response_start_time)) do_GET_end_time = time.time() logger.log(2, 'do_GET time: %.6f' % (do_GET_end_time - do_GET_start_time)) def parse_args(): 
parser = argparse.ArgumentParser("Interactive neural machine translation server.") parser.add_argument("-ds", "--dataset", required=True, help="Dataset instance") parser.add_argument("-v", "--verbose", required=False, default=0, type=int, help="Verbosity level") parser.add_argument("-c", "--config", required=False, help="Config pkl for loading the model configuration. " "If not specified, hyperparameters " "are read from config.py") parser.add_argument("-m", "--models", nargs="+", required=True, help="Path to the models") parser.add_argument("-ch", "--changes", nargs="*", help="Changes to the config. Following the syntax Key=Value", default="") parser.add_argument("-o", "--online", action='store_true', default=False, required=False, help="Online training mode after postedition. ") parser.add_argument("-a", "--address", help="Server address", type=str, default='') parser.add_argument("-p", "--port", help="Port to use", type=int, default=6542) parser.add_argument("-l", "--logging-level", help="Logging level: \t 0: Only info messages." "\t 1: Debug messages." "\t 2: Time monitoring messages.", type=int, default=0) parser.add_argument("-eos", "--eos-symbol", help="End-of-sentence symbol", type=str, default='/') return parser.parse_args() class NMTSampler: def __init__(self, models, dataset, params, params_prediction, params_training, model_tokenize_f, model_detokenize_f, general_tokenize_f, general_detokenize_f, mapping=None, word2index_x=None, word2index_y=None, index2word_y=None, excluded_words=None, unk_id=1, eos_symbol='/', online=False, verbose=0): self.models = models self.dataset = dataset self.params = params self.params_prediction = params_prediction self.params_training = params_training self.model_tokenize_f = model_tokenize_f self.model_detokenize_f = model_detokenize_f self.general_tokenize_f = general_tokenize_f self.general_detokenize_f = general_detokenize_f self.mapping = mapping self.excluded_words = excluded_words self.verbose = verbose self.eos_symbol = eos_symbol self.word2index_x = word2index_x if word2index_x is not None else dataset.vocabulary[params_prediction['INPUTS_IDS_DATASET'][0]]['words2idx'] self.index2word_y = index2word_y if index2word_y is not None else dataset.vocabulary[params_prediction['OUTPUTS_IDS_DATASET'][0]]['idx2words'] self.word2index_y = word2index_y if word2index_y is not None else dataset.vocabulary[params_prediction['OUTPUTS_IDS_DATASET'][0]]['words2idx'] self.unk_id = unk_id self.interactive_beam_searcher = InteractiveBeamSearchSampler(self.models, self.dataset, self.params_prediction, excluded_words=self.excluded_words, verbose=self.verbose) logger.info('Compiling sampler...') self.generate_sample('i') logger.info('Done.') self.online = online if self.online: self.online_trainer = OnlineTrainer(self.models, self.dataset, None, None, params_training, verbose=self.verbose) for i, nmt_model in enumerate(self.models): logger.info('Compiling model %d...' % i) nmt_model.model._make_train_function() logger.info('Done.') else: self.online_trainer = None
MIT License
fogleman/minecraft
main.py
Model.add_block
python
def add_block(self, position, texture, immediate=True):
    if position in self.world:
        self.remove_block(position, immediate)
    self.world[position] = texture
    self.sectors.setdefault(sectorize(position), []).append(position)
    if immediate:
        if self.exposed(position):
            self.show_block(position)
        self.check_neighbors(position)
Add a block with the given `texture` and `position` to the world.

Parameters
----------
position : tuple of len 3
    The (x, y, z) position of the block to add.
texture : list of len 3
    The coordinates of the texture squares. Use `tex_coords()` to
    generate.
immediate : bool
    Whether or not to draw the block immediately.
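A brief usage sketch; it assumes a running pyglet context and the texture.png atlas, since Model's constructor loads the texture and builds the starting terrain:

model = Model()
model.add_block((0, 0, 0), BRICK)                  # drawn right away if exposed
model.add_block((1, 0, 0), SAND, immediate=False)  # added to the world, not drawn yet
model.add_block((0, 0, 0), STONE)                  # replaces the existing brick block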
https://github.com/fogleman/minecraft/blob/65c759c1f43798278e13f6c1a0e038aeeac77656/main.py#L233-L254
from __future__ import division import sys import math import random import time from collections import deque from pyglet import image from pyglet.gl import * from pyglet.graphics import TextureGroup from pyglet.window import key, mouse TICKS_PER_SEC = 60 SECTOR_SIZE = 16 WALKING_SPEED = 5 FLYING_SPEED = 15 GRAVITY = 20.0 MAX_JUMP_HEIGHT = 1.0 JUMP_SPEED = math.sqrt(2 * GRAVITY * MAX_JUMP_HEIGHT) TERMINAL_VELOCITY = 50 PLAYER_HEIGHT = 2 if sys.version_info[0] >= 3: xrange = range def cube_vertices(x, y, z, n): return [ x-n,y+n,z-n, x-n,y+n,z+n, x+n,y+n,z+n, x+n,y+n,z-n, x-n,y-n,z-n, x+n,y-n,z-n, x+n,y-n,z+n, x-n,y-n,z+n, x-n,y-n,z-n, x-n,y-n,z+n, x-n,y+n,z+n, x-n,y+n,z-n, x+n,y-n,z+n, x+n,y-n,z-n, x+n,y+n,z-n, x+n,y+n,z+n, x-n,y-n,z+n, x+n,y-n,z+n, x+n,y+n,z+n, x-n,y+n,z+n, x+n,y-n,z-n, x-n,y-n,z-n, x-n,y+n,z-n, x+n,y+n,z-n, ] def tex_coord(x, y, n=4): m = 1.0 / n dx = x * m dy = y * m return dx, dy, dx + m, dy, dx + m, dy + m, dx, dy + m def tex_coords(top, bottom, side): top = tex_coord(*top) bottom = tex_coord(*bottom) side = tex_coord(*side) result = [] result.extend(top) result.extend(bottom) result.extend(side * 4) return result TEXTURE_PATH = 'texture.png' GRASS = tex_coords((1, 0), (0, 1), (0, 0)) SAND = tex_coords((1, 1), (1, 1), (1, 1)) BRICK = tex_coords((2, 0), (2, 0), (2, 0)) STONE = tex_coords((2, 1), (2, 1), (2, 1)) FACES = [ ( 0, 1, 0), ( 0,-1, 0), (-1, 0, 0), ( 1, 0, 0), ( 0, 0, 1), ( 0, 0,-1), ] def normalize(position): x, y, z = position x, y, z = (int(round(x)), int(round(y)), int(round(z))) return (x, y, z) def sectorize(position): x, y, z = normalize(position) x, y, z = x // SECTOR_SIZE, y // SECTOR_SIZE, z // SECTOR_SIZE return (x, 0, z) class Model(object): def __init__(self): self.batch = pyglet.graphics.Batch() self.group = TextureGroup(image.load(TEXTURE_PATH).get_texture()) self.world = {} self.shown = {} self._shown = {} self.sectors = {} self.queue = deque() self._initialize() def _initialize(self): n = 80 s = 1 y = 0 for x in xrange(-n, n + 1, s): for z in xrange(-n, n + 1, s): self.add_block((x, y - 2, z), GRASS, immediate=False) self.add_block((x, y - 3, z), STONE, immediate=False) if x in (-n, n) or z in (-n, n): for dy in xrange(-2, 3): self.add_block((x, y + dy, z), STONE, immediate=False) o = n - 10 for _ in xrange(120): a = random.randint(-o, o) b = random.randint(-o, o) c = -1 h = random.randint(1, 6) s = random.randint(4, 8) d = 1 t = random.choice([GRASS, SAND, BRICK]) for y in xrange(c, c + h): for x in xrange(a - s, a + s + 1): for z in xrange(b - s, b + s + 1): if (x - a) ** 2 + (z - b) ** 2 > (s + 1) ** 2: continue if (x - 0) ** 2 + (z - 0) ** 2 < 5 ** 2: continue self.add_block((x, y, z), t, immediate=False) s -= d def hit_test(self, position, vector, max_distance=8): m = 8 x, y, z = position dx, dy, dz = vector previous = None for _ in xrange(max_distance * m): key = normalize((x, y, z)) if key != previous and key in self.world: return key, previous previous = key x, y, z = x + dx / m, y + dy / m, z + dz / m return None, None def exposed(self, position): x, y, z = position for dx, dy, dz in FACES: if (x + dx, y + dy, z + dz) not in self.world: return True return False
MIT License
explosion/spacy
spacy/cli/_util.py
upload_file
python
def upload_file(src: Path, dest: Union[str, "Pathy"]) -> None:
    import smart_open

    dest = str(dest)
    with smart_open.open(dest, mode="wb") as output_file:
        with src.open(mode="rb") as input_file:
            output_file.write(input_file.read())
Upload a file.

src (Path): The source path.
dest (Union[str, "Pathy"]): The destination URL to upload to.
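A small usage sketch; the paths, bucket name and URL below are hypothetical, and remote schemes such as s3:// only work if smart_open is installed with the matching extras and credentials are configured:

from pathlib import Path

upload_file(Path("training/model-best.zip"), "s3://my-bucket/models/model-best.zip")
upload_file(Path("project.yml"), "/tmp/project-backup.yml")  # plain local copy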
https://github.com/explosion/spacy/blob/4da2af4e0e522496f9418ae3721262f7e8254050/spacy/cli/_util.py#L334-L345
from typing import Dict, Any, Union, List, Optional, Tuple, Iterable from typing import TYPE_CHECKING, overload import sys import shutil from pathlib import Path from wasabi import msg, Printer import srsly import hashlib import typer from click import NoSuchOption from click.parser import split_arg_string from typer.main import get_command from contextlib import contextmanager from thinc.api import Config, ConfigValidationError, require_gpu from thinc.util import has_cupy, gpu_is_available from configparser import InterpolationError import os from ..compat import Literal from ..schemas import ProjectConfigSchema, validate from ..util import import_file, run_command, make_tempdir, registry, logger from ..util import is_compatible_version, SimpleFrozenDict, ENV_VARS from .. import about if TYPE_CHECKING: from pathy import Pathy SDIST_SUFFIX = ".tar.gz" WHEEL_SUFFIX = "-py3-none-any.whl" PROJECT_FILE = "project.yml" PROJECT_LOCK = "project.lock" COMMAND = "python -m spacy" NAME = "spacy" HELP = """spaCy Command-line Interface DOCS: https://spacy.io/api/cli """ PROJECT_HELP = f"""Command-line interface for spaCy projects and templates. You'd typically start by cloning a project template to a local directory and fetching its assets like datasets etc. See the project's {PROJECT_FILE} for the available commands. """ DEBUG_HELP = """Suite of helpful commands for debugging and profiling. Includes commands to check and validate your config files, training and evaluation data, and custom model implementations. """ INIT_HELP = """Commands for initializing configs and pipeline packages.""" Arg = typer.Argument Opt = typer.Option app = typer.Typer(name=NAME, help=HELP) project_cli = typer.Typer(name="project", help=PROJECT_HELP, no_args_is_help=True) debug_cli = typer.Typer(name="debug", help=DEBUG_HELP, no_args_is_help=True) init_cli = typer.Typer(name="init", help=INIT_HELP, no_args_is_help=True) app.add_typer(project_cli) app.add_typer(debug_cli) app.add_typer(init_cli) def setup_cli() -> None: registry.cli.get_all() command = get_command(app) command(prog_name=COMMAND) def parse_config_overrides( args: List[str], env_var: Optional[str] = ENV_VARS.CONFIG_OVERRIDES ) -> Dict[str, Any]: env_string = os.environ.get(env_var, "") if env_var else "" env_overrides = _parse_overrides(split_arg_string(env_string)) cli_overrides = _parse_overrides(args, is_cli=True) if cli_overrides: keys = [k for k in cli_overrides if k not in env_overrides] logger.debug(f"Config overrides from CLI: {keys}") if env_overrides: logger.debug(f"Config overrides from env variables: {list(env_overrides)}") return {**cli_overrides, **env_overrides} def _parse_overrides(args: List[str], is_cli: bool = False) -> Dict[str, Any]: result = {} while args: opt = args.pop(0) err = f"Invalid config override '{opt}'" if opt.startswith("--"): orig_opt = opt opt = opt.replace("--", "") if "." 
not in opt: if is_cli: raise NoSuchOption(orig_opt) else: msg.fail(f"{err}: can't override top-level sections", exits=1) if "=" in opt: opt, value = opt.split("=", 1) opt = opt.replace("-", "_") else: if not args or args[0].startswith("--"): value = "true" else: value = args.pop(0) result[opt] = _parse_override(value) else: msg.fail(f"{err}: name should start with --", exits=1) return result def _parse_override(value: Any) -> Any: try: return srsly.json_loads(value) except ValueError: return str(value) def load_project_config( path: Path, interpolate: bool = True, overrides: Dict[str, Any] = SimpleFrozenDict() ) -> Dict[str, Any]: config_path = path / PROJECT_FILE if not config_path.exists(): msg.fail(f"Can't find {PROJECT_FILE}", config_path, exits=1) invalid_err = f"Invalid {PROJECT_FILE}. Double-check that the YAML is correct." try: config = srsly.read_yaml(config_path) except ValueError as e: msg.fail(invalid_err, e, exits=1) errors = validate(ProjectConfigSchema, config) if errors: msg.fail(invalid_err) print("\n".join(errors)) sys.exit(1) validate_project_version(config) validate_project_commands(config) for subdir in config.get("directories", []): dir_path = path / subdir if not dir_path.exists(): dir_path.mkdir(parents=True) if interpolate: err = f"{PROJECT_FILE} validation error" with show_validation_error(title=err, hint_fill=False): config = substitute_project_variables(config, overrides) return config def substitute_project_variables( config: Dict[str, Any], overrides: Dict[str, Any] = SimpleFrozenDict(), key: str = "vars", env_key: str = "env", ) -> Dict[str, Any]: config.setdefault(key, {}) config.setdefault(env_key, {}) for config_var, env_var in config[env_key].items(): config[env_key][config_var] = _parse_override(os.environ.get(env_var, "")) cfg = Config({"project": config, key: config[key], env_key: config[env_key]}) cfg = Config().from_str(cfg.to_str(), overrides=overrides) interpolated = cfg.interpolate() return dict(interpolated["project"]) def validate_project_version(config: Dict[str, Any]) -> None: spacy_version = config.get("spacy_version", None) if spacy_version and not is_compatible_version(about.__version__, spacy_version): err = ( f"The {PROJECT_FILE} specifies a spaCy version range ({spacy_version}) " f"that's not compatible with the version of spaCy you're running " f"({about.__version__}). You can edit version requirement in the " f"{PROJECT_FILE} to load it, but the project may not run as expected." 
) msg.fail(err, exits=1) def validate_project_commands(config: Dict[str, Any]) -> None: command_names = [cmd["name"] for cmd in config.get("commands", [])] workflows = config.get("workflows", {}) duplicates = set([cmd for cmd in command_names if command_names.count(cmd) > 1]) if duplicates: err = f"Duplicate commands defined in {PROJECT_FILE}: {', '.join(duplicates)}" msg.fail(err, exits=1) for workflow_name, workflow_steps in workflows.items(): if workflow_name in command_names: err = f"Can't use workflow name '{workflow_name}': name already exists as a command" msg.fail(err, exits=1) for step in workflow_steps: if step not in command_names: msg.fail( f"Unknown command specified in workflow '{workflow_name}': {step}", f"Workflows can only refer to commands defined in the 'commands' " f"section of the {PROJECT_FILE}.", exits=1, ) def get_hash(data, exclude: Iterable[str] = tuple()) -> str: if isinstance(data, dict): data = {k: v for k, v in data.items() if k not in exclude} data_str = srsly.json_dumps(data, sort_keys=True).encode("utf8") return hashlib.md5(data_str).hexdigest() def get_checksum(path: Union[Path, str]) -> str: path = Path(path) if not (path.is_file() or path.is_dir()): msg.fail(f"Can't get checksum for {path}: not a file or directory", exits=1) if path.is_file(): return hashlib.md5(Path(path).read_bytes()).hexdigest() else: dir_checksum = hashlib.md5() for sub_file in sorted(fp for fp in path.rglob("*") if fp.is_file()): dir_checksum.update(sub_file.read_bytes()) return dir_checksum.hexdigest() @contextmanager def show_validation_error( file_path: Optional[Union[str, Path]] = None, *, title: Optional[str] = None, desc: str = "", show_config: Optional[bool] = None, hint_fill: bool = True, ): try: yield except ConfigValidationError as e: title = title if title is not None else e.title if e.desc: desc = f"{e.desc}" if not desc else f"{e.desc}\n\n{desc}" err = e.from_error(e, title="", desc=desc, show_config=show_config) msg.fail(title) print(err.text.strip()) if hint_fill and "value_error.missing" in err.error_types: config_path = ( file_path if file_path is not None and str(file_path) != "-" else "config.cfg" ) msg.text( "If your config contains missing values, you can run the 'init " "fill-config' command to fill in all the defaults, if possible:", spaced=True, ) print(f"{COMMAND} init fill-config {config_path} {config_path} \n") sys.exit(1) except InterpolationError as e: msg.fail("Config validation error", e, exits=1) def import_code(code_path: Optional[Union[Path, str]]) -> None: if code_path is not None: if not Path(code_path).exists(): msg.fail("Path to Python code not found", code_path, exits=1) try: import_file("python_code", code_path) except Exception as e: msg.fail(f"Couldn't load Python code: {code_path}", e, exits=1)
MIT License
pegase745/sqlalchemy-datatables
datatables/search_methods.py
parse_query_value
python
def parse_query_value(combined_value):
    split = len(combined_value) - len(combined_value.lstrip("<>="))
    operator = combined_value[:split]
    if operator == "":
        operator = "="
    try:
        operator_func = search_operators[operator]
    except KeyError:
        raise ValueError(
            "Numeric query should start with operator, choose from %s"
            % ", ".join(search_operators.keys())
        )
    value = combined_value[split:].strip()
    return operator_func, value
Parse value in form of '>value' to a lambda and a value.
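A quick illustration (the User.age column is hypothetical); the returned callable is one of the lambdas from search_operators and is meant to be applied to a SQLAlchemy expression:

op, value = parse_query_value(">=10")   # op is search_operators[">="], value == "10"
op, value = parse_query_value("42")     # no prefix: defaults to equality, value == "42"

criterion = op(User.age, int(value))    # e.g. User.age == 42, usable in a filter()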
https://github.com/pegase745/sqlalchemy-datatables/blob/e0af01f347e35eb063ed826e7e6f7ec699d9c5ef/datatables/search_methods.py#L18-L32
import datetime
import logging

from dateutil.parser import parse as date_parse
from sqlalchemy import Text

logger = logging.getLogger(__name__)

search_operators = {
    "=": lambda expr, value: expr == value,
    ">": lambda expr, value: expr > value,
    ">=": lambda expr, value: expr >= value,
    "<": lambda expr, value: expr < value,
    "<=": lambda expr, value: expr <= value,
}
MIT License
bioconda/bioconda-utils
bioconda_utils/bot/views.py
auth_github
python
async def auth_github(request):
    if 'error' in request.query:
        logger.error(request.query)
        web.HTTPUnauthorized(body="Encountered an error. ")
    session = await get_session(request)
    nexturl = request.query.get('next') or '/'
    baseurl = BOT_BASEURL + "/auth/github?next=" + nexturl
    try:
        ghappapi = request.app['ghappapi']
        ghapi = await ghappapi.oauth_github_user(baseurl, session, request.query)
        if ghapi.username:
            await remember(request, web.HTTPFound(nexturl), ghapi.token)
            return web.HTTPFound(nexturl)
    except web.HTTPFound:
        raise
    except Exception as exc:
        logger.exception("failed to auth")
    return web.HTTPUnauthorized(body="Could not authenticate your Github account")
View for signing in with Github.

Currently the only authentication method (and probably will remain so).
This will redirect to Github to allow OAUTH authentication if necessary.
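A minimal wiring sketch: the view only becomes reachable once web_routes is attached to an aiohttp application. Session middleware, aiohttp-security setup and the app['ghappapi'] GitHub App API (all configured elsewhere in the bot) are omitted here:

app = web.Application()
app.add_routes(web_routes)
# Browsing to <BOT_BASEURL>/auth/github?next=/status then starts the OAuth round-trip
# and redirects back to /status once the GitHub account is authenticated.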
https://github.com/bioconda/bioconda-utils/blob/df49b2169672255d5937b181cb86fbe08f7ebaaa/bioconda_utils/bot/views.py#L231-L257
import logging from aiohttp import web from aiohttp_session import get_session from aiohttp_security import check_authorized, forget, permits, remember, authorized_userid from aiohttp_jinja2 import template, render_template from .events import event_routes from ..githubhandler import Event from ..circleci import SlackMessage from .worker import capp from .config import APP_SECRET, BOT_BASEURL from .commands import command_routes logger = logging.getLogger(__name__) web_routes = web.RouteTableDef() navigation_bar = [] def add_to_navbar(title): def wrapper(func): route = web_routes[-1] navigation_bar.append((route.path, route.kwargs['name'], title)) return func return wrapper async def check_permission(request, permission, context=None): await check_authorized(request) allowed = await permits(request, permission, context) if not allowed: request['permission_required'] = permission raise web.HTTPForbidden() @web_routes.post('/_gh') async def github_webhook_dispatch(request): try: body = await request.read() secret = APP_SECRET if secret == "IGNORE": logger.error("IGNORING WEBHOOK SECRET (DEBUG MODE)") secret = None event = Event.from_http(request.headers, body, secret=secret) if event.event == "ping": return web.Response(status=200) installation = event.get('installation/id') to_user = event.get('repository/owner/login', None) to_repo = event.get('repository/name', None) action = event.get('action', None) action_msg = '/' + action if action else '' logger.info("Received GH Event '%s%s' (%s) for %s (%s/%s)", event.event, action_msg, event.delivery_id, installation, to_user, to_repo) ghapi = await request.app['ghappapi'].get_github_api( dry_run=False, installation=installation, to_user=to_user, to_repo=to_repo ) try: await event_routes.dispatch(event, ghapi) logger.info("Event '%s%s' (%s) done", event.event, action_msg, event.delivery_id) except Exception: logger.exception("Failed to dispatch %s", event.delivery_id) request.app['gh_rate_limit'] = ghapi.rate_limit return web.Response(status=200) except Exception: logger.exception("Failure in webhook dispatch") return web.Response(status=500) @web_routes.post('/hooks/circleci') async def generic_circleci_dispatch(request): try: body = await request.read() msg = SlackMessage(request.headers, body) logger.info("Got data from Circle: %s", msg) return web.Response(status=200) except Exception: logger.exception("Failure in circle webhook dispatch") return web.Response(status=500) @web_routes.post('/hooks/{source}') async def generic_webhook_dispatch(request): try: source = request.match_info['source'] body = await request.read() logger.error("Got generic webhook for %s", source) logger.error(" Data: %s", body) return web.Response(status=200) except Exception: logger.exception("Failure in generic webhook dispatch") return web.Response(status=500) @add_to_navbar(title="Home") @web_routes.get("/", name="home") @template('bot_index.html') async def show_index(_request): return {} @add_to_navbar(title="Status") @web_routes.get("/status", name="status") @template("bot_status.html") async def show_status(request): await check_permission(request, 'bioconda') worker_status = capp.control.inspect(timeout=0.1) if not worker_status: return { 'error': 'Could not get worker status' } alive = worker_status.ping() if not alive: return { 'error': 'No workers found' } return { 'workers': { worker: { 'active': worker_status.active(worker), 'reserved': worker_status.reserved(worker), } for worker in sorted(alive.keys()) } } @web_routes.get('/logout', name="logout") async 
def logout(request): await check_authorized(request) nexturl = request.query.get('next', '/') response = web.HTTPFound(nexturl) await forget(request, response) return response @web_routes.get('/login') async def login(request): return web.HTTPFound('/auth/github') @web_routes.get('/auth/github', name="login")
MIT License
cn-uofbasel/picn
PiCN/LayerStack/LayerStack.py
LayerStack.start_all
python
def start_all(self):
    self.__started = True
    [l.start_process() for l in self.layers]
Utility function to start all LayerProcesses managed by the LayerStack.
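A usage sketch; UpperLayer and LowerLayer stand in for concrete LayerProcess subclasses:

stack = LayerStack([UpperLayer(), LowerLayer()])
stack.start_all()   # calls start_process() on every layer in the stack
# ... exchange packets through the stack's queues ...
stack.close_all()   # close the inter-layer queues when finished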
https://github.com/cn-uofbasel/picn/blob/64ed40242657238e9f1d522d5873173f0b93a30e/PiCN/LayerStack/LayerStack.py#L95-L100
import multiprocessing from typing import List from PiCN.Processes import LayerProcess class LayerStack(object): def __init__(self, layers: List[LayerProcess]): self.layers: List[LayerProcess] = [] self.queues: List[multiprocessing.Queue] = [] self._queue_to_higher = multiprocessing.Queue() self._queue_from_higher = multiprocessing.Queue() self._queue_to_lower = multiprocessing.Queue() self._queue_from_lower = multiprocessing.Queue() self.__started = False if len(layers) == 0: raise ValueError('Can\'t have an empty LayerStack') for i in range(len(layers) - 1): upper = layers[i] lower = layers[i + 1] q_to_upper = multiprocessing.Queue() q_to_lower = multiprocessing.Queue() upper.queue_to_lower = q_to_lower upper.queue_from_lower = q_to_upper lower.queue_to_higher = q_to_upper lower.queue_from_higher = q_to_lower self.layers.append(upper) self.queues.append(q_to_upper) self.queues.append(q_to_lower) self.layers.append(layers[len(layers)-1]) self.layers[0].queue_to_higher = self.queue_to_higher self.layers[0].queue_from_higher = self.queue_from_higher self.layers[len(self.layers)-1].queue_to_lower = self.queue_to_lower self.layers[len(self.layers)-1].queue_from_lower = self.queue_from_lower def insert(self, layer: LayerProcess, on_top_of: LayerProcess = None, below_of: LayerProcess = None): if self.__started: raise multiprocessing.ProcessError('LayerStack should not be changed after its processes were started.') if layer is None: raise TypeError('Layer is None.') if (on_top_of is None) == (below_of is None): raise TypeError('Needs either on_top_of xor below_of') insert_above: bool = on_top_of is not None ref_layer = on_top_of if insert_above else below_of for i in range(len(self.layers)): if self.layers[i] == ref_layer: if insert_above: self.__insert(layer, i) else: self.__insert(layer, i + 1) return raise ValueError('Reference layer is not in the layer stack.') def close_all(self): [q.close() for q in self.queues] self._queue_to_higher.close() self._queue_from_higher.close() self._queue_to_lower.close() self._queue_from_lower.close()
BSD 3-Clause New or Revised License
vincentstimper/normalizing-flows
normflow/distributions.py
ImagePrior.__init__
python
def __init__(self, image, x_range=[-3, 3], y_range=[-3, 3], eps=1.e-10):
    super().__init__()
    image_ = np.flip(image, 0).transpose() + eps
    self.image_cpu = torch.tensor(image_ / np.max(image_))
    self.image_size_cpu = self.image_cpu.size()
    self.x_range = torch.tensor(x_range)
    self.y_range = torch.tensor(y_range)

    self.register_buffer('image', self.image_cpu)
    self.register_buffer('image_size', torch.tensor(self.image_size_cpu).unsqueeze(0))
    self.register_buffer('density', torch.log(self.image_cpu / torch.sum(self.image_cpu)))
    self.register_buffer('scale', torch.tensor([[self.x_range[1] - self.x_range[0],
                                                 self.y_range[1] - self.y_range[0]]]))
    self.register_buffer('shift', torch.tensor([[self.x_range[0], self.y_range[0]]]))
Constructor

:param image: image as np matrix
:param x_range: x range to position image at
:param y_range: y range to position image at
:param eps: small value to add to image to avoid log(0) problems
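A small construction sketch; any non-negative 2D array can serve as the image (a random one is used here purely for illustration):

import numpy as np

img = np.random.rand(64, 64)
prior = ImagePrior(img, x_range=[-3, 3], y_range=[-3, 3])

prior.density.shape   # torch.Size([64, 64]) -- normalized log-density buffer
prior.scale           # tensor([[6, 6]]) -- spans of the x and y ranges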
https://github.com/vincentstimper/normalizing-flows/blob/8ee48560a864ac9821c9ea5fef8104c7d8c5b038/normflow/distributions.py#L691-L711
import torch import torch.nn as nn import numpy as np from . import flows class BaseDistribution(nn.Module): def __init__(self): super().__init__() def forward(self, num_samples=1): raise NotImplementedError def log_prob(self, z): raise NotImplementedError class DiagGaussian(BaseDistribution): def __init__(self, shape, trainable=True): super().__init__() if isinstance(shape, int): shape = (shape,) self.shape = shape self.n_dim = len(shape) self.d = np.prod(shape) if trainable: self.loc = nn.Parameter(torch.zeros(1, *self.shape)) self.log_scale = nn.Parameter(torch.zeros(1, *self.shape)) else: self.register_buffer("loc", torch.zeros(1, *self.shape)) self.register_buffer("log_scale", torch.zeros(1, *self.shape)) self.temperature = None def forward(self, num_samples=1): eps = torch.randn((num_samples,) + self.shape, dtype=self.loc.dtype, device=self.loc.device) if self.temperature is None: log_scale = self.log_scale else: log_scale = self.log_scale + np.log(self.temperature) z = self.loc + torch.exp(log_scale) * eps log_p = - 0.5 * self.d * np.log(2 * np.pi) - torch.sum(log_scale + 0.5 * torch.pow(eps, 2), list(range(1, self.n_dim + 1))) return z, log_p def log_prob(self, z): if self.temperature is None: log_scale = self.log_scale else: log_scale = self.log_scale + np.log(self.temperature) log_p = - 0.5 * self.d * np.log(2 * np.pi) - torch.sum(log_scale + 0.5 * torch.pow((z - self.loc) / torch.exp(log_scale), 2), list(range(1, self.n_dim + 1))) return log_p class ClassCondDiagGaussian(BaseDistribution): def __init__(self, shape, num_classes): super().__init__() if isinstance(shape, int): shape = (shape,) self.shape = shape self.n_dim = len(shape) self.perm = [self.n_dim] + list(range(self.n_dim)) self.d = np.prod(shape) self.num_classes = num_classes self.loc = nn.Parameter(torch.zeros(*self.shape, num_classes)) self.log_scale = nn.Parameter(torch.zeros(*self.shape, num_classes)) self.temperature = None def forward(self, num_samples=1, y=None): if y is not None: num_samples = len(y) else: y = torch.randint(self.num_classes, (num_samples,), device=self.loc.device) if y.dim() == 1: y_onehot = torch.zeros((self.num_classes, num_samples), dtype=self.loc.dtype, device=self.loc.device) y_onehot.scatter_(0, y[None], 1) y = y_onehot else: y = y.t() eps = torch.randn((num_samples,) + self.shape, dtype=self.loc.dtype, device=self.loc.device) loc = (self.loc @ y).permute(*self.perm) log_scale = (self.log_scale @ y).permute(*self.perm) if self.temperature is not None: log_scale = np.log(self.temperature) + log_scale z = loc + torch.exp(log_scale) * eps log_p = - 0.5 * self.d * np.log(2 * np.pi) - torch.sum(log_scale + 0.5 * torch.pow(eps, 2), list(range(1, self.n_dim + 1))) return z, log_p def log_prob(self, z, y): if y.dim() == 1: y_onehot = torch.zeros((self.num_classes, len(y)), dtype=self.loc.dtype, device=self.loc.device) y_onehot.scatter_(0, y[None], 1) y = y_onehot else: y = y.t() loc = (self.loc @ y).permute(*self.perm) log_scale = (self.log_scale @ y).permute(*self.perm) if self.temperature is not None: log_scale = np.log(self.temperature) + log_scale log_p = - 0.5 * self.d * np.log(2 * np.pi) - torch.sum(log_scale + 0.5 * torch.pow((z - loc) / torch.exp(log_scale), 2), list(range(1, self.n_dim + 1))) return log_p class GlowBase(BaseDistribution): def __init__(self, shape, num_classes=None, logscale_factor=3.): super().__init__() if isinstance(shape, int): shape = (shape,) self.shape = shape self.n_dim = len(shape) self.num_pix = np.prod(shape[1:]) self.d = np.prod(shape) self.sum_dim = 
list(range(1, self.n_dim + 1)) self.num_classes = num_classes self.class_cond = num_classes is not None self.logscale_factor = logscale_factor self.loc = nn.Parameter(torch.zeros(1, self.shape[0], *((self.n_dim - 1) * [1]))) self.loc_logs = nn.Parameter(torch.zeros(1, self.shape[0], *((self.n_dim - 1) * [1]))) self.log_scale = nn.Parameter(torch.zeros(1, self.shape[0], *((self.n_dim - 1) * [1]))) self.log_scale_logs = nn.Parameter(torch.zeros(1, self.shape[0], *((self.n_dim - 1) * [1]))) if self.class_cond: self.loc_cc = nn.Parameter(torch.zeros(self.num_classes, self.shape[0])) self.log_scale_cc = nn.Parameter(torch.zeros(self.num_classes, self.shape[0])) self.temperature = None def forward(self, num_samples=1, y=None): loc = self.loc * torch.exp(self.loc_logs * self.logscale_factor) log_scale = self.log_scale * torch.exp(self.log_scale_logs * self.logscale_factor) if self.class_cond: if y is not None: num_samples = len(y) else: y = torch.randint(self.num_classes, (num_samples,), device=self.loc.device) if y.dim() == 1: y_onehot = torch.zeros((len(y), self.num_classes), dtype=self.loc.dtype, device=self.loc.device) y_onehot.scatter_(1, y[:, None], 1) y = y_onehot loc = loc + (y @ self.loc_cc).view(y.size(0), self.shape[0], *((self.n_dim - 1) * [1])) log_scale = log_scale + (y @ self.log_scale_cc).view(y.size(0), self.shape[0], *((self.n_dim - 1) * [1])) if self.temperature is not None: log_scale = log_scale + np.log(self.temperature) eps = torch.randn((num_samples,) + self.shape, dtype=self.loc.dtype, device=self.loc.device) z = loc + torch.exp(log_scale) * eps log_p = - 0.5 * self.d * np.log(2 * np.pi) - self.num_pix * torch.sum(log_scale, dim=self.sum_dim) - 0.5 * torch.sum(torch.pow(eps, 2), dim=self.sum_dim) return z, log_p def log_prob(self, z, y=None): loc = self.loc * torch.exp(self.loc_logs * self.logscale_factor) log_scale = self.log_scale * torch.exp(self.log_scale_logs * self.logscale_factor) if self.class_cond: if y.dim() == 1: y_onehot = torch.zeros((len(y), self.num_classes), dtype=self.loc.dtype, device=self.loc.device) y_onehot.scatter_(1, y[:, None], 1) y = y_onehot loc = loc + (y @ self.loc_cc).view(y.size(0), self.shape[0], *((self.n_dim - 1) * [1])) log_scale = log_scale + (y @ self.log_scale_cc).view(y.size(0), self.shape[0], *((self.n_dim - 1) * [1])) if self.temperature is not None: log_scale = log_scale + np.log(self.temperature) log_p = - 0.5 * self.d * np.log(2 * np.pi) - self.num_pix * torch.sum(log_scale, dim=self.sum_dim) - 0.5 * torch.sum(torch.pow((z - loc) / torch.exp(log_scale), 2), dim=self.sum_dim) return log_p class AffineGaussian(BaseDistribution): def __init__(self, shape, affine_shape, num_classes=None): super().__init__() self.shape = shape self.n_dim = len(shape) self.d = np.prod(shape) self.sum_dim = list(range(1, self.n_dim + 1)) self.affine_shape = affine_shape self.num_classes = num_classes self.class_cond = num_classes is not None if self.class_cond: self.transform = flows.CCAffineConst(self.affine_shape, self.num_classes) else: self.transform = flows.AffineConstFlow(self.affine_shape) self.temperature = None def forward(self, num_samples=1, y=None): dtype = self.transform.s.dtype device = self.transform.s.device if self.class_cond: if y is not None: num_samples = len(y) else: y = torch.randint(self.num_classes, (num_samples,), device=device) if y.dim() == 1: y_onehot = torch.zeros((len(y), self.num_classes), dtype=dtype, device=device) y_onehot.scatter_(1, y[:, None], 1) y = y_onehot if self.temperature is not None: log_scale = 
np.log(self.temperature) else: log_scale = 0. eps = torch.randn((num_samples,) + self.shape, dtype=dtype, device=device) z = np.exp(log_scale) * eps log_p = - 0.5 * self.d * np.log(2 * np.pi) - self.d * log_scale - 0.5 * torch.sum(torch.pow(eps, 2), dim=self.sum_dim) if self.class_cond: z, log_det = self.transform(z, y) else: z, log_det = self.transform(z) log_p -= log_det return z, log_p def log_prob(self, z, y=None): if self.class_cond: if y.dim() == 1: y_onehot = torch.zeros((len(y), self.num_classes), dtype=self.transform.s.dtype, device=self.transform.s.device) y_onehot.scatter_(1, y[:, None], 1) y = y_onehot if self.temperature is not None: log_scale = np.log(self.temperature) else: log_scale = 0. if self.class_cond: z, log_p = self.transform.inverse(z, y) else: z, log_p = self.transform.inverse(z) z = z / np.exp(log_scale) log_p = log_p - self.d * log_scale - 0.5 * self.d * np.log(2 * np.pi) - 0.5 * torch.sum(torch.pow(z, 2), dim=self.sum_dim) return log_p class GaussianMixture(BaseDistribution): def __init__(self, n_modes, dim, loc=None, scale=None, weights=None, trainable=True): super().__init__() self.n_modes = n_modes self.dim = dim if loc is None: loc = np.random.randn(self.n_modes, self.dim) loc = np.array(loc)[None, ...] if scale is None: scale = np.ones((self.n_modes, self.dim)) scale = np.array(scale)[None, ...] if weights is None: weights = np.ones(self.n_modes) weights = np.array(weights)[None, ...] weights /= weights.sum(1) if trainable: self.loc = nn.Parameter(torch.tensor(1. * loc)) self.log_scale = nn.Parameter(torch.tensor(np.log(1. * scale))) self.weight_scores = nn.Parameter(torch.tensor(np.log(1. * weights))) else: self.register_buffer("loc", torch.tensor(1. * loc)) self.register_buffer("log_scale", torch.tensor(np.log(1. * scale))) self.register_buffer("weight_scores", torch.tensor(np.log(1. 
* weights))) def forward(self, num_samples=1): mode_ind = torch.randint(high=self.n_modes, size=(num_samples,)) mode_1h = torch.zeros((num_samples, self.n_modes), dtype=torch.int64) mode_1h.scatter_(1, mode_ind[:, None], 1) mode_1h = mode_1h[..., None] weights = torch.softmax(self.weight_scores, 1) eps = torch.randn(num_samples, self.dim, dtype=self.loc.dtype, device=self.loc.device) scale_sample = torch.sum(torch.exp(self.log_scale) * mode_1h, 1) loc_sample = torch.sum(self.loc * mode_1h, 1) z = eps * scale_sample + loc_sample log_p = - 0.5 * self.dim * np.log(2 * np.pi) + torch.log(weights) - 0.5 * torch.sum(torch.pow(eps, 2), 1, keepdim=True) - torch.sum(self.log_scale, 2) log_p = torch.logsumexp(log_p, 1) return z, log_p def log_prob(self, z): weights = torch.softmax(self.weight_scores, 1) eps = (z[:, None, :] - self.loc) / torch.exp(self.log_scale) log_p = - 0.5 * self.dim * np.log(2 * np.pi) + torch.log(weights) - 0.5 * torch.sum(torch.pow(eps, 2), 2) - torch.sum(self.log_scale, 2) log_p = torch.logsumexp(log_p, 1) return log_p class GaussianPCA(BaseDistribution): def __init__(self, dim, latent_dim=None, sigma=0.1): super().__init__() self.dim = dim if latent_dim is None: self.latent_dim = dim else: self.latent_dim = latent_dim self.loc = nn.Parameter(torch.zeros(1, dim)) self.W = nn.Parameter(torch.randn(latent_dim, dim)) self.log_sigma = nn.Parameter(torch.tensor(np.log(sigma))) def forward(self, num_samples=1): eps = torch.randn(num_samples, self.latent_dim, dtype=self.loc.dtype, device=self.loc.device) z_ = torch.matmul(eps, self.W) z = z_ + self.loc Sig = torch.matmul(self.W.T, self.W) + torch.exp(self.log_sigma * 2) * torch.eye(self.dim, dtype=self.loc.dtype, device=self.loc.device) log_p = self.dim / 2 * np.log(2 * np.pi) - 0.5 * torch.det(Sig) - 0.5 * torch.sum(z_ * torch.matmul(z_, torch.inverse(Sig)), 1) return z, log_p def log_prob(self, z): z_ = z - self.loc Sig = torch.matmul(self.W.T, self.W) + torch.exp(self.log_sigma * 2) * torch.eye(self.dim, dtype=self.loc.dtype, device=self.loc.device) log_p = self.dim / 2 * np.log(2 * np.pi) - 0.5 * torch.det(Sig) - 0.5 * torch.sum(z_ * torch.matmul(z_, torch.inverse(Sig)), 1) return log_p class BaseEncoder(nn.Module): def __init__(self): super().__init__() def forward(self, x, num_samples=1): raise NotImplementedError def log_prob(self, z, x): raise NotImplementedError class Dirac(BaseEncoder): def __init__(self): super().__init__() def forward(self, x, num_samples=1): z = x.unsqueeze(1).repeat(1, num_samples, 1) log_p = torch.zeros(z.size()[0:2]) return z, log_p def log_prob(self, z, x): log_p = torch.zeros(z.size()[0:2]) return log_p class Uniform(BaseEncoder): def __init__(self, zmin=0.0, zmax=1.0): super().__init__() self.zmin = zmin self.zmax = zmax self.log_p = -torch.log(zmax-zmin) def forward(self, x, num_samples=1): z = x.unsqueeze(1).repeat(1, num_samples, 1).uniform_(min=self.zmin, max=self.zmax) log_p = torch.zeros(z.size()[0:2]).fill_(self.log_p) return z, log_p def log_prob(self, z, x): log_p = torch.zeros(z.size()[0:2]).fill_(self.log_p) return log_p class ConstDiagGaussian(BaseEncoder): def __init__(self, loc, scale): super().__init__() self.d = len(loc) if not torch.is_tensor(loc): loc = torch.tensor(loc).float() if not torch.is_tensor(scale): scale = torch.tensor(scale).float() self.loc = nn.Parameter(loc.reshape((1, 1, self.d))) self.scale = nn.Parameter(scale) def forward(self, x=None, num_samples=1): if x is not None: batch_size = len(x) else: batch_size = 1 eps = torch.randn((batch_size, num_samples, 
self.d), device=x.device) z = self.loc + self.scale * eps log_p = - 0.5 * self.d * np.log(2 * np.pi) - torch.sum(torch.log(self.scale) + 0.5 * torch.pow(eps, 2), 2) return z, log_p def log_prob(self, z, x): if z.dim() == 1: z = z.unsqueeze(0) if z.dim() == 2: z = z.unsqueeze(0) log_p = - 0.5 * self.d * np.log(2 * np.pi) - torch.sum(torch.log(self.scale) + 0.5 * ((z - self.loc) / self.scale) ** 2, 2) return log_p class NNDiagGaussian(BaseEncoder): def __init__(self, net): super().__init__() self.net = net def forward(self, x, num_samples=1): batch_size = len(x) mean_std = self.net(x) n_hidden = mean_std.size()[1] // 2 mean = mean_std[:, :n_hidden, ...].unsqueeze(1) std = torch.exp(0.5 * mean_std[:, n_hidden:(2 * n_hidden), ...].unsqueeze(1)) eps = torch.randn((batch_size, num_samples) + tuple(mean.size()[2:]), device=x.device) z = mean + std * eps log_p = - 0.5 * torch.prod(torch.tensor(z.size()[2:])) * np.log(2 * np.pi) - torch.sum(torch.log(std) + 0.5 * torch.pow(eps, 2), list(range(2, z.dim()))) return z, log_p def log_prob(self, z, x): if z.dim() == 1: z = z.unsqueeze(0) if z.dim() == 2: z = z.unsqueeze(0) mean_std = self.net(x) n_hidden = mean_std.size()[1] // 2 mean = mean_std[:, :n_hidden, ...].unsqueeze(1) var = torch.exp(mean_std[:, n_hidden:(2 * n_hidden), ...].unsqueeze(1)) log_p = - 0.5 * torch.prod(torch.tensor(z.size()[2:])) * np.log(2 * np.pi) - 0.5 * torch.sum(torch.log(var) + (z - mean) ** 2 / var, 2) return log_p class Decoder(nn.Module): def __init__(self): super().__init__() def forward(self, z): raise NotImplementedError def log_prob(self, x, z): raise NotImplementedError class NNDiagGaussianDecoder(Decoder): def __init__(self, net): super().__init__() self.net = net def forward(self, z): z_size = z.size() mean_std = self.net(z.view(-1, *z_size[2:])).view(z_size) n_hidden = mean_std.size()[2] // 2 mean = mean_std[:, :, :n_hidden, ...] std = torch.exp(0.5 * mean_std[:, :, n_hidden:(2 * n_hidden), ...]) return mean, std def log_prob(self, x, z): mean_std = self.net(z.view(-1, *z.size()[2:])).view(*z.size()[:2], x.size(1) * 2, *x.size()[3:]) n_hidden = mean_std.size()[2] // 2 mean = mean_std[:, :, :n_hidden, ...] var = torch.exp(mean_std[:, :, n_hidden:(2 * n_hidden), ...]) log_p = - 0.5 * torch.prod(torch.tensor(z.size()[2:])) * np.log(2 * np.pi) - 0.5 * torch.sum(torch.log(var) + (x.unsqueeze(1) - mean) ** 2 / var, list(range(2, z.dim()))) return log_p class NNBernoulliDecoder(Decoder): def __init__(self, net): super().__init__() self.net = net def forward(self, z): mean = torch.sigmoid(self.net(z)) return mean def log_prob(self, x, z): score = self.net(z) x = x.unsqueeze(1) x = x.repeat(1, z.size()[0] // x.size()[0], *((x.dim() - 2) * [1])).view(-1, *x.size()[2:]) log_sig = lambda a: -torch.relu(-a) - torch.log(1 + torch.exp(-torch.abs(a))) log_p = torch.sum(x * log_sig(score) + (1 - x) * log_sig(-score), list(range(1, x.dim()))) return log_p class PriorDistribution: def __init__(self): raise NotImplementedError def log_prob(self, z): raise NotImplementedError class ImagePrior(nn.Module):
MIT License
ianmiell/shutit
shutit_threads.py
managing_thread_main_simple
python
def managing_thread_main_simple():
    import shutit_global
    last_msg = ''
    while True:
        printed_anything = False
        if shutit_global.shutit_global_object.log_trace_when_idle and time.time() - shutit_global.shutit_global_object.last_log_time > 10:
            this_msg = ''
            this_header = ''
            for thread_id, stack in sys._current_frames().items():
                if thread_id == threading.current_thread().ident:
                    continue
                printed_thread_started = False
                for filename, lineno, name, line in traceback.extract_stack(stack):
                    if not printed_anything:
                        printed_anything = True
                        this_header += '\n='*80 + '\n'
                        this_header += 'STACK TRACES PRINTED ON IDLE: THREAD_ID: ' + str(thread_id) + ' at ' + time.strftime('%c') + '\n'
                        this_header += '='*80 + '\n'
                    if not printed_thread_started:
                        printed_thread_started = True
                    this_msg += '%s:%d:%s' % (filename, lineno, name) + '\n'
                    if line:
                        this_msg += ' %s' % (line,) + '\n'
            if printed_anything:
                this_msg += '='*80 + '\n'
                this_msg += 'STACK TRACES DONE\n'
                this_msg += '='*80 + '\n'
            if this_msg != last_msg:
                print(this_header + this_msg)
                last_msg = this_msg
        time.sleep(5)
Simpler thread to track whether main thread has been quiet for long enough that a thread dump should be printed.
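A minimal, self-contained sketch of the same dump-stacks-when-idle pattern. The `last_log_time` global and the `watchdog` name are assumptions standing in for shutit's global state, not part of the original module:

import sys
import time
import threading
import traceback

last_log_time = time.time()  # assumed stand-in for shutit_global's last_log_time

def watchdog(idle_seconds=10, poll_seconds=5):
    # Dump a stack trace for every thread except this one whenever the
    # process has been quiet for longer than idle_seconds.
    # Real code would refresh last_log_time whenever the application logs something.
    while True:
        if time.time() - last_log_time > idle_seconds:
            for thread_id, stack in sys._current_frames().items():
                if thread_id == threading.current_thread().ident:
                    continue
                print('THREAD %s at %s' % (thread_id, time.strftime('%c')))
                print(''.join(traceback.format_stack(stack)))
        time.sleep(poll_seconds)

# Usually started as a daemon thread so it never blocks interpreter shutdown:
# threading.Thread(target=watchdog, daemon=True).start()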
https://github.com/ianmiell/shutit/blob/509e273584408f01d216b5cc80058c03fe6654f4/shutit_threads.py#L138-L172
import itertools import time import threading import traceback import sys import os from curtsies.input import Input tracker_setup = False def gather_module_paths(): import shutit_global shutit_global_object = shutit_global.shutit_global_object owd = shutit_global_object.owd shutit_module_paths = set() for shutit_object in shutit_global.shutit_global_object.shutit_objects: shutit_module_paths = shutit_module_paths.union(set(shutit_object.host['shutit_module_path'])) if '.' in shutit_module_paths: shutit_module_paths.remove('.') shutit_module_paths.add(owd) for path in shutit_module_paths: if path[0] != '/': shutit_module_paths.remove(path) shutit_module_paths.add(owd + '/' + path) return shutit_module_paths def managing_thread_main(): import shutit_global from shutit_global import SessionPaneLine shutit_global.shutit_global_object.global_thread_lock.acquire() shutit_module_paths = gather_module_paths() shutit_global.shutit_global_object.global_thread_lock.release() shutit_global.shutit_global_object.stacktrace_lines_arr = [SessionPaneLine('',time.time(),'log'),] last_code = [] draw_type = 'default' zoom_state = None while True: with Input() as input_generator: input_char = input_generator.send(0.001) if input_char == 'r': shutit_global.shutit_global_object.lower_pane_rotate_count += 1 elif input_char == '1': if zoom_state == 1: draw_type = 'default' zoom_state = None else: draw_type = 'zoomed1' zoom_state = 1 elif input_char == '2': if zoom_state == 2: draw_type = 'default' zoom_state = None else: draw_type = 'zoomed2' zoom_state = 2 elif input_char == '3': if zoom_state == 3: draw_type = 'default' zoom_state = None else: draw_type = 'zoomed3' zoom_state = 3 elif input_char == '4': if zoom_state == 4: draw_type = 'default' zoom_state = None else: draw_type = 'zoomed4' zoom_state = 4 elif input_char == 'q': draw_type = 'clearscreen' shutit_global.shutit_global_object.pane_manager.draw_screen(draw_type=draw_type) os.system('reset') os._exit(1) if shutit_global.shutit_global_object.ispy3: if not shutit_global.shutit_global_object.global_thread_lock.acquire(blocking=False): time.sleep(0.01) continue else: if not shutit_global.shutit_global_object.global_thread_lock.acquire(False): time.sleep(0.01) continue code = [] for thread_id, stack in sys._current_frames().items(): if thread_id == threading.current_thread().ident: continue for filename, lineno, name, line in traceback.extract_stack(stack): for shutit_module_path in shutit_module_paths: if filename.find(shutit_module_path) == 0: if len(shutit_global.shutit_global_object.stacktrace_lines_arr) == 0 or shutit_global.shutit_global_object.stacktrace_lines_arr[-1] != line: linearrow = '===> ' + str(line) code.append('_' * 80) code.append('=> %s:%d:%s' % (filename, lineno, name)) code.append('%s' % (linearrow,)) from_lineno = lineno - 5 if from_lineno < 0: from_lineno = 0 to_lineno = 10 else: to_lineno = lineno + 5 lineno_count = from_lineno with open(filename, "r") as f: for line in itertools.islice(f, from_lineno, to_lineno): line = line.replace('\t',' ') lineno_count += 1 if lineno_count == lineno: code.append('***' + str(lineno_count) + '> ' + line.rstrip()) else: code.append('===' + str(lineno_count) + '> ' + line.rstrip()) code.append('_' * 80) if code != last_code: for line in code: shutit_global.shutit_global_object.stacktrace_lines_arr.append(SessionPaneLine(line,time.time(),'log')) last_code = code shutit_global.shutit_global_object.pane_manager.draw_screen(draw_type=draw_type) 
shutit_global.shutit_global_object.global_thread_lock.release()
MIT License
pawamoy/aria2p
src/aria2p/client.py
Client.get_params
python
def get_params(*args: Any) -> list:
    return [_ for _ in args if _ is not None]
Build the list of parameters.

This method simply removes the `None` values from the given arguments.

Arguments:
    *args: List of parameters.

Returns:
    A new list, with `None` values filtered out.
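For illustration, a quick check of the filtering behavior with made-up values (the method takes no `self`; it is declared as a staticmethod in the surrounding class):

Client.get_params("2089b05ecca3d829", None, {"max-download-limit": "1K"}, None)
# -> ['2089b05ecca3d829', {'max-download-limit': '1K'}]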
https://github.com/pawamoy/aria2p/blob/6cdc9a1ef5ed0413fffa3be4885f4b5325177660/src/aria2p/client.py#L424-L436
import json from typing import Any, Callable, List, Optional, Union import requests import websocket from loguru import logger from aria2p.types import CallsType, Multicalls2Type from aria2p.utils import SignalHandler DEFAULT_ID = -1 DEFAULT_HOST = "http://localhost" DEFAULT_PORT = 6800 DEFAULT_TIMEOUT: float = 60.0 JSONRPC_PARSER_ERROR = -32700 JSONRPC_INVALID_REQUEST = -32600 JSONRPC_METHOD_NOT_FOUND = -32601 JSONRPC_INVALID_PARAMS = -32602 JSONRPC_INTERNAL_ERROR = -32603 JSONRPC_CODES = { JSONRPC_PARSER_ERROR: "Invalid JSON was received by the server.", JSONRPC_INVALID_REQUEST: "The JSON sent is not a valid Request object.", JSONRPC_METHOD_NOT_FOUND: "The method does not exist / is not available.", JSONRPC_INVALID_PARAMS: "Invalid method parameter(s).", JSONRPC_INTERNAL_ERROR: "Internal JSON-RPC error.", } NOTIFICATION_START = "aria2.onDownloadStart" NOTIFICATION_PAUSE = "aria2.onDownloadPause" NOTIFICATION_STOP = "aria2.onDownloadStop" NOTIFICATION_COMPLETE = "aria2.onDownloadComplete" NOTIFICATION_ERROR = "aria2.onDownloadError" NOTIFICATION_BT_COMPLETE = "aria2.onBtDownloadComplete" NOTIFICATION_TYPES = [ NOTIFICATION_START, NOTIFICATION_PAUSE, NOTIFICATION_STOP, NOTIFICATION_COMPLETE, NOTIFICATION_ERROR, NOTIFICATION_BT_COMPLETE, ] CallReturnType = Union[dict, list, str, int] class ClientException(Exception): def __init__(self, code: int, message: str) -> None: super().__init__() if code in JSONRPC_CODES: message = f"{JSONRPC_CODES[code]}\n{message}" self.code = code self.message = message def __str__(self): return self.message def __bool__(self): return False class Client: ADD_URI = "aria2.addUri" ADD_TORRENT = "aria2.addTorrent" ADD_METALINK = "aria2.addMetalink" REMOVE = "aria2.remove" FORCE_REMOVE = "aria2.forceRemove" PAUSE = "aria2.pause" PAUSE_ALL = "aria2.pauseAll" FORCE_PAUSE = "aria2.forcePause" FORCE_PAUSE_ALL = "aria2.forcePauseAll" UNPAUSE = "aria2.unpause" UNPAUSE_ALL = "aria2.unpauseAll" TELL_STATUS = "aria2.tellStatus" GET_URIS = "aria2.getUris" GET_FILES = "aria2.getFiles" GET_PEERS = "aria2.getPeers" GET_SERVERS = "aria2.getServers" TELL_ACTIVE = "aria2.tellActive" TELL_WAITING = "aria2.tellWaiting" TELL_STOPPED = "aria2.tellStopped" CHANGE_POSITION = "aria2.changePosition" CHANGE_URI = "aria2.changeUri" GET_OPTION = "aria2.getOption" CHANGE_OPTION = "aria2.changeOption" GET_GLOBAL_OPTION = "aria2.getGlobalOption" CHANGE_GLOBAL_OPTION = "aria2.changeGlobalOption" GET_GLOBAL_STAT = "aria2.getGlobalStat" PURGE_DOWNLOAD_RESULT = "aria2.purgeDownloadResult" REMOVE_DOWNLOAD_RESULT = "aria2.removeDownloadResult" GET_VERSION = "aria2.getVersion" GET_SESSION_INFO = "aria2.getSessionInfo" SHUTDOWN = "aria2.shutdown" FORCE_SHUTDOWN = "aria2.forceShutdown" SAVE_SESSION = "aria2.saveSession" MULTICALL = "system.multicall" LIST_METHODS = "system.listMethods" LIST_NOTIFICATIONS = "system.listNotifications" METHODS = [ ADD_URI, ADD_TORRENT, ADD_METALINK, REMOVE, FORCE_REMOVE, PAUSE, PAUSE_ALL, FORCE_PAUSE, FORCE_PAUSE_ALL, UNPAUSE, UNPAUSE_ALL, TELL_STATUS, GET_URIS, GET_FILES, GET_PEERS, GET_SERVERS, TELL_ACTIVE, TELL_WAITING, TELL_STOPPED, CHANGE_POSITION, CHANGE_URI, GET_OPTION, CHANGE_OPTION, GET_GLOBAL_OPTION, CHANGE_GLOBAL_OPTION, GET_GLOBAL_STAT, PURGE_DOWNLOAD_RESULT, REMOVE_DOWNLOAD_RESULT, GET_VERSION, GET_SESSION_INFO, SHUTDOWN, FORCE_SHUTDOWN, SAVE_SESSION, MULTICALL, LIST_METHODS, LIST_NOTIFICATIONS, ] def __init__( self, host: str = DEFAULT_HOST, port: int = DEFAULT_PORT, secret: str = "", timeout: float = DEFAULT_TIMEOUT, ) -> None: host = host.rstrip("/") 
self.host = host self.port = port self.secret = secret self.timeout = timeout self.listening = False def __str__(self): return self.server def __repr__(self): return f"Client(host='{self.host}', port={self.port}, secret='********')" @property def server(self) -> str: return f"{self.host}:{self.port}/jsonrpc" @property def ws_server(self) -> str: return f"ws{self.host[4:]}:{self.port}/jsonrpc" def call( self, method: str, params: List[Any] = None, msg_id: Union[int, str] = None, insert_secret: bool = True, ) -> CallReturnType: params = self.get_params(*(params or [])) if insert_secret and self.secret: if method.startswith("aria2."): params.insert(0, f"token:{self.secret}") elif method == self.MULTICALL: for param in params[0]: param["params"].insert(0, f"token:{self.secret}") payload: str = self.get_payload(method, params, msg_id=msg_id) return self.res_or_raise(self.post(payload)) def batch_call( self, calls: CallsType, insert_secret: bool = True, ) -> List[CallReturnType]: payloads = [] for method, params, msg_id in calls: params = self.get_params(*params) if insert_secret and self.secret and method.startswith("aria2."): params.insert(0, f"token:{self.secret}") payloads.append(self.get_payload(method, params, msg_id, as_json=False)) payload: str = json.dumps(payloads) responses = self.post(payload) return [self.res_or_raise(resp) for resp in responses] def multicall2(self, calls: Multicalls2Type, insert_secret: bool = True) -> CallReturnType: multicall_params = [] for method, params in calls: params = self.get_params(*params) if insert_secret and self.secret and method.startswith("aria2."): params.insert(0, f"token:{self.secret}") multicall_params.append({"methodName": method, "params": params}) payload: str = self.get_payload(self.MULTICALL, [multicall_params]) return self.res_or_raise(self.post(payload)) def post(self, payload: str) -> dict: return requests.post(self.server, data=payload, timeout=self.timeout).json() @staticmethod def response_as_exception(response: dict) -> ClientException: return ClientException(response["error"]["code"], response["error"]["message"]) @staticmethod def res_or_raise(response: dict) -> CallReturnType: if "error" in response: raise Client.response_as_exception(response) return response["result"] @staticmethod def get_payload( method, params: List[Any] = None, msg_id: Union[int, str] = None, as_json: bool = True, ) -> Union[str, dict]: payload = {"jsonrpc": "2.0", "method": method} if msg_id is not None: payload["id"] = msg_id else: payload["id"] = DEFAULT_ID if params: payload["params"] = params return json.dumps(payload) if as_json else payload @staticmethod
ISC License
unishared/videonotes
lib/evernote/edam/notestore/NoteStore.py
Iface.listTagsByNotebook
python
def listTagsByNotebook(self, authenticationToken, notebookGuid):
    pass
Returns a list of the tags that are applied to at least one note within the provided notebook. If the notebook is public, the authenticationToken may be ignored. @param notebookGuid the GUID of the notebook to use to find tags @throws EDAMNotFoundException <ul> <li> "Notebook.guid" - notebook not found by GUID </li> </ul> Parameters: - authenticationToken - notebookGuid
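A hedged usage sketch: `Iface` is only the abstract interface, so a call would normally go through the Thrift-generated `Client` in the same module, bound to an HTTP transport. The note-store URL, token, and GUID below are placeholders, and this wiring is an assumption about typical Thrift usage rather than code from this repository:

from thrift.transport import THttpClient
from thrift.protocol import TBinaryProtocol

note_store_url = "https://www.evernote.com/shard/s1/notestore"  # placeholder
auth_token = "AUTH_TOKEN"                                        # placeholder

transport = THttpClient.THttpClient(note_store_url)
protocol = TBinaryProtocol.TBinaryProtocol(transport)
note_store = Client(protocol)  # the concrete client generated alongside Iface

tags = note_store.listTagsByNotebook(auth_token, "notebook-guid")  # placeholder GUID
for tag in tags:
    print(tag.name)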
https://github.com/unishared/videonotes/blob/803cdd97b90823fb17f50dd55999aa7d1fec6c3a/lib/evernote/edam/notestore/NoteStore.py#L370-L388
from thrift.Thrift import TType, TMessageType, TException, TApplicationException from ttypes import * from thrift.Thrift import TProcessor from thrift.transport import TTransport from thrift.protocol import TBinaryProtocol, TProtocol try: from thrift.protocol import fastbinary except: fastbinary = None class Iface(object): def getSyncState(self, authenticationToken): pass def getSyncStateWithMetrics(self, authenticationToken, clientMetrics): pass def getSyncChunk(self, authenticationToken, afterUSN, maxEntries, fullSyncOnly): pass def getFilteredSyncChunk(self, authenticationToken, afterUSN, maxEntries, filter): pass def getLinkedNotebookSyncState(self, authenticationToken, linkedNotebook): pass def getLinkedNotebookSyncChunk(self, authenticationToken, linkedNotebook, afterUSN, maxEntries, fullSyncOnly): pass def listNotebooks(self, authenticationToken): pass def getNotebook(self, authenticationToken, guid): pass def getDefaultNotebook(self, authenticationToken): pass def createNotebook(self, authenticationToken, notebook): pass def updateNotebook(self, authenticationToken, notebook): pass def expungeNotebook(self, authenticationToken, guid): pass def listTags(self, authenticationToken): pass
MIT License
reliaqualassociates/ramstk
src/ramstk/views/gtk3/widgets/baseview.py
RAMSTKModuleView.__init__
python
def __init__(
    self, configuration: RAMSTKUserConfiguration, logger: RAMSTKLogManager
) -> None:
    super().__init__(configuration, logger)

    self._dic_icons["insert_part"] = (
        self.RAMSTK_USER_CONFIGURATION.RAMSTK_ICON_DIR + "/32x32/insert_part.png"
    )
    self._dic_key_index: Dict[str, int] = {}

    self._lst_callbacks.insert(0, super().do_request_insert_sibling)
    self._lst_callbacks.insert(1, super().do_request_delete)

    self._lst_icons.insert(0, "add")
    self._lst_icons.insert(1, "remove")
Initialize the RAMSTKModuleView meta-class.

:param configuration: the RAMSTKUserConfiguration class instance.
:param logger: the RAMSTKLogManager class instance.
https://github.com/reliaqualassociates/ramstk/blob/ffec5a107424914cf0026c6dfe26369c221f79f9/src/ramstk/views/gtk3/widgets/baseview.py#L770-L798
import locale from typing import Any, Dict, List, Tuple import treelib from pubsub import pub from ramstk.configuration import RAMSTKUserConfiguration from ramstk.logger import RAMSTKLogManager from ramstk.views.gtk3 import Gdk, Gtk, _ from .button import do_make_buttonbox from .dialog import RAMSTKMessageDialog from .label import RAMSTKLabel from .panel import RAMSTKPanel from .treeview import RAMSTKTreeView class RAMSTKBaseView(Gtk.HBox): _pixbuf: bool = False dic_tab_position = { "left": Gtk.PositionType.LEFT, "right": Gtk.PositionType.RIGHT, "top": Gtk.PositionType.TOP, "bottom": Gtk.PositionType.BOTTOM, } RAMSTK_USER_CONFIGURATION: RAMSTKUserConfiguration = None def __init__( self, configuration: RAMSTKUserConfiguration, logger: RAMSTKLogManager ) -> None: super().__init__() self.RAMSTK_USER_CONFIGURATION = configuration self.RAMSTK_LOGGER = logger self.RAMSTK_LOGGER.do_create_logger( __name__, self.RAMSTK_USER_CONFIGURATION.RAMSTK_LOGLEVEL, to_tty=False ) self._dic_icons: Dict[str, str] = self.__set_icons() self._lst_callbacks: List[object] = [ self.do_request_update, self.do_request_update_all, ] self._lst_icons: List[str] = [ "save", "save-all", ] self._lst_mnu_labels: List[str] = [ _("Save"), _("Save All"), ] self._lst_tooltips: List[str] = [] self._lst_handler_id: List[int] = [] self._lst_layouts: List[str] = [ "allocation", "failure_definition", "fmea", "function", "hardware", "hazard", "incident", "pof", "requirement", "revision", "similar_item", "software", "stakeholder", "testing", "validation", ] self._img_tab: Gtk.Image = Gtk.Image() self._mission_time: float = float(self.RAMSTK_USER_CONFIGURATION.RAMSTK_MTIME) self._notebook: Gtk.Notebook = Gtk.Notebook() self._parent_id: int = 0 self._pnlPanel: RAMSTKPanel = RAMSTKPanel() self._record_id: int = -1 self._revision_id: int = 0 self._tree_loaded: bool = False self.fmt: str = ( "{0:0." 
+ str(self.RAMSTK_USER_CONFIGURATION.RAMSTK_DEC_PLACES) + "G}" ) self.hbx_tab_label: Gtk.HBox = Gtk.HBox() try: locale.setlocale( locale.LC_ALL, self.RAMSTK_USER_CONFIGURATION.RAMSTK_LOCALE ) except locale.Error as _error: locale.setlocale(locale.LC_ALL, "") self.RAMSTK_LOGGER.do_log_exception(__name__, _error) pub.subscribe(self.do_set_cursor_active, "request_set_cursor_active") pub.subscribe(self.do_set_cursor_active, f"succeed_update_{self._tag}") pub.subscribe(self.do_set_cursor_active, f"succeed_calculate_{self._tag}") pub.subscribe(self.do_set_cursor_active, "succeed_update_all") pub.subscribe(self.do_set_cursor_active_on_fail, f"fail_delete_{self._tag}") pub.subscribe(self.do_set_cursor_active_on_fail, f"fail_insert_{self._tag}") pub.subscribe(self.do_set_cursor_active_on_fail, f"fail_update_{self._tag}") pub.subscribe(self.on_select_revision, "selected_revision") def do_embed_treeview_panel(self) -> None: _fmt_file = ( self.RAMSTK_USER_CONFIGURATION.RAMSTK_CONF_DIR + "/layouts/" + self.RAMSTK_USER_CONFIGURATION.RAMSTK_FORMAT_FILE[self._tag] ) try: _bg_color = self.RAMSTK_USER_CONFIGURATION.RAMSTK_COLORS[self._tag + "bg"] _fg_color = self.RAMSTK_USER_CONFIGURATION.RAMSTK_COLORS[self._tag + "fg"] except KeyError: _bg_color = "#FFFFFF" _fg_color = "#000000" self._pnlPanel.do_make_treeview( bg_color=_bg_color, fg_color=_fg_color, fmt_file=_fmt_file ) self.pack_end(self._pnlPanel, True, True, 0) self.show_all() def do_make_layout(self) -> None: self.make_tab_label(tablabel=self._tablabel, tooltip=self._tabtooltip) self.make_toolbuttons( icons=self._lst_icons, tooltips=self._lst_tooltips, callbacks=self._lst_callbacks, ) def do_make_layout_lr(self) -> Gtk.HPaned: self.do_make_layout() _hpaned: Gtk.HPaned = Gtk.HPaned() self.pack_start(_hpaned, True, True, 0) return _hpaned def do_make_layout_lrr(self) -> Tuple[Gtk.HPaned, Gtk.VPaned]: self.do_make_layout() _hpaned: Gtk.HPaned = Gtk.HPaned() _vpaned_right: Gtk.VPaned = Gtk.VPaned() _hpaned.pack2(_vpaned_right, True, True) self.pack_start(_hpaned, True, True, 0) return _hpaned, _vpaned_right def do_make_layout_llr(self) -> Tuple[Gtk.HPaned, Gtk.VPaned]: self.do_make_layout() _hpaned: Gtk.HPaned = Gtk.HPaned() _vpaned_left: Gtk.VPaned = Gtk.VPaned() _hpaned.pack1(_vpaned_left, True, True) self.pack_start(_hpaned, True, True, 0) return _hpaned, _vpaned_left def do_make_layout_llrr(self) -> Tuple[Gtk.VPaned, Gtk.VPaned]: self.do_make_layout() _hpaned: Gtk.HPaned = Gtk.HPaned() _vpaned_left: Gtk.VPaned = Gtk.VPaned() _vpaned_right: Gtk.VPaned = Gtk.VPaned() _hpaned.pack1(_vpaned_left, True, True) _hpaned.pack2(_vpaned_right, True, True) self.pack_start(_hpaned, True, True, 0) return _vpaned_left, _vpaned_right def do_raise_dialog(self, **kwargs: Any) -> RAMSTKMessageDialog: _debug_msg = kwargs.get("debug_msg", "") _parent = kwargs.get("parent", None) _dialog = RAMSTKMessageDialog(parent=_parent) self.RAMSTK_LOGGER.do_log_debug(__name__, _debug_msg) return _dialog def do_request_delete(self, __button: Gtk.ToolButton) -> None: _parent = self.get_parent().get_parent().get_parent().get_parent().get_parent() _prompt = _( "You are about to delete {1} {0} and all " "data associated with it. Is this really what " "you want to do?" 
).format(self._record_id, self._tag.title()) _dialog = RAMSTKMessageDialog(parent=_parent) _dialog.do_set_message(_prompt) _dialog.do_set_message_type("question") if _dialog.do_run() == Gtk.ResponseType.YES: self.do_set_cursor_busy() pub.sendMessage( f"request_delete_{self._tag}", node_id=self._record_id, ) _dialog.do_destroy() def do_request_insert(self, **kwargs: Any) -> None: _sibling = kwargs.get("sibling", True) self.do_set_cursor_busy() if _sibling: pub.sendMessage( f"request_insert_{self._tag.lower()}", parent_id=self._parent_id, ) else: pub.sendMessage( f"request_insert_{self._tag.lower()}", parent_id=self._record_id, ) def do_request_insert_child(self, __button: Gtk.ToolButton) -> Any: return self.do_request_insert(sibling=False) def do_request_insert_sibling(self, __button: Gtk.ToolButton) -> Any: return self.do_request_insert(sibling=True) def do_request_update(self, __button: Gtk.ToolButton) -> None: self.do_set_cursor_busy() pub.sendMessage(f"request_update_{self._tag}", node_id=self._record_id) def do_request_update_all(self, __button: Gtk.ToolButton) -> None: self.do_set_cursor_busy() pub.sendMessage(f"request_update_all_{self._tag}s") def do_set_cursor(self, cursor: Gdk.CursorType) -> None: try: self.get_parent_window().set_cursor(Gdk.Cursor.new(cursor)) Gdk.flush() except AttributeError: pass def do_set_cursor_active(self, tree: treelib.Tree = "") -> None: self.do_set_cursor(Gdk.CursorType.LEFT_PTR) def do_set_cursor_active_on_fail(self, error_message: str = "") -> None: self.do_set_cursor(Gdk.CursorType.LEFT_PTR) def do_set_cursor_busy(self) -> None: self.do_set_cursor(Gdk.CursorType.WATCH) def make_tab_label(self, **kwargs: Dict[str, Any]) -> None: try: self._img_tab.set_from_file(self._dic_icons["tab"]) self.hbx_tab_label.pack_start(self._img_tab, True, True, 0) except KeyError: pass _label: RAMSTKLabel = RAMSTKLabel(self._tablabel) _label.do_set_properties( height=30, width=-1, justify=Gtk.Justification.CENTER, tooltip=self._tabtooltip, ) self.hbx_tab_label.pack_end(_label, True, True, 0) self.hbx_tab_label.show_all() def make_toolbuttons(self, **kwargs: Dict[str, Any]) -> None: _scrolledwindow = Gtk.ScrolledWindow() _scrolledwindow.set_policy(Gtk.PolicyType.NEVER, Gtk.PolicyType.AUTOMATIC) _scrolledwindow.add_with_viewport(do_make_buttonbox(self, **kwargs)) self.pack_start(_scrolledwindow, False, False, 0) def on_button_press(self, treeview: RAMSTKTreeView, event: Gdk.EventButton) -> None: treeview.handler_block(treeview.dic_handler_id["button-press"]) if event.button == 3: _menu = Gtk.Menu() _menu.popup_at_pointer(event) for _idx, __ in enumerate(self._lst_icons): _menu_item = Gtk.ImageMenuItem() _image = Gtk.Image() _image.set_from_file(self._dic_icons[self._lst_icons[_idx]]) _menu_item.set_label(self._lst_mnu_labels[_idx]) _menu_item.set_image(_image) _menu_item.set_property("use_underline", True) _menu_item.connect( "activate", self._lst_callbacks[_idx], self.RAMSTK_USER_CONFIGURATION, ) _menu_item.show() _menu.append(_menu_item) treeview.handler_unblock(treeview.dic_handler_id["button-press"]) def on_insert(self, node_id: int = 0, tree: treelib.Tree = "") -> None: _data = tree.get_node(node_id).data[self._tag].get_attributes() self._pnlPanel.on_insert(_data) def on_select_revision(self, attributes: Dict[str, Any]) -> None: self._revision_id = attributes["revision_id"] def __set_icons(self) -> Dict[str, str]: return { "action": self.RAMSTK_USER_CONFIGURATION.RAMSTK_ICON_DIR + "/32x32/action.png", "add": self.RAMSTK_USER_CONFIGURATION.RAMSTK_ICON_DIR + 
"/32x32/add.png", "assembly": self.RAMSTK_USER_CONFIGURATION.RAMSTK_ICON_DIR + "/32x32/assembly.png", "calculate": self.RAMSTK_USER_CONFIGURATION.RAMSTK_ICON_DIR + "/32x32/calculate.png", "calculate_all": self.RAMSTK_USER_CONFIGURATION.RAMSTK_ICON_DIR + "/32x32/calculate-all.png", "cancel": self.RAMSTK_USER_CONFIGURATION.RAMSTK_ICON_DIR + "/32x32/cancel.png", "cause": self.RAMSTK_USER_CONFIGURATION.RAMSTK_ICON_DIR + "/32x32/cause.png", "complete": self.RAMSTK_USER_CONFIGURATION.RAMSTK_ICON_DIR + "/32x32/complete.png", "control": self.RAMSTK_USER_CONFIGURATION.RAMSTK_ICON_DIR + "/32x32/control.png", "chart": self.RAMSTK_USER_CONFIGURATION.RAMSTK_ICON_DIR + "/32x32/charts.png", "edit": self.RAMSTK_USER_CONFIGURATION.RAMSTK_ICON_DIR + "/32x32/edit.png", "environment": self.RAMSTK_USER_CONFIGURATION.RAMSTK_ICON_DIR + "/32x32/environment.png", "error": self.RAMSTK_USER_CONFIGURATION.RAMSTK_ICON_DIR + "/32x32/error.png", "export": self.RAMSTK_USER_CONFIGURATION.RAMSTK_ICON_DIR + "/32x32/export.png", "important": self.RAMSTK_USER_CONFIGURATION.RAMSTK_ICON_DIR + "/32x32/important.png", "insert_child": self.RAMSTK_USER_CONFIGURATION.RAMSTK_ICON_DIR + "/32x32/insert_child.png", "insert_sibling": self.RAMSTK_USER_CONFIGURATION.RAMSTK_ICON_DIR + "/32x32/insert_sibling.png", "mechanism": self.RAMSTK_USER_CONFIGURATION.RAMSTK_ICON_DIR + "/32x32/mechanism.png", "mission": self.RAMSTK_USER_CONFIGURATION.RAMSTK_ICON_DIR + "/32x32/mission.png", "mode": self.RAMSTK_USER_CONFIGURATION.RAMSTK_ICON_DIR + "/32x32/mode.png", "none": self.RAMSTK_USER_CONFIGURATION.RAMSTK_ICON_DIR + "/32x32/none.png", "opload": self.RAMSTK_USER_CONFIGURATION.RAMSTK_ICON_DIR + "/32x32/load.png", "opstress": self.RAMSTK_USER_CONFIGURATION.RAMSTK_ICON_DIR + "/32x32/stress.png", "part": self.RAMSTK_USER_CONFIGURATION.RAMSTK_ICON_DIR + "/32x32/part.png", "partial": self.RAMSTK_USER_CONFIGURATION.RAMSTK_ICON_DIR + "/32x32/partial.png", "phase": self.RAMSTK_USER_CONFIGURATION.RAMSTK_ICON_DIR + "/32x32/phase.png", "plot": self.RAMSTK_USER_CONFIGURATION.RAMSTK_ICON_DIR + "/32x32/charts.png", "question": self.RAMSTK_USER_CONFIGURATION.RAMSTK_ICON_DIR + "/32x32/question.png", "refresh-view": self.RAMSTK_USER_CONFIGURATION.RAMSTK_ICON_DIR + "/32x32/view-refresh.png", "remove": self.RAMSTK_USER_CONFIGURATION.RAMSTK_ICON_DIR + "/32x32/remove.png", "rollup": self.RAMSTK_USER_CONFIGURATION.RAMSTK_ICON_DIR + "/32x32/rollup.png", "reports": self.RAMSTK_USER_CONFIGURATION.RAMSTK_ICON_DIR + "/32x32/reports.png", "save": self.RAMSTK_USER_CONFIGURATION.RAMSTK_ICON_DIR + "/32x32/save.png", "save-all": self.RAMSTK_USER_CONFIGURATION.RAMSTK_ICON_DIR + "/32x32/save-all.png", "save-layout": self.RAMSTK_USER_CONFIGURATION.RAMSTK_ICON_DIR + "/32x32/save-layout.png", "testmethod": self.RAMSTK_USER_CONFIGURATION.RAMSTK_ICON_DIR + "/32x32/method.png", "warning": self.RAMSTK_USER_CONFIGURATION.RAMSTK_ICON_DIR + "/32x32/warning.png", } class RAMSTKListView(RAMSTKBaseView): def __init__( self, configuration: RAMSTKUserConfiguration, logger: RAMSTKLogManager ) -> None: super().__init__(configuration, logger) self._lst_callbacks.insert(0, super().do_request_insert_sibling) self._lst_icons.insert(0, "add") self.tab_label: Gtk.Label = Gtk.Label() def do_request_update_all(self, __button: Gtk.ToolButton) -> None: super().do_set_cursor_busy() pub.sendMessage(f"request_update_all_{self._tag}s") def make_ui(self) -> None: super().do_make_layout() super().do_embed_treeview_panel() class RAMSTKModuleView(RAMSTKBaseView):
BSD 3-Clause New or Revised License
simoncozens/fontfeatures
fontFeatures/ttLib/FontFeatures.py
makeTable
python
def makeTable(self, tag, font):
    table = getattr(otTables, tag, None)()
    table.Version = 0x00010000
    table.ScriptList = otTables.ScriptList()
    table.ScriptList.ScriptRecord = []
    table.FeatureList = otTables.FeatureList()
    table.FeatureList.FeatureRecord = []
    table.LookupList = otTables.LookupList()

    stage_map = separate_by_stage(arrangeByScripts(self), tag[1:].lower())
    stage_routines = [x for x in self.routines if x.stage == tag[1:].lower()]
    buildersset = [x.toOTLookup(font, self) for x in stage_routines]

    lookups = []
    builderlist = []
    for builders in buildersset:
        for builder in builders:
            builder.lookup_index = len(lookups)
            builderlist.append(builder)
            lookups.append(builder.build())
    table.LookupList.Lookup = lookups

    feature_indices = {}
    required_feature_indices = {}
    scripts = {}

    sortFeatureTag = lambda f: (f[0][2], f[0][1], f[0][0], f[1])
    for key, lookups in sorted(stage_map.items(), key=sortFeatureTag):
        feature_tag, script, lang = key
        lookup_indices = tuple([builderlist.index(x.routine.__builder) for x in lookups])
        size_feature = tag == "GPOS" and feature_tag == "size"
        if len(lookup_indices) == 0 and not size_feature:
            continue

        feature_key = (feature_tag, lookup_indices)
        feature_index = feature_indices.get(feature_key)
        if feature_index is None:
            feature_index = len(table.FeatureList.FeatureRecord)
            frec = otTables.FeatureRecord()
            frec.FeatureTag = feature_tag
            frec.Feature = otTables.Feature()
            frec.Feature.LookupListIndex = list(lookup_indices)
            frec.Feature.LookupCount = len(lookup_indices)
            table.FeatureList.FeatureRecord.append(frec)
            feature_indices[feature_key] = feature_index
        scripts.setdefault(script, {}).setdefault(lang, []).append(feature_index)

    for script, lang_features in sorted(scripts.items()):
        srec = otTables.ScriptRecord()
        srec.ScriptTag = script
        srec.Script = otTables.Script()
        srec.Script.DefaultLangSys = None
        srec.Script.LangSysRecord = []
        for lang, feature_indices in sorted(lang_features.items()):
            langrec = otTables.LangSysRecord()
            langrec.LangSys = otTables.LangSys()
            langrec.LangSys.LookupOrder = None
            langrec.LangSys.ReqFeatureIndex = 0xFFFF
            langrec.LangSys.FeatureIndex = [i for i in feature_indices]
            langrec.LangSys.FeatureCount = len(langrec.LangSys.FeatureIndex)
            if lang == "dflt":
                srec.Script.DefaultLangSys = langrec.LangSys
            else:
                langrec.LangSysTag = lang
                srec.Script.LangSysRecord.append(langrec)
        srec.Script.LangSysCount = len(srec.Script.LangSysRecord)
        table.ScriptList.ScriptRecord.append(srec)

    table.ScriptList.ScriptCount = len(table.ScriptList.ScriptRecord)
    table.FeatureList.FeatureCount = len(table.FeatureList.FeatureRecord)
    table.LookupList.LookupCount = len(table.LookupList.Lookup)
    return table
Compiles a binary GSUB/GPOS table.
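A hedged driver sketch, mirroring how `buildGPOSGSUB` in this module wires the result into a fontTools font; `ff` is assumed to be an already-populated and resolved `fontFeatures.FontFeatures` object, and the font paths are placeholders:

from fontTools.ttLib import TTFont, newTable

font = TTFont("MyFont.ttf")  # placeholder path
for tag in ("GSUB", "GPOS"):
    table = makeTable(ff, tag, font)  # build the otTables object for this tag
    font[tag] = newTable(tag)
    font[tag].table = table
font.save("MyFont-built.ttf")  # placeholder output path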
https://github.com/simoncozens/fontfeatures/blob/2c3a04b461c319b0a07baf02647f0c51bf5c2394/fontFeatures/ttLib/FontFeatures.py#L182-L263
import copy from fontTools.ttLib.tables import otBase, otTables from fontTools.ttLib import newTable from collections import OrderedDict import itertools from fontTools.varLib.varStore import OnlineVarStoreBuilder def buildBinaryFeatures(self, font, axes=[]): self.resolveAllRoutines() reorderRoutines(self) for r in self.routines: self.partitionRoutine(r, lambda rule: rule.lookup_type()) if axes: self.varstorebuilder = OnlineVarStoreBuilder([ax.tag for ax in axes]) self.axes = axes buildGDEF(self, font) buildGPOSGSUB(self, font) if axes: store = self.varstorebuilder.finish() if store.VarData: font["GDEF"].table.Version = 0x00010003 font["GDEF"].table.VarStore = store varidx_map = store.optimize() font["GDEF"].table.remap_device_varidxes(varidx_map) if 'GPOS' in font: font['GPOS'].table.remap_device_varidxes(varidx_map) def reorderRoutines(self): newroutines = [] i = 0 while i < len(self.routines): r = self.routines[i] if r in newroutines: i = i + 1 continue for dep in r.dependencies: if dep not in newroutines: newroutines.append(dep) newroutines.append(r) i = i + 1 self.routines = newroutines def buildGDEF(self, font): gdef = otTables.GDEF() gdef.GlyphClassDef = _buildGDEFGlyphClassDef(self) gdef.Version = 0x00010000 if any( ( gdef.GlyphClassDef, ) ): font["GDEF"] = newTable("GDEF") font["GDEF"].table = gdef classnames = ["", "base", "ligature", "mark", "component"] def _buildGDEFGlyphClassDef(self): classes = {} for g, c in self.glyphclasses.items(): classes[g] = classnames.index(c) if classes: result = otTables.GlyphClassDef() result.classDefs = classes return result else: return None def buildGPOSGSUB(self, font): for tag in ["GSUB", "GPOS"]: table = makeTable(self, tag, font) fontTable = font[tag] = newTable(tag) fontTable.table = table def arrangeByScripts(self): for r in self.routines: if any(rule.languages for rule in r.rules): self.partitionRoutine(r, lambda rule: tuple(rule.languages or [])) for r in self.routines: if not r.rules: continue r.languages = r.rules[0].languages if r.languages: for ix,langpair in enumerate(r.languages): if langpair[1] == "*": r.languages[ix] = (langpair[0], "dflt") if langpair[0] == "*": r.languages[ix] = ("DFLT", langpair[1]) else: r.languages = [("DFLT", "dflt")] self.hoist_languages() script_lang_pairs = [] for script in self.scripts_and_languages.keys(): if not (script, "dflt") in script_lang_pairs: script_lang_pairs.append((script, "dflt")) for lang in self.scripts_and_languages[script]: script_lang_pairs.append((script, lang)) the_big_map = OrderedDict() def put_in_map(tag, script, lang, routine): key = (tag, script, lang) if key not in the_big_map: the_big_map[key] = [] if routine not in the_big_map[key]: the_big_map[key].append(routine) def put_in_map_with_default(tag, script, lang, routine): put_in_map(tag, script, lang, routine) for script2, lang2 in script_lang_pairs: if script == "DFLT" and lang == "dflt": put_in_map(tag, script2, lang2, routine) elif script == script2 and lang == "dflt": put_in_map(tag, script2, lang2, routine) for tag, routinereferences in self.features.items(): for r in routinereferences: for script, lang in r.routine.languages: put_in_map_with_default(tag, script, lang, r) return the_big_map def separate_by_stage(the_big_map, stage): stage_map = OrderedDict() for k,v in the_big_map.items(): v = [r for r in v if r.stage == stage] if v: stage_map[k] = v return stage_map
BSD 3-Clause New or Revised License
negrinho/deep_architect
dev/hyperband/common.py
History.load
python
def load():
    pass
Load from json file
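The method is still an unimplemented stub in this file. Purely as an assumption about the intended shape (the mirror image of save(), restoring the attributes set in __init__), one plausible sketch is:

import json

def load(self, path):
    # Hypothetical counterpart to save(): restore a History from a JSON dump.
    # Note the stub above takes no arguments; self and path are added here
    # only to make the sketch self-consistent.
    with open(path, "r") as fp:
        state = json.load(fp)
    self.brackets = state.get("brackets", {})
    self.total_configurations = state.get("total_configurations", 0)
    self.total_resources = state.get("total_resources", 0)
    self.best_configuration = state.get("best_configuration")
    self.best_performance = state.get("best_performance")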
https://github.com/negrinho/deep_architect/blob/3427c5d45b0cbdc9c2fe1f4e5213f6961ef41749/dev/hyperband/common.py#L162-L164
class HyperBand: def __init__(self): if type(self) == HyperBand: raise Exception("Can't instantiate Abstract Class; must define a subclass") def get_hyperparameter_configuration(self, n): raise NotImplementedError("Need to be defined by users") def run_then_return_val_loss(self, t, r): raise NotImplementedError("Need to be defined by users") def top_k(self, T, P, k): raise NotImplementedError("Need to be defined by users") def get_best_performance(self, L, T, best_so_far): raise NotImplementedError("Need to be defined by users") def evaluate(self, R, eta = 3): s_max = int(math.floor(math.log(R, eta))) B = (s_max + 1) * R max_perf = None self.total_resources = 0 self.total_configs = 0 for s in xrange(s_max, -1, -1): n = int(math.ceil(float(B) * (eta**s) / (float(R) * (s+1)))) r = R * (eta**(-s)) T = self.get_hyperparameter_configuration(n) self.total_configs += n for i in xrange(s+1): if len(T) == 0: return max_perf n_i = int(math.floor(n * (eta ** (-i)))) r_i = int(math.floor(r * (eta ** i))) L = [self.run_then_return_val_loss(t, r_i) for t in T] k = int(math.floor(float(n_i) / eta)) if (k == 0): break T = self.top_k(T, L, k) self.total_resources += len(T) * r_i max_perf = self.get_best_performance(L, T, max_perf) return max_perf class History: def __init__(self): self.brackets = dict() self.total_configurations = 0 self.total_resources = 0 self.best_configuration = None self.best_performance = None def record_successiveHalving(self, bracket_id, configs, performances, resource): assert(len(configs) == len(performances)) if bracket_id not in self.brackets: self.brackets[bracket_id] = dict() for (config, performance) in zip(configs, performances): self.brackets[config][resource] = performance def record_best_of_bracket(self, bracket_id, configs, performances): assert(bracket_id in self.brackets) assert(len(configs) == len(performances) == 1) self.brackets[bracket_id]["best_config"] = (configs[0], performances[0]) def record_best_config(self, best_config, best_perf): self.best_configuration = best_config self.best_performance = best_perf def convert_to_graph_hyperband_ready(self, metric, resource): data = {'configuration': [], metric: [], resource:[]} for bracket in self.brackets: config, performance = self.brackets[bracket] history = performance[1] config_id = config n = len(history[metric]) data['configuration'] += [config_id for _ in range(n)] data[metric] += history[metric] data[resource] += history[resource] return data def convert_a_bracket_to_graph_ready(self, bracket_id, metric, resource): data = {'configuration': [], metric: [], resource:[]} config, performance = self.brackets[bracket_id] history = performance[1] config_id = config n = len(history[metric]) data['configuration'] += [config_id for _ in range(n)] data[metric] += history[metric] data[resource] += history[resource] return data def get_all_bracket_IDs(self): pass def save(): pass
MIT License
jdcloud-api/jdcloud-sdk-python
jdcloud_sdk/services/cdn/apis/CreateDomainRequest.py
CreateDomainParameters.setMinFileSize
python
def setMinFileSize(self, minFileSize):
    self.minFileSize = minFileSize
:param minFileSize: (Optional)
https://github.com/jdcloud-api/jdcloud-sdk-python/blob/ff70b25bb9c4397e80bc2e4fefe1db1c637d8c2f/jdcloud_sdk/services/cdn/apis/CreateDomainRequest.py#L94-L98
from jdcloud_sdk.core.jdcloudrequest import JDCloudRequest class CreateDomainRequest(JDCloudRequest): def __init__(self, parameters, header=None, version="v1"): super(CreateDomainRequest, self).__init__( '/domain/{domain}', 'POST', header, version) self.parameters = parameters class CreateDomainParameters(object): def __init__(self, domain, ): self.domain = domain self.sourceType = None self.cdnType = None self.backSourceType = None self.dailyBandWidth = None self.quaility = None self.maxFileSize = None self.minFileSize = None self.sumFileSize = None self.avgFileSize = None self.defaultSourceHost = None self.httpType = None self.ipSource = None self.domainSource = None self.ossSource = None self.accelerateRegion = None self.tempInstId = None def setSourceType(self, sourceType): self.sourceType = sourceType def setCdnType(self, cdnType): self.cdnType = cdnType def setBackSourceType(self, backSourceType): self.backSourceType = backSourceType def setDailyBandWidth(self, dailyBandWidth): self.dailyBandWidth = dailyBandWidth def setQuaility(self, quaility): self.quaility = quaility def setMaxFileSize(self, maxFileSize): self.maxFileSize = maxFileSize
Apache License 2.0
gugarosa/opytimizer
opytimizer/math/general.py
kmeans
python
def kmeans(x, n_clusters=1, max_iterations=100, tol=1e-4):
    n_samples, n_variables, n_dimensions = x.shape[0], x.shape[1], x.shape[2]

    centroids = np.zeros((n_clusters, n_variables, n_dimensions))
    labels = np.zeros(n_samples)

    for i in range(n_clusters):
        idx = r.generate_integer_random_number(0, n_samples)
        centroids[i] = x[idx]

    for _ in range(max_iterations):
        dists = np.squeeze(np.array([np.linalg.norm(x - c, axis=1) for c in centroids]))
        updated_labels = np.squeeze(np.array(np.argmin(dists, axis=0)))

        ratio = np.sum(labels != updated_labels) / n_samples
        if ratio <= tol:
            break

        labels = updated_labels

        for i in range(n_clusters):
            centroid_samples = x[labels == i]
            if centroid_samples.shape[0] > 0:
                centroids[i] = np.mean(centroid_samples, axis=0)

    return labels
Performs the K-Means clustering over the input data.

Args:
    x (np.array): Input array with a shape equal to (n_samples, n_variables, n_dimensions).
    n_clusters (int): Number of clusters.
    max_iterations (int): Maximum number of clustering iterations.
    tol (float): Tolerance value to stop the clustering.

Returns:
    An array holding the assigned cluster per input sample.
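A small usage sketch under the documented (n_samples, n_variables, n_dimensions) shape; the data below is made up so the two clusters are easy to separate:

import numpy as np

# 100 samples, 2 variables, 1 dimension each, drawn from two separated blobs.
blob_a = np.random.normal(0.0, 0.1, (50, 2, 1))
blob_b = np.random.normal(1.0, 0.1, (50, 2, 1))
x = np.concatenate([blob_a, blob_b])

labels = kmeans(x, n_clusters=2, max_iterations=100, tol=1e-4)
print(labels.shape)       # (100,)
print(np.unique(labels))  # the assigned cluster ids, e.g. [0 1]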
https://github.com/gugarosa/opytimizer/blob/09e5485b9e30eca622ad404e85c22de0c42c8abd/opytimizer/math/general.py#L28-L79
from itertools import islice import numpy as np import opytimizer.math.random as r def euclidean_distance(x, y): distance = np.linalg.norm(x - y) return distance
Apache License 2.0
r-arcgis/r-bridge-install
rtools/utils.py
mkdtemp
python
def mkdtemp(suffix='', prefix='tmp', parent_dir=None):
    path = tempfile.mkdtemp(suffix, prefix, parent_dir)
    try:
        yield str(path)
    finally:
        shutil.rmtree(path, ignore_errors=True)
A contextlib based wrapper for tempfile.mkdtemp.
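Typical use leans on the context-manager behavior so the directory is removed even if the body raises; the file name below is illustrative:

import os

with mkdtemp(prefix='rtools_') as tmp_dir:
    # tmp_dir exists only inside this block.
    scratch = os.path.join(tmp_dir, 'scratch.txt')
    with open(scratch, 'w') as fh:
        fh.write('temporary data')
# On exit the directory and its contents are gone (shutil.rmtree in the finally).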
https://github.com/r-arcgis/r-bridge-install/blob/18ebc2edb84655f6bc67b1f462d58162329f1fcd/rtools/utils.py#L36-L42
from __future__ import unicode_literals import contextlib import os import shutil import sys import tempfile import textwrap def platform(): platform = None if sys.maxsize > 2**32: platform = 'x64' else: platform = 'i386' return platform def dedent(text, ending='\r\n'): text = text.replace('\n', ending) return textwrap.dedent(text) def versiontuple(v): res = None if v is not None: res = tuple(map(int, (v.split(".")))) return res @contextlib.contextmanager
Apache License 2.0
trusted-ai/adversarial-robustness-toolbox
art/attacks/extraction/knockoff_nets.py
KnockoffNets.extract
python
def extract(self, x: np.ndarray, y: Optional[np.ndarray] = None, **kwargs) -> "CLASSIFIER_TYPE":
    if self.sampling_strategy == "random" and y is not None:
        logger.warning("This attack with random sampling strategy does not use the provided label y.")

    if self.sampling_strategy == "adaptive" and y is None:
        raise ValueError("This attack with adaptive sampling strategy needs label y.")

    if x.shape[0] < self.nb_stolen:
        logger.warning(
            "The size of the source input is smaller than the expected number of queries submitted "
            "to the victim classifier."
        )

    thieved_classifier = kwargs.get("thieved_classifier")
    if thieved_classifier is None or not isinstance(thieved_classifier, ClassifierMixin):
        raise ValueError("A thieved classifier is needed.")

    if self.sampling_strategy == "random":
        thieved_classifier = self._random_extraction(x, thieved_classifier)
    else:
        thieved_classifier = self._adaptive_extraction(x, y, thieved_classifier)

    return thieved_classifier
Extract a thieved classifier.

:param x: An array with the source input to the victim classifier.
:param y: Target values (class labels) one-hot-encoded of shape (nb_samples, nb_classes) or indices of shape `(nb_samples,)`.
:param thieved_classifier: A thieved classifier to be stolen.
:return: The stolen classifier.
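A hedged end-to-end sketch using ART's scikit-learn wrappers; `x_train`/`y_train` are assumed to exist, the victim is assumed to be already fitted, and the parameter values are illustrative rather than recommended:

from sklearn.linear_model import LogisticRegression
from art.estimators.classification import SklearnClassifier
from art.attacks.extraction import KnockoffNets

victim = SklearnClassifier(model=LogisticRegression().fit(x_train, y_train))
thieved = SklearnClassifier(model=LogisticRegression())  # starts untrained

attack = KnockoffNets(classifier=victim, nb_stolen=500, sampling_strategy="random")
stolen = attack.extract(x=x_train, thieved_classifier=thieved)
predictions = stolen.predict(x_train[:5])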
https://github.com/trusted-ai/adversarial-robustness-toolbox/blob/564f46f99b3cb0406fe3570919b8e71a4c5bba9d/art/attacks/extraction/knockoff_nets.py#L100-L136
from __future__ import absolute_import, division, print_function, unicode_literals import logging from typing import Optional, TYPE_CHECKING import numpy as np from tqdm.auto import trange from art.config import ART_NUMPY_DTYPE from art.attacks.attack import ExtractionAttack from art.estimators.estimator import BaseEstimator from art.estimators.classification.classifier import ClassifierMixin from art.utils import to_categorical if TYPE_CHECKING: from art.utils import CLASSIFIER_TYPE logger = logging.getLogger(__name__) class KnockoffNets(ExtractionAttack): attack_params = ExtractionAttack.attack_params + [ "batch_size_fit", "batch_size_query", "nb_epochs", "nb_stolen", "sampling_strategy", "reward", "verbose", "use_probability", ] _estimator_requirements = (BaseEstimator, ClassifierMixin) def __init__( self, classifier: "CLASSIFIER_TYPE", batch_size_fit: int = 1, batch_size_query: int = 1, nb_epochs: int = 10, nb_stolen: int = 1, sampling_strategy: str = "random", reward: str = "all", verbose: bool = True, use_probability: bool = False, ) -> None: super().__init__(estimator=classifier) self.batch_size_fit = batch_size_fit self.batch_size_query = batch_size_query self.nb_epochs = nb_epochs self.nb_stolen = nb_stolen self.sampling_strategy = sampling_strategy self.reward = reward self.verbose = verbose self.use_probability = use_probability self._check_params()
MIT License
duointeractive/media-nommer
media_nommer/core/storage_backends/s3.py
S3Backend._get_aws_s3_connection
python
def _get_aws_s3_connection(cls, access_key, secret_access_key):
    return boto.connect_s3(access_key, secret_access_key)
Lazy-loading of the S3 boto connection. Refer to this instead of referencing self._aws_s3_connection directly.

:param str access_key: The AWS_ Access Key needed to get to the file in question.
:param str secret_access_key: The AWS_ Secret Access Key needed to get to the file in question.
:rtype: :py:class:`boto.s3.connection.Connection`
:returns: A boto connection to Amazon's S3 interface.
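With boto2 the rest of the workflow is a couple of calls; the credentials, bucket, and key names below are placeholders, and the direct call to the underscore-prefixed classmethod is purely illustrative:

conn = S3Backend._get_aws_s3_connection("ACCESS_KEY", "SECRET_ACCESS_KEY")  # placeholder credentials
bucket = conn.get_bucket("my-nommer-bucket")  # placeholder bucket name
key = bucket.get_key("infile.mp4")            # placeholder key name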
https://github.com/duointeractive/media-nommer/blob/ba8095748582934ab44f6972210381b074bd9d14/media_nommer/core/storage_backends/s3.py#L16-L28
import boto from media_nommer.utils import logger from media_nommer.utils.uri_parsing import get_values_from_media_uri from media_nommer.core.storage_backends.exceptions import InfileNotFoundException from media_nommer.core.storage_backends.base_backend import BaseStorageBackend class S3Backend(BaseStorageBackend): @classmethod
BSD 3-Clause New or Revised License
alteryx/evalml
evalml/pipelines/components/transformers/preprocessing/delayed_feature_transformer.py
DelayedFeatureTransformer.transform
python
def transform(self, X, y=None):
    if X is None:
        X = pd.DataFrame()
    X_ww = infer_feature_types(X)
    X_ww = X_ww.ww.copy()
    categorical_columns = self._get_categorical_columns(X_ww)
    original_features = list(X_ww.columns)
    if self.delay_features and len(X) > 0:
        X_categorical = self._encode_X_while_preserving_index(
            X_ww[categorical_columns]
        )
        for col_name in X_ww:
            col = X_ww[col_name]
            if col_name in categorical_columns:
                col = X_categorical[col_name]
            for t in range(self.start_delay, self.start_delay + self.max_delay + 1):
                X_ww.ww[f"{col_name}_delay_{t}"] = col.shift(t)
    if self.delay_target and y is not None:
        y = infer_feature_types(y)
        if type(y.ww.logical_type) == logical_types.Categorical:
            y = self._encode_y_while_preserving_index(y)
        for t in range(self.start_delay, self.start_delay + self.max_delay + 1):
            X_ww.ww[self.target_colname_prefix.format(t)] = y.shift(t)
    return X_ww.ww.drop(original_features)
Computes the delayed features for all features in X and y.

For each feature in X, it will add a column to the output dataframe for each delay in the (inclusive) range [1, max_delay]. The values of each delayed feature are simply the original feature shifted forward in time by the delay amount. For example, a delay of 3 units means that the feature value at row n will be taken from the n-3rd row of that feature.

If y is not None, it will also compute the delayed values for the target variable.

Args:
    X (pd.DataFrame or None): Data to transform. None is expected when only the target variable is being used.
    y (pd.Series, or None): Target.

Returns:
    pd.DataFrame: Transformed X.
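A small illustration of the columns this produces, with made-up data and the default gap/forecast_horizon (so the start delay is 1):

import pandas as pd

X = pd.DataFrame({"feature": [1, 2, 3, 4, 5]})
y = pd.Series([10, 20, 30, 40, 50])

dft = DelayedFeatureTransformer(max_delay=2, gap=0, forecast_horizon=1)
out = dft.fit_transform(X, y)
print(out.columns.tolist())
# e.g. ['feature_delay_1', 'feature_delay_2', 'feature_delay_3',
#       'target_delay_1', 'target_delay_2', 'target_delay_3']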
https://github.com/alteryx/evalml/blob/12ea29f4cb62948566804f624d37442c2e5aeeea/evalml/pipelines/components/transformers/preprocessing/delayed_feature_transformer.py#L94-L135
import pandas as pd from sklearn.preprocessing import LabelEncoder, OrdinalEncoder from woodwork import logical_types from evalml.pipelines.components.transformers.transformer import Transformer from evalml.utils import infer_feature_types class DelayedFeatureTransformer(Transformer): name = "Delayed Feature Transformer" hyperparameter_ranges = {} needs_fitting = False target_colname_prefix = "target_delay_{}" def __init__( self, date_index=None, max_delay=2, gap=0, forecast_horizon=1, delay_features=True, delay_target=True, random_seed=0, **kwargs, ): self.date_index = date_index self.max_delay = max_delay self.delay_features = delay_features self.delay_target = delay_target self.forecast_horizon = forecast_horizon self.gap = gap self.start_delay = self.forecast_horizon + self.gap parameters = { "date_index": date_index, "max_delay": max_delay, "delay_target": delay_target, "delay_features": delay_features, "forecast_horizon": forecast_horizon, "gap": gap, } parameters.update(kwargs) super().__init__(parameters=parameters, random_seed=random_seed) def fit(self, X, y=None): return self @staticmethod def _encode_y_while_preserving_index(y): y_encoded = LabelEncoder().fit_transform(y) y = pd.Series(y_encoded, index=y.index) return y @staticmethod def _get_categorical_columns(X): return list(X.ww.select(["categorical"], return_schema=True).columns) @staticmethod def _encode_X_while_preserving_index(X_categorical): return pd.DataFrame( OrdinalEncoder().fit_transform(X_categorical), columns=X_categorical.columns, index=X_categorical.index, )
BSD 3-Clause New or Revised License
sammchardy/python-binance-chain
binance_chain/node_rpc/http.py
HttpRpcClient.get_validators
python
def get_validators(self):
    return self._request('validators')
Get the validator set at the given block height. If no height is provided, it will fetch the current validator set. https://binance-chain.github.io/api-reference/node-rpc.html#validators
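Usage matches the other single-shot helpers on this client; the endpoint below is a placeholder for a Binance Chain node RPC URL:

client = HttpRpcClient("https://dataseed1.binance.org:443")  # placeholder endpoint
validators = client.get_validators()
# A Tendermint-style response is expected to carry the height and the validator set:
print(validators["block_height"], len(validators["validators"]))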
https://github.com/sammchardy/python-binance-chain/blob/19d7d639cc912a27ec86831338c2a2dc96289d50/binance_chain/node_rpc/http.py#L218-L225
import asyncio import itertools from typing import Optional, Dict import requests import aiohttp import ujson from binance_chain.exceptions import BinanceChainRPCException, BinanceChainRequestException from binance_chain.constants import RpcBroadcastRequestType from binance_chain.messages import Msg from binance_chain.node_rpc.request import RpcRequest requests.models.json = ujson class BaseHttpRpcClient: id_generator = itertools.count(1) def __init__(self, endpoint_url, requests_params: Optional[Dict] = None): self._endpoint_url = endpoint_url self._requests_params = requests_params self.session = self._init_session() def _init_session(self): session = requests.session() session.headers.update(self._get_headers()) return session def _get_rpc_request(self, path, **kwargs) -> str: rpc_request = RpcRequest(path, next(self.id_generator), kwargs.get('data', None)) return str(rpc_request) def _get_headers(self): return { 'Accept': 'application/json', 'Content-Type': 'application/json', 'User-Agent': 'python-binance-chain', } def request_kwargs(self, method, **kwargs): kwargs['timeout'] = 10 if self._requests_params: kwargs.update(self._requests_params) kwargs['data'] = kwargs.get('data', {}) kwargs['headers'] = kwargs.get('headers', {}) if kwargs['data'] and method == 'get': kwargs['params'] = kwargs['data'] del(kwargs['data']) if method == 'post': kwargs['headers']['content-type'] = 'text/plain' return kwargs class HttpRpcClient(BaseHttpRpcClient): def _request(self, path, **kwargs): rpc_request = self._get_rpc_request(path, **kwargs) response = self.session.post(self._endpoint_url, data=rpc_request.encode(), headers=self._get_headers()) return self._handle_response(response) def _request_session(self, path, params=None): kwargs = { 'params': params, 'headers': self._get_headers() } response = self.session.get(f"{self._endpoint_url}/{path}", **kwargs) return self._handle_session_response(response) @staticmethod def _handle_response(response): try: res = response.json() if 'error' in res and res['error']: raise BinanceChainRPCException(response) if 'result' in res: res = res['result'] return res except ValueError: raise BinanceChainRequestException('Invalid Response: %s' % response.text) @staticmethod def _handle_session_response(response): if not str(response.status_code).startswith('2'): raise BinanceChainRPCException(response) try: res = response.json() if 'code' in res and res['code'] != "200000": raise BinanceChainRPCException(response) if 'success' in res and not res['success']: raise BinanceChainRPCException(response) if 'result' in res: res = res['result'] return res except ValueError: raise BinanceChainRequestException('Invalid Response: %s' % response.text) def get_path_list(self): res = self._request(self._endpoint_url, method="get") return res.content def get_abci_info(self): return self._request('abci_info') def get_consensus_state(self): return self._request('consensus_state') def dump_consensus_state(self): return self._request('dump_consensus_state') def get_genesis(self): return self._request('genesis') def get_net_info(self): return self._request('net_info') def get_num_unconfirmed_txs(self): return self._request('num_unconfirmed_txs') def get_status(self): return self._request('status') def get_health(self): return self._request('health') def get_unconfirmed_txs(self): return self._request('unconfirmed_txs')
MIT License
quizlet/abracadabra
abra/stats.py
cohens_d_sample_size
python
def cohens_d_sample_size(
    delta,
    alpha,
    power,
    statistic,
    std_control,
    std_variation=None,
    sample_size_ratio=1.
):
    SUPPORTED_STATISTICS = ('t', 'z')
    effect_size = cohens_d(delta, std_control, std_variation)

    if statistic in SUPPORTED_STATISTICS:
        power_func = "{}t_ind_solve_power".format(statistic)
        N1 = int(
            eval(power_func)(
                effect_size, alpha=alpha, power=power, ratio=sample_size_ratio
            )
        )
        N2 = int(N1 * sample_size_ratio)
        return [N1, N2]
    else:
        raise ValueError("Unknown statistic, must be either {!r}".format(SUPPORTED_STATISTICS))
Calculate sample size required to observe a significantly reliable difference between groups a and b. Assumes Cohen's d definition of effect size and an enrollment ratio of 1.0 between groups a and b by default.

Parameters
----------
std_control : float
    An estimate of the expected sample standard deviation of control group
nobs_control : int
    The number of control observations.
std_variation : float
    An estimate of the expected sample standard deviation of variation group.
    If not provided, we assume homogeneous variances for the two groups.

Returns
-------
sample_sizes : list[int]
    The estimated sample sizes for the control and variation treatments

Example
-------
# Get estimate of sample size required to observe a significant difference between
# two binomial distributions that differ by .01 in mean probability
prob_control = .49
std_control = (prob_control * (1 - prob_control))**.5  # Binomial std
prob_variation = std_variation = .50
delta = prob_variation - prob_control
print(
    cohens_d_sample_size(
        delta=delta,
        alpha=.05,
        power=.8,
        statistic='z',
        std_control=std_control,
        std_variation=std_variation
    )
)
# [39236, 39236]

References
----------
Cohen, J. (1988). Statistical power analysis for the behavioral sciences (2nd ed.). Hillsdale, NJ: Lawrence Earlbaum Associates.
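The effect-size step above is plain Cohen's d with a pooled standard deviation, matching the module's cohens_d helper shown in the context; a quick worked check using the docstring's numbers (the cohens_d_check name is only for this sketch):

import numpy as np

def cohens_d_check(delta, std_control, std_variation=None):
    # Same pooling rule as the module's cohens_d helper:
    # d = delta / sqrt((s_control**2 + s_variation**2) / 2)
    std_variation = std_variation if std_variation else std_control
    std_pooled = np.sqrt((std_control ** 2 + std_variation ** 2) / 2.)
    return delta / std_pooled

prob_control = .49
std_control = (prob_control * (1 - prob_control)) ** .5  # ~0.4999
print(round(cohens_d_check(.50 - prob_control, std_control, .50), 4))  # ~0.02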
https://github.com/quizlet/abracadabra/blob/eda599bd02f14b96efdc521f53132d93c9100ede/abra/stats.py#L181-L255
from abra.config import DEFAULT_ALPHA, logger from abra.mixin import InitRepr from statsmodels.stats.api import DescrStatsW, CompareMeans from statsmodels.distributions.empirical_distribution import ECDF from statsmodels.stats.power import tt_ind_solve_power, zt_ind_solve_power from statsmodels.stats.proportion import proportions_ztest, binom_test from scipy.stats import norm from scipy import optimize from pandas import DataFrame import numpy as np CORRECTIONS = {'b': 'bonferroni', 's': 'sidak', 'bh': 'fdr_bh'} def bonferroni(alpha_orig, p_values): return alpha_orig / len(p_values) def sidak(alpha_orig, p_values): return 1. - (1. - alpha_orig) ** (1. / len(p_values)) def fdr_bh(fdr, p_values): n_tests = len(p_values) def p_i(i): return i * fdr / n_tests p_sorted = np.sort(np.asarray(p_values)) significant_idx = [i for i, val in enumerate(p_sorted, 1) if val <= p_i(i)] rank = np.max(significant_idx) if significant_idx else 1 return p_i(rank) def estimate_experiment_sample_sizes( delta, statistic='z', alpha=.05, power=.8, *args, **kwargs ): if statistic in ('t', 'z'): return cohens_d_sample_size(delta, alpha, power, statistic, *args, **kwargs) elif statistic == 'rates_ratio': return ratio_sample_size(alpha, power, delta, *args, **kwargs) else: raise ValueError("Unknown statistic") def cohens_d(delta, std_control, std_variation=None): std_variation = std_variation if std_variation else std_control std_pooled = np.sqrt((std_control ** 2 + std_variation ** 2) / 2.) return delta / std_pooled
MIT License
jahjajaka/afternoon_cleaner
object_detection/model_lib_v2_test.py
_get_config_kwarg_overrides
python
def _get_config_kwarg_overrides():
    data_path = _get_data_path()
    label_map_path = _get_labelmap_path()
    return {
        'train_input_path': data_path,
        'eval_input_path': data_path,
        'label_map_path': label_map_path
    }
Returns overrides to the configs that insert the correct local paths.
https://github.com/jahjajaka/afternoon_cleaner/blob/590bdf58a216cbc6cfc47ef8f49d7af3df3703b7/object_detection/model_lib_v2_test.py#L53-L61
from __future__ import absolute_import from __future__ import division from __future__ import print_function import os import tensorflow as tf from object_detection import model_hparams from object_detection import model_lib_v2 from object_detection.utils import config_util MODEL_NAME_FOR_TEST = 'ssd_mobilenet_v2_pets_keras' def _get_data_path(): return os.path.join(tf.resource_loader.get_data_files_path(), 'test_data', 'pets_examples.record') def get_pipeline_config_path(model_name): return os.path.join(tf.resource_loader.get_data_files_path(), 'samples', 'configs', model_name + '.config') def _get_labelmap_path(): return os.path.join(tf.resource_loader.get_data_files_path(), 'data', 'pet_label_map.pbtxt')
MIT License
wq2012/spectralcluster
spectralcluster/autotune.py
AutoTune.__init__
python
def __init__(self,
             p_percentile_min=0.60,
             p_percentile_max=0.95,
             init_search_step=0.01,
             search_level=1):
    self.p_percentile_min = p_percentile_min
    self.p_percentile_max = p_percentile_max
    self.search_step = init_search_step
    self.search_level = search_level
Initialization of the autotune arguments.

Args:
    p_percentile_min: minimum value of p_percentile
    p_percentile_max: maximum value of p_percentile
    init_search_step: initial search step size for auto-tuning
    search_level: hierarchical search level for auto-tuning
https://github.com/wq2012/spectralcluster/blob/98f13872154cf296936be98cbdfa208f5a45dd87/spectralcluster/autotune.py#L17-L33
import numpy as np

MIN_SEARCH_STEP = 1e-04


class AutoTune:
Apache License 2.0
ialbert/biostar-central
biostar/forum/auth.py
valid_awards
python
def valid_awards(user):
    valid = []

    for award in awards.ALL_AWARDS:
        targets = award.get_awards(user)
        for target in targets:
            post = target if isinstance(target, Post) else None
            date = post.lastedit_date if post else user.profile.last_login
            badge = Badge.objects.filter(name=award.name).first()
            valid.append((user, badge, date, post))

    return valid
Return list of valid awards for a given user
https://github.com/ialbert/biostar-central/blob/5326eec6badb4c272e13042f3e983b0e30103287/biostar/forum/auth.py#L409-L428
import hashlib import logging import re import urllib.parse as urlparse from datetime import timedelta from difflib import Differ, SequenceMatcher, HtmlDiff, unified_diff import bs4 from django.contrib import messages from django.contrib.auth import get_user_model from django.core.cache import cache from django.db import transaction from django.db.models import F, Q from django.template import loader from django.utils.safestring import mark_safe from django.conf import settings from biostar.accounts.const import MESSAGE_COUNT from biostar.accounts.models import Message from biostar.planet.models import BlogPost, Blog from biostar.accounts.models import Profile from biostar.utils.helpers import get_ip from . import util, awards from .const import * from .models import Post, Vote, Subscription, Badge, delete_post_cache, Log, SharedLink, Diff User = get_user_model() logger = logging.getLogger("engine") def get_votes(user, root): store = { Vote.BOOKMARK: set(), Vote.UP: set() } if user.is_authenticated: votes = Vote.objects.filter(post__root=root, author=user).values_list("type", "post__id") for vote_type, post_id, in votes: store.setdefault(vote_type, set()).add(post_id) return store def convert_html(): return def delete_cache(prefix, user): key = f"{prefix}-{user.pk}" if cache.get(key): cache.delete(key) logger.debug(f'deleted {key} from cache') return import datetime ICONS = ["monsterid", "robohash", "wavatar", "retro"] def gravatar_url(email, style='mp', size=80, force=None): global ICONS hash_num = hashlib.md5(email).hexdigest() now = datetime.datetime.now() if now.month == 4 and now.day == 1: index = now.hour % len(ICONS) style = ICONS[index] force = True data = dict(s=str(size), d=style) url = "https://secure.gravatar.com/avatar/%s?" % hash_num if force: data['f'] = 'y' url += urlparse.urlencode(data) return url def encode_email(email, key): return def decode_email(email): return def gravatar(user, size=80): if not user or user.is_anonymous: email = '[email protected]'.encode('utf8') return gravatar_url(email=email) email = user.email if user.is_authenticated else '' email = email.encode('utf8', errors="ignore") if user.is_anonymous or not user.profile.is_valid: email = '[email protected]'.encode('utf8') style = "monsterid" return gravatar_url(email=email, style=style, size=size, force=True) if user.profile.user_icon != Profile.DEFAULT_ICON: style = user.profile.user_icon return gravatar_url(email=email, style=style, size=size, force=True) if user.profile.is_moderator: style = "robohash" elif user.profile.score > 100: style = "retro" elif user.profile.score > 0: style = "identicon" else: style = "mp" return gravatar_url(email=email, style=style, size=size) def walk_down_thread(parent, collect=set()): if (parent is None) or (parent.parent is None) or (parent.root is None): return collect children = Post.objects.filter(parent=parent).exclude(uid=parent.uid) for child in children: collect.add(child) walk_down_thread(parent=child, collect=collect) return collect def create_post_from_json(**json_data): post_uid = json_data['id'] post = Post.objects.filter(uid=post_uid).first() if post: post.content = json_data[''] post.lastedit_date = json_data['lastedit_date'] post.creation_date = json_data['creation_date'] return def create_post(author, title, content, request=None, root=None, parent=None, ptype=Post.QUESTION, tag_val="", nodups=True): post = Post.objects.filter(content=content, author=author).order_by('-creation_date').first() frame = 60 delta = (util.now() - post.creation_date).seconds 
if post else frame if nodups and delta < frame: if request: messages.warning(request, "Post with this content was created recently.") return post post = Post.objects.create(title=title, content=content, root=root, parent=parent, type=ptype, tag_val=tag_val, author=author) delete_cache(MYPOSTS, author) return post def diff_ratio(text1, text2): s = SequenceMatcher(lambda char: re.match(r'\w+', char), text1, text2) return round(s.ratio(), 5) def create_diff(text, post, user): if not post: return ratio = diff_ratio(text1=text, text2=post.content) if ratio == 1: return content = post.content.splitlines() text = text.splitlines() diff = unified_diff(content, text) diff = [f"{line}\n" if not line.endswith('\n') else line for line in diff] diff = ''.join(diff) dobj = Diff.objects.filter(post=post, author=post.author).first() frame = 60 * 10 delta = (util.now() - dobj.created).seconds if dobj else frame if delta >= frame or user != post.author: dobj = Diff.objects.create(diff=diff, post=post, author=user) post.has_diff = True if user != post.author: db_logger(user=user, action=Log.EDIT, text=f'edited post', target=post.author, post=post) Post.objects.filter(pk=post.pk).update(has_diff=post.has_diff) return dobj def merge_profiles(main, alias): Post.objects.filter(author=alias).update(author=main) Post.objects.filter(lastedit_user=alias).update(lastedit_user=main) Message.objects.filter(sender=alias).update(sender=main) Message.objects.filter(recipient=alias).update(recipient=main) older = (alias.profile.date_joined < main.profile.date_joined) if alias.profile.is_moderator or alias.profile.high_rep or older: return alias.delete() return def create_subscription(post, user, sub_type=None, update=False): subs = Subscription.objects.filter(post=post.root, user=user) sub = subs.first() default = Subscription.TYPE_MAP.get(user.profile.message_prefs, Subscription.LOCAL_MESSAGE) empty = sub_type is None sub_type = None if empty else sub_type sub_type = sub_type or default if sub and update: sub.type = sub_type sub.save() else: subs.delete() Subscription.objects.create(post=post.root, user=user, type=sub_type) subs_count = Subscription.objects.filter(post=post.root).exclude(type=Subscription.NO_MESSAGES).count() Post.objects.filter(pk=post.root.pk).update(subs_count=subs_count) delete_cache(FOLLOWING, user) def is_suspended(user): if user.is_authenticated and user.profile.state in (Profile.BANNED, Profile.SUSPENDED, Profile.SPAMMER): return True return False def post_tree(user, root): query = Post.objects.valid_posts(u=user, root=root).exclude(pk=root.id) if user.is_anonymous or not user.profile.is_moderator: query = query.exclude(Q(status=Post.DELETED) | Q(spam=Post.SPAM)) query = query.select_related("lastedit_user__profile", "author__profile", "root__author__profile") thread = query.order_by("type", "-accept_count", "-vote_count", "creation_date") votes = get_votes(user=user, root=root) bookmarks, upvotes = votes[Vote.BOOKMARK], votes[Vote.UP] comment_tree = dict() def decorate(post): if post.is_comment: comment_tree.setdefault(post.parent_id, []).append(post) post.has_bookmark = int(post.id in bookmarks) post.has_upvote = int(post.id in upvotes) if user.is_authenticated: post.can_accept = not post.is_toplevel and (user == post.root.author or user.profile.is_moderator) post.can_moderate = user.profile.is_moderator post.is_editable = (user == post.author or user.profile.is_moderator) else: post.can_accept = False post.is_editable = False post.can_moderate = False return post thread = list(map(decorate, 
thread)) root = decorate(root) answers = [p for p in thread if p.type == Post.ANSWER] return root, comment_tree, answers, thread
MIT License
sibirrer/lenstronomy
lenstronomy/Analysis/kinematics_api.py
KinematicsAPI.__init__
python
def __init__(self, z_lens, z_source, kwargs_model, kwargs_aperture, kwargs_seeing,
             anisotropy_model, cosmo=None, lens_model_kinematics_bool=None,
             light_model_kinematics_bool=None, multi_observations=False,
             kwargs_numerics_galkin=None, analytic_kinematics=False,
             Hernquist_approx=False, MGE_light=False, MGE_mass=False,
             kwargs_mge_light=None, kwargs_mge_mass=None, sampling_number=1000,
             num_kin_sampling=1000, num_psf_sampling=100):
    self.z_d = z_lens
    self.z_s = z_source
    self._kwargs_aperture_kin = kwargs_aperture
    self._kwargs_psf_kin = kwargs_seeing
    self.lensCosmo = LensCosmo(z_lens, z_source, cosmo=cosmo)
    self.LensModel, self.SourceModel, self.LensLightModel, self.PointSource, extinction_class = class_creator.create_class_instances(
        all_models=True, **kwargs_model)
    self._lensLightProfile = LightProfileAnalysis(light_model=self.LensLightModel)
    self._lensMassProfile = LensProfileAnalysis(lens_model=self.LensModel)
    self._lens_light_model_list = self.LensLightModel.profile_type_list
    self._lens_model_list = self.LensModel.lens_model_list
    self._kwargs_cosmo = {'d_d': self.lensCosmo.dd, 'd_s': self.lensCosmo.ds, 'd_ds': self.lensCosmo.dds}
    self._lens_model_kinematics_bool = lens_model_kinematics_bool
    self._light_model_kinematics_bool = light_model_kinematics_bool
    self._sampling_number = sampling_number
    self._num_kin_sampling = num_kin_sampling
    self._num_psf_sampling = num_psf_sampling
    if kwargs_mge_mass is None:
        self._kwargs_mge_mass = {'n_comp': 20}
    else:
        self._kwargs_mge_mass = kwargs_mge_mass
    if kwargs_mge_light is None:
        self._kwargs_mge_light = {'grid_spacing': 0.01, 'grid_num': 100, 'n_comp': 20,
                                  'center_x': None, 'center_y': None}
    else:
        self._kwargs_mge_light = kwargs_mge_light
    if kwargs_numerics_galkin is None:
        kwargs_numerics_galkin = {'interpol_grid_num': 1000, 'log_integration': True,
                                  'max_integrate': 100, 'min_integrate': 0.001}
    self._kwargs_numerics_kin = kwargs_numerics_galkin
    self._anisotropy_model = anisotropy_model
    self._analytic_kinematics = analytic_kinematics
    self._Hernquist_approx = Hernquist_approx
    self._MGE_light = MGE_light
    self._MGE_mass = MGE_mass
    self._multi_observations = multi_observations
:param z_lens: redshift of lens
:param z_source: redshift of source
:param kwargs_model: model keyword arguments, needs 'lens_model_list', 'lens_light_model_list'
:param kwargs_aperture: spectroscopic aperture keyword arguments, see lenstronomy.Galkin.aperture for options
:param kwargs_seeing: seeing condition of spectroscopic observation, corresponds to kwargs_psf in the GalKin module specified in lenstronomy.GalKin.psf
:param cosmo: astropy.cosmology instance, if None then will be set to the default cosmology
:param lens_model_kinematics_bool: bool list of length of the lens model. Only takes a subset of all the models as part of the kinematics computation (can be used to ignore substructure, shear etc. that do not describe the main deflector potential)
:param light_model_kinematics_bool: bool list of length of the light model. Only takes a subset of all the models as part of the kinematics computation (can be used to ignore light components that do not describe the main deflector)
:param multi_observations: bool, if True uses multi-observation to predict a set of different observations with the GalkinMultiObservation() class. kwargs_aperture and kwargs_seeing are then required to be lists of the individual observations.
:param anisotropy_model: type of stellar anisotropy model. See details in the MamonLokasAnisotropy() class of lenstronomy.GalKin.anisotropy
:param analytic_kinematics: boolean, if True, uses the analytic JAM modeling for a power-law profile on top of a Hernquist light profile. ATTENTION: This may not be accurate for your specific problem!
:param Hernquist_approx: bool, if True, uses a Hernquist light profile matched to the half light radius of the deflector light profile to compute the kinematics
:param MGE_light: bool, if True performs the MGE for the light distribution
:param MGE_mass: bool, if True performs the MGE for the mass distribution
:param kwargs_numerics_galkin: numerical settings for the integrated line-of-sight velocity dispersion
:param kwargs_mge_mass: keyword arguments that go into the MGE decomposition routine
:param kwargs_mge_light: keyword arguments that go into the MGE decomposition routine
:param sampling_number: int, number of spectral renderings used to compute the light-weighted integrated LOS dispersion within the aperture. This keyword should be chosen high enough to result in converged results within the tolerance.
:param num_kin_sampling: number of kinematic renderings on a total IFU
:param num_psf_sampling: number of PSF displacements for each kinematic rendering on the IFU
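A minimal construction sketch for this class; the model list, aperture, seeing, and anisotropy values below are illustrative assumptions, not taken from the source:

from lenstronomy.Analysis.kinematics_api import KinematicsAPI

# illustrative model and observation settings (assumed values)
kwargs_model = {'lens_model_list': ['SIE'], 'lens_light_model_list': ['SERSIC']}
kwargs_aperture = {'aperture_type': 'slit', 'length': 1.0, 'width': 1.0,
                   'center_ra': 0.0, 'center_dec': 0.0, 'angle': 0.0}
kwargs_seeing = {'psf_type': 'GAUSSIAN', 'fwhm': 0.7}

kin_api = KinematicsAPI(z_lens=0.5, z_source=2.0, kwargs_model=kwargs_model,
                        kwargs_aperture=kwargs_aperture, kwargs_seeing=kwargs_seeing,
                        anisotropy_model='OM')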
https://github.com/sibirrer/lenstronomy/blob/e6d0e179a98ecb0c4db25cdf7cfb73e83c6aeded/lenstronomy/Analysis/kinematics_api.py#L22-L97
__author__ = 'sibirrer'

import numpy as np
import copy

from lenstronomy.GalKin.galkin_multiobservation import GalkinMultiObservation
from lenstronomy.GalKin.galkin import Galkin
from lenstronomy.Cosmo.lens_cosmo import LensCosmo
from lenstronomy.Util import class_creator
from lenstronomy.Analysis.lens_profile import LensProfileAnalysis
from lenstronomy.Analysis.light_profile import LightProfileAnalysis
import lenstronomy.Util.multi_gauss_expansion as mge

__all__ = ['KinematicsAPI']


class KinematicsAPI(object):
MIT License
hackatbrown/2015.hackatbrown.org
hack-at-brown-2015/cssutils/tests/test_properties.py
PropertiesTestCase._check
python
def _check(self, name, keys):
    notvalid = self._allvalues()
    for value in self._valuesofkeys(keys):
        if name == debug:
            print '+True?', Property(name, value).valid, value
        self.assertEqual(True, Property(name, value).valid)
        if value in notvalid:
            notvalid.remove(value)
    for value in notvalid:
        if name == debug:
            print '-False?', Property(name, value).valid, value
        self.assertEqual(False, Property(name, value).valid)
Check that, for property `name`, every value derived from `keys` is reported as valid and every remaining known value is reported as invalid.
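The validity check the test exercises can be reproduced directly with cssutils; a small sketch outside the test suite, with expected outcomes hedged as comments:

from cssutils.css.property import Property

# a value that should be valid for 'color' and one that should not be
print(Property('color', 'red').valid)     # expected: True
print(Property('color', '1.2em').valid)   # expected: False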
https://github.com/hackatbrown/2015.hackatbrown.org/blob/6e6e10b010421228deb562909a1c8bb4272b759f/hack-at-brown-2015/cssutils/tests/test_properties.py#L98-L113
__version__ = '$Id: test_property.py 1529 2008-11-30 15:12:01Z cthedot $' import copy import xml.dom import basetest import cssutils from cssutils.css.property import Property debug = False class PropertiesTestCase(basetest.BaseTestCase): def setUp(self): V = { '0': ('0', '-0'), 'NUMBER': ('0', '-0', '100.1', '-100.1'), 'PERCENTAGE': ('0%', '-0%', '100.1%', '-100.1%'), 'EM': '1.2em', 'EX': '1.2ex', 'PX': '1.2px', 'CM': '1.2cm', 'MM': '1.2mm', 'IN': '1.2in', 'PT': '1.2pt', 'PC': '1.2pc', 'ANGLES': ('1deg', '1rad', '1grad'), 'TIMES': ('1s', '1ms'), 'FREQUENCIES': ('1hz', '1khz'), 'DIMENSION': ('1dimension', '1_dimension', '1dimension2'), 'STRING': ('"string"', "'STRING'"), 'URI': ('url(x)', 'URL("x")', "url(')')"), 'IDENT': ('ident', 'IDENT', '_IDENT', '_2', 'i-2'), 'ATTR': ('attr(x)'), 'RECT': ('rect(1,2,3,4)'), 'CLIP': ('rect(1,2,3,4)'), 'FUNCTION': (), 'HEX3': '#123', 'HEX6': '#123abc', 'RGB': 'rgb(1,2,3)', 'RGB100': 'rgb(1%,2%,100%)', 'RGBA': 'rgba(1,2,3, 1)', 'RGBA100': 'rgba(1%,2%,100%, 0)', 'HSL': 'hsl(1,2%,3%)', 'HSLA': 'hsla(1,2%,3%, 1.0)' } def expanded(*keys): r = [] for k in keys: if isinstance(V[k], basestring): r.append(V[k]) else: r.extend(list(V[k])) return r self.V = V self.ALL = list(self._valuesofkeys(V.keys())) self.V['LENGTHS'] = expanded('0', 'EM', 'EX', 'PX', 'CM', 'MM', 'IN', 'PT', 'PC') self.V['COLORS'] = expanded('HEX3', 'HEX6', 'RGB', 'RGB100') self.V['COLORS3'] = expanded('RGBA', 'RGBA100', 'HSL', 'HSLA') def _allvalues(self): return copy.copy(self.ALL) def _valuesofkeys(self, keys): done = [] for key in keys: if isinstance(key, list): for v in key: yield v else: v = self.V[key] if isinstance(v, basestring): if v not in done: done.append(v) yield v else: for value in v: if value not in done: done.append(value) yield value
MIT License
borgwardtlab/wwl
src/wwl/wwl.py
pairwise_wasserstein_distance
python
def pairwise_wasserstein_distance(X, node_features=None, num_iterations=3, sinkhorn=False, enforce_continuous=False):
    categorical = True
    if enforce_continuous:
        print('Enforce continous flag is on, using CONTINUOUS propagation scheme.')
        categorical = False
    elif node_features is not None:
        print('Continuous node features provided, using CONTINUOUS propagation scheme.')
        categorical = False
    else:
        for g in X:
            if not 'label' in g.vs.attribute_names():
                print('No label attributed to graphs, use degree instead and use CONTINUOUS propagation scheme.')
                categorical = False
                break
        if categorical:
            print('Categorically-labelled graphs, using CATEGORICAL propagation scheme.')
    if categorical:
        es = WeisfeilerLehman()
        node_representations = es.fit_transform(X, num_iterations=num_iterations)
    else:
        es = ContinuousWeisfeilerLehman()
        node_representations = es.fit_transform(X, node_features=node_features, num_iterations=num_iterations)
    pairwise_distances = _compute_wasserstein_distance(node_representations, sinkhorn=sinkhorn,
                                                       categorical=categorical, sinkhorn_lambda=1e-2)
    return pairwise_distances
Pairwise computation of the Wasserstein distance between embeddings of the graphs in X.

args:
    X (List[ig.graphs]): List of graphs
    node_features (array): Array containing the node features for continuously attributed graphs
    num_iterations (int): Number of iterations for the propagation scheme
    sinkhorn (bool): Indicates whether sinkhorn approximation should be used
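A small usage sketch with categorically labelled igraph graphs; the random graphs, labels, and import path are assumptions for illustration:

import igraph as ig
from wwl.wwl import pairwise_wasserstein_distance  # import path assumed from src/wwl/wwl.py

# two small random graphs with categorical node labels
g1 = ig.Graph.Erdos_Renyi(n=10, p=0.3)
g2 = ig.Graph.Erdos_Renyi(n=12, p=0.25)
g1.vs['label'] = [str(d) for d in g1.degree()]
g2.vs['label'] = [str(d) for d in g2.degree()]

D = pairwise_wasserstein_distance([g1, g2], num_iterations=3)
print(D.shape)  # (2, 2) symmetric distance matrix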
https://github.com/borgwardtlab/wwl/blob/96ed6d3e877b4fdedcd4253079da52f697efaecc/src/wwl/wwl.py#L44-L82
from .propagation_scheme import WeisfeilerLehman, ContinuousWeisfeilerLehman from sklearn.metrics.pairwise import laplacian_kernel import ot import numpy as np def _compute_wasserstein_distance(label_sequences, sinkhorn=False, categorical=False, sinkhorn_lambda=1e-2): n = len(label_sequences) M = np.zeros((n,n)) for graph_index_1, graph_1 in enumerate(label_sequences): labels_1 = label_sequences[graph_index_1] for graph_index_2, graph_2 in enumerate(label_sequences[graph_index_1:]): labels_2 = label_sequences[graph_index_2 + graph_index_1] ground_distance = 'hamming' if categorical else 'euclidean' costs = ot.dist(labels_1, labels_2, metric=ground_distance) if sinkhorn: mat = ot.sinkhorn(np.ones(len(labels_1))/len(labels_1), np.ones(len(labels_2))/len(labels_2), costs, sinkhorn_lambda, numItermax=50) M[graph_index_1, graph_index_2 + graph_index_1] = np.sum(np.multiply(mat, costs)) else: M[graph_index_1, graph_index_2 + graph_index_1] = ot.emd2([], [], costs) M = (M + M.T) return M
BSD 3-Clause New or Revised License
nuagenetworks/vspk-examples
python/shared_domain_vports_acl_analytics.py
get_args
python
def get_args():
    parser = argparse.ArgumentParser(description="Tool to gather statistics on domains, zones, subnets or vports within a certain time frame.")
    parser.add_argument('-d', '--debug', required=False, help='Enable debug output', dest='debug', action='store_true')
    parser.add_argument('-j', '--json', required=False, help='Print as JSON, not as a table', dest='json_output', action='store_true')
    parser.add_argument('-l', '--log-file', required=False, help='File to log to (default = stdout)', dest='logfile', type=str)
    parser.add_argument('-E', '--nuage-enterprise', required=True, help='The enterprise with which to connect to the Nuage VSD/SDK host', dest='nuage_enterprise', type=str)
    parser.add_argument('-H', '--nuage-host', required=True, help='The Nuage VSD/SDK endpoint to connect to', dest='nuage_host', type=str)
    parser.add_argument('-P', '--nuage-port', required=False, help='The Nuage VSD/SDK server port to connect to (default = 8443)', dest='nuage_port', type=int, default=8443)
    parser.add_argument('-p', '--nuage-password', required=False, help='The password with which to connect to the Nuage VSD/SDK host. If not specified, the user is prompted at runtime for a password', dest='nuage_password', type=str)
    parser.add_argument('-u', '--nuage-user', required=True, help='The username with which to connect to the Nuage VSD/SDK host', dest='nuage_username', type=str)
    parser.add_argument('-v', '--verbose', required=False, help='Enable verbose output', dest='verbose', action='store_true')
    args = parser.parse_args()
    return args
Supports the command-line arguments listed below.
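A sketch of how the parser behaves when the required flags are supplied programmatically from inside the same script; the connection values are made up:

import sys

# simulate a command line with the required Nuage connection flags (dummy values)
sys.argv = ['shared_domain_vports_acl_analytics.py',
            '-E', 'my-enterprise', '-H', 'vsd.example.com', '-u', 'admin', '--json']
args = get_args()
print(args.nuage_host, args.nuage_port, args.json_output)  # vsd.example.com 8443 True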
https://github.com/nuagenetworks/vspk-examples/blob/825d9bd6093b5ed19419ca15898085ae85731657/python/shared_domain_vports_acl_analytics.py#L224-L240
from __future__ import print_function from builtins import str import argparse import getpass import json import logging from prettytable import PrettyTable from vspk import v6 as vsdk ether_types = { '0x0800': 'IPv4', '0x0806': 'ARP', '0x0842': 'Wake-on-LAN', '0x22F3': 'IETF TRILL', '0x6003': 'DECnet Phase IV', '0x8035': 'RARP', '0x809B': 'AppleTalk ', '0x80F3': 'AARP', '0x8100': '802.1Q and 802.1aq', '0x8137': 'IPX', '0x8204': 'QNX Qnet', '0x86DD': 'IPv6', '0x8808': 'Ethernet flow control', '0x8819': 'CobraNet', '0x8847': 'MPLS unicast', '0x8848': 'MPLS multicast', '0x8863': 'PPPoE Discovery Stage', '0x8864': 'PPPoE Session Stage', '0x8870': 'Jumbo Frames (proposed)', '0x887B': 'HomePlug 1.0 MME', '0x888E': 'EAP over LAN (IEEE 802.1X)', '0x8892': 'PROFINET Protocol', '0x889A': 'HyperSCSI (SCSI over Ethernet)', '0x88A2': 'ATA over Ethernet', '0x88A4': 'EtherCAT Protocol', '0x88A8': 'Provider Bridging (IEEE 802.1ad) ', '0x88AB': 'Ethernet Powerlink', '0x88CC': 'LLDP', '0x88CD': 'SERCOS III', '0x88E1': 'HomePlug AV MME', '0x88E3': 'Media Redundancy Protocol (IEC62439-2)', '0x88E5': 'MAC security (IEEE 802.1AE)', '0x88E7': 'Provider Backbone Bridges (PBB) (IEEE 802.1ah)', '0x88F7': 'Precision Time Protocol (PTP) over Ethernet (IEEE 1588)', '0x8902': 'IEEE 802.1ag Connectivity Fault Management (CFM) Protocol ', '0x8906': 'FCoE', '0x8914': 'FCoE Initialization Protocol', '0x8915': 'RoCE', '0x891D': 'TTE', '0x892F': 'HSR', '0x9000': 'Ethernet Configuration Testing Protocol' } protocols = { '0': 'HOPOPT', '1': 'ICMP', '2': 'IGMP', '3': 'GGP', '4': 'IPv4', '5': 'ST', '6': 'TCP', '7': 'CBT', '8': 'EGP', '9': 'IGP', '10': 'BBN-RCC-MON', '11': 'NVP-II', '12': 'PUP', '13': 'ARGUS', '14': 'EMCON', '15': 'XNET', '16': 'CHAOS', '17': 'UDP', '18': 'MUX', '19': 'DCN-MEAS', '20': 'HMP', '21': 'PRM', '22': 'XNS-IDP', '23': 'TRUNK-1', '24': 'TRUNK-2', '25': 'LEAF-1', '26': 'LEAF-2', '27': 'RDP', '28': 'IRTP', '29': 'ISO-TP4', '30': 'NETBLT', '31': 'MFE-NSP', '32': 'MERIT-INP', '33': 'DCCP', '34': '3PC', '35': 'IDPR', '36': 'XTP', '37': 'DDP', '38': 'IDPR-CMTP', '39': 'TP++', '40': 'IL', '41': 'IPv6', '42': 'SDRP', '43': 'IPv6-Route', '44': 'IPv6-Frag', '45': 'IDRP', '46': 'RSVP', '47': 'GRE', '48': 'DSR', '49': 'BNA', '50': 'ESP', '51': 'AH', '52': 'I-NLSP', '53': 'SWIPE', '54': 'NARP', '55': 'MOBILE', '56': 'TLSP', '57': 'SKIP', '58': 'IPv6-ICMP', '59': 'IPv6-NoNxt', '60': 'IPv6-Opts', '62': 'CFTP', '64': 'SAT-EXPAK', '65': 'KRYPTOLAN', '66': 'RVD', '67': 'IPPC', '69': 'SAT-MON', '70': 'VISA', '71': 'IPCV', '72': 'CPNX', '73': 'CPHB', '74': 'WSN', '75': 'PVP', '76': 'BR-SAT-MON', '77': 'SUN-ND', '78': 'WB-MON', '79': 'WB-EXPAK', '80': 'ISO-IP', '81': 'VMTP', '82': 'SECURE-VMTP', '83': 'VINES', '84': 'IPTM', '85': 'NSFNET-IGP', '86': 'DGP', '87': 'TCF', '88': 'EIGRP', '89': 'OSPFIGP', '90': 'Sprite-RPC', '91': 'LARP', '92': 'MTP', '93': 'AX.25', '94': 'IPIP', '95': 'MICP', '96': 'SCC-SP', '97': 'ETHERIP', '98': 'ENCAP', '100': 'GMTP', '101': 'IFMP', '102': 'PNNI', '103': 'PIM', '104': 'ARIS', '105': 'SCPS', '106': 'QNX', '107': 'A/N', '108': 'IPComp', '109': 'SNP', '110': 'Compaq-Peer', '111': 'IPX-in-IP', '112': 'VRRP', '113': 'PGM', '115': 'L2TP', '116': 'DDX', '117': 'IATP', '118': 'STP', '119': 'SRP', '120': 'UTI', '121': 'SMP', '122': 'SM', '123': 'PTP', '124': 'ISIS over IPv4', '125': 'FIRE', '126': 'CRTP', '127': 'CRUDP', '128': 'SSCOPMCE', '129': 'IPLT', '130': 'SPS', '131': 'PIPE', '132': 'SCTP', '133': 'FC', '134': 'RSVP-E2E-IGNORE', '135': 'Mobility Header', '136': 'UDPLite', '137': 'MPLS-in-IP', 
'138': 'manet', '139': 'HIP', '140': 'Shim6', '141': 'WESP', '142': 'ROHC', '255': 'Reserved' } configuration = {} logger = None output_parser = None
BSD 3-Clause New or Revised License
kriaga/health-checker
HealthChecker/venv/Lib/site-packages/nltk/sem/hole.py
HoleSemantics._sanity_check_plugging
python
def _sanity_check_plugging(self, plugging, node, ancestors):
    if node in self.holes:
        ancestors = [node] + ancestors
        label = plugging[node]
    else:
        label = node
    assert label in self.labels
    for c in self.constraints:
        if c.lhs == label:
            assert c.rhs in ancestors
    args = self.fragments[label][1]
    for arg in args:
        if self.is_node(arg):
            self._sanity_check_plugging(plugging, arg, [label] + ancestors)
Make sure that a given plugging is legal. We recursively go through each node and make sure that no constraints are violated. We also check that all holes have been filled.
https://github.com/kriaga/health-checker/blob/3d9ce933f131bcbb897103b0f509cc45393cae4a/HealthChecker/venv/Lib/site-packages/nltk/sem/hole.py#L252-L270
from __future__ import print_function, unicode_literals from functools import reduce from six import itervalues from nltk import compat from nltk.parse import load_parser from nltk.sem.skolemize import skolemize from nltk.sem.logic import (AllExpression, AndExpression, ApplicationExpression, ExistsExpression, IffExpression, ImpExpression, LambdaExpression, NegatedExpression, OrExpression) class Constants(object): ALL = 'ALL' EXISTS = 'EXISTS' NOT = 'NOT' AND = 'AND' OR = 'OR' IMP = 'IMP' IFF = 'IFF' PRED = 'PRED' LEQ = 'LEQ' HOLE = 'HOLE' LABEL = 'LABEL' MAP = {ALL: lambda v, e: AllExpression(v.variable, e), EXISTS: lambda v, e: ExistsExpression(v.variable, e), NOT: NegatedExpression, AND: AndExpression, OR: OrExpression, IMP: ImpExpression, IFF: IffExpression, PRED: ApplicationExpression} class HoleSemantics(object): def __init__(self, usr): self.holes = set() self.labels = set() self.fragments = {} self.constraints = set() self._break_down(usr) self.top_most_labels = self._find_top_most_labels() self.top_hole = self._find_top_hole() def is_node(self, x): return x in (self.labels | self.holes) def _break_down(self, usr): if isinstance(usr, AndExpression): self._break_down(usr.first) self._break_down(usr.second) elif isinstance(usr, ApplicationExpression): func, args = usr.uncurry() if func.variable.name == Constants.LEQ: self.constraints.add(Constraint(args[0], args[1])) elif func.variable.name == Constants.HOLE: self.holes.add(args[0]) elif func.variable.name == Constants.LABEL: self.labels.add(args[0]) else: label = args[0] assert label not in self.fragments self.fragments[label] = (func, args[1:]) else: raise ValueError(usr.label()) def _find_top_nodes(self, node_list): top_nodes = node_list.copy() for f in itervalues(self.fragments): args = f[1] for arg in args: if arg in node_list: top_nodes.discard(arg) return top_nodes def _find_top_most_labels(self): return self._find_top_nodes(self.labels) def _find_top_hole(self): top_holes = self._find_top_nodes(self.holes) assert len(top_holes) == 1 return top_holes.pop() def pluggings(self): record = [] self._plug_nodes([(self.top_hole, [])], self.top_most_labels, {}, record) return record def _plug_nodes(self, queue, potential_labels, plug_acc, record): if queue != []: (node, ancestors) = queue[0] if node in self.holes: self._plug_hole(node, ancestors, queue[1:], potential_labels, plug_acc, record) else: assert node in self.labels args = self.fragments[node][1] head = [(a, ancestors) for a in args if self.is_node(a)] self._plug_nodes(head + queue[1:], potential_labels, plug_acc, record) else: raise Exception('queue empty') def _plug_hole(self, hole, ancestors0, queue, potential_labels0, plug_acc0, record): assert hole not in ancestors0 ancestors = [hole] + ancestors0 for l in potential_labels0: if self._violates_constraints(l, ancestors): continue plug_acc = plug_acc0.copy() plug_acc[hole] = l potential_labels = potential_labels0.copy() potential_labels.remove(l) if len(potential_labels) == 0: self._sanity_check_plugging(plug_acc, self.top_hole, []) record.append(plug_acc) else: self._plug_nodes(queue + [(l, ancestors)], potential_labels, plug_acc, record) def _violates_constraints(self, label, ancestors): for c in self.constraints: if c.lhs == label: if c.rhs not in ancestors: return True return False
MIT License
shonenada/flask-rbac
flask_rbac/model.py
RoleMixin.get_name
python
def get_name(self):
    return self.name
Return the name of this role
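A minimal usage sketch, importing RoleMixin from the module path shown for this record:

from flask_rbac.model import RoleMixin

admin = RoleMixin('admin')
print(admin.get_name())          # 'admin'
print(RoleMixin.roles['admin'])  # the same instance, registered at construction time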
https://github.com/shonenada/flask-rbac/blob/8271dd451b94ee0903e27018f0d085ed44bc509d/flask_rbac/model.py#L18-L20
class RoleMixin(object):
    roles = {}

    def __init__(self, name=None):
        self.name = name
        if not hasattr(self.__class__, 'parents'):
            self.parents = set()
        if not hasattr(self.__class__, 'children'):
            self.children = set()
        RoleMixin.roles[name] = self
MIT License
cohesity/management-sdk-python
cohesity_management_sdk/models/nimble_protection_source.py
NimbleProtectionSource.from_dictionary
python
def from_dictionary(cls, dictionary):
    if dictionary is None:
        return None
    name = dictionary.get('name')
    storage_array = cohesity_management_sdk.models.san_storage_array.SanStorageArray.from_dictionary(dictionary.get('storageArray')) if dictionary.get('storageArray') else None
    mtype = dictionary.get('type')
    volume = cohesity_management_sdk.models.san_volume.SanVolume.from_dictionary(dictionary.get('volume')) if dictionary.get('volume') else None
    return cls(name, storage_array, mtype, volume)
Creates an instance of this model from a dictionary

Args:
    dictionary (dictionary): A dictionary representation of the object as
        obtained from the deserialization of the server's response. The keys
        MUST match property names in the API description.

Returns:
    object: An instance of this structure class.
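A small deserialization sketch; the dictionary contents are illustrative, and absent keys such as 'storageArray' and 'volume' simply come back as None:

from cohesity_management_sdk.models.nimble_protection_source import NimbleProtectionSource

payload = {'name': 'nimble-array-01', 'type': 'kStorageArray'}  # illustrative payload
source = NimbleProtectionSource.from_dictionary(payload)
print(source.name, source.mtype)  # nimble-array-01 kStorageArray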
https://github.com/cohesity/management-sdk-python/blob/1c085d5a10f5f1a87b700e7ad1fc1dcabda41ae5/cohesity_management_sdk/models/nimble_protection_source.py#L49-L75
import cohesity_management_sdk.models.san_storage_array import cohesity_management_sdk.models.san_volume class NimbleProtectionSource(object): _names = { "name":'name', "storage_array":'storageArray', "mtype":'type', "volume":'volume' } def __init__(self, name=None, storage_array=None, mtype=None, volume=None): self.name = name self.storage_array = storage_array self.mtype = mtype self.volume = volume @classmethod
Apache License 2.0
tehmaze/natural
natural/file.py
accessed
python
def accessed(filename):
    if isinstance(filename, file):
        filename = filename.name
    return duration(os.stat(filename)[stat.ST_ATIME])
Retrieve how long ago a file has been accessed.

:param filename: name of the file

>>> print accessed(__file__)  # doctest: +SKIP
just now
https://github.com/tehmaze/natural/blob/4d41ed6708965420ad884339e0e9ca5f089b2202/natural/file.py#L7-L19
from natural.date import duration
from natural.size import filesize
import os
import stat
MIT License
ibm/watson-assistant-workbench
ci/test_utils.py
BaseTestCaseCapture.t_fun_noExceptionAndErrMessage
python
def t_fun_noExceptionAndErrMessage(self, function, message, args=[], kwargs={}):
    return self.t_fun_noExceptionAndMessage(function, BaseTestCaseCapture.MessageType.ERR, message, args, kwargs)
(Generic) Runs the function with the given arguments and checks that no exception was raised and that the expected error message was emitted.
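A usage sketch inside a test class; the checked function and message are made up, and it assumes the base class's capture fixtures are wired up the way the repository's other tests use them:

class TestExample(BaseTestCaseCapture):

    def test_prints_expected_error(self):
        def fails_softly(path):
            # hypothetical function that reports an error without raising
            import sys
            sys.stderr.write('cannot open ' + path)

        # passes when fails_softly raises nothing and 'cannot open' appears in the error output
        self.t_fun_noExceptionAndErrMessage(fails_softly, 'cannot open', args=['missing.json'])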
https://github.com/ibm/watson-assistant-workbench/blob/fa055d7324645af9a3601c9cf625ce81cebc8aed/ci/test_utils.py#L133-L135
import os import shutil import traceback import pytest class BaseTestCaseCapture(object): dialogSchemaPath = '../data_spec/dialog_schema.xml' captured = None logs = None class MessageType(object): OUT = 0 ERR = 1 LOG = 2 def t_missingRequiredArgs(self, args=[], kwargs={}): return self.t_fun_missingRequiredArgs(self.callfunc, args, kwargs) def t_fun_missingRequiredArgs(self, function, args=[], kwargs={}): return self.t_fun_exitCodeAndErrMessage(function, 2, 'the following arguments are required', args, kwargs) def t_unrecognizedArgs(self, args=[], kwargs={}): return self.t_fun_unrecognizedArgs(self.callfunc, args, kwargs) def t_fun_unrecognizedArgs(self, function, args=[], kwargs={}): return self.t_fun_exitCodeAndErrMessage(function, 2, 'unrecognized arguments', args, kwargs) def t_exitCode(self, exitCode, args=[], kwargs={}): return self.t_fun_exitCode(self.callfunc, exitCode, args, kwargs) def t_fun_exitCode(self, function, exitCode, args=[], kwargs={}): return self.t_fun_exitCodeAndMessage(function, exitCode, None, None, args, kwargs) def t_exitCodeAndOutMessage(self, exitCode, message, args=[], kwargs={}): return self.t_fun_exitCodeAndOutMessage(self.callfunc, exitCode, message, args, kwargs) def t_fun_exitCodeAndOutMessage(self, function, exitCode, message, args=[], kwargs={}): return self.t_fun_exitCodeAndMessage(function, exitCode, BaseTestCaseCapture.MessageType.OUT, message, args, kwargs) def t_exitCodeAndErrMessage(self, exitCode, errMessage, args=[], kwargs={}): return self.t_fun_exitCodeAndErrMessage(self.callfunc, exitCode, errMessage, args, kwargs) def t_fun_exitCodeAndErrMessage(self, function, exitCode, errMessage, args=[], kwargs={}): return self.t_fun_exitCodeAndMessage(function, exitCode, BaseTestCaseCapture.MessageType.ERR, errMessage, args, kwargs) def t_exitCodeAndLogMessage(self, exitCode, message, args=[], kwargs={}): return self.t_fun_exitCodeAndLogMessage(self.callfunc, exitCode, message, args, kwargs) def t_fun_exitCodeAndLogMessage(self, function, exitCode, message, args=[], kwargs={}): return self.t_fun_exitCodeAndMessage(function, exitCode, BaseTestCaseCapture.MessageType.LOG, message, args, kwargs) def t_exitCodeAndMessage(self, function, exitCode, messageType, message, args=[], kwargs={}): return self.t_fun_exitCodeAndMessage(self.callfunc, exitCode, messageType, message, args, kwargs) def t_fun_exitCodeAndMessage(self, function, exitCode, messageType, message, args=[], kwargs={}): return self.t_fun_generic(function, SystemExit, str(exitCode), messageType, message, args, kwargs) def t_raiseException(self, exceptionType, exceptionValue, args=[], kwargs={}): return self.t_fun_raiseException(self.callfunc, exceptionType, exceptionValue, args, kwargs) def t_fun_raiseException(self, function, exceptionType, exceptionValue, args=[], kwargs={}): return self.t_fun_generic(function, exceptionType, exceptionValue, None, None, args, kwargs) def t_noException(self, args=[], kwargs={}): return self.t_fun_noException(self.callfunc, args, kwargs) def t_fun_noException(self, function, args=[], kwargs={}): return self.t_fun_noExceptionAndMessage(function, None, None, args, kwargs) def t_noExceptionAndOutMessage(self, message, args=[], kwargs={}): return self.t_fun_noExceptionAndOutMessage(self.callfunc, message, args, kwargs) def t_fun_noExceptionAndOutMessage(self, function, message, args=[], kwargs={}): return self.t_fun_noExceptionAndMessage(function, BaseTestCaseCapture.MessageType.OUT, message, args, kwargs) def t_noExceptionAndErrMessage(self, message, args=[], 
kwargs={}): return self.t_fun_noExceptionAndErrMessage(self.callfunc, message, args, kwargs)
Apache License 2.0
pytest-dev/pytest-bdd
pytest_bdd/parser.py
Step.__str__
python
def __str__(self):
    return f'{self.type.capitalize()} "{self.name}"'
Full step name including the type.
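A small sketch of the resulting string; the constructor arguments follow the Step definition shown in the context below, with made-up step text:

from pytest_bdd.parser import Step

step = Step(name='I have a cucumber', type='given', indent=4, line_number=12, keyword='Given')
print(str(step))  # Given "I have a cucumber"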
https://github.com/pytest-dev/pytest-bdd/blob/583910d88d1488ec139f75ebc026a06173389980/pytest_bdd/parser.py#L350-L352
import os.path import re import textwrap import typing from collections import OrderedDict from . import exceptions, types SPLIT_LINE_RE = re.compile(r"(?<!\\)\|") STEP_PARAM_RE = re.compile(r"<(.+?)>") COMMENT_RE = re.compile(r"(^|(?<=\s))#") STEP_PREFIXES = [ ("Feature: ", types.FEATURE), ("Scenario Outline: ", types.SCENARIO_OUTLINE), ("Examples: Vertical", types.EXAMPLES_VERTICAL), ("Examples:", types.EXAMPLES), ("Scenario: ", types.SCENARIO), ("Background:", types.BACKGROUND), ("Given ", types.GIVEN), ("When ", types.WHEN), ("Then ", types.THEN), ("@", types.TAG), ("And ", None), ("But ", None), ] def split_line(line): return [cell.replace("\\|", "|").strip() for cell in SPLIT_LINE_RE.split(line)[1:-1]] def parse_line(line): for prefix, _ in STEP_PREFIXES: if line.startswith(prefix): return prefix.strip(), line[len(prefix) :].strip() return "", line def strip_comments(line): res = COMMENT_RE.search(line) if res: line = line[: res.start()] return line.strip() def get_step_type(line): for prefix, _type in STEP_PREFIXES: if line.startswith(prefix): return _type def parse_feature(basedir: str, filename: str, encoding: str = "utf-8") -> "Feature": abs_filename = os.path.abspath(os.path.join(basedir, filename)) rel_filename = os.path.join(os.path.basename(basedir), filename) feature = Feature( scenarios=OrderedDict(), filename=abs_filename, rel_filename=rel_filename, line_number=1, name=None, tags=set(), examples=Examples(), background=None, description="", ) scenario: typing.Optional[ScenarioTemplate] = None mode = None prev_mode = None description: typing.List[str] = [] step = None multiline_step = False prev_line = None with open(abs_filename, encoding=encoding) as f: content = f.read() for line_number, line in enumerate(content.splitlines(), start=1): unindented_line = line.lstrip() line_indent = len(line) - len(unindented_line) if step and (step.indent < line_indent or ((not unindented_line) and multiline_step)): multiline_step = True step.add_line(line) continue else: step = None multiline_step = False stripped_line = line.strip() clean_line = strip_comments(line) if not clean_line and (not prev_mode or prev_mode not in types.FEATURE): continue mode = get_step_type(clean_line) or mode allowed_prev_mode = (types.BACKGROUND, types.GIVEN, types.WHEN) if not scenario and prev_mode not in allowed_prev_mode and mode in types.STEP_TYPES: raise exceptions.FeatureError( "Step definition outside of a Scenario or a Background", line_number, clean_line, filename ) if mode == types.FEATURE: if prev_mode is None or prev_mode == types.TAG: _, feature.name = parse_line(clean_line) feature.line_number = line_number feature.tags = get_tags(prev_line) elif prev_mode == types.FEATURE: description.append(clean_line) else: raise exceptions.FeatureError( "Multiple features are not allowed in a single feature file", line_number, clean_line, filename, ) prev_mode = mode keyword, parsed_line = parse_line(clean_line) if mode in [types.SCENARIO, types.SCENARIO_OUTLINE]: tags = get_tags(prev_line) feature.scenarios[parsed_line] = scenario = ScenarioTemplate( feature=feature, name=parsed_line, line_number=line_number, tags=tags ) elif mode == types.BACKGROUND: feature.background = Background(feature=feature, line_number=line_number) elif mode == types.EXAMPLES: mode = types.EXAMPLES_HEADERS (scenario or feature).examples.line_number = line_number elif mode == types.EXAMPLES_VERTICAL: mode = types.EXAMPLE_LINE_VERTICAL (scenario or feature).examples.line_number = line_number elif mode == types.EXAMPLES_HEADERS: 
(scenario or feature).examples.set_param_names([l for l in split_line(parsed_line) if l]) mode = types.EXAMPLE_LINE elif mode == types.EXAMPLE_LINE: (scenario or feature).examples.add_example([l for l in split_line(stripped_line)]) elif mode == types.EXAMPLE_LINE_VERTICAL: param_line_parts = [l for l in split_line(stripped_line)] try: (scenario or feature).examples.add_example_row(param_line_parts[0], param_line_parts[1:]) except exceptions.ExamplesNotValidError as exc: if scenario: raise exceptions.FeatureError( f"Scenario has not valid examples. {exc.args[0]}", line_number, clean_line, filename, ) else: raise exceptions.FeatureError( f"Feature has not valid examples. {exc.args[0]}", line_number, clean_line, filename, ) elif mode and mode not in (types.FEATURE, types.TAG): step = Step(name=parsed_line, type=mode, indent=line_indent, line_number=line_number, keyword=keyword) if feature.background and not scenario: target = feature.background else: target = scenario target.add_step(step) prev_line = clean_line feature.description = "\n".join(description).strip() return feature class Feature: def __init__(self, scenarios, filename, rel_filename, name, tags, examples, background, line_number, description): self.scenarios: typing.Dict[str, ScenarioTemplate] = scenarios self.rel_filename = rel_filename self.filename = filename self.tags = tags self.examples = examples self.name = name self.line_number = line_number self.description = description self.background = background class ScenarioTemplate: def __init__(self, feature: Feature, name: str, line_number: int, tags=None): self.feature = feature self.name = name self._steps: typing.List[Step] = [] self.examples = Examples() self.line_number = line_number self.tags = tags or set() def add_step(self, step): step.scenario = self self._steps.append(step) @property def steps(self): background = self.feature.background return (background.steps if background else []) + self._steps def render(self, context: typing.Mapping[str, typing.Any]) -> "Scenario": steps = [ Step( name=templated_step.render(context), type=templated_step.type, indent=templated_step.indent, line_number=templated_step.line_number, keyword=templated_step.keyword, ) for templated_step in self.steps ] return Scenario(feature=self.feature, name=self.name, line_number=self.line_number, steps=steps, tags=self.tags) def validate(self): params = frozenset(sum((list(step.params) for step in self.steps), [])) example_params = set(self.examples.example_params + self.feature.examples.example_params) if params and example_params and params != example_params: raise exceptions.ScenarioExamplesNotValidError( """Scenario "{}" in the feature "{}" has not valid examples. 
""" """Set of step parameters {} should match set of example values {}.""".format( self.name, self.feature.filename, sorted(params), sorted(example_params) ) ) class Scenario: def __init__(self, feature: Feature, name: str, line_number: int, steps: "typing.List[Step]", tags=None): self.feature = feature self.name = name self.steps = steps self.line_number = line_number self.tags = tags or set() self.failed = False class Step: def __init__(self, name, type, indent, line_number, keyword): self.name = name self.keyword = keyword self.lines = [] self.indent = indent self.type = type self.line_number = line_number self.failed = False self.start = 0 self.stop = 0 self.scenario = None self.background = None def add_line(self, line): self.lines.append(line) @property def name(self): multilines_content = textwrap.dedent("\n".join(self.lines)) if self.lines else "" multilines_content = re.sub( pattern=r'^"""\n(?P<content>.*)\n"""$', repl=r"\g<content>", string=multilines_content, flags=re.DOTALL, ) lines = [self._name] + [multilines_content] return "\n".join(lines).strip() @name.setter def name(self, value): self._name = value
MIT License
ddorn/gui
GUI/base.py
BaseWidget.unfocus
python
def unfocus(self):
    self._focus = False
Takes back the focus from the widget.
https://github.com/ddorn/gui/blob/e1fcb5286d24e0995f280d5180222e51895c368c/GUI/base.py#L157-L159
import pygame from GUI.locals import CENTER, TOPLEFT, TOPRIGHT, MIDTOP, MIDLEFT, MIDRIGHT, BOTTOMRIGHT, MIDBOTTOM, BOTTOMLEFT from pygame.event import EventType class BaseWidget(pygame.Rect): def __init__(self, pos, size, anchor=CENTER): self.__verify(pos) self.__verify(size) super().__init__((0, 0), (0, 0)) self._anchor = anchor self._pos = pos self._size = size self._focus = False self.clicked = False def __str__(self): return repr(self) def __repr__(self): return '<BaseWidget({}, {})>'.format(self.topleft, self.size) def __contains__(self, item): return self.left <= item[0] <= self.right and self.top <= item[1] <= self.bottom def __getattribute__(self, item): if item in "x y top left bottom right topleft bottomleft topright bottomright midtop midleft midbottom " "midright center centerx centery".split(): self.__update() if item in "width height w h".split(): self.__update() return super(BaseWidget, self).__getattribute__(item) def __setattr__(self, key, value): if key in (TOPLEFT, BOTTOMLEFT, TOPRIGHT, BOTTOMRIGHT, MIDTOP, MIDLEFT, MIDBOTTOM, MIDRIGHT, CENTER): self.anchor = key self.pos = value elif key in ('width', 'height', 'w', 'h'): raise AttributeError("Can't set the attribute") elif key in ('x', 'y', 'top', 'left', 'bottom', 'right', 'centerx', 'centery'): raise AttributeError("Can't set the attribute") else: super(BaseWidget, self).__setattr__(key, value) def __update(self): width, height = self.size super(BaseWidget, self).__setattr__("width", width) super(BaseWidget, self).__setattr__("height", height) super(BaseWidget, self).__setattr__(self.anchor, self.pos) @staticmethod def __verify(pos_or_size): if not callable(pos_or_size): assert isinstance(pos_or_size, tuple) assert len(pos_or_size) == 2 assert isinstance(pos_or_size[0], int) assert isinstance(pos_or_size[1], int) def as_rect(self): return self.pos, self.size @property def pos(self): if callable(self._pos): return self._pos() return self._pos @pos.setter def pos(self, value): if not callable(value): if not isinstance(value, tuple): raise TypeError("The pos must be a callable that returns 2-tuples or a 2-tuple") if len(value) != 2: raise ValueError("The pos must be a callable that returns 2-tuples or a 2-tuple") self._pos = value @property def size(self): if callable(self._size): return self._size() return self._size @size.setter def size(self, value): if not callable(value): if not isinstance(value, tuple): raise TypeError("The size must be a callable that returns 2-tuples or a 2-tuple") if len(value) != 2: raise ValueError("The size must be a callable that returns 2-tuples or a 2-tuple") self._size = value @property def anchor(self): if callable(self._anchor): return self._anchor() return self._anchor @anchor.setter def anchor(self, value): if not callable(value): if value not in (TOPLEFT, TOPRIGHT, MIDTOP, MIDLEFT, MIDRIGHT, CENTER, BOTTOMRIGHT, MIDBOTTOM, BOTTOMLEFT): raise ValueError self._anchor = value def focus(self): self._focus = True
MIT License
rwl/muntjac
muntjac/addon/colorpicker/color_picker.py
ColorPicker.setSwatchesVisibility
python
def setSwatchesVisibility(self, visible):
    if not visible and not self.hsvVisible and not self.rgbVisible:
        raise ValueError('Cannot hide all tabs.')
    self.swatchesVisible = visible
    if self._window is not None:
        self._window.setSwatchesTabVisible(visible)
Set the visibility of the Swatches Tab

@param visible: The visibility
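A minimal usage sketch; the constructor call follows the class definition shown in the context, and the behavior noted in the comments is taken from the method above:

from muntjac.addon.colorpicker.color_picker import ColorPicker

picker = ColorPicker(caption='Pick a color')
picker.setSwatchesVisibility(False)  # allowed: the RGB and HSV tabs are still visible
# hiding the last visible tab raises ValueError('Cannot hide all tabs.')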
https://github.com/rwl/muntjac/blob/8db97712edd81b4d25deaaa48587d2a08010f2c8/muntjac/addon/colorpicker/color_picker.py#L303-L314
from muntjac.addon.colorpicker.color import Color from muntjac.ui.abstract_component import AbstractComponent from muntjac.ui.window import ICloseListener from muntjac.addon.colorpicker.color_change_event import ColorChangeEvent from muntjac.addon.colorpicker.color_selector import IColorSelector class IColorChangeListener(object): def colorChanged(self, event): raise NotImplementedError _COLOR_CHANGE_METHOD = getattr(IColorChangeListener, 'colorChanged') class ColorPicker(AbstractComponent, ICloseListener, IColorSelector, IColorChangeListener): CLIENT_WIDGET = None TYPE_MAPPING = 'com.vaadin.addon.colorpicker.ColorPicker' def __init__(self, caption='Colors', initialColor=None): self.buttonStyle = str(ButtonStyle.BUTTON_NORMAL) self.popupStyle = PopupStyle.POPUP_NORMAL self.buttonCaption = '' self._window = None self._parent_window = None self._popupStatus = False self._positionX = 0 self._positionY = 0 self.rgbVisible = True self.hsvVisible = True self.swatchesVisible = True self.historyVisible = True self.textfieldVisible = True if initialColor is None: initialColor = Color(0, 0, 0) self.color = initialColor self.caption = caption super(ColorPicker, self).__init__() def setColor(self, color): self.color = color if self._window is not None: self._window.setColor(color) self.requestRepaint() def getColor(self): return self.color def setPosition(self, x, y): self._positionX = x self._positionY = y if self._window is not None: self._window.setPositionX(x) self._window.setPositionY(y) def addListener(self, listener, iface=None): if (isinstance(listener, IColorChangeListener) and (iface is None or issubclass(iface, IColorChangeListener))): self.registerListener(ColorChangeEvent, listener, _COLOR_CHANGE_METHOD) super(ColorPicker, self).addListener(listener, iface) def addCallback(self, callback, eventType=None, *args): if eventType is None: eventType = callback._eventType if issubclass(eventType, ColorChangeEvent): self.registerCallback(ColorChangeEvent, callback, None, *args) else: super(ColorPicker, self).addCallback(callback, eventType, *args) def removeListener(self, listener, iface=None): if (isinstance(listener, IColorChangeListener) and (iface is None or issubclass(iface, IColorChangeListener))): self.withdrawListener(ColorChangeEvent, listener) super(ColorPicker, self).removeListener(listener, iface) def removeCallback(self, callback, eventType=None): if eventType is None: eventType = callback._eventType if issubclass(eventType, ColorChangeEvent): self.withdrawCallback(ColorChangeEvent, callback) else: super(ColorPicker, self).removeCallback(callback, eventType) def paintContent(self, target): target.addAttribute('red', '%.2x' % self.color.getRed()) target.addAttribute('green', '%.2x' % self.color.getGreen()) target.addAttribute('blue', '%.2x' % self.color.getBlue()) target.addAttribute('alpha', self.color.getAlpha()) target.addAttribute('popup', self._popupStatus) target.addAttribute('btnstyle', self.buttonStyle) target.addAttribute('btncaption', self.buttonCaption) def changeVariables(self, source, variables): if 'popup' in variables: openPopup = variables['popup'] if openPopup and not self.isReadOnly(): if self._parent_window is None: self._parent_window = self.getWindow() if self._parent_window.getParent() is not None: self._parent_window = self._parent_window.getParent() if self._window is None: from muntjac.addon.colorpicker.color_picker_popup import ColorPickerPopup self._window = ColorPickerPopup(self.color) self._window.setCaption(self.caption) 
self._window.setRGBTabVisible(self.rgbVisible) self._window.setHSVTabVisible(self.hsvVisible) self._window.setSwatchesTabVisible(self.swatchesVisible) self._window.setHistoryVisible(self.historyVisible) self._window.setPreviewVisible(self.textfieldVisible) self._window.setImmediate(True) self._window.addListener(self, ICloseListener) self._window.addListener(self, IColorChangeListener) self._window.getHistory().setColor(self.color) self._parent_window.addWindow(self._window) self._window.setVisible(True) self._window.setPositionX(self._positionX) self._window.setPositionY(self._positionY) else: self._window.setRGBTabVisible(self.rgbVisible) self._window.setHSVTabVisible(self.hsvVisible) self._window.setSwatchesTabVisible(self.swatchesVisible) self._window.setHistoryVisible(self.historyVisible) self._window.setPreviewVisible(self.textfieldVisible) self._window.setColor(self.color) self._window.getHistory().setColor(self.color) self._window.setVisible(True) self._parent_window.addWindow(self._window) elif self._window is not None: self._window.setVisible(False) self._parent_window.removeWindow(self._window) def windowClose(self, e): if e.getWindow() == self._window: self._popupStatus = False self.requestRepaint() def colorChanged(self, event): self.color = event.getColor() self.fireColorChanged() def fireColorChanged(self): self.fireEvent(ColorChangeEvent(self, self.color)) def setButtonStyle(self, style): self.buttonStyle = str(style) def setPopupStyle(self, style): self.popupStyle = style if style == self.POPUP_NORMAL: self.setRGBVisibility(True) self.setHSVVisibility(True) self.setSwatchesVisibility(True) self.setHistoryVisibility(True) self.setTextfieldVisibility(True) elif style == self.POPUP_SIMPLE: self.setRGBVisibility(False) self.setHSVVisibility(False) self.setSwatchesVisibility(True) self.setHistoryVisibility(False) self.setTextfieldVisibility(False) def setButtonCaption(self, caption): self.buttonCaption = '' if caption is None else caption def setRGBVisibility(self, visible): if not visible and not self.hsvVisible and not self.swatchesVisible: raise ValueError('Cannot hide all tabs.') self.rgbVisible = visible if self._window is not None: self._window.setRGBTabVisible(visible) def setHSVVisibility(self, visible): if not visible and not self.rgbVisible and not self.swatchesVisible: raise ValueError('Cannot hide all tabs.') self.hsvVisible = visible if self._window is not None: self._window.setHSVTabVisible(visible)
Apache License 2.0
simonmeister/motion-rcnn
object_detection/core/target_assigner.py
TargetAssigner.__init__
python
def __init__(self, similarity_calc, matcher, box_coder, positive_class_weight=1.0,
             negative_class_weight=1.0, unmatched_cls_target=None):
    if not isinstance(similarity_calc, sim_calc.RegionSimilarityCalculator):
        raise ValueError('similarity_calc must be a RegionSimilarityCalculator')
    if not isinstance(matcher, mat.Matcher):
        raise ValueError('matcher must be a Matcher')
    if not isinstance(box_coder, bcoder.BoxCoder):
        raise ValueError('box_coder must be a BoxCoder')
    self._similarity_calc = similarity_calc
    self._matcher = matcher
    self._box_coder = box_coder
    self._positive_class_weight = positive_class_weight
    self._negative_class_weight = negative_class_weight
    if unmatched_cls_target is None:
        self._unmatched_cls_target = tf.constant([0], tf.float32)
    else:
        self._unmatched_cls_target = unmatched_cls_target
Construct Multibox Target Assigner.

Args:
    similarity_calc: a RegionSimilarityCalculator
    matcher: an object_detection.core.Matcher used to match groundtruth to anchors.
    box_coder: an object_detection.core.BoxCoder used to encode matching groundtruth boxes
        with respect to anchors.
    positive_class_weight: classification weight to be associated to positive anchors
        (default: 1.0)
    negative_class_weight: classification weight to be associated to negative anchors
        (default: 1.0)
    unmatched_cls_target: a float32 tensor with shape [d_1, d_2, ..., d_k] which is consistent
        with the classification target for each anchor (and can be empty for scalar targets).
        This shape must thus be compatible with the groundtruth labels that are passed to the
        "assign" function (which have shape [num_gt_boxes, d_1, d_2, ..., d_k]).
        If set to None, unmatched_cls_target is set to be [0] for each anchor.

Raises:
    ValueError: if similarity_calc is not a RegionSimilarityCalculator or
        if matcher is not a Matcher or if box_coder is not a BoxCoder
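A construction sketch using components imported in this module's context below; the specific similarity, matcher, and coder choices are assumptions for illustration:

from object_detection.core.target_assigner import TargetAssigner
from object_detection.core import region_similarity_calculator as sim_calc
from object_detection.matchers import argmax_matcher
from object_detection.box_coders import mean_stddev_box_coder

similarity_calc = sim_calc.IouSimilarity()
matcher = argmax_matcher.ArgMaxMatcher(matched_threshold=0.5)
box_coder = mean_stddev_box_coder.MeanStddevBoxCoder()

assigner = TargetAssigner(similarity_calc, matcher, box_coder)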
https://github.com/simonmeister/motion-rcnn/blob/f8883d2328cf9be5b781c1a555b0eef1dda62e18/object_detection/core/target_assigner.py#L52-L92
import tensorflow as tf
import numpy as np
import cv2

from object_detection.box_coders import faster_rcnn_box_coder
from object_detection.box_coders import mean_stddev_box_coder
from object_detection.core import box_coder as bcoder
from object_detection.core import box_list
from object_detection.core import box_list_ops
from object_detection.core import matcher as mat
from object_detection.core import region_similarity_calculator as sim_calc
from object_detection.matchers import argmax_matcher
from object_detection.matchers import bipartite_matcher


class TargetAssigner(object):
MIT License
kngwyu/rainy
rainy/net/actor_critic.py
policy_init
python
def policy_init(gain: float = 0.01) -> Initializer:
    return Initializer(weight_init=orthogonal(gain))
Use a small value for the policy layer to make the policy entropy larger
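A usage sketch mirroring how heads are built in this module; LinearHead is imported from rainy.net.block as in the context below, and the dimensions are made up:

from rainy.net.actor_critic import policy_init
from rainy.net.block import LinearHead

init = policy_init(gain=0.01)
# policy head whose weights start near zero, keeping the initial policy close to uniform
policy_head = LinearHead(64, 4, init=init)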
https://github.com/kngwyu/rainy/blob/535f8f49d9efe0324a8480d0165ab1b35d83dc19/rainy/net/actor_critic.py#L187-L189
from abc import ABC, abstractmethod from typing import List, Optional, Sequence, Tuple, Type import numpy as np from torch import Tensor, nn from ..prelude import ArrayLike from ..utils import Device from .block import CNNBody, FCBody, LinearHead, NetworkBlock, ResNetBody from .init import Initializer, orthogonal from .policy import CategoricalDist, Policy, PolicyDist from .prelude import NetFn from .recurrent import DummyRnn, RnnBlock, RnnState class ActorCriticNet(nn.Module, ABC): state_dim: Sequence[int] action_dim: int recurrent_body: RnnBlock @property def is_recurrent(self) -> bool: return not isinstance(self.recurrent_body, DummyRnn) @abstractmethod def policy( self, states: ArrayLike, rnns: Optional[RnnState] = None, masks: Optional[Tensor] = None, ) -> Tuple[Policy, RnnState]: pass @abstractmethod def value( self, states: ArrayLike, rnns: Optional[RnnState] = None, masks: Optional[Tensor] = None, ) -> Tensor: pass @abstractmethod def forward( self, states: ArrayLike, rnns: Optional[RnnState] = None, masks: Optional[Tensor] = None, ) -> Tuple[Policy, Tensor, RnnState]: pass class SeparatedACNet(nn.Module, ABC): def __init__( self, actor_body: NetworkBlock, critic_body: NetworkBlock, policy_dist: PolicyDist, device: Device = Device(), init: Initializer = Initializer(), ) -> None: super().__init__() self.device = device self.actor = nn.Sequential( actor_body, LinearHead(actor_body.output_dim, policy_dist.input_dim, init=init), ) self.critic = nn.Sequential( critic_body, LinearHead(critic_body.output_dim, 1, init=init) ) self.policy_dist = policy_dist self.recurrent_body = DummyRnn() self.to(device.unwrapped) self.state_dim = self.actor[0].input_dim self.action_dim = self.actor[1].output_dim @property def is_recurrent(self) -> bool: return False def policy( self, states: ArrayLike, _rnns: Optional[RnnState] = None, _masks: Optional[Tensor] = None, ) -> Tuple[Policy, RnnState]: s = self.device.tensor(states) return self.policy_dist(self.actor(s)), self.recurrent_body.DUMMY_STATE def value( self, states: ArrayLike, _rnns: Optional[RnnState] = None, _masks: Optional[Tensor] = None, ) -> Tensor: s = self.device.tensor(states) return self.critic(s).squeeze_() def forward( self, states: ArrayLike, _rnns: Optional[RnnState] = None, _masks: Optional[Tensor] = None, ) -> Tuple[Policy, Tensor, RnnState]: s = self.device.tensor(states) policy = self.policy_dist(self.actor(s)) value = self.critic(s).squeeze_() return policy, value, self.recurrent_body.DUMMY_STATE class SharedACNet(ActorCriticNet): def __init__( self, body: NetworkBlock, actor_head: NetworkBlock, critic_head: NetworkBlock, policy_dist: PolicyDist, recurrent_body: RnnBlock = DummyRnn(), device: Device = Device(), ) -> None: assert body.output_dim == np.prod( actor_head.input_dim ), "body output and action_head input must have a same dimention" assert body.output_dim == np.prod( critic_head.input_dim ), "body output and action_head input must have a same dimention" super().__init__() self.device = device self.body = body self.actor_head = actor_head self.critic_head = critic_head self.policy_dist = policy_dist self.recurrent_body = recurrent_body self.to(device.unwrapped) self.state_dim = self.body.input_dim self.action_dim = self.actor_head.output_dim def _features( self, states: ArrayLike, rnns: Optional[RnnState] = None, masks: Optional[Tensor] = None, ) -> Tuple[Tensor, RnnState]: res = self.body(self.device.tensor(states)) if rnns is None: rnns = self.recurrent_body.initial_state(res.size(0), self.device) res = 
self.recurrent_body(res, rnns, masks) return res def policy( self, states: ArrayLike, rnns: Optional[RnnState] = None, masks: Optional[Tensor] = None, ) -> Tuple[Policy, RnnState]: features, rnn_next = self._features(states, rnns, masks) return self.policy_dist(self.actor_head(features)), rnn_next def value( self, states: ArrayLike, rnns: Optional[RnnState] = None, masks: Optional[Tensor] = None, ) -> Tensor: features = self._features(states, rnns, masks)[0] return self.critic_head(features).squeeze_() def forward( self, states: ArrayLike, rnns: Optional[RnnState] = None, masks: Optional[Tensor] = None, ) -> Tuple[Policy, Tensor, RnnState]: features, rnn_next = self._features(states, rnns, masks) policy, value = self.actor_head(features), self.critic_head(features) return self.policy_dist(policy), value.squeeze_(), rnn_next
Apache License 2.0
bitmovin/bitmovin-api-sdk-python
bitmovin_api_sdk/models/analytics_median_query_request.py
AnalyticsMedianQueryRequest.to_dict
python
def to_dict(self):
    result = {}
    if hasattr(super(AnalyticsMedianQueryRequest, self), "to_dict"):
        result = super(AnalyticsMedianQueryRequest, self).to_dict()
    return result
Returns the model properties as a dict
https://github.com/bitmovin/bitmovin-api-sdk-python/blob/79dd938804197151af7cbe5501c7ec1d97872c15/bitmovin_api_sdk/models/analytics_median_query_request.py#L14-L20
from six import string_types, iteritems
from bitmovin_api_sdk.common.poscheck import poscheck_model
from bitmovin_api_sdk.models.analytics_attribute import AnalyticsAttribute
from bitmovin_api_sdk.models.analytics_interval import AnalyticsInterval
from bitmovin_api_sdk.models.analytics_query_request import AnalyticsQueryRequest
import pprint


class AnalyticsMedianQueryRequest(AnalyticsQueryRequest):
MIT License
square/connect-python-sdk
squareconnect/models/catalog_object.py
CatalogObject.item_option_value_data
python
def item_option_value_data(self, item_option_value_data):
    self._item_option_value_data = item_option_value_data
Sets the item_option_value_data of this CatalogObject.
Structured data for a [CatalogItemOptionValue](#type-catalogitemoptionvalue), set for CatalogObjects of type `ITEM_OPTION_VAL`.

:param item_option_value_data: The item_option_value_data of this CatalogObject.
:type: CatalogItemOptionValue
https://github.com/square/connect-python-sdk/blob/e00e2889b2dd2c55048219cbe64db79962a68633/squareconnect/models/catalog_object.py#L664-L673
from pprint import pformat from six import iteritems import re class CatalogObject(object): def __init__(self, type=None, id=None, updated_at=None, version=None, is_deleted=None, catalog_v1_ids=None, present_at_all_locations=None, present_at_location_ids=None, absent_at_location_ids=None, image_id=None, item_data=None, category_data=None, item_variation_data=None, tax_data=None, discount_data=None, modifier_list_data=None, modifier_data=None, time_period_data=None, product_set_data=None, pricing_rule_data=None, image_data=None, measurement_unit_data=None, item_option_data=None, item_option_value_data=None): self.swagger_types = { 'type': 'str', 'id': 'str', 'updated_at': 'str', 'version': 'int', 'is_deleted': 'bool', 'catalog_v1_ids': 'list[CatalogV1Id]', 'present_at_all_locations': 'bool', 'present_at_location_ids': 'list[str]', 'absent_at_location_ids': 'list[str]', 'image_id': 'str', 'item_data': 'CatalogItem', 'category_data': 'CatalogCategory', 'item_variation_data': 'CatalogItemVariation', 'tax_data': 'CatalogTax', 'discount_data': 'CatalogDiscount', 'modifier_list_data': 'CatalogModifierList', 'modifier_data': 'CatalogModifier', 'time_period_data': 'CatalogTimePeriod', 'product_set_data': 'CatalogProductSet', 'pricing_rule_data': 'CatalogPricingRule', 'image_data': 'CatalogImage', 'measurement_unit_data': 'CatalogMeasurementUnit', 'item_option_data': 'CatalogItemOption', 'item_option_value_data': 'CatalogItemOptionValue' } self.attribute_map = { 'type': 'type', 'id': 'id', 'updated_at': 'updated_at', 'version': 'version', 'is_deleted': 'is_deleted', 'catalog_v1_ids': 'catalog_v1_ids', 'present_at_all_locations': 'present_at_all_locations', 'present_at_location_ids': 'present_at_location_ids', 'absent_at_location_ids': 'absent_at_location_ids', 'image_id': 'image_id', 'item_data': 'item_data', 'category_data': 'category_data', 'item_variation_data': 'item_variation_data', 'tax_data': 'tax_data', 'discount_data': 'discount_data', 'modifier_list_data': 'modifier_list_data', 'modifier_data': 'modifier_data', 'time_period_data': 'time_period_data', 'product_set_data': 'product_set_data', 'pricing_rule_data': 'pricing_rule_data', 'image_data': 'image_data', 'measurement_unit_data': 'measurement_unit_data', 'item_option_data': 'item_option_data', 'item_option_value_data': 'item_option_value_data' } self._type = type self._id = id self._updated_at = updated_at self._version = version self._is_deleted = is_deleted self._catalog_v1_ids = catalog_v1_ids self._present_at_all_locations = present_at_all_locations self._present_at_location_ids = present_at_location_ids self._absent_at_location_ids = absent_at_location_ids self._image_id = image_id self._item_data = item_data self._category_data = category_data self._item_variation_data = item_variation_data self._tax_data = tax_data self._discount_data = discount_data self._modifier_list_data = modifier_list_data self._modifier_data = modifier_data self._time_period_data = time_period_data self._product_set_data = product_set_data self._pricing_rule_data = pricing_rule_data self._image_data = image_data self._measurement_unit_data = measurement_unit_data self._item_option_data = item_option_data self._item_option_value_data = item_option_value_data @property def type(self): return self._type @type.setter def type(self, type): self._type = type @property def id(self): return self._id @id.setter def id(self, id): if id is None: raise ValueError("Invalid value for `id`, must not be `None`") if len(id) < 1: raise ValueError("Invalid value for `id`, 
length must be greater than or equal to `1`") self._id = id @property def updated_at(self): return self._updated_at @updated_at.setter def updated_at(self, updated_at): self._updated_at = updated_at @property def version(self): return self._version @version.setter def version(self, version): self._version = version @property def is_deleted(self): return self._is_deleted @is_deleted.setter def is_deleted(self, is_deleted): self._is_deleted = is_deleted @property def catalog_v1_ids(self): return self._catalog_v1_ids @catalog_v1_ids.setter def catalog_v1_ids(self, catalog_v1_ids): self._catalog_v1_ids = catalog_v1_ids @property def present_at_all_locations(self): return self._present_at_all_locations @present_at_all_locations.setter def present_at_all_locations(self, present_at_all_locations): self._present_at_all_locations = present_at_all_locations @property def present_at_location_ids(self): return self._present_at_location_ids @present_at_location_ids.setter def present_at_location_ids(self, present_at_location_ids): self._present_at_location_ids = present_at_location_ids @property def absent_at_location_ids(self): return self._absent_at_location_ids @absent_at_location_ids.setter def absent_at_location_ids(self, absent_at_location_ids): self._absent_at_location_ids = absent_at_location_ids @property def image_id(self): return self._image_id @image_id.setter def image_id(self, image_id): self._image_id = image_id @property def item_data(self): return self._item_data @item_data.setter def item_data(self, item_data): self._item_data = item_data @property def category_data(self): return self._category_data @category_data.setter def category_data(self, category_data): self._category_data = category_data @property def item_variation_data(self): return self._item_variation_data @item_variation_data.setter def item_variation_data(self, item_variation_data): self._item_variation_data = item_variation_data @property def tax_data(self): return self._tax_data @tax_data.setter def tax_data(self, tax_data): self._tax_data = tax_data @property def discount_data(self): return self._discount_data @discount_data.setter def discount_data(self, discount_data): self._discount_data = discount_data @property def modifier_list_data(self): return self._modifier_list_data @modifier_list_data.setter def modifier_list_data(self, modifier_list_data): self._modifier_list_data = modifier_list_data @property def modifier_data(self): return self._modifier_data @modifier_data.setter def modifier_data(self, modifier_data): self._modifier_data = modifier_data @property def time_period_data(self): return self._time_period_data @time_period_data.setter def time_period_data(self, time_period_data): self._time_period_data = time_period_data @property def product_set_data(self): return self._product_set_data @product_set_data.setter def product_set_data(self, product_set_data): self._product_set_data = product_set_data @property def pricing_rule_data(self): return self._pricing_rule_data @pricing_rule_data.setter def pricing_rule_data(self, pricing_rule_data): self._pricing_rule_data = pricing_rule_data @property def image_data(self): return self._image_data @image_data.setter def image_data(self, image_data): self._image_data = image_data @property def measurement_unit_data(self): return self._measurement_unit_data @measurement_unit_data.setter def measurement_unit_data(self, measurement_unit_data): self._measurement_unit_data = measurement_unit_data @property def item_option_data(self): return self._item_option_data 
@item_option_data.setter def item_option_data(self, item_option_data): self._item_option_data = item_option_data @property def item_option_value_data(self): return self._item_option_value_data @item_option_value_data.setter
Apache License 2.0
ucopacme/aws-orgs
awsorgs/utils.py
get_logger
python
def get_logger(args):
    log_level = logging.INFO
    if args['--debug']:
        log_level = logging.DEBUG
    if args['--quiet']:
        log_level = logging.CRITICAL
    log_format = '%(name)s: %(levelname)-9s%(message)s'
    if args['report']:
        log_format = '%(message)s'
    if args['--debug'] == 1:
        log_format = '%(name)s: %(levelname)-9s%(funcName)s(): %(message)s'
    if (not args['--exec'] and not args['report']):
        log_format = '[dryrun] %s' % log_format
    if not args['--debug'] == 2:
        logging.getLogger('botocore').propagate = False
        logging.getLogger('boto3').propagate = False
    logging.basicConfig(stream=sys.stdout, format=log_format, level=log_level)
    log = logging.getLogger(__name__)
    return log
Set up logging.basicConfig from args. Return a logging.Logger object.
https://github.com/ucopacme/aws-orgs/blob/441e22de53de7fa462aa5c1be3dd471902942eca/awsorgs/utils.py#L101-L125
import os import sys import re import pkg_resources import difflib import threading try: import queue except ImportError: import Queue as queue import boto3 from botocore.exceptions import ClientError import yaml import logging S3_BUCKET_PREFIX = 'awsorgs' S3_OBJECT_KEY = 'deployed_accounts.yaml' def get_s3_bucket_name(prefix=S3_BUCKET_PREFIX): sts_client = boto3.client('sts') account_id = sts_client.get_caller_identity()['Account'] return '-'.join([prefix, account_id]) def lookup(dlist, lkey, lvalue, rkey=None): items = [d for d in dlist if lkey in d and d[lkey] == lvalue] if not items: return None if len(items) > 1: raise RuntimeError( "Data Error: lkey: {}, lvalue: {} - lookup matches multiple items in dlist".format(lkey, lvalue) ) if rkey: if rkey in items[0]: return items[0][rkey] return None return items[0] def search_spec(spec, search_key, recurse_key): value = [] if search_key in spec and spec[search_key]: if isinstance(spec[search_key], str): value.append(spec[search_key]) else: value += spec[search_key] if recurse_key in spec and spec[recurse_key]: for child_spec in spec[recurse_key]: value += search_spec(child_spec, search_key, recurse_key) return sorted(value) def ensure_absent(spec): if 'Ensure' in spec and spec['Ensure'] == 'absent': return True return False def munge_path(default_path, spec): if 'Path' in spec and spec['Path']: if spec['Path'][0] == '/': if spec['Path'][-1] != '/': return spec['Path'] + '/' return spec['Path'] return "/%s/%s/" % (default_path, spec['Path']) return "/%s/" % default_path
MIT License
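A minimal usage sketch for get_logger, assuming the package layout implied by the record above (awsorgs.utils); the docopt-style option values are illustrative, not taken from the source.

from awsorgs.utils import get_logger  # module path inferred from the record above

# Hypothetical docopt-style options dict; the keys mirror the ones the function reads.
args = {
    '--debug': 0,      # 1 or 2 switch on more verbose formats / boto logging
    '--quiet': False,
    'report': False,
    '--exec': False,   # with --exec unset and no report, messages gain a '[dryrun] ' prefix
}

log = get_logger(args)
log.info("created account alpha")   # emitted to stdout via logging.basicConfig at INFO level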
yuhaocheng/pyanomaly
pyanomaly/core/utils.py
multi_obj_grid_crop
python
def multi_obj_grid_crop(bottom, bbox, object_size=(64,64)):
    bottom = bottom.repeat(bbox.size(0), 1, 1, 1)
    rois = bbox.detach()
    batch_size = bottom.size(0)
    D = bottom.size(1)
    H = bottom.size(2)
    W = bottom.size(3)
    roi_per_batch = int(rois.size(0) / batch_size)
    x1 = bbox[:, 0::4]
    y1 = bbox[:, 1::4]
    x2 = bbox[:, 2::4]
    y2 = bbox[:, 3::4]
    height = bottom.size(2)
    width = bottom.size(3)
    zero = rois.new_zeros(bbox.size(0), 1)
    theta = torch.cat([
        (x2 - x1) / (width - 1),
        zero,
        (x1 + x2 - width + 1) / (width - 1),
        zero,
        (y2 - y1) / (height - 1),
        (y1 + y2 - height + 1) / (height - 1)], 1).view(-1, 2, 3)
    grid = F.affine_grid(theta, torch.Size((rois.size(0), 1, object_size[0], object_size[1])))
    bottom = bottom.view(1, batch_size, D, H, W).contiguous().expand(roi_per_batch, batch_size, D, H, W).contiguous().view(-1, D, H, W)
    crops = F.grid_sample(bottom, grid)
    return crops, grid
theta = [ [ (x2 - x1)/(W - 1), 0, (x1 + x2 - W + 1)/(W - 1) ], [ 0, (y2 - y1)/(H - 1), (y1 + y2 - H + 1)/(H - 1) ] ]
https://github.com/yuhaocheng/pyanomaly/blob/718feab80c081f5c415c9f9b923408a890f08836/pyanomaly/core/utils.py#L133-L184
import torch import os import pickle from scipy.ndimage import gaussian_filter1d import torch.nn.functional as F import torchvision.transforms.functional as tf from tsnecuda import TSNE from pyanomaly.utils import flow2img from collections import OrderedDict import matplotlib.pyplot as plt class AverageMeter(object): def __init__(self, name='default'): self.val = 0 self.avg = 0 self.sum = 0 self.count = 0 self.name = name def get_name(self): return self.name def update(self, val, n=1): self.val = val self.sum += val * n self.count += n self.avg = self.sum / self.count if self.count != 0 else 0 class ParamSet(object): def __init__(self, name='default', **kwargs): self.param_names = list(kwargs.keys()) self.name = name self.param = OrderedDict(kwargs) def get_name(self): return self.name def get_params_names(self): return self.param_names def modelparallel(model): if isinstance(model, OrderedDict): print('The model is a OrderedDict') elif isinstance(model, torch.nn.Module): print('The model is a nn.Module') else: raise Exception('Not support the model') def modeldist(model): assert torch.distributed.is_initialized(), 'Not init the dist' if isinstance(model, OrderedDict): print('The model is OrderedDict') elif isinstance(model, torch.nn.Module): print('The model is nn.Module') else: raise Exception('Not support the model') def grid_crop(bottom, bbox, object_size=(64,64)): rois = bbox.detach() batch_size = bottom.size(0) D = bottom.size(1) H = bottom.size(2) W = bottom.size(3) roi_per_batch = int(rois.size(0) / batch_size) x1 = bbox[:, 0::4] y1 = bbox[:, 1::4] x2 = bbox[:, 2::4] y2 = bbox[:, 3::4] height = bottom.size(2) width = bottom.size(3) zero = rois.new_zeros(bbox.size(0), 1) theta = torch.cat([ (x2 - x1) / (width - 1), zero, (x1 + x2 - width + 1) / (width - 1), zero, (y2 - y1) / (height - 1), (y1 + y2 - height + 1) / (height - 1)], 1).view(-1, 2, 3) grid = F.affine_grid(theta, torch.Size((rois.size(0), 1, object_size[0], object_size[1]))) bottom = bottom.view(1, batch_size, D, H, W).contiguous().expand(roi_per_batch, batch_size, D, H, W).contiguous().view(-1, D, H, W) crops = F.grid_sample(bottom, grid) return crops, grid
Apache License 2.0
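A small usage sketch for multi_obj_grid_crop with synthetic tensors, assuming a single 1 x C x H x W feature map and boxes given as (x1, y1, x2, y2) pixel coordinates, which is how the function slices bbox; the import path is taken from the record above and requires the module's dependencies to be installed.

import torch
from pyanomaly.core.utils import multi_obj_grid_crop  # path from the record above

feature_map = torch.randn(1, 3, 240, 320)             # 1 x C x H x W
boxes = torch.tensor([[10.0, 20.0, 110.0, 180.0],
                      [150.0, 30.0, 300.0, 200.0]])   # N x 4, (x1, y1, x2, y2) in pixels

crops, grid = multi_obj_grid_crop(feature_map, boxes, object_size=(64, 64))
print(crops.shape)   # torch.Size([2, 3, 64, 64]) - one 64x64 crop per box
print(grid.shape)    # torch.Size([2, 64, 64, 2]) - the sampling grid from affine_grid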
iranathan/robloxapi
robloxapi/group.py
Group.exile
python
async def exile(self, user_id: int) -> int:
    r = await self.request.request(url=f'https://groups.roblox.com/v1/groups/{self.id}/users/{user_id}', method='DELETE')
    return r.status_code
Exiles a user from the group. :param user_id: The user's id :return: StatusCode
https://github.com/iranathan/robloxapi/blob/ab218f749afefb630f7472ce6d396369fd995ca4/robloxapi/group.py#L60-L67
import json import logging import re import asyncio from typing import List, Tuple from bs4 import BeautifulSoup from .utils.errors import RoleError, NotFound from .utils.classes import Role, Shout, WallPost, Action from .joinrequest import JoinRequest from .groupmember import GroupMember from .user import User from .auth import Auth, Captcha class Group: def __init__(self, request, group_id, group_name, description, member_count, shout, owner_id=None, owner_username=None): self.request = request self.id = group_id self.name = group_name self.description = description if owner_id and owner_username: self.owner = User(self.request, owner_id, owner_username) self.member_count = member_count self.shout = shout async def pay(self, user_id: int, amount: int) -> int: data = json.dumps({ "PayoutType": "FixedAmount", "Recipients": [ { "recipientId": user_id, "recipientType": "User", "amount": amount } ] }) r = await self.request.request(url=f'https://groups.roblox.com/v1/groups/{self.id}/payouts', data=data, method="POST") return r.status_code
MIT License
deepanshs/mrsimulator
src/mrsimulator/signal_processing/_base.py
Operation._get_dv_indexes
python
def _get_dv_indexes(indexes, n):
    if indexes is None:
        return np.arange(n)
    if isinstance(indexes, int):
        return [indexes]
    if isinstance(indexes, (list, tuple)):
        return np.asarray(indexes)
Return a list of dependent variable indexes. Args: indexes: An integer, list of integers, or None indicating the dv indexes. n: Total number of dependent variables in the CSDM object.
https://github.com/deepanshs/mrsimulator/blob/9ab8a5edfa66434301b9d79da0c01a294e173704/src/mrsimulator/signal_processing/_base.py#L33-L45
from sys import modules from typing import ClassVar import numpy as np from mrsimulator.utils.parseable import Parseable __author__ = "Maxwell C. Venetos" __email__ = "[email protected]" class Operation(Parseable): module_name: ClassVar[str] = None @property def function(self): return self.__class__.__name__ def json(self) -> dict: my_dict = super().json() my_dict["function"] = self.function if hasattr(self, "type"): my_dict["type"] = self.type return my_dict @staticmethod
BSD 3-Clause New or Revised License
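The three accepted forms of indexes can be seen with a short sketch; _get_dv_indexes is a private static helper, so the calls below go through the Operation class directly, assuming the installed package mirrors the path in the record above.

from mrsimulator.signal_processing._base import Operation  # path assumed from the record above

print(Operation._get_dv_indexes(None, 3))     # None -> every index: array([0, 1, 2])
print(Operation._get_dv_indexes(1, 3))        # a single int is wrapped in a list: [1]
print(Operation._get_dv_indexes((0, 2), 3))   # list/tuple -> array([0, 2])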
wavefronthq/python-client
wavefront_api_client/models/raw_timeseries.py
RawTimeseries.points
python
def points(self):
    return self._points
Gets the points of this RawTimeseries. # noqa: E501 :return: The points of this RawTimeseries. # noqa: E501 :rtype: list[Point]
https://github.com/wavefronthq/python-client/blob/e410ce0dd8a2334e995456f4f3d44e0f04664a3a/wavefront_api_client/models/raw_timeseries.py#L60-L67
import pprint import re import six from wavefront_api_client.configuration import Configuration class RawTimeseries(object): """ Attributes: swagger_types (dict): The key is attribute name and the value is attribute type. attribute_map (dict): The key is attribute name and the value is json key in definition. """ swagger_types = { 'points': 'list[Point]', 'tags': 'dict(str, str)' } attribute_map = { 'points': 'points', 'tags': 'tags' } def __init__(self, points=None, tags=None, _configuration=None): if _configuration is None: _configuration = Configuration() self._configuration = _configuration self._points = None self._tags = None self.discriminator = None self.points = points if tags is not None: self.tags = tags @property
Apache License 2.0
psss/tmt
tmt/steps/provision/__init__.py
Guest.push
python
def push(self, source=None, destination=None, options=None):
    if options is None:
        options = "-Rrz --links --safe-links --delete".split()
    if destination is None:
        destination = "/"
    if source is None:
        source = self.parent.plan.workdir
        self.debug(f"Push workdir to guest '{self.guest}'.")
    else:
        self.debug(f"Copy '{source}' to '{destination}' on the guest.")
    self._check_rsync()
    try:
        self.run(
            ["rsync"] + options +
            ["-e", self._ssh_command(join=True)] +
            [source, f"{self._ssh_guest()}:{destination}"],
            shell=False)
    except tmt.utils.RunError:
        self.fail(
            f"Failed to push workdir to the guest. This usually means "
            f"login as '{self.user}' to the test machine does not work.")
        raise
Push files to the guest. By default the whole plan workdir is synced to the same location on the guest. Use the 'source' and 'destination' to sync a custom location, and the 'options' parameter to modify the default options, which are '-Rrz --links --safe-links --delete'.
https://github.com/psss/tmt/blob/1cd284946155eced0c6522fa42e327ab22683d0d/tmt/steps/provision/__init__.py#L441-L473
import os import random import re import shlex import string import time import click import fmf import tmt CONNECTION_TIMEOUT = 60 * 4 SSH_INITIAL_WAIT_TIME = 5 class Provision(tmt.steps.Step): how = 'virtual' def __init__(self, data, plan): super().__init__(data, plan) self._guests = [] self._guest_data = {} def load(self, extra_keys=None): extra_keys = extra_keys or [] super().load(extra_keys) try: self._guest_data = tmt.utils.yaml_to_dict(self.read('guests.yaml')) except tmt.utils.FileError: self.debug('Provisioned guests not found.', level=2) def save(self, data=None): data = data or {} super().save(data) try: guests = dict( [(guest.name, guest.save()) for guest in self.guests()]) self.write('guests.yaml', tmt.utils.dict_to_yaml(guests)) except tmt.utils.FileError: self.debug('Failed to save provisioned guests.') def wake(self): super().wake() for data in self.data: plugin = ProvisionPlugin.delegate(self, data) self._plugins.append(plugin) plugin.wake(data=self._guest_data.get(plugin.name)) if plugin.guest(): self._guests.append(plugin.guest()) if self.status() == 'done': self.debug( 'Provision wake up complete (already done before).', level=2) else: self.status('todo') self.save() def show(self): for data in self.data: ProvisionPlugin.delegate(self, data).show() def summary(self): guests = fmf.utils.listed(self.guests(), 'guest') self.info('summary', f'{guests} provisioned', 'green', shift=1) for guest in self.guests(): if guest.name != tmt.utils.DEFAULT_NAME: self.verbose(guest.name, color='red', shift=2) def go(self): super().go() if self.status() == 'done': self.info('status', 'done', 'green', shift=1) self.summary() self.actions() return self._guests = [] save = True try: for plugin in self.plugins(): try: plugin.go() if isinstance(plugin, ProvisionPlugin): plugin.guest().details() finally: if isinstance(plugin, ProvisionPlugin): self._guests.append(plugin.guest()) self.summary() self.status('done') except SystemExit as error: save = False raise error finally: if save: self.save() def guests(self): return self._guests def requires(self): requires = set() for plugin in self.plugins(classes=ProvisionPlugin): requires.update(plugin.requires()) return list(requires) class ProvisionPlugin(tmt.steps.Plugin): how = 'virtual' _supported_methods = [] @classmethod def base_command(cls, method_class=None, usage=None): if method_class: usage = Provision.usage(method_overview=usage) @click.command(cls=method_class, help=usage) @click.pass_context @click.option( '-h', '--how', metavar='METHOD', help='Use specified method for provisioning.') def provision(context, **kwargs): context.obj.steps.add('provision') Provision._save_context(context) return provision def wake(self, options=None, data=None): super().wake(options) def guest(self): raise NotImplementedError def requires(self): return Guest.requires() @classmethod def clean_images(cls, clean, dry): class Guest(tmt.utils.Common): _keys = ['guest', 'port', 'user', 'key', 'password'] def __init__(self, data, name=None, parent=None): super().__init__(parent, name) self.load(data) def _random_name(self, prefix='', length=16): min_random_part = max(5, length - len(prefix)) name = prefix + ''.join( random.choices(string.ascii_letters, k=min_random_part)) return name[-length:] def _ssh_guest(self): return f'{self.user}@{self.guest}' def _ssh_options(self, join=False): options = [ '-oStrictHostKeyChecking=no', '-oUserKnownHostsFile=/dev/null', ] if self.key or self.password: options.extend(['-oIdentitiesOnly=yes']) if self.port: 
options.extend(['-p', str(self.port)]) if self.key: keys = self.key if isinstance(self.key, list) else [self.key] for key in keys: options.extend(['-i', shlex.quote(key) if join else key]) if self.password: options.extend(['-oPasswordAuthentication=yes']) return ' '.join(options) if join else options def _ssh_command(self, join=False): command = [] if self.password: password = shlex.quote(self.password) if join else self.password command.extend(["sshpass", "-p", password]) command.append("ssh") if join: return " ".join(command) + " " + self._ssh_options(join=True) else: return command + self._ssh_options() def load(self, data): for key in self._keys: setattr(self, key, data.get(key)) def save(self): data = dict() for key in self._keys: value = getattr(self, key) if value is not None: data[key] = value return data def wake(self): self.debug(f"Doing nothing to wake up guest '{self.guest}'.") def start(self): self.debug(f"Doing nothing to start guest '{self.guest}'.") def details(self): if self.opt('dry'): return try: distro = self.execute('cat /etc/os-release')[0].strip() distro = re.search('PRETTY_NAME="(.*)"', distro).group(1) except (tmt.utils.RunError, AttributeError): try: distro = self.execute('cat /etc/lsb-release')[0].strip() distro = re.search( 'DISTRIB_DESCRIPTION="(.*)"', distro).group(1) except (tmt.utils.RunError, AttributeError): try: distro = self.execute('cat /etc/redhat-release')[0].strip() except (tmt.utils.RunError, AttributeError): distro = None if distro is not None and 'Please login as the user' in distro: raise tmt.utils.GeneralError( f'Login to the guest failed.\n{distro}') if distro: self.info('distro', distro, 'green') kernel = self.execute('uname -r')[0].strip() self.verbose('kernel', kernel, 'green') def _ansible_verbosity(self): if self.opt('debug') < 3: return '' else: return ' -' + (self.opt('debug') - 2) * 'v' def _ansible_summary(self, output): if not output: return keys = 'ok changed unreachable failed skipped rescued ignored'.split() for key in keys: matched = re.search(rf'^.*\s:\s.*{key}=(\d+).*$', output, re.M) if matched and int(matched.group(1)) > 0: tasks = fmf.utils.listed(matched.group(1), 'task') self.verbose(key, tasks, 'green') def _ansible_playbook_path(self, playbook): self.debug(f"Applying playbook '{playbook}' on guest '{self.guest}'.") playbook = os.path.join(self.parent.plan.my_run.tree.root, playbook) self.debug(f"Playbook full path: '{playbook}'", level=2) return playbook def _export_environment(self, execute_environment=None): environment = dict() environment.update(execute_environment or dict()) environment.update(self.parent.plan.environment) if not environment: return '' return 'export {}; '.format( ' '.join(tmt.utils.shell_variables(environment))) def ansible(self, playbook): playbook = self._ansible_playbook_path(playbook) stdout, stderr = self.run( f'{self._export_environment()}' f'stty cols {tmt.utils.OUTPUT_WIDTH}; ansible-playbook ' f'--ssh-common-args="{self._ssh_options(join=True)}" ' f'{self._ansible_verbosity()} -i {self._ssh_guest()}, {playbook}', cwd=self.parent.plan.worktree) self._ansible_summary(stdout) def execute(self, command, **kwargs): environment = self._export_environment(kwargs.get('env', dict())) directory = kwargs.get('cwd') or '' if directory: directory = f"cd '{directory}'; " interactive = ['-t'] if kwargs.get('interactive') else [] if isinstance(command, (list, tuple)): command = ' '.join(command) self.debug(f"Execute command '{command}' on guest '{self.guest}'.") command = ( self._ssh_command() + 
interactive + [self._ssh_guest()] + [f'{environment}{directory}{command}']) return self.run(command, shell=False, **kwargs)
MIT License
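For reference, a rough sketch of the rsync command that push() assembles for the default workdir sync; the guest address, user, and workdir path below are illustrative stand-ins, not values from the source.

# Approximation of what Guest.push() ends up running with default options (illustrative values).
options = "-Rrz --links --safe-links --delete".split()
ssh_cmd = "ssh -oStrictHostKeyChecking=no -oUserKnownHostsFile=/dev/null"   # see _ssh_options()
source = "/var/tmp/tmt/run-001/plan"          # hypothetical plan workdir
guest = "fedora@192.168.122.61"               # hypothetical user@guest

cmd = ["rsync"] + options + ["-e", ssh_cmd] + [source, f"{guest}:/"]
print(" ".join(cmd))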
deep500/deep500
deep500/datasets/ucf101.py
download_ucf101_and_get_file_paths
python
def download_ucf101_and_get_file_paths(folder='', split='01'):
    base_url = "https://www.crcv.ucf.edu/data/UCF101/"
    filenames = [('ucf101', 'UCF101.rar'),
                 ('ucf101_split', 'UCF101TrainTestSplits-RecognitionTask.zip')]
    sub_folder = '/ucf101'
    local_files = real_download(base_url, filenames, sub_folder, output_dir=folder)
    files = unrar(local_files['ucf101'])
    zip = zipfile.ZipFile(local_files['ucf101_split'])
    path = os.path.dirname(os.path.abspath(local_files['ucf101'])) + '/UCF-101/'
    train_files = []
    with zip.open('ucfTrainTestlist/trainlist{}.txt'.format(split)) as file_split:
        for line in file_split:
            file = path + bytes.decode(line.split()[0])
            if file in files:
                train_files.append(file)
    test_files = []
    with zip.open('ucfTrainTestlist/testlist{}.txt'.format(split)) as file_split:
        for line in file_split:
            file = path + bytes.decode(line.strip())
            if file in files:
                test_files.append(file)
    label_list = {}
    with zip.open('ucfTrainTestlist/classInd.txt') as labels:
        for line in labels:
            line = bytes.decode(line.strip())
            label = line.split()[1]
            idx = int(line.split()[0]) - 1
            label_list[label] = idx
    return train_files, test_files, label_list
Download UCF101 from the University of Central Florida. The archive contains the videos of the different action classes. :return: paths to the train and test video files, plus the label list
https://github.com/deep500/deep500/blob/34e93a46dea17513ac705bb4392b3514fa9d87c6/deep500/datasets/ucf101.py#L24-L63
import zipfile import os import PIL.Image from typing import List, Tuple, Dict import numpy as np from deep500.utils.download import real_download, unrar from deep500.lv2.dataset import FileListDataset from deep500.utils.onnx_interop.losses import SoftmaxCrossEntropy try: import av except (ImportError, ModuleNotFoundError) as ex: av = None def ucf101_shape(): return (101, None, 3, 240, 320) def ucf101_loss(): return SoftmaxCrossEntropy
BSD 3-Clause New or Revised License
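A usage sketch for the UCF101 downloader; the target folder is illustrative, and the call assumes the download, unrar, and split-archive steps in the record can all complete on the local machine.

from deep500.datasets.ucf101 import download_ucf101_and_get_file_paths  # path from the record above

train_files, test_files, label_list = download_ucf101_and_get_file_paths(
    folder='/tmp/deep500', split='01')          # folder value is illustrative

print(len(train_files), len(test_files))        # video paths for the chosen train/test split
print(label_list['ApplyEyeMakeup'])             # class name -> zero-based index (0 in the standard classInd.txt)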
dcramer/selenium-saucelabs-python
saucelabs/selenium/__init__.py
selenium.go_back
python
def go_back(self):
    self.do_command("goBack", [])
Simulates the user clicking the "back" button on their browser.
https://github.com/dcramer/selenium-saucelabs-python/blob/d964b18a8b93ec7693838eb3146a7d11bc13df90/saucelabs/selenium/__init__.py#L1000-L1005
__docformat__ = "restructuredtext en" import httplib import urllib class selenium: def __init__(self, host, port, browserStartCommand, browserURL): self.host = host self.port = port self.browserStartCommand = browserStartCommand self.browserURL = browserURL self.sessionId = None self.extensionJs = "" def setExtensionJs(self, extensionJs): self.extensionJs = extensionJs def start(self): result = self.get_string("getNewBrowserSession", [self.browserStartCommand, self.browserURL, self.extensionJs]) try: self.sessionId = result except ValueError: raise Exception, result def stop(self): self.do_command("testComplete", []) self.sessionId = None def do_command(self, verb, args): conn = httplib.HTTPConnection(self.host, self.port) body = u'cmd=' + urllib.quote_plus(unicode(verb).encode('utf-8')) for i in range(len(args)): body += '&' + unicode(i+1) + '=' + urllib.quote_plus(unicode(args[i]).encode('utf-8')) if (None != self.sessionId): body += "&sessionId=" + unicode(self.sessionId) headers = {"Content-Type": "application/x-www-form-urlencoded; charset=utf-8"} conn.request("POST", "/selenium-server/driver/", body, headers) response = conn.getresponse() data = unicode(response.read(), "UTF-8") result = response.reason if (not data.startswith('OK')): raise Exception, data return data def get_string(self, verb, args): result = self.do_command(verb, args) return result[3:] def get_string_array(self, verb, args): csv = self.get_string(verb, args) token = "" tokens = [] escape = False for i in range(len(csv)): letter = csv[i] if (escape): token = token + letter escape = False continue if (letter == '\\'): escape = True elif (letter == ','): tokens.append(token) token = "" else: token = token + letter tokens.append(token) return tokens def get_number(self, verb, args): return self.get_string(verb, args) def get_number_array(self, verb, args): return self.get_string_array(verb, args) def get_boolean(self, verb, args): boolstr = self.get_string(verb, args) if ("true" == boolstr): return True if ("false" == boolstr): return False raise ValueError, "result is neither 'true' nor 'false': " + boolstr def get_boolean_array(self, verb, args): boolarr = self.get_string_array(verb, args) for i in range(len(boolarr)): if ("true" == boolarr[i]): boolarr[i] = True continue if ("false" == boolarr[i]): boolarr[i] = False continue raise ValueError, "result is neither 'true' nor 'false': " + boolarr[i] return boolarr def click(self,locator): self.do_command("click", [locator,]) def double_click(self,locator): self.do_command("doubleClick", [locator,]) def context_menu(self,locator): self.do_command("contextMenu", [locator,]) def click_at(self,locator,coordString): self.do_command("clickAt", [locator,coordString,]) def double_click_at(self,locator,coordString): self.do_command("doubleClickAt", [locator,coordString,]) def context_menu_at(self,locator,coordString): self.do_command("contextMenuAt", [locator,coordString,]) def fire_event(self,locator,eventName): self.do_command("fireEvent", [locator,eventName,]) def focus(self,locator): self.do_command("focus", [locator,]) def key_press(self,locator,keySequence): self.do_command("keyPress", [locator,keySequence,]) def shift_key_down(self): self.do_command("shiftKeyDown", []) def shift_key_up(self): self.do_command("shiftKeyUp", []) def meta_key_down(self): self.do_command("metaKeyDown", []) def meta_key_up(self): self.do_command("metaKeyUp", []) def alt_key_down(self): self.do_command("altKeyDown", []) def alt_key_up(self): self.do_command("altKeyUp", []) def 
control_key_down(self): self.do_command("controlKeyDown", []) def control_key_up(self): self.do_command("controlKeyUp", []) def key_down(self,locator,keySequence): self.do_command("keyDown", [locator,keySequence,]) def key_up(self,locator,keySequence): self.do_command("keyUp", [locator,keySequence,]) def mouse_over(self,locator): self.do_command("mouseOver", [locator,]) def mouse_out(self,locator): self.do_command("mouseOut", [locator,]) def mouse_down(self,locator): self.do_command("mouseDown", [locator,]) def mouse_down_right(self,locator): self.do_command("mouseDownRight", [locator,]) def mouse_down_at(self,locator,coordString): self.do_command("mouseDownAt", [locator,coordString,]) def mouse_down_right_at(self,locator,coordString): self.do_command("mouseDownRightAt", [locator,coordString,]) def mouse_up(self,locator): self.do_command("mouseUp", [locator,]) def mouse_up_right(self,locator): self.do_command("mouseUpRight", [locator,]) def mouse_up_at(self,locator,coordString): self.do_command("mouseUpAt", [locator,coordString,]) def mouse_up_right_at(self,locator,coordString): self.do_command("mouseUpRightAt", [locator,coordString,]) def mouse_move(self,locator): self.do_command("mouseMove", [locator,]) def mouse_move_at(self,locator,coordString): self.do_command("mouseMoveAt", [locator,coordString,]) def type(self,locator,value): self.do_command("type", [locator,value,]) def type_keys(self,locator,value): self.do_command("typeKeys", [locator,value,]) def set_speed(self,value): self.do_command("setSpeed", [value,]) def get_speed(self): return self.get_string("getSpeed", []) def check(self,locator): self.do_command("check", [locator,]) def uncheck(self,locator): self.do_command("uncheck", [locator,]) def select(self,selectLocator,optionLocator): self.do_command("select", [selectLocator,optionLocator,]) def add_selection(self,locator,optionLocator): self.do_command("addSelection", [locator,optionLocator,]) def remove_selection(self,locator,optionLocator): self.do_command("removeSelection", [locator,optionLocator,]) def remove_all_selections(self,locator): self.do_command("removeAllSelections", [locator,]) def submit(self,formLocator): self.do_command("submit", [formLocator,]) def open(self,url): self.do_command("open", [url,]) def open_window(self,url,windowID): self.do_command("openWindow", [url,windowID,]) def select_window(self,windowID): self.do_command("selectWindow", [windowID,]) def select_pop_up(self,windowID): self.do_command("selectPopUp", [windowID,]) def deselect_pop_up(self): self.do_command("deselectPopUp", []) def select_frame(self,locator): self.do_command("selectFrame", [locator,]) def get_whether_this_frame_match_frame_expression(self,currentFrameString,target): return self.get_boolean("getWhetherThisFrameMatchFrameExpression", [currentFrameString,target,]) def get_whether_this_window_match_window_expression(self,currentWindowString,target): return self.get_boolean("getWhetherThisWindowMatchWindowExpression", [currentWindowString,target,]) def wait_for_pop_up(self,windowID,timeout): self.do_command("waitForPopUp", [windowID,timeout,]) def choose_cancel_on_next_confirmation(self): self.do_command("chooseCancelOnNextConfirmation", []) def choose_ok_on_next_confirmation(self): self.do_command("chooseOkOnNextConfirmation", []) def answer_on_next_prompt(self,answer): self.do_command("answerOnNextPrompt", [answer,])
Apache License 2.0
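A short usage sketch built only from methods visible in this record (the constructor, start, open, go_back, and stop); the Selenium RC host, port, and site URL are placeholders, and the library itself is Python 2 era code.

from saucelabs.selenium import selenium   # module path from the record above

sel = selenium("localhost", 4444, "*firefox", "http://www.example.com")
sel.start()
sel.open("/")              # visit a page first
sel.open("/other-page")
sel.go_back()              # sends the "goBack" command to the RC server
sel.stop()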
openstack/networking-powervm
networking_powervm/plugins/ibm/agent/powervm/agent_base.py
BasePVMNeutronAgent.is_hao_event
python
def is_hao_event(self, evt):
    return False
Determines if an Event warrants a heal_and_optimize. :param evt: A pypowervm.wrappers.event.Event wrapper to inspect. :return: True if heal_and_optimize should be invoked as a result of this event; False otherwise.
https://github.com/openstack/networking-powervm/blob/a00fc731b14bd3ef953fe4cffdc74c8e441c0e13/networking_powervm/plugins/ibm/agent/powervm/agent_base.py#L229-L236
import abc import time import eventlet from neutron.agent import rpc as agent_rpc from neutron.conf.agent import common as a_config from neutron_lib.agent import topics from neutron_lib import constants as q_const from neutron_lib import context as ctx from oslo_config import cfg from oslo_log import log as logging from oslo_service import loopingcall from pypowervm import adapter as pvm_adpt from pypowervm.helpers import log_helper as log_hlp from pypowervm.helpers import vios_busy as vio_hlp from pypowervm.tasks import partition as pvm_par from pypowervm.wrappers import event as pvm_evt from pypowervm.wrappers import managed_system as pvm_ms from networking_powervm._i18n import _ from networking_powervm.plugins.ibm.agent.powervm import prov_req as preq from networking_powervm.plugins.ibm.agent.powervm import utils eventlet.monkey_patch() LOG = logging.getLogger(__name__) agent_opts = [ cfg.IntOpt('exception_interval', default=5, help=_("The number of seconds agent will wait between " "polling when exception is caught")), cfg.IntOpt('heal_and_optimize_interval', default=1800, help=_('The number of seconds the agent should wait between ' 'heal/optimize intervals.')), ] cfg.CONF.register_opts(agent_opts, "AGENT") a_config.register_agent_state_opts_helper(cfg.CONF) a_config.register_root_helper(cfg.CONF) ACONF = cfg.CONF.AGENT FULL_REFETCH_EVENTS = ( pvm_evt.EventType.CACHE_CLEARED, pvm_evt.EventType.MISSING_EVENTS, pvm_evt.EventType.NEW_CLIENT) SINGLE_OBJ_EVENTS = ( pvm_evt.EventType.INVALID_URI, pvm_evt.EventType.ADD_URI, pvm_evt.EventType.MODIFY_URI, pvm_evt.EventType.HIDDEN_URI, pvm_evt.EventType.VISIBLE_URI, pvm_evt.EventType.CUSTOM_CLIENT_EVENT, pvm_evt.EventType.DELETE_URI) class PVMPluginApi(agent_rpc.PluginApi): pass class VIFEventHandler(pvm_adpt.WrapperEventHandler): def __init__(self, agent): self.agent = agent self.adapter = self.agent.adapter self.just_started = True def _refetch_all(self, prov_req_set): lpar_vifs = utils.list_vifs(self.adapter, self.agent.vif_wrapper_class) prov_reqs = preq.ProvisionRequest.for_wrappers(self.agent, lpar_vifs, preq.PLUG) rms = {req for req in prov_req_set if req.action == preq.PLUG} LOG.debug("Removing all existing plug requests: %s", rms) prov_req_set -= rms LOG.debug("Adding new wrapper-based plug requests: %s", [str(prov_req) for prov_req in prov_reqs]) prov_req_set |= set(prov_reqs) def _process_event(self, event, prov_req_set): prov_req = preq.ProvisionRequest.for_event(self.agent, event) if prov_req is None: return False rms = {prq for prq in prov_req_set if prq == prov_req} LOG.debug("Consolidating - removing requests: %s", [str(rm_preq) for rm_preq in rms]) prov_req_set -= rms LOG.debug("Adding new event-based request: %s", str(prov_req)) prov_req_set.add(prov_req) def process(self, events): prov_req_set = set() do_heal = False for event in events: if event.etype in FULL_REFETCH_EVENTS: if not self.just_started: self._refetch_all(prov_req_set) elif event.etype in SINGLE_OBJ_EVENTS: self._process_event(event, prov_req_set) if self.agent.is_hao_event(event): LOG.info("Received heal-and-optimize event: %s", str(event)) do_heal = True self.just_started = False if do_heal: self.agent.heal_and_optimize() self.agent.provision_devices(prov_req_set) class BasePVMNeutronAgent(object): RPC_API_VERSION = '1.1' @abc.abstractproperty def agent_id(self): raise NotImplementedError() @abc.abstractproperty def agent_binary_name(self): raise NotImplementedError() @abc.abstractproperty def agent_type(self): raise NotImplementedError() @abc.abstractproperty 
def vif_type(self): raise NotImplementedError() @abc.abstractproperty def vif_wrapper_class(self): raise NotImplementedError() @abc.abstractmethod def parse_bridge_mappings(self): raise NotImplementedError() @abc.abstractmethod def heal_and_optimize(self): raise NotImplementedError()
Apache License 2.0
klen/aioauth-client
aioauth_client/__init__.py
OAuth2Client.__init__
python
def __init__(self, client_id: str, client_secret: str,
             base_url: str = None, authorize_url: str = None,
             access_token: str = None, access_token_url: str = None,
             access_token_key: str = None, transport: httpx.AsyncClient = None,
             logger: logging.Logger = None, **params):
    super().__init__(base_url, authorize_url, access_token_key,
                     access_token_url, transport, logger)
    self.access_token = access_token
    self.client_id = client_id
    self.client_secret = client_secret
    self.params = params
Initialize the client.
https://github.com/klen/aioauth-client/blob/008195a8787f92318b7d49fabe5d0fcc0ba279ee/aioauth_client/__init__.py#L290-L301
from __future__ import annotations import typing as t import base64 import hmac import logging import time from hashlib import sha1 from random import SystemRandom from urllib.parse import parse_qsl, quote, urlencode, urljoin, urlsplit import httpx __version__ = "0.27.2" __project__ = "aioauth-client" __author__ = "Kirill Klenov <[email protected]>" __license__ = "MIT" RANDOM = SystemRandom().random class OAuthException(RuntimeError): pass class User: __slots__ = 'id', 'email', 'first_name', 'last_name', 'username', 'picture', 'link', 'locale', 'city', 'country', 'gender' def __init__(self, **info): for attr in self.__slots__: setattr(self, attr, info.get(attr)) class Signature(object): name: str = '' @staticmethod def _escape(s: str) -> str: return quote(s, safe=b'~') def sign(self, consumer_secret: str, method: str, url: str, oauth_token_secret: str = None, **params): raise NotImplementedError('Shouldnt be called.') class HmacSha1Signature(Signature): name = 'HMAC-SHA1' def sign(self, consumer_secret: str, method: str, url: str, oauth_token_secret: str = None, escape: bool = False, **params) -> str: if escape: query = [(self._escape(k), self._escape(v)) for k, v in params.items()] query_string = '&'.join(['%s=%s' % item for item in sorted(query)]) else: query_string = urlencode(params) signature = "&".join(map(self._escape, (method.upper(), url, query_string))) key = self._escape(consumer_secret) + "&" if oauth_token_secret: key += self._escape(oauth_token_secret) hashed = hmac.new(key.encode(), signature.encode(), sha1) return base64.b64encode(hashed.digest()).decode() class ClientRegistry(type): clients: t.Dict[str, t.Type[Client]] = {} def __new__(mcs, name, bases, params): cls = super().__new__(mcs, name, bases, params) mcs.clients[cls.name] = cls return cls class Client(object, metaclass=ClientRegistry): name: str = '' base_url: str = '' user_info_url: str = '' access_token_key: str = 'access_token' shared_key: str = 'oauth_verifier' access_token_url: str = '' authorize_url: str = '' def __init__(self, base_url: str = None, authorize_url: str = None, access_token_key: str = None, access_token_url: str = None, transport: httpx.AsyncClient = None, logger: logging.Logger = None): self.base_url = base_url or self.base_url self.authorize_url = authorize_url or self.authorize_url self.access_token_key = access_token_key or self.access_token_key self.access_token_url = access_token_url or self.access_token_url self.logger = logger or logging.getLogger('OAuth: %s' % self.name) self.transport = transport def _get_url(self, url: str) -> str: if self.base_url and not url.startswith(('http://', 'https://')): return urljoin(self.base_url, url) return url def __str__(self) -> str: return f"{ self.name.title() } {self.base_url}" def __repr__(self): return f"<{self}>" async def _request(self, method: str, url: str, raise_for_status: bool = False, **options) -> t.Union[t.Dict, str]: transport = self.transport or httpx.AsyncClient() async with transport as client: response = await client.request(method, url, **options) if raise_for_status and response.status_code >= 300: raise OAuthException(str(response)) self.logger.debug("Request %s: %s %r", method, url, options) if 'json' in response.headers.get('CONTENT-TYPE'): return response.json() return dict(parse_qsl(response.text)) or response.text def request(self, method: str, url: str, params: t.Dict = None, headers: t.Dict = None, **options): raise NotImplementedError('Shouldnt be called.') async def user_info(self, **options) -> t.Tuple[User, t.Any]: 
if not self.user_info_url: raise NotImplementedError( 'The provider doesnt support user_info method.') data = await self.request('GET', self.user_info_url, raise_for_status=True, **options) user = User(**dict(self.user_parse(data))) return user, data @staticmethod def user_parse(data: t.Any) -> t.Generator[t.Tuple[str, t.Any], None, None]: yield 'id', None def get_authorize_url(self, **params) -> str: return self.authorize_url async def get_access_token(self, *args, **kwargs) -> t.Tuple[str, t.Any]: raise NotImplementedError class OAuth1Client(Client): name = 'oauth1' access_token_key = 'oauth_token' version = '1.0' escape = False request_token_url: str = '' def __init__(self, consumer_key: str, consumer_secret: str, base_url: str = None, authorize_url: str = None, oauth_token: str = None, oauth_token_secret: str = None, request_token_url: str = None, access_token_url: str = None, access_token_key: str = None, transport: httpx.AsyncClient = None, logger: logging.Logger = None, signature: Signature = None, **params): super().__init__(base_url, authorize_url, access_token_key, access_token_url, transport, logger) self.oauth_token = oauth_token self.oauth_token_secret = oauth_token_secret self.consumer_key = consumer_key self.consumer_secret = consumer_secret self.request_token_url = request_token_url or self.request_token_url self.params = params self.signature = signature or HmacSha1Signature() def get_authorize_url(self, request_token: str = None, **params) -> str: params.update({'oauth_token': request_token or self.oauth_token}) params.update(self.params) return self.authorize_url + '?' + urlencode(params) def request(self, method: str, url: str, params: t.Dict = None, headers: t.Dict = None, **options) -> t.Awaitable[t.Union[t.Dict, str]]: oparams = { 'oauth_consumer_key': self.consumer_key, 'oauth_nonce': sha1(str(RANDOM()).encode('ascii')).hexdigest(), 'oauth_signature_method': self.signature.name, 'oauth_timestamp': str(int(time.time())), 'oauth_version': self.version, } oparams.update(params or {}) if self.oauth_token: oparams['oauth_token'] = self.oauth_token url = self._get_url(url) if urlsplit(url).query: raise ValueError( 'Request parameters should be in the "params" parameter, ' 'not inlined in the URL') oparams['oauth_signature'] = self.signature.sign( self.consumer_secret, method, url, oauth_token_secret=self.oauth_token_secret, escape=self.escape, **oparams) self.logger.debug("%s %s", url, oparams) return self._request(method, url, params=oparams, headers=headers, **options) async def get_request_token(self, **params) -> t.Tuple[str, t.Any]: params = dict(self.params, **params) data = await self.request( 'GET', self.request_token_url, raise_for_status=True, params=params) if not isinstance(data, dict): return '', data self.oauth_token = data.get('oauth_token') or '' self.oauth_token_secret = data.get('oauth_token_secret') return self.oauth_token, data async def get_access_token(self, oauth_verifier, request_token=None, headers=None, **params) -> t.Tuple[str, t.Any]: if not isinstance(oauth_verifier, str) and self.shared_key in oauth_verifier: oauth_verifier = oauth_verifier[self.shared_key] if request_token and self.oauth_token != request_token: raise OAuthException( 'Failed to obtain OAuth 1.0 access token. 
' 'Request token is invalid') data = await self.request( 'POST', self.access_token_url, raise_for_status=True, headers=headers, params={'oauth_verifier': oauth_verifier, 'oauth_token': request_token}) if isinstance(data, str): raise OAuthException( 'Failed to obtain OAuth 1.0 access token. ' f"Invalid data: {data}") self.oauth_token = data.get('oauth_token') or '' self.oauth_token_secret = data.get('oauth_token_secret') return self.oauth_token, data class OAuth2Client(Client): name = 'oauth2' shared_key = 'code'
MIT License
bitmovin/bitmovin-api-sdk-python
bitmovin_api_sdk/models/cea708_caption_input_stream.py
Cea708CaptionInputStream.to_dict
python
def to_dict(self):
    result = {}

    if hasattr(super(Cea708CaptionInputStream, self), "to_dict"):
        result = super(Cea708CaptionInputStream, self).to_dict()
    for attr, _ in six.iteritems(self.openapi_types):
        value = getattr(self, attr)
        if value is None:
            continue
        if isinstance(value, list):
            if len(value) == 0:
                continue
            result[self.attribute_map.get(attr)] = [y.value if isinstance(y, Enum) else y for y in [x.to_dict() if hasattr(x, "to_dict") else x for x in value]]
        elif hasattr(value, "to_dict"):
            result[self.attribute_map.get(attr)] = value.to_dict()
        elif isinstance(value, Enum):
            result[self.attribute_map.get(attr)] = value.value
        elif isinstance(value, dict):
            result[self.attribute_map.get(attr)] = {k: (v.to_dict() if hasattr(v, "to_dict") else v) for (k, v) in value.items()}
        else:
            result[self.attribute_map.get(attr)] = value

    return result
Returns the model properties as a dict
https://github.com/bitmovin/bitmovin-api-sdk-python/blob/79dd938804197151af7cbe5501c7ec1d97872c15/bitmovin_api_sdk/models/cea708_caption_input_stream.py#L154-L177
from enum import Enum from six import string_types, iteritems from bitmovin_api_sdk.common.poscheck import poscheck_model from bitmovin_api_sdk.models.input_stream import InputStream import pprint import six class Cea708CaptionInputStream(InputStream): @poscheck_model def __init__(self, id_=None, name=None, description=None, created_at=None, modified_at=None, custom_data=None, input_id=None, input_path=None, channel=None): super(Cea708CaptionInputStream, self).__init__(id_=id_, name=name, description=description, created_at=created_at, modified_at=modified_at, custom_data=custom_data) self._input_id = None self._input_path = None self._channel = None self.discriminator = None if input_id is not None: self.input_id = input_id if input_path is not None: self.input_path = input_path if channel is not None: self.channel = channel @property def openapi_types(self): types = {} if hasattr(super(Cea708CaptionInputStream, self), 'openapi_types'): types = getattr(super(Cea708CaptionInputStream, self), 'openapi_types') types.update({ 'input_id': 'string_types', 'input_path': 'string_types', 'channel': 'int' }) return types @property def attribute_map(self): attributes = {} if hasattr(super(Cea708CaptionInputStream, self), 'attribute_map'): attributes = getattr(super(Cea708CaptionInputStream, self), 'attribute_map') attributes.update({ 'input_id': 'inputId', 'input_path': 'inputPath', 'channel': 'channel' }) return attributes @property def input_id(self): return self._input_id @input_id.setter def input_id(self, input_id): if input_id is not None: if not isinstance(input_id, string_types): raise TypeError("Invalid type for `input_id`, type has to be `string_types`") self._input_id = input_id @property def input_path(self): return self._input_path @input_path.setter def input_path(self, input_path): if input_path is not None: if not isinstance(input_path, string_types): raise TypeError("Invalid type for `input_path`, type has to be `string_types`") self._input_path = input_path @property def channel(self): return self._channel @channel.setter def channel(self, channel): if channel is not None: if not isinstance(channel, int): raise TypeError("Invalid type for `channel`, type has to be `int`") self._channel = channel
MIT License
microsoft/botbuilder-python
libraries/botbuilder-ai/botbuilder/ai/qna/utils/generate_answer_utils.py
GenerateAnswerUtils.__init__
python
def __init__(
    self,
    telemetry_client: Union[BotTelemetryClient, NullTelemetryClient],
    endpoint: QnAMakerEndpoint,
    options: QnAMakerOptions,
    http_client: ClientSession,
):
    self._telemetry_client = telemetry_client
    self._endpoint = endpoint

    self.options = (
        options if isinstance(options, QnAMakerOptions) else QnAMakerOptions()
    )
    self._validate_options(self.options)

    self._http_client = http_client
Parameters:
-----------
telemetry_client: Telemetry client.
endpoint: QnA Maker endpoint details.
options: QnA Maker options to configure the instance.
http_client: HTTP client.
https://github.com/microsoft/botbuilder-python/blob/41211de2d7854e27aca8e3d1eccb24352be7e915/libraries/botbuilder-ai/botbuilder/ai/qna/utils/generate_answer_utils.py#L36-L63
from copy import copy from typing import Any, List, Union import json import requests from aiohttp import ClientResponse, ClientSession from botbuilder.core import BotTelemetryClient, NullTelemetryClient, TurnContext from botbuilder.schema import Activity from .http_request_utils import HttpRequestUtils from ..qnamaker_endpoint import QnAMakerEndpoint from ..qnamaker_options import QnAMakerOptions from ..models import ( GenerateAnswerRequestBody, QnAMakerTraceInfo, QueryResult, QueryResults, ) QNAMAKER_TRACE_NAME = "QnAMaker" QNAMAKER_TRACE_LABEL = "QnAMaker Trace" QNAMAKER_TRACE_TYPE = "https://www.qnamaker.ai/schemas/trace" class GenerateAnswerUtils:
MIT License
h2r/pomdp-py
pomdp_problems/tag/models/observation_model.py
TagObservationModel.sample
python
def sample(self, next_state, action):
    if next_state.robot_position == next_state.target_position:
        return TagObservation(next_state.target_position)
    else:
        return TagObservation(None)
There is no stochasticity in the observation model.
https://github.com/h2r/pomdp-py/blob/5c1837c38676eb53442af238cbec4115f3b17f28/pomdp_problems/tag/models/observation_model.py#L25-L30
import pomdp_py from pomdp_problems.tag.domain.observation import * import pomdp_problems.tag.constants as constants class TagObservationModel(pomdp_py.ObservationModel): def probability(self, observation, next_state, action, **kwargs): if next_state.robot_position == next_state.target_position: if observation.target_position is None: return constants.EPSILON else: if observation.target_position == next_state.target_position: return 1.0 - constants.EPSILON else: return constants.EPSILON else: if observation.target_position is None: return 1.0 - constants.EPSILON else: return constants.EPSILON
MIT License
google/personfinder
app/user_agents.py
prefer_sjis_charset
python
def prefer_sjis_charset(request):
    user_agent = request.headers.get('User-Agent')
    return user_agent and SJIS_PREFERRED_USER_AGENT_RE.match(user_agent)
Returns True if Shift_JIS charset should be used for the user agent.
https://github.com/google/personfinder/blob/475f4c0ce916036d39bae2d480cde07126550875/app/user_agents.py#L42-L45
import re JP_TIER2_MOBILE_USER_AGENT_RE = re.compile( r'^(KDDI|DoCoMo|SoftBank|J-PHONE|Vodafone)') SJIS_PREFERRED_USER_AGENT_RE = re.compile(r'^KDDI') def is_jp_tier2_mobile_phone(request): user_agent = request.headers.get('User-Agent') return user_agent and JP_TIER2_MOBILE_USER_AGENT_RE.match(user_agent) def prefer_lite_ui(request): user_agent = request.META.get('HTTP_USER_AGENT', None) return user_agent and JP_TIER2_MOBILE_USER_AGENT_RE.match(user_agent)
Apache License 2.0
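A quick behavioral sketch of prefer_sjis_charset; SJIS_PREFERRED_USER_AGENT_RE only matches user agents starting with 'KDDI', so only the first call below is truthy. FakeRequest is a test double introduced for illustration, not part of the source.

from app.user_agents import prefer_sjis_charset   # module path from the record above

class FakeRequest:
    def __init__(self, user_agent):
        self.headers = {'User-Agent': user_agent}

print(bool(prefer_sjis_charset(FakeRequest('KDDI-CA31 UP.Browser/6.2.0.7.3.129'))))  # True
print(bool(prefer_sjis_charset(FakeRequest('Mozilla/5.0 (Windows NT 10.0)'))))       # False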
quay/quay
buildman/manager/executor.py
BuilderExecutor.minimum_retry_threshold
python
def minimum_retry_threshold(self):
    return self.executor_config.get("MINIMUM_RETRY_THRESHOLD", 0)
Returns the minimum number of retries required for this executor to be used or 0 if none.
https://github.com/quay/quay/blob/f50f37a393fa2273234f8ac0aa9f34a03a77a731/buildman/manager/executor.py#L156-L160
import datetime import hashlib import io import json import logging import os import socket import subprocess import threading import time import uuid from functools import partial, wraps, lru_cache import boto3 import botocore import cachetools.func import requests from jinja2 import FileSystemLoader, Environment from prometheus_client import Histogram import release from _init import ROOT_DIR, OVERRIDE_CONFIG_DIRECTORY from app import app from buildman.container_cloud_config import CloudConfigContext from buildman.server import SECURE_GRPC_SERVER_PORT logger = logging.getLogger(__name__) ONE_HOUR = 60 * 60 _TAG_RETRY_COUNT = 3 _TAG_RETRY_SLEEP = 2 ENV = Environment(loader=FileSystemLoader(os.path.join(ROOT_DIR, "buildman/templates"))) CloudConfigContext().populate_jinja_environment(ENV) TEMPLATE = ENV.get_template("cloudconfig.json") build_start_duration = Histogram( "quay_build_start_duration_seconds", "seconds taken for a executor to start executing a queued build", labelnames=["executor"], ) def observe(metric, *labels): def decorator(func): @wraps(func) def wrapper(*args, **kwargs): trigger_time = time.time() rv = func(*args, **kwargs) metric.labels(*labels).observe(time.time() - trigger_time) return rv return wrapper return decorator def persist_for_debugging(func): @wraps(func) def wrapper(self, *args, **kwargs): if self.executor_config.get("DEBUG", False): logger.debug("Executor %s DEBUG set, not calling 'stop_builder()'", self.name) return return func(self, *args, **kwargs) return wrapper class ExecutorException(Exception): pass class BuilderExecutor(object): def __init__(self, executor_config, registry_hostname, manager_hostname): self.executor_config = executor_config self.registry_hostname = registry_hostname self.manager_hostname = manager_hostname @property def name(self): return self.executor_config.get("NAME") or self.__class__.__name__ @property def setup_time(self): return self.executor_config.get("SETUP_TIME") def start_builder(self, token, build_uuid): raise NotImplementedError def stop_builder(self, builder_id): raise NotImplementedError @property def running_builders_count(self): raise NotImplementedError def allowed_for_namespace(self, namespace): namespace_whitelist = self.executor_config.get("NAMESPACE_WHITELIST") if namespace_whitelist is not None and namespace in namespace_whitelist: return True staged_rollout = self.executor_config.get("STAGED_ROLLOUT") if staged_rollout is not None: bucket = int(hashlib.sha256(namespace.encode("utf-8")).hexdigest()[-2:], 16) return bucket < (256 * staged_rollout) return staged_rollout is None and namespace_whitelist is None @property
Apache License 2.0
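The minimum_retry_threshold accessor above is a plain configuration lookup. As an illustrative aside, the same pattern in isolation, with a hypothetical config dict standing in for the executor configuration:

    executor_config = {"MINIMUM_RETRY_THRESHOLD": 2}
    threshold = executor_config.get("MINIMUM_RETRY_THRESHOLD", 0)  # -> 2; falls back to 0 when the key is absent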
staggerpkg/stagger
stagger/fileutil.py
replace_chunk
python
def replace_chunk(filename, offset, length, chunk, in_place=True, max_mem=5): with suppress_interrupt(): _replace_chunk(filename, offset, length, chunk, in_place, max_mem)
Replace length bytes of data with chunk, starting at offset. Any KeyboardInterrupts arriving while replace_chunk is running are deferred until the operation is complete. If in_place is true, the operation works directly on the original file; this is fast and works on files that are already open, but an error or interrupt may lead to corrupt file contents. If in_place is false, the function prepares a copy first, then renames it back over the original file. This method is slower, but it prevents corruption on systems with atomic renames (UNIX), and reduces the window of vulnerability elsewhere (Windows). If there is no need to move data that is not being replaced, then we use the direct method irrespective of in_place. (In this case an interrupt may only corrupt the chunk being replaced.)
https://github.com/staggerpkg/stagger/blob/6530db14afc5d7d8a4599b7f3b26158fb367d786/stagger/fileutil.py#L83-L102
import io import os.path import shutil import tempfile import signal from contextlib import contextmanager def xread(file, length): data = file.read(length) if len(data) != length: raise EOFError return data @contextmanager def opened(filename, mode): if isinstance(filename, str): file = open(filename, mode) try: yield file finally: if not file.closed: file.close() else: yield filename @contextmanager def suppress_interrupt(): interrupted = False def sigint_handler(signum, frame): nonlocal interrupted interrupted = True s = signal.signal(signal.SIGINT, sigint_handler) try: yield None finally: signal.signal(signal.SIGINT, s) if interrupted: raise KeyboardInterrupt()
BSD 2-Clause Simplified License
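A minimal usage sketch for replace_chunk follows; the file name is hypothetical and the import path is inferred from the record's function_path:

    from stagger.fileutil import replace_chunk

    # Overwrite the first 10 bytes of the file with 10 zero bytes, using the
    # safer copy-and-rename path instead of editing the file in place.
    replace_chunk("song.mp3", 0, 10, b"\x00" * 10, in_place=False)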
veekun/pokedex
scripts/add-oras-locations.py
make_identifier
python
def make_identifier(name): if isinstance(name, bytes): identifier = name.decode('utf-8') else: identifier = name identifier = identifier.lower() identifier = identifier.replace(u'+', u' plus ') identifier = re.sub(u'[ _–]+', u'-', identifier) identifier = re.sub(u"['./;’(),:]", u'', identifier) identifier = identifier.replace(u'é', u'e') if identifier == '???': identifier = 'inside-of-truck' if not identifier.replace(u"-", u"").isalnum(): raise ValueError(identifier) return identifier
Make a string safe to use as an identifier. Valid characters are lowercase alphanumerics and "-". This function may raise ValueError if it can't come up with a suitable identifier. This function is useful for scripts which add things with names.
https://github.com/veekun/pokedex/blob/cc483e1877f22b8c19ac27ec0ff5fafd09c5cd5b/scripts/add-oras-locations.py#L11-L34
import os import re import io
MIT License
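An illustrative sketch of what make_identifier yields, traced from the transformation steps in the function body above:

    make_identifier("Route 120")  # -> "route-120" (lowercased, spaces become "-")
    make_identifier("Café")       # -> "cafe" ("é" folded to "e")
    make_identifier("???")        # -> "inside-of-truck" (special-cased in the function)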
mrmitch/realdebrid-cli
rdcli/RDWorker.py
RDWorker.unrestrict
python
def unrestrict(self, link, password=''): opener = build_opener(HTTPCookieProcessor(self.cookies)) response = opener.open(self._endpoint % 'unrestrict.php?%s' % urlencode({'link': link, 'password': password})) resp = load(response) opener.close() if resp['error'] == 0: info = resp['generated_links'][0] return info[2], info[0].replace('/', '_') else: raise UnrestrictionError(resp['message'].encode('utf-8'), resp['error'])
Unrestrict a download URL. Returns a tuple of the unrestricted URL and the filename. :param link: URL to unrestrict :param password: password to use for the unrestriction :return: :raise:
https://github.com/mrmitch/realdebrid-cli/blob/da84d28a64c213d7c16d5d014ce119cb12c87271/rdcli/RDWorker.py#L97-L113
from cookielib import MozillaCookieJar from json import load from urllib import urlencode from urllib2 import build_opener, HTTPCookieProcessor from os import path class RDError(Exception): DEFAULT_CODE = -100 def __init__(self, message, code=DEFAULT_CODE): self.message = message self.code = code def __str__(self): return '[Error %i] %s' % (self.code, self.message) class UnrestrictionError(RDError): DEDICATED_SERVER = 3 UNSUPPORTED = 4 UPGRADE_NEEDED = 2 NO_SERVER = 9 UNAVAILABLE = 11 @classmethod def fixable_errors(cls): return cls.UPGRADE_NEEDED, cls.NO_SERVER, cls.DEDICATED_SERVER class LoginError(RDError): MISSING_INFO = -1 BAD_CREDENTIALS = 1 TOO_MANY_ATTEMPTS = 3 class RDWorker: _endpoint = 'http://www.real-debrid.com/ajax/%s' def __init__(self, cookie_file): self._cookie_file = cookie_file self.cookies = MozillaCookieJar(self._cookie_file) def login(self, username, password_hash): if path.isfile(self._cookie_file): self.cookies.load(self._cookie_file) for cookie in self.cookies: if cookie.name == 'auth' and not cookie.is_expired(): return opener = build_opener(HTTPCookieProcessor(self.cookies)) try: response = opener.open(self._endpoint % 'login.php?%s' % urlencode({'user': username, 'pass': password_hash})) resp = load(response) opener.close() if resp['error'] == 0: self.cookies.save(self._cookie_file) else: raise LoginError(resp['message'].encode('utf-8'), resp['error']) except Exception as e: raise Exception('Login failed: %s' % str(e))
MIT License
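A hypothetical usage sketch for RDWorker.unrestrict; the cookie file, credentials and link are made up, and the code targets Python 2 as the record's imports show:

    worker = RDWorker("cookies.txt")
    worker.login("user", "md5-hash-of-password")  # hypothetical credentials
    url, filename = worker.unrestrict("http://example.com/some-file")
    # a failed unrestriction raises UnrestrictionError carrying the API's error code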
packtpublishing/python-object-oriented-programming-cookbook
Chapter02/C02R06_PassingStateinConstruction.py
Person.__str__
python
def __str__(self): return ( '<%s [%s] given_name=%s family_name=%s>' % ( self.__class__.__name__, hex(id(self)), self.given_name, self.family_name, ) )
Returns a string representation of the object
https://github.com/packtpublishing/python-object-oriented-programming-cookbook/blob/4840b0ee9e155c8ed664886c0aad20d44d48dac2/Chapter02/C02R06_PassingStateinConstruction.py#L21-L29
class Person: def __init__(self, given_name:str, family_name:str, birth_date:(str,None)=None, email_address:(str,None)=None ): self.given_name = given_name self.family_name = family_name self.birth_date = birth_date self.email_address = email_address
MIT License
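An illustrative sketch of the string the __str__ above produces; the names are hypothetical and the hex id varies per run:

    p = Person("Ada", "Lovelace")
    str(p)  # e.g. "<Person [0x7f9c...] given_name=Ada family_name=Lovelace>"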
hubblestack/hubble
tests/support/mock.py
MockFH.writelines_calls
python
def writelines_calls(self): return [x[1][0] for x in self.writelines.mock_calls]
Return a list of all calls to the .writelines() mock
https://github.com/hubblestack/hubble/blob/804eb29321f16ae36ce1e4a53f5c3e7bbcda1d0a/tests/support/mock.py#L155-L159
import collections import copy import errno import fnmatch import sys import hubblestack.utils.stringutils try: from unittest.mock import ( Mock, MagicMock, patch, sentinel, DEFAULT, create_autospec, FILTER_DIR, NonCallableMock, NonCallableMagicMock, PropertyMock, __version__ ) NO_MOCK = False NO_MOCK_REASON = '' mock_version = [] for __part in __version__.split('.'): try: mock_version.append(int(__part)) except ValueError: mock_version.append(__part) mock_version = tuple(mock_version) except ImportError as exc: NO_MOCK = True NO_MOCK_REASON = 'mock python module is unavailable' mock_version = (0, 0, 0) class MagicMock(object): __name__ = str('{0}.fakemock').format(__name__) def __init__(self, *args, **kwargs): pass def dict(self, *args, **kwargs): return self def multiple(self, *args, **kwargs): return self def __call__(self, *args, **kwargs): return self from unittest.mock import patch Mock = MagicMock patch = patch sentinel = object() DEFAULT = object() create_autospec = MagicMock() FILTER_DIR = True NonCallableMock = MagicMock() NonCallableMagicMock = MagicMock() mock_open = object() PropertyMock = object() call = tuple ANY = object() if NO_MOCK is False: try: from unittest.mock import call, ANY except ImportError: NO_MOCK = True NO_MOCK_REASON = 'you need to upgrade your mock version to >= 0.8.0' class MockFH(object): def __init__(self, filename, read_data, *args, **kwargs): self.filename = filename self.read_data = read_data try: self.mode = args[0] except IndexError: self.mode = kwargs.get('mode', 'r') self.binary_mode = 'b' in self.mode self.read_mode = any(x in self.mode for x in ('r', '+')) self.write_mode = any(x in self.mode for x in ('w', 'a', '+')) self.empty_string = b'' if self.binary_mode else '' self.call = MockCall(filename, *args, **kwargs) self.read_data_iter = self._iterate_read_data(read_data) self.read = Mock(side_effect=self._read) self.readlines = Mock(side_effect=self._readlines) self.readline = Mock(side_effect=self._readline) self.write = Mock(side_effect=self._write) self.writelines = Mock(side_effect=self._writelines) self.close = Mock() self.seek = Mock() self.__loc = 0 self.__read_data_ok = False def _iterate_read_data(self, read_data): newline = b'\n' if isinstance(read_data, bytes) else '\n' read_data = [line + newline for line in read_data.split(newline)] if read_data[-1] == newline: read_data = read_data[:-1] else: read_data[-1] = read_data[-1][:-1] for line in read_data: yield line @property def write_calls(self): return [x[1][0] for x in self.write.mock_calls] @property
Apache License 2.0
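The writelines_calls property above extracts the first positional argument from every recorded call; a small sketch of the same indexing with a plain unittest.mock.Mock:

    from unittest.mock import Mock

    writelines = Mock()
    writelines(["line 1\n"])
    writelines(["line 2\n"])
    [c[1][0] for c in writelines.mock_calls]  # -> [["line 1\n"], ["line 2\n"]]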
takos22/codingame
codingame/client/base.py
BaseClient.get_codingamer
python
def get_codingamer( self, codingamer: typing.Union[str, int] ) -> "CodinGamer":
|maybe_coro| Get a :class:`~codingame.CodinGamer` from their public handle, their ID or from their pseudo. .. note:: ``codingamer`` can be the public handle, the ID or the pseudo. Using the public handle or the ID is recommended because it won't change even if the codingamer changes their pseudo. The public handle is a 39 character long hexadecimal string that is unique to the CodinGamer and identifies them. Regex of a public handle: ``[0-9a-f]{32}[0-9]{7}`` The ID is a 7 digit long integer. Parameters ----------- codingamer: :class:`str` or :class:`int` The CodinGamer's public handle, ID or pseudo. Raises ------ :exc:`~codingame.CodinGamerNotFound` The CodinGamer with the given public handle, ID or pseudo isn't found. Returns -------- :class:`~codingame.CodinGamer` The requested CodinGamer. .. versionadded:: 0.1 .. versionchanged:: 0.2 Renamed ``Client.codingamer()`` to :meth:`~codingame.Client.get_codingamer`. .. versionchanged:: 0.3.3 Add searching with CodinGamer pseudo. .. versionchanged:: 0.3.5 Add searching with CodinGamer ID.
https://github.com/takos22/codingame/blob/0332c294783a8be1a0c044615ed75d60237e499c/codingame/client/base.py#L123-L169
import typing from abc import ABC, abstractmethod from ..state import ConnectionState if typing.TYPE_CHECKING: from ..clash_of_code import ClashOfCode from ..codingamer import CodinGamer from ..leaderboard import ( ChallengeLeaderboard, GlobalLeaderboard, PuzzleLeaderboard, ) from ..notification import Notification __all__ = ("BaseClient",) class BaseClient(ABC): def __init_subclass__(cls, doc_prefix: str = "", **kwargs): super().__init_subclass__(**kwargs) doc_prefix = doc_prefix.strip() + "\n\n" * (len(doc_prefix) > 0) prefix = "|maybe_coro|\n\n" for name, method in cls.__dict__.items(): if not callable(method): continue if name.startswith("__"): continue if method.__doc__ is None: method.__doc__ = getattr(cls.__base__, name).__doc__ method.__doc__ = doc_prefix + ( method.__doc__[len(prefix) :] if method.__doc__.startswith(prefix) else method.__doc__ ) def __init__(self, is_async: bool = False): self._state = ConnectionState(is_async) def __enter__(self): if self.is_async: raise TypeError( "Asynchronous client must be used in an asynchronous " "context manager (async with) not in a synchronous one (with)." ) return self def __exit__(self, *_): self.close() async def __aenter__(self): if not self.is_async: raise TypeError( "Synchronous client must be used in a synchronous context" "manager (with) not in an asynchronous one (async with)." ) return self async def __aexit__(self, *_): await self.close() def close(self): self._state.http.close() @property def is_async(self) -> bool: return self._state.is_async @property def logged_in(self) -> bool: return self._state.logged_in @property def codingamer(self) -> typing.Optional["CodinGamer"]: return self._state.codingamer @abstractmethod def login(self, email: str, password: str) -> "CodinGamer": @abstractmethod
MIT License
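A hypothetical usage sketch for get_codingamer; the pseudo is made up, and it assumes the package exposes a synchronous codingame.Client as its docstrings suggest:

    import codingame

    client = codingame.Client()
    codingamer = client.get_codingamer("some-pseudo")  # also accepts a public handle or an ID
    print(codingamer)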
googleapis/python-tasks
google/cloud/tasks_v2beta3/services/cloud_tasks/transports/grpc_asyncio.py
CloudTasksGrpcAsyncIOTransport.set_iam_policy
python
def set_iam_policy( self, ) -> Callable[[iam_policy_pb2.SetIamPolicyRequest], Awaitable[policy_pb2.Policy]]: if "set_iam_policy" not in self._stubs: self._stubs["set_iam_policy"] = self.grpc_channel.unary_unary( "/google.cloud.tasks.v2beta3.CloudTasks/SetIamPolicy", request_serializer=iam_policy_pb2.SetIamPolicyRequest.SerializeToString, response_deserializer=policy_pb2.Policy.FromString, ) return self._stubs["set_iam_policy"]
r"""Return a callable for the set iam policy method over gRPC. Sets the access control policy for a [Queue][google.cloud.tasks.v2beta3.Queue]. Replaces any existing policy. Note: The Cloud Console does not check queue-level IAM permissions yet. Project-level permissions are required to use the Cloud Console. Authorization requires the following `Google IAM <https://cloud.google.com/iam>`__ permission on the specified resource parent: - ``cloudtasks.queues.setIamPolicy`` Returns: Callable[[~.SetIamPolicyRequest], Awaitable[~.Policy]]: A function that, when called, will call the underlying RPC on the server.
https://github.com/googleapis/python-tasks/blob/8bebdfc7b6520a2485e618ff7dee665c211c041a/google/cloud/tasks_v2beta3/services/cloud_tasks/transports/grpc_asyncio.py#L545-L580
import warnings from typing import Awaitable, Callable, Dict, Optional, Sequence, Tuple, Union from google.api_core import gapic_v1 from google.api_core import grpc_helpers_async from google.auth import credentials as ga_credentials from google.auth.transport.grpc import SslCredentials import packaging.version import grpc from grpc.experimental import aio from google.cloud.tasks_v2beta3.types import cloudtasks from google.cloud.tasks_v2beta3.types import queue from google.cloud.tasks_v2beta3.types import queue as gct_queue from google.cloud.tasks_v2beta3.types import task from google.cloud.tasks_v2beta3.types import task as gct_task from google.iam.v1 import iam_policy_pb2 from google.iam.v1 import policy_pb2 from google.protobuf import empty_pb2 from .base import CloudTasksTransport, DEFAULT_CLIENT_INFO from .grpc import CloudTasksGrpcTransport class CloudTasksGrpcAsyncIOTransport(CloudTasksTransport): _grpc_channel: aio.Channel _stubs: Dict[str, Callable] = {} @classmethod def create_channel( cls, host: str = "cloudtasks.googleapis.com", credentials: ga_credentials.Credentials = None, credentials_file: Optional[str] = None, scopes: Optional[Sequence[str]] = None, quota_project_id: Optional[str] = None, **kwargs, ) -> aio.Channel: return grpc_helpers_async.create_channel( host, credentials=credentials, credentials_file=credentials_file, quota_project_id=quota_project_id, default_scopes=cls.AUTH_SCOPES, scopes=scopes, default_host=cls.DEFAULT_HOST, **kwargs, ) def __init__( self, *, host: str = "cloudtasks.googleapis.com", credentials: ga_credentials.Credentials = None, credentials_file: Optional[str] = None, scopes: Optional[Sequence[str]] = None, channel: aio.Channel = None, api_mtls_endpoint: str = None, client_cert_source: Callable[[], Tuple[bytes, bytes]] = None, ssl_channel_credentials: grpc.ChannelCredentials = None, client_cert_source_for_mtls: Callable[[], Tuple[bytes, bytes]] = None, quota_project_id=None, client_info: gapic_v1.client_info.ClientInfo = DEFAULT_CLIENT_INFO, always_use_jwt_access: Optional[bool] = False, ) -> None: self._grpc_channel = None self._ssl_channel_credentials = ssl_channel_credentials self._stubs: Dict[str, Callable] = {} if api_mtls_endpoint: warnings.warn("api_mtls_endpoint is deprecated", DeprecationWarning) if client_cert_source: warnings.warn("client_cert_source is deprecated", DeprecationWarning) if channel: credentials = False self._grpc_channel = channel self._ssl_channel_credentials = None else: if api_mtls_endpoint: host = api_mtls_endpoint if client_cert_source: cert, key = client_cert_source() self._ssl_channel_credentials = grpc.ssl_channel_credentials( certificate_chain=cert, private_key=key ) else: self._ssl_channel_credentials = SslCredentials().ssl_credentials else: if client_cert_source_for_mtls and not ssl_channel_credentials: cert, key = client_cert_source_for_mtls() self._ssl_channel_credentials = grpc.ssl_channel_credentials( certificate_chain=cert, private_key=key ) super().__init__( host=host, credentials=credentials, credentials_file=credentials_file, scopes=scopes, quota_project_id=quota_project_id, client_info=client_info, always_use_jwt_access=always_use_jwt_access, ) if not self._grpc_channel: self._grpc_channel = type(self).create_channel( self._host, credentials=self._credentials, credentials_file=credentials_file, scopes=self._scopes, ssl_credentials=self._ssl_channel_credentials, quota_project_id=quota_project_id, options=[ ("grpc.max_send_message_length", -1), ("grpc.max_receive_message_length", -1), ], ) 
self._prep_wrapped_messages(client_info) @property def grpc_channel(self) -> aio.Channel: return self._grpc_channel @property def list_queues( self, ) -> Callable[ [cloudtasks.ListQueuesRequest], Awaitable[cloudtasks.ListQueuesResponse] ]: if "list_queues" not in self._stubs: self._stubs["list_queues"] = self.grpc_channel.unary_unary( "/google.cloud.tasks.v2beta3.CloudTasks/ListQueues", request_serializer=cloudtasks.ListQueuesRequest.serialize, response_deserializer=cloudtasks.ListQueuesResponse.deserialize, ) return self._stubs["list_queues"] @property def get_queue( self, ) -> Callable[[cloudtasks.GetQueueRequest], Awaitable[queue.Queue]]: if "get_queue" not in self._stubs: self._stubs["get_queue"] = self.grpc_channel.unary_unary( "/google.cloud.tasks.v2beta3.CloudTasks/GetQueue", request_serializer=cloudtasks.GetQueueRequest.serialize, response_deserializer=queue.Queue.deserialize, ) return self._stubs["get_queue"] @property def create_queue( self, ) -> Callable[[cloudtasks.CreateQueueRequest], Awaitable[gct_queue.Queue]]: if "create_queue" not in self._stubs: self._stubs["create_queue"] = self.grpc_channel.unary_unary( "/google.cloud.tasks.v2beta3.CloudTasks/CreateQueue", request_serializer=cloudtasks.CreateQueueRequest.serialize, response_deserializer=gct_queue.Queue.deserialize, ) return self._stubs["create_queue"] @property def update_queue( self, ) -> Callable[[cloudtasks.UpdateQueueRequest], Awaitable[gct_queue.Queue]]: if "update_queue" not in self._stubs: self._stubs["update_queue"] = self.grpc_channel.unary_unary( "/google.cloud.tasks.v2beta3.CloudTasks/UpdateQueue", request_serializer=cloudtasks.UpdateQueueRequest.serialize, response_deserializer=gct_queue.Queue.deserialize, ) return self._stubs["update_queue"] @property def delete_queue( self, ) -> Callable[[cloudtasks.DeleteQueueRequest], Awaitable[empty_pb2.Empty]]: if "delete_queue" not in self._stubs: self._stubs["delete_queue"] = self.grpc_channel.unary_unary( "/google.cloud.tasks.v2beta3.CloudTasks/DeleteQueue", request_serializer=cloudtasks.DeleteQueueRequest.serialize, response_deserializer=empty_pb2.Empty.FromString, ) return self._stubs["delete_queue"] @property def purge_queue( self, ) -> Callable[[cloudtasks.PurgeQueueRequest], Awaitable[queue.Queue]]: if "purge_queue" not in self._stubs: self._stubs["purge_queue"] = self.grpc_channel.unary_unary( "/google.cloud.tasks.v2beta3.CloudTasks/PurgeQueue", request_serializer=cloudtasks.PurgeQueueRequest.serialize, response_deserializer=queue.Queue.deserialize, ) return self._stubs["purge_queue"] @property def pause_queue( self, ) -> Callable[[cloudtasks.PauseQueueRequest], Awaitable[queue.Queue]]: if "pause_queue" not in self._stubs: self._stubs["pause_queue"] = self.grpc_channel.unary_unary( "/google.cloud.tasks.v2beta3.CloudTasks/PauseQueue", request_serializer=cloudtasks.PauseQueueRequest.serialize, response_deserializer=queue.Queue.deserialize, ) return self._stubs["pause_queue"] @property def resume_queue( self, ) -> Callable[[cloudtasks.ResumeQueueRequest], Awaitable[queue.Queue]]: if "resume_queue" not in self._stubs: self._stubs["resume_queue"] = self.grpc_channel.unary_unary( "/google.cloud.tasks.v2beta3.CloudTasks/ResumeQueue", request_serializer=cloudtasks.ResumeQueueRequest.serialize, response_deserializer=queue.Queue.deserialize, ) return self._stubs["resume_queue"] @property def get_iam_policy( self, ) -> Callable[[iam_policy_pb2.GetIamPolicyRequest], Awaitable[policy_pb2.Policy]]: if "get_iam_policy" not in self._stubs: self._stubs["get_iam_policy"] = 
self.grpc_channel.unary_unary( "/google.cloud.tasks.v2beta3.CloudTasks/GetIamPolicy", request_serializer=iam_policy_pb2.GetIamPolicyRequest.SerializeToString, response_deserializer=policy_pb2.Policy.FromString, ) return self._stubs["get_iam_policy"] @property
Apache License 2.0
cloudant/python-cloudant
src/cloudant/feed.py
Feed.__iter__
python
def __iter__(self): return self
Makes this object an iterator.
https://github.com/cloudant/python-cloudant/blob/3bb26d75fa255802a5f308bbf9cff1ba3b34439b/src/cloudant/feed.py#L137-L141
import json from ._2to3 import iteritems_, next_, unicode_, STRTYPE, NONETYPE from .error import CloudantArgumentError, CloudantFeedException from ._common_util import ANY_ARG, ANY_TYPE, feed_arg_types, TYPE_CONVERTERS class Feed(object): def __init__(self, source, raw_data=False, **options): self._r_session = source.r_session self._raw_data = raw_data self._options = options self._source = source.__class__.__name__ if self._source == 'CouchDB': self._url = '/'.join([source.server_url, '_db_updates']) self._options['feed'] = self._options.get('feed', 'longpoll') self._options['heartbeat'] = self._options.get('heartbeat', True) elif self._source == 'Cloudant': self._url = '/'.join([source.server_url, '_db_updates']) else: self._url = '/'.join([source.database_url, '_changes']) self._chunk_size = self._options.pop('chunk_size', 512) self._resp = None self._lines = None self._last_seq = None self._stop = False @property def last_seq(self): return self._last_seq def stop(self): self._stop = True def _start(self): params = self._translate(self._options) self._resp = self._r_session.get(self._url, params=params, stream=True) self._resp.raise_for_status() self._lines = self._resp.iter_lines(self._chunk_size) def _translate(self, options): translation = dict() for key, val in iteritems_(options): self._validate(key, val, feed_arg_types(self._source)) try: if isinstance(val, STRTYPE): translation[key] = val elif not isinstance(val, NONETYPE): arg_converter = TYPE_CONVERTERS.get(type(val), json.dumps) translation[key] = arg_converter(val) except Exception as ex: raise CloudantArgumentError(115, key, ex) return translation def _validate(self, key, val, arg_types): if key in arg_types: arg_type = arg_types[key] else: if ANY_ARG not in arg_types: raise CloudantArgumentError(116, key) arg_type = arg_types[ANY_ARG] if arg_type == ANY_TYPE: return if (not isinstance(val, arg_type) or (isinstance(val, bool) and int in arg_type)): raise CloudantArgumentError(117, key, arg_type) if isinstance(val, int) and val < 0 and not isinstance(val, bool): raise CloudantArgumentError(118, key, val) if key == 'feed': valid_vals = ('continuous', 'normal', 'longpoll') if self._source == 'CouchDB': valid_vals = ('continuous', 'longpoll') if val not in valid_vals: raise CloudantArgumentError(119, val, valid_vals) if key == 'style' and val not in ('main_only', 'all_docs'): raise CloudantArgumentError(120, val)
Apache License 2.0
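Because __iter__ returns self, a Feed is consumed by iterating it directly; a minimal sketch, where db stands for a hypothetical already-opened Cloudant database object:

    feed = Feed(db)
    for change in feed:
        print(change)
        feed.stop()  # signals the feed to end the underlying long-running request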
pimoroni/unicornhatmini-python
examples/simon.py
Game.start
python
def start(self): self._lives = self._starting_lives self._level = self._starting_level self._compare = [] self._sequence = [random.choice([R, G, B, Y])] * (self._level + 1) self._current_playback_step = 0 self._set_mode('play_pattern')
Start the game. Sets the level to the starting level and builds a long-enough sequence to begin.
https://github.com/pimoroni/unicornhatmini-python/blob/ec52b016b7449b754eb30f0b7731f0ce70d18084/examples/simon.py#L276-L287
import time import math import random import colorsys from gpiozero import Button from unicornhatmini import UnicornHATMini unicornhatmini = UnicornHATMini() unicornhatmini.set_brightness(0.5) digits_5x3 = [ 0b111111000111111, 0b100011111100001, 0b101111010111101, 0b101011010111111, 0b111000010011111, 0b111011010110111, 0b111111010100111, 0b100001000011111, 0b111111010111111, 0b111001010011111 ] R = 0 G = 1 B = 2 Y = 3 class Display(): def __init__(self, output_device): self._output = output_device self._width, self._height = self._output.get_shape() self._br_red = 0 self._br_green = 0 self._br_blue = 0 self._br_yellow = 0 self._level = 0 self.red = (255, 0, 0) self.green = (0, 255, 0) self.blue = (0, 0, 255) self.yellow = (255, 255, 0) self._digit_left = None self._digit_right = None self._digit_left_br = 1.0 self._digit_right_br = 1.0 self._digit_left_color = (128, 128, 128) self._digit_right_color = (128, 128, 128) def _draw_light(self, brightness, x, y, r, g, b): r, g, b = [int(c * brightness) for c in (r, g, b)] self._draw_rect(x, y, 3, 3, r, g, b) def _draw_rect(self, x, y, w, h, r, g, b): for ry in range(h): for rx in range(w): self._output.set_pixel(x + rx, y + ry, r, g, b) def _draw_digit(self, digit, x, y, r, g, b): digit = digits_5x3[digit] cols = [ (digit >> 10) & 0b11111, (digit >> 5) & 0b11111, (digit) & 0b11111 ] for dx in range(3): col = cols[dx] for dy in range(5): if col & (1 << (4 - dy)): self._output.set_pixel(x + dx, y + dy, r, g, b) def clear(self): self._output.clear() def update(self): self._draw_light(self._br_red, 0, 0, *self.red) self._draw_light(self._br_blue, 0, self._height - 3, *self.green) self._draw_light(self._br_green, self._width - 3, 0, *self.blue) self._draw_light(self._br_yellow, self._width - 3, self._height - 3, *self.yellow) if self._digit_left is not None: r, g, b = [int(c * self._digit_left_br) for c in self._digit_left_color] self._draw_digit(self._digit_left, 5, 1, r, g, b) if self._digit_right is not None: r, g, b = [int(c * self._digit_right_br) for c in self._digit_right_color] self._draw_digit(self._digit_right, 9, 1, r, g, b) self._output.show() def set_light_brightness(self, red, green, blue, yellow): self._br_red = red self._br_green = green self._br_blue = blue self._br_yellow = yellow def set_digits(self, left, right): self._digit_left = left self._digit_right = right def set_digit_brightness(self, left, right): self._digit_left_br = left self._digit_right_br = right def set_digit_color(self, left, right): self._digit_left_color = left self._digit_right_color = right class Game(): def __init__(self, display, starting_lives=3, starting_level=0, mode='attract'): self._starting_lives = starting_lives self._starting_level = starting_level self._mode = mode self._display = display self._level = 0 self._lives = 0 self._sequence = [] self._compare = [] self._current_playback_step = 0 self._current_mode_start = 0 self._button_map = {'a': R, 'b': B, 'x': G, 'y': Y} def update(self): self._display.clear() getattr(self, "_{}".format(self._mode))(time.time()) self._display.update() def _set_mode(self, mode): self._mode = mode self._current_mode_start = time.time() def _attract(self, time): self._display.set_digits(5, 1) self._display.set_light_brightness( self._pulse(time / 2), self._pulse((time + 0.25) / 2), self._pulse((time + 0.5) / 2), self._pulse((time + 0.75) / 2) ) self._display.set_digit_brightness( self._pulse(time), self._pulse(time) ) self._display.set_digit_color( self._hue(time / 10), self._hue(time / 10 + 1) ) def _play_pattern(self, 
time): self._display_level((255, 0, 0)) br = [0, 0, 0, 0] color = self._sequence[self._current_playback_step] br[color] = self._pulse(time - self._current_mode_start) self._display.set_light_brightness(*br) if time - self._current_mode_start > (self._current_playback_step + 1): self._current_playback_step += 1 if self._current_playback_step >= len(self._sequence): self._current_playback_step = 0 self._set_mode('wait_for_input') def _flash_lives(self, time): fake_lives = self._lives if time - self._current_mode_start < 1.5: fake_lives += 1 self._display.set_digits(int(fake_lives / 10), fake_lives % 10) self._display.set_digit_brightness( self._pulse(time), self._pulse(time) ) self._display.set_digit_color((255, 0, 0), (255, 0, 0)) if time - self._current_mode_start > 3.0: if self._lives > 0: self._set_mode('play_pattern') else: self._set_mode('you_lose') def _you_win(self, time): self._display_level() self._display.set_light_brightness( self._pulse(time), self._pulse(time), self._pulse(time), self._pulse(time) ) def _you_lose(self, time): self._display.set_digits(0, 0) self._display.set_digit_brightness( self._pulse(time), self._pulse(time) ) self._display.set_digit_color((255, 0, 0), (255, 0, 0)) if time - self._current_mode_start > 20.0: self._set_mode('attract') def _wait_for_input(self, time): self._display_level() if self._compare == self._sequence[:len(self._compare)]: if len(self._compare) == len(self._sequence): self.next_level() else: self._compare = [] self._lives -= 1 self._set_mode('flash_lives') def _display_level(self, color=(255, 255, 255)): self._display.set_digit_brightness(0.5, 0.5) self._display.set_digit_color(color, color) self._display.set_digits( int(self._level / 10.0), self._level % 10 ) def _pulse(self, time): return (math.sin(time * 2 * math.pi - (math.pi / 2)) + 1) / 2.0 def _hue(self, h): return tuple([int(c * 255) for c in colorsys.hsv_to_rgb(h, 1.0, 1.0)])
MIT License
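A sketch of where start sits in the game loop, pieced together from the surrounding context above:

    display = Display(unicornhatmini)  # unicornhatmini is created at module import time above
    game = Game(display)
    game.start()  # builds the first colour sequence and switches the mode to 'play_pattern'
    while True:
        game.update()         # clears, runs the current mode handler, redraws
        time.sleep(1.0 / 60)  # roughly 60 frames per second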
distributedsystemsgroup/zoe
zoe_lib/state/execution.py
Execution.set_error
python
def set_error(self): self._status = self.ERROR_STATUS self.time_end = datetime.datetime.utcnow() self.sql_manager.executions.update(self.id, status=self._status, time_end=self.time_end)
The scheduler encountered an error starting or running the execution.
https://github.com/distributedsystemsgroup/zoe/blob/c8e0c908af1954a8b41d0f6de23d08589564f0ab/zoe_lib/state/execution.py#L170-L174
import datetime import logging import functools import psycopg2 try: from kazoo.client import KazooClient except ImportError: KazooClient = None from zoe_lib.state.base import BaseRecord, BaseTable import zoe_lib.config log = logging.getLogger(__name__) class Execution(BaseRecord): SUBMIT_STATUS = "submitted" QUEUED_STATUS = "queued" STARTING_STATUS = "starting" ERROR_STATUS = "error" RUNNING_STATUS = "running" CLEANING_UP_STATUS = "cleaning up" TERMINATED_STATUS = "terminated" def __init__(self, d, sql_manager): super().__init__(d, sql_manager) self.user_id = d['user_id'] self.name = d['name'] self.description = d['description'] if isinstance(d['time_submit'], datetime.datetime): self.time_submit = d['time_submit'] else: self.time_submit = datetime.datetime.utcfromtimestamp(d['time_submit']) if isinstance(d['time_submit'], datetime.datetime): self.time_start = d['time_start'] else: self.time_start = datetime.datetime.utcfromtimestamp(d['time_start']) if isinstance(d['time_submit'], datetime.datetime): self.time_end = d['time_end'] else: self.time_submit = datetime.datetime.utcfromtimestamp(d['time_start']) self._status = d['status'] self.error_message = d['error_message'] if d['size'] is not None: self.size = float(d['size']) else: try: self.size = self.description['size'] except KeyError: self.size = self.description['priority'] self.app_name = self.description['name'] def serialize(self): return { 'id': self.id, 'user_id': self.user_id, 'name': self.name, 'description': self.description, 'time_submit': (self.time_submit - datetime.datetime(1970, 1, 1)) / datetime.timedelta(seconds=1), 'time_start': None if self.time_start is None else (self.time_start - datetime.datetime(1970, 1, 1)) / datetime.timedelta(seconds=1), 'time_end': None if self.time_end is None else (self.time_end - datetime.datetime(1970, 1, 1)) / datetime.timedelta(seconds=1), 'status': self._status, 'error_message': self.error_message, 'services': [s.id for s in self.services], 'size': self.size } def __eq__(self, other): return self.id == other.id def set_queued(self): self._status = self.QUEUED_STATUS self.sql_manager.executions.update(self.id, status=self._status) def set_starting(self): self._status = self.STARTING_STATUS self.sql_manager.executions.update(self.id, status=self._status) def set_running(self): self._status = self.RUNNING_STATUS self.time_start = datetime.datetime.utcnow() self.sql_manager.executions.update(self.id, status=self._status, time_start=self.time_start) if zoe_lib.config.get_conf().traefik_zk_ips is not None: zk_cli = KazooClient(hosts=zoe_lib.config.get_conf().traefik_zk_ips) zk_cli.start() for service in self.services: for port in service.ports: if port.enable_proxy: format_args = { "ip_port": port.external_ip + ":" + str(port.external_port), "proxy_path": '{}/{}'.format(zoe_lib.config.get_conf().traefik_base_url, port.proxy_key()) } endpoint = port.url_template.format(**format_args).encode('utf-8') traefik_name = 'zoe_exec_{}_{}'.format(self.id, port.id) zk_cli.create('/traefik/backends/{}/servers/server/url'.format(traefik_name), endpoint, makepath=True) zk_cli.create('/traefik/frontends/{}/routes/path/rule'.format(traefik_name), 'PathPrefix:{}/{}'.format(zoe_lib.config.get_conf().traefik_base_url, port.proxy_key()).encode('utf-8'), makepath=True) zk_cli.create('/traefik/frontends/{}/backend'.format(traefik_name), traefik_name.encode('utf-8'), makepath=True) zk_cli.create('/traefik/alias') zk_cli.delete('/traefik/alias') zk_cli.stop() def set_cleaning_up(self): self._status = 
self.CLEANING_UP_STATUS self.sql_manager.executions.update(self.id, status=self._status) if zoe_lib.config.get_conf().traefik_zk_ips is not None: zk_cli = KazooClient(hosts=zoe_lib.config.get_conf().traefik_zk_ips) zk_cli.start() for service in self.services: for port in service.ports: if port.enable_proxy: traefik_name = 'zoe_exec_{}_{}'.format(self.id, port.id) zk_cli.delete('/traefik/backends/{}'.format(traefik_name), recursive=True) zk_cli.delete('/traefik/frontends/{}'.format(traefik_name), recursive=True) zk_cli.create('/traefik/alias') zk_cli.delete('/traefik/alias') zk_cli.stop() def set_terminated(self, reason=None): self._status = self.TERMINATED_STATUS self.time_end = datetime.datetime.utcnow() if reason is not None: self.sql_manager.executions.update(self.id, status=self._status, time_end=self.time_end, error_message=reason) else: self.sql_manager.executions.update(self.id, status=self._status, time_end=self.time_end)
Apache License 2.0
peercoin/peercoin_rpc
peercoin_rpc/peercoin_rpc.py
Client.getblockchaininfo
python
def getblockchaininfo(self): return self.req("getblockchaininfo")
return getblockchaininfo from peercoind
https://github.com/peercoin/peercoin_rpc/blob/d79b66523ecbc79eea4d9651f1dfe81da4112632/peercoin_rpc/peercoin_rpc.py#L118-L120
__copyright__ = "Copyright 2019, The Peerchemist" __license__ = "MIT" __email__ = "[email protected]" import requests import json import os class Client: def __init__( self, testnet=False, username=None, password=None, ip=None, port=None, directory=None, ): if not ip: self.ip = "localhost" else: self.ip = ip if not username and not password: if not directory: try: self.username, self.password = ( self.userpass() ) except: self.username, self.password = self.userpass( dir="peercoin" ) else: self.username, self.password = self.userpass( dir=directory ) else: self.username = username self.password = password if testnet is True: self.testnet = True self.port = 9904 self.url = "http://{0}:{1}".format(self.ip, self.port) else: self.testnet = False self.port = 9902 self.url = "http://{0}:{1}".format(self.ip, self.port) if port is not None: self.port = port self.url = "http://{0}:{1}".format(self.ip, self.port) self.session = requests.Session() self.session.auth = (self.username, self.password) self.session.headers.update({"content-type": "application/json"}) def userpass(self, dir="ppcoin"): source = os.path.expanduser("~/.{0}/{0}.conf").format(dir) dest = open(source, "r") with dest as conf: for line in conf: if line.startswith("rpcuser"): username = line.split("=")[1].strip() if line.startswith("rpcpassword"): password = line.split("=")[1].strip() return username, password def req(self, method, params=()): response = self.session.post( self.url, data=json.dumps({"method": method, "params": params, "jsonrpc": "1.1"}), ).json() if response["error"] is not None: return response["error"] else: return response["result"] def batch(self, reqs): batch_data = [] for req_id, req in enumerate(reqs): batch_data.append( {"method": req[0], "params": req[1], "jsonrpc": "2.0", "id": req_id} ) data = json.dumps(batch_data) response = self.session.post(self.url, data=data).json() return response
MIT License
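A hypothetical usage sketch for getblockchaininfo; the credentials are made up, and it assumes the package re-exports Client at the top level:

    from peercoin_rpc import Client

    node = Client(testnet=True, username="rpcuser", password="rpcpass")
    info = node.getblockchaininfo()  # value decoded from the JSON-RPC "result" field
    print(info)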