Column                Type            Length / values
repository_name       stringlengths   7 - 107
function_path         stringlengths   4 - 190
function_identifier   stringlengths   1 - 236
language              stringclasses   1 value
function              stringlengths   9 - 647k
docstring             stringlengths   5 - 488k
function_url          stringlengths   71 - 285
context               stringlengths   0 - 2.51M
license               stringclasses   5 values
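Each record carries the columns above. A minimal sketch of loading and inspecting such a dataset with the Hugging Face `datasets` library follows; the dataset path and the split name are assumptions for illustration, not values taken from this card.

# Sketch only: "user/python-functions-with-context" and the "train" split are
# hypothetical placeholders, not the real dataset path.
from datasets import load_dataset

ds = load_dataset("user/python-functions-with-context", split="train")
row = ds[0]
print(row["repository_name"], row["function_identifier"])
print(row["function"])        # the function source
print(row["docstring"])       # its docstring
print(row["context"][:200])   # surrounding file context, often very large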
schmittx/home-assistant-eero
custom_components/eero/switch.py
EeroSwitch.turn_on
python
def turn_on(self, **kwargs):
    setattr(self.resource, self.variable, True)
Turn the device on.
https://github.com/schmittx/home-assistant-eero/blob/3d6960ed239c63fe07d7b2a4da38bf70ea8f9eb0/custom_components/eero/switch.py#L163-L165
import logging from homeassistant.components.switch import DEVICE_CLASS_SWITCH, SwitchEntity from . import EeroEntity from .const import ( CONF_CLIENTS, CONF_EEROS, CONF_NETWORKS, CONF_PROFILES, DATA_COORDINATOR, DOMAIN as EERO_DOMAIN, ) _LOGGER = logging.getLogger(__name__) BASIC_TYPES = { "band_steering": [ "Band Steering", ], "guest_network_enabled": [ "Guest Network", ], "led_on": [ "LED Status Light", ], "paused": [ "Paused", ], "sqm": [ "Smart Queue Management", ], "upnp": [ "UPnP", ], "wpa3": [ "WPA3", ], } PREMIUM_TYPES = { "ad_block": [ "Ad Blocking", ], "block_gaming_content": [ "Gaming Content Filter", ], "block_illegal_content": [ "Illegal or Criminal Content Filter", ], "block_malware": [ "Advanced Security", ], "block_messaging_content": [ "Chat and Messaging Content Filter", ], "block_pornographic_content": [ "Adult Content Filter", ], "block_shopping_content": [ "Shopping Content Filter", ], "block_social_content": [ "Social Media Content Filter", ], "block_streaming_content": [ "Streaming Content Filter", ], "block_violent_content": [ "Violent Content Filter", ], "safe_search_enabled": [ "SafeSearch Content Filter", ], "youtube_restricted": [ "YouTube Restricted Content Filter", ], } SWITCH_TYPES = {**BASIC_TYPES, **PREMIUM_TYPES} async def async_setup_entry(hass, entry, async_add_entities): entry = hass.data[EERO_DOMAIN][entry.entry_id] conf_networks = entry[CONF_NETWORKS] conf_eeros = entry[CONF_EEROS] conf_profiles = entry[CONF_PROFILES] conf_clients = entry[CONF_CLIENTS] coordinator = entry[DATA_COORDINATOR] def get_entities(): entities = [] for network in coordinator.data.networks: if network.id in conf_networks: for variable in SWITCH_TYPES: if variable in PREMIUM_TYPES and not network.premium_status_active: continue elif hasattr(network, variable): entities.append(EeroSwitch(coordinator, network, None, variable)) for eero in network.eeros: if eero.id in conf_eeros: for variable in SWITCH_TYPES: if variable in PREMIUM_TYPES and not network.premium_status_active: continue elif hasattr(eero, variable): entities.append(EeroSwitch(coordinator, network, eero, variable)) for profile in network.profiles: if profile.id in conf_profiles: for variable in SWITCH_TYPES: if variable in PREMIUM_TYPES and not network.premium_status_active: continue elif hasattr(profile, variable): entities.append(EeroSwitch(coordinator, network, profile, variable)) for client in network.clients: if client.id in conf_clients: for variable in SWITCH_TYPES: if variable in PREMIUM_TYPES and not network.premium_status_active: continue elif hasattr(client, variable): entities.append(EeroSwitch(coordinator, network, client, variable)) return entities async_add_entities(await hass.async_add_job(get_entities), True) class EeroSwitch(SwitchEntity, EeroEntity): @property def name(self): if self.resource.is_client: return f"{self.network.name} {self.resource.name_connection_type} {SWITCH_TYPES[self.variable][0]}" elif self.resource.is_eero or self.resource.is_profile: return f"{self.network.name} {self.resource.name} {SWITCH_TYPES[self.variable][0]}" return f"{self.resource.name} {SWITCH_TYPES[self.variable][0]}" @property def device_class(self): return DEVICE_CLASS_SWITCH @property def is_on(self): return bool(getattr(self.resource, self.variable)) @property def device_state_attributes(self): attrs = super().device_state_attributes if self.variable == "guest_network_enabled" and self.is_on: attrs["guest_network_name"] = self.resource.guest_network_name attrs["connected_guest_clients"] = 
self.resource.connected_guest_clients_count return attrs
MIT License
ciscodevnet/webexteamssdk
webexteamssdk/api/team_memberships.py
TeamMembershipsAPI.get
python
def get(self, membershipId):
    check_type(membershipId, basestring)
    json_data = self._session.get(API_ENDPOINT + '/' + membershipId)
    return self._object_factory(OBJECT_TYPE, json_data)
Get details for a team membership, by ID. Args: membershipId(basestring): The team membership ID. Returns: TeamMembership: A TeamMembership object with the details of the requested team membership. Raises: TypeError: If the parameter types are incorrect. ApiError: If the Webex Teams cloud returns an error.
https://github.com/ciscodevnet/webexteamssdk/blob/673312779b8e05cf0535bea8b96599015cccbff1/webexteamssdk/api/team_memberships.py#L165-L186
from __future__ import ( absolute_import, division, print_function, unicode_literals, ) from builtins import * from past.builtins import basestring from ..generator_containers import generator_container from ..restsession import RestSession from ..utils import ( check_type, dict_from_items_with_values, ) API_ENDPOINT = 'team/memberships' OBJECT_TYPE = 'team_membership' class TeamMembershipsAPI(object): def __init__(self, session, object_factory): check_type(session, RestSession) super(TeamMembershipsAPI, self).__init__() self._session = session self._object_factory = object_factory @generator_container def list(self, teamId, max=100, **request_parameters): check_type(teamId, basestring) check_type(max, int, optional=True) params = dict_from_items_with_values( request_parameters, teamId=teamId, max=max, ) items = self._session.get_items(API_ENDPOINT, params=params) for item in items: yield self._object_factory(OBJECT_TYPE, item) def create(self, teamId, personId=None, personEmail=None, isModerator=False, **request_parameters): check_type(teamId, basestring) check_type(personId, basestring, optional=True) check_type(personEmail, basestring, optional=True) check_type(isModerator, bool, optional=True) post_data = dict_from_items_with_values( request_parameters, teamId=teamId, personId=personId, personEmail=personEmail, isModerator=isModerator, ) json_data = self._session.post(API_ENDPOINT, json=post_data) return self._object_factory(OBJECT_TYPE, json_data)
MIT License
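A hedged usage sketch for the TeamMembershipsAPI.get method shown above; it assumes a valid WEBEX_TEAMS_ACCESS_TOKEN in the environment, and the membership ID is a placeholder.

# Placeholder token/ID; per the docstring above, the client raises ApiError on a
# Webex-side failure and TypeError if the ID is not a string.
from webexteamssdk import WebexTeamsAPI

api = WebexTeamsAPI()  # reads WEBEX_TEAMS_ACCESS_TOKEN from the environment
membership = api.team_memberships.get("<membership-id>")  # placeholder ID
print(membership.personEmail, membership.isModerator)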
gamechanger/monufacture
monufacture/helpers.py
random_number
python
def random_number(a, b=None):
    def build(*args):
        return random.randrange(a, b)
    return build
Inserts a random number in the given range into the document.
https://github.com/gamechanger/monufacture/blob/348a9ffeb8c1073d45a7a819208be17ce51fcb6a/monufacture/helpers.py#L211-L215
import monufacture import string import random from pytz import timezone from datetime import datetime, timedelta from bson.objectid import ObjectId from bson.dbref import DBRef class Sequence(object): def __init__(self): self.seq_num = 0 def next(self): self.seq_num = self.seq_num + 1 return self.seq_num def sequence(fn=None): sequence = Sequence() if not fn: fn = lambda n: n def build(*args): return fn(sequence.next()) return build def dependent(fn): def build(obj): return fn(obj) return build def id_of(factory_, document_=None, **overrides): def build(*args): instance_overrides = {} for key, value in overrides.iteritems(): if callable(value): instance_overrides[key] = value(*args) else: instance_overrides[key] = value return monufacture.create(factory_, document_, **instance_overrides)["_id"] return build def text(*args, **kwargs): return random_text(*args, **kwargs) def random_text(length=10, spaces=False, digits=False, upper=True, lower=True, other_chars=[]): char_set = [] if upper: char_set += list(string.uppercase) if lower: char_set += list(string.lowercase) if spaces: char_set.append(" ") if digits: char_set += list(string.digits) char_set += other_chars def build(*args): return "".join([random.choice(char_set) for i in xrange(length)]) return build def dbref_to(factory, document=None, **overrides): def build(*args): collection = monufacture.get_factory(factory).collection.name _id = monufacture.create(factory, document, **overrides)["_id"] return DBRef(collection, _id) return build def date(year=None, month=None, day=None, hour=None, minute=None, second=None, microsecond=None, tz=None): if year or month or day or hour or minute or second or microsecond: if not (year and month and day): raise ValueError("Either all components of a date must be provided or none of them.") def compact_args(**kwargs): out = {} out.update((k, v) for k, v in kwargs.iteritems() if v is not None) return out dt_args = compact_args( year=year, month=month, day=day, hour=hour, minute=minute, second=second, microsecond=microsecond) def build_specific(*args): dt = datetime(**dt_args) if tz: dt = timezone(tz).localize(dt) return dt return build_specific def build_now(*args): return datetime.utcnow() return build_now def now(): return date() def _convert_years_months_to_days(timedeltas): if 'months' in timedeltas: timedeltas['days'] = timedeltas.setdefault('days', 0) + 30 * timedeltas['months'] del timedeltas['months'] if 'years' in timedeltas: timedeltas['days'] = timedeltas.setdefault('days', 0) + 365 * timedeltas['years'] del timedeltas['years'] def ago(**kwargs): def build(*args): _convert_years_months_to_days(kwargs) return datetime.utcnow() - timedelta(**kwargs) return build def from_now(**kwargs): def build(*args): _convert_years_months_to_days(kwargs) return datetime.utcnow() + timedelta(**kwargs) return build def list_of(fn, length): def build(*args): return [fn(*args) for i in range(length)] return build def object_id(): def build(*args): return ObjectId() return build def union(*fns): def build(*args): out = [] for fn in fns: out += fn(*args) return out return build def one_of(*values): def build(*args): return random.choice(values) return build
MIT License
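The monufacture helpers above all follow the same deferred-build pattern: each returns a callable that only produces a value when the factory actually builds a document. A small sketch using random_number:

# The helper returns a builder; the random draw happens only when the builder
# is called during document construction.
build = random_number(1, 10)   # no value drawn yet
value = build()                # random.randrange(1, 10) is evaluated here
assert 1 <= value < 10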
moluwole/bast
bast/controller.py
Controller.write_error
python
def write_error(self, status_code, **kwargs):
    reason = self._reason
    if self.settings.get("serve_traceback") and "exc_info" in kwargs:
        error = []
        for line in traceback.format_exception(*kwargs["exc_info"]):
            error.append(line)
    else:
        error = None
    data = {'_traceback': error, 'message': reason, 'code': status_code}
    content = self.render_exception(**data)
    self.write(content)
Handle Exceptions from the server. Formats the HTML into readable form
https://github.com/moluwole/bast/blob/231d3b57fb8fd110e6ddbc668b3909bb6f66b326/bast/controller.py#L36-L50
import importlib import os import traceback from tornado.gen import coroutine from tornado.util import unicode_type from tornado.web import RequestHandler from bast import Bast from .exception import BastException from .jsonifier import Json as json_ from .view import TemplateRendering class Controller(RequestHandler, TemplateRendering): method = None middleware = None providers = {} request_type = None def __init__(self, application, request, **kwargs): super(Controller, self).__init__(application, request, **kwargs) self.request = request self.application = application self.session_driver = os.getenv("SESSION") self.session = Bast.session['session']
MIT License
rebiocoder/bioforum
venv/Lib/site-packages/django/contrib/gis/geos/geometry.py
GEOSGeometryBase.geom_typeid
python
def geom_typeid(self):
    return capi.geos_typeid(self.ptr)
Return an integer representing the Geometry type.
https://github.com/rebiocoder/bioforum/blob/08c8ff2f07ae667d37ce343f537e878d78ac8fe2/venv/Lib/site-packages/django/contrib/gis/geos/geometry.py#L184-L186
import re from ctypes import addressof, byref, c_double from django.contrib.gis import gdal from django.contrib.gis.geometry import hex_regex, json_regex, wkt_regex from django.contrib.gis.geos import prototypes as capi from django.contrib.gis.geos.base import GEOSBase from django.contrib.gis.geos.coordseq import GEOSCoordSeq from django.contrib.gis.geos.error import GEOSException from django.contrib.gis.geos.libgeos import GEOM_PTR from django.contrib.gis.geos.mutable_list import ListMixin from django.contrib.gis.geos.prepared import PreparedGeometry from django.contrib.gis.geos.prototypes.io import ( ewkb_w, wkb_r, wkb_w, wkt_r, wkt_w, ) from django.utils.deconstruct import deconstructible from django.utils.encoding import force_bytes, force_text class GEOSGeometryBase(GEOSBase): _GEOS_CLASSES = None ptr_type = GEOM_PTR destructor = capi.destroy_geom has_cs = False def __init__(self, ptr, cls): self._ptr = ptr if type(self) in (GEOSGeometryBase, GEOSGeometry): if cls is None: if GEOSGeometryBase._GEOS_CLASSES is None: from .linestring import LineString, LinearRing from .point import Point from .polygon import Polygon from .collections import ( GeometryCollection, MultiPoint, MultiLineString, MultiPolygon, ) GEOSGeometryBase._GEOS_CLASSES = { 0: Point, 1: LineString, 2: LinearRing, 3: Polygon, 4: MultiPoint, 5: MultiLineString, 6: MultiPolygon, 7: GeometryCollection, } cls = GEOSGeometryBase._GEOS_CLASSES[self.geom_typeid] self.__class__ = cls self._post_init() def _post_init(self): self._cs = GEOSCoordSeq(capi.get_cs(self.ptr), self.hasz) if self.has_cs else None def __copy__(self): return self.clone() def __deepcopy__(self, memodict): return self.clone() def __str__(self): return self.ewkt def __repr__(self): return '<%s object at %s>' % (self.geom_type, hex(addressof(self.ptr))) def __getstate__(self): return bytes(self.wkb), self.srid def __setstate__(self, state): wkb, srid = state ptr = wkb_r().read(memoryview(wkb)) if not ptr: raise GEOSException('Invalid Geometry loaded from pickled state.') self.ptr = ptr self._post_init() self.srid = srid @classmethod def _from_wkb(cls, wkb): return wkb_r().read(wkb) @staticmethod def from_ewkt(ewkt): ewkt = force_bytes(ewkt) srid = None parts = ewkt.split(b';', 1) if len(parts) == 2: srid_part, wkt = parts match = re.match(b'SRID=(?P<srid>\-?\d+)', srid_part) if not match: raise ValueError('EWKT has invalid SRID part.') srid = int(match.group('srid')) else: wkt = ewkt if not wkt: raise ValueError('Expected WKT but got an empty string.') return GEOSGeometry(GEOSGeometry._from_wkt(wkt), srid=srid) @staticmethod def _from_wkt(wkt): return wkt_r().read(wkt) @classmethod def from_gml(cls, gml_string): return gdal.OGRGeometry.from_gml(gml_string).geos def __eq__(self, other): if isinstance(other, str): try: other = GEOSGeometry.from_ewkt(other) except (ValueError, GEOSException): return False return isinstance(other, GEOSGeometry) and self.srid == other.srid and self.equals_exact(other) def __or__(self, other): return self.union(other) def __and__(self, other): return self.intersection(other) def __sub__(self, other): return self.difference(other) def __xor__(self, other): return self.sym_difference(other) @property def coord_seq(self): if self.has_cs: return self._cs.clone() @property def geom_type(self): return capi.geos_type(self.ptr).decode() @property
MIT License
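geom_typeid maps directly onto the GEOS geometry classes registered in _GEOS_CLASSES above (0 = Point, 1 = LineString, ..., 7 = GeometryCollection). A short illustration, assuming a working GEOS installation behind Django's GIS bindings:

# Illustrative only; requires GEOS to be available.
from django.contrib.gis.geos import GEOSGeometry

point = GEOSGeometry("POINT (1 2)")
polygon = GEOSGeometry("POLYGON ((0 0, 0 1, 1 1, 0 0))")
print(point.geom_typeid)    # 0 -> Point
print(polygon.geom_typeid)  # 3 -> Polygon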
psd-tools/psd-tools
src/psd_tools/api/psd_image.py
PSDImage.bottom
python
def bottom(self):
    return self.height
Bottom coordinate. :return: `int`
https://github.com/psd-tools/psd-tools/blob/00241f3aed2ca52a8012e198a0f390ff7d8edca9/src/psd_tools/api/psd_image.py#L290-L296
from __future__ import absolute_import, unicode_literals import logging from psd_tools.constants import ( Clipping, Compression, ColorMode, SectionDivider, Resource, Tag ) from psd_tools.psd import PSD, FileHeader, ImageData, ImageResources from psd_tools.api.layers import ( Artboard, Group, PixelLayer, ShapeLayer, SmartObjectLayer, TypeLayer, GroupMixin, FillLayer ) from psd_tools.api import adjustments from psd_tools.api import deprecated logger = logging.getLogger(__name__) class PSDImage(GroupMixin): def __init__(self, data): assert isinstance(data, PSD) self._record = data self._layers = [] self._tagged_blocks = None self._init() @classmethod def new(cls, mode, size, color=0, depth=8, **kwargs): header = cls._make_header(mode, size, depth) image_data = ImageData.new(header, color=color, **kwargs) return cls( PSD( header=header, image_data=image_data, image_resources=ImageResources.new(), ) ) @classmethod def frompil(cls, image, compression=Compression.RLE): header = cls._make_header(image.mode, image.size) image_data = ImageData(compression=compression) image_data.set_data([channel.tobytes() for channel in image.split()], header) return cls( PSD( header=header, image_data=image_data, image_resources=ImageResources.new(), ) ) @classmethod def open(cls, fp, **kwargs): if hasattr(fp, 'read'): self = cls(PSD.read(fp, **kwargs)) else: with open(fp, 'rb') as f: self = cls(PSD.read(f, **kwargs)) return self def save(self, fp, mode='wb', **kwargs): if hasattr(fp, 'write'): self._record.write(fp, **kwargs) else: with open(fp, mode) as f: self._record.write(f, **kwargs) def topil(self, channel=None, apply_icc=False): from .pil_io import convert_image_data_to_pil if self.has_preview(): return convert_image_data_to_pil(self, channel, apply_icc) return None @deprecated def compose(self, force=False, bbox=None, layer_filter=None): from psd_tools.composer import compose image = None if (not force or len(self) == 0) and not bbox and not layer_filter: image = self.topil() if image is None: image = compose( self, bbox=bbox or self.viewbox, force=force, layer_filter=layer_filter, ) elif bbox is not None: image = image.crop(bbox) return image def numpy(self, channel=None): from .numpy_io import get_array return get_array(self, channel) def composite( self, viewport=None, force=False, color=1.0, alpha=0.0, layer_filter=None, ignore_preview=False, ): from psd_tools.composite import composite_pil if not (ignore_preview or force or layer_filter) and self.has_preview(): return self.topil() return composite_pil(self, color, alpha, viewport, layer_filter, force) def is_visible(self): return self.visible @property def parent(self): return None def is_group(self): return isinstance(self, GroupMixin) def has_preview(self): version_info = self.image_resources.get_data(Resource.VERSION_INFO) if version_info: return version_info.has_composite return True @property def name(self): return 'Root' @property def kind(self): return self.__class__.__name__.lower() @property def visible(self): return True @property def left(self): return 0 @property def top(self): return 0 @property def right(self): return self.width @property
MIT License
sublimelsp/lsp
plugin/session_buffer.py
SessionBuffer.language_id
python
def language_id(self) -> str:
    return self.get_language_id() or ""
Deprecated: use get_language_id
https://github.com/sublimelsp/lsp/blob/027333e68d740e1b708ab335f18fdec6d295283b/plugin/session_buffer.py#L133-L137
from .core.protocol import Diagnostic from .core.protocol import DiagnosticSeverity from .core.protocol import DocumentUri from .core.protocol import Range from .core.protocol import TextDocumentSyncKindFull from .core.protocol import TextDocumentSyncKindNone from .core.sessions import SessionViewProtocol from .core.settings import userprefs from .core.types import Capabilities from .core.types import debounced from .core.types import Debouncer from .core.types import FEATURES_TIMEOUT from .core.typing import Any, Iterable, Optional, List, Dict, Tuple from .core.views import DIAGNOSTIC_SEVERITY from .core.views import diagnostic_severity from .core.views import did_change from .core.views import did_close from .core.views import did_open from .core.views import did_save from .core.views import format_diagnostic_for_panel from .core.views import MissingUriError from .core.views import range_to_region from .core.views import will_save from weakref import WeakSet import sublime import time class PendingChanges: __slots__ = ('version', 'changes') def __init__(self, version: int, changes: Iterable[sublime.TextChange]) -> None: self.version = version self.changes = list(changes) def update(self, version: int, changes: Iterable[sublime.TextChange]) -> None: self.version = version self.changes.extend(changes) class DiagnosticSeverityData: __slots__ = ('regions', 'regions_with_tag', 'annotations', 'panel_contribution', 'scope', 'icon') def __init__(self, severity: int) -> None: self.regions = [] self.regions_with_tag = {} self.annotations = [] self.panel_contribution = [] _, _, self.scope, self.icon, _, _ = DIAGNOSTIC_SEVERITY[severity - 1] if userprefs().diagnostics_gutter_marker != "sign": self.icon = userprefs().diagnostics_gutter_marker class SessionBuffer: def __init__(self, session_view: SessionViewProtocol, buffer_id: int, uri: DocumentUri) -> None: view = session_view.view self.opened = False self.capabilities = Capabilities() self.session = session_view.session self.session_views = WeakSet() self.session_views.add(session_view) self.last_known_uri = uri self.id = buffer_id self.pending_changes = None self.diagnostics = [] self.data_per_severity = {} self.diagnostics_version = -1 self.diagnostics_flags = 0 self.diagnostics_are_visible = False self.last_text_change_time = 0.0 self.total_errors = 0 self.total_warnings = 0 self.should_show_diagnostics_panel = False self.diagnostics_debouncer = Debouncer() self._check_did_open(view) self.session.register_session_buffer_async(self) def __del__(self) -> None: mgr = self.session.manager() if mgr: mgr.update_diagnostics_panel_async() if not self.session.exiting: self._check_did_close() self.session.unregister_session_buffer_async(self) def _check_did_open(self, view: sublime.View) -> None: if not self.opened and self.should_notify_did_open(): language_id = self.get_language_id() if not language_id: return self.session.send_notification(did_open(view, language_id)) self.opened = True self.session.notify_plugin_on_session_buffer_change(self) def _check_did_close(self) -> None: if self.opened and self.should_notify_did_close(): self.session.send_notification(did_close(uri=self.last_known_uri)) self.opened = False def get_uri(self) -> Optional[str]: for sv in self.session_views: return sv.get_uri() return None def get_language_id(self) -> Optional[str]: for sv in self.session_views: return sv.get_language_id() return None def get_view_in_group(self, group: int) -> sublime.View: for sv in self.session_views: view = sv.get_view_for_group(group) if view: 
return view return next(iter(self.session_views)).view @property
MIT License
ni/hoplite
hoplite/client/remote_job.py
RemoteJob._get_job
python
def _get_job(self, force=False):
    time_elapsed = time.time() - self._last_poll
    if time_elapsed > .2 or force:
        resp = self.jget(self._daemon_addr + '/jobs/{0}'.format(self.uuid))
        if resp.status_code == 404:
            raise JobDoesNotExistError
        self._set_attributes_from_response_json(hoplite_loads(str(resp.text)))
        self._last_poll = time.time()
Called before most other requests as a status-code sanity check. This method is rate limited to avoid polling the daemon too often.
https://github.com/ni/hoplite/blob/bc1b01aa08ba21daa36f46b06000b62890096787/hoplite/client/remote_job.py#L166-L180
import pickle import time from hoplite.client.helpers import ClientMixin from hoplite.exceptions import ( JobDoesNotExistError, TimeoutError, ConnectionError, JobFailedError) from hoplite.serializer import hoplite_loads import requests.exceptions class RemoteJob(ClientMixin): def __init__(self, address, port=5000, name="", uuid="", api_key="", config={}): if ':' in address: self.address = address.split(':')[0] self.port = address.split(':')[1] else: self.address = address self.port = port self._daemon_addr = 'http://{0}:{1}'.format(self.address, self.port) self._config = config self.name = name self.uuid = uuid self._api_key = api_key self._last_poll = 0 try: if not self.uuid: self._create_job() self._get_job() except requests.exceptions.ConnectionError: raise ConnectionError(self.address) def config(self, force=False): self._get_job(force) return self._config def status(self, force=False): self._get_job(force) exception_dict = self._status.get("exception", None) if exception_dict: raise JobFailedError( self.address, self.uuid, pickle.loads(exception_dict['traceback']), exception_dict['previous_exception']) return self._status def start(self): self._get_job() resp = self.jput( self._daemon_addr + '/jobs/{0}/start'.format(self.uuid)) return resp.json()["started"] def join(self, timeout=-1): num_seconds = 0 poll_interval = .05 while num_seconds < timeout or timeout == -1: if self.finished(): return True time.sleep(poll_interval) num_seconds += poll_interval raise TimeoutError(self.uuid) def kill(self, force=False): self._get_job(force) resp = self.jput( self._daemon_addr + '/jobs/{0}/kill'.format(self.uuid)) return hoplite_loads(str(resp.text))["killed"] def running(self, force=False): self._get_job(force) return self._running def finished(self, force=False): self.status(force) return self._finished
MIT License
nipy/nipy
nipy/labs/utils/reproducibility_measures.py
cluster_threshold
python
def cluster_threshold(stat_map, domain, th, csize):
    if stat_map.shape[0] != domain.size:
        raise ValueError('incompatible dimensions')
    thresholded_domain = domain.mask(stat_map > th)
    label = thresholded_domain.connected_components()
    binary = -np.ones(domain.size)
    binary[stat_map > th] = label
    nbcc = len(np.unique(label))
    for i in range(nbcc):
        if np.sum(label == i) < csize:
            binary[binary == i] = -1
    binary = (binary > -1)
    return binary
Perform a thresholding of a map at the cluster-level Parameters ---------- stat_map: array of shape(nbvox) the input data domain: Nifti1Image instance, referential- and domain-defining image th (float): cluster-forming threshold csize (int>0): cluster size threshold Returns ------- binary array of shape (nvox): the binarized thresholded map Notes ----- Should be replaced by a more standard function in the future
https://github.com/nipy/nipy/blob/d16d268938dcd5c15748ca051532c21f57cf8a22/nipy/labs/utils/reproducibility_measures.py#L55-L92
from __future__ import absolute_import import numpy as np from nipy.io.nibcompat import get_affine from nipy.labs.spatial_models.discrete_domain import grid_domain_from_binary_array def histo_repro(h): k = np.size(h) - 1 if k == 1: return 0. nf = np.dot(h, np.arange(k + 1)) / k if nf == 0: return 0. n1k = np.arange(1, k + 1) res = 1.0 * np.dot(h[1:], n1k * (n1k - 1)) / (k * (k - 1)) return res / nf
BSD 3-Clause New or Revised License
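The cluster_threshold routine above thresholds the statistic map, labels the connected supra-threshold components, and discards clusters smaller than csize. A generic sketch of the same idea on a plain 2D array, using scipy.ndimage instead of nipy's domain objects (an assumption made purely for illustration):

# Not nipy's implementation; a simplified 2D analogue of the cluster-level thresholding step.
import numpy as np
from scipy import ndimage

def cluster_threshold_2d(stat_map, th, csize):
    mask = stat_map > th
    labels, n_components = ndimage.label(mask)   # connected supra-threshold components
    keep = np.zeros_like(mask)
    for i in range(1, n_components + 1):
        component = labels == i
        if component.sum() >= csize:             # keep only clusters of at least csize voxels
            keep |= component
    return keep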
jgm/pandocfilters
pandocfilters.py
get_value
python
def get_value(kv, key, value=None):
    res = []
    for k, v in kv:
        if k == key:
            value = v
        else:
            res.append([k, v])
    return value, res
get value from the keyvalues (options)
https://github.com/jgm/pandocfilters/blob/06f4db99548a129c3ee8ac667436cb51a80c0f58/pandocfilters.py#L59-L67
import codecs import hashlib import io import json import os import sys import atexit import shutil import tempfile def get_filename4code(module, content, ext=None): if os.getenv('PANDOCFILTER_CLEANUP'): imagedir = tempfile.mkdtemp(prefix=module) atexit.register(lambda: shutil.rmtree(imagedir)) else: imagedir = module + "-images" fn = hashlib.sha1(content.encode(sys.getfilesystemencoding())).hexdigest() try: os.mkdir(imagedir) sys.stderr.write('Created directory ' + imagedir + '\n') except OSError: sys.stderr.write('Could not create directory "' + imagedir + '"\n') if ext: fn += "." + ext return os.path.join(imagedir, fn)
BSD 3-Clause New or Revised License
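A quick example of get_value pulling one option out of a pandoc key/value attribute list while keeping the remaining pairs:

kv = [["width", "50%"], ["caption", "A figure"]]
value, rest = get_value(kv, "width", "auto")
print(value)  # "50%" (falls back to "auto" if the key is absent)
print(rest)   # [["caption", "A figure"]]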
ali5h/rules_pip
third_party/py/click/formatting.py
HelpFormatter.write_usage
python
def write_usage(self, prog, args="", prefix="Usage: "):
    usage_prefix = "{:>{w}}{} ".format(prefix, prog, w=self.current_indent)
    text_width = self.width - self.current_indent

    if text_width >= (term_len(usage_prefix) + 20):
        indent = " " * term_len(usage_prefix)
        self.write(
            wrap_text(
                args,
                text_width,
                initial_indent=usage_prefix,
                subsequent_indent=indent,
            )
        )
    else:
        self.write(usage_prefix)
        self.write("\n")
        indent = " " * (max(self.current_indent, term_len(prefix)) + 4)
        self.write(
            wrap_text(
                args, text_width, initial_indent=indent, subsequent_indent=indent
            )
        )

    self.write("\n")
Writes a usage line into the buffer. :param prog: the program name. :param args: whitespace separated list of arguments. :param prefix: the prefix for the first line.
https://github.com/ali5h/rules_pip/blob/fb02cb7bf5c03bc8cd4269679e4aea2e1839b501/third_party/py/click/formatting.py#L130-L162
from contextlib import contextmanager from ._compat import term_len from .parser import split_opt from .termui import get_terminal_size FORCED_WIDTH = None def measure_table(rows): widths = {} for row in rows: for idx, col in enumerate(row): widths[idx] = max(widths.get(idx, 0), term_len(col)) return tuple(y for x, y in sorted(widths.items())) def iter_rows(rows, col_count): for row in rows: row = tuple(row) yield row + ("",) * (col_count - len(row)) def wrap_text( text, width=78, initial_indent="", subsequent_indent="", preserve_paragraphs=False ): from ._textwrap import TextWrapper text = text.expandtabs() wrapper = TextWrapper( width, initial_indent=initial_indent, subsequent_indent=subsequent_indent, replace_whitespace=False, ) if not preserve_paragraphs: return wrapper.fill(text) p = [] buf = [] indent = None def _flush_par(): if not buf: return if buf[0].strip() == "\b": p.append((indent or 0, True, "\n".join(buf[1:]))) else: p.append((indent or 0, False, " ".join(buf))) del buf[:] for line in text.splitlines(): if not line: _flush_par() indent = None else: if indent is None: orig_len = term_len(line) line = line.lstrip() indent = orig_len - term_len(line) buf.append(line) _flush_par() rv = [] for indent, raw, text in p: with wrapper.extra_indent(" " * indent): if raw: rv.append(wrapper.indent_only(text)) else: rv.append(wrapper.fill(text)) return "\n\n".join(rv) class HelpFormatter(object): def __init__(self, indent_increment=2, width=None, max_width=None): self.indent_increment = indent_increment if max_width is None: max_width = 80 if width is None: width = FORCED_WIDTH if width is None: width = max(min(get_terminal_size()[0], max_width) - 2, 50) self.width = width self.current_indent = 0 self.buffer = [] def write(self, string): self.buffer.append(string) def indent(self): self.current_indent += self.indent_increment def dedent(self): self.current_indent -= self.indent_increment
MIT License
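A small usage sketch for HelpFormatter.write_usage; click itself drives the formatter this way and then joins the buffer into the final help text.

# Sketch only; exact wrapping depends on the detected terminal width.
from click.formatting import HelpFormatter

formatter = HelpFormatter()
formatter.write_usage("mycli", "[OPTIONS] COMMAND [ARGS]...")
print("".join(formatter.buffer), end="")
# Usage: mycli [OPTIONS] COMMAND [ARGS]...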
neurotechx/moabb
moabb/datasets/base.py
BaseDataset.get_data
python
def get_data(self, subjects=None):
    if subjects is None:
        subjects = self.subject_list

    if not isinstance(subjects, list):
        raise ValueError("subjects must be a list")

    data = dict()
    for subject in subjects:
        if subject not in self.subject_list:
            raise ValueError("Invalid subject {:d} given".format(subject))
        data[subject] = self._get_single_subject_data(subject)

    return data
Return the data corresponding to a list of subjects. The returned data is a dictionary with the following structure:: data = {'subject_id' : {'session_id': {'run_id': raw} } } Subjects are on top, then sessions, then runs. A session is a recording done in a single day, without removing the EEG cap, and is constituted of at least one run. A run is a single contiguous recording. Some datasets break a session into multiple runs. Parameters ---------- subjects: List of int List of subject numbers Returns ------- data: Dict dict containing the raw data
https://github.com/neurotechx/moabb/blob/70d27fdb7b96b671d4dfa716451cbe6e49cd7bb6/moabb/datasets/base.py#L77-L115
import abc import logging from inspect import signature log = logging.getLogger(__name__) class BaseDataset(metaclass=abc.ABCMeta): def __init__( self, subjects, sessions_per_subject, events, code, interval, paradigm, doi=None, unit_factor=1e6, ): try: _ = iter(subjects) except TypeError: raise ValueError("subjects must be a iterable, like a list") from None self.subject_list = subjects self.n_sessions = sessions_per_subject self.event_id = events self.code = code self.interval = interval self.paradigm = paradigm self.doi = doi self.unit_factor = unit_factor
BSD 3-Clause New or Revised License
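A hedged example of the nested structure BaseDataset.get_data returns; BNCI2014001 is one concrete MOABB dataset, and the session/run keys in the comment are illustrative since they vary per dataset (fetching it also downloads the recordings).

# Assumes the MOABB BNCI2014001 dataset; keys such as "session_T"/"run_0" are illustrative.
from moabb.datasets import BNCI2014001

dataset = BNCI2014001()
data = dataset.get_data(subjects=[1])
raw = data[1]["session_T"]["run_0"]   # subject -> session -> run -> mne Raw object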
carlmontanari/nornir_ansible
nornir_ansible/plugins/inventory/ansible.py
_get_inventory_element
python
def _get_inventory_element(
    typ: Type[HostOrGroup], data: Dict[str, Any], name: str, defaults: Defaults
) -> HostOrGroup:
    return typ(
        name=name,
        hostname=data.get("hostname"),
        port=data.get("port"),
        username=data.get("username"),
        password=data.get("password"),
        platform=data.get("platform"),
        data=data.get("data"),
        groups=data.get("groups"),
        defaults=defaults,
        connection_options=_get_connection_options(data.get("connection_options", {})),
    )
Get inventory information for a given host/group Arguments: data: dictionary of host or group data to serialize
https://github.com/carlmontanari/nornir_ansible/blob/f5ef4e792bdcce7071a35ce624ef55497b423463/nornir_ansible/plugins/inventory/ansible.py#L475-L496
import configparser as cp import logging from collections import defaultdict from pathlib import Path from typing import Any, DefaultDict, Dict, List, MutableMapping, Optional, Tuple, Type, Union, cast import ruamel.yaml from mypy_extensions import TypedDict from nornir.core.exceptions import NornirNoValidInventoryError from nornir.core.inventory import ( ConnectionOptions, Defaults, Group, Host, HostOrGroup, Inventory, ParentGroups, ) from ruamel.yaml.composer import ComposerError from ruamel.yaml.scanner import ScannerError VARS_FILENAME_EXTENSIONS = ["", ".ini", ".yml", ".yaml"] RESERVED_FIELDS = ("hostname", "port", "username", "password", "platform", "connection_options") YAML = ruamel.yaml.YAML(typ="safe") LOG = logging.getLogger(__name__) VarsDict = Dict[str, Any] AnsibleHostsDict = Dict[str, Optional[VarsDict]] AnsibleGroupDataDict = TypedDict( "AnsibleGroupDataDict", {"children": Dict[str, Any], "vars": VarsDict, "hosts": AnsibleHostsDict}, total=False, ) AnsibleGroupsDict = Dict[str, AnsibleGroupDataDict] class AnsibleParser: def __init__(self, hostsfile: str) -> None: self.hostsfile = hostsfile self.path = str(Path(hostsfile).absolute().parents[0]) self.hosts: Dict[str, Any] = {} self.groups: Dict[str, Any] = {} self.defaults: Dict[str, Any] = {"data": {}} self.original_data: Optional[AnsibleGroupsDict] = None self.load_hosts_file() def parse_group( self, group: str, data: AnsibleGroupDataDict, parent: Optional[str] = None ) -> None: data = data or {} if group == "defaults": group_file = "all" dest_group = self.defaults else: self.add(group, self.groups) group_file = group dest_group = self.groups[group] if parent and parent != "defaults": dest_group["groups"].append(parent) group_data = data.get("vars", {}) vars_file_data = {} if self._vars_file_exists(f"{self.path}/group_vars/{group_file}"): vars_file_data = self.read_vars_file( element=group_file, path=self.path, is_host=False, is_dir=False ) elif Path(f"{self.path}/group_vars/{group_file}").is_dir(): for file in self._get_all_files(f"{self.path}/group_vars/{group_file}"): t_vars_file_data = self.read_vars_file( element=group_file, path=file, is_host=False, is_dir=True, ) if isinstance(t_vars_file_data, dict): vars_file_data = {**t_vars_file_data, **vars_file_data} self.normalize_data(dest_group, group_data, vars_file_data) self.map_nornir_vars(dest_group) self.parse_hosts(data.get("hosts", {}), parent=group) for children, children_data in data.get("children", {}).items(): self.parse_group(children, cast(AnsibleGroupDataDict, children_data), parent=group) def parse(self) -> None: if self.original_data is not None: self.parse_group("defaults", self.original_data["all"]) self.sort_groups() def parse_hosts(self, hosts: AnsibleHostsDict, parent: Optional[str] = None) -> None: for host, data in hosts.items(): data = data or {} self.add(host, self.hosts) if parent and parent != "defaults": self.hosts[host]["groups"].append(parent) vars_file_data = {} if self._vars_file_exists(f"{self.path}/host_vars/{host}"): vars_file_data = self.read_vars_file( element=host, path=self.path, is_host=True, is_dir=False ) elif Path(f"{self.path}/host_vars/{host}").is_dir(): vars_file_data = {} for file in self._get_all_files(f"{self.path}/host_vars/{host}"): t_vars_file_data = self.read_vars_file( element=host, path=file, is_host=True, is_dir=True, ) if isinstance(t_vars_file_data, dict): vars_file_data = {**t_vars_file_data, **vars_file_data} self.normalize_data(self.hosts[host], data, vars_file_data, host) self.map_nornir_vars(self.hosts[host]) 
@staticmethod def _get_all_files(path: str) -> List[str]: files_list = [ str(file) for file in Path(path).rglob("*") if Path(file).suffix in VARS_FILENAME_EXTENSIONS and Path(file).is_file() ] return files_list @staticmethod def _vars_file_exists(path: str) -> bool: for ext in VARS_FILENAME_EXTENSIONS: if Path(f"{path}{ext}").is_file(): return True return False def normalize_data( self, host_or_group: Dict[str, Any], data: Dict[str, Any], vars_data: Dict[str, Any], hostname: Optional[str] = None, ) -> None: self.map_nornir_vars(data) for k, v in data.items(): if k in RESERVED_FIELDS: host_or_group[k] = v else: host_or_group["data"][k] = v self.map_nornir_vars(vars_data) for k, v in vars_data.items(): if k in RESERVED_FIELDS: host_or_group[k] = v else: host_or_group["data"][k] = v for field in RESERVED_FIELDS: if field not in host_or_group: if field == "connection_options": host_or_group[field] = {} elif field == "hostname" and hostname is not None: host_or_group[field] = hostname else: host_or_group[field] = None def sort_groups(self) -> None: for host in self.hosts.values(): host["groups"].sort() for name, group in self.groups.items(): if name == "defaults": continue group["groups"].sort() @staticmethod def read_vars_file( element: str, path: str, is_host: bool = True, is_dir: bool = False ) -> VarsDict: sub_dir = "host_vars" if is_host else "group_vars" vars_dir = Path(path) / sub_dir if is_dir: with open(path) as f: for ext in VARS_FILENAME_EXTENSIONS: if Path(f"{path}{ext}").is_file(): LOG.debug("AnsibleInventory: reading var file %r", path) return cast(Dict[str, Any], YAML.load(f)) elif vars_dir.is_dir(): vars_file_base = vars_dir / element for extension in VARS_FILENAME_EXTENSIONS: vars_file = vars_file_base.with_suffix(vars_file_base.suffix + extension) if vars_file.is_file(): with open(vars_file) as f: LOG.debug("AnsibleInventory: reading var file %r", vars_file) return cast(Dict[str, Any], YAML.load(f)) LOG.debug( "AnsibleInventory: no vars file was found with the path %r " "and one of the supported extensions: %s", vars_file_base, VARS_FILENAME_EXTENSIONS, ) return {} @staticmethod def map_nornir_vars(obj: VarsDict) -> None: mappings = { "ansible_host": "hostname", "ansible_port": "port", "ansible_user": "username", "ansible_password": "password", } for ansible_var, nornir_var in mappings.items(): if ansible_var in obj: obj[nornir_var] = obj.pop(ansible_var) @staticmethod def add(element: str, element_dict: Dict[str, VarsDict]) -> None: if element not in element_dict: element_dict[element] = {"groups": [], "data": {}} def load_hosts_file(self) -> None: raise NotImplementedError class INIParser(AnsibleParser): @staticmethod def normalize_value(value: str) -> Union[str, int]: try: return int(value) except (ValueError, TypeError): return value @staticmethod def normalize_content(content: str) -> VarsDict: result: VarsDict = {} if not content: return result for option in content.split(): key, value = option.split("=") result[key] = INIParser.normalize_value(value) return result @staticmethod def process_meta(meta: Optional[str], section: MutableMapping[str, str]) -> Dict[str, Any]: if meta == "vars": return {key: INIParser.normalize_value(value) for key, value in section.items()} if meta == "children": return {group_name: {} for group_name in section} raise ValueError(f"Unknown tag {meta}") def normalize(self, data: cp.ConfigParser) -> Dict[str, AnsibleGroupDataDict]: groups: DefaultDict[str, Dict[str, Any]] = defaultdict(dict) result: Dict[str, Dict[str, Dict[str, Dict[str, Any]]]] 
= {"all": {"children": groups}} for section_name, section in data.items(): if section_name == "DEFAULT": continue if ":" in section_name: group_name, meta = section_name.split(":") subsection = self.process_meta(meta, section) if group_name == "all": result["all"][meta] = subsection else: groups[group_name][meta] = subsection else: groups[section_name]["hosts"] = { host: self.normalize_content(host_vars) for host, host_vars in section.items() } return cast(AnsibleGroupsDict, result) def load_hosts_file(self) -> None: original_data = cp.ConfigParser(interpolation=None, allow_no_value=True, delimiters=" =") original_data.read(self.hostsfile) self.original_data = self.normalize(original_data) class YAMLParser(AnsibleParser): def load_hosts_file(self) -> None: with open(self.hostsfile, "r") as f: self.original_data = cast(AnsibleGroupsDict, YAML.load(f)) def parse(hostsfile: str) -> Tuple[Dict[str, Any], Dict[str, Any], Dict[str, Any]]: try: parser: AnsibleParser = INIParser(hostsfile) except cp.Error: try: parser = YAMLParser(hostsfile) except (ScannerError, ComposerError) as exc: LOG.error("AnsibleInventory: file %r is not INI or YAML file", hostsfile) raise NornirNoValidInventoryError( f"AnsibleInventory: no valid inventory source(s) to parse. Tried: {hostsfile}" ) from exc parser.parse() return parser.hosts, parser.groups, parser.defaults def _get_connection_options(data: Dict[str, Any]) -> Dict[str, ConnectionOptions]: connection_options = {} for connection_name, connection_data in data.items(): connection_options[connection_name] = ConnectionOptions( hostname=connection_data.get("hostname"), port=connection_data.get("port"), username=connection_data.get("username"), password=connection_data.get("password"), platform=connection_data.get("platform"), extras=connection_data.get("extras"), ) return connection_options def _get_defaults(data: Dict[str, Any]) -> Defaults: return Defaults( hostname=data.get("hostname"), port=data.get("port"), username=data.get("username"), password=data.get("password"), platform=data.get("platform"), data=data.get("data"), connection_options=_get_connection_options(data.get("connection_options", {})), )
Apache License 2.0
nosarthur/gita
gita/utils.py
get_relative_path
python
def get_relative_path(kid: str, parent: str) -> Union[List[str], None]:
    if parent == '':
        return None
    if parent == os.path.commonpath((kid, parent)):
        rel = os.path.normpath(os.path.relpath(kid, parent)).split(os.sep)
        if rel == ['.']:
            rel = []
        return rel
    else:
        return None
Return the relative path components of `kid` with respect to `parent` if `kid` lies under `parent`, otherwise None. Both `kid` and `parent` should be absolute paths without trailing /
https://github.com/nosarthur/gita/blob/09e6f755c95764ada1ca0e677aa74e49c32f5ab4/gita/utils.py#L20-L37
import sys import os import json import csv import asyncio import platform import subprocess from functools import lru_cache, partial from pathlib import Path from typing import List, Dict, Coroutine, Union, Iterator, Tuple from collections import Counter, defaultdict from . import info from . import common MAX_INT = sys.maxsize
MIT License
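A few behaviour examples for get_relative_path, assuming absolute POSIX-style paths with no trailing separator:

# Return values follow the code above: a list of path components when kid lies
# under parent, an empty list when they are equal, otherwise None.
get_relative_path("/repos/group/proj", "/repos")   # ['group', 'proj']
get_relative_path("/repos", "/repos")              # []
get_relative_path("/other/proj", "/repos")         # None
get_relative_path("/repos/proj", "")               # None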
mila-iqia/myia
myia/operations/prim_random_initialize.py
bprop_random_initialize
python
def bprop_random_initialize(seed, out, dout):
    return (zeros_like(seed),)
Backpropagator for primitive `random_initialize`.
https://github.com/mila-iqia/myia/blob/56774a39579b4ec4123f44843ad4ca688acc859b/myia/operations/prim_random_initialize.py#L27-L29
import numpy as np from .. import xtype from ..lib import ( AbstractRandomState, bprop_to_grad_transform, standard_prim, zeros_like, ) from . import primitives as P def pyimpl_random_initialize(seed): return np.random.RandomState(seed) @standard_prim(P.random_initialize) async def infer_random_initialize(self, engine, seed: xtype.u32): return AbstractRandomState() @bprop_to_grad_transform(P.random_initialize)
MIT License
tlc-pack/tenset
python/tvm/contrib/tedd.py
insert_dot_id
python
def insert_dot_id(sch):
    for stage_idx, stage in enumerate(sch["stages"]):
        dom_path = [stage_idx]
        stage["id"] = dom_path_to_string(dom_path, stage["type"])
        for itervar_idx, itervar in enumerate(stage["all_itervars"]):
            dom_path = [stage_idx, itervar_idx]
            itervar["id"] = dom_path_to_string(dom_path, itervar["type"])
        for rel_idx, rel in enumerate(stage["relations"]):
            dom_path = [stage_idx, rel_idx]
            rel["id"] = dom_path_to_string(dom_path, rel["type"])
        for tensor_idx, tensor in enumerate(stage["output_tensors"]):
            dom_path = [stage_idx, tensor_idx]
            tensor["id"] = dom_path_to_string(dom_path, tensor["type"])
    return sch
Insert unique ID for each node in the DOM tree. They are used as Dot node ID.
https://github.com/tlc-pack/tenset/blob/3f7ed0291df47331d43f43a064fffacdc2914b47/python/tvm/contrib/tedd.py#L62-L78
import html import json import warnings from graphviz import Digraph from graphviz import Source import tvm TVMDD_TABLE_BODY_WIDTH = 30 ITERVAR_TYPE_STRING_MAP = { 0: ("kDataPar", "#FFFFFF"), 1: ("kThreadIndex", "#2980B9"), 2: ("kCommReduce", "#FAD7A0"), 3: ("kOrdered", "#D35400"), 4: ("kOpaque", "#ABB2B9"), 5: ("kUnrolled", "#D2B4DE"), 6: ("kVectorized", "#AED6F1"), 7: ("kParallelized", "#F5B7B1"), 8: ("kTensorized", "#A9DFBF"), } PALETTE = { 0: "#000000", 1: "#922B21", 2: "#76448A", 3: "#1F618D", 4: "#148F77", 5: "#B7950B", 6: "#AF601A", 7: "#F5B7B1", 8: "#A9DFBF", } PALETTE_SIZE = 9 def dom_path_to_string(dom_path, prefix=""): path_string = prefix for index in dom_path: path_string = path_string + "_" + str(index) return path_string
Apache License 2.0
ekimekim/factoriocalc
factoriocalc/calculator.py
split_into_steps
python
def split_into_steps(processes, input_limit=None, input_liquid_limit=None):
    def limit(item, input=False):
        if input and is_liquid(item) and input_liquid_limit is not None:
            return input_liquid_limit
        elif input and not is_liquid(item) and input_limit is not None:
            return input_limit
        else:
            return line_limit(item)

    results = []
    inputs = []
    for process in processes.values():
        steps = max(
            [
                throughput / limit(item, process.is_input)
                for item, throughput in process.inputs().items()
            ] + [
                throughput / limit(item, process.is_input)
                for item, throughput in process.outputs().items()
            ]
        )
        whole_steps, leftover = divmod(steps, 1)
        maximal_step = process.rescale(process.throughput / steps)
        fractional_step = maximal_step.rescale(maximal_step.throughput * leftover)
        part = [maximal_step] * whole_steps
        if leftover:
            part.append(fractional_step)
        if process.is_input:
            inputs += part
        else:
            results += part
    return results, inputs
Splits a dict of full processes into an unordered list of steps, where each step uses no more than 1 belt for each input or output. To prevent balance issues, all but the final step is maximised, i.e. scaled to the point that one or more inputs or outputs is running at exactly 45 items/sec. Since raw inputs aren't really a "step", they are returned separately. Inputs are optionally split by the lower limits input_limit and input_liquid_limit. Returns (steps, inputs)
https://github.com/ekimekim/factoriocalc/blob/18583ee0ea16a12c061b272db68469edee86606d/factoriocalc/calculator.py#L257-L305
from fractions import Fraction from .util import line_limit, is_liquid class Process(object): def __init__(self, item, recipe, throughput, outputs=None): self.item = item self.recipe = recipe self.throughput = throughput if outputs: self.per_process_outputs = outputs elif self.recipe and self.recipe.is_virtual: self.per_process_outputs = {} else: self.per_process_outputs = {item: 1} @property def is_input(self): return self.recipe is None def buildings(self): return None if self.is_input else self.throughput / self.recipe.throughput def inputs(self): return {} if self.is_input else { k: v * self.throughput for k, v in self.recipe.inputs.items() } def outputs(self): return {k: v * self.throughput for k, v in self.per_process_outputs.items()} def rescale(self, new_throughput): return type(self)(self.item, self.recipe, new_throughput, self.per_process_outputs) def __str__(self): return "<{cls.__name__}: {throughput:.2f}/sec of {self.item}>".format( cls=type(self), self=self, throughput=float(self.throughput) ) __repr__ = __str__ def merge_processes_into(a, b): for k, v in b.items(): if k in a: assert a[k].recipe == v.recipe a[k].throughput += v.throughput else: a[k] = v def merge_into(a, b): for k, v in b.items(): a[k] = a.get(k, 0) + v class Calculator(object): DEFAULT_MODS = ['prod 3'] * 4 + ['speed 3'] * 4 def __init__(self, datafile, stop_items=[], module_priorities=DEFAULT_MODS, beacon_speed=0, oil_beacon_speed=None): self.datafile = datafile self.stop_items = stop_items self.module_priorities = module_priorities self.beacon_speed = beacon_speed self.oil_beacon_speed = beacon_speed if oil_beacon_speed is None else oil_beacon_speed def solve(self, item, throughput): if item not in self.datafile.recipes or item in self.stop_items: return {item: Process(item, None, throughput)} recipe = self.datafile.recipes[item] recipe = self.datafile.resolve_recipe(recipe, self.module_priorities, self.beacon_speed) result = {item: Process(item, recipe, throughput)} for name, amount in recipe.inputs.items(): amount *= throughput subresult = self.solve(name, amount) merge_processes_into(result, subresult) return result def solve_all(self, items): results = {} for item, throughput in items.items(): merge_processes_into(results, self.solve(item, throughput)) return results def solve_oil(self, processes): HEAVY_PER_PROCESS, LIGHT_PER_PROCESS, PETROL_PER_PROCESS = 10, 45, 55 refinery_recipe = self.datafile.recipes['oil products'] refinery_recipe = self.datafile.resolve_recipe(refinery_recipe, self.module_priorities, self.oil_beacon_speed) heavy_crack_recipe = self.datafile.recipes['heavy oil cracking'] heavy_crack_recipe = self.datafile.resolve_recipe(heavy_crack_recipe, self.module_priorities, self.beacon_speed) light_crack_recipe = self.datafile.recipes['light oil cracking'] light_crack_recipe = self.datafile.resolve_recipe(light_crack_recipe, self.module_priorities, self.beacon_speed) light_per_heavy = Fraction(1) / heavy_crack_recipe.inputs['heavy oil'] petrol_per_light = Fraction(1) / light_crack_recipe.inputs['light oil'] excesses = {} heavy_cracking = 0 light_cracking = 0 oil_processing = 0 heavy_oil_needed = processes.pop('heavy oil').throughput if 'heavy oil' in processes else 0 light_oil_needed = processes.pop('light oil').throughput if 'light oil' in processes else 0 petroleum_needed = processes.pop('petroleum').throughput if 'petroleum' in processes else 0 oil_processing = Fraction(heavy_oil_needed) / HEAVY_PER_PROCESS extra_light = light_oil_needed - oil_processing * LIGHT_PER_PROCESS if 
extra_light < 0: excesses['light oil'] = extra_light else: total_light_per_process = LIGHT_PER_PROCESS + HEAVY_PER_PROCESS * light_per_heavy processing_for_light = extra_light / total_light_per_process light_from_cracking = extra_light - processing_for_light * LIGHT_PER_PROCESS oil_processing += processing_for_light heavy_cracking += light_from_cracking extra_petrol = petroleum_needed - oil_processing * PETROL_PER_PROCESS if extra_petrol < 0: excesses['petroleum'] = extra_petrol else: petrol_available = -excesses.get('light oil', 0) * petrol_per_light if petrol_available > extra_petrol: excesses['light oil'] += extra_petrol / petrol_per_light light_cracking += extra_petrol else: extra_petrol -= petrol_available light_cracking += petrol_available excesses.pop('light oil', None) total_petrol_per_process = PETROL_PER_PROCESS + petrol_per_light * ( LIGHT_PER_PROCESS + light_per_heavy * HEAVY_PER_PROCESS ) processing_for_petrol = extra_petrol / total_petrol_per_process petrol_from_cracking = extra_petrol - processing_for_petrol * PETROL_PER_PROCESS light_to_crack = petrol_from_cracking / petrol_per_light light_from_cracking = light_to_crack - processing_for_petrol * LIGHT_PER_PROCESS oil_processing += processing_for_petrol heavy_cracking += light_from_cracking light_cracking += petrol_from_cracking if excesses: raise ValueError("Handling exccess oil products is not implemeted: {}".format(excesses)) new_processes = [ Process('oil products', refinery_recipe, oil_processing, outputs={ 'heavy oil': HEAVY_PER_PROCESS, 'light oil': LIGHT_PER_PROCESS, 'petroleum': PETROL_PER_PROCESS, }), Process('heavy oil cracking', heavy_crack_recipe, heavy_cracking, outputs={'light oil': 1}), Process('light oil cracking', light_crack_recipe, light_cracking, outputs={'petroleum': 1}), ] new_processes = {p.item: p for p in new_processes if p.throughput} new_inputs = {} for process in new_processes.values(): merge_into(new_inputs, process.inputs()) for item in ('heavy oil', 'light oil', 'petroleum'): new_inputs.pop(item, None) merge_processes_into(processes, new_processes) return processes, new_inputs def solve_with_oil(self, items): results = self.solve_all(items) results, further_inputs = self.solve_oil(results) merge_processes_into(results, self.solve_all(further_inputs)) return results
MIT License
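A back-of-the-envelope sketch of the splitting rule for a single item, assuming the usual 45 items/sec belt limit; the real split_into_steps also handles liquids and separate input limits, and rescales whole Process objects rather than bare numbers.

# Simplified illustration of "maximise every step except the last one".
from fractions import Fraction

BELT_LIMIT = Fraction(45)  # items/sec per belt (assumed)

def split_throughput(throughput):
    steps = Fraction(throughput) / BELT_LIMIT     # how many belts' worth of flow
    whole, leftover = divmod(steps, 1)
    parts = [BELT_LIMIT] * int(whole)             # maximised steps, one full belt each
    if leftover:
        parts.append(leftover * BELT_LIMIT)       # the final, fractional step
    return parts

print(split_throughput(100))   # [Fraction(45, 1), Fraction(45, 1), Fraction(10, 1)]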
dmlc/gluon-nlp
src/gluonnlp/base.py
use_einsum_optimization
python
def use_einsum_optimization():
    flag = os.environ.get('GLUONNLP_USE_EINSUM', False)
    return flag
Whether to use einsum for attention. This will potentially accelerate the attention cell Returns ------- flag The use einsum flag
https://github.com/dmlc/gluon-nlp/blob/5d4bc9eba7226ea9f9aabbbd39e3b1e886547e48/src/gluonnlp/base.py#L73-L84
import os import numpy as np __all__ = ['get_home_dir', 'get_data_home_dir'] INT_TYPES = (int, np.int32, np.int64) FLOAT_TYPES = (float, np.float16, np.float32, np.float64) def get_home_dir(): _home_dir = os.environ.get('GLUONNLP_HOME', os.path.join('~', '.gluonnlp')) _home_dir = os.path.expanduser(_home_dir) return _home_dir def get_data_home_dir(): home_dir = get_home_dir() return os.path.join(home_dir, 'datasets') def get_model_zoo_home_dir(): home_dir = get_home_dir() return os.path.join(home_dir, 'models') def get_model_zoo_checksum_dir(): curr_dir = os.path.realpath(os.path.dirname(os.path.realpath(__file__))) check_sum_dir = os.path.join(curr_dir, 'models', 'model_zoo_checksums') return check_sum_dir def get_repo_url(): default_repo = 's3://gluonnlp-numpy-data' repo_url = os.environ.get('GLUONNLP_REPO_URL', default_repo) if repo_url[-1] != '/': repo_url = repo_url + '/' return repo_url def get_repo_model_zoo_url(): repo_url = get_repo_url() model_zoo_url = repo_url + 'models/' return model_zoo_url
Apache License 2.0
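A usage note for use_einsum_optimization: the flag is returned exactly as read from the environment, so any non-empty string (including "0") is truthy.

import os

os.environ["GLUONNLP_USE_EINSUM"] = "1"
print(bool(use_einsum_optimization()))   # True -- the einsum attention path is enabled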
ciscodevnet/virl2-client
virl2_client/models/lab.py
Lab.create_interface_local
python
def create_interface_local(
    self, iface_id, label, node, slot, iface_type="physical"
):
    if iface_id not in self._interfaces:
        iface = Interface(iface_id, node, label, slot, iface_type)
        self._interfaces[iface_id] = iface
    else:
        self._interfaces[iface_id].node = node
        self._interfaces[iface_id].label = label
        self._interfaces[iface_id].slot = slot
        self._interfaces[iface_id].iface_type = iface_type
    return self._interfaces[iface_id]
Helper function to create an interface in the client library.
https://github.com/ciscodevnet/virl2-client/blob/b1e6f5b40375f5154b40fce5d3d4fffdd67e7977/virl2_client/models/lab.py#L695-L707
import json import logging import time from .node import Node from .interface import Interface from .link import Link from ..exceptions import LabNotFound, LinkNotFound, NodeNotFound from .cl_pyats import ClPyats logger = logging.getLogger(__name__) class Lab: def __init__( self, title, lab_id, context, username, password, auto_sync=True, auto_sync_interval=1.0, wait=True, ): self.username = username self.password = password self._title = title self._description = "" self._notes = "" self._lab_id = lab_id self._nodes = {} self._context = context self._owner = username self._links = {} self._interfaces = {} self.events = [] self.pyats = ClPyats(self) self.auto_sync = auto_sync self.auto_sync_interval = auto_sync_interval self._last_sync_statistics_time = 0 self._last_sync_state_time = 0 self._last_sync_l3_address_time = 0 self._last_sync_topology_time = 0 self._initialized = False self.wait_for_convergence = wait def __len__(self): return len(self._nodes) def __str__(self): return "Lab: {}".format(self._title) def __repr__(self): return "{}({!r}, {!r}, {!r}, {!r}, {!r}, {!r})".format( self.__class__.__name__, self._title, self._lab_id, self._context, self.auto_sync, self.auto_sync_interval, self.wait_for_convergence, ) def need_to_wait(self, local_wait): if local_wait is None: return self.wait_for_convergence if not isinstance(local_wait, bool): raise ValueError return local_wait def sync_statistics_if_outdated(self): timestamp = time.time() if ( self.auto_sync and timestamp - self._last_sync_statistics_time > self.auto_sync_interval ): self.sync_statistics() def sync_states_if_outdated(self): timestamp = time.time() if ( self.auto_sync and timestamp - self._last_sync_state_time > self.auto_sync_interval ): self.sync_states() def sync_l3_addresses_if_outdated(self): timestamp = time.time() if ( self.auto_sync and timestamp - self._last_sync_l3_address_time > self.auto_sync_interval ): self.sync_layer3_addresses() def sync_topology_if_outdated(self): timestamp = time.time() if ( self.auto_sync and timestamp - self._last_sync_topology_time > self.auto_sync_interval ): self._sync_topology(exclude_configurations=True) @property def id(self): return self._lab_id @property def title(self): self.sync_topology_if_outdated() return self._title @title.setter def title(self, value): url = self.lab_base_url + "/title" response = self.session.put(url, data=value) response.raise_for_status() self._title = value @property def notes(self): self.sync_topology_if_outdated() return self._notes @notes.setter def notes(self, value): url = self.lab_base_url + "/notes" response = self.session.put(url, data=value) response.raise_for_status() self._notes = value @property def description(self): self.sync_topology_if_outdated() return self._description @description.setter def description(self, value): url = self.lab_base_url + "/description" response = self.session.put(url, data=value) response.raise_for_status() self._description = value @property def client_uuid(self): return self._context.uuid @property def session(self): return self._context.session @property def owner(self): self.sync_topology_if_outdated() return self._owner def nodes(self): self.sync_topology_if_outdated() return list(self._nodes.values()) def links(self): self.sync_topology_if_outdated() return list(self._links.values()) def interfaces(self): self.sync_topology_if_outdated() return list(self._interfaces.values()) @property def lab_base_url(self): return self._context.base_url + "labs/{}".format(self._lab_id) @property def statistics(self): 
return { "nodes": len(self._nodes), "links": len(self._links), "interfaces": len(self._interfaces), } def get_node_by_id(self, node_id): self.sync_topology_if_outdated() try: return self._nodes[node_id] except KeyError: raise NodeNotFound(node_id) def get_node_by_label(self, label): self.sync_topology_if_outdated() for node in self._nodes.values(): if node.label == label: return node else: raise NodeNotFound(label) def get_link_by_nodes(self, node1, node2): self.sync_topology_if_outdated() for link in self.links(): link_node_pair = (link.interface_a.node, link.interface_b.node) if (node1, node2) == link_node_pair: return link elif (node2, node1) == link_node_pair: return link else: raise LinkNotFound() def get_link_by_interfaces(self, iface1, iface2): self.sync_topology_if_outdated() for link in self.links(): link_iface_pair = (link.interface_a, link.interface_b) if (iface1, iface2) == link_iface_pair: return link elif (iface2, iface1) == link_iface_pair: return link else: raise LinkNotFound() def find_nodes_by_tag(self, tag): self.sync_topology_if_outdated() return [node for node in self.nodes() if tag in node.tags()] def create_node( self, label, node_definition, x=0, y=0, wait=None, populate_interfaces=False ): url = self.lab_base_url + "/nodes" if populate_interfaces: url += "?populate_interfaces=true" data = { "label": label, "node_definition": node_definition, "x": x, "y": y, } response = self.session.post(url, json=data) result = response.json() response.raise_for_status() node_id = result["id"] config = "" if not self._initialized: self._initialized = True image_definition = None config = "" if self.need_to_wait(wait): self.wait_until_lab_converged() node = self.add_node_local( node_id, label, node_definition, image_definition, config, x, y ) return node def add_node_local( self, node_id, label, node_definition, image_definition, config, x, y, ram=0, cpus=0, cpu_limit=100, data_volume=0, boot_disk_size=0, tags=None, ): if tags is None: tags = [] node = Node( self, node_id, label, node_definition, image_definition, config, x, y, ram, cpus, cpu_limit, data_volume, boot_disk_size, tags, ) self._nodes[node.id] = node return node def remove_node(self, node, wait=None): node.remove_on_server() for iface in node.interfaces(): for lnk in iface.links(): try: del self._links[lnk.id] except KeyError: pass try: del self._interfaces[iface.id] except KeyError: pass try: del self._nodes[node.id] except KeyError: pass if self.need_to_wait(wait): self.wait_until_lab_converged() logger.debug("%s node removed from lab %s", node.id, self._lab_id) def remove_nodes(self, wait=None): for node in list(self._nodes.values()): self.remove_node(node, wait=False) if self.need_to_wait(wait): self.wait_until_lab_converged() logger.debug("all nodes removed from lab %s", self._lab_id) def remove_link(self, link, wait=None): link.remove_on_server() try: del self._links[link.id] except KeyError: pass if self.need_to_wait(wait): self.wait_until_lab_converged() logger.debug("link %s removed from lab %s", link.id, self._lab_id) def remove_interface(self, iface, wait=None): iface.remove_on_server() for lnk in iface.links(): try: del self._links[lnk.id] except KeyError: pass try: del self._interfaces[iface.id] except KeyError: pass if self.need_to_wait(wait): self.wait_until_lab_converged() logger.debug("interface %s removed from lab %s", iface.id, self._lab_id) def create_link(self, i1, i2, wait=None): url = self.lab_base_url + "/links" data = { "src_int": i1.id, "dst_int": i2.id, } response = self.session.post(url, 
json=data) result = response.json() link_id = result["id"] if self.need_to_wait(wait): self.wait_until_lab_converged() link = self.create_link_local(i1, i2, link_id) return link def connect_two_nodes(self, node1, node2): iface1 = node1.next_available_interface() or node1.create_interface() iface2 = node2.next_available_interface() or node2.create_interface() return self.create_link(iface1, iface2) def create_link_local(self, i1, i2, link_id): link = Link(self, link_id, i1, i2) self._links[link_id] = link return link def create_interface(self, node, slot=None, wait=None): url = self.lab_base_url + "/interfaces" payload = {"node": node.id} if slot is not None: payload["slot"] = slot response = self.session.post(url, json=payload) response.raise_for_status() result = response.json() if isinstance(result, dict): result = [result] if self.need_to_wait(wait): self.wait_until_lab_converged() desired_interface = None for iface in result: lab_interface = self.create_interface_local( iface_id=iface["id"], label=iface["label"], node=node, slot=iface["slot"], ) if slot == iface["slot"] or slot is None: desired_interface = lab_interface return desired_interface
Apache License 2.0
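A minimal usage sketch for the Lab client API visible in the context above. It assumes a reachable CML/VIRL2 controller and valid credentials; the controller URL, credentials, and the "iosv" node definition are illustrative assumptions, not taken from the record.

# Hypothetical usage sketch -- controller URL, credentials and node definition are assumptions.
from virl2_client import ClientLibrary

client = ClientLibrary("https://cml-controller", "admin", "password", ssl_verify=False)
lab = client.create_lab("demo")

# Create two nodes and wire them together; connect_two_nodes() picks the
# next available interface on each node (creating one if necessary).
node1 = lab.create_node("r1", "iosv", x=0, y=0)
node2 = lab.create_node("r2", "iosv", x=100, y=0)
link = lab.connect_two_nodes(node1, node2)

# The statistics property reports element counts for the lab.
print(lab.statistics)   # e.g. {'nodes': 2, 'links': 1, 'interfaces': ...}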
emorynlp/bert-2019
bertsota/common/data.py
ParserVocabulary._add_pret_words
python
def _add_pret_words(self, pret_file):
    words_in_train_data = set(self._id2word)
    with open(pret_file) as f:
        for line in f:
            line = line.strip().split()
            if line:
                word = line[0]
                if word not in words_in_train_data:
                    self._id2word.append(word)
Read a pre-trained embedding file to extend the vocabulary.

Parameters
----------
pret_file : str
    path to pre-trained embedding file
https://github.com/emorynlp/bert-2019/blob/228b2046d92084ea3cd7c900c1d8af1f0a925cfe/bertsota/common/data.py#L197-L213
import pickle from collections import Counter import numpy as np from bertsota.common.k_means import KMeans from bertsota.common.savable import Savable class ConllWord(object): def __init__(self, id, form, lemma=None, cpos=None, pos=None, feats=None, head=None, relation=None, phead=None, pdeprel=None): self.id = id self.form = form self.cpos = cpos self.pos = pos self.head = head self.relation = relation self.lemma = lemma self.feats = feats self.phead = phead self.pdeprel = pdeprel def __str__(self): values = [str(self.id), self.form, self.lemma, self.cpos, self.pos, self.feats, str(self.head), self.relation, self.phead, self.pdeprel] return '\t'.join(['_' if v is None else v for v in values]) class ConllSentence(object): def __init__(self, words): super().__init__() self.words = words def __str__(self): return '\n'.join([word.__str__() for word in self.words]) def __len__(self): return len(self.words) def __getitem__(self, index): return self.words[index] def __iter__(self): return (line for line in self.words) class ParserVocabulary(Savable): PAD, ROOT, UNK = 0, 1, 2 def __init__(self, input_file, pret_file, min_occur_count=2, root='root', shared_vocab=None): super().__init__() word_counter = Counter() tag_set = set() rel_set = set() if input_file.endswith('.conllu'): with open(input_file) as f: for line in f: if line.startswith('#'): continue cell = line.strip().split() if cell: word, tag = cell[1].lower(), cell[3] word_counter[word] += 1 tag_set.add(tag) token = cell[8] if token != '_': token = token.split('|') for edge in token: head, rel = edge.split(':', 1) if rel != root: rel_set.add(rel) else: with open(input_file) as f: for line in f: info = line.strip().split() if info: if len(info) == 10: arc_offset = 6 rel_offset = 7 elif len(info) == 8: arc_offset = 5 rel_offset = 6 word, tag, head, rel = info[1].lower(), info[3], int(info[arc_offset]), info[rel_offset] word_counter[word] += 1 tag_set.add(tag) if rel != root: rel_set.add(rel) self._id2word = ['<pad>', '<root>', '<unk>'] self._id2tag = ['<pad>', '<root>', '<unk>'] self._id2rel = ['<pad>', root] self.root = root reverse = lambda x: dict(list(zip(x, list(range(len(x)))))) for word, count in word_counter.most_common(): if count > min_occur_count: self._id2word.append(word) self._pret_file = pret_file self._words_in_train_data = len(self._id2word) if shared_vocab: self._id2word = shared_vocab._id2word self._id2tag = shared_vocab._id2tag self._word2id = shared_vocab._word2id self._tag2id = shared_vocab._tag2id else: if pret_file: self._add_pret_words(pret_file) self._id2tag += list(sorted(tag_set)) self._word2id = reverse(self._id2word) self._tag2id = reverse(self._id2tag) self._id2rel += list(sorted(rel_set)) self._rel2id = reverse(self._id2rel) def log_info(self, logger): logger.info('#words in training set: %d' % self._words_in_train_data) logger.info("Vocab info: #words %d, #tags %d #rels %d" % (self.vocab_size, self.tag_size, self.rel_size))
Apache License 2.0
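A standalone sketch of the same vocabulary-extension idea: append any word from an embedding file (first token per line) that the training vocabulary has not seen. The embedding file name is an assumption for illustration.

def add_pret_words(id2word, pret_file):
    # Extend an id->word list in place with unseen words from the embedding file.
    seen = set(id2word)
    with open(pret_file) as f:
        for line in f:
            parts = line.strip().split()
            if parts and parts[0] not in seen:
                id2word.append(parts[0])
    return id2word

vocab = ['<pad>', '<root>', '<unk>', 'the']
# add_pret_words(vocab, 'glove.6B.100d.txt')  # hypothetical file; appends unseen embedding words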
openstack/rally-openstack
tests/functional/test_task_samples.py
TestTaskSamples._skip
python
def _skip(self, validation_output): 
    skip_lst = ["[Ss]ervice is not available",
                "is not installed. To install it run",
                "extension.* is not configured"]
    for check_str in skip_lst:
        if re.search(check_str, validation_output) is not None:
            return True
    return False
Help decide whether we want to skip this result or not.

:param validation_output: string representation of the error that we
    want to check
:return: True if we want to skip this error of task sample validation,
    otherwise False.
https://github.com/openstack/rally-openstack/blob/d52e165320d87860930d6fbcca105e19bec0d879/tests/functional/test_task_samples.py#L39-L54
import copy import json import os import re import traceback import unittest from rally import api from rally.cli import yamlutils as yaml from rally.common import broker from rally import plugins import rally_openstack as rally_openstack_module from rally_openstack.common import consts from rally_openstack.common import credential from tests.functional import utils class TestTaskSamples(unittest.TestCase): NUMBER_OF_THREADS = 20
Apache License 2.0
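A self-contained sketch of the regex skip-list pattern used by _skip(); the sample messages below are invented for illustration.

import re

SKIP_PATTERNS = ["[Ss]ervice is not available",
                 "is not installed. To install it run",
                 "extension.* is not configured"]

def should_skip(validation_output):
    # Skip the result if any known "environment not ready" pattern matches.
    return any(re.search(p, validation_output) for p in SKIP_PATTERNS)

print(should_skip("Service is not available: gnocchi"))   # True
print(should_skip("quota exceeded for instances"))         # False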
ddorn/gui
GUI/base.py
BaseWidget.__init__
python
def __init__(self, pos, size, anchor=CENTER):
    self.__verify(pos)
    self.__verify(size)
    super().__init__((0, 0), (0, 0))
    self._anchor = anchor
    self._pos = pos
    self._size = size
    self._focus = False
    self.clicked = False
Creates a basic widget with... nothing.

The pos, size and anchor can be tuples or functions that return a tuple
(an anchor string for the anchor).
https://github.com/ddorn/gui/blob/e1fcb5286d24e0995f280d5180222e51895c368c/GUI/base.py#L24-L40
import pygame from GUI.locals import CENTER, TOPLEFT, TOPRIGHT, MIDTOP, MIDLEFT, MIDRIGHT, BOTTOMRIGHT, MIDBOTTOM, BOTTOMLEFT from pygame.event import EventType class BaseWidget(pygame.Rect):
MIT License
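A hedged usage sketch based only on the docstring above: positions and sizes may be plain tuples or callables returning tuples. It assumes pygame and this GUI package are installed; the exact checks done by the private __verify helper are not shown in the record, so the callable form is an assumption.

import pygame
from GUI.base import BaseWidget
from GUI.locals import TOPLEFT

pygame.init()

# Fixed geometry given as tuples.
fixed = BaseWidget((10, 10), (120, 40), anchor=TOPLEFT)

# A callable position lets the widget follow something dynamic (assumed to be accepted).
dynamic = BaseWidget(lambda: (10, 10), (120, 40), anchor=TOPLEFT)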
vovanec/supervisor_checks
supervisor_checks/bin/tcp_check.py
_make_argument_parser
python
def _make_argument_parser(): 
    parser = argparse.ArgumentParser(
        description='Run TCP check program.')

    parser.add_argument('-n', '--check-name', dest='check_name',
                        type=str, required=True, default=None,
                        help='Check name.')
    parser.add_argument('-g', '--process-group', dest='process_group',
                        type=str, default=None,
                        help='Supervisor process group name.')
    parser.add_argument('-N', '--process-name', dest='process_name',
                        type=str, default=None,
                        help='Supervisor process name. Process group argument is ignored if this ' +
                             'is passed in')
    parser.add_argument(
        '-p', '--port', dest='port', type=str, default=None, required=True,
        help='TCP port to query. Can be integer or regular expression which '
             'will be used to extract port from a process name.')
    parser.add_argument(
        '-t', '--timeout', dest='timeout', type=int, required=False,
        default=tcp.DEFAULT_TIMEOUT,
        help='Connection timeout. Default: %s' % (tcp.DEFAULT_TIMEOUT,))
    parser.add_argument(
        '-r', '--num-retries', dest='num_retries', type=int,
        default=tcp.DEFAULT_RETRIES, required=False,
        help='Connection retries. Default: %s' % (tcp.DEFAULT_RETRIES,))

    return parser
Create the option parser.
https://github.com/vovanec/supervisor_checks/blob/f44b105659d60d0e1f1845b111fec148ae5514e5/supervisor_checks/bin/tcp_check.py#L20-L50
import argparse import sys from supervisor_checks import check_runner from supervisor_checks.check_modules import tcp __author__ = '[email protected]'
MIT License
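A short usage sketch, assuming it runs in the same module (or after importing _make_argument_parser from supervisor_checks.bin.tcp_check); the check name and port values are invented.

parser = _make_argument_parser()
args = parser.parse_args(['--check-name', 'web-tcp', '--port', '8080',
                          '--timeout', '10', '--num-retries', '2'])
print(args.check_name, args.port, args.timeout, args.num_retries)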
ibm/superglue-mtl
data_utils.py
COPALoader.get_labels
python
def get_labels(self):
    return [0]
This is set to [0] because we treat each subinstance as a logistic regression task.
https://github.com/ibm/superglue-mtl/blob/1eb3e581c0ef3b4c261e0256ec26116d2b657c40/data_utils.py#L633-L635
import os import json import logging import torch import numpy as np from torch.utils.data import TensorDataset, Dataset logging.getLogger().setLevel(logging.INFO) DATA_PATH = os.environ["SG_DATA"] EXP_PATH = os.environ["SG_MTL_EXP"] BERT_LARGE_MNLI_PATH = "/datastor/xhua/Experiments/mnli_bert_large_no_transfer_seed_42_lr_1e-5_max_seq_len_256/pytorch_model.bin_epoch_4_train_loss_0.0049_dev_loss_0.5259_acc_86.52" BERT_LARGE_SQUAD_NLI_PATH = "/dccstor/xhua11/Experiments/squadnli_bert-large_transfer-wwm_lr-1e-5_seed-42_max-len-256/pytorch_model.bin_epoch_1_train_loss_0.2614_val_loss_0.2474" BERT_LARGE_SQUAD_1_PATH = "/u/avi/Projects/dccstor_avi5/nq/trained_squad_models/using_mglass_pretraining/bert_large_sq1.bin" BERT_LARGE_SQUAD_2_PATH = "/u/avi/Projects/dccstor_avi5/nq/trained_squad_models/bert_large_sq2_mglass_pretrained.bin" BERT_LARGE_WWM_SQUAD_2_PATH = "/dccstor/panlin2/squad2/expts/Pan_squad2_whole_word_32bs/output/pytorch_model.bin" TRANSFER_PATH = { "squad1-bert-large" : BERT_LARGE_SQUAD_1_PATH, "squad2-bert-large" : BERT_LARGE_SQUAD_2_PATH, "squad2-bert-large-wwm" : BERT_LARGE_WWM_SQUAD_2_PATH, "squad2nli-roberta-large" : ROBERTA_LARGE_SQUAD_NLI_PATH, } class InputExample(object): def __init__(self, guid, text_hyp, text_pre=None, label=None): self.guid = guid self.text_hyp = text_hyp self.text_pre = text_pre self.label = label def featurize_example(self, *kargs, **kwargs): raise NotImplementedError class DefaultInputExample(InputExample): def __init__(self, guid, text_hyp, text_pre, label): super(DefaultInputExample, self).__init__(guid, text_hyp, text_pre, label) def featurize_example(self, tokenizer, max_seq_length=128, label_map=None, output_mode="classification", model_type="bert", print_example=False, task=None): tokens_a = tokenizer.tokenize(self.text_hyp) if self.text_pre: tokens_b = tokenizer.tokenize(self.text_pre) special_tokens_count = 4 if model_type == "roberta" else 3 _truncate_seq_pair(tokens_a, tokens_b, max_seq_length - special_tokens_count) else: special_tokens_count = 3 if model_type == "roberta" else 2 if len(tokens_a) > max_seq_length - special_tokens_count: tokens_a = tokens_a[:max_seq_length - special_tokens_count] tokens = tokens_a + [tokenizer.sep_token] if model_type == "roberta": tokens += [tokenizer.sep_token] segment_ids = [0] * len(tokens) if tokens_b: tokens += tokens_b + [tokenizer.sep_token] segment_ids += [1] * (len(tokens_b) + 1) tokens = [tokenizer.cls_token] + tokens segment_ids = [0] + segment_ids input_ids = tokenizer.convert_tokens_to_ids(tokens) input_mask = [1] * len(input_ids) padding_length = max_seq_length - len(input_ids) input_ids = input_ids + tokenizer.convert_tokens_to_ids([tokenizer.pad_token] * padding_length) input_mask = input_mask + [0] * padding_length segment_ids = segment_ids + [0] * padding_length assert len(input_ids) == max_seq_length assert len(input_mask) == max_seq_length assert len(segment_ids) == max_seq_length if output_mode == "classification": label_id = label_map[self.label] elif output_mode == "regression": label_id = float(self.label) else: raise KeyError if print_example: logging.info("*** Example (%s) ***" % task) logging.info("guid: %s" % (self.guid)) logging.info("tokens: %s" % " ".join( [str(x) for x in tokens])) logging.info("input_ids: %s" % " ".join([str(x) for x in input_ids])) logging.info("input_mask: %s" % " ".join([str(x) for x in input_mask])) logging.info( "segment_ids: %s" % " ".join([str(x) for x in segment_ids])) logging.info("label: %s (id = %s)" % (str(self.label), str(label_id))) return 
InputFeatures(input_ids=input_ids, input_mask=input_mask, segment_ids=segment_ids, label_id=label_id) class WSCInputExample(InputExample): def __init__(self, guid, text, span_1, span_2, label): super(WSCInputExample, self).__init__(guid, text_hyp=text, text_pre=None, label=label) self.spans = [span_1, span_2] def featurize_example(self, tokenizer, max_seq_length=128, label_map=None, model_type="bert", print_example=False, task=None): tokens_a = tokenizer.tokenize(self.text_hyp) token_word_ids = _get_word_ids(tokens_a, model_type) span_1_tok_ids = _get_token_ids(token_word_ids, self.spans[0][0], offset=1) span_2_tok_ids = _get_token_ids(token_word_ids, self.spans[1][0], offset=1) special_tokens_count = 2 if len(tokens_a) > max_seq_length - special_tokens_count: tokens_a = tokens_a[:max_seq_length - special_tokens_count] tokens = tokens_a + [tokenizer.sep_token] segment_ids = [0] * len(tokens) tokens = [tokenizer.cls_token] + tokens segment_ids = [0] + segment_ids input_ids = tokenizer.convert_tokens_to_ids(tokens) input_mask = [1] * len(input_ids) padding_length = max_seq_length - len(input_ids) input_ids = input_ids + tokenizer.convert_tokens_to_ids([tokenizer.pad_token] * padding_length) input_mask = input_mask + [0] * padding_length segment_ids = segment_ids + [0] * padding_length span_1_mask = [0] * len(input_ids) for k in span_1_tok_ids: span_1_mask[k] = 1 span_2_mask = [0] * len(input_ids) for k in span_2_tok_ids: span_2_mask[k] = 1 assert len(input_ids) == max_seq_length assert len(input_mask) == max_seq_length assert len(segment_ids) == max_seq_length assert len(span_1_mask) == max_seq_length assert len(span_2_mask) == max_seq_length if self.label is not None: label_id = int(self.label) else: label_id = None if print_example: logging.info("*** Example (%s) ***" % task) logging.info("guid: %s" % (self.guid)) logging.info("tokens: %s" % " ".join( [str(x) for x in tokens])) logging.info("input_ids: %s" % " ".join([str(x) for x in input_ids])) logging.info("input_mask: %s" % " ".join([str(x) for x in input_mask])) logging.info( "segment_ids: %s" % " ".join([str(x) for x in segment_ids])) logging.info("label: %s (id = %s)" % (str(self.label), str(label_id))) return InputFeatures(input_ids=input_ids, input_mask=input_mask, segment_ids=segment_ids, span_1_mask=span_1_mask, span_1_text=self.spans[0][1], span_2_mask=span_2_mask, span_2_text=self.spans[1][1], label_id=label_id) class COPAInputExample(InputExample): def __init__(self, guid, text_pre, text_choice_1, text_choice_2, question, label=None): super(COPAInputExample, self).__init__(guid=guid, text_hyp=None, text_pre=text_pre, label=label) self.text_choice_1 = text_choice_1 self.text_choice_2 = text_choice_2 self.question = question def featurize_example(self, tokenizer, max_seq_length=128, label_map=None, model_type="bert", print_example=False, task=None): def _featurize_example(text_a, text_b, text_c, cur_label=None, print_example=False): tokens_a = tokenizer.tokenize(text_a) tokens_b = tokenizer.tokenize(text_b) tokens_c = tokenizer.tokenize(text_c) special_tokens_count = 6 if model_type == "roberta" else 4 _truncate_seq_pair(tokens_a, tokens_c, max_seq_length - special_tokens_count - len(tokens_b)) tokens = tokens_a + [tokenizer.sep_token] if model_type == "roberta": tokens += [tokenizer.sep_token] segment_ids = [0] * len(tokens) tokens += tokens_b + [tokenizer.sep_token] segment_ids += [1] * (len(tokens_b) + 1) if model_type == "roberta": tokens += [tokenizer.sep_token] segment_ids += [1] tokens += tokens_c + [tokenizer.sep_token] 
segment_ids += [2] * (len(tokens_c) + 1) tokens = [tokenizer.cls_token] + tokens segment_ids = [0] + segment_ids input_ids = tokenizer.convert_tokens_to_ids(tokens) input_mask = [1] * len(input_ids) padding_length = max_seq_length - len(input_ids) input_ids = input_ids + tokenizer.convert_tokens_to_ids([tokenizer.pad_token] * padding_length) input_mask = input_mask + [0] * padding_length segment_ids = segment_ids + [0] * padding_length label_id = float(cur_label) if cur_label is not None else None assert len(input_ids) == max_seq_length assert len(input_mask) == max_seq_length assert len(segment_ids) == max_seq_length if print_example: logging.info("*** Example (COPA) ***") logging.info("guid: %s" % (self.guid)) logging.info("tokens: %s" % " ".join( [str(x) for x in tokens])) logging.info("input_ids: %s" % " ".join([str(x) for x in input_ids])) logging.info("input_mask: %s" % " ".join([str(x) for x in input_mask])) logging.info( "segment_ids: %s" % " ".join([str(x) for x in segment_ids])) logging.info("label: %s (id = %s)" % (str(cur_label), str(label_id))) return InputFeatures(input_ids=input_ids, input_mask=input_mask, segment_ids=segment_ids, label_id=label_id) feat_ex_1 = _featurize_example(self.text_choice_1, self.question, self.text_pre, cur_label=int(self.label == 0), print_example=print_example) feat_ex_2 = _featurize_example(self.text_choice_2, self.question, self.text_pre, cur_label=int(self.label == 1), print_example=print_example) return feat_ex_1, feat_ex_2 class InputFeatures(object): def __init__(self, input_ids, input_mask, segment_ids, label_id, **kwargs): self.input_ids = input_ids self.input_mask = input_mask self.segment_ids = segment_ids self.label_id = label_id self.__dict__.update(kwargs) class SuperGlueDataset(Dataset): def __init__(self, task_loader, task_samples, tokenizer, max_seq_length, model_type="bert"): label_map = {label: i for i, label in enumerate(task_loader.get_labels())} label_map[None] = None features = [] self.all_guids = [] self.task_name = task_loader.task_name for (ex_index, example) in enumerate(task_samples): print_example = True if ex_index < 1 else False featurized_example = example.featurize_example(tokenizer, max_seq_length=max_seq_length, label_map=label_map, model_type=model_type, print_example=print_example, task=task_loader.task_name) if task_loader.task_name == "COPA": features.append(featurized_example[0]) features.append(featurized_example[1]) self.all_guids.append(str(example.guid) + "_0") self.all_guids.append(str(example.guid) + "_1") else: features.append(featurized_example) self.all_guids.append(str(example.guid)) self.all_input_ids = torch.tensor([f.input_ids for f in features], dtype=torch.long) self.all_input_mask = torch.tensor([f.input_mask for f in features], dtype=torch.long) self.all_segment_ids = torch.tensor([f.segment_ids for f in features], dtype=torch.long) if task_loader.task_name in ["WSC", "WiC"]: self.all_span_1_mask = torch.tensor([f.span_1_mask for f in features], dtype=torch.long) self.all_span_1_text = [f.span_1_text for f in features] self.all_span_2_mask = torch.tensor([f.span_2_mask for f in features], dtype=torch.long) self.all_span_2_text = [f.span_2_text for f in features] if features[0].label_id is None: self.all_label_ids = torch.tensor([0 for _ in features], dtype=torch.long) elif task_loader.task_name == "COPA": self.all_label_ids = torch.tensor([f.label_id for f in features], dtype=torch.float) else: self.all_label_ids = torch.tensor([f.label_id for f in features], dtype=torch.long) def 
__len__(self): return len(self.all_guids) def __getitem__(self, index): item = (self.all_guids[index], self.all_input_ids[index], self.all_input_mask[index], self.all_segment_ids[index], self.all_label_ids[index]) if self.task_name in ["WSC", "WiC"]: item = item + (self.all_span_1_mask[index], self.all_span_1_text[index], self.all_span_2_mask[index], self.all_span_2_text[index]) return item def load_jsonl_raw(dataset, demo=False, set_type="train"): fname = "%s.jsonl" % set_type lines = [] for ln in open(os.path.join(DATA_PATH, dataset, fname)): lines.append(json.loads(ln)) if demo and len(lines) >= 500: break return lines def load_squadnli_raw(demo=False, set_type="train"): fname = "squadnli.%s.jsonl" % set_type lines = [] for ln in open(os.path.join(DATA_PATH + "../../squadnli/", fname)): cur_obj = json.loads(ln) context = cur_obj["context"] for item in cur_obj["statements"]: lines.append({"premise": context, "hypothesis": item[1], "label": item[-1], "id": item[0]}) if demo and len(lines) >= 100: break return lines class DataLoader(object): task_name = None def get_train_examples(self, demo=False): return self._create_examples(load_jsonl_raw(dataset=self.task_name, demo=demo, set_type="train")) def get_test_examples(self, demo=False, test_set="val"): return self._create_examples(load_jsonl_raw(dataset=self.task_name, demo=demo, set_type=test_set)) class SQuADNLILoader(DataLoader): task_name = "SQuADNLI" def get_train_examples(self, demo=False): return self._create_examples(load_squadnli_raw(demo=demo, set_type="train")) def get_test_examples(self, demo=False, test_set="val"): return self._create_examples(load_squadnli_raw(demo=demo, set_type=test_set)) def get_labels(self): return [False, True] def _create_examples(self, lines): examples = [] for (i, line) in enumerate(lines): text_hyp = line["hypothesis"] text_pre = line["premise"] if "label" in line: label = line["label"] else: label = None examples.append( DefaultInputExample(guid=line["id"], text_hyp=text_hyp, text_pre=text_pre, label=label) ) return examples class BoolQLoader(DataLoader): task_name = "BoolQ" def get_labels(self): return [False, True] def _create_examples(self, lines): examples = [] for (i, line) in enumerate(lines): text_hyp = line["question"] text_pre = line["passage"] if "label" in line: label = line["label"] else: label = None examples.append( DefaultInputExample(guid=line["idx"], text_hyp=text_hyp, text_pre=text_pre, label=label) ) return examples class BoolQNLILoader(BoolQLoader): task_name = "BoolQNLI" def get_train_examples(self, demo=False): return self._create_examples(load_jsonl_raw(dataset="BoolQNLI", demo=demo, set_type="train")) def get_test_examples(self, demo=False, test_set="val"): return self._create_examples(load_jsonl_raw(dataset="BoolQNLI", demo=demo, set_type=test_set)) def _create_examples(self, lines): examples = [] for (i, line) in enumerate(lines): text_hyp = line["question"] text_pre = line["passage"] if "label" in line: label = line["label"] else: label = None examples.append( DefaultInputExample(guid=str(i), text_hyp=text_hyp, text_pre=text_pre, label=label) ) return examples class RTELoader(DataLoader): task_name = "RTE" def get_train_examples(self, demo=False): return self._create_examples(load_jsonl_raw(dataset="RTE", demo=demo, set_type="train")) def get_test_examples(self, demo=False, test_set="val"): return self._create_examples(load_jsonl_raw(dataset="RTE", demo=demo, set_type=test_set)) def get_labels(self): return ["entailment", "not_entailment"] def _create_examples(self, lines): 
examples = [] for (i, line) in enumerate(lines): text_hyp = line["hypothesis"] text_pre = line["premise"] guid = line["idx"] if "label" in line: label = line["label"] else: label = None examples.append( DefaultInputExample(guid=guid, text_hyp=text_hyp, text_pre=text_pre, label=label) ) return examples class CBLoader(DataLoader): task_name = "CB" def get_train_examples(self, demo=False): return self._create_examples(load_jsonl_raw(dataset="CB", demo=demo, set_type="train")) def get_test_examples(self, demo=False, test_set="val"): return self._create_examples(load_jsonl_raw(dataset="CB", demo=demo, set_type=test_set)) def get_labels(selfself): return ["entailment", "neutral", "contradiction"] def _create_examples(self, lines): examples = [] for (i, line) in enumerate(lines): text_hyp = line["hypothesis"] text_pre = line["premise"] guid = line["idx"] if "label" in line: label = line["label"] else: label = None examples.append( DefaultInputExample(guid=guid, text_hyp=text_hyp, text_pre=text_pre, label=label) ) return examples class WSCLoader(DataLoader): task_name = "WSC" def get_labels(self): return [0] def _create_examples(self, lines): examples = [] for (i, line) in enumerate(lines): text = line["text"] span_1 = (line["target"]["span1_index"], line["target"]["span1_text"]) span_2 = (line["target"]["span2_index"], line["target"]["span2_text"]) if "label" in line: label = line["label"] else: label = None examples.append( WSCInputExample(guid=line["idx"], text=text, span_1=span_1, span_2=span_2, label=label) ) return examples class COPALoader(DataLoader): task_name = "COPA"
Apache License 2.0
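A small sketch of how the single-label scheme is consumed downstream: the dataset builder in the context above maps each label to an index, so COPA's [0] yields a one-entry map and every sub-instance becomes a binary, logistic-regression style target. The CB labels are taken from the same context; everything else is illustrative.

labels = [0]                                   # what COPALoader.get_labels() returns
label_map = {label: i for i, label in enumerate(labels)}
print(label_map)                               # {0: 0}

# By contrast, a multi-class task such as CB gets a multi-entry map:
cb_map = {l: i for i, l in enumerate(["entailment", "neutral", "contradiction"])}
print(cb_map)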
seldonio/alibi
alibi/utils/lang_model.py
BertBaseUncased.__init__
python
def __init__(self, preloading: bool = True):
    super().__init__("bert-base-uncased", preloading)
Initialize BertBaseUncased.

Parameters
----------
preloading
    See `LanguageModel` constructor.
https://github.com/seldonio/alibi/blob/ef757b9579f85ef2e3dfc7088211969616ee3fdb/alibi/utils/lang_model.py#L343-L352
import abc import numpy as np from pathlib import Path from typing import List, Optional, Tuple, Union import tensorflow as tf import transformers from transformers import TFAutoModelForMaskedLM, AutoTokenizer class LanguageModel(abc.ABC): SUBWORD_PREFIX = '' def __init__(self, model_path: str, preloading: bool = True): self.model_path = model_path self.model, self.caller, self.tokenizer = None, None, None if preloading: self.model = TFAutoModelForMaskedLM.from_pretrained(model_path) self.caller = tf.function(self.model.call, experimental_relax_shapes=True) self.tokenizer = AutoTokenizer.from_pretrained(model_path) def from_disk(self, path: Union[str, Path]): self.model = TFAutoModelForMaskedLM.from_pretrained(path, local_files_only=True) self.caller = tf.function(self.model.call, experimental_relax_shapes=True) self.tokenizer = AutoTokenizer.from_pretrained(path, local_files_only=True) def to_disk(self, path: Union[str, Path]): if self.model: self.model.save_pretrained(path) if self.tokenizer: self.tokenizer.save_pretrained(path) @abc.abstractmethod def is_subword_prefix(self, token: str) -> bool: pass def select_word(self, tokenized_text: List[str], start_idx: int, punctuation: str) -> str: end_idx = start_idx + 1 while end_idx < len(tokenized_text): if (not self.is_subword_prefix(tokenized_text[end_idx])) or self.is_punctuation(tokenized_text[end_idx], punctuation): break end_idx += 1 word = self.tokenizer.convert_tokens_to_string(tokenized_text[start_idx:end_idx]) return word def is_stop_word(self, tokenized_text: List[str], start_idx: int, punctuation: str, stopwords: Optional[List[str]]) -> bool: if not stopwords: return False if self.is_subword_prefix(tokenized_text[start_idx]): return False word = self.select_word(tokenized_text, start_idx=start_idx, punctuation=punctuation).strip() return word.lower() in stopwords def is_punctuation(self, token: str, punctuation: str) -> bool: token = token.replace(self.SUBWORD_PREFIX, '').strip() return all([c in punctuation for c in token]) @property @abc.abstractmethod def mask(self) -> str: pass @property def mask_id(self) -> int: return self.tokenizer.mask_token_id @property def max_num_tokens(self) -> int: return self.model.config.max_position_embeddings def head_tail_split(self, text: str) -> Tuple[str, str, List[str], List[str]]: text = text.strip() if len(text) == 0: raise ValueError("The text is empty.") tokens: List[str] = self.tokenizer.tokenize(text) if self.max_num_tokens == -1 or len(tokens) <= self.max_num_tokens: return text, '', tokens, [] head_num_tokens = self.max_num_tokens while (head_num_tokens > 0) and self.is_subword_prefix(tokens[head_num_tokens]): head_num_tokens -= 1 if head_num_tokens == 0: raise ValueError("Check the first word in the sentence. 
Seems it is a very long word") ids = self.tokenizer.convert_tokens_to_ids(tokens[:head_num_tokens]) head_text = self.tokenizer.decode(ids).strip() tail_text = '' if len(tokens) >= head_num_tokens: ids = self.tokenizer.convert_tokens_to_ids(tokens[head_num_tokens:]) tail_text = self.tokenizer.decode(ids).strip() return head_text, tail_text, tokens[:head_num_tokens], tokens[head_num_tokens:] def predict_batch_lm(self, x: transformers.tokenization_utils_base.BatchEncoding, vocab_size: int, batch_size: int) -> np.ndarray: n, m = x['input_ids'].shape y = np.zeros((n, m, vocab_size), dtype=np.float32) n_minibatch = int(np.ceil(n / batch_size)) for i in range(n_minibatch): istart, istop = i * batch_size, min((i + 1) * batch_size, n) x_batch = dict() if 'input_ids' in x.keys(): x_batch['input_ids'] = x['input_ids'][istart:istop] if 'token_type_ids' in x.keys(): x_batch['token_type_ids'] = x['token_type_ids'][istart:istop] if 'attention_mask' in x.keys(): x_batch['attention_mask'] = x['attention_mask'][istart:istop] y[istart:istop] = self.caller(**x_batch).logits.numpy() return y class DistilbertBaseUncased(LanguageModel): SUBWORD_PREFIX = '##' def __init__(self, preloading: bool = True): super().__init__("distilbert-base-uncased", preloading) @property def mask(self) -> str: return self.tokenizer.mask_token def is_subword_prefix(self, token: str) -> bool: return token.startswith(DistilbertBaseUncased.SUBWORD_PREFIX) class BertBaseUncased(LanguageModel): SUBWORD_PREFIX = '##'
Apache License 2.0
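A hedged usage sketch: instantiating the wrapper with preloading=True downloads the Hugging Face "bert-base-uncased" weights, so this needs network access, TensorFlow and transformers installed. The example word is an assumption.

from alibi.utils.lang_model import BertBaseUncased

lm = BertBaseUncased(preloading=True)
print(lm.tokenizer.tokenize("tokenization"))  # WordPiece pieces, e.g. ['token', '##ization']
print(lm.mask_id)                             # id of the tokenizer's mask token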
uok-psychology/django-questionnaire
questionnaire/forms.py
generate_radioselect_field
python
def generate_radioselect_field(): 
    return ChoiceField(widget=RadioSelect, choices=[])
@return: radio-select field, no default set.

TODO: this isn't actually true; it returns a ChoiceField that uses a
RadioSelect widget.
https://github.com/uok-psychology/django-questionnaire/blob/50c85328306533554e0f9c56794f54422cb12bc9/questionnaire/forms.py#L54-L58
from django import forms from django.forms.fields import CharField,BooleanField,ChoiceField,MultipleChoiceField, TypedChoiceField from django.forms.widgets import RadioSelect, CheckboxInput from questionnaire.models import AnswerSet def get_choices(question): choices_list = question.selectoptions if choices_list == None: return None choices= [(x,x) for x in choices_list] return choices def generate_charfield(): return CharField(max_length=100,widget=forms.TextInput(attrs={'size':'40'})) def generate_textfield(): return CharField(widget = forms.Textarea(attrs={'rows':'4','cols':'40',})) def generate_boolean_field(): return TypedChoiceField( choices=((1,'Yes'),(0,'No')), widget=forms.RadioSelect, coerce=int ) def generate_select_dropdown_field(): return ChoiceField(choices=[])
MIT License
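A hedged sketch of how the empty radio-select field might be populated from a question's options, mirroring get_choices() from the same module; it assumes a configured Django project and an existing `question` object with a selectoptions attribute.

field = generate_radioselect_field()
# get_choices() returns None when the question has no options, hence the fallback.
field.choices = get_choices(question) or []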
vitorazor/lidar_rgb_detector
second/builder/dataset_builder.py
build
python
def build(input_reader_config, model_config, training, voxel_generator, target_assigner, multi_gpu=False): if not isinstance(input_reader_config, input_reader_pb2.InputReader): raise ValueError('input_reader_config not of type ' 'input_reader_pb2.InputReader.') prep_cfg = input_reader_config.preprocess dataset_cfg = input_reader_config.dataset num_point_features = model_config.num_point_features out_size_factor = get_downsample_factor(model_config) assert out_size_factor > 0 cfg = input_reader_config db_sampler_cfg = prep_cfg.database_sampler db_sampler = None if len(db_sampler_cfg.sample_groups) > 0 or db_sampler_cfg.database_info_path != "": db_sampler = dbsampler_builder.build(db_sampler_cfg) grid_size = voxel_generator.grid_size feature_map_size = grid_size[:2] // out_size_factor feature_map_size = [*feature_map_size, 1][::-1] print("feature_map_size", feature_map_size) assert all([n != '' for n in target_assigner.classes]), "you must specify class_name in anchor_generators." dataset_cls = get_dataset_class(dataset_cfg.dataset_class_name) assert dataset_cls.NumPointFeatures >= 3, "you must set this to correct value" assert dataset_cls.NumPointFeatures == num_point_features, "currently you need keep them same" prep_func = partial( prep_pointcloud, root_path=dataset_cfg.kitti_root_path, voxel_generator=voxel_generator, target_assigner=target_assigner, training=training, max_voxels=prep_cfg.max_number_of_voxels, remove_outside_points=False, remove_unknown=prep_cfg.remove_unknown_examples, create_targets=training, shuffle_points=prep_cfg.shuffle_points, gt_rotation_noise=list(prep_cfg.groundtruth_rotation_uniform_noise), gt_loc_noise_std=list(prep_cfg.groundtruth_localization_noise_std), global_rotation_noise=list(prep_cfg.global_rotation_uniform_noise), global_scaling_noise=list(prep_cfg.global_scaling_uniform_noise), global_random_rot_range=list( prep_cfg.global_random_rotation_range_per_object), global_translate_noise_std=list(prep_cfg.global_translate_noise_std), db_sampler=db_sampler, num_point_features=dataset_cls.NumPointFeatures, anchor_area_threshold=prep_cfg.anchor_area_threshold, gt_points_drop=prep_cfg.groundtruth_points_drop_percentage, gt_drop_max_keep=prep_cfg.groundtruth_drop_max_keep_points, remove_points_after_sample=prep_cfg.remove_points_after_sample, remove_environment=prep_cfg.remove_environment, use_group_id=prep_cfg.use_group_id, out_size_factor=out_size_factor, multi_gpu=multi_gpu, min_points_in_gt=prep_cfg.min_num_of_points_in_gt, random_flip_x=prep_cfg.random_flip_x, random_flip_y=prep_cfg.random_flip_y, sample_importance=prep_cfg.sample_importance) ret = target_assigner.generate_anchors(feature_map_size) class_names = target_assigner.classes anchors_dict = target_assigner.generate_anchors_dict(feature_map_size) anchors_list = [] for k, v in anchors_dict.items(): anchors_list.append(v["anchors"]) anchors = np.concatenate(anchors_list, axis=0) anchors = anchors.reshape([-1, target_assigner.box_ndim]) assert np.allclose(anchors, ret["anchors"].reshape(-1, target_assigner.box_ndim)) matched_thresholds = ret["matched_thresholds"] unmatched_thresholds = ret["unmatched_thresholds"] anchors_bv = box_np_ops.rbbox2d_to_near_bbox( anchors[:, [0, 1, 3, 4, 6]]) anchor_cache = { "anchors": anchors, "anchors_bv": anchors_bv, "matched_thresholds": matched_thresholds, "unmatched_thresholds": unmatched_thresholds, "anchors_dict": anchors_dict, } prep_func = partial(prep_func, anchor_cache=anchor_cache) dataset = dataset_cls( info_path=dataset_cfg.kitti_info_path, 
root_path=dataset_cfg.kitti_root_path, class_names=class_names, prep_func=prep_func) return dataset
Builds a tensor dictionary based on the InputReader config.

Args:
    input_reader_config: An input_reader_pb2.InputReader object.

Returns:
    A tensor dict based on the input_reader_config.

Raises:
    ValueError: On invalid input reader proto.
    ValueError: If no input paths are specified.
https://github.com/vitorazor/lidar_rgb_detector/blob/5308ba24a90d6e8d73940be4b40d31eccb4df94b/second/builder/dataset_builder.py#L34-L135
from second.protos import input_reader_pb2 from second.data.dataset import get_dataset_class from second.data.preprocess import prep_pointcloud from second.core import box_np_ops import numpy as np from second.builder import dbsampler_builder from functools import partial from second.utils.config_tool import get_downsample_factor
MIT License
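A small worked example of the feature-map sizing logic inside build(): the voxel grid is downsampled by the network's output stride and the result is re-ordered. The grid size and downsample factor below are invented numbers, not values from the record.

import numpy as np

grid_size = np.array([400, 352, 1])   # assumed voxel grid (x, y, z)
out_size_factor = 8                   # assumed downsample factor

feature_map_size = grid_size[:2] // out_size_factor
feature_map_size = [*feature_map_size, 1][::-1]
print(feature_map_size)               # [1, 44, 50]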
rapid7/vm-console-client-python
rapid7vmconsole/models/site.py
Site.risk_score
python
def risk_score(self, risk_score):
    self._risk_score = risk_score
Sets the risk_score of this Site.

The risk score (with criticality adjustments) of the site.  # noqa: E501

:param risk_score: The risk_score of this Site.  # noqa: E501
:type: float
https://github.com/rapid7/vm-console-client-python/blob/55e1f573967bce27cc9a2d10c12a949b1142c2b3/rapid7vmconsole/models/site.py#L310-L319
import pprint import re import six class Site(object): """ Attributes: swagger_types (dict): The key is attribute name and the value is attribute type. attribute_map (dict): The key is attribute name and the value is json key in definition. """ swagger_types = { 'assets': 'int', 'connection_type': 'str', 'description': 'str', 'id': 'int', 'importance': 'str', 'last_scan_time': 'str', 'links': 'list[Link]', 'name': 'str', 'risk_score': 'float', 'scan_engine': 'int', 'scan_template': 'str', 'type': 'str', 'vulnerabilities': 'Vulnerabilities' } attribute_map = { 'assets': 'assets', 'connection_type': 'connectionType', 'description': 'description', 'id': 'id', 'importance': 'importance', 'last_scan_time': 'lastScanTime', 'links': 'links', 'name': 'name', 'risk_score': 'riskScore', 'scan_engine': 'scanEngine', 'scan_template': 'scanTemplate', 'type': 'type', 'vulnerabilities': 'vulnerabilities' } def __init__(self, assets=None, connection_type=None, description=None, id=None, importance=None, last_scan_time=None, links=None, name=None, risk_score=None, scan_engine=None, scan_template=None, type=None, vulnerabilities=None): self._assets = None self._connection_type = None self._description = None self._id = None self._importance = None self._last_scan_time = None self._links = None self._name = None self._risk_score = None self._scan_engine = None self._scan_template = None self._type = None self._vulnerabilities = None self.discriminator = None if assets is not None: self.assets = assets if connection_type is not None: self.connection_type = connection_type if description is not None: self.description = description if id is not None: self.id = id if importance is not None: self.importance = importance if last_scan_time is not None: self.last_scan_time = last_scan_time if links is not None: self.links = links if name is not None: self.name = name if risk_score is not None: self.risk_score = risk_score if scan_engine is not None: self.scan_engine = scan_engine if scan_template is not None: self.scan_template = scan_template if type is not None: self.type = type if vulnerabilities is not None: self.vulnerabilities = vulnerabilities @property def assets(self): return self._assets @assets.setter def assets(self, assets): self._assets = assets @property def connection_type(self): return self._connection_type @connection_type.setter def connection_type(self, connection_type): allowed_values = ["activesync-ldap", "activesync-office365", "activesync-powershell", "aws", "dhcp", "sonar", "vsphere"] if connection_type not in allowed_values: raise ValueError( "Invalid value for `connection_type` ({0}), must be one of {1}" .format(connection_type, allowed_values) ) self._connection_type = connection_type @property def description(self): return self._description @description.setter def description(self, description): self._description = description @property def id(self): return self._id @id.setter def id(self, id): self._id = id @property def importance(self): return self._importance @importance.setter def importance(self, importance): self._importance = importance @property def last_scan_time(self): return self._last_scan_time @last_scan_time.setter def last_scan_time(self, last_scan_time): self._last_scan_time = last_scan_time @property def links(self): return self._links @links.setter def links(self, links): self._links = links @property def name(self): return self._name @name.setter def name(self, name): self._name = name @property def risk_score(self): return self._risk_score @risk_score.setter
MIT License
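A usage sketch for the generated model class: the setter simply stores the value, so a Site's risk score can be assigned and read back directly. The site name, importance and score are invented.

from rapid7vmconsole.models.site import Site

site = Site(name="Corporate HQ", importance="high")
site.risk_score = 12345.6
print(site.risk_score)    # 12345.6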
gadsbyfly/pybiomed
PyBioMed/PyMolecule/fingerprint.py
CalculateAtomPairsFingerprint
python
def CalculateAtomPairsFingerprint(mol):
    res = Pairs.GetAtomPairFingerprint(mol)
    return res.GetLength(), res.GetNonzeroElements(), res
#################################################################
Calculate atom pairs fingerprints.

Usage:

    result = CalculateAtomPairsFingerprint(mol)

Input: mol is a molecule object.

Output: result is a tuple. The first element is the number of fingerprints.
The second is a dict whose keys are the positions at which this molecule
has some substructure. The third is the DataStructs object which is used
for calculating the similarity.
#################################################################
https://github.com/gadsbyfly/pybiomed/blob/8db017961390dbcdcb7060fec758b9b8b9fc604f/PyBioMed/PyMolecule/fingerprint.py#L219-L241
from openbabel import pybel from rdkit import Chem, DataStructs from rdkit.Chem import AllChem, ChemicalFeatures, MACCSkeys from rdkit.Chem.AtomPairs import Pairs, Torsions from rdkit.Chem.Fingerprints import FingerprintMols from rdkit.Chem.Pharm2D import Generate from rdkit.Chem.Pharm2D.SigFactory import SigFactory from PyBioMed.PyMolecule.estate import CalculateEstateFingerprint as EstateFingerprint from PyBioMed.PyMolecule.ghosecrippen import GhoseCrippenFingerprint from PyBioMed.PyMolecule.PubChemFingerprints import calcPubChemFingerAll Version = 1.0 similaritymeasure = [i[0] for i in DataStructs.similarityFunctions] def CalculateFP2Fingerprint(mol): res = {} NumFinger = 1024 temp = mol.calcfp().bits for i in temp: res.update({i: 1}) return NumFinger, res def CalculateFP3Fingerprint(mol): res = {} NumFinger = 210 temp = mol.calcfp("FP3").bits for i in temp: res.update({i: 1}) return NumFinger, res def CalculateFP4Fingerprint(mol): res = {} NumFinger = 307 temp = mol.calcfp("FP4").bits for i in temp: res.update({i: 1}) return NumFinger, res def CalculateDaylightFingerprint(mol): res = {} NumFinger = 2048 bv = FingerprintMols.FingerprintMol(mol) temp = tuple(bv.GetOnBits()) for i in temp: res.update({i: 1}) return NumFinger, res, bv def CalculateMACCSFingerprint(mol): res = {} NumFinger = 166 bv = MACCSkeys.GenMACCSKeys(mol) temp = tuple(bv.GetOnBits()) for i in temp: res.update({i: 1}) return NumFinger, res, bv def CalculateEstateFingerprint(mol): NumFinger = 79 res = {} temp = EstateFingerprint(mol) for i in temp: if temp[i] > 0: res[i[7:]] = 1 return NumFinger, res, temp
BSD 3-Clause New or Revised License
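A hedged usage sketch: it requires RDKit and PyBioMed to be installed, and the ethanol SMILES is an invented example molecule.

from rdkit import Chem
from PyBioMed.PyMolecule.fingerprint import CalculateAtomPairsFingerprint

mol = Chem.MolFromSmiles("CCO")
n_bits, nonzero, fp = CalculateAtomPairsFingerprint(mol)
print(n_bits)                 # total fingerprint length
print(list(nonzero)[:5])      # a few of the positions that are set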
saketkc/pysradb
pysradb/utils.py
order_dataframe
python
def order_dataframe(df, columns):
    remaining_columns = [w for w in df.columns if w not in columns]
    df = df[columns + remaining_columns]
    return df
Order a dataframe.

Order a dataframe by moving the `columns` to the front.

Parameters
----------
df: Dataframe
    Dataframe
columns: list
    List of columns that need to be put in front
https://github.com/saketkc/pysradb/blob/bce1726813a104ff83eb1221679bf93074252af6/pysradb/utils.py#L177-L191
import errno import gzip import io import ntpath import os import shlex import subprocess import urllib.request as urllib_request import warnings import requests from requests.adapters import HTTPAdapter from requests.packages.urllib3.util.retry import Retry from tqdm.autonotebook import tqdm from .exceptions import IncorrectFieldException warnings.simplefilter(action="ignore", category=FutureWarning) tqdm.pandas() def path_leaf(path): head, tail = ntpath.split(path) return tail or ntpath.basename(head) def requests_3_retries(): session = requests.Session() retry = Retry( total=3, backoff_factor=0.5, status_forcelist=[500, 502, 503, 504], ) adapter = HTTPAdapter(max_retries=retry) session.mount("http://", adapter) session.mount("https://", adapter) return session def scientific_name_to_taxid(name): r = requests.get( "https://www.ebi.ac.uk/ena/data/taxonomy/v1/taxon/scientific-name/" + name, timeout=5, ) if r.status_code == 404: raise IncorrectFieldException(f"Unknown scientific name: {name}") r.raise_for_status() return r.json()[0]["taxId"] def unique(sequence): visited = set() return [x for x in sequence if not (x in visited or visited.add(x))] class TqdmUpTo(tqdm): def update_to(self, b=1, bsize=1, tsize=None): if tsize is not None: self.total = tsize self.update(b * bsize - self.n) def _extract_first_field(data): return list(next(iter(zip(*data)))) def _find_aspera_keypath(aspera_dir=None): if aspera_dir is None: aspera_dir = os.path.join(os.path.expanduser("~"), ".aspera") aspera_keypath = os.path.join( aspera_dir, "connect", "etc", "asperaweb_id_dsa.openssh" ) if os.path.isfile(aspera_keypath): return aspera_keypath def mkdir_p(path): if path: try: os.makedirs(path) except OSError as exc: if exc.errno == errno.EEXIST and os.path.isdir(path): pass else: raise
BSD 3-Clause New or Revised License
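A worked example with a throwaway pandas frame, assuming the helper is imported from pysradb.utils: the named columns move to the front and everything else keeps its relative order.

import pandas as pd
from pysradb.utils import order_dataframe

df = pd.DataFrame({"a": [1], "b": [2], "c": [3], "d": [4]})
print(order_dataframe(df, ["c", "a"]).columns.tolist())
# ['c', 'a', 'b', 'd']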
napari/napari
napari/_qt/containers/_base_item_model.py
_BaseEventedItemModel._on_end_insert
python
def _on_end_insert(self, e):
    self.endInsertRows()
Must be called after insert operation to update model.
https://github.com/napari/napari/blob/c4c987c880fe125da608edf427767eafe7f2b3f4/napari/_qt/containers/_base_item_model.py#L244-L246
from __future__ import annotations from collections.abc import MutableSequence from typing import TYPE_CHECKING, Any, Generic, Tuple, TypeVar, Union from qtpy.QtCore import QAbstractItemModel, QModelIndex, Qt from ...utils.events import disconnect_events from ...utils.events.containers import SelectableEventedList from ...utils.translations import trans if TYPE_CHECKING: from qtpy.QtWidgets import QWidget ItemType = TypeVar("ItemType") ItemRole = Qt.UserRole SortRole = Qt.UserRole + 1 class _BaseEventedItemModel(QAbstractItemModel, Generic[ItemType]): _root: SelectableEventedList[ItemType] def __init__( self, root: SelectableEventedList[ItemType], parent: QWidget = None ): super().__init__(parent=parent) self.setRoot(root) def parent(self, index): return QModelIndex() def data(self, index: QModelIndex, role: Qt.ItemDataRole) -> Any: if role == Qt.DisplayRole: return str(self.getItem(index)) if role == ItemRole: return self.getItem(index) if role == SortRole: return index.row() return None def flags(self, index: QModelIndex) -> Qt.ItemFlags: if ( not index.isValid() or index.row() >= len(self._root) or index.model() is not self ): return Qt.ItemIsDropEnabled base_flags = ( Qt.ItemIsSelectable | Qt.ItemIsEditable | Qt.ItemIsUserCheckable | Qt.ItemIsDragEnabled | Qt.ItemIsEnabled ) if isinstance(self.getItem(index), MutableSequence): return base_flags | Qt.ItemIsDropEnabled return base_flags | Qt.ItemNeverHasChildren def columnCount(self, parent: QModelIndex) -> int: return 1 def rowCount(self, parent: QModelIndex = QModelIndex()) -> int: try: return len(self.getItem(parent)) except TypeError: return 0 def index( self, row: int, column: int = 0, parent: QModelIndex = QModelIndex() ) -> QModelIndex: return ( self.createIndex(row, column, self.getItem(parent)[row]) if self.hasIndex(row, column, parent) else QModelIndex() ) def supportedDropActions(self) -> Qt.DropActions: return Qt.MoveAction def setRoot(self, root: SelectableEventedList[ItemType]): if not isinstance(root, SelectableEventedList): raise TypeError( trans._( "root must be an instance of {class_name}", deferred=True, class_name=SelectableEventedList, ) ) current_root = getattr(self, "_root", None) if root is current_root: return if current_root is not None: disconnect_events(self._root.events, self) self._root = root self._root.events.removing.connect(self._on_begin_removing) self._root.events.removed.connect(self._on_end_remove) self._root.events.inserting.connect(self._on_begin_inserting) self._root.events.inserted.connect(self._on_end_insert) self._root.events.moving.connect(self._on_begin_moving) self._root.events.moved.connect(self._on_end_move) self._root.events.connect(self._process_event) def _split_nested_index( self, nested_index: Union[int, Tuple[int, ...]] ) -> Tuple[QModelIndex, int]: if isinstance(nested_index, int): return QModelIndex(), nested_index par = QModelIndex() *_p, idx = nested_index for i in _p: par = self.index(i, 0, par) return par, idx def _on_begin_inserting(self, event): par, idx = self._split_nested_index(event.index) self.beginInsertRows(par, idx, idx)
BSD 3-Clause New or Revised License
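A hedged sketch of the Qt insert protocol this method completes: the model listens to the evented list, calls beginInsertRows() before the change and endInsertRows() (this method) after it. It assumes napari and a Qt binding are installed and a Qt application context is available; the item names are invented.

from napari.utils.events.containers import SelectableEventedList
from napari._qt.containers._base_item_model import _BaseEventedItemModel

items = SelectableEventedList(["background"])
model = _BaseEventedItemModel(items)

items.append("points")        # fires inserting -> beginInsertRows,
                              # then inserted -> _on_end_insert -> endInsertRows
print(model.rowCount())       # 2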
compas-dev/compas
src/compas/data/data.py
Data.from_json
python
def from_json(cls, filepath):
    data = compas.json_load(filepath)
    return cls.from_data(data)
Construct an object from serialized data contained in a JSON file.

Parameters
----------
filepath : path string, file-like object or URL string
    The path, file or URL to the file for serialization.

Returns
-------
:class:`compas.data.Data`
    An object of the type of ``cls``.
https://github.com/compas-dev/compas/blob/d795a8bfe9f21ffa124d09e37e9c0ed2e3520057/src/compas/data/data.py#L170-L184
from __future__ import print_function from __future__ import absolute_import from __future__ import division import os import json from uuid import uuid4 from copy import deepcopy import compas from compas.data.encoders import DataEncoder from compas.data.encoders import DataDecoder class Data(object): def __init__(self, name=None): self._guid = None self._name = None self._jsondefinitions = None self._JSONSCHEMA = None self._jsonvalidator = None if name: self.name = name def __getstate__(self): return {'__dict__': self.__dict__, 'dtype': self.dtype, 'data': self.data} def __setstate__(self, state): self.__dict__.update(state['__dict__']) self.data = state['data'] @property def DATASCHEMA(self): raise NotImplementedError @property def JSONSCHEMANAME(self): raise NotImplementedError @property def JSONSCHEMA(self): if not self._JSONSCHEMA: schema_filename = '{}.json'.format(self.JSONSCHEMANAME.lower()) schema_path = os.path.join(os.path.dirname(__file__), 'schemas', schema_filename) with open(schema_path, 'r') as fp: self._JSONSCHEMA = json.load(fp) return self._JSONSCHEMA @property def jsondefinitions(self): if not self._jsondefinitions: schema_path = os.path.join(os.path.dirname(__file__), 'schemas', 'compas.json') with open(schema_path, 'r') as fp: self._jsondefinitions = json.load(fp) return self._jsondefinitions @property def jsonvalidator(self): if not self._jsonvalidator: from jsonschema import RefResolver, Draft7Validator resolver = RefResolver.from_schema(self.jsondefinitions) self._jsonvalidator = Draft7Validator(self.JSONSCHEMA, resolver=resolver) return self._jsonvalidator @property def dtype(self): return '{}/{}'.format('.'.join(self.__class__.__module__.split('.')[:2]), self.__class__.__name__) @property def data(self): raise NotImplementedError @data.setter def data(self, data): raise NotImplementedError @property def jsonstring(self): return compas.json_dumps(self.data) @property def guid(self): if not self._guid: self._guid = uuid4() return self._guid @property def name(self): if not self._name: self._name = self.__class__.__name__ return self._name @name.setter def name(self, name): self._name = name @classmethod def from_data(cls, data): obj = cls() obj.data = data return obj def to_data(self): return self.data @classmethod
MIT License
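A hedged round-trip sketch: from_json() is just compas.json_load() followed by from_data(), so dumping an object's data first makes the round trip explicit. The Point class and the "point.json" path are assumptions for illustration, not part of the record.

import compas
from compas.geometry import Point

pt = Point(1.0, 2.0, 3.0)
compas.json_dump(pt.data, "point.json")   # write the serializable data

restored = Point.from_json("point.json")  # json_load + from_data
print(restored)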
gaa-uam/scikit-fda
skfda/exploratory/visualization/_utils.py
_get_axes_shape
python
def _get_axes_shape(
    n_axes: int,
    n_rows: Optional[int] = None,
    n_cols: Optional[int] = None,
) -> Tuple[int, int]:
    if (
        (n_rows is not None and n_cols is not None)
        and ((n_rows * n_cols) < n_axes)
    ):
        raise ValueError(
            f"The number of rows ({n_rows}) multiplied by "
            f"the number of columns ({n_cols}) "
            f"is less than the number of required "
            f"axes ({n_axes})",
        )

    if n_rows is None and n_cols is None:
        new_n_cols = int(math.ceil(math.sqrt(n_axes)))
        new_n_rows = int(math.ceil(n_axes / new_n_cols))
    elif n_rows is None and n_cols is not None:
        new_n_cols = n_cols
        new_n_rows = int(math.ceil(n_axes / n_cols))
    elif n_cols is None and n_rows is not None:
        new_n_cols = int(math.ceil(n_axes / n_rows))
        new_n_rows = n_rows

    return new_n_rows, new_n_cols
Get the number of rows and columns of the subplots.
https://github.com/gaa-uam/scikit-fda/blob/1a6fc2c01e39871e09fd2ec6d0b14d378d6b069f/skfda/exploratory/visualization/_utils.py#L113-L140
import io import math import re from itertools import repeat from typing import Optional, Sequence, Tuple, TypeVar, Union import matplotlib.backends.backend_svg import matplotlib.pyplot as plt from matplotlib.axes import Axes from matplotlib.figure import Figure from typing_extensions import Protocol from ...representation._functional_data import FData non_close_text = '[^>]*?' svg_width_regex = re.compile( f'(<svg {non_close_text}width="){non_close_text}("{non_close_text}>)', ) svg_width_replacement = r'\g<1>100%\g<2>' svg_height_regex = re.compile( f'(<svg {non_close_text})height="{non_close_text}"({non_close_text}>)', ) svg_height_replacement = r'\g<1>\g<2>' ColorLike = Union[ Tuple[float, float, float], Tuple[float, float, float, float], str, Sequence[float], ] K = TypeVar('K', contravariant=True) V = TypeVar('V', covariant=True) class Indexable(Protocol[K, V]): def __getitem__(self, __key: K) -> V: pass def __len__(self) -> int: pass def _create_figure() -> Figure: return plt.figure() def _figure_to_svg(figure: Figure) -> str: old_canvas = figure.canvas matplotlib.backends.backend_svg.FigureCanvas(figure) output = io.BytesIO() figure.savefig(output, format='svg') figure.set_canvas(old_canvas) data = output.getvalue() decoded_data = data.decode('utf-8') new_data = svg_width_regex.sub( svg_width_replacement, decoded_data, count=1, ) return svg_height_regex.sub( svg_height_replacement, new_data, count=1, ) def _get_figure_and_axes( chart: Union[Figure, Axes, Sequence[Axes], None] = None, fig: Optional[Figure] = None, axes: Union[Axes, Sequence[Axes], None] = None, ) -> Tuple[Figure, Sequence[Axes]]: num_defined = sum(e is not None for e in (chart, fig, axes)) if num_defined > 1: raise ValueError( "Only one of chart, fig and axes parameters" "can be passed as an argument.", ) if chart is not None: if isinstance(chart, matplotlib.figure.Figure): fig = chart else: axes = chart if fig is None and axes is None: new_fig = _create_figure() new_axes = [] elif fig is not None: new_fig = fig new_axes = fig.axes else: assert axes is not None if isinstance(axes, Axes): axes = [axes] new_fig = axes[0].figure new_axes = axes return new_fig, new_axes
BSD 3-Clause New or Revised License
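A worked example of the layout logic, assuming the module-private helper is in scope (for instance imported from skfda.exploratory.visualization._utils): with no constraints, five axes get a near-square grid, while fixing rows or columns forces the other dimension.

from skfda.exploratory.visualization._utils import _get_axes_shape

print(_get_axes_shape(5))             # (2, 3): 3 columns = ceil(sqrt(5)), 2 rows
print(_get_axes_shape(5, n_rows=1))   # (1, 5)
print(_get_axes_shape(5, n_cols=2))   # (3, 2)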
thecesrom/ignition
src/system/nav.py
openWindow
python
def openWindow(path, params=None):
    print(path, params)
    return FPMIWindow("Opened Window")
Opens the window with the given path. If the window is already open,
brings it to the front.

The optional params dictionary contains key:value pairs which will be used
to set the target window's root container's dynamic variables.

Args:
    path (str): The path to the window to open.
    params (dict): A dictionary of parameters to pass into the window. The
        keys in the dictionary must match dynamic property names on the
        target window's root container. The values for each key will be
        used to set those properties. Optional.

Returns:
    FPMIWindow: A reference to the opened window.
https://github.com/thecesrom/ignition/blob/c784e573530a217f4c430bd110889ce569152747/src/system/nav.py#L135-L154
from __future__ import print_function __all__ = [ "centerWindow", "closeParentWindow", "closeWindow", "desktop", "getCurrentWindow", "goBack", "goForward", "goHome", "openWindow", "openWindowInstance", "swapTo", "swapWindow", ] from com.inductiveautomation.factorypmi.application import FPMIWindow from com.inductiveautomation.factorypmi.application.script.builtin import ( NavUtilities, ) from java.util import EventObject def centerWindow(arg): print(arg) def closeParentWindow(event): print(event) def closeWindow(arg): print(arg) def desktop(handle="primary"): print(handle) return NavUtilities() def getCurrentWindow(): return "Path/To/Maximized Window" def goBack(): return FPMIWindow("Back") def goForward(): return FPMIWindow("Forward") def goHome(): return FPMIWindow("Home")
MIT License
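A hedged stand-in for the calling pattern that the openWindow stub above emulates; FakeWindow and the window path are invented so the sketch runs outside an Ignition client.

class FakeWindow:
    # Placeholder for FPMIWindow; only the name is stored.
    def __init__(self, name):
        self.name = name

def open_window(path, params=None):
    # Same shape as nav.openWindow: report the request and return a window handle.
    print(path, params)
    return FakeWindow("Opened Window")

window = open_window("Main Windows/Overview", {"machineId": 3})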
jdasoftwaregroup/kartothek
kartothek/io/eager.py
delete_dataset
python
def delete_dataset(dataset_uuid=None, store=None, factory=None): ds_factory = _ensure_factory( dataset_uuid=dataset_uuid, load_schema=False, store=store, factory=factory, load_dataset_metadata=False, ) garbage_collect_dataset(factory=ds_factory) delete_indices(dataset_factory=ds_factory) for metapartition in dispatch_metapartitions_from_factory(ds_factory): metapartition = cast(MetaPartition, metapartition) metapartition.delete_from_store(dataset_uuid=dataset_uuid, store=store) delete_common_metadata(dataset_factory=ds_factory) delete_top_level_metadata(dataset_factory=ds_factory)
Delete the entire dataset from the store.
https://github.com/jdasoftwaregroup/kartothek/blob/6bc7e868435e98cbda0b695900f29d1ff7d49110/kartothek/io/eager.py#L71-L101
import warnings from functools import partial from typing import Any, Callable, Dict, Iterable, List, Optional, Tuple, Union, cast import pandas as pd from simplekv import KeyValueStore from kartothek.core.common_metadata import ( empty_dataframe_from_schema, make_meta, store_schema_metadata, ) from kartothek.core.dataset import DatasetMetadata, DatasetMetadataBuilder from kartothek.core.docs import default_docs from kartothek.core.factory import DatasetFactory, _ensure_factory from kartothek.core.naming import ( DEFAULT_METADATA_STORAGE_FORMAT, DEFAULT_METADATA_VERSION, METADATA_BASE_SUFFIX, METADATA_FORMAT_JSON, PARQUET_FILE_SUFFIX, get_partition_file_prefix, ) from kartothek.core.typing import StoreInput from kartothek.core.utils import lazy_store from kartothek.io.iter import store_dataframes_as_dataset__iter from kartothek.io_components.delete import ( delete_common_metadata, delete_indices, delete_top_level_metadata, ) from kartothek.io_components.gc import delete_files, dispatch_files_to_gc from kartothek.io_components.index import update_indices_from_partitions from kartothek.io_components.metapartition import ( SINGLE_TABLE, MetaPartition, parse_input_to_metapartition, ) from kartothek.io_components.read import dispatch_metapartitions_from_factory from kartothek.io_components.update import update_dataset_from_partitions from kartothek.io_components.utils import ( _ensure_compatible_indices, align_categories, normalize_args, sort_values_categorical, validate_partition_keys, ) from kartothek.io_components.write import raise_if_dataset_exists from kartothek.serialization import DataFrameSerializer from kartothek.serialization._parquet import ParquetSerializer from kartothek.utils.ktk_adapters import get_dataset_keys from kartothek.utils.store import copy_rename_keys __all__ = ( "delete_dataset", "read_dataset_as_dataframes", "read_table", "commit_dataset", "store_dataframes_as_dataset", "create_empty_dataset_header", "write_single_partition", "update_dataset_from_dataframes", "build_dataset_indices", "garbage_collect_dataset", "copy_dataset", ) @default_docs @normalize_args
MIT License
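A hedged round-trip sketch for delete_dataset, assuming kartothek and storefact are installed; the store URL and dataset UUID are placeholders.

import pandas as pd
from functools import partial

import storefact
from kartothek.io.eager import delete_dataset, store_dataframes_as_dataset

# Local filesystem store; any simplekv-compatible store factory works here.
store_factory = partial(storefact.get_store_from_url, "hfs:///tmp/kartothek_demo")

store_dataframes_as_dataset(
    store=store_factory, dataset_uuid="demo", dfs=[pd.DataFrame({"x": [1, 2]})]
)
delete_dataset(dataset_uuid="demo", store=store_factory)  # removes data, indices and metadata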
asdf-format/asdf
asdf/util.py
BinaryStruct.pack
python
def pack(self, **kwargs): fields = [0] * len(self._names) for key, val in kwargs.items(): if key not in self._offsets: raise KeyError("No header field '{0}'".format(key)) i = self._names.index(key) fields[i] = val return struct.pack(self._fmt, *fields)
Pack the given keyword arguments and return the binary struct.
https://github.com/asdf-format/asdf/blob/ee34d21e2d0e8834128716cc72fd47f31856d00e/asdf/util.py#L171-L182
import enum import inspect import math import struct import types import importlib.util import re from functools import lru_cache from urllib.request import pathname2url import numpy as np from . import constants urllib_parse_spec = importlib.util.find_spec('urllib.parse') patched_urllib_parse = importlib.util.module_from_spec(urllib_parse_spec) urllib_parse_spec.loader.exec_module(patched_urllib_parse) del urllib_parse_spec patched_urllib_parse.uses_relative.append('asdf') patched_urllib_parse.uses_netloc.append('asdf') __all__ = ['human_list', 'get_array_base', 'get_base_uri', 'filepath_to_url', 'iter_subclasses', 'calculate_padding', 'resolve_name', 'NotSet', 'is_primitive', 'uri_match', 'get_class_name'] def human_list(l, separator="and"): if len(l) == 1: return l[0] else: return ', '.join(l[:-1]) + ' ' + separator + ' ' + l[-1] def get_array_base(arr): base = arr while isinstance(base.base, np.ndarray): base = base.base return base def get_base_uri(uri): parts = patched_urllib_parse.urlparse(uri) return patched_urllib_parse.urlunparse(list(parts[:5]) + ['']) def filepath_to_url(path): return patched_urllib_parse.urljoin('file:', pathname2url(path)) def iter_subclasses(cls): for x in cls.__subclasses__(): yield x for y in iter_subclasses(x): yield y def calculate_padding(content_size, pad_blocks, block_size): if not pad_blocks: return 0 if pad_blocks is True: pad_blocks = 1.1 new_size = content_size * pad_blocks new_size = int((math.ceil( float(new_size) / block_size) + 1) * block_size) return max(new_size - content_size, 0) class BinaryStruct: def __init__(self, descr, endian='>'): self._fmt = [endian] self._offsets = {} self._names = [] i = 0 for name, fmt in descr: self._fmt.append(fmt) self._offsets[name] = (i, (endian + fmt).encode('ascii')) self._names.append(name) i += struct.calcsize(fmt.encode('ascii')) self._fmt = ''.join(self._fmt).encode('ascii') self._size = struct.calcsize(self._fmt) @property def size(self): return self._size
BSD 3-Clause New or Revised License
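A short usage sketch for BinaryStruct.pack, assuming the asdf package is installed; the header layout below is hypothetical.

from asdf.util import BinaryStruct

# Hypothetical header: a 4-byte magic string followed by an unsigned 32-bit size.
header = BinaryStruct([("magic", "4s"), ("size", "I")])
blob = header.pack(magic=b"ASDF", size=1024)
assert len(blob) == header.size  # 8 bytes, packed big-endian by default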
jahjajaka/afternoon_cleaner
object_detection/dataset_tools/create_coco_tf_record.py
create_tf_example
python
def create_tf_example(image, annotations_list, image_dir, category_index, include_masks=False): image_height = image['height'] image_width = image['width'] filename = image['file_name'] image_id = image['id'] full_path = os.path.join(image_dir, filename) with tf.gfile.GFile(full_path, 'rb') as fid: encoded_jpg = fid.read() encoded_jpg_io = io.BytesIO(encoded_jpg) image = PIL.Image.open(encoded_jpg_io) key = hashlib.sha256(encoded_jpg).hexdigest() xmin = [] xmax = [] ymin = [] ymax = [] is_crowd = [] category_names = [] category_ids = [] area = [] encoded_mask_png = [] num_annotations_skipped = 0 for object_annotations in annotations_list: (x, y, width, height) = tuple(object_annotations['bbox']) if width <= 0 or height <= 0: num_annotations_skipped += 1 continue if x + width > image_width or y + height > image_height: num_annotations_skipped += 1 continue xmin.append(float(x) / image_width) xmax.append(float(x + width) / image_width) ymin.append(float(y) / image_height) ymax.append(float(y + height) / image_height) is_crowd.append(object_annotations['iscrowd']) category_id = int(object_annotations['category_id']) category_ids.append(category_id) category_names.append(category_index[category_id]['name'].encode('utf8')) area.append(object_annotations['area']) if include_masks: run_len_encoding = mask.frPyObjects(object_annotations['segmentation'], image_height, image_width) binary_mask = mask.decode(run_len_encoding) if not object_annotations['iscrowd']: binary_mask = np.amax(binary_mask, axis=2) pil_image = PIL.Image.fromarray(binary_mask) output_io = io.BytesIO() pil_image.save(output_io, format='PNG') encoded_mask_png.append(output_io.getvalue()) feature_dict = { 'image/height': dataset_util.int64_feature(image_height), 'image/width': dataset_util.int64_feature(image_width), 'image/filename': dataset_util.bytes_feature(filename.encode('utf8')), 'image/source_id': dataset_util.bytes_feature(str(image_id).encode('utf8')), 'image/key/sha256': dataset_util.bytes_feature(key.encode('utf8')), 'image/encoded': dataset_util.bytes_feature(encoded_jpg), 'image/format': dataset_util.bytes_feature('jpeg'.encode('utf8')), 'image/object/bbox/xmin': dataset_util.float_list_feature(xmin), 'image/object/bbox/xmax': dataset_util.float_list_feature(xmax), 'image/object/bbox/ymin': dataset_util.float_list_feature(ymin), 'image/object/bbox/ymax': dataset_util.float_list_feature(ymax), 'image/object/class/text': dataset_util.bytes_list_feature(category_names), 'image/object/is_crowd': dataset_util.int64_list_feature(is_crowd), 'image/object/area': dataset_util.float_list_feature(area), } if include_masks: feature_dict['image/object/mask'] = ( dataset_util.bytes_list_feature(encoded_mask_png)) example = tf.train.Example(features=tf.train.Features(feature=feature_dict)) return key, example, num_annotations_skipped
Converts image and annotations to a tf.Example proto. Args: image: dict with keys: [u'license', u'file_name', u'coco_url', u'height', u'width', u'date_captured', u'flickr_url', u'id'] annotations_list: list of dicts with keys: [u'segmentation', u'area', u'iscrowd', u'image_id', u'bbox', u'category_id', u'id'] Notice that bounding box coordinates in the official COCO dataset are given as [x, y, width, height] tuples using absolute coordinates where x, y represent the top-left (0-indexed) corner. This function converts to the format expected by the Tensorflow Object Detection API (which is [ymin, xmin, ymax, xmax] with coordinates normalized relative to image size). image_dir: directory containing the image files. category_index: a dict containing COCO category information keyed by the 'id' field of each category. See the label_map_util.create_category_index function. include_masks: Whether to include instance segmentation masks (PNG encoded) in the result. default: False. Returns: example: The converted tf.Example num_annotations_skipped: Number of (invalid) annotations that were ignored. Raises: ValueError: if the image pointed to by data['filename'] is not a valid JPEG
https://github.com/jahjajaka/afternoon_cleaner/blob/590bdf58a216cbc6cfc47ef8f49d7af3df3703b7/object_detection/dataset_tools/create_coco_tf_record.py#L73-L191
from __future__ import absolute_import from __future__ import division from __future__ import print_function import hashlib import io import json import os import contextlib2 import numpy as np import PIL.Image from pycocotools import mask import tensorflow as tf from object_detection.dataset_tools import tf_record_creation_util from object_detection.utils import dataset_util from object_detection.utils import label_map_util flags = tf.app.flags tf.flags.DEFINE_boolean('include_masks', False, 'Whether to include instance segmentations masks ' '(PNG encoded) in the result. default: False.') tf.flags.DEFINE_string('train_image_dir', '', 'Training image directory.') tf.flags.DEFINE_string('val_image_dir', '', 'Validation image directory.') tf.flags.DEFINE_string('test_image_dir', '', 'Test image directory.') tf.flags.DEFINE_string('train_annotations_file', '', 'Training annotations JSON file.') tf.flags.DEFINE_string('val_annotations_file', '', 'Validation annotations JSON file.') tf.flags.DEFINE_string('testdev_annotations_file', '', 'Test-dev annotations JSON file.') tf.flags.DEFINE_string('output_dir', '/tmp/', 'Output data directory.') FLAGS = flags.FLAGS tf.logging.set_verbosity(tf.logging.INFO)
MIT License
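A standalone sketch of the coordinate conversion described in the docstring above, with made-up image and box sizes; it avoids the TensorFlow and pycocotools dependencies of the full function.

# COCO supplies [x, y, width, height] in absolute pixels; the TF Object
# Detection API expects normalized [ymin, xmin, ymax, xmax].
image_width, image_height = 640, 480
x, y, width, height = 100.0, 50.0, 200.0, 100.0

xmin, xmax = x / image_width, (x + width) / image_width
ymin, ymax = y / image_height, (y + height) / image_height
assert (ymin, xmin, ymax, xmax) == (50 / 480, 100 / 640, 150 / 480, 300 / 640)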
edinburghnlp/nematus
nematus/server_translator.py
Translator._load_model_options
python
def _load_model_options(self): self._options = [] for model in self._models: config = load_config_from_json_file(model) setattr(config, 'reload', model) self._options.append(config) _, _, _, self._num_to_target = util.load_dictionaries(self._options[0])
Loads config options for each model.
https://github.com/edinburghnlp/nematus/blob/d55074a2e342a33a4d5b0288cbad6269bd47271d/nematus/server_translator.py#L63-L74
import logging import sys import time from multiprocessing import Process, Queue from collections import defaultdict from queue import Empty import numpy from beam_search_sampler import BeamSearchSampler from config import load_config_from_json_file import exception import model_loader import rnn_model from transformer import Transformer as TransformerModel import translate_utils import util class Translation(object): def __init__(self, source_words, target_words, sentence_id=None, score=0, hypothesis_id=None): self.source_words = source_words self.target_words = target_words self.sentence_id = sentence_id self.score = score self.hypothesis_id = hypothesis_id class QueueItem(object): def __init__(self, **kwargs): self.__dict__.update(kwargs) class Translator(object): def __init__(self, settings): self._models = settings.models self._num_processes = settings.num_processes self._verbose = settings.verbose self._retrieved_translations = defaultdict(dict) self._batch_size = settings.minibatch_size self._load_model_options() self._init_queues() self._init_processes()
BSD 3-Clause New or Revised License
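A hedged stand-in for the per-model config loading done by _load_model_options; reading "<model>.json" and the SimpleNamespace wrapper are assumptions, not the Nematus API.

import json
from types import SimpleNamespace

def load_model_options(models):
    options = []
    for model in models:
        # Assumed layout: each checkpoint has a sibling JSON config file.
        with open(model + ".json") as f:
            config = SimpleNamespace(**json.load(f))
        config.reload = model  # remember which checkpoint to restore
        options.append(config)
    return options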
webrecorder/wacz-format
py-wacz/wacz/util.py
get_py_wacz_version
python
def get_py_wacz_version(): return pkg_resources.get_distribution("wacz").version
Get version of the py-wacz package
https://github.com/webrecorder/wacz-format/blob/80083019bb0cbac645df356ff2d223e37671abe7/py-wacz/wacz/util.py#L30-L32
import hashlib, datetime, json from warcio.timeutils import iso_date_to_timestamp import pkg_resources WACZ_VERSION = "1.1.1" def check_http_and_https(url, ts, pages_dict): url_body = url.split(":")[1] checks = [ f"http:{url_body}", f"https:{url_body}", f"{ts}/http:{url_body}", f"{ts}/https:{url_body}", ] for check in checks: if check in pages_dict: return check return ""
MIT License
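The same pkg_resources lookup used by get_py_wacz_version, pointed at a package that is normally present; substitute "wacz" once py-wacz is installed.

import pkg_resources

print(pkg_resources.get_distribution("setuptools").version)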
hsuxu/magic-vnet
magic_vnet/blocks/squeeze_excitation.py
ChannelSELayer3D.__init__
python
def __init__(self, num_channels, reduction_ratio=2, act_type=nn.ReLU): super(ChannelSELayer3D, self).__init__() self.avg_pool = nn.AdaptiveAvgPool3d(1) num_channels_reduced = num_channels // reduction_ratio self.reduction_ratio = reduction_ratio self.fc1 = nn.Linear(num_channels, num_channels_reduced, bias=True) self.fc2 = nn.Linear(num_channels_reduced, num_channels, bias=True) self.act = act_type() self.sigmoid = nn.Sigmoid()
:param num_channels: Number of input channels :param reduction_ratio: By how much the num_channels should be reduced
https://github.com/hsuxu/magic-vnet/blob/6958932f3974d268e93bd6443369a3f43c497ed3/magic_vnet/blocks/squeeze_excitation.py#L13-L25
import torch from torch import nn from torch.nn import functional as F class ChannelSELayer3D(nn.Module):
MIT License
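A hedged sketch of the squeeze-and-excitation forward pass that the __init__ above sets up, assuming PyTorch is installed; magic_vnet's real forward method may differ in detail.

import torch
from torch import nn

class TinySE3D(nn.Module):
    def __init__(self, num_channels, reduction_ratio=2):
        super().__init__()
        self.avg_pool = nn.AdaptiveAvgPool3d(1)
        self.fc1 = nn.Linear(num_channels, num_channels // reduction_ratio)
        self.fc2 = nn.Linear(num_channels // reduction_ratio, num_channels)
        self.act = nn.ReLU()
        self.sigmoid = nn.Sigmoid()

    def forward(self, x):
        b, c = x.shape[:2]
        squeeze = self.avg_pool(x).view(b, c)                 # global context per channel
        scale = self.sigmoid(self.fc2(self.act(self.fc1(squeeze))))
        return x * scale.view(b, c, 1, 1, 1)                  # reweight channels

out = TinySE3D(8)(torch.randn(2, 8, 4, 4, 4))
assert out.shape == (2, 8, 4, 4, 4)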
lbtcio/lbtc-lightwallet-server
wallet/bip32.py
PrivKey._privkey_secret_exponent
python
def _privkey_secret_exponent(cls, privkey): if not isinstance(privkey, (bytes, bytearray)): raise TypeError('privkey must be raw bytes') if len(privkey) != 32: raise ValueError('privkey must be 32 bytes') exponent = bytes_to_int(privkey) if not 1 <= exponent < cls.CURVE.order: raise ValueError('privkey represents an invalid exponent') return exponent
Return the private key as a secret exponent if it is a valid private key.
https://github.com/lbtcio/lbtc-lightwallet-server/blob/4fe64576fb0c45c41cbf72de2390d23ebebfc9c3/wallet/bip32.py#L189-L200
import struct import ecdsa import ecdsa.ellipticcurve as EC import ecdsa.numbertheory as NT from lib.coins import Coin from lib.hash import Base58, hmac_sha512, hash160 from lib.util import cachedproperty, bytes_to_int, int_to_bytes class DerivationError(Exception): class _KeyBase(object): CURVE = ecdsa.SECP256k1 def __init__(self, chain_code, n, depth, parent): if not isinstance(chain_code, (bytes, bytearray)): raise TypeError('chain code must be raw bytes') if len(chain_code) != 32: raise ValueError('invalid chain code') if not 0 <= n < 1 << 32: raise ValueError('invalid child number') if not 0 <= depth < 256: raise ValueError('invalid depth') if parent is not None: if not isinstance(parent, type(self)): raise TypeError('parent key has bad type') self.chain_code = chain_code self.n = n self.depth = depth self.parent = parent def _hmac_sha512(self, msg): hmac = hmac_sha512(self.chain_code, msg) return hmac[:32], hmac[32:] def _extended_key(self, ver_bytes, raw_serkey): if not isinstance(ver_bytes, (bytes, bytearray)): raise TypeError('ver_bytes must be raw bytes') if len(ver_bytes) != 4: raise ValueError('ver_bytes must have length 4') if not isinstance(raw_serkey, (bytes, bytearray)): raise TypeError('raw_serkey must be raw bytes') if len(raw_serkey) != 33: raise ValueError('raw_serkey must have length 33') return (ver_bytes + bytes([self.depth]) + self.parent_fingerprint() + struct.pack('>I', self.n) + self.chain_code + raw_serkey) def fingerprint(self): return self.identifier()[:4] def parent_fingerprint(self): return self.parent.fingerprint() if self.parent else bytes(4) def extended_key_string(self, coin): return Base58.encode_check(self.extended_key(coin)) class PubKey(_KeyBase): def __init__(self, pubkey, chain_code, n, depth, parent=None): super().__init__(chain_code, n, depth, parent) if isinstance(pubkey, ecdsa.VerifyingKey): self.verifying_key = pubkey else: self.verifying_key = self._verifying_key_from_pubkey(pubkey) self.addresses = {} @classmethod def _verifying_key_from_pubkey(cls, pubkey): if not isinstance(pubkey, (bytes, bytearray)): raise TypeError('pubkey must be raw bytes') if len(pubkey) != 33: raise ValueError('pubkey must be 33 bytes') if pubkey[0] not in (2, 3): raise ValueError('invalid pubkey prefix byte') curve = cls.CURVE.curve is_odd = pubkey[0] == 3 x = bytes_to_int(pubkey[1:]) a, b, p = curve.a(), curve.b(), curve.p() y2 = pow(x, 3, p) + b assert a == 0 y = NT.square_root_mod_prime(y2 % p, p) if bool(y & 1) != is_odd: y = p - y point = EC.Point(curve, x, y) return ecdsa.VerifyingKey.from_public_point(point, curve=cls.CURVE) @cachedproperty def pubkey_bytes(self): point = self.verifying_key.pubkey.point prefix = bytes([2 + (point.y() & 1)]) padded_bytes = _exponent_to_bytes(point.x()) return prefix + padded_bytes def address(self, coin): address = self.addresses.get(coin) if not address: address = coin.P2PKH_address_from_pubkey(self.pubkey_bytes) self.addresses[coin] = address return address def ec_point(self): return self.verifying_key.pubkey.point def child(self, n): if not 0 <= n < (1 << 31): raise ValueError('invalid BIP32 public key child number') msg = self.pubkey_bytes + struct.pack('>I', n) L, R = self._hmac_sha512(msg) curve = self.CURVE L = bytes_to_int(L) if L >= curve.order: raise DerivationError point = curve.generator * L + self.ec_point() if point == EC.INFINITY: raise DerivationError verkey = ecdsa.VerifyingKey.from_public_point(point, curve=curve) return PubKey(verkey, R, n, self.depth + 1, self) def identifier(self): return 
hash160(self.pubkey_bytes) def extended_key(self, coin): return self._extended_key(coin.XPUB_VERBYTES, self.pubkey_bytes) class PrivKey(_KeyBase): HARDENED = 1 << 31 def __init__(self, privkey, chain_code, n, depth, parent=None): super().__init__(chain_code, n, depth, parent) if isinstance(privkey, ecdsa.SigningKey): self.signing_key = privkey else: self.signing_key = self._signing_key_from_privkey(privkey) @classmethod def _signing_key_from_privkey(cls, privkey): exponent = cls._privkey_secret_exponent(privkey) return ecdsa.SigningKey.from_secret_exponent(exponent, curve=cls.CURVE) @classmethod
MIT License
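A standalone version of the validation in _privkey_secret_exponent: the key must be 32 raw bytes whose big-endian integer value falls in [1, n) for the secp256k1 group order.

SECP256K1_ORDER = int(
    "FFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFEBAAEDCE6AF48A03BBFD25E8CD0364141", 16)

def secret_exponent(privkey: bytes) -> int:
    if not isinstance(privkey, (bytes, bytearray)):
        raise TypeError("privkey must be raw bytes")
    if len(privkey) != 32:
        raise ValueError("privkey must be 32 bytes")
    exponent = int.from_bytes(privkey, "big")
    if not 1 <= exponent < SECP256K1_ORDER:
        raise ValueError("privkey represents an invalid exponent")
    return exponent

assert secret_exponent(bytes(31) + b"\x01") == 1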
apache/cassandra-dtest
upgrade_tests/upgrade_manifest.py
VersionMeta.clone_with_local_env_version
python
def clone_with_local_env_version(self): cassandra_dir, cassandra_version = cassandra_dir_and_version(CONFIG) if cassandra_version: return self._replace(version=cassandra_version) return self._replace(version="clone:{}".format(cassandra_dir))
Returns a new object cloned from this one, with the version replaced with the local env version.
https://github.com/apache/cassandra-dtest/blob/7c3333958e2b2bd53018a50ab1f529e1a2cca173/upgrade_tests/upgrade_manifest.py#L144-L151
import logging from collections import namedtuple from dtest import RUN_STATIC_UPGRADE_MATRIX from conftest import cassandra_dir_and_version import ccmlib.repository from ccmlib.common import get_version_from_build from enum import Enum logger = logging.getLogger(__name__) UpgradePath = namedtuple('UpgradePath', ('name', 'starting_version', 'upgrade_version', 'starting_meta', 'upgrade_meta')) VERSION_FAMILY = None CONFIG = None CASSANDRA_2_0 = '2.0' CASSANDRA_2_1 = '2.1' CASSANDRA_2_2 = '2.2' CASSANDRA_3_0 = '3.0' CASSANDRA_3_11 = '3.11' CASSANDRA_4_0 = '4.0' CASSANDRA_4_0_X = '4.0.1' CASSANDRA_4_1 = '4.1' TRUNK = CASSANDRA_4_1 def is_same_family_current_to_indev(origin, destination): return origin.family == destination.family and origin.variant == "current" and destination.variant == "indev" class VersionSelectionStrategies(Enum): ALL=(lambda origin, destination: True,) BOTH=(lambda origin, destination: (origin.variant == destination.variant) or is_same_family_current_to_indev(origin,destination),) INDEV=(lambda origin, destination: origin.variant == 'indev' and destination.variant == 'indev' or is_same_family_current_to_indev(origin, destination),) RELEASES=(lambda origin, destination: not VersionSelectionStrategies.INDEV.value[0](origin, destination) or is_same_family_current_to_indev(origin, destination),) def set_config(config): global CONFIG CONFIG = config set_version_family() def set_version_family(): cassandra_version_slug = CONFIG.getoption("--cassandra-version") cassandra_dir = CONFIG.getoption("--cassandra-dir") or CONFIG.getini("cassandra_dir") if cassandra_version_slug: ccm_repo_cache_dir, _ = ccmlib.repository.setup(cassandra_version_slug) current_version = get_version_from_build(ccm_repo_cache_dir) else: current_version = get_version_from_build(cassandra_dir) if current_version.vstring.startswith('2.0'): version_family = CASSANDRA_2_0 elif current_version.vstring.startswith('2.1'): version_family = CASSANDRA_2_1 elif current_version.vstring.startswith('2.2'): version_family = CASSANDRA_2_2 elif current_version.vstring.startswith('3.0'): version_family = CASSANDRA_3_0 elif current_version.vstring.startswith('3.11'): version_family = CASSANDRA_3_11 elif current_version.vstring.startswith('4.0.0') or current_version.vstring.startswith('4.0-'): version_family = CASSANDRA_4_0 elif current_version.vstring.startswith('4.0'): version_family = CASSANDRA_4_0_X elif current_version.vstring.startswith('4.1'): version_family = CASSANDRA_4_1 else: raise RuntimeError("Testing upgrades from/to version %s is not supported. Please use a custom manifest (see upgrade_manifest.py)" % current_version.vstring) global VERSION_FAMILY VERSION_FAMILY = version_family logger.info("Setting version family to %s\n" % VERSION_FAMILY) class VersionMeta(namedtuple('_VersionMeta', ('name', 'family', 'variant', 'version', 'min_proto_v', 'max_proto_v', 'java_versions'))): @property def java_version(self): return max(self.java_versions) @property def matches_current_env_version_family(self): return self.family == VERSION_FAMILY @property def matches_current_env_version_family_and_is_indev(self): return self.family == VERSION_FAMILY and self.variant == "indev"
Apache License 2.0
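An illustration of the namedtuple._replace pattern clone_with_local_env_version relies on; the field names and values below are simplified stand-ins for VersionMeta.

from collections import namedtuple

Meta = namedtuple("Meta", ("name", "family", "version"))
meta = Meta(name="current_3_11_x", family="3.11", version="3.11.10")
clone = meta._replace(version="clone:/path/to/cassandra")  # new record, one field swapped
assert clone.version.startswith("clone:") and meta.version == "3.11.10"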
ns1/ns1-python
ns1/__init__.py
NS1.zones
python
def zones(self): import ns1.rest.zones return ns1.rest.zones.Zones(self.config)
Return a new raw REST interface to zone resources :rtype: :py:class:`ns1.rest.zones.Zones`
https://github.com/ns1/ns1-python/blob/0fed6588108dc1bfe683286dd422541e75f74ca0/ns1/__init__.py#L46-L54
from .config import Config version = "0.16.1" class NS1: def __init__(self, apiKey=None, config=None, configFile=None, keyID=None): self.config = config if self.config is None: self._loadConfig(apiKey, configFile) if keyID: self.config.useKeyID(keyID) def _loadConfig(self, apiKey, configFile): self.config = Config() if apiKey: self.config.createFromAPIKey(apiKey) else: configFile = ( Config.DEFAULT_CONFIG_FILE if not configFile else configFile ) self.config.loadFromFile(configFile)
MIT License
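A hedged usage sketch for NS1.zones, assuming the ns1 package is installed; the API key is a placeholder and the network call is left commented out.

from ns1 import NS1

api = NS1(apiKey="not-a-real-key")
zones = api.zones()  # raw REST interface; nothing is requested yet
# zones.retrieve("example.com")  # would issue an authenticated API request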
square/bionic
bionic/persistence.py
Inventory.find_entry
python
def find_entry(self, provenance): logger.debug("In %s inventory for %r, searching ...", self.tier, provenance) n_prior_attempts = 0 while True: if n_prior_attempts in (10, 100, 1000, 10000, 100000, 1000000): message = f""" While searching in the {self.tier} cache for an entry matching {provenance!r}, found {n_prior_attempts} invalid metadata files; either a lot of artifact files were manually deleted, or there's a bug in the cache code """ if n_prior_attempts == 1000000: raise AssertionError("Giving up: " + oneline(message)) else: logger.warn(oneline(message)) n_prior_attempts += 1 match = self._find_best_match(provenance) if not match: logger.debug( "... in %s inventory for %r, found no match", self.tier, provenance ) return InventoryEntry( tier=self.tier, provenance=None, exactly_matches_provenance=False, artifact=None, ) metadata_record = self._load_metadata_if_valid_else_delete( match.metadata_url ) if metadata_record is None: continue logger.debug( "... in %s inventory for %r, found %s match at %s", self.tier, provenance, match.level, match.metadata_url, ) return InventoryEntry( tier=self.tier, provenance=metadata_record.provenance, exactly_matches_provenance=(match.level == "exact"), artifact=metadata_record.artifact, )
Returns an InventoryEntry describing the closest match to the provided Provenance.
https://github.com/square/bionic/blob/357da8e2806996427e0aa6efd08f7ea8c5198f9b/bionic/persistence.py#L480-L535
import attr import cattr import os import shutil import tempfile from typing import List, Optional, Tuple import yaml import warnings from uuid import uuid4 from pathlib import Path from .datatypes import CodeFingerprint, Artifact from .utils.files import ( ensure_dir_exists, ensure_parent_dir_exists, ) from .utils.misc import hash_simple_obj_to_hex, oneline from .utils.urls import ( bucket_and_object_names_from_gs_url, derelativize_url, path_from_url, relativize_url, url_from_path, ) import logging logger = logging.getLogger(__name__) try: YamlDumper = yaml.CDumper YamlLoader = yaml.CLoader except AttributeError: running_under_readthedocs = os.environ.get("READTHEDOCS") == "True" if not running_under_readthedocs: warnings.warn( oneline( """ Failed to find LibYAML bindings; falling back to slower Python implementation. This may reduce performance on large flows. Installing LibYAML should resolve this.""" ) ) YamlDumper = yaml.Dumper YamlLoader = yaml.Loader class PersistentCache: def __init__(self, local_store, cloud_store): self._local_store = local_store self._cloud_store = cloud_store def get_accessor(self, task_key, provenance): return CacheAccessor(self, task_key, provenance) class CacheAccessor: def __init__(self, parent_cache, task_key, provenance): self.task_key = task_key self.provenance = provenance self._local = parent_cache._local_store self._cloud = parent_cache._cloud_store self._stored_local_entry = None self._stored_cloud_entry = None def flush_stored_entries(self): self._stored_local_entry = None self._stored_cloud_entry = None def can_load(self): try: return self._get_nearest_entry_with_artifact() is not None except InternalCacheStateError as e: self.raise_state_error_with_explanation(e) def load_provenance(self): try: entry = self._get_nearest_entry_with_artifact() if entry is None: return None return entry.provenance except InternalCacheStateError as e: self.raise_state_error_with_explanation(e) def load_artifact(self): try: entry = self._get_nearest_entry_with_artifact() if entry is None: return None return entry.artifact except InternalCacheStateError as e: self.raise_state_error_with_explanation(e) def replicate_and_load_local_artifact(self): try: entry = self._get_nearest_entry_with_artifact() if entry is None: return None if entry.tier == "local": local_artifact = entry.artifact elif entry.tier == "cloud": local_artifact = self._local_artifact_from_cloud(entry.artifact) else: raise AssertionError("Unrecognized tier: " + entry.tier) self._save_or_reregister_artifact(local_artifact) return local_artifact except InternalCacheStateError as e: self.raise_state_error_with_explanation(e) def save_local_artifact(self, artifact): try: self._save_or_reregister_artifact(artifact) except InternalCacheStateError as e: self.raise_state_error_with_explanation(e) def update_provenance(self): try: self._save_or_reregister_artifact(None) except InternalCacheStateError as e: self.raise_state_error_with_explanation(e) def generate_unique_local_dir_path(self): return self._local.generate_unique_dir_path(self.provenance) def raise_state_error_with_explanation(self, source_exc, preamble_message=None): stores = [self._local] if self._cloud: stores.append(self._cloud) inventory_root_urls = " and ".join(store.inventory.root_url for store in stores) message = f""" {preamble_message} Cached data may be in an invalid state; this should be impossible but could have resulted from either a bug or a change to the cached files. 
You should be able to repair the problem by removing all cached files under {inventory_root_urls}.""" if preamble_message is None: final_message = oneline(message) else: final_message = oneline(preamble_message) + "\n" + oneline(message) raise InvalidCacheStateError(final_message) from source_exc def _save_or_reregister_artifact(self, artifact): local_entry = self._get_local_entry() cloud_entry = self._get_cloud_entry() self.flush_stored_entries() local_artifact = artifact cloud_artifact = None if local_artifact is None: if local_entry.artifact is not None: local_artifact = local_entry.artifact else: if cloud_entry is None or cloud_entry.artifact is None: raise AssertionError( oneline( """ Attempted to register metadata with no artifact argument and no previously saved values; this suggests we called update_provenance() without previously finding a cached value, which shouldn't happen.""" ) ) cloud_artifact = cloud_entry.artifact local_artifact = self._local_artifact_from_cloud(cloud_artifact) if not local_entry.exactly_matches_provenance: local_entry = self._local.inventory.register_artifact( self.provenance, local_artifact, ) self._stored_local_entry = local_entry if self._cloud: assert cloud_entry is not None if not cloud_entry.exactly_matches_provenance: if cloud_artifact is None: if cloud_entry.artifact is not None: cloud_artifact = cloud_entry.artifact else: cloud_artifact = self._cloud_artifact_from_local(local_artifact) cloud_entry = self._cloud.inventory.register_artifact( self.provenance, cloud_artifact, ) self._stored_cloud_entry = cloud_entry def _get_nearest_entry_with_artifact(self): local_entry = self._get_local_entry() if local_entry.artifact is not None: return local_entry cloud_entry = self._get_cloud_entry() if cloud_entry is not None and cloud_entry.artifact is not None: return cloud_entry return None def _get_local_entry(self): if self._stored_local_entry is None: self._stored_local_entry = self._local.inventory.find_entry(self.provenance) return self._stored_local_entry def _get_cloud_entry(self): if self._stored_cloud_entry is None: if self._cloud is None: return None self._stored_cloud_entry = self._cloud.inventory.find_entry(self.provenance) return self._stored_cloud_entry def _local_artifact_from_cloud(self, cloud_artifact): dir_path = self._local.generate_unique_dir_path(self.provenance) filename = path_from_url(cloud_artifact.url).name file_path = dir_path / filename ensure_parent_dir_exists(file_path) logger.info("Downloading %s from GCS ...", self.task_key) try: self._cloud.download(file_path, cloud_artifact.url) except Exception as e: raise InternalCacheStateError.from_failure( "artifact blob", cloud_artifact.url, e, ) return Artifact( url=url_from_path(file_path), content_hash=cloud_artifact.content_hash, ) def _cloud_artifact_from_local(self, local_artifact): url_prefix = self._cloud.generate_unique_url_prefix(self.provenance) file_path = path_from_url(local_artifact.url) blob_url = url_prefix + "/" + file_path.name logger.info("Uploading %s to GCS ...", self.task_key) try: self._cloud.upload(file_path, blob_url) except Exception as e: raise InternalCacheStateError.from_failure("artifact file", file_path, e) return Artifact( url=blob_url, content_hash=local_artifact.content_hash, ) @attr.s(frozen=True) class NullableWrapper: value = attr.ib() @attr.s(frozen=True) class InventoryEntry: tier = attr.ib() provenance = attr.ib() exactly_matches_provenance = attr.ib() artifact = attr.ib() @attr.s(frozen=True) class MetadataMatch: metadata_url = attr.ib() level = 
attr.ib() @attr.s(frozen=True) class ExternalCacheItem: inventory = attr.ib() abs_artifact_url = attr.ib() abs_metadata_url = attr.ib() descriptor = attr.ib() class Inventory: def __init__(self, name, tier, filesystem): self.name = name self.tier = tier self._fs = filesystem self.root_url = filesystem.root_url def register_artifact(self, provenance, artifact): logger.debug( "In %s inventory for %r, saving artifact URL %s ...", self.tier, provenance, artifact.url, ) expected_metadata_url = self._exact_metadata_url_for_provenance(provenance) metadata_record = None if self._fs.exists(expected_metadata_url): logger.warn( "In %s cache, attempted to create duplicate entry mapping %r " "to %s", self.tier, provenance, artifact.url, ) metadata_record = self._load_metadata_if_valid_else_delete( expected_metadata_url, ) if metadata_record is None: metadata_url, metadata_record = self._create_and_write_metadata( provenance, artifact ) assert metadata_url == expected_metadata_url logger.debug( "... in %s inventory for %r, created metadata record at %s", self.tier, provenance, metadata_url, ) return InventoryEntry( tier=self.tier, provenance=metadata_record.provenance, exactly_matches_provenance=True, artifact=artifact, )
Apache License 2.0
lawsie/guizero
guizero/base.py
BaseWindow.__init__
python
def __init__(self, master, tk, title, width, height, layout, bg, visible): super(BaseWindow, self).__init__(master, tk, layout, False) self.tk.title( str(title) ) self.tk.geometry(str(width)+"x"+str(height)) self._on_close = None self._full_screen = False self._icon = None self._icon_cascade = True self.bg = bg self.tk.wm_protocol("WM_DELETE_WINDOW", self._close_window) self.visible = visible self.tk.update()
Base class for objects which use windows, e.g. `App` and `Window`.
https://github.com/lawsie/guizero/blob/7744c41a1e747ade2e5913638586073c27b4db9b/guizero/base.py#L478-L498
from .tkmixins import ( ScheduleMixin, DestroyMixin, EnableMixin, FocusMixin, DisplayMixin, TextMixin, ColorMixin, SizeMixin, LayoutMixin, EventsMixin) from . import utilities as utils from .event import EventManager from . import dialog from tkinter import BOTH, X, Y, YES class Base(): def __init__(self, tk): self._tk = tk self._tk_defaults = {} for key in self.tk.keys(): self._tk_defaults[key] = self.tk[key] @property def tk(self): return self._tk def _has_tk_config(self, key): return key in self.tk.keys() def _get_tk_config(self, key, default=False): if default: return self._tk_defaults[key] else: return self.tk[key] def _set_tk_config(self, keys, value): if isinstance(keys, str): keys = [keys] for key in keys: if key in self.tk.keys(): if value is None: self.tk[key] = self._tk_defaults[key] else: self.tk[key] = value def __repr__(self): return "guizero.{} object".format(self.__class__.__name__) class Component( Base, ScheduleMixin, DestroyMixin, FocusMixin, ColorMixin, EventsMixin): def __init__(self, master, tk, displayable): super(Component, self).__init__(tk) self._master = master self._events = EventManager(self, tk) self._displayable = displayable self._when_resized = None self._actual_height = None self._actual_width = None self.events.set_event("<Component.Configure>", "<Configure>", self._on_configure_change) if self.master is not None: if isinstance(master, Container): self.master._add_child(self) else: utils.raise_error("{}\nMaster is not an [App], [Window] or [Box]".format(self.description)) @property def master(self): return self._master @property def description(self): return "[{}] object".format(self.__class__.__name__) def __str__(self): return self.description @property def events(self): return self._events @property def displayable(self): return self._displayable @property def when_resized(self): return self._when_resized @when_resized.setter def when_resized(self, value): self._when_resized = value def destroy(self): if self.master is not None: self.master._remove_child(self) self.tk.destroy() def _on_configure_change(self, event): if event.tk_event.widget == self.tk: if self._actual_height != event.tk_event.height or self._actual_width != event.tk_event.width: self._actual_height = event.tk_event.height self._actual_width = event.tk_event.height if self._when_resized is not None: args_expected = utils.no_args_expected(self._when_resized) if args_expected == 0: self._when_resized() elif args_expected == 1: self._when_resized(event) else: utils.error_format("An event callback function must accept either 0 or 1 arguments.\nThe current callback has {} arguments.".format(args_expected)) class Container(Component): def __init__(self, master, tk, layout, displayable): super(Container, self).__init__(master, tk, displayable) self._children = [] self._layout_manager = layout self._bg = None self._text_color = None self._text_size = None self._font = None self._enabled = True if self.master is not None: self.bg = master.bg self.text_color = master.text_color self.text_size = master.text_size self.font = master.font @property def layout(self): return self._layout_manager @property def bg(self): return self._bg @bg.setter def bg(self, value): self._bg = utils.convert_color(value) super(Container, self.__class__).bg.fset(self, self._bg) for child in self.children: if isinstance(child, (Container, Widget)): child.bg = self._bg @property def text_color(self): return self._text_color @text_color.setter def text_color(self, value): self._text_color = utils.convert_color(value) for 
child in self.children: if isinstance(child, (Container, TextWidget)): child.text_color = self.text_color @property def text_size(self): return self._text_size @text_size.setter def text_size(self, value): self._text_size = value for child in self.children: if isinstance(child, (Container, TextWidget)): child.text_size = self.text_size @property def font(self): return self._font @font.setter def font(self, value): self._font = value for child in self.children: if isinstance(child, (Container, TextWidget)): child.font = self.font @property def children(self): return self._children def add_tk_widget(self, tk_widget, grid=None, align=None, visible=True, enabled=None, width=None, height=None): if self.tk is not tk_widget.master: utils.error_format("The tk widget's master is not '{}'.\nIt may not display correctly.".format(self.description)) return Widget(self, tk_widget, grid, align, visible, enabled, width, height) def _add_child(self, child): self.children.append(child) def _remove_child(self, child): self.children.remove(child) def display_widgets(self): for child in self.children: if child.displayable: if self.layout != "grid": child.tk.pack_forget() else: child.tk.grid_forget() if child.visible: if self.layout != "grid": self._pack_widget(child) else: self._grid_widget(child) def _pack_widget(self, widget): pack_params={} if widget.width == "fill" and widget.height == "fill": pack_params["fill"] = BOTH pack_params["expand"] = YES elif widget.width == "fill": pack_params["fill"] = X elif widget.height == "fill": pack_params["fill"] = Y if widget.align is not None: pack_params["side"] = widget.align if pack_params.get("side") is None and pack_params.get("fill") == Y: pack_params["expand"] = YES if pack_params.get("side") in ["top", "bottom"] and pack_params.get("fill") == Y: pack_params["expand"] = YES if pack_params.get("side") in ["left", "right"] and pack_params.get("fill") == X: pack_params["expand"] = YES widget.tk.pack(**pack_params) def _grid_widget(self, widget): grid_params = { "column": widget.grid[0], "row": widget.grid[1] } if len(widget.grid) == 4: grid_params["columnspan"] = widget.grid[2] grid_params["rowspan"] = widget.grid[3] if widget.align is not None: directions = {"top": "N", "bottom": "S", "left": "W", "right": "E"} grid_params["sticky"] = directions[widget.align] widget.tk.grid(**grid_params) @property def enabled(self): return self._enabled @enabled.setter def enabled(self, value): if value: self.enable() else: self.disable() def disable(self): self._enabled = False for child in self.children: if isinstance(child, (Container, Widget)): child.disable() def enable(self): self._enabled = True for child in self.children: if isinstance(child, (Container, Widget)): child.enable() class BaseWindow(Container):
BSD 3-Clause New or Revised License
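A plain-tkinter sketch of what BaseWindow.__init__ does with its title, size and close-handler arguments, assuming a display is available; guizero layers its Container and EventManager machinery on top.

import tkinter as tk

root = tk.Tk()
root.title("demo window")
root.geometry("500x300")  # the same "<width>x<height>" string BaseWindow builds
root.wm_protocol("WM_DELETE_WINDOW", root.destroy)
root.update()
root.destroy()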
altosaar/deep-exponential-families-gluon
common/util.py
score_grad_variance_callback
python
def score_grad_variance_callback(my_model): params = my_model.collect_params() param_grads = collections.defaultdict(lambda: []) for name, param in params.items(): if param.grad_req != 'null': grads = np.stack(param_grads[name]) param.grad_variance = np.mean(np.var(grads, axis=0)) param.grad_norm = np.mean(np.linalg.norm(grads, axis=-1)) for block in my_model.sequential._children: print(block.name, ':') print([(name, p.data().asnumpy().tolist()) for name, p in filter( lambda x: 'weight' in x[0] or 'bias' in x[0], block.collect_params().items())]) for child_block in block._children: print(child_block.name, ':') print('mean:', child_block.get_param_not_repeated('mean').asnumpy()) if hasattr(child_block, 'variance'): print('variance: ', child_block.get_param_not_repeated( 'variance').asnumpy())
Get score function gradient variance.
https://github.com/altosaar/deep-exponential-families-gluon/blob/80d69b54081f622c0012bb181aa6d8ab9a740f15/common/util.py#L101-L121
import os import logging import mxnet as mx import numpy as np import collections from mxnet import nd def log_to_file(filename): logging.basicConfig(level=logging.INFO, format='%(asctime)s %(name)-4s %(levelname)-4s %(message)s', datefmt='%m-%d %H:%M', filename=filename, filemode='a') console = logging.StreamHandler() console.setLevel(logging.INFO) logging.getLogger('').addHandler(console) def flatten(l): return [item for sublist in l for item in sublist] def softplus(x): return nd.Activation(x, act_type='softrelu') def np_softplus(x): return np.log(1. + np.exp(x)) class Softplus(object): def __init__(self): pass def __call__(self, x: nd.NDArray) -> nd.NDArray: return nd.Activation(x, act_type='softrelu') def backward(self, x): return nd.sigmoid(x) def sigmoid(x): return nd.Activation(x, act_type='sigmoid') def np_inverse_softplus(x): return np.log(np.exp(x) - 1.) def latest_checkpoint(directory): files = [f for f in os.listdir(directory) if 'params' in f] if len(files) > 0 and any('params' in f for f in files): l = sorted((int(f.split('-')[-1]), i) for i, f in enumerate(files)) return os.path.join(directory, files[l[-1][-1]]), l[-1][0] else: return None, None def repeat_emb(param, emb): res = nd.expand_dims(emb, 0) param.repeated = nd.repeat(res, repeats=param.n_repeats, axis=0) param.repeated.attach_grad() return param.repeated def pathwise_grad_variance_callback(my_model, data_batch): param_grads = collections.defaultdict(lambda: []) params = my_model.collect_params() n_samples_stats = 10 for i in range(n_samples_stats): with mx.autograd.record(): log_q_sum, elbo, sample = my_model(data_batch) my_model.compute_gradients(elbo, data_batch, log_q_sum) for name, param in params.items(): if param.grad_req != 'null': param_grads[name].append(param.grad().asnumpy()) def callback_elbo_sample(my_model, data_batch): n_samples_stats = 10 _, elbo, sample = my_model(data_batch) for _ in range(n_samples_stats): tmp_sample = nd.zeros_like(sample) tmp_elbo = nd.zeros_like(elbo) for _ in range(n_samples_stats): _, elbo, sample = my_model(data_batch) tmp_sample += sample tmp_elbo += elbo tmp_sample /= n_samples_stats tmp_elbo /= n_samples_stats tmp_sample = np.mean(tmp_sample.asnumpy(), 0) tmp_elbo = np.mean(tmp_elbo.asnumpy()) return tmp_elbo, tmp_sample
MIT License
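The core statistic computed by score_grad_variance_callback, shown on synthetic gradients; the arrays below stand in for repeated param.grad() samples.

import numpy as np

grads = np.stack([np.random.randn(4, 3) for _ in range(10)])  # (n_samples, ...) per parameter
grad_variance = np.mean(np.var(grads, axis=0))                # mean elementwise variance
grad_norm = np.mean(np.linalg.norm(grads, axis=-1))           # mean gradient norm
print(grad_variance, grad_norm)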
chandler37/immaculater
pyatdllib/ui/uicmd.py
_PerformLs
python
def _PerformLs(current_obj, location, state, recursive, show_uid, show_all, show_timestamps, view_filter_override=None): if show_all and isinstance(current_obj, container.Container): state.Print(_ListingForOneItem( show_uid, show_timestamps, current_obj, state.ToDoList(), '.')) state.Print(_ListingForOneItem( show_uid, show_timestamps, _FindParentOf(state, current_obj), state.ToDoList(), '..')) if hasattr(current_obj, 'items'): items = list(current_obj.items) if current_obj is state.ToDoList().root: items.insert(0, state.ToDoList().inbox) else: items = [current_obj] if state.CurrentSorting() == 'alpha' and not isinstance(current_obj, prj.Prj): items.sort(key=lambda x: '' if x.uid == 1 else x.name) items.sort(key=lambda x: 0 if isinstance(x, folder.Folder) or x.uid == 1 else 1) to_recurse = [] for item in items: the_view_filter = view_filter_override if view_filter_override is not None else state.ViewFilter() if show_all or the_view_filter.Show(item): q = _ListingForOneItem(show_uid, show_timestamps, item, state.ToDoList()) state.Print(q) if recursive and isinstance(item, container.Container): to_recurse.append((item, location)) for obj, loc in to_recurse: state.Print('') if loc: state.Print('%s%s%s:' % (loc if loc != FLAGS.pyatdl_separator else '', FLAGS.pyatdl_separator, obj.name)) else: state.Print('%s:' % obj.name) _PerformLs(obj, '%s%s%s' % (loc if loc != FLAGS.pyatdl_separator else '', FLAGS.pyatdl_separator, obj.name), state, recursive, show_uid, show_all, show_timestamps, view_filter_override)
Performs 'ls'. Args: current_obj: AuditableObject location: basestring state: State recursive: bool show_uid: bool show_all: bool show_timestamps: bool view_filter_override: None|ViewFilter
https://github.com/chandler37/immaculater/blob/13bfe8c949a16945d2195920375ad6d522664208/pyatdllib/ui/uicmd.py#L734-L792
from __future__ import absolute_import from __future__ import unicode_literals from __future__ import print_function import base64 import datetime import json import pipes import pytz import random import re import six from six.moves import xrange import time from absl import flags from google.protobuf import text_format from third_party.google.apputils.google.apputils import app from third_party.google.apputils.google.apputils import appcommands from typing import Any, Dict, List, Optional, Union from ..core import action from ..core import auditable_object from ..core import common from ..core import container from ..core import ctx from ..core import folder from ..core import prj from ..core import tdl from ..core import uid from ..core import view_filter from . import appcommandsutil from . import lexer from . import serialization from . import state as state_module FLAGS = flags.FLAGS flags.DEFINE_string('no_context_display_string', '<none>', 'Text to indicate the lack of a context') flags.DEFINE_string('timezone', 'UTC', 'Time zone in pytz format, e.g. "US/Eastern", "UTC", "US/Pacific"') flags.DEFINE_string('time_format', '%Y/%m/%d-%H:%M:%S', 'Format string for timestamps, used in, e.g., "ls"') flags.DEFINE_bool('pyatdl_allow_exceptions_in_batch_mode', False, 'In batch mode, allow exceptions? If so, they will be ' 'printed but execution will continue. If not, execution will ' 'be aborted.') flags.DEFINE_bool('seed_upon_creation', False, 'Run the command "seed" after creation of a to-do list?') class Error(Exception): class BadArgsError(Error, appcommandsutil.IncorrectUsageError): class NoSuchContainerError(Error): def _ProjectString(project: prj.Prj, path: List[folder.Folder]) -> str: ps = FLAGS.pyatdl_separator.join( state_module.State.SlashEscaped(x.name) for x in reversed(path)) return FLAGS.pyatdl_separator.join([ps, project.name]) def _CompleteStr(is_complete: bool) -> str: return '---COMPLETE---' if is_complete else '--incomplete--' def _ActiveStr(is_active: bool) -> str: return '---active---' if is_active else '--INACTIVE--' def _ListingForContext(show_uid: bool, show_timestamps: bool, context: Optional[ctx.Ctx]) -> str: if context is None: uid_str = 'uid=0 ' if show_uid else '' else: uid_str = ('uid=%s ' % context.uid) if show_uid else '' return '--context-- %s%s%s%s %s' % ( uid_str, _DeletedStr(context.is_deleted if context is not None else False), _ConcatenatedTimestampStr(context, show_timestamps), _ActiveStr(True if context is None else context.is_active), pipes.quote(context.name if context is not None else FLAGS.no_context_display_string)) def _ListingForOneItem(show_uid: bool, show_timestamps: bool, item: Union[folder.Folder, prj.Prj, action.Action], to_do_list: tdl.ToDoList, name_override: str = None, in_context_override: str = None) -> str: by_type = {folder.Folder: '--folder--- ', prj.Prj: '--project-- ', action.Action: '--action--- ', ctx.Ctx: '--context-- '} deleted_str = _DeletedStr(item.is_deleted) type_str = by_type[type(item)] lead = '%s%s%s' % ( type_str, deleted_str, _ConcatenatedTimestampStr(item, show_timestamps)) completed_str = '' if isinstance(item, (action.Action, prj.Prj)): completed_str = '%s ' % _CompleteStr(item.is_complete) active_str = '' if isinstance(item, (prj.Prj, ctx.Ctx)): active_str = '%s ' % _ActiveStr(item.is_active) in_context = '' if isinstance(item, action.Action): if item.ctx_uid is None: in_context = ' --in-context-- %s' % pipes.quote( FLAGS.no_context_display_string) else: context = to_do_list.ContextByUID(item.ctx_uid) if context 
is None: raise AssertionError( "The protobuf has a bad Context association with an Action. item.ctx_uid=%s item.uid=%s" % (item.ctx_uid, item.uid)) in_context = ' --in-context-- %s' % pipes.quote(context.name) effective_name = item.name or '' if name_override is not None: effective_name = name_override return '%s%s%s%s%s%s' % ( lead, 'uid=%s ' % item.uid if show_uid else '', completed_str, active_str, pipes.quote(effective_name), in_context_override if in_context_override is not None else in_context) def _JsonForOneItem(item: Optional[Union[folder.Folder, prj.Prj, action.Action, ctx.Ctx]], to_do_list: tdl.ToDoList, number_of_items: int, *, name_override: str = None, in_context_override: str = None, path_leaf_first: List[container.Container] = None, in_prj: str = None) -> Dict[str, Any]: name = FLAGS.no_context_display_string if item is None else item.name rv = { 'is_deleted': False if item is None else item.is_deleted, 'ctime': 0 if item is None else item.ctime, 'dtime': None if item is None or not item.is_deleted else item.dtime, 'mtime': 0 if item is None else item.mtime, 'is_complete': item.is_complete if isinstance(item, (prj.Prj, action.Action)) else False, 'uid': str(0 if item is None else item.uid), 'name': name_override if name_override is not None else name, 'number_of_items': number_of_items } if in_prj is not None: rv['in_prj'] = in_prj if isinstance(item, prj.Prj): rv['needsreview'] = bool(item.NeedsReview()) rv['default_context_uid'] = str(0 if item.default_context_uid is None else item.default_context_uid) if isinstance(item, action.Action): if item.ctx_uid is None: in_context = FLAGS.no_context_display_string else: context = to_do_list.ContextByUID(item.ctx_uid) if context is not None: in_context = context.name else: raise AssertionError( "The protobuf has a bad Context association with an Action. 
item.ctx_uid=%s item.uid=%s" % (item.ctx_uid, item.uid)) rv['in_context'] = in_context_override if in_context_override is not None else in_context rv['in_context_uid'] = str(item.ctx_uid) if item.ctx_uid is not None else None if item is None: rv['is_active'] = True if isinstance(item, (prj.Prj, ctx.Ctx)): rv['is_active'] = item.is_active if item is not None: rv['has_note'] = bool(item.note) if path_leaf_first is not None: rv['path'] = FLAGS.pyatdl_separator.join( state_module.State.SlashEscaped(x.name) for x in reversed(path_leaf_first)) if not rv['path']: rv['path'] = FLAGS.pyatdl_separator return rv def _TimestampStr(epoch_sec_or_none: Optional[float]) -> str: if epoch_sec_or_none is None: return '' tz = pytz.timezone(FLAGS.timezone) return datetime.datetime.fromtimestamp(epoch_sec_or_none, tz).strftime( FLAGS.time_format) def _ConcatenatedTimestampStr(c, show_timestamps): ctime_str = 'ctime=%s ' % _TimestampStr(0 if c is None else c.ctime) if show_timestamps else '' mtime_str = 'mtime=%s ' % _TimestampStr(0 if c is None else c.mtime) if show_timestamps else '' if show_timestamps and c is not None and c.dtime: dtime_str = 'dtime=%s ' % _TimestampStr(c.dtime) else: dtime_str = '' return '%s%s%s' % (mtime_str, ctime_str, dtime_str) def _DeletedStr(is_deleted): return '--DELETED-- ' if is_deleted else '' def NewToDoList(): t = tdl.ToDoList() if FLAGS.seed_upon_creation: APP_NAMESPACE.FindCmdAndExecute( state_module.State(lambda _: None, t, APP_NAMESPACE), ['seed']) return t def _RemovePrefix(prefix, text): match = re.match(prefix, text) if match is None: return text return text[len(match.group()):] def _Inboxize(state, note): def essence(line): return _RemovePrefix(r'@xfer\b', line.strip().lstrip(':-\u2013\u2014').lstrip()).lstrip() first_action = None note = note.replace(r'\n', '\n') beginning = 'You chose to process' for line in note.splitlines(): action = essence(line).strip() if not action: continue if first_action is None: first_action = action if action.startswith(beginning): raise BadArgsError('You already turned this note into Actions in the Inbox.') APP_NAMESPACE.FindCmdAndExecute( state, ['do', action]) if first_action is None: return '' return "\\n".join( [f'{beginning} the note that was here into', 'a sequence of Actions in the Inbox.', '', 'The first such action was the following:', f'\t- {first_action}', ]) def _LookupProject(state, argument): try: the_uid = lexer.ParseSyntaxForUID(argument) except lexer.Error as e: raise BadArgsError(e) if the_uid is not None: x = state.ToDoList().ProjectByUID(the_uid) if x is None: raise NoSuchContainerError('No Project exists with UID %s' % the_uid) the_project, the_path = x if not the_path: return the_project, None return the_project, the_path[0] try: dirname = state.DirName(argument) basename = state.BaseName(argument) if not basename: raise BadArgsError('Unexpected trailing "%s"' % FLAGS.pyatdl_separator) except state_module.InvalidPathError as e: raise BadArgsError(e) if not dirname and basename == '.': return (state.CurrentWorkingContainer(), _FindParentOf(state, state.CurrentWorkingContainer())) containr = state.GetContainerFromPath(dirname) if containr is state.ToDoList().root and basename == FLAGS.inbox_project_name: return state.ToDoList().inbox, None project_names = [] for item in containr.items: if isinstance(item, prj.Prj): if not item.is_deleted: project_names.append(item.name) if the_uid == item.uid or item.name == basename: return item, containr for item in containr.items: if isinstance(item, prj.Prj): if item.is_deleted and 
item.name == basename: return item, containr if project_names: raise BadArgsError( 'No such Project "%s". Choices: %s' % (basename, ' '.join(sorted(project_names)))) else: raise BadArgsError( 'No such Project "%s". There are no Projects in the current Folder.' % basename) def _LookupFolder(state, argument): try: the_uid = lexer.ParseSyntaxForUID(argument) except lexer.Error as e: raise BadArgsError(e) if the_uid is not None: x = state.ToDoList().FolderByUID(the_uid) if x is None: raise NoSuchContainerError('No Folder exists with UID %s' % the_uid) the_folder, unused_path = x return the_folder try: dirname = state.DirName(argument) basename = state.BaseName(argument) if not basename: raise BadArgsError('Unexpected trailing "%s"' % FLAGS.pyatdl_separator) except state_module.InvalidPathError as e: raise BadArgsError(e) containr = state.GetContainerFromPath(dirname) folder_names = [] for item in containr.items: if isinstance(item, folder.Folder): folder_names.append(item.name) if item.name == basename: return item if folder_names: raise NoSuchContainerError( 'No such Folder "%s". Choices: %s' % (basename, ' '.join(sorted(folder_names)))) else: raise NoSuchContainerError( 'No such Folder "%s". There are no Folders within the specified Folder.' % basename) def _LookupAction(state, argument): try: the_uid = lexer.ParseSyntaxForUID(argument) except lexer.Error as e: raise BadArgsError(e) if the_uid is not None: x = state.ToDoList().ActionByUID(the_uid) if x is None: raise NoSuchContainerError('No Action with UID %s exists.' % the_uid) return x try: dirname = state.DirName(argument) basename = state.BaseName(argument) if not basename: raise BadArgsError('Unexpected trailing "%s"' % FLAGS.pyatdl_separator) except state_module.Error as e: raise BadArgsError(e) try: containr = state.GetContainerFromPath(dirname) except state_module.Error as e: raise BadArgsError(e) if not isinstance(containr, prj.Prj): raise BadArgsError( 'This command only makes sense inside a Project, not inside "%s". See "help pwd".' % (containr.name if containr.name else FLAGS.pyatdl_separator,)) action_names = [] for item in containr.items: assert isinstance(item, action.Action), str(item) action_names.append(item.name) if the_uid == item.uid or item.name == basename: return item, containr if action_names: raise BadArgsError( 'No such Action "%s". Choices: %s' % (basename, ' '.join(sorted(action_names)))) else: raise BadArgsError( 'No such Action "%s". There are no Actions in the current Project.' 
% basename) def _LookupContext(state, argument): if argument == 'uid=0' or argument == FLAGS.no_context_display_string: return None try: the_uid = lexer.ParseSyntaxForUID(argument) except lexer.Error as e: raise BadArgsError(e) if the_uid is not None: return state.ToDoList().ContextByUID(the_uid) return state.ToDoList().ContextByName(argument) def _ExecuteUICmd(the_state: state_module.State, argv: List[str]) -> None: try: try: APP_NAMESPACE.FindCmdAndExecute(the_state, argv) except AssertionError as e: raise AssertionError('argv=%s err=%s' % (argv, str(e))) except (appcommandsutil.CmdNotFoundError, appcommandsutil.InvalidUsageError, appcommandsutil.IncorrectUsageError) as e: raise AssertionError('argv=%s error=%s' % (argv, str(e))) class UICmd(appcommands.Cmd): @staticmethod def RaiseUnlessNArgumentsGiven(n: int, args: List[str]) -> None: assert n >= 1 if len(args) != n + 1: if len(args) < 2: if n == 1: raise BadArgsError('Needs a single positional argument; found none') else: raise BadArgsError('Needs %d positional arguments; found none' % n) else: if n == 1: raise BadArgsError('Needs a single positional argument; found these: %s' % repr(args[1:])) else: raise BadArgsError('Needs %d positional arguments; found these: %s' % (n, repr(args[1:]))) @staticmethod def RaiseIfAnyArgumentsGiven(args): if len(args) != 1: raise BadArgsError( 'Takes no arguments; found these arguments: %s' % repr(args[1:])) class UICmdEcho(UICmd): def __init__(self, name, flag_values, **kargs): super().__init__(name, flag_values, **kargs) flags.DEFINE_bool('stdout', False, 'For debugging, output directly to stdout.', flag_values=flag_values) def Run(self, args): p = ' '.join(x for x in args[1:]) state = FLAGS.pyatdl_internal_state if FLAGS.stdout: print(p) else: state.Print(p) class UICmdEcholines(UICmd): def Run(self, args): state = FLAGS.pyatdl_internal_state for x in args[1:]: state.Print(x) class UICmdChclock(UICmd): def Run(self, args): self.RaiseUnlessNArgumentsGiven(1, args) arg = args[-1] relative_not_absolute = False if arg.startswith('+'): relative_not_absolute = True arg = arg[1:] if arg.startswith('+'): raise BadArgsError('A leading \'++\' makes no sense.') assert arg, arg try: a_float = float(arg) except ValueError: raise BadArgsError( 'Needs a numeric argument, seconds since the epoch (1970 CE). To move ' 'the clock relative to the old clock, prepend the argument with \'+\'. The argument: %s' % (repr(arg),)) if a_float < 0 and not relative_not_absolute: raise BadArgsError('Minimum value is 0, a.k.a. 1970 CE.') if relative_not_absolute: old_time = time.time def NewTime(): return old_time() + a_float time.time = NewTime else: def AbsoluteNewTime(): return a_float time.time = AbsoluteNewTime class UICmdLs(UICmd): def __init__(self, name, flag_values, **kargs): super().__init__(name, flag_values, **kargs) flags.DEFINE_bool('show_all', False, 'Additionally lists everything, even hidden objects, ' 'overriding the view filter', short_name='a', flag_values=flag_values) flags.DEFINE_bool('recursive', False, 'Additionally lists subdirectories/subprojects recursively', short_name='R', flag_values=flag_values) flags.DEFINE_bool('show_timestamps', False, 'Additionally lists timestamps ctime, dtime, mtime', short_name='l', flag_values=flag_values) flags.DEFINE_enum('view_filter', None, sorted(view_filter.CLS_BY_UI_NAME), 'Instead of using the global view filter (see "help ' 'view"), override it and use this view filter. 
Note: ' 'this is ignored in --show_all mode', short_name='v', flag_values=flag_values) def Run(self, args): state = FLAGS.pyatdl_internal_state override = None if FLAGS.view_filter: override = state.NewViewFilter( filter_cls=view_filter.CLS_BY_UI_NAME[FLAGS.view_filter]) def DoIt(obj, location): _PerformLs(obj, location, state, recursive=FLAGS.recursive, show_uid=FLAGS.pyatdl_show_uid, show_all=FLAGS.show_all, show_timestamps=FLAGS.show_timestamps, view_filter_override=override) if len(args) == 1: DoIt(state.CurrentWorkingContainer(), '.') else: for i, name in enumerate(args[1:]): try: dirname = state.DirName(name) basename = state.BaseName(name) if not basename and name != FLAGS.pyatdl_separator: raise BadArgsError( 'Unexpected trailing "%s"; dirname=%s and basename=%s' % (FLAGS.pyatdl_separator, dirname, basename)) obj = state.GetObjectFromPath(name) except state_module.InvalidPathError as e: raise BadArgsError(e) if isinstance(obj, container.Container) and len(args) > 2: state.Print('%s:' % name) DoIt(obj, dirname) if i < len(args) - 2: state.Print('') def _FindParentOf(state, obj): item = None if isinstance(obj, ctx.Ctx): return state.ToDoList().root for (c, path) in state.ToDoList().ContainersPreorder(): if c.uid == obj.uid: if path: item = path[0] else: item = state.ToDoList().root break for subitem in c.items: if subitem.uid == obj.uid: item = c break if item is not None: break else: raise AssertionError( 'Cannot happen. %s %s %s' % (state.CurrentWorkingContainer().name, str(state.ToDoList().root), obj.uid)) return item
Apache License 2.0
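The UICmdChclock command in the context above fakes the wall clock by monkeypatching time.time, either to an absolute epoch value or, with a leading '+', to an offset from the real clock. Below is a minimal standalone sketch of that relative-offset pattern; the helper name is mine, not the project's, and it uses only the standard library.

import time

def shift_clock(offset_seconds):
    # Same trick UICmdChclock uses for a '+N' argument: capture the real time.time
    # and replace it with a version shifted by a fixed offset.
    real_time = time.time

    def shifted():
        return real_time() + offset_seconds

    time.time = shifted

shift_clock(3600.0)   # every caller of time.time() now sees a clock one hour ahead
print(time.time())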
pinterest/kingpin
kingpin/manageddata/decider.py
Decider.is_hashed_id_in_experiment
python
def is_hashed_id_in_experiment(self, unique_id, experiment_name):
    decider_value = self.get_decider_value(experiment_name)
    hash_val = hashlib.md5("decider_%s%s" % (experiment_name, unique_id)).hexdigest()
    val = int(hash_val, 16) % 100
    return val < decider_value
Checks if a decider should be active given an ID (e.g. of a user, random request, etc.). This function computes a hash of the user ID and the decider, so different deciders will get different random samples of users.

Ex: decider.is_hashed_id_in_experiment(context.viewing_user.id, config.decider.NEW_BOARD_PIN_TUPLES_READ)
https://github.com/pinterest/kingpin/blob/baea08ae941a4e57edb9129658fe3e7d40e4d0c3/kingpin/manageddata/decider.py#L99-L114
import logging import hashlib import random from managed_datastructures import ManagedHashMap from ..kazoo_utils.decorators import SingletonMetaclass log = logging.getLogger(__name__) class Decider(object): __metaclass__ = SingletonMetaclass def __init__(self): self.initialized = False def initialize(self, zk_hosts, aws_keyfile, s3_bucket, s3_endpoint="s3.amazonaws.com"): self.decider_map = ManagedHashMap("admin", "decider", "Decider Values", "Map of decider name and its value", zk_hosts, aws_keyfile, s3_bucket, s3_endpoint=s3_endpoint) def check_initialized(self): if not self.initialized: raise Exception("Decider not initialized! Please call initialize()") def get_decider_value(self, experiment_name, default=0): experiment_name = experiment_name.lower() decider_value = self.decider_map.get(experiment_name) if decider_value is None: decider_value = default return decider_value def decide_experiment(self, experiment_name, default=False): default_value = 100 if default else 0 exp_value = self.get_decider_value(experiment_name, default=default_value) if exp_value == 100: return True if exp_value == 0: return False return random.randrange(0, 100, 1) < exp_value def is_id_in_experiment(self, decider_id, experiment_name): decider_value = self.get_decider_value(experiment_name) decider_id %= 100 return decider_id < decider_value
Apache License 2.0
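To see the bucketing behind is_hashed_id_in_experiment in isolation, the hash arithmetic can be restated on its own. The sketch below is mine rather than part of kingpin, the experiment name is a placeholder, and the key is encoded to bytes because hashlib.md5 in Python 3 rejects str (the original module is Python 2).

import hashlib

def bucket_for(unique_id, experiment_name):
    # Same scheme as Decider.is_hashed_id_in_experiment: md5 of "decider_<name><id>",
    # reduced modulo 100, yields a stable bucket in [0, 100).
    key = ("decider_%s%s" % (experiment_name, unique_id)).encode("utf-8")
    return int(hashlib.md5(key).hexdigest(), 16) % 100

# An id falls inside the experiment when its bucket is below the configured decider value.
print(bucket_for(12345, "new_board_pin_tuples_read") < 50)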
davemlz/eemont
eemont/extra.py
require
python
def require(module):
    return ee_require(module)
Loads and executes a JavaScript GEE module. All modules must be first installed before requiring them. After requiring the module, it can be used in the same way as it is used in the Code Editor.

Warning
-------
This method is highly :code:`experimental`. Please report any irregularities in the Issues Page of `eeExtra <https://github.com/r-earthengine/ee_extra>`_.

Parameters
----------
module : str
    Path to the module in the Code Editor (e.g. "users/dmlmont/spectral:spectral").

Returns
-------
BoxDict
    Loaded module. Methods and attributes can be accessed using dot notation.

See Also
--------
install : Installs a JavaScript GEE module.
uninstall : Uninstalls a JavaScript GEE module.

Examples
--------
>>> import ee, eemont
>>> ee.Authenticate()
>>> ee.Initialize()
>>> LandsatLST = ee.require("users/sofiaermida/landsat_smw_lst:modules/Landsat_LST.js")
https://github.com/davemlz/eemont/blob/f8eb4099b5c1d07d217d6c1be054dc33c9283a00/eemont/extra.py#L10-L43
import ee
from ee_extra.JavaScript.install import install as ee_install
from ee_extra.JavaScript.install import uninstall as ee_uninstall
from ee_extra.JavaScript.main import ee_require

from .extending import extend


@extend(ee)
MIT License
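The docstring's Examples section already shows require; the sketch below only adds the install step it says must happen first. It assumes eemont also attaches the companion install helper (named in the See Also section) to the ee module, and it reuses the module path from the record's example.

import ee
import eemont  # noqa: F401 -- importing eemont is what patches require onto the ee module

ee.Initialize()  # ee.Authenticate() may be needed first on a new machine

module_path = "users/sofiaermida/landsat_smw_lst:modules/Landsat_LST.js"
ee.install(module_path)              # one-time download of the JavaScript module (assumed helper)
LandsatLST = ee.require(module_path)
# LandsatLST can now be used with dot notation, as in the Code Editor.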
a3data/hermione
hermione/module_templates/__IMPLEMENTED_SAGEMAKER__/src/ml/analysis/feature_selection.py
FeatureSelector.inverse_transform
python
def inverse_transform(self, df: pd.DataFrame):
    pass
Apply the inverse_transform of vectorizer to each column. Options: index, bag_of_words and tf_idf

Parameters
----------
df : pd.DataFrame
    dataframe with columns to be unvectorized

Returns
-------
pd.DataFrame
https://github.com/a3data/hermione/blob/4a833e96664fc91c65bdd28b2637c291f4f5a4d6/hermione/module_templates/__IMPLEMENTED_SAGEMAKER__/src/ml/analysis/feature_selection.py#L317-L333
from sklearn.feature_selection import VarianceThreshold from sklearn.feature_selection import SelectKBest from sklearn.feature_selection import SelectPercentile from sklearn.feature_selection import RFE from sklearn.feature_selection import SelectFromModel from sklearn.feature_selection import SequentialFeatureSelector from mlxtend.feature_selection import ExhaustiveFeatureSelector from abc import ABC, abstractmethod import numpy as np import pandas as pd class SelectAlgorithm(ABC): def transform(self, df: pd.DataFrame): return df[df.columns[self.selected_columns]] def get_support(self): return self.selected_columns @abstractmethod def fit(self) -> None: pass class SelectCoefficients(SelectAlgorithm): def __init__(self, model, num_feat = None): self.model = model self.num_feat = num_feat def fit(self, X: pd.DataFrame, y = None): self.num_feat = int(X.shape[1]/2) if self.num_feat == None else self.num_feat trained_model = self.model.fit(X,y) self.selected_columns = np.argsort(np.abs(trained_model.coef_.ravel()))[-self.num_feat:] class SelectCorrelation(SelectAlgorithm): def __init__(self, threshold = 1.0): self.threshold = threshold def fit(self, X: pd.DataFrame, y = None): corr = X.corr() self.selected_columns = np.full((corr.shape[0],), True, dtype=bool) [self.check_correlation(corr.iloc[i,j],j) for i in range(corr.shape[0]) for j in range(i+1, corr.shape[0])] def check_correlation(self,corr,j): if np.abs(corr) >= self.threshold and self.selected_columns[j]: self.selected_columns[j] = False class MyExhaustiveFeatureSelector(ExhaustiveFeatureSelector): def get_support(self): return list(self.best_idx_) class SelectEnsemble(SelectAlgorithm): def __init__(self, dic_selection: dict, num_feat = None): self.dic_selection = dic_selection self.num_feat = num_feat def fit(self, X: pd.DataFrame, y = None): self.num_feat = int(X.shape[1]/2) if self.num_feat == None else self.num_feat self.column_dic = {} for i,column in enumerate(X.columns): self.column_dic[column] = i self.column_count = [0 for column in X.columns] selections = [FeatureSelector(selector,**self.dic_selection[selector]) for selector in self.dic_selection] [selection.fit(X,y) for selection in selections] [self.increment_count(column) for selection in selections for column in selection.selected_columns] self.selected_columns = np.argsort(self.column_count)[-self.num_feat:] def increment_count(self,column): self.column_count[self.column_dic[column]]+=1 class FeatureSelector: def __init__(self, selector, **kwargs): self.selector = selector self.selectors = {'variance': VarianceThreshold, 'univariate_kbest': SelectKBest, 'univariate_percentile': SelectPercentile, 'recursive': RFE, 'model':SelectFromModel, 'sequential':SequentialFeatureSelector, 'exaustive':MyExhaustiveFeatureSelector, 'correlation':SelectCorrelation, 'coefficients':SelectCoefficients, 'ensemble':SelectEnsemble} self.kwargs = kwargs self.fitted = False def fit(self, X: pd.DataFrame, y = None): self.columns = X.columns self.selection = self.selectors[self.selector](**self.kwargs) self.selection.fit(X,y) self.selected_columns = self.columns[self.selection.get_support()] self.fitted = True def transform(self, df: pd.DataFrame): if not self.fitted: raise Exception("Not yet trained.") return df[self.selected_columns]
Apache License 2.0
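inverse_transform above is currently a stub (it just passes), but the FeatureSelector class shown in the context is usable on its own. A rough sketch of its fit/transform flow follows; the DataFrame is made up and the import path is only a guess based on the record's function_path.

import numpy as np
import pandas as pd

from ml.analysis.feature_selection import FeatureSelector  # assumed import path

X = pd.DataFrame(np.random.rand(100, 5), columns=list("abcde"))
y = np.random.randint(0, 2, size=100)

fs = FeatureSelector("correlation", threshold=0.9)  # drop one column of each highly correlated pair
fs.fit(X, y)
X_reduced = fs.transform(X)   # keeps only fs.selected_columns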
brendanhasz/probflow
src/probflow/utils/validation.py
ensure_tensor_like
python
def ensure_tensor_like(obj, name):
    if isinstance(obj, (int, float, np.ndarray, list)):
        return
    if get_backend() == "pytorch":
        import torch

        tensor_types = (torch.Tensor, BaseParameter)
    else:
        import tensorflow as tf

        tensor_types = (tf.Tensor, tf.Variable, BaseParameter)
    if not isinstance(obj, tensor_types):
        raise TypeError(name + " must be Tensor-like")
Determine whether an object can be cast to a Tensor
https://github.com/brendanhasz/probflow/blob/27fade8d85d37ffc0193862d0329c9255f3c74e7/src/probflow/utils/validation.py#L18-L35
import numpy as np

from probflow.utils.base import BaseParameter
from probflow.utils.settings import get_backend
MIT License
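ensure_tensor_like is a guard: it returns silently for acceptable values and raises TypeError otherwise. A small illustration, assuming probflow is installed and one of its backends (TensorFlow or PyTorch) is importable so the failing case reaches the backend check.

import numpy as np

from probflow.utils.validation import ensure_tensor_like

ensure_tensor_like(np.zeros(3), "loc")    # fine: ndarrays pass the first isinstance check
ensure_tensor_like([1.0, 2.0], "scale")   # fine: plain lists are accepted as well

try:
    ensure_tensor_like("not a tensor", "scale")
except TypeError as err:
    print(err)   # -> scale must be Tensor-like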
twidi/mixt
src/mixt/vendor/pytypes/util.py
get_class_that_defined_method
python
def get_class_that_defined_method(meth):
    if is_classmethod(meth):
        return meth.__self__
    if hasattr(meth, 'im_class'):
        return meth.im_class
    elif hasattr(meth, '__qualname__'):
        try:
            cls_names = meth.__qualname__.split('.<locals>', 1)[0].rsplit('.', 1)[0].split('.')
            cls = inspect.getmodule(meth)
            for cls_name in cls_names:
                cls = getattr(cls, cls_name)
            if isinstance(cls, type):
                return cls
        except AttributeError:
            pass
    raise ValueError(str(meth)+' is not a method.')
Determines the class owning the given method.
https://github.com/twidi/mixt/blob/adeff652784f0d814835fd16a8cacab09f426922/src/mixt/vendor/pytypes/util.py#L416-L438
from mixt.vendor import pytypes import subprocess import hashlib import sys import os import inspect import traceback from warnings import warn_explicit _code_callable_dict = {} _sys_excepthook = sys.__excepthook__ def _check_python3_5_version(): try: ver = subprocess.check_output([pytypes.python3_5_executable, '--version']) ver = ver[:-1].split(' ')[-1].split('.') return (int(ver[0]) >= 3 and int(ver[1]) >= 5) except Exception: return False def _md5(fname): m = hashlib.md5() with open(fname, 'rb') as f: for chunk in iter(lambda: f.read(4096), b''): m.update(chunk) return m.hexdigest() def _python_version_string(): try: impl = sys.subversion[0] except AttributeError: impl = sys.implementation.name if impl == 'cpython': impl = 'CPython' lst = [impl, '.'.join([str(x) for x in sys.version_info[:3]]), ' '.join([str(x) for x in sys.version_info[3:]])] return '%s %s %s' % tuple(lst) def _full_module_file_name_nosuffix(module_name): module = sys.modules[module_name] bn = os.path.basename(module.__file__).partition('.')[0] if not (module.__package__ is None or module.__package__ == ''): return module.__package__.replace('.', os.sep)+os.sep+bn else: return bn def _find_files(file_name, search_paths): res = [] if os.path.isfile(file_name): res.append(file_name) if search_paths is None: return res for path in search_paths: if not path.endswith(os.sep): file_path = path+os.sep+file_name else: file_path = path+file_name if os.path.isfile(file_path): res.append(file_path) return res def getargspecs(func): if func is None: raise TypeError('None is not a Python function') if hasattr(func, 'ch_func'): return getargspecs(func.ch_func) elif hasattr(func, 'ov_func'): return getargspecs(func.ov_func) if hasattr(inspect, 'getfullargspec'): return inspect.getfullargspec(func) else: return inspect.getargspec(func) def get_required_kwonly_args(argspecs): try: kwonly = argspecs.kwonlyargs if argspecs.kwonlydefaults is None: return kwonly res = [] for name in kwonly: if not name in argspecs.kwonlydefaults: res.append(name) return res except AttributeError: return [] def getargnames(argspecs, with_unbox=False): args = argspecs.args vargs = argspecs.varargs try: kw = argspecs.keywords except AttributeError: kw = argspecs.varkw try: kwonly = argspecs.kwonlyargs except AttributeError: kwonly = None res = [] if not args is None: res.extend(args) if not vargs is None: res.append('*'+vargs if with_unbox else vargs) if not kwonly is None: res.extend(kwonly) if not kw is None: res.append('**'+kw if with_unbox else kw) return res def getargskw(args, kw, argspecs): return _getargskw(args, kw, argspecs)[0] def _getargskw(args, kw, argspecs): res = [] err = False try: kwds = argspecs.keywords except AttributeError: kwds = argspecs.varkw if not kwds is None: used = set() if len(args) > len(argspecs.args): if not argspecs.varargs is None: res.extend(args[:len(argspecs.args)]) res.append(args[len(argspecs.args):]) else: err = True res.extend(args) elif len(args) < len(argspecs.args): res.extend(args) ipos = -len(argspecs.args)+len(res) for name in argspecs.args[len(args):]: if name in kw: res.append(kw[name]) if not kwds is None: used.add(name) elif not argspecs.defaults is None: res.append(argspecs.defaults[ipos]) else: err = True ipos += 1 if not argspecs.varargs is None: res.append(tuple()) else: res.extend(args) if not argspecs.varargs is None: res.append(tuple()) try: ipos = -len(argspecs.kwonlyargs) for name in argspecs.kwonlyargs: if name in kw: res.append(kw[name]) if not kwds is None: used.add(name) else: if not 
argspecs.kwonlydefaults is None and name in argspecs.kwonlydefaults: res.append(argspecs.kwonlydefaults[name]) else: err = True ipos += 1 except AttributeError: pass except TypeError: err = True if not kwds is None: if len(used) > 0: kw2 = {} if len(used) < len(kw): for name in kw: if not name in used: kw2[name] = kw[name] res.append(kw2) else: res.append(kw) return tuple(res), err def fromargskw(argskw, argspecs, slf_or_clsm = False): res_args = argskw try: kwds = argspecs.keywords except AttributeError: kwds = argspecs.varkw if not kwds is None: res_kw = argskw[-1] res_args = argskw[:-1] else: res_kw = None if not argspecs.varargs is None: vargs_pos = (len(argspecs.args)-1) if slf_or_clsm else len(argspecs.args) if vargs_pos > 0: res_lst = list(argskw[:vargs_pos]) res_lst.extend(argskw[vargs_pos]) res_args = tuple(res_lst) else: res_args = argskw[0] try: if len(argspecs.kwonlyargs) > 0: res_kw = {} if res_kw is None else dict(res_kw) ipos = -len(argspecs.kwonlyargs) - (0 if kwds is None else 1) for name in argspecs.kwonlyargs: res_kw[name] = argskw[ipos] ipos += 1 except AttributeError: pass if res_kw is None: res_kw = {} return res_args, res_kw def _unchecked_backend(func): if hasattr(func, 'ov_func'): return _unchecked_backend(func.ov_func) elif hasattr(func, 'ch_func'): return _unchecked_backend(func.ch_func) else: return func def _actualfunc(func, prop_getter = False): if type(func) == classmethod or type(func) == staticmethod: return _actualfunc(func.__func__, prop_getter) if isinstance(func, property): if prop_getter: return _actualfunc(func.fget, prop_getter) else: return _actualfunc(func.fget if func.fset is None else func.fset, prop_getter) elif hasattr(func, 'ov_func'): return _actualfunc((func.ov_func), prop_getter) elif hasattr(func, 'ch_func'): return _actualfunc((func.ch_func), prop_getter) return func def _get_class_nesting_list_for_staticmethod(staticmeth, module_or_class, stack, rec_set): if hasattr(module_or_class, _actualfunc(staticmeth).__name__): val = getattr(module_or_class, _actualfunc(staticmeth).__name__) bck = _unchecked_backend(staticmeth) try: if _unchecked_backend(val) is bck.__func__: return stack except AttributeError: pass if _unchecked_backend(val) is bck: return stack classes = [cl[1] for cl in inspect.getmembers(module_or_class, inspect.isclass)] mod_name = module_or_class.__module__ if inspect.isclass(module_or_class) else module_or_class.__name__ for cl in classes: if cl.__module__ == mod_name and not cl in rec_set: stack.append(cl) rec_set.add(cl) result = _get_class_nesting_list_for_staticmethod(staticmeth, cl, stack, rec_set) if not result is None: return result stack.pop() return None def _get_class_nesting_list_py2(cls, module_or_class, stack, rec_set): classes = [cl[1] for cl in inspect.getmembers(module_or_class, inspect.isclass)] mod_name = module_or_class.__module__ if inspect.isclass(module_or_class) else module_or_class.__name__ for cl in classes: if cl.__module__ == mod_name and not cl in rec_set: if cl is cls: return stack stack.append(cl) rec_set.add(cl) result = _get_class_nesting_list_py2(cls, cl, stack, rec_set) if not result is None: return result stack.pop() return None def _get_class_nesting_list(cls, module_or_class): if hasattr(cls, '__qualname__'): names = cls.__qualname__.split('.') cl = module_or_class res = [] for name in names[:-1]: cl = getattr(cl, name) res.append(cl) return res else: res = _get_class_nesting_list_py2(cls, module_or_class, [], set()) return [] if res is None else res def 
get_staticmethod_qualname(staticmeth): func = _actualfunc(staticmeth) module = sys.modules[func.__module__] nst = _get_class_nesting_list_for_staticmethod(staticmeth, module, [], set()) nst = [cl.__name__ for cl in nst] return '.'.join(nst)+'.'+func.__name__ def get_class_qualname(cls): if hasattr(cls, '__qualname__'): return cls.__qualname__ module = sys.modules[cls.__module__] if not hasattr(cls, '__name__'): res = cls._name if not cls._name is None else cls.__origin__.__name__ return res if hasattr(module, cls.__name__) and getattr(module, cls.__name__) is cls: return cls.__name__ else: nst = _get_class_nesting_list(cls, module) nst.append(cls) nst = [cl.__name__ for cl in nst] return '.'.join(nst) return cls.__name__ def search_class_module(cls, deep_search=True): for md_name in sys.modules: module = sys.modules[md_name] if hasattr(module, cls.__name__) and getattr(module, cls.__name__) is cls: return module if deep_search: for md_name in sys.modules: module = sys.modules[md_name] try: nst = _get_class_nesting_list(cls, module) if cls is nst[-1]: return module except: pass return None
MIT License
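A toy illustration of the lookup above: for an ordinary Python 3 method, the __qualname__ branch walks from the defining module down to the owning class. The import path is inferred from the record's function_path and may differ in an installed package.

from mixt.vendor.pytypes.util import get_class_that_defined_method  # assumed path

class Greeter:
    def hello(self):
        return "hi"

print(get_class_that_defined_method(Greeter.hello))     # -> <class '__main__.Greeter'>
print(get_class_that_defined_method(Greeter().hello))   # same class for the bound method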
jeeftor/alfredtoday
src/lib/pyexchange/exchange2010/__init__.py
Exchange2010CalendarEvent.resend_invitations
python
def resend_invitations(self):
    if not self.id:
        raise TypeError(u"You can't send invites for an event that hasn't been created yet.")

    if self._dirty_attributes:
        raise ValueError(u"There are unsaved changes to this invite - please update it first: %r" % self._dirty_attributes)

    self.refresh_change_key()
    body = soap_request.update_item(self, [], calendar_item_update_operation_type=u'SendOnlyToAll')
    self.service.send(body)

    return self
Resends invites for an event.

::

    event = service.calendar().get_event(id='KEY HERE')
    event.resend_invitations()

Anybody who has not declined this meeting will get a new invite.
https://github.com/jeeftor/alfredtoday/blob/f6e2c2228caa71015e654e1fdbf552e2ca4f90ad/src/lib/pyexchange/exchange2010/__init__.py#L267-L289
import logging from ..base.calendar import BaseExchangeCalendarEvent, BaseExchangeCalendarService, ExchangeEventOrganizer, ExchangeEventResponse from ..base.folder import BaseExchangeFolder, BaseExchangeFolderService from ..base.soap import ExchangeServiceSOAP from ..exceptions import FailedExchangeException, ExchangeStaleChangeKeyException, ExchangeItemNotFoundException, ExchangeInternalServerTransientErrorException, ExchangeIrresolvableConflictException, InvalidEventType from ..compat import BASESTRING_TYPES from . import soap_request from lxml import etree from copy import deepcopy from datetime import date import warnings log = logging.getLogger("pyexchange") class Exchange2010Service(ExchangeServiceSOAP): def calendar(self, id="calendar"): return Exchange2010CalendarService(service=self, calendar_id=id) def mail(self): raise NotImplementedError("Sorry - nothin' here. Feel like adding it? :)") def contacts(self): raise NotImplementedError("Sorry - nothin' here. Feel like adding it? :)") def folder(self): return Exchange2010FolderService(service=self) def _send_soap_request(self, body, headers=None, retries=2, timeout=30, encoding="utf-8"): headers = { "Accept": "text/xml", "Content-type": "text/xml; charset=%s " % encoding } return super(Exchange2010Service, self)._send_soap_request(body, headers=headers, retries=retries, timeout=timeout, encoding=encoding) def _check_for_errors(self, xml_tree): super(Exchange2010Service, self)._check_for_errors(xml_tree) self._check_for_exchange_fault(xml_tree) def _check_for_exchange_fault(self, xml_tree): response_codes = xml_tree.xpath(u'//m:ResponseCode', namespaces=soap_request.NAMESPACES) if not response_codes: raise FailedExchangeException(u"Exchange server did not return a status response", None) for code in response_codes: if code.text == u"ErrorChangeKeyRequiredForWriteOperations": raise ExchangeStaleChangeKeyException(u"Exchange Fault (%s) from Exchange server" % code.text) elif code.text == u"ErrorItemNotFound": raise ExchangeItemNotFoundException(u"Exchange Fault (%s) from Exchange server" % code.text) elif code.text == u"ErrorIrresolvableConflict": raise ExchangeIrresolvableConflictException(u"Exchange Fault (%s) from Exchange server" % code.text) elif code.text == u"ErrorInternalServerTransientError": raise ExchangeInternalServerTransientErrorException(u"Exchange Fault (%s) from Exchange server" % code.text) elif code.text == u"ErrorCalendarOccurrenceIndexIsOutOfRecurrenceRange": pass elif code.text != u"NoError": raise FailedExchangeException(u"Exchange Fault (%s) from Exchange server" % code.text) class Exchange2010CalendarService(BaseExchangeCalendarService): def event(self, id=None, **kwargs): return Exchange2010CalendarEvent(service=self.service, id=id, **kwargs) def get_event(self, id): return Exchange2010CalendarEvent(service=self.service, id=id) def new_event(self, **properties): return Exchange2010CalendarEvent(service=self.service, calendar_id=self.calendar_id, **properties) def list_events(self, start=None, end=None, details=False): return Exchange2010CalendarEventList(service=self.service, start=start, end=end, details=details) class Exchange2010CalendarEventList(object): def __init__(self, service=None, start=None, end=None, details=False): self.service = service self.count = 0 self.start = start self.end = end self.events = list() self.event_ids = list() self.details = details body = soap_request.get_calendar_items(format=u'AllProperties', start=self.start, end=self.end) response_xml = self.service.send(body) 
self._parse_response_for_all_events(response_xml) for event in self.events: self.event_ids.append(event._id) if self.details: log.debug(u'Received request for all details, retrieving now!') self.load_all_details() return def _parse_response_for_all_events(self, response): items = response.xpath(u'//m:FindItemResponseMessage/m:RootFolder/t:Items/t:CalendarItem', namespaces=soap_request.NAMESPACES) if not items: items = response.xpath(u'//m:GetItemResponseMessage/m:Items/t:CalendarItem', namespaces=soap_request.NAMESPACES) if items: self.count = len(items) log.debug(u'Found %s items' % self.count) for item in items: self._add_event(xml=soap_request.M.Items(deepcopy(item))) else: log.debug(u'No calendar items found with search parameters.') return self def _add_event(self, xml=None): log.debug(u'Adding new event to all events list.') event = Exchange2010CalendarEvent(service=self.service, xml=xml) log.debug(u'Subject of new event is %s' % event.subject) self.events.append(event) return self def load_all_details(self): log.debug(u"Loading all details") if self.count > 0: del(self.events[:]) log.debug(u"Requesting all event details for events: {event_list}".format(event_list=str(self.event_ids))) body = soap_request.get_item(exchange_id=self.event_ids, format=u'AllProperties') response_xml = self.service.send(body) self._parse_response_for_all_events(response_xml) return self class Exchange2010CalendarEvent(BaseExchangeCalendarEvent): def _init_from_service(self, id): log.debug(u'Creating new Exchange2010CalendarEvent object from ID') body = soap_request.get_item(exchange_id=id, format=u'AllProperties') response_xml = self.service.send(body) properties = self._parse_response_for_get_event(response_xml) self._update_properties(properties) self._id = id log.debug(u'Created new event object with ID: %s' % self._id) self._reset_dirty_attributes() return self def _init_from_xml(self, xml=None): log.debug(u'Creating new Exchange2010CalendarEvent object from XML') properties = self._parse_response_for_get_event(xml) self._update_properties(properties) self._id, self._change_key = self._parse_id_and_change_key_from_response(xml) log.debug(u'Created new event object with ID: %s' % self._id) self._reset_dirty_attributes() return self def as_json(self): raise NotImplementedError def validate(self): if self.recurrence is not None: if not (isinstance(self.recurrence_end_date, date)): raise ValueError('recurrence_end_date must be of type date') elif (self.recurrence_end_date < self.start.date()): raise ValueError('recurrence_end_date must be after start') if self.recurrence == u'daily': if not (isinstance(self.recurrence_interval, int) and 1 <= self.recurrence_interval <= 999): raise ValueError('recurrence_interval must be an int in the range from 1 to 999') elif self.recurrence == u'weekly': if not (isinstance(self.recurrence_interval, int) and 1 <= self.recurrence_interval <= 99): raise ValueError('recurrence_interval must be an int in the range from 1 to 99') if self.recurrence_days is None: raise ValueError('recurrence_days is required') for day in self.recurrence_days.split(' '): if day not in self.WEEKLY_DAYS: raise ValueError('recurrence_days received unknown value: %s' % day) elif self.recurrence == u'monthly': if not (isinstance(self.recurrence_interval, int) and 1 <= self.recurrence_interval <= 99): raise ValueError('recurrence_interval must be an int in the range from 1 to 99') elif self.recurrence == u'yearly': pass else: raise ValueError('recurrence received unknown value: %s' % self.recurrence) 
super(Exchange2010CalendarEvent, self).validate() def create(self): self.validate() body = soap_request.new_event(self) response_xml = self.service.send(body) self._id, self._change_key = self._parse_id_and_change_key_from_response(response_xml) return self
MIT License
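The docstring's example presumes an existing service object; the fragment below wraps the same call pattern in the not-found exception that the context imports. It is a sketch only: `service` stands in for an already-authenticated Exchange2010Service, the event key is a placeholder, and the exception's module path is assumed.

from pyexchange.exceptions import ExchangeItemNotFoundException  # assumed module path

def resend_for(service, event_key):
    # `service` is expected to be a ready-to-use Exchange2010Service instance.
    try:
        event = service.calendar().get_event(id=event_key)
        event.resend_invitations()
    except ExchangeItemNotFoundException:
        print("Event %s no longer exists on the server" % event_key)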
databand-ai/dbnd
modules/dbnd/src/dbnd/_vendor/dulwich/repo.py
BaseRepo._init_files
python
def _init_files(self, bare):
    from dbnd._vendor.dulwich.config import ConfigFile

    self._put_named_file("description", b"Unnamed repository")
    f = BytesIO()
    cf = ConfigFile()
    cf.set("core", "repositoryformatversion", "0")
    if self._determine_file_mode():
        cf.set("core", "filemode", True)
    else:
        cf.set("core", "filemode", False)

    cf.set("core", "bare", bare)
    cf.set("core", "logallrefupdates", True)
    cf.write_to_file(f)
    self._put_named_file("config", f.getvalue())
    self._put_named_file(os.path.join("info", "exclude"), b"")
Initialize a default set of named files.
https://github.com/databand-ai/dbnd/blob/ec0076f9a142b20e2f7afd886ed1a18683c553ec/modules/dbnd/src/dbnd/_vendor/dulwich/repo.py#L212-L229
from io import BytesIO import errno import os import sys import stat import time from dbnd._vendor.dulwich.errors import ( NoIndexPresent, NotBlobError, NotCommitError, NotGitRepository, NotTreeError, NotTagError, CommitError, RefFormatError, HookError, ) from dbnd._vendor.dulwich.file import GitFile from dbnd._vendor.dulwich.object_store import ( DiskObjectStore, MemoryObjectStore, ObjectStoreGraphWalker, ) from dbnd._vendor.dulwich.objects import ( check_hexsha, Blob, Commit, ShaFile, Tag, Tree, ) from dbnd._vendor.dulwich.pack import pack_objects_to_data from dbnd._vendor.dulwich.hooks import ( PreCommitShellHook, PostCommitShellHook, CommitMsgShellHook, ) from dbnd._vendor.dulwich.refs import ( check_ref_format, RefsContainer, DictRefsContainer, InfoRefsContainer, DiskRefsContainer, read_packed_refs, read_packed_refs_with_peeled, write_packed_refs, SYMREF, ) import warnings CONTROLDIR = ".git" OBJECTDIR = "objects" REFSDIR = "refs" REFSDIR_TAGS = "tags" REFSDIR_HEADS = "heads" INDEX_FILENAME = "index" COMMONDIR = "commondir" GITDIR = "gitdir" WORKTREES = "worktrees" BASE_DIRECTORIES = [ ["branches"], [REFSDIR], [REFSDIR, REFSDIR_TAGS], [REFSDIR, REFSDIR_HEADS], ["hooks"], ["info"], ] DEFAULT_REF = b"refs/heads/master" class InvalidUserIdentity(Exception): def __init__(self, identity): self.identity = identity def check_user_identity(identity): try: fst, snd = identity.split(b" <", 1) except ValueError: raise InvalidUserIdentity(identity) if b">" not in snd: raise InvalidUserIdentity(identity) def parse_graftpoints(graftpoints): grafts = {} for l in graftpoints: raw_graft = l.split(None, 1) commit = raw_graft[0] if len(raw_graft) == 2: parents = raw_graft[1].split() else: parents = [] for sha in [commit] + parents: check_hexsha(sha, "Invalid graftpoint") grafts[commit] = parents return grafts def serialize_graftpoints(graftpoints): graft_lines = [] for commit, parents in graftpoints.items(): if parents: graft_lines.append(commit + b" " + b" ".join(parents)) else: graft_lines.append(commit) return b"\n".join(graft_lines) class BaseRepo(object): def __init__(self, object_store, refs): self.object_store = object_store self.refs = refs self._graftpoints = {} self.hooks = {} def _determine_file_mode(self): raise NotImplementedError(self._determine_file_mode)
Apache License 2.0
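To see the core config that _init_files assembles, the same ConfigFile calls can be replayed in isolation. The sketch uses a plain dulwich install instead of the dbnd._vendor copy, which is an assumption on my part; the filemode value normally comes from _determine_file_mode().

from io import BytesIO

from dulwich.config import ConfigFile  # stand-in for dbnd._vendor.dulwich.config

cf = ConfigFile()
cf.set("core", "repositoryformatversion", "0")
cf.set("core", "filemode", True)        # _init_files derives this from _determine_file_mode()
cf.set("core", "bare", False)
cf.set("core", "logallrefupdates", True)

buf = BytesIO()
cf.write_to_file(buf)
print(buf.getvalue().decode())          # roughly the text written to .git/config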
hbldh/pyefd
pyefd.py
elliptic_fourier_descriptors
python
def elliptic_fourier_descriptors(
    contour, order=10, normalize=False, return_transformation=False
):
    dxy = np.diff(contour, axis=0)
    dt = np.sqrt((dxy ** 2).sum(axis=1))
    t = np.concatenate([([0.0]), np.cumsum(dt)])
    T = t[-1]
    phi = (2 * np.pi * t) / T

    orders = np.arange(1, order + 1)
    consts = T / (2 * orders * orders * np.pi * np.pi)
    phi = phi * orders.reshape((order, -1))
    d_cos_phi = np.cos(phi[:, 1:]) - np.cos(phi[:, :-1])
    d_sin_phi = np.sin(phi[:, 1:]) - np.sin(phi[:, :-1])
    cos_phi = (dxy[:, 0] / dt) * d_cos_phi
    a = consts * np.sum(cos_phi, axis=1)
    b = consts * np.sum((dxy[:, 0] / dt) * d_sin_phi, axis=1)
    c = consts * np.sum((dxy[:, 1] / dt) * d_cos_phi, axis=1)
    d = consts * np.sum((dxy[:, 1] / dt) * d_sin_phi, axis=1)

    coeffs = np.concatenate(
        [
            a.reshape((order, 1)),
            b.reshape((order, 1)),
            c.reshape((order, 1)),
            d.reshape((order, 1)),
        ],
        axis=1,
    )

    if normalize:
        coeffs = normalize_efd(coeffs, return_transformation=return_transformation)

    return coeffs
Calculate elliptical Fourier descriptors for a contour.

:param numpy.ndarray contour: A contour array of size ``[M x 2]``.
:param int order: The order of Fourier coefficients to calculate.
:param bool normalize: If the coefficients should be normalized; see references for details.
:param bool return_transformation: If the normalization parameters should be returned. Default is ``False``.
:return: A ``[order x 4]`` array of Fourier coefficients and optionally the transformation parameters ``scale``, ``psi_1`` (rotation) and ``theta_1`` (phase)
:rtype: :py:class:`numpy.ndarray` or (:py:class:`numpy.ndarray`, (float, float, float))
https://github.com/hbldh/pyefd/blob/17da03001365a24a9570790133ea39b58d5cd2c0/pyefd.py#L37-L84
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
from __future__ import absolute_import

import numpy as np

try:
    _range = xrange
except NameError:
    _range = range
MIT License
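A minimal call of elliptic_fourier_descriptors on a toy contour; the square below is invented purely to exercise the signature documented above.

import numpy as np

from pyefd import elliptic_fourier_descriptors

# A closed square as an [M x 2] contour (first point repeated to close it).
square = np.array([[0, 0], [1, 0], [1, 1], [0, 1], [0, 0]], dtype=float)
coeffs = elliptic_fourier_descriptors(square, order=5, normalize=True)
print(coeffs.shape)   # -> (5, 4): one (a_n, b_n, c_n, d_n) row per harmonic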
sdispater/orator
orator/orm/relations/relation.py
Relation.__init__
python
def __init__(self, query, parent):
    self._query = query
    self._parent = parent
    self._related = query.get_model()
    self._extra_query = None

    self.add_constraints()
:param query: A Builder instance
:type query: orm.orator.Builder

:param parent: The parent model
:type parent: Model
https://github.com/sdispater/orator/blob/0666e522be914db285b6936e3c36801fc1a9c2e7/orator/orm/relations/relation.py#L13-L26
from contextlib import contextmanager

from ...query.expression import QueryExpression
from ..collection import Collection
from ..builder import Builder


class Relation(object):

    _constraints = True
MIT License
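Relation is only used through concrete subclasses (HasOne, BelongsTo, and so on), so the constructor above is exercised indirectly. The subclass below is illustrative rather than part of orator's API; it just makes explicit that __init__ stores the builder and parent model and immediately calls add_constraints.

from orator.orm.relations.relation import Relation

class NoopRelation(Relation):
    """Toy relation that applies no constraints (illustrative only)."""

    def add_constraints(self):
        pass   # a real relation would filter self._query here

    def get_results(self):
        return self._query.get()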
theetcher/fxpt
fxpt/side_utils/profilehooks.py
FuncTimer.__call__
python
def __call__(self, *args, **kw):
    fn = self.fn
    timer = self.timer
    self.ncalls += 1
    try:
        start = timer()
        return fn(*args, **kw)
    finally:
        duration = timer() - start
        self.totaltime += duration
        if self.immediate:
            funcname = fn.__name__
            filename = fn.__code__.co_filename
            lineno = fn.__code__.co_firstlineno
            sys.stderr.write("\n %s (%s:%s):\n %.3f seconds\n\n" % (
                funcname, filename, lineno, duration
            ))
            sys.stderr.flush()
Profile a single call to the function.
https://github.com/theetcher/fxpt/blob/d40c571885c7c4056f548a60140740c7beb8703d/fxpt/side_utils/profilehooks.py#L731-L749
__author__ = "Marius Gedminas <[email protected]>" __copyright__ = "Copyright 2004-2014 Marius Gedminas" __license__ = "MIT" __version__ = "1.7.1" __date__ = "2014-12-02" import atexit import inspect import sys import re from profile import Profile import pstats try: import hotshot import hotshot.stats except ImportError: hotshot = None import trace if hotshot is not None: import _hotshot import hotshot.log try: import cProfile except ImportError: cProfile = None import time AVAILABLE_PROFILERS = {} __all__ = ['coverage', 'coverage_with_hotshot', 'profile', 'timecall'] def profile(fn=None, skip=0, filename=None, immediate=False, dirs=False, sort=None, entries=40, profiler=('cProfile', 'profile', 'hotshot')): if fn is None: def decorator(fn): return profile(fn, skip=skip, filename=filename, immediate=immediate, dirs=dirs, sort=sort, entries=entries, profiler=profiler) return decorator if isinstance(profiler, str): profiler = [profiler] for p in profiler: if p in AVAILABLE_PROFILERS: profiler_class = AVAILABLE_PROFILERS[p] break else: raise ValueError('only these profilers are available: %s' % ', '.join(sorted(AVAILABLE_PROFILERS))) fp = profiler_class(fn, skip=skip, filename=filename, immediate=immediate, dirs=dirs, sort=sort, entries=entries) def new_fn(*args, **kw): return fp(*args, **kw) new_fn.__doc__ = fn.__doc__ new_fn.__name__ = fn.__name__ new_fn.__dict__ = fn.__dict__ new_fn.__module__ = fn.__module__ return new_fn def coverage(fn): fp = TraceFuncCoverage(fn) def new_fn(*args, **kw): return fp(*args, **kw) new_fn.__doc__ = fn.__doc__ new_fn.__name__ = fn.__name__ new_fn.__dict__ = fn.__dict__ new_fn.__module__ = fn.__module__ return new_fn def coverage_with_hotshot(fn): fp = HotShotFuncCoverage(fn) def new_fn(*args, **kw): return fp(*args, **kw) new_fn.__doc__ = fn.__doc__ new_fn.__name__ = fn.__name__ new_fn.__dict__ = fn.__dict__ new_fn.__module__ = fn.__module__ return new_fn class FuncProfile(object): in_profiler = False Profile = Profile def __init__(self, fn, skip=0, filename=None, immediate=False, dirs=False, sort=None, entries=40): self.fn = fn self.skip = skip self.filename = filename self.immediate = immediate self.dirs = dirs self.sort = sort or ('cumulative', 'time', 'calls') if isinstance(self.sort, str): self.sort = (self.sort, ) self.entries = entries self.reset_stats() atexit.register(self.atexit) def __call__(self, *args, **kw): self.ncalls += 1 if self.skip > 0: self.skip -= 1 self.skipped += 1 return self.fn(*args, **kw) if FuncProfile.in_profiler: return self.fn(*args, **kw) profiler = self.Profile() try: FuncProfile.in_profiler = True return profiler.runcall(self.fn, *args, **kw) finally: FuncProfile.in_profiler = False self.stats.add(profiler) if self.immediate: self.print_stats() self.reset_stats() def print_stats(self): funcname = self.fn.__name__ filename = self.fn.__code__.co_filename lineno = self.fn.__code__.co_firstlineno print("") print("*** PROFILER RESULTS ***") print("%s (%s:%s)" % (funcname, filename, lineno)) if self.skipped: skipped = " (%d calls not profiled)" % self.skipped else: skipped = "" print("function called %d times%s" % (self.ncalls, skipped)) print("") stats = self.stats if self.filename: stats.dump_stats(self.filename) if not self.dirs: stats.strip_dirs() stats.sort_stats(*self.sort) stats.print_stats(self.entries) def reset_stats(self): self.stats = pstats.Stats(Profile()) self.ncalls = 0 self.skipped = 0 def atexit(self): if not self.immediate: self.print_stats() AVAILABLE_PROFILERS['profile'] = FuncProfile if cProfile is not None: 
class CProfileFuncProfile(FuncProfile): Profile = cProfile.Profile AVAILABLE_PROFILERS['cProfile'] = CProfileFuncProfile if hotshot is not None: class HotShotFuncProfile(FuncProfile): in_profiler = False def __init__(self, fn, skip=0, filename=None, immediate=False, dirs=False, sort=None, entries=40): if filename: self.logfilename = filename + ".raw" else: self.logfilename = fn.__name__ + ".prof" super(HotShotFuncProfile, self).__init__( fn, skip=skip, filename=filename, immediate=immediate, dirs=dirs, sort=sort, entries=entries) def __call__(self, *args, **kw): self.ncalls += 1 if self.skip > 0: self.skip -= 1 self.skipped += 1 return self.fn(*args, **kw) if HotShotFuncProfile.in_profiler: return self.fn(*args, **kw) if self.profiler is None: self.profiler = hotshot.Profile(self.logfilename) try: HotShotFuncProfile.in_profiler = True return self.profiler.runcall(self.fn, *args, **kw) finally: HotShotFuncProfile.in_profiler = False if self.immediate: self.print_stats() self.reset_stats() def print_stats(self): if self.profiler is None: self.stats = pstats.Stats(Profile()) else: self.profiler.close() self.stats = hotshot.stats.load(self.logfilename) super(HotShotFuncProfile, self).print_stats() def reset_stats(self): self.profiler = None self.ncalls = 0 self.skipped = 0 AVAILABLE_PROFILERS['hotshot'] = HotShotFuncProfile class HotShotFuncCoverage: def __init__(self, fn): self.fn = fn self.logfilename = fn.__name__ + ".cprof" self.profiler = _hotshot.coverage(self.logfilename) self.ncalls = 0 atexit.register(self.atexit) def __call__(self, *args, **kw): self.ncalls += 1 old_trace = sys.gettrace() try: return self.profiler.runcall(self.fn, args, kw) finally: sys.settrace(old_trace) def atexit(self): self.profiler.close() funcname = self.fn.__name__ filename = self.fn.__code__.co_filename lineno = self.fn.__code__.co_firstlineno print("") print("*** COVERAGE RESULTS ***") print("%s (%s:%s)" % (funcname, filename, lineno)) print("function called %d times" % self.ncalls) print("") fs = FuncSource(self.fn) reader = hotshot.log.LogReader(self.logfilename) for what, (filename, lineno, funcname), tdelta in reader: if filename != fs.filename: continue if what == hotshot.log.LINE: fs.mark(lineno) if what == hotshot.log.ENTER: if lineno == fs.firstlineno: lineno = fs.firstcodelineno fs.mark(lineno) reader.close() print(fs) never_executed = fs.count_never_executed() if never_executed: print("%d lines were not executed." % never_executed) class TraceFuncCoverage: tracer = trace.Trace(count=True, trace=False, ignoredirs=[sys.prefix, sys.exec_prefix]) tracing = False def __init__(self, fn): self.fn = fn self.logfilename = fn.__name__ + ".cprof" self.ncalls = 0 atexit.register(self.atexit) def __call__(self, *args, **kw): self.ncalls += 1 if TraceFuncCoverage.tracing: return self.fn(*args, **kw) old_trace = sys.gettrace() try: TraceFuncCoverage.tracing = True return self.tracer.runfunc(self.fn, *args, **kw) finally: sys.settrace(old_trace) TraceFuncCoverage.tracing = False def atexit(self): funcname = self.fn.__name__ filename = self.fn.__code__.co_filename lineno = self.fn.__code__.co_firstlineno print("") print("*** COVERAGE RESULTS ***") print("%s (%s:%s)" % (funcname, filename, lineno)) print("function called %d times" % self.ncalls) print("") fs = FuncSource(self.fn) for (filename, lineno), count in self.tracer.counts.items(): if filename != fs.filename: continue fs.mark(lineno, count) print(fs) never_executed = fs.count_never_executed() if never_executed: print("%d lines were not executed." 
% never_executed) class FuncSource: blank_rx = re.compile(r"^\s*finally:\s*(#.*)?$") def __init__(self, fn): self.fn = fn self.filename = inspect.getsourcefile(fn) self.sourcelines = {} self.source = [] self.firstlineno = self.firstcodelineno = 0 try: self.source, self.firstlineno = inspect.getsourcelines(fn) self.firstcodelineno = self.firstlineno self.find_source_lines() except IOError: self.filename = None def find_source_lines(self): if self.filename is None: return strs = trace.find_strings(self.filename) lines = trace.find_lines_from_code(self.fn.__code__, strs) for lineno in lines: self.sourcelines.setdefault(lineno, 0) if lines: self.firstcodelineno = min(lines) else: self.firstcodelineno = self.firstlineno def mark(self, lineno, count=1): self.sourcelines[lineno] = self.sourcelines.get(lineno, 0) + count def count_never_executed(self): lineno = self.firstlineno counter = 0 for line in self.source: if self.sourcelines.get(lineno) == 0: if not self.blank_rx.match(line): counter += 1 lineno += 1 return counter def __str__(self): if self.filename is None: return "cannot show coverage data since co_filename is None" lines = [] lineno = self.firstlineno for line in self.source: counter = self.sourcelines.get(lineno) if counter is None: prefix = ' ' * 7 elif counter == 0: if self.blank_rx.match(line): prefix = ' ' * 7 else: prefix = '>' * 6 + ' ' else: prefix = '%5d: ' % counter lines.append(prefix + line) lineno += 1 return ''.join(lines) def timecall(fn=None, immediate=True, timer=None): if fn is None: def decorator(fn): return timecall(fn, immediate=immediate, timer=timer) return decorator if timer is None: timer = time.time fp = FuncTimer(fn, immediate=immediate, timer=timer) def new_fn(*args, **kw): return fp(*args, **kw) new_fn.__doc__ = fn.__doc__ new_fn.__name__ = fn.__name__ new_fn.__dict__ = fn.__dict__ new_fn.__module__ = fn.__module__ return new_fn class FuncTimer(object): def __init__(self, fn, immediate, timer): self.fn = fn self.ncalls = 0 self.totaltime = 0 self.immediate = immediate self.timer = timer if not immediate: atexit.register(self.atexit)
MIT License
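FuncTimer.__call__ is what runs when a function wrapped with @timecall is invoked; with immediate=True (the default) the duration is written to stderr after every call. A minimal use of the decorator defined in this module:

import time

from profilehooks import timecall

@timecall
def slow_add(a, b):
    time.sleep(0.1)
    return a + b

slow_add(1, 2)   # prints roughly "slow_add (<file>:<line>): 0.100 seconds" to stderr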
adaptivepele/adaptivepele
AdaptivePELE/utilities/utilities.py
getMetricsFromReportsInEpoch
python
def getMetricsFromReportsInEpoch(reportName, outputFolder, nTrajs):
    metrics = []
    for i in range(1, nTrajs):
        report = np.loadtxt(os.path.join(outputFolder, reportName % i))
        if len(report.shape) < 2:
            metrics.append(report.tolist()+[i, 0])
        else:
            traj_line = np.array([i] * report.shape[0])
            snapshot_line = np.array(range(report.shape[0]))
            metrics.extend(np.hstack((report, traj_line[:, np.newaxis], snapshot_line[:, np.newaxis])))
    return np.array(metrics)
Extract the metrics in report file from an epoch to a numpy array
https://github.com/adaptivepele/adaptivepele/blob/b7c908a53a2ba9ec19fa81a517377cc365176036/AdaptivePELE/utilities/utilities.py#L640-L653
from __future__ import absolute_import, division, print_function, unicode_literals import os import ast import sys import glob import json import errno import socket import shutil import string from builtins import range import six from six import reraise as raise_ import numpy as np import mdtraj as md from scipy import linalg try: import cPickle as pickle except ImportError: import pickle from AdaptivePELE.atomset import RMSDCalculator, atomset from AdaptivePELE.freeEnergies import utils try: import multiprocessing as mp PARALELLIZATION = True except ImportError: PARALELLIZATION = False class UnsatisfiedDependencyException(Exception): __module__ = Exception.__module__ class RequiredParameterMissingException(Exception): __module__ = Exception.__module__ class ImproperParameterValueException(Exception): __module__ = Exception.__module__ class UnspecifiedPELECrashException(Exception): __module__ = Exception.__module__ class Topology(object): def __init__(self, path): self.path = os.path.abspath(path) self.topologies = [] self.topologyMap = {} self.topologyFiles = [] def __getitem__(self, key): return self.topologies[key] def __iter__(self): for top in self.topologies: yield top def cleanTopologies(self): files = glob.glob(os.path.join(self.path, "topology*.pdb")) for f in files: os.remove(f) def writeTopologyObject(self): writeObject(os.path.join(self.path, "topologies.pkl"), self, protocol=2) def setTopologies(self, topologyFiles, cleanFiles=True): if self.topologies: self.topologies = [] self.topologyFiles = [] if cleanFiles: self.cleanTopologies() for top in topologyFiles: self.topologies.append(getTopologyFile(top)) self.topologyFiles.append(os.path.abspath(top)) def topologyFilesIterator(self): for top_file in self.topologyFiles: yield top_file def mapEpochTopologies(self, epoch, trajectoryMapping): mapping = trajectoryMapping[1:]+[trajectoryMapping[0]] self.topologyMap[epoch] = [self.topologyMap[i_epoch][i_traj-1] for i_epoch, i_traj, _ in mapping] def getTopology(self, epoch, trajectory_number): return self.topologies[self.topologyMap[epoch][trajectory_number-1]] def getTopologyFile(self, epoch, trajectory_number): return self.topologyFiles[self.topologyMap[epoch][trajectory_number-1]] def getTopologyFromIndex(self, index): return self.topologies[index] def getTopologyIndex(self, epoch, trajectory_number): return self.topologyMap[epoch][trajectory_number-1] def writeMappingToDisk(self, epochDir, epoch): with open(epochDir+"/topologyMapping.txt", "w") as f: f.write("%s\n" % ':'.join(map(str, self.topologyMap[epoch]))) def readMappingFromDisk(self, epochDir, epoch): try: with open(epochDir+"/topologyMapping.txt") as f: self.topologyMap[epoch] = list(map(int, f.read().rstrip().split(':'))) except IOError: sys.stderr.write("WARNING: topologyMapping.txt not found, you might not be able to recronstruct fine-grained pathways\n") class TopologyCompat(object): def __init__(self, pdb_file): self.topologyFiles = os.path.abspath(pdb_file) self.path = os.path.split(self.topologyFiles)[0] self.topologies = [getTopologyFile(self.topologyFiles)] def getTopologyFile(self, epoch, trajectory_number): return self.topologyFiles def topologyFilesIterator(self): yield self.topologyFiles def getTopologyIndex(self, epoch, trajectory_number): return 0 def getTopology(self, epoch, trajectory_number): return self.topologies[0] def cleanup(tmpFolder): try: shutil.rmtree(tmpFolder) except OSError as exc: if exc.errno != errno.ENOENT: raise def makeFolder(outputDir): try: os.makedirs(outputDir) except OSError as 
exc: if exc.errno != errno.EEXIST or not os.path.isdir(outputDir): print(exc.args) raise def getSnapshots(trajectoryFile, verbose=False, topology=None, use_pdb=False): ext = getFileExtension(trajectoryFile) if ext == ".pdb" or use_pdb: with open(trajectoryFile, "r") as inputFile: inputFileContent = inputFile.read() snapshots = inputFileContent.split("ENDMDL") if len(snapshots) > 1: snapshots = snapshots[:-1] if not verbose: return snapshots remarkInfo = "REMARK 000 File created using PELE++\nREMARK source : %s\nREMARK original model nr : %d\nREMARK First snapshot is 1, not 0 (as opposed to report)\n%s" snapshotsWithInfo = [remarkInfo % (trajectoryFile, i+1, snapshot) for i, snapshot in enumerate(snapshots)] elif ext == ".xtc": with md.formats.XTCTrajectoryFile(trajectoryFile) as f: snapshotsWithInfo, _, _, _ = f.read() snapshotsWithInfo *= 10 elif ext == ".trr": with md.formats.TRRTrajectoryFile(trajectoryFile) as f: snapshotsWithInfo, _, _, _, _ = f.read() snapshotsWithInfo *= 10 elif ext == ".dcd": with md.formats.DCDTrajectoryFile(trajectoryFile) as f: snapshotsWithInfo, _, _ = f.read() elif ext == ".dtr": with md.formats.DTRTrajectoryFile(trajectoryFile) as f: snapshotsWithInfo, _, _ = f.read() elif ext == ".mdcrd": with md.formats.MDCRDTrajectoryFile(trajectoryFile) as f: snapshotsWithInfo, _ = f.read() elif ext == ".nc": with md.formats.NetCDFTrajectoryFile(trajectoryFile) as f: snapshotsWithInfo, _, _, _ = f.read() else: raise ValueError("Unrecongnized file extension for %s" % trajectoryFile) return snapshotsWithInfo def getTrajNum(trajFilename): return getReportNum(trajFilename) def getFileSuffix(filename, separator="_"): name, _ = os.path.splitext(filename) return name.split(separator)[-1] def isReport(reportFilename): return getFileSuffix(reportFilename).isdigit() def getReportNum(reportFilename): return int(getFileSuffix(reportFilename)) def getReportList(reportBase): return glob_sorted(reportBase, key=getReportNum, filter_func=isReport) def getPrmtopNum(prmtopFilename): return int(prmtopFilename.split("_")[-1][:-7]) def calculateContactMapEigen(contactMap): nLig, nCA = contactMap.shape extendedCM = np.zeros((nLig+nCA, nLig+nCA)) extendedCM[nCA:, :nCA] = contactMap extendedCM[:nCA, nCA:] = contactMap.T assert (extendedCM == extendedCM.T).all(), "Extended ContactMap not symmetric" eiv, eic = np.linalg.eigh(extendedCM) return eiv, eic def assertSymmetriesDict(symmetries, PDB): for group in symmetries: for key in group: assert key in PDB.atoms, "Symmetry atom %s not found in initial structure" % key if symmetries: print("Symmetry dictionary correctly defined!") def getRMSD(traj, nativePDB, resname, reschain, resnum, symmetries, topology=None): snapshots = getSnapshots(traj) rmsds = np.zeros(len(snapshots)) RMSDCalc = RMSDCalculator.RMSDCalculator(symmetries) for i, snapshot in enumerate(snapshots): snapshotPDB = atomset.PDB() snapshotPDB.initialise(snapshot, resname=resname, chain=reschain, resnum=resnum, topology=topology) rmsds[i] = RMSDCalc.computeRMSD(nativePDB, snapshotPDB) return rmsds def readClusteringObject(clusteringObjectPath): with open(clusteringObjectPath, 'rb') as f: try: if six.PY2: return pickle.load(f) elif six.PY3: return pickle.load(f, encoding="latin") except EOFError: t, v, tb = sys.exc_info() raise_(t, v, tb) def ensure_connectivity_msm(msm): if msm.nstates_full == msm.nstates: return msm.stationary_distribution else: counts = msm.count_matrix_full counts += 1/counts.shape[0] trans = utils.buildRevTransitionMatrix(counts) _, eic = getSortedEigen(trans) return 
getStationaryDistr(eic[:, 0]) def getStationaryDistr(lowestEigenvector): absStationary = np.abs(lowestEigenvector) return absStationary / absStationary.sum() def getSortedEigen(T): eigenvals, eigenvectors = linalg.eig(T, left=True, right=False) sortedIndices = np.argsort(eigenvals)[::-1] return eigenvals[sortedIndices], eigenvectors[:, sortedIndices] def get_epoch_folders(path): allFolders = os.listdir(path) folders = [epoch for epoch in allFolders if epoch.isdigit()] folders.sort(key=int) return folders def gen_atom_name(index): ind1 = index//6760 ind2 = (index % 6760) ind3 = ind2 % 260 return chr(65+ind1)+chr(65+ind2//260)+chr(65+ind3//10)+str(ind3 % 10) def join_coordinates_prob(coords, p): if len(p.shape) < 2: p = p[:, np.newaxis] return np.hstack((coords, p)) def write_PDB_clusters(pmf_xyzg, title="clusters.pdb", use_beta=False, elements=None): templateLine = "HETATM%s %s CLT L 502 %s%s%s 0.75%s %s \n" if elements is None: elements = ["H" for i in range(len(pmf_xyzg))] content = "" names = [] for i, line in enumerate(pmf_xyzg): number = str(i).rjust(5) number3 = gen_atom_name(i).ljust(4) names.append(number3) x = ("%.3f" % line[0]).rjust(8) y = ("%.3f" % line[1]).rjust(8) z = ("%.3f" % line[2]).rjust(8) element = elements[i].rjust(2) if use_beta: g = ("%.2f" % line[-1]).rjust(6) else: g = ("%.2f" % 0).rjust(6) content += templateLine % (number, number3, x, y, z, g, element) with open(title, 'w') as f: f.write(content) return names def distanceCOM(coords1, coords2): coords1 = np.array(coords1) coords2 = np.array(coords2) return np.linalg.norm(coords1-coords2) def sign(x, tol=1e-7): x[abs(x) < tol] = 0 return np.sign(x) def getAtomNames(values): names = ["O", "H", "N"] values += 1 return [names[int(value)] for value in values] def getReportAndTrajectoryWildcard(JSONdict): reportWildcard = os.path.split(JSONdict["commands"][0]["PELE_Output"]['reportPath'])[1] trajWildcard = os.path.split(JSONdict["commands"][0]["PELE_Output"]['trajectoryPath'])[1] trajWildcard = '_%d'.join(os.path.splitext(trajWildcard)) reportWildcard = '_%d'.join(os.path.splitext(reportWildcard)) return reportWildcard, trajWildcard def getPELEControlFileDict(templetizedControlFile): with open(templetizedControlFile) as fc: peleControlFile = fc.read() templateNames = {ele[1]: '"$%s"' % ele[1] for ele in string.Template.pattern.findall(peleControlFile)} templateNames.pop("OUTPUT_PATH", None) templateNames.pop("REPORT_NAME", None) templateNames.pop("TRAJECTORY_NAME", None) return json.loads(string.Template(peleControlFile).safe_substitute(templateNames)), templateNames
MIT License
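An illustrative call of getMetricsFromReportsInEpoch; the folder layout, report name pattern and trajectory count are placeholders for whatever an adaptive sampling epoch actually produced.

from AdaptivePELE.utilities.utilities import getMetricsFromReportsInEpoch

# Expects files like output/0/report_1, report_2, ...; note the loop above runs
# range(1, nTrajs), so this reads reports 1 through nTrajs-1.
metrics = getMetricsFromReportsInEpoch("report_%d", "output/0", nTrajs=5)
print(metrics.shape)   # rows are snapshots: report columns + [trajectory, snapshot] indices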
iristyle/chocolateypackages
EthanBrown.SublimeText2.WebPackages/tools/PackageCache/SublimeLinter/sublimelinter/modules/libs/capp_lint.py
LintChecker.block_comment
python
def block_comment(self):
    commentOpenCount = self.line.count('/*')
    commentOpenCount -= self.line.count('*/')

    if commentOpenCount:
        if self.verbose:
            print u'%d: BLOCK COMMENT START' % self.lineNum
    else:
        return

    match = None

    while not match and self.next_statement(expect_line=True, check_line=False):
        match = self.BLOCK_COMMENT_END_RE.match(self.line)

    if self.verbose:
        print u'%d: BLOCK COMMENT END' % self.lineNum
Find the end of a block comment
https://github.com/iristyle/chocolateypackages/blob/8c9833710577de6db6e8b1db5d9196e19e19d117/EthanBrown.SublimeText2.WebPackages/tools/PackageCache/SublimeLinter/sublimelinter/modules/libs/capp_lint.py#L512-L531
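A minimal standalone sketch of the same scanning idea (the line list and helper name below are hypothetical, not part of capp_lint): only when a /* is left unclosed on the current line does the checker advance line by line until the terminating */ is found.

import re

BLOCK_COMMENT_END_RE = re.compile(r'.*?\*/')   # same terminator pattern as the checker

def skip_block_comment(lines, start):
    # Only scan forward if the starting line leaves a /* ... comment open.
    if lines[start].count('/*') - lines[start].count('*/') <= 0:
        return start
    for i in range(start + 1, len(lines)):
        if BLOCK_COMMENT_END_RE.match(lines[i]):
            return i                            # index of the line that closes the comment
    raise EOFError('unexpected EOF inside block comment')

source = ['var x = 1;  /* a note', 'still inside the comment', 'done */ var y = 2;']
print(skip_block_comment(source, 0))            # -> 2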
from __future__ import with_statement from optparse import OptionParser from string import Template import cgi import cStringIO import os import os.path import re import sys import unittest EXIT_CODE_SHOW_HTML = 205 EXIT_CODE_SHOW_TOOLTIP = 206 def exit_show_html(html): sys.stdout.write(html.encode('utf-8')) sys.exit(EXIT_CODE_SHOW_HTML) def exit_show_tooltip(text): sys.stdout.write(text) sys.exit(EXIT_CODE_SHOW_TOOLTIP) def within_textmate(): return os.getenv('TM_APP_PATH') is not None def tabs2spaces(text, positions=None): while True: index = text.find(u'\t') if index < 0: return text spaces = u' ' * (4 - (index % 4)) text = text[0:index] + spaces + text[index + 1:] if positions is not None: positions.append(index) def relative_path(basedir, filename): if filename.find(basedir) == 0: filename = filename[len(basedir) + 1:] return filename def string_replacer(line): r = [] in_quote = None escapes = 0 for i, c in enumerate(line): if in_quote: if not escapes and c == in_quote: in_quote = None r.append(c) continue else: if not escapes and (c == '"' or c == "'"): in_quote = c r.append(c) continue r.append(c) if c == '\\': escapes = (escapes + 1) % 2 else: escapes = 0 if in_quote: pass return "".join(r) class LintChecker(object): VAR_BLOCK_START_RE = re.compile(ur'''(?x) (?P<indent>\s*) # indent before a var keyword (?P<var>var\s+) # var keyword and whitespace after (?P<identifier>[a-zA-Z_$]\w*)\s* (?: (?P<assignment>=)\s* (?P<expression>.*) | (?P<separator>[,;+\-/*%^&|=\\]) ) ''') SEPARATOR_RE = re.compile(ur'''(?x) (?P<expression>.*) # Everything up to the line separator (?P<separator>[,;+\-/*%^&|=\\]) # The line separator \s* # Optional whitespace after $ # End of expression ''') INDENTED_EXPRESSION_RE_TEMPLATE = ur'''(?x) [ ]{%d} # Placeholder for indent of first identifier that started block (?P<expression>.+) # Expression ''' VAR_BLOCK_RE_TEMPLATE = ur'''(?x) [ ]{%d} # Placeholder for indent of first identifier that started block (?P<indent>\s*) # Capture any further indent (?: (?P<bracket>[\[\{].*) | (?P<identifier>[a-zA-Z_$]\w*)\s* (?: (?P<assignment>=)\s* (?P<expression>.*) | (?P<separator>[,;+\-/*%%^&|=\\]) ) | (?P<indented_expression>.+) ) ''' STATEMENT_RE = re.compile(ur'''(?x) \s*((continue|do|for|function|if|else|return|switch|while|with)\b|\[+\s*[a-zA-Z_$]\w*\s+[a-zA-Z_$]\w*\s*[:\]]) ''') TRAILING_WHITESPACE_RE = re.compile(ur'^.*(\s+)$') STRIP_LINE_COMMENT_RE = re.compile(ur'(.*)\s*(?://.*|/\*.*\*/\s*)$') LINE_COMMENT_RE = re.compile(ur'\s*(?:/\*.*\*/\s*|//.*)$') COMMENT_RE = re.compile(ur'/\*.*?\*/') BLOCK_COMMENT_START_RE = re.compile(ur'\s*/\*.*(?!\*/\s*)$') BLOCK_COMMENT_END_RE = re.compile(ur'.*?\*/') METHOD_RE = ur'[-+]\s*\([a-zA-Z_$]\w*\)\s*[a-zA-Z_$]\w*' FUNCTION_RE = re.compile(ur'\s*function\s*(?P<name>[a-zA-Z_$]\w*)?\(.*\)\s*\{?') RE_RE = re.compile(ur'(?<!\\)/.*?[^\\]/[gims]*') EMPTY_STRING_LITERAL_FUNCTION = lambda match: match.group(1) + (len(match.group(2)) * ' ') + match.group(1) EMPTY_SELF_STRING_LITERAL_FUNCTION = lambda self, match: match.group(1) + (len(match.group(2)) * ' ') + match.group(1) def noncapturing(regex): return ur'(?:%s)' % regex def optional(regex): return ur'(?:%s)?' 
% regex DECIMAL_DIGIT_RE = ur'[0-9]' NON_ZERO_DIGIT_RE = ur'[1-9]' DECIMAL_DIGITS_RE = DECIMAL_DIGIT_RE + ur'+' DECIMAL_DIGITS_OPT_RE = optional(DECIMAL_DIGIT_RE + ur'+') EXPONENT_INDICATOR_RE = ur'[eE]' SIGNED_INTEGER_RE = noncapturing(DECIMAL_DIGITS_RE) + ur'|' + noncapturing(ur'\+' + DECIMAL_DIGITS_RE) + ur'|' + noncapturing('-' + DECIMAL_DIGITS_RE) DECIMAL_INTEGER_LITERAL_RE = ur'0|' + noncapturing(NON_ZERO_DIGIT_RE + DECIMAL_DIGIT_RE + ur'*') EXPONENT_PART_RE = EXPONENT_INDICATOR_RE + noncapturing(SIGNED_INTEGER_RE) EXPONENT_PART_OPT_RE = optional(EXPONENT_PART_RE) DECIMAL_LITERAL_RE = re.compile(noncapturing(noncapturing(DECIMAL_INTEGER_LITERAL_RE) + ur'\.' + DECIMAL_DIGITS_OPT_RE + EXPONENT_PART_OPT_RE) + ur'|\.' + noncapturing(DECIMAL_DIGITS_RE + EXPONENT_PART_OPT_RE) + ur'|' + noncapturing(noncapturing(DECIMAL_INTEGER_LITERAL_RE) + EXPONENT_PART_OPT_RE)) ERROR_TYPE_ILLEGAL = 1 ERROR_TYPE_WARNING = 2 STD_IGNORES = ( {'regex': STRIP_LINE_COMMENT_RE, 'replace': ''}, {'function': string_replacer}, {'regex': COMMENT_RE, 'replace': ''}, {'regex': RE_RE, 'replace': '/ /'}, ) EXPONENTIAL_TO_SIMPLE = ( {'regex': DECIMAL_LITERAL_RE, 'replace': '42'}, ) LINE_CHECKLIST = ( { 'id': 'tabs', 'regex': re.compile(ur'[\t]'), 'error': 'line contains tabs', 'type': ERROR_TYPE_ILLEGAL }, { 'regex': re.compile(ur'([^\t -~])'), 'error': 'line contains non-ASCII characters', 'showPositionForGroup': 1, 'type': ERROR_TYPE_ILLEGAL, 'option': 'sublimelinter_objj_check_ascii', 'optionDefault': False }, { 'regex': re.compile(ur'^\s*(?:(?:else )?if|for|switch|while|with)(\()'), 'error': 'missing space between control statement and parentheses', 'showPositionForGroup': 1, 'type': ERROR_TYPE_WARNING }, { 'regex': re.compile(ur'^\s*(?:(?:else )?if|for|switch|while|with)\s*\(.+\)\s*(\{)\s*(?://.*|/\*.*\*/\s*)?$'), 'error': 'braces should be on their own line', 'showPositionForGroup': 1, 'type': ERROR_TYPE_ILLEGAL }, { 'regex': re.compile(ur'^\s*(?:(?:else )?if|for|switch|while|with)\s*\((\s+)?.+?(\s+)?\)\s*(?:(?:\{|//.*|/\*.*\*/)\s*)?$'), 'error': 'space inside parentheses', 'showPositionForGroup': [1, 2], 'type': ERROR_TYPE_ILLEGAL }, { 'regex': re.compile(ur'^\s*(?:(?:else )?if|for|switch|while|with)\s*\(.+\)\s*(?:[\w_]|\[).+(;)\s*(?://.*|/\*.*\*/\s*)?$'), 'error': 'dependent statements must be on their own line', 'showPositionForGroup': 1, 'type': ERROR_TYPE_ILLEGAL }, { 'regex': TRAILING_WHITESPACE_RE, 'error': 'trailing whitespace', 'showPositionForGroup': 1, 'type': ERROR_TYPE_ILLEGAL }, { 'filter': {'regex': re.compile(ur'(^@import\b|^\s*' + METHOD_RE + '|^\s*[a-zA-Z_$]\w*:\s*\([a-zA-Z_$][\w<>]*\)\s*\w+|[a-zA-Z_$]\w*(\+\+|--)|([ -+*/%^&|<>!]=?|&&|\|\||<<|>>>|={1,3}|!==?)\s*[-+][\w(\[])'), 'pass': False}, 'preprocess': STD_IGNORES + EXPONENTIAL_TO_SIMPLE, 'regex': re.compile(ur'(?<=[\w)\]"\']|([ ]))([-+*/%^]|&&?|\|\|?|<<|>>>?)(?=[\w({\["\']|(?(1)\b\b|[ ]))'), 'error': 'binary operator without surrounding spaces', 'showPositionForGroup': 2, 'type': ERROR_TYPE_WARNING }, { 'filter': {'regex': re.compile(ur'^\s*(?:@outlet\s+)?[a-zA-Z_$]\w*\s+[a-zA-Z_$]\w*\s+@accessors\b'), 'pass': False}, 'preprocess': STD_IGNORES, 'regex': re.compile(ur'(?<=[\w)\]"\']|([ ]))(=|[-+*/%^&|]=|<<=|>>>?=)(?=[\w({\["\']|(?(1)\b\b|[ ]))'), 'error': 'assignment operator without surrounding spaces', 'showPositionForGroup': 2, 'type': ERROR_TYPE_WARNING }, { 'filter': {'regex': re.compile(ur'^(@import\b|@implementation\b|\s*' + METHOD_RE + ')'), 'pass': False}, 'preprocess': STD_IGNORES, 'regex': re.compile(ur'(?<=[\w)\]"\']|([ 
]))(===?|!==?|[<>]=?)(?=[\w({\["\']|(?(1)\b\b|[ ]))'), 'error': 'comparison operator without surrounding spaces', 'showPositionForGroup': 2, 'type': ERROR_TYPE_WARNING }, { 'regex': re.compile(ur'^(\s+)' + METHOD_RE + '|^\s*[-+](\()[a-zA-Z_$][\w]*\)\s*[a-zA-Z_$]\w*|^\s*[-+]\s*\([a-zA-Z_$][\w]*\)(\s+)[a-zA-Z_$]\w*'), 'error': 'extra or missing space in a method declaration', 'showPositionForGroup': 0, 'type': ERROR_TYPE_WARNING }, { 'regex': re.compile(ur'^(?:\s*[-+]\s*\([a-zA-Z_$]\w*\)|@implementation)\s*[a-zA-Z_$][\w]*.*?\s*(\{)\s*(?:$|//.*$)'), 'error': 'braces should be on their own line', 'showPositionForGroup': 0, 'type': ERROR_TYPE_ILLEGAL }, { 'regex': re.compile(ur'^\s*var\s+[a-zA-Z_$]\w*\s*=\s*function\s+([a-zA-Z_$]\w*)\s*\('), 'error': 'function name is ignored', 'showPositionForGroup': 1, 'skip': True, 'type': ERROR_TYPE_WARNING }, ) VAR_DECLARATIONS = ['none', 'single', 'strict'] VAR_DECLARATIONS_NONE = 0 VAR_DECLARATIONS_SINGLE = 1 VAR_DECLARATIONS_STRICT = 2 DIRS_TO_SKIP = ('.git', 'Frameworks', 'Build', 'Resources', 'CommonJS', 'Objective-J') ERROR_FORMATS = ('text', 'html') TEXT_ERROR_SINGLE_FILE_TEMPLATE = Template(u'$lineNum: $message.\n+$line\n') TEXT_ERROR_MULTI_FILE_TEMPLATE = Template(u'$filename:$lineNum: $message.\n+$line\n') def __init__(self, view=None, basedir='', var_declarations=VAR_DECLARATIONS_SINGLE, verbose=False): self.view = view self.basedir = unicode(basedir, 'utf-8') self.errors = [] self.errorFiles = [] self.filesToCheck = [] self.varDeclarations = var_declarations self.verbose = verbose self.sourcefile = None self.filename = u'' self.line = u'' self.lineNum = 0 self.varIndent = u'' self.identifierIndent = u'' self.fileChecklist = ( {'title': 'Check variable blocks', 'action': self.check_var_blocks}, ) def run_line_checks(self): for check in self.LINE_CHECKLIST: option = check.get('option') if option: default = check.get('optionDefault', False) if self.view and not self.view.settings().get(option, default): continue line = self.line originalLine = line lineFilter = check.get('filter') if lineFilter: match = lineFilter['regex'].search(line) if (match and not lineFilter['pass']) or (not match and lineFilter['pass']): continue preprocess = check.get('preprocess') if preprocess: if not isinstance(preprocess, (list, tuple)): preprocess = (preprocess,) for processor in preprocess: regex = processor.get('regex') if regex: line = regex.sub(processor.get('replace', ''), line) fnct = processor.get('function') if fnct: line = fnct(line) regex = check.get('regex') if not regex: continue match = regex.search(line) if not match: continue positions = [] groups = check.get('showPositionForGroup') if (check.get('id') == 'tabs'): line = tabs2spaces(line, positions=positions) elif groups is not None: line = tabs2spaces(line) if not isinstance(groups, (list, tuple)): groups = (groups,) for match in regex.finditer(line): for group in groups: if group > 0: start = match.start(group) if start >= 0: positions.append(start) else: for i in range(1, len(match.groups()) + 1): if match.start(i) >= 0: positions.append(match.start(i)) break if positions: self.error(check['error'], line=originalLine, positions=positions, type=check['type']) def next_statement(self, expect_line=False, check_line=True): try: while True: raw_line = self.sourcefile.next() if raw_line[-1] == '\n': raw_line = raw_line[:-1] try: self.line = unicode(raw_line, 'utf-8', 'strict') self.lineNum += 1 except UnicodeDecodeError: self.line = unicode(raw_line, 'utf-8', 'replace') self.lineNum += 1 self.error('line 
contains invalid unicode character(s)', type=self.ERROR_TYPE_ILLEGAL) if self.verbose: print u'%d: %s' % (self.lineNum, tabs2spaces(self.line)) if check_line: self.run_line_checks() if not self.is_statement(): continue return True except StopIteration: if expect_line: self.error('unexpected EOF', type=self.ERROR_TYPE_ILLEGAL) raise def is_statement(self): if len(self.line.strip()) == 0: return False match = self.LINE_COMMENT_RE.match(self.line) if match: return False match = self.BLOCK_COMMENT_START_RE.match(self.line) if match: self.block_comment() return False return True def is_expression(self): match = self.STATEMENT_RE.match(self.line) return match is None def strip_comment(self): match = self.STRIP_LINE_COMMENT_RE.match(self.expression) if match: self.expression = match.group(1) def get_expression(self, lineMatch): groupdict = lineMatch.groupdict() self.expression = groupdict.get('expression') if self.expression is None: self.expression = groupdict.get('bracket') if self.expression is None: self.expression = groupdict.get('indented_expression') if self.expression is None: self.expression = '' return self.expression = string_replacer(self.expression) self.strip_comment() self.expression = self.expression.strip()
MIT License
kytos/kytos
kytos/core/interface.py
Interface.get_next_available_tag
python
def get_next_available_tag(self): try: return self.available_tags.pop() except IndexError: return False
Get the next available tag from the interface. Return the next available tag if it exists, removing it from the available tags. If no tag is available, return False.
https://github.com/kytos/kytos/blob/3b9731c08fe7550a27d159f4e2de71419c9445f1/kytos/core/interface.py#L167-L177
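A usage sketch assuming the kytos package (and its pyof dependency) is importable; the switch argument is left as None purely for illustration, since get_next_available_tag only touches the tag pool.

from kytos.core.interface import Interface

iface = Interface('eth0/1', 1, switch=None)   # tag pool is seeded with VLANs 1..4095
tag = iface.get_next_available_tag()
if tag:
    print(tag.as_dict())                      # e.g. {'tag_type': <TAGType.VLAN: 1>, 'value': 4095}
else:
    print('no tags left')                     # the method returns False once the pool is empty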
import json import logging from enum import IntEnum from pyof.v0x01.common.phy_port import Port as PortNo01 from pyof.v0x01.common.phy_port import PortFeatures as PortFeatures01 from pyof.v0x04.common.port import PortFeatures as PortFeatures04 from pyof.v0x04.common.port import PortNo as PortNo04 from kytos.core.common import GenericEntity from kytos.core.helpers import now __all__ = ('Interface',) LOG = logging.getLogger(__name__) class TAGType(IntEnum): VLAN = 1 VLAN_QINQ = 2 MPLS = 3 class TAG: def __init__(self, tag_type, value): self.tag_type = TAGType(tag_type) self.value = value def __eq__(self, other): if not other: return False return self.tag_type == other.tag_type and self.value == other.value def as_dict(self): return {'tag_type': self.tag_type, 'value': self.value} @classmethod def from_dict(cls, tag_dict): return cls(tag_dict.get('tag_type'), tag_dict.get('value')) @classmethod def from_json(cls, tag_json): return cls.from_dict(json.loads(tag_json)) def as_json(self): return json.dumps(self.as_dict()) def __repr__(self): return f"TAG({self.tag_type!r}, {self.value!r})" class Interface(GenericEntity): def __init__(self, name, port_number, switch, address=None, state=None, features=None, speed=None, config=None): self.name = name self.port_number = int(port_number) self.switch = switch self.address = address self.state = state self.features = features self.config = config self.nni = False self.endpoints = [] self.stats = None self.link = None self.lldp = True self._custom_speed = speed self.set_available_tags(range(1, 4096)) super().__init__() def __repr__(self): return f"Interface('{self.name}', {self.port_number}, {self.switch!r})" def __eq__(self, other): if isinstance(other, str): return self.address == other if isinstance(other, Interface): return self.port_number == other.port_number and self.switch.dpid == other.switch.dpid return False @property def id(self): return "{}:{}".format(self.switch.dpid, self.port_number) @property def uni(self): return not self.nni def set_available_tags(self, iterable): self.available_tags = [] for i in iterable: vlan = TAGType.VLAN tag = TAG(vlan, i) self.available_tags.append(tag) def enable(self): self.switch.enable() self._enabled = True def use_tag(self, tag): try: self.available_tags.remove(tag) except ValueError: return False return True def is_tag_available(self, tag): return tag in self.available_tags
MIT License
scrapinghub/exporters
exporters/writers/filebase_base_writer.py
FilebaseBaseWriter.create_filebase_name
python
def create_filebase_name(self, group_info, extension='gz', file_name=None): dirname = self.filebase.formatted_dirname(groups=group_info) if not file_name: file_name = self.filebase.prefix_template + '.' + extension return dirname, file_name
Return a tuple of the resolved destination folder name and file name.
https://github.com/scrapinghub/exporters/blob/b14f70530826bbbd6163d9e56e74345e762a9189/exporters/writers/filebase_base_writer.py#L145-L152
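A standalone sketch of what the method resolves (the template and group values below are made up): the directory part of the filebase template is formatted with the group info, and when no explicit file name is given the prefix plus extension is used.

import datetime, os

filebase = '/tmp/exports/{groups[0]}/dump_'                # hypothetical 'filebase' option
template = datetime.datetime.now().strftime(filebase)      # Filebase resolves date directives first
dirname_template, prefix_template = os.path.split(template)

group_info = ('us', 'books')                               # hypothetical path-safe group keys
dirname = dirname_template.format(groups=group_info)       # formatted_dirname(groups=group_info)
file_name = prefix_template + '.' + 'gz'                   # fallback when file_name is not given
print(dirname, file_name)                                  # /tmp/exports/us dump_.gz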
import datetime import hashlib import os import re import uuid import six from exporters.write_buffers.grouping import GroupingBufferFilesTracker from exporters.write_buffers.utils import get_filename from exporters.writers.base_writer import BaseWriter MD5_FILE_NAME = 'md5checksum.md5' def md5_for_file(f, block_size=2**20): md5 = hashlib.md5() while True: data = f.read(block_size) if not data: break md5.update(data) return md5.hexdigest() class Filebase(object): def __init__(self, filebase): self.input_filebase = filebase self.template = self._get_template() self.dirname_template, self.prefix_template = os.path.split(self.template) def _get_template(self): return datetime.datetime.now().strftime(self.input_filebase) def formatted_dirname(self, **format_info): try: dirname = self.dirname_template.format(**format_info) return dirname except KeyError as e: raise KeyError('filebase option should not contain {} key'.format(str(e))) def _has_key_info(self, key): return bool(re.findall('\{'+key+'\[\d\]\}', self.template)) def formatted_prefix(self, **format_info): prefix_name = self.prefix_template.format(**format_info) file_number = format_info.pop('file_number', 0) if prefix_name == self.prefix_template: prefix_name += '{:04d}'.format(file_number) for key, value in format_info.iteritems(): if value and not self._has_key_info(key): prefix_name = '{}-{}'.format(prefix_name, ''.join(value)) return prefix_name class FilebasedGroupingBufferFilesTracker(GroupingBufferFilesTracker): def __init__(self, formatter, filebase, compression_format, start_file_count=0, **kwargs): super(FilebasedGroupingBufferFilesTracker, self).__init__(formatter, compression_format, **kwargs) self.filebase = filebase self.start_file_count = start_file_count def create_new_group_file(self, key): group_files = self.grouping_info[key]['group_file'] group_folder = self._get_group_folder(group_files) current_file_count = len(group_files) + self.start_file_count group_info = self.grouping_info[key]['path_safe_keys'] name_without_ext = self.filebase.formatted_prefix( groups=group_info, file_number=current_file_count) file_name = get_filename(name_without_ext, self.file_extension, self.compression_format) file_name = os.path.join(group_folder, file_name) new_buffer_file = self._create_buffer_file(file_name=file_name) self.grouping_info.add_buffer_file_to_group(key, new_buffer_file) self.grouping_info.reset_key(key) return new_buffer_file def _get_group_folder(self, group_files): if group_files: return os.path.dirname(group_files[0].path) group_folder = os.path.join(self.tmp_folder, str(uuid.uuid4())) os.mkdir(group_folder) return group_folder class FilebaseBaseWriter(BaseWriter): supported_options = { 'filebase': {'type': six.string_types}, 'start_file_count': {'type': int, 'default': 0}, 'generate_md5': {'type': bool, 'default': False} } hash_algorithm = 'md5' def __init__(self, *args, **kwargs): super(FilebaseBaseWriter, self).__init__(*args, **kwargs) self.filebase = Filebase(self.read_option('filebase')) self.set_metadata('effective_filebase', self.filebase.template) self.generate_md5 = self.read_option('generate_md5') self.written_files = {} self.last_written_file = None self.generate_md5 = self.read_option('generate_md5') self.logger.info( '{} has been initiated. 
Writing to: {}'.format( self.__class__.__name__, self.filebase.template)) def _items_group_files_handler(self, write_buffer_class, **kwargs): group_files_class = write_buffer_class.filebased_group_files_tracker_class return group_files_class(self.export_formatter, filebase=Filebase(self.read_option('filebase')), start_file_count=self.read_option('start_file_count'), compression_format=self.read_option('compression'), **kwargs) def write(self, path, key, file_name=False): raise NotImplementedError def get_file_suffix(self, path, prefix): return str(uuid.uuid4())
BSD 3-Clause New or Revised License
sorsnce/red-team
1. Information Gathering/recon-ng/recon/core/framework.py
Framework.insert_locations
python
def insert_locations(self, latitude=None, longitude=None, street_address=None, mute=False): data = dict( latitude = latitude, longitude = longitude, street_address = street_address ) rowcount = self.insert('locations', data.copy(), data.keys()) if not mute: self._display(data, rowcount, '[location] %s, %s - %s', data.keys()) return rowcount
Adds a location to the database and returns the affected row count.
https://github.com/sorsnce/red-team/blob/5cd1932ccafcd2c1b92b8642e9a64fa0d2e99324/1. Information Gathering/recon-ng/recon/core/framework.py#L430-L439
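A standalone sqlite3 sketch of the same pattern (the table layout is assumed for illustration, not taken from recon-ng's schema): collect the column values in a dict, insert, and report the affected row count.

import sqlite3

conn = sqlite3.connect(':memory:')
conn.execute('CREATE TABLE locations (latitude REAL, longitude REAL, street_address TEXT)')

def insert_location(latitude=None, longitude=None, street_address=None):
    data = dict(latitude=latitude, longitude=longitude, street_address=street_address)
    cur = conn.execute(
        'INSERT INTO locations (latitude, longitude, street_address) VALUES (?, ?, ?)',
        (data['latitude'], data['longitude'], data['street_address']))
    conn.commit()
    return cur.rowcount   # the framework prints '[location] lat, lon - address' when this is > 0

print(insert_location(48.8584, 2.2945, 'Champ de Mars, Paris'))   # -> 1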
from contextlib import closing import cmd import codecs import inspect import json import os import random import re import requests import socket import sqlite3 import string import subprocess import sys import traceback class FrameworkException(Exception): pass class Colors(object): N = '\033[m' R = '\033[31m' G = '\033[32m' O = '\033[33m' B = '\033[34m' class Options(dict): def __init__(self, *args, **kwargs): self.required = {} self.description = {} super(Options, self).__init__(*args, **kwargs) def __getitem__(self, name): name = self.__keytransform__(name) return super(Options, self).__getitem__(name) def __setitem__(self, name, value): name = self.__keytransform__(name) value = self._autoconvert(value) super(Options, self).__setitem__(name, value) def __delitem__(self, name): name = self.__keytransform__(name) super(Options, self).__delitem__(name) if name in self.required: del self.required[name] if name in self.description: del self.description[name] def __keytransform__(self, key): return key.upper() def _boolify(self, value): return {'true':True, 'false':False}[value.lower()] def _autoconvert(self, value): if value in (None, True, False): return value elif (isinstance(value, str)) and value.lower() in ('none', "''", '""'): return None orig = value for fn in (self._boolify, int, float): try: value = fn(value) break except ValueError: pass except KeyError: pass except AttributeError: pass if type(value) is int and '.' in str(orig): return float(orig) return value def init_option(self, name, value=None, required=False, description=''): name = self.__keytransform__(name) self[name] = value self.required[name] = required self.description[name] = description def serialize(self): data = {} for key in self: data[key] = self[key] return data class Framework(cmd.Cmd): prompt = '>>>' _script = 0 _load = 0 _global_options = Options() _loaded_modules = {} app_path = '' data_path = '' core_path = '' home_path = '' mod_path = '' spaces_path = '' workspace = '' _record = None _spool = None _summary_counts = {} def __init__(self, params): cmd.Cmd.__init__(self) self._modulename = params self.ruler = '-' self.spacer = ' ' self.time_format = '%Y-%m-%d %H:%M:%S' self.nohelp = f"{Colors.R}[!] No help on %s{Colors.N}" self.do_help.__func__.__doc__ = '''Displays this menu''' self.doc_header = 'Commands (type [help|?] 
<topic>):' self._exit = 0 def default(self, line): self.error(f"Invalid command: {line}") def emptyline(self): return 0 def precmd(self, line): if Framework._load: print('\r', end='') if Framework._script: print(f"{line}") if Framework._record: recorder = codecs.open(Framework._record, 'ab', encoding='utf-8') recorder.write(f"{line}{os.linesep}") recorder.flush() recorder.close() if Framework._spool: Framework._spool.write(f"{self.prompt}{line}{os.linesep}") Framework._spool.flush() return line def onecmd(self, line): cmd, arg, line = self.parseline(line) if not line: return self.emptyline() if line == 'EOF': sys.stdin = sys.__stdin__ Framework._script = 0 Framework._load = 0 return if cmd is None: return self.default(line) self.lastcmd = line if cmd == '': return self.default(line) else: try: func = getattr(self, 'do_' + cmd) except AttributeError: return self.default(line) try: return func(arg) except Exception: self.print_exception() def print_topics(self, header, cmds, cmdlen, maxcol): if cmds: self.stdout.write(f"{header}{os.linesep}") if self.ruler: self.stdout.write(f"{self.ruler * len(header)}{os.linesep}") for cmd in cmds: self.stdout.write(f"{cmd.ljust(15)} {getattr(self, 'do_' + cmd).__doc__}{os.linesep}") self.stdout.write(os.linesep) def to_unicode_str(self, obj, encoding='utf-8'): if type(obj) not in (str, bytes): obj = str(obj) obj = self.to_unicode(obj, encoding) return obj def to_unicode(self, obj, encoding='utf-8'): if isinstance(obj, bytes): obj = obj.decode(encoding) return obj def is_hash(self, hashstr): hashdict = [ {'pattern': r'^[a-fA-F0-9]{32}$', 'type': 'MD5'}, {'pattern': r'^[a-fA-F0-9]{16}$', 'type': 'MySQL'}, {'pattern': r'^\*[a-fA-F0-9]{40}$', 'type': 'MySQL5'}, {'pattern': r'^[a-fA-F0-9]{40}$', 'type': 'SHA1'}, {'pattern': r'^[a-fA-F0-9]{56}$', 'type': 'SHA224'}, {'pattern': r'^[a-fA-F0-9]{64}$', 'type': 'SHA256'}, {'pattern': r'^[a-fA-F0-9]{96}$', 'type': 'SHA384'}, {'pattern': r'^[a-fA-F0-9]{128}$', 'type': 'SHA512'}, {'pattern': r'^\$[PH]{1}\$.{31}$', 'type': 'phpass'}, {'pattern': r'^\$2[ya]?\$.{56}$', 'type': 'bcrypt'}, ] for hashitem in hashdict: if re.match(hashitem['pattern'], hashstr): return hashitem['type'] return False def get_random_str(self, length): return ''.join(random.choice(string.lowercase) for i in range(length)) def _is_writeable(self, filename): try: fp = open(filename, 'a') fp.close() return True except IOError: return False def _parse_rowids(self, rowids): xploded = [] rowids = [x.strip() for x in rowids.split(',')] for rowid in rowids: try: if '-' in rowid: start = int(rowid.split('-')[0].strip()) end = int(rowid.split('-')[-1].strip()) xploded += range(start, end+1) else: xploded.append(int(rowid)) except ValueError: continue return sorted(list(set(xploded))) def print_exception(self, line=''): stack_list = [x.strip() for x in traceback.format_exc().strip().splitlines()] exctype = stack_list[-1].split(':')[0].strip() message = ' '.join(stack_list[-1].split(':')[1:]).strip() if self._global_options['verbosity'] == 0: return elif self._global_options['verbosity'] == 1: line = ' '.join([x for x in [message, line] if x]) self.error(line) elif self._global_options['verbosity'] == 2: print(f"{Colors.R}{'-'*60}") traceback.print_exc() print(f"{'-'*60}{Colors.N}") def error(self, line): if not re.search('[.,;!?]$', line): line += '.' line = line[:1].upper() + line[1:] print(f"{Colors.R}[!] 
{line}{Colors.N}") def output(self, line): print(f"{Colors.B}[*]{Colors.N} {line}") def alert(self, line): print(f"{Colors.G}[*]{Colors.N} {line}") def verbose(self, line): if self._global_options['verbosity'] >= 1: self.output(line) def debug(self, line): if self._global_options['verbosity'] >= 2: self.output(line) def heading(self, line, level=1): line = line print('') if level == 0: print(self.ruler*len(line)) print(line.upper()) print(self.ruler*len(line)) if level == 1: print(f"{self.spacer}{line.title()}") print(f"{self.spacer}{self.ruler*len(line)}") def table(self, data, header=[], title=''): tdata = list(data) if header: tdata.insert(0, header) if len(set([len(x) for x in tdata])) > 1: raise FrameworkException('Row lengths not consistent.') lens = [] cols = len(tdata[0]) for i in range(0,cols): lens.append(len(max([self.to_unicode_str(x[i]) if x[i] != None else '' for x in tdata], key=len))) title_len = len(title) tdata_len = sum(lens) + (3*(cols-1)) diff = title_len - tdata_len if diff > 0: diff_per = diff / cols lens = [x+diff_per for x in lens] diff_mod = diff % cols for x in range(0, diff_mod): lens[x] += 1 if len(tdata) > 0: separator_str = f"{self.spacer}+-{'%s---'*(cols-1)}%s-+" separator_sub = tuple(['-'*x for x in lens]) separator = separator_str % separator_sub data_str = f"{self.spacer}| {'%s | '*(cols-1)}%s |" print('') print(separator) if title: print(f"{self.spacer}| {title.center(tdata_len)} |") print(separator) if header: rdata = tdata.pop(0) data_sub = tuple([rdata[i].center(lens[i]) for i in range(0,cols)]) print(data_str % data_sub) print(separator) for rdata in tdata: data_sub = tuple([self.to_unicode_str(rdata[i]).ljust(lens[i]) if rdata[i] != None else ''.ljust(lens[i]) for i in range(0,cols)]) print(data_str % data_sub) print(separator) print('') def query(self, *args, **kwargs): path = os.path.join(self.workspace, 'data.db') return self._query(path, *args, **kwargs) def _query(self, path, query, values=(), include_header=False): self.debug(f"DATABASE => {path}") self.debug(f"QUERY => {query}") with sqlite3.connect(path) as conn: with closing(conn.cursor()) as cur: if values: self.debug(f"VALUES => {repr(values)}") cur.execute(query, values) else: cur.execute(query) if cur.rowcount == -1: rows = [] if include_header: rows.append(tuple([x[0] for x in cur.description])) rows.extend(cur.fetchall()) results = rows else: conn.commit() results = cur.rowcount return results def get_columns(self, table): return [(x[1], x[2]) for x in self.query(f"PRAGMA table_info('{table}')")] def get_tables(self): return [x[0] for x in self.query('SELECT name FROM sqlite_master WHERE type=\'table\'') if x[0] not in ['dashboard']] def _display(self, data, rowcount, pattern=None, keys=None): display = self.alert if rowcount else self.verbose if pattern and keys: values = tuple([data[key] or '<blank>' for key in keys]) display(pattern % values) else: for key in sorted(data.keys()): display(f"{key.title()}: {data[key]}") display(self.ruler*50) def insert_domains(self, domain=None, mute=False): data = dict( domain = domain ) rowcount = self.insert('domains', data.copy(), data.keys()) if not mute: self._display(data, rowcount, '[domain] %s', data.keys()) return rowcount def insert_companies(self, company=None, description=None, mute=False): data = dict( company = company, description = description ) rowcount = self.insert('companies', data.copy(), ('company',)) if not mute: self._display(data, rowcount, '[company] %s - %s', data.keys()) return rowcount def insert_netblocks(self, 
netblock=None, mute=False): data = dict( netblock = netblock ) rowcount = self.insert('netblocks', data.copy(), data.keys()) if not mute: self._display(data, rowcount, '[netblock] %s', data.keys()) return rowcount
MIT License
checkpointsw/karta
src/thumbs_up/utils/function.py
FeatureClassifier.train
python
def train(self, scoped_functions): clf = RandomForestClassifier(n_estimators=100) eas = [self._interest(x) for x in scoped_functions] + [self._interest(x) + self._inner_offset for x in scoped_functions] data_set = [self.extractSample(x) for x in eas] data_results = [self._tag(x) for x in eas] clf.fit(data_set, data_results) self._classifier = clf
Train the classifier on the scoped functions. Args: scoped_functions (list): list of all relevant (scoped) functions Note: Training must happen *after* the calibration phase
https://github.com/checkpointsw/karta/blob/b845928487b50a5b41acd532ae0399177a4356aa/src/thumbs_up/utils/function.py#L80-L97
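A sketch of the fit call with synthetic data (the feature size and labels below are placeholders, not Karta's real byte features): each address contributes a fixed-length sample and a binary tag, and the classifier is just a RandomForest over those rows.

import numpy as np
from sklearn.ensemble import RandomForestClassifier

rng = np.random.default_rng(0)
data_set = rng.integers(0, 256, size=(200, 16))   # stand-in for extractSample() byte features
data_results = rng.integers(0, 2, size=200)       # stand-in for the tagging labels

clf = RandomForestClassifier(n_estimators=100)
clf.fit(data_set, data_results)
print(clf.predict(data_set[:3]))                  # query a few samples to show the call shape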
from sklearn import metrics from sklearn.ensemble import RandomForestClassifier from sklearn.model_selection import train_test_split import idc import ida_nalt import sark import numpy import struct import time CALIBRATION_LOWER_BOUND = 0.75 CALIBRATION_UPPER_BOUND = 0.96 CALIBRATION_TIME_ESTIMATE = 10 class FeatureClassifier(): def __init__(self, analyzer, name, feature_size, inner_offset, classifier_offsets, interest, tag): self._analyzer = analyzer self._name = name self._feature_size = feature_size self._inner_offset = inner_offset self._classifier_offsets = classifier_offsets self._interest = interest self._tag = tag self._classifier = None self._needs_calibration = True self._needs_training = True def extractSample(self, ea): return [idc.get_wide_byte(ea + o) for o in self._classifier_offsets]
MIT License
stypr/clubhouse-py
clubhouse/clubhouse.py
Clubhouse.get_club
python
def get_club(self, club_id, source_topic_id=None): data = { "club_id": int(club_id), "source_topic_id": source_topic_id, "query_id": None, "query_result_position": None, "slug": None, } req = requests.post(f"{self.API_URL}/get_club", headers=self.HEADERS, json=data) return req.json()
(Clubhouse, int, int) -> dict Get the information about the given club_id.
https://github.com/stypr/clubhouse-py/blob/a0aad17a42a4f391fc40eebc36e5535e629bdd9a/clubhouse/clubhouse.py#L532-L545
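A usage sketch with placeholder credentials (real values come from the phone-number auth flow, and the import path assumes the package layout of this file): once the client carries a user id, token and device id in its headers, the call is a plain wrapper around the get_club endpoint.

from clubhouse.clubhouse import Clubhouse   # module path per this file's location

client = Clubhouse(user_id='12345',                                        # placeholder
                   user_token='hypothetical-token',                        # placeholder
                   user_device='469124f6-fb6d-4a15-8d9d-579768aacc07')     # placeholder
club_info = client.get_club(49104)   # club_id is cast to int; source_topic_id defaults to None
print(club_info)                     # parsed JSON response from the API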
import uuid import random import secrets import functools import requests class Clubhouse: API_URL = "https://www.clubhouseapi.com/api" API_BUILD_ID_IOS = "434" API_BUILD_VERSION = "0.1.40" API_BUILD_ID_ANDROID = "3389" API_BUILD_VERSION_ANDROID= "1.0.1" API_UA_IOS = f"clubhouse/{API_BUILD_ID_IOS} (iPhone; iOS 14.4; Scale/2.00)" API_UA_ANDROID = f"clubhouse/android/{API_BUILD_ID_ANDROID}" API_UA_STATIC = f"Clubhouse/{API_BUILD_ID_IOS} CFNetwork/1220.1 Darwin/20.3.0" PUBNUB_PUB_KEY = "pub-c-6878d382-5ae6-4494-9099-f930f938868b" PUBNUB_SUB_KEY = "sub-c-a4abea84-9ca3-11ea-8e71-f2b83ac9263d" PUBNUB_API_URL = "https://clubhouse.pubnubapi.com/v2" SENTRY_URL = "[email protected]/5245095" TWITTER_ID = "NyJhARWVYU1X3qJZtC2154xSI" TWITTER_SECRET = "ylFImLBFaOE362uwr4jut8S8gXGWh93S1TUKbkfh7jDIPse02o" INSTAGRAM_ID = "1352866981588597" INSTAGRAM_CALLBACK = "https://www.joinclubhouse.com/callback/instagram" AGORA_KEY = "938de3e8055e42b281bb8c6f69c21f78" INSTABUG_KEY = "4e53155da9b00728caa5249f2e35d6b3" AMPLITUDE_KEY = "9098a21a950e7cb0933fb5b30affe5be" STRIPE_PUBLISH_KEY = "[email protected]/5245095" ANDROID_API_KEY = "AIzaSyDGJ877BvgHAg2Bed1sgFjZ4wJmh2RfEfU" ANDROID_API_ID = "1:1096237342636:android:c800b1b9e5ee70d1f8a409" ANDROID_RECAPTCHA_KEY = "LcNAMYaAAAAAKDxm-jPPMrJvh_VTiWyWy4D9jp3" IOS_API_ID = "co.alphaexploration.clubhouse:16.0.3" IOS_RECAPTCHA_KEY = "6LeWyKUaAAAAAA7XsHRe-JWuI1qLwoZn5p3seyoW" HEADERS = { "CH-Languages": "en-JP,ja-JP", "CH-Locale": "en_JP", "Accept": "application/json", "Accept-Language": "en-JP;q=1, ja-JP;q=0.9", "Accept-Encoding": "gzip, deflate", "ch-keyboards": "en_US", "CH-AppBuild": f"{API_BUILD_ID_ANDROID}", "CH-AppVersion": f"{API_BUILD_VERSION_ANDROID}", "User-Agent": f"{API_UA_ANDROID}", "Connection": "close", "Content-Type": "application/json; charset=utf-8", "Cookie": f"__cfduid={secrets.token_hex(21)}{random.randint(1, 9)}" } def require_authentication(func): @functools.wraps(func) def wrap(self, *args, **kwargs): if not (self.HEADERS.get("CH-UserID") and self.HEADERS.get("CH-DeviceId") and self.HEADERS.get("Authorization")): raise Exception('Not Authenticated') return func(self, *args, **kwargs) return wrap def unstable_endpoint(func): @functools.wraps(func) def wrap(self, *args, **kwargs): print("[!] This endpoint is NEVER TESTED and MAY BE UNSTABLE. 
BE CAREFUL!") return func(self, *args, **kwargs) return wrap def __init__(self, user_id='', user_token='', user_device='', headers=None): self.HEADERS = dict(self.HEADERS) if isinstance(headers, dict): self.HEADERS.update(headers) self.HEADERS['CH-UserID'] = user_id if user_id else "(null)" if user_token: self.HEADERS['Authorization'] = f"Token {user_token}" self.HEADERS['CH-DeviceId'] = user_device.upper() if user_device else str(uuid.uuid4()).upper() def __str__(self): return "Clubhouse(user_Id={}, user_token={}, user_device={})".format( self.HEADERS.get('CH-UserID'), self.HEADERS.get('Authorization'), self.HEADERS.get('CH-DeviceId') ) def start_phone_number_auth(self, phone_number): if self.HEADERS.get("Authorization"): raise Exception('Already Authenticatied') data = { "phone_number": phone_number } req = requests.post(f"{self.API_URL}/start_phone_number_auth", headers=self.HEADERS, json=data) return req.json() @unstable_endpoint def call_phone_number_auth(self, phone_number): if self.HEADERS.get("Authorization"): raise Exception('Already Authenticatied') data = { "phone_number": phone_number } req = requests.post(f"{self.API_URL}/call_phone_number_auth", headers=self.HEADERS, json=data) return req.json() @unstable_endpoint def resend_phone_number_auth(self, phone_number): if self.HEADERS.get("Authorization"): raise Exception('Already Authenticatied') data = { "phone_number": phone_number } req = requests.post(f"{self.API_URL}/resend_phone_number_auth", headers=self.HEADERS, json=data) return req.json() def complete_phone_number_auth(self, phone_number, verification_code, rc_token=None, safety_net_nonce=None, safety_net_response=None): if self.HEADERS.get("Authorization"): raise Exception('Already Authenticatied') data = { "device_token": None, "rc_token": rc_token, "safety_net_nonce": safety_net_nonce, "safety_net_response": safety_net_response, "phone_number": phone_number, "verification_code": verification_code } req = requests.post(f"{self.API_URL}/complete_phone_number_auth", headers=self.HEADERS, json=data) return req.json() def check_for_update(self, is_testflight=False): query = f"is_testflight={int(is_testflight)}" req = requests.get(f"{self.API_URL}/check_for_update?{query}", headers=self.HEADERS) return req.json() @require_authentication def logout(self): data = {} req = requests.post(f"{self.API_URL}/logout", headers=self.HEADERS, json=data) return req.json() @require_authentication def get_release_notes(self): req = requests.post(f"{self.API_URL}/get_release_notes", headers=self.HEADERS) return req.json() @require_authentication def check_waitlist_status(self): req = requests.post(f"{self.API_URL}/check_waitlist_status", headers=self.HEADERS) return req.json() @require_authentication def add_email(self, email): data = { "email": email } req = requests.post(f"{self.API_URL}/add_email", headers=self.HEADERS, json=data) return req.json() @require_authentication def update_photo(self, photo_filename): files = { "file": ("image.jpg", open(photo_filename, "rb"), "image/jpeg"), } tmp = self.HEADERS['Content-Type'] self.HEADERS.pop("Content-Type") req = requests.post(f"{self.API_URL}/update_photo", headers=self.HEADERS, files=files) self.HEADERS['Content-Type'] = tmp return req.json() @require_authentication def follow(self, user_id, user_ids=None, source=4, source_topic_id=None): data = { "source_topic_id": source_topic_id, "user_ids": user_ids, "user_id": int(user_id), "source": source } req = requests.post(f"{self.API_URL}/follow", headers=self.HEADERS, json=data) return 
req.json() @require_authentication def unfollow(self, user_id): data = { "user_id": int(user_id) } req = requests.post(f"{self.API_URL}/unfollow", headers=self.HEADERS, json=data) return req.json() @require_authentication def block(self, user_id): data = { "user_id": int(user_id) } req = requests.post(f"{self.API_URL}/block", headers=self.HEADERS, json=data) return req.json() @require_authentication def unblock(self, user_id): data = { "user_id": int(user_id) } req = requests.post(f"{self.API_URL}/unblock", headers=self.HEADERS, json=data) return req.json() @require_authentication def follow_multiple(self, user_ids, user_id=None, source=7, source_topic_id=None): data = { "source_topic_id": source_topic_id, "user_ids": user_ids, "user_id": user_id, "source": source } req = requests.post(f"{self.API_URL}/follow_multiple", headers=self.HEADERS, json=data) return req.json() @require_authentication def follow_club(self, club_id, source_topic_id=None): data = { "club_id": int(club_id), "source_topic_id": source_topic_id } req = requests.post(f"{self.API_URL}/follow_club", headers=self.HEADERS, json=data) return req.json() @require_authentication def unfollow_club(self, club_id, source_topic_id=None): data = { "club_id": int(club_id), "source_topic_id": source_topic_id } req = requests.post(f"{self.API_URL}/unfollow_club", headers=self.HEADERS, json=data) return req.json() @require_authentication def update_follow_notifications(self, user_id, notification_type=2): data = { "user_id": int(user_id), "notification_type": int(notification_type) } req = requests.post(f"{self.API_URL}/update_follow_notifications", headers=self.HEADERS, json=data) return req.json() @require_authentication def get_suggested_follows_similar(self, user_id='', username=''): data = { "user_id": int(user_id) if user_id else None, "username": username if username else None, "query_id": None, "query_result_position": None, } req = requests.post(f"{self.API_URL}/get_suggested_follows_similar", headers=self.HEADERS, json=data) return req.json() @require_authentication def get_suggested_follows_friends_only(self, club_id=None, upload_contacts=True, contacts=()): data = { "club_id": club_id, "upload_contacts": upload_contacts, "contacts": contacts } req = requests.post(f"{self.API_URL}/get_suggested_follows_friends_only", headers=self.HEADERS, json=data) return req.json() @require_authentication def get_suggested_follows_all(self, in_onboarding=True, page_size=50, page=1): query = "in_onboarding={}&page_size={}&page={}".format( "true" if in_onboarding else "false", page_size, page ) req = requests.get(f"{self.API_URL}/get_suggested_follows_all?{query}", headers=self.HEADERS) return req.json() @require_authentication def ignore_suggested_follow(self, user_id): data = { "user_id": int(user_id) } req = requests.post(f"{self.API_URL}/user_id", headers=self.HEADERS, json=data) return req.json() @require_authentication def get_event(self, event_id=None, user_ids=None, club_id=None, is_member_only=False, event_hashid=None, description=None, time_start_epoch=None, name=None): data = { "user_ids": user_ids, "club_id": club_id, "is_member_only": is_member_only, "event_id": int(event_id) if event_id else None, "event_hashid": event_hashid, "description": description, "time_start_epoch": time_start_epoch, "name": name } req = requests.post(f"{self.API_URL}/get_event", headers=self.HEADERS, json=data) return req.json() @require_authentication def create_event(self, name, time_start_epoch, description, event_id=None, user_ids=(), club_id=None, 
is_member_only=False, event_hashid=None): data = { "user_ids": user_ids, "club_id": club_id, "is_member_only": is_member_only, "event_id": int(event_id) if event_id else None, "event_hashid": event_hashid, "description": description, "time_start_epoch": time_start_epoch, "name": name } req = requests.post(f"{self.API_URL}/edit_event", headers=self.HEADERS, json=data) return req.json() @require_authentication def edit_event(self, name, time_start_epoch, description, event_id=None, user_ids=(), club_id=None, is_member_only=False, event_hashid=None): data = { "user_ids": user_ids, "club_id": club_id, "is_member_only": is_member_only, "event_id": int(event_id) if event_id else None, "event_hashid": event_hashid, "description": description, "time_start_epoch": time_start_epoch, "name": name } req = requests.post(f"{self.API_URL}/edit_event", headers=self.HEADERS, json=data) return req.json() @require_authentication def delete_event(self, event_id, user_ids=None, club_id=None, is_member_only=False, event_hashid=None, description=None, time_start_epoch=None, name=None): data = { "user_ids": user_ids, "club_id": club_id, "is_member_only": is_member_only, "event_id": int(event_id) if event_id else None, "event_hashid": event_hashid, "description": description, "time_start_epoch": time_start_epoch, "name": name } req = requests.post(f"{self.API_URL}/delete_event", headers=self.HEADERS, json=data) return req.json() @require_authentication def get_events(self, is_filtered=True, page_size=25, page=1): _is_filtered = "true" if is_filtered else "false" query = "is_filtered={}&page_size={}&page={}".format( "true" if is_filtered else "false", page_size, page ) req = requests.get(f"{self.API_URL}/get_events?{query}", headers=self.HEADERS) return req.json() @require_authentication
MIT License
ngageoint/sarpy
sarpy/io/product/sidd2_elements/Measurement.py
PlaneProjectionType.__init__
python
def __init__(self, ReferencePoint=None, SampleSpacing=None, TimeCOAPoly=None, ProductPlane=None, **kwargs): if '_xml_ns' in kwargs: self._xml_ns = kwargs['_xml_ns'] if '_xml_ns_key' in kwargs: self._xml_ns_key = kwargs['_xml_ns_key'] super(PlaneProjectionType, self).__init__( ReferencePoint=ReferencePoint, SampleSpacing=SampleSpacing, TimeCOAPoly=TimeCOAPoly, **kwargs) self.ProductPlane = ProductPlane
Parameters ---------- ReferencePoint : ReferencePointType SampleSpacing : RowColDoubleType|numpy.ndarray|list|tuple TimeCOAPoly : Poly2DType|numpy.ndarray|list|tuple ProductPlane : ProductPlaneType kwargs
https://github.com/ngageoint/sarpy/blob/91405721a7e6ffe7c76dd7b143915fee4bee1e82/sarpy/io/product/sidd2_elements/Measurement.py#L132-L149
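A stripped-down sketch of the constructor pattern with toy classes (not the real sarpy types): the only extra step over the parent is capturing the optional XML-namespace bookkeeping from **kwargs before delegating, then attaching ProductPlane.

class MeasurableProjection:                      # toy stand-in for MeasurableProjectionType
    def __init__(self, ReferencePoint=None, SampleSpacing=None, TimeCOAPoly=None, **kwargs):
        self.ReferencePoint = ReferencePoint
        self.SampleSpacing = SampleSpacing
        self.TimeCOAPoly = TimeCOAPoly

class PlaneProjection(MeasurableProjection):     # toy stand-in for PlaneProjectionType
    def __init__(self, ReferencePoint=None, SampleSpacing=None, TimeCOAPoly=None,
                 ProductPlane=None, **kwargs):
        if '_xml_ns' in kwargs:
            self._xml_ns = kwargs['_xml_ns']
        if '_xml_ns_key' in kwargs:
            self._xml_ns_key = kwargs['_xml_ns_key']
        super().__init__(ReferencePoint=ReferencePoint, SampleSpacing=SampleSpacing,
                         TimeCOAPoly=TimeCOAPoly, **kwargs)
        self.ProductPlane = ProductPlane

p = PlaneProjection(ReferencePoint='ref', SampleSpacing=(1.0, 1.0),
                    TimeCOAPoly=[[0.0]], ProductPlane='plane', _xml_ns={'ns': 'urn:example'})
print(p.SampleSpacing, p.ProductPlane, p._xml_ns)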
__classification__ = "UNCLASSIFIED" __author__ = "Thomas McCullough" from typing import Union, List from sarpy.io.xml.base import Serializable, SerializableArray from sarpy.io.xml.descriptors import SerializableDescriptor, UnitVectorDescriptor, FloatDescriptor, StringEnumDescriptor, SerializableArrayDescriptor from .base import DEFAULT_STRICT from .blocks import ReferencePointType, RowColDoubleType, Poly2DType, XYZType, RowColIntType, XYZPolyType, RowColArrayElement class BaseProjectionType(Serializable): _fields = ('ReferencePoint', ) _required = ('ReferencePoint', ) ReferencePoint = SerializableDescriptor( 'ReferencePoint', ReferencePointType, _required, strict=DEFAULT_STRICT, docstring='Reference point for the geometrical system.') def __init__(self, ReferencePoint=None, **kwargs): if '_xml_ns' in kwargs: self._xml_ns = kwargs['_xml_ns'] if '_xml_ns_key' in kwargs: self._xml_ns_key = kwargs['_xml_ns_key'] self.ReferencePoint = ReferencePoint super(BaseProjectionType, self).__init__(**kwargs) class MeasurableProjectionType(BaseProjectionType): _fields = ('ReferencePoint', 'SampleSpacing', 'TimeCOAPoly') _required = ('ReferencePoint', 'SampleSpacing', 'TimeCOAPoly') SampleSpacing = SerializableDescriptor( 'SampleSpacing', RowColDoubleType, _required, strict=DEFAULT_STRICT, docstring='Sample spacing in row and column.') TimeCOAPoly = SerializableDescriptor( 'TimeCOAPoly', Poly2DType, _required, strict=DEFAULT_STRICT, docstring='Time (units = seconds) at which center of aperture for a given pixel ' 'coordinate in the product occurs.') def __init__(self, ReferencePoint=None, SampleSpacing=None, TimeCOAPoly=None, **kwargs): if '_xml_ns' in kwargs: self._xml_ns = kwargs['_xml_ns'] if '_xml_ns_key' in kwargs: self._xml_ns_key = kwargs['_xml_ns_key'] super(MeasurableProjectionType, self).__init__(ReferencePoint=ReferencePoint, **kwargs) self.SampleSpacing = SampleSpacing self.TimeCOAPoly = TimeCOAPoly class ProductPlaneType(Serializable): _fields = ('RowUnitVector', 'ColUnitVector') _required = _fields RowUnitVector = UnitVectorDescriptor( 'RowUnitVector', XYZType, _required, strict=DEFAULT_STRICT, docstring='Unit vector of the plane defined to be aligned in the increasing row direction ' 'of the product. (Defined as Rpgd in Design and Exploitation document)') ColUnitVector = UnitVectorDescriptor( 'ColUnitVector', XYZType, _required, strict=DEFAULT_STRICT, docstring='Unit vector of the plane defined to be aligned in the increasing column direction ' 'of the product. (Defined as Cpgd in Design and Exploitation document)') def __init__(self, RowUnitVector=None, ColUnitVector=None, **kwargs): if '_xml_ns' in kwargs: self._xml_ns = kwargs['_xml_ns'] if '_xml_ns_key' in kwargs: self._xml_ns_key = kwargs['_xml_ns_key'] self.RowUnitVector = RowUnitVector self.ColUnitVector = ColUnitVector super(ProductPlaneType, self).__init__(**kwargs) class PlaneProjectionType(MeasurableProjectionType): _fields = ('ReferencePoint', 'SampleSpacing', 'TimeCOAPoly', 'ProductPlane') _required = ('ReferencePoint', 'SampleSpacing', 'TimeCOAPoly', 'ProductPlane') ProductPlane = SerializableDescriptor( 'ProductPlane', ProductPlaneType, _required, strict=DEFAULT_STRICT, docstring='')
MIT License
elliot79313/tra-tracking-on-gae
gaesessions/__init__.py
Session.make_cookie_headers
python
def make_cookie_headers(self): if not self.sid: return [EXPIRE_COOKIE_FMT % k for k in self.cookie_keys] if self.cookie_data is None: return [] if self.is_ssl_only(): m = MAX_DATA_PER_COOKIE - 8 fmt = COOKIE_FMT_SECURE else: m = MAX_DATA_PER_COOKIE fmt = COOKIE_FMT sig = Session.__compute_hmac(self.base_key, self.sid, self.cookie_data) cv = sig + self.sid + b64encode(self.cookie_data) num_cookies = 1 + (len(cv) - 1) / m if self.get_expiration() > 0: ed = "expires=%s; " % datetime.datetime.fromtimestamp(self.get_expiration()).strftime(COOKIE_DATE_FMT) else: ed = '' cookies = [fmt % (i, cv[i * m:i * m + m], ed) for i in xrange(num_cookies)] old_cookies = xrange(num_cookies, len(self.cookie_keys)) key = COOKIE_NAME_PREFIX + '%02d' cookies_to_ax = [EXPIRE_COOKIE_FMT % (key % i) for i in old_cookies] return cookies + cookies_to_ax
Returns a list of cookie headers to send (if any).
https://github.com/elliot79313/tra-tracking-on-gae/blob/9f920a6e96b357bccba2d4328a3a7e2dcdebfc0a/gaesessions/__init__.py#L120-L149
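A Python 3 standalone sketch of the splitting step (the size cap and cookie name are deliberately tiny and simplified, not the real limits): sign sid plus payload, concatenate signature, sid and base64 data, then slice the value across as many cookies as needed.

import hashlib, hmac
from base64 import b64encode

MAX_DATA_PER_COOKIE = 64                       # deliberately tiny so the split is visible
COOKIE_FMT = 'DgU%02d="%s"; Path=/; HttpOnly'

def cookie_headers(base_key, sid, pdump):
    sig = b64encode(hmac.new(base_key + sid.encode(), pdump, hashlib.sha256).digest()).decode()
    cv = sig + sid + b64encode(pdump).decode()
    m = MAX_DATA_PER_COOKIE
    num_cookies = 1 + (len(cv) - 1) // m       # ceil(len(cv) / m), as in the original
    return [COOKIE_FMT % (i, cv[i * m:(i + 1) * m]) for i in range(num_cookies)]

for header in cookie_headers(b'secret-key', 'a' * 43, b'{"user": 42}'):
    print(header)                              # two DgU00/DgU01 cookies for this payload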
from Cookie import CookieError, SimpleCookie from base64 import b64decode, b64encode import datetime import hashlib import hmac import logging import pickle import os import threading import time from google.appengine.api import memcache from google.appengine.ext import db COOKIE_NAME_PREFIX = "DgU" COOKIE_PATH = "/" DEFAULT_COOKIE_ONLY_THRESH = 10240 DEFAULT_LIFETIME = datetime.timedelta(days=7) SID_LEN = 43 SIG_LEN = 44 MAX_COOKIE_LEN = 4096 EXPIRE_COOKIE_FMT = ' %s=; expires=Wed, 01-Jan-1970 00:00:00 GMT; Path=' + COOKIE_PATH COOKIE_FMT = ' ' + COOKIE_NAME_PREFIX + '%02d="%s"; %sPath=' + COOKIE_PATH + '; HttpOnly' COOKIE_FMT_SECURE = COOKIE_FMT + '; Secure' COOKIE_DATE_FMT = '%a, %d-%b-%Y %H:%M:%S GMT' COOKIE_OVERHEAD = len(COOKIE_FMT % (0, '', '')) + len('expires=Xxx, xx XXX XXXX XX:XX:XX GMT; ') + 150 MAX_DATA_PER_COOKIE = MAX_COOKIE_LEN - COOKIE_OVERHEAD _tls = threading.local() def get_current_session(): return _tls.current_session def set_current_session(session): _tls.current_session = session def is_gaesessions_key(k): return k.startswith(COOKIE_NAME_PREFIX) class SessionModel(db.Model): pdump = db.BlobProperty() class Session(object): DIRTY_BUT_DONT_PERSIST_TO_DB = 1 def __init__(self, sid=None, lifetime=DEFAULT_LIFETIME, no_datastore=False, cookie_only_threshold=DEFAULT_COOKIE_ONLY_THRESH, cookie_key=None): self._accessed = False self.sid = None self.cookie_keys = [] self.cookie_data = None self.data = {} self.dirty = False self.lifetime = lifetime self.no_datastore = no_datastore self.cookie_only_thresh = cookie_only_threshold self.base_key = cookie_key if sid: self.__set_sid(sid, False) self.data = None else: self.__read_cookie() @staticmethod def __compute_hmac(base_key, sid, text): key = base_key + sid return b64encode(hmac.new(key, text, hashlib.sha256).digest()) def __read_cookie(self): try: cookie = SimpleCookie(os.environ['HTTP_COOKIE']) self.cookie_keys = filter(is_gaesessions_key, cookie.keys()) if not self.cookie_keys: return self.cookie_keys.sort() data = ''.join(cookie[k].value for k in self.cookie_keys) i = SIG_LEN + SID_LEN sig, sid, b64pdump = data[:SIG_LEN], data[SIG_LEN:i], data[i:] pdump = b64decode(b64pdump) actual_sig = Session.__compute_hmac(self.base_key, sid, pdump) if sig == actual_sig: self.__set_sid(sid, False) if self.get_expiration() != 0 and time.time() > self.get_expiration(): return self.terminate() if pdump: self.data = self.__decode_data(pdump) else: self.data = None else: logging.warn('cookie with invalid sig received from %s: %s' % (os.environ.get('REMOTE_ADDR'), b64pdump)) except (CookieError, KeyError, IndexError, TypeError): self.terminate(False)
MIT License
ziirish/burp-ui
burpui/tools/logging.py
Logger.init_logger
python
def init_logger(self, config): level = config.get("level", None) level = self.level if level is None else level level = convert_level(level) logfile = config.get("logfile") if self._handler is not None: self.removeHandler(self._handler) if logfile: handler = logging.FileHandler(logfile) else: handler = logging.StreamHandler() if level > logging.DEBUG: LOG_FORMAT = ( "[%(asctime)s] %(levelname)s in " "%(module)s.%(funcName)s: %(message)s" ) else: LOG_FORMAT = ( "-" * 27 + "[%(asctime)s]" + "-" * 28 + "\n" + "%(levelname)s in %(module)s.%(funcName)s " + "[%(pathname)s:%(lineno)d]:\n" + "%(message)s\n" + "-" * 80 ) handler.setLevel(level) handler.setFormatter(logging.Formatter(LOG_FORMAT)) self.setLevel(level) self.addHandler(handler) self._handler = handler
:param config: Logger configuration :type config: dict
https://github.com/ziirish/burp-ui/blob/668922753d97f0a71844d6985d9b8b2695fb2421/burpui/tools/logging.py#L84-L121
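A usage sketch assuming the burpui package is importable (the log file path is a placeholder): the config dict drives both the handler choice and the format, and calling init_logger again swaps the handler.

import logging
from burpui.tools.logging import Logger

logger = Logger(name='demo')
logger.init_logger({'level': logging.DEBUG, 'logfile': None})   # no file -> StreamHandler
logger.debug('verbose multi-line format is used at DEBUG level')

logger.init_logger({'level': logging.INFO, 'logfile': '/tmp/burp-ui-demo.log'})
logger.info('compact single-line format is used above DEBUG')   # now written to the file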
import logging def convert_level(verbose): if logging.getLevelName(verbose) != "Level %s" % verbose and ( not isinstance(verbose, int) or verbose > 0 ): return verbose if isinstance(verbose, bool): if verbose: verbose = logging.DEBUG else: verbose = logging.CRITICAL else: levels = [ logging.CRITICAL, logging.ERROR, logging.WARNING, logging.INFO, logging.DEBUG, ] if verbose >= len(levels): verbose = len(levels) - 1 if not verbose: verbose = 0 verbose = levels[verbose] return verbose class Logger(logging.Logger): app = None _handler = None def __init__(self, app=None, name=None, level=logging.NOTSET): if app and not name: name = app.name elif not name: name = "burp-ui" logging.Logger.__init__(self, name, level) if app: self.init_app(app) def init_app(self, app): self.app = app config = { "level": app.config.get("LOG_LEVEL"), "logfile": app.config.get("LOG_FILE"), } self.init_logger(config)
BSD 3-Clause New or Revised License
galacticpuzzlehunt/gph-site
puzzles/views.py
hints
python
def hints(request): puzzle = request.context.puzzle team = request.context.team open_hints = [] if ONE_HINT_AT_A_TIME: open_hints = [hint for hint in team.asked_hints if hint.status == Hint.NO_RESPONSE] relevant_hints_remaining = (team.num_hints_remaining if puzzle.round.slug == INTRO_ROUND_SLUG else team.num_nonintro_hints_remaining) puzzle_hints = [hint for hint in reversed(team.asked_hints) if hint.puzzle == puzzle] can_followup = bool(puzzle_hints) and puzzle_hints[0].status == Hint.ANSWERED error = None if request.context.hunt_is_over: error = 'Sorry, hints are closed.' can_followup = False elif team.num_hints_remaining <= 0 and team.num_free_answers_remaining <= 0: error = 'You have no hints available!' elif relevant_hints_remaining <= 0 and team.num_free_answers_remaining <= 0: error = 'You have no hints that can be used on this puzzle.' elif open_hints: error = ('You already have a hint open (on %s)! ' 'You can have one hint open at a time.' % open_hints[0].puzzle) can_followup = False if request.method == 'POST': is_followup = can_followup and bool(request.POST.get('is_followup')) if error and not is_followup: messages.error(request, error) return redirect('hints', puzzle.slug) form = RequestHintForm(team, request.POST) if form.is_valid(): if relevant_hints_remaining <= 0 and not is_followup: team.total_hints_awarded += 1 team.total_free_answers_awarded -= 1 team.save() Hint( team=team, puzzle=puzzle, hint_question=form.cleaned_data['hint_question'], notify_emails=form.cleaned_data['notify_emails'], is_followup=is_followup, ).save() messages.success(request, ( 'Your request for a hint has been submitted and the puzzle ' 'hunt staff has been notified\u2014we will respond to it soon!' )) return redirect('hints', puzzle.slug) else: form = RequestHintForm(team) return render(request, 'hints.html', { 'hints': puzzle_hints, 'error': error, 'form': form, 'intro_count': sum(1 for p in request.context.all_puzzles if p.round.slug == INTRO_ROUND_SLUG), 'relevant_hints_remaining': relevant_hints_remaining, 'can_followup': can_followup, })
List or submit hint requests for a puzzle.
https://github.com/galacticpuzzlehunt/gph-site/blob/1f7b123106fb78f2ea5b05d5692126d708d4c7ff/puzzles/views.py#L770-L830
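A standalone sketch of the gating logic only (names simplified; the real view also handles follow-up hints, the intro-round split and form validation): the error messages above reduce to a small decision over the team's counters.

def hint_request_error(hunt_is_over, hints_remaining, relevant_hints_remaining,
                       free_answers_remaining, open_hint_puzzle=None):
    # Returns the message the view would flash, or None if a hint request may proceed.
    if hunt_is_over:
        return 'Sorry, hints are closed.'
    if hints_remaining <= 0 and free_answers_remaining <= 0:
        return 'You have no hints available!'
    if relevant_hints_remaining <= 0 and free_answers_remaining <= 0:
        return 'You have no hints that can be used on this puzzle.'
    if open_hint_puzzle is not None:
        return ('You already have a hint open (on %s)! '
                'You can have one hint open at a time.' % open_hint_puzzle)
    return None

print(hint_request_error(False, 2, 0, 1))                        # None: a free answer can cover it
print(hint_request_error(False, 0, 0, 0))                        # out of hints entirely
print(hint_request_error(False, 3, 3, 0, open_hint_puzzle='Meta Puzzle'))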
import csv import datetime import itertools import json import logging import os import requests from collections import defaultdict, OrderedDict, Counter from functools import wraps from urllib.parse import unquote from django.conf import settings from django.contrib import messages from django.contrib.auth import login, update_session_auth_hash from django.contrib.auth.forms import PasswordChangeForm from django.contrib.auth.models import User from django.contrib.auth.tokens import default_token_generator from django.db.models import F, Q, Avg, Count from django.forms import formset_factory, modelformset_factory from django.http import HttpResponse, Http404 from django.shortcuts import redirect, render from django.template import TemplateDoesNotExist from django.urls import reverse from django.utils.encoding import force_bytes from django.utils.html import escape from django.utils.http import urlsafe_base64_encode from django.views.decorators.clickjacking import xframe_options_sameorigin from django.views.decorators.http import require_GET, require_POST from django.views.static import serve from puzzles.models import ( Round, Puzzle, Team, TeamMember, PuzzleUnlock, AnswerSubmission, PuzzleMessage, Survey, Hint, ) from puzzles.forms import ( RegisterForm, TeamMemberForm, TeamMemberFormset, TeamMemberModelFormset, SubmitAnswerForm, RequestHintForm, AnswerHintForm, SurveyForm, PasswordResetForm, ) from puzzles.hunt_config import ( STORY_PAGE_VISIBLE, WRAPUP_PAGE_VISIBLE, INITIAL_STATS_AVAILABLE, SURVEYS_AVAILABLE, HUNT_START_TIME, HUNT_END_TIME, HUNT_CLOSE_TIME, MAX_MEMBERS_PER_TEAM, ONE_HINT_AT_A_TIME, INTRO_ROUND_SLUG, META_META_SLUG, ) from puzzles.messaging import send_mail_wrapper, dispatch_victory_alert from puzzles.shortcuts import dispatch_shortcut def validate_puzzle(require_team=False): def decorator(f): @wraps(f) def inner(request, slug): puzzle = Puzzle.objects.filter(slug=slug).first() if not puzzle or puzzle not in request.context.unlocks: messages.error(request, 'Invalid puzzle name.') return redirect('puzzles') if request.context.team: unlock = request.context.team.db_unlocks.get(puzzle.id) if unlock and not unlock.view_datetime: unlock.view_datetime = request.context.now unlock.save() elif require_team: messages.error( request, 'You must be signed in and have a registered team to ' 'access this page.' 
) return redirect('puzzle', slug) request.context.puzzle = puzzle return f(request) return inner return decorator def access_restrictor(check_request): def decorator(f): @wraps(f) def inner(request, *args, **kwargs): if not request.context.is_superuser: check_res = check_request(request) if check_res is not None: return check_res return f(request, *args, **kwargs) return inner return decorator @access_restrictor def require_admin(request): raise Http404 @access_restrictor def require_after_hunt_end_or_admin(request): if not request.context.hunt_is_over: messages.error(request, 'Sorry, not available until the hunt ends.') return redirect('index') @access_restrictor def require_before_hunt_closed_or_admin(request): if request.context.hunt_is_closed: messages.error(request, 'Sorry, the hunt is over.') return redirect('index') @require_GET def index(request): return render(request, 'home.html') @require_GET def rules(request): return render(request, 'rules.html') @require_GET def faq(request): return render(request, 'faq.html') @require_GET def archive(request): return render(request, 'archive.html') recaptcha_logger = logging.getLogger('puzzles.recaptcha') @require_before_hunt_closed_or_admin def register(request): team_members_formset = formset_factory( TeamMemberForm, formset=TeamMemberFormset, extra=0, min_num=1, max_num=MAX_MEMBERS_PER_TEAM, validate_max=True, ) if request.method == 'POST': form = RegisterForm(request.POST) formset = team_members_formset(request.POST) if 'g-recaptcha-response' in request.POST: url = 'https://www.google.com/recaptcha/api/siteverify' token = request.POST['g-recaptcha-response'] try: response = requests.post(url, data={ 'secret': settings.RECAPTCHA_SECRETKEY, 'response': token, }).json() recaptcha_logger.info('team [%s] token [%s]\n%s' % ( request.POST['team_id'], token, response)) except Exception: pass if form.is_valid() and formset.is_valid(): data = form.cleaned_data formset_data = formset.cleaned_data user = User.objects.create_user( data.get('team_id'), password=data.get('password'), first_name=data.get('team_name'), ) team = Team.objects.create( user=user, team_name=data.get('team_name'), ) for team_member in formset_data: TeamMember.objects.create( team=team, name=team_member.get('name'), email=team_member.get('email'), ) login(request, user) team_link = request.build_absolute_uri( reverse('team', args=(data.get('team_name'),)) ) send_mail_wrapper( 'Team created', 'registration_email', { 'team_name': data.get('team_name'), 'team_link': team_link, }, team.get_emails()) return redirect('index') else: form = RegisterForm() formset = team_members_formset() return render(request, 'register.html', { 'form': form, 'team_members_formset': formset, }) @require_before_hunt_closed_or_admin def password_change(request): if request.method == 'POST': form = PasswordChangeForm(user=request.user, data=request.POST) if form.is_valid(): form.save() update_session_auth_hash(request, form.user) return redirect('password_change_done') else: form = PasswordChangeForm(user=request.user) return render(request, 'password_change.html', {'form': form}) @require_before_hunt_closed_or_admin def password_reset(request): if request.method == 'POST': form = PasswordResetForm(data=request.POST) if form.is_valid(): team = form.cleaned_data.get('team') uid = urlsafe_base64_encode(force_bytes(team.user.pk)) token = default_token_generator.make_token(team.user) reset_link = request.build_absolute_uri(reverse( 'password_reset_confirm', kwargs={'uidb64': uid, 'token': token}, )) 
send_mail_wrapper( 'Password reset', 'password_reset_email', {'team_name': team.team_name, 'reset_link': reset_link}, team.get_emails()) return redirect('password_reset_done') else: form = PasswordResetForm() return render(request, 'password_reset.html', {'form': form}) @require_GET def team(request, team_name): user_team = request.context.team is_own_team = user_team is not None and user_team.team_name == team_name can_view_info = is_own_team or request.context.is_superuser team_query = Team.objects.filter(team_name=team_name) if not can_view_info: team_query = team_query.exclude(is_hidden=True) team = team_query.first() if not team: messages.error(request, 'Team \u201c{}\u201d not found.'.format(team_name)) return redirect('teams') leaderboard_ids = Team.leaderboard_teams(user_team).values_list('id', flat=True) rank = None for i, leaderboard_id in enumerate(leaderboard_ids): if team.id == leaderboard_id: rank = i + 1 break guesses = defaultdict(int) correct = {} unlock_time_map = { puzzle_id: unlock.unlock_datetime for (puzzle_id, unlock) in team.db_unlocks.items() } for submission in team.submissions: if submission.is_correct: correct[submission.puzzle_id] = { 'submission': submission, 'unlock_time': unlock_time_map.get(submission.puzzle_id), 'solve_time': submission.submitted_datetime, 'open_duration': (submission.submitted_datetime - unlock_time_map[submission.puzzle_id]) .total_seconds() if submission.puzzle_id in unlock_time_map else None, } else: guesses[submission.puzzle_id] += 1 submissions = [] for puzzle in correct: correct[puzzle]['guesses'] = guesses[puzzle] submissions.append(correct[puzzle]) submissions.sort(key=lambda s: s['solve_time']) solves = [HUNT_START_TIME] + [s['solve_time'] for s in submissions] if solves[-1] >= HUNT_END_TIME: solves.append(min(request.context.now, HUNT_CLOSE_TIME)) else: solves.append(HUNT_END_TIME) chart = { 'hunt_length': (solves[-1] - HUNT_START_TIME).total_seconds(), 'solves': [{ 'before': (solves[i - 1] - HUNT_START_TIME).total_seconds(), 'after': (solves[i] - HUNT_START_TIME).total_seconds(), } for i in range(1, len(solves))], 'metas': [ (s['solve_time'] - HUNT_START_TIME).total_seconds() for s in submissions if s['submission'].puzzle.is_meta ], 'end': (HUNT_END_TIME - HUNT_START_TIME).total_seconds(), } return render(request, 'team.html', { 'view_team': team, 'submissions': submissions, 'chart': chart, 'solves': sum(1 for s in submissions if not s['submission'].used_free_answer), 'modify_info_available': is_own_team and not request.context.hunt_is_closed, 'view_info_available': can_view_info, 'rank': rank, }) def teams_generic(request, hide_hidden): team_name = request.GET.get('team') user_team = request.context.team return render(request, 'teams.html', { 'teams': Team.leaderboard(user_team, hide_hidden=hide_hidden), 'current_team': user_team, }) @require_GET def teams(request): return teams_generic(request, hide_hidden=True) @require_GET @require_admin def teams_unhidden(request): return teams_generic(request, hide_hidden=False) @require_before_hunt_closed_or_admin def edit_team(request): team = request.context.team if team is None: messages.error(request, 'You\u2019re not logged in.') return redirect('login') team_members_formset = modelformset_factory( TeamMember, formset=TeamMemberModelFormset, fields=('name', 'email'), extra=0, min_num=1, max_num=MAX_MEMBERS_PER_TEAM, validate_max=True, ) if request.method == 'POST': post_data_copy = request.POST.copy() num_forms = 0 for i in range(int(post_data_copy['form-TOTAL_FORMS'])): if 
post_data_copy.get('form-{}-id'.format(i)): num_forms += 1 post_data_copy['form-INITIAL_FORMS'] = str(num_forms) post_data_copy['team'] = team formset = team_members_formset(post_data_copy) if formset.is_valid(): team_member_ids = set(team.teammember_set.values_list('id', flat=True)) for form in formset.forms: if form.cleaned_data and form.cleaned_data['id'] is not None: team_member_ids.remove(form.cleaned_data['id'].id) TeamMember.objects.filter(id__in=team_member_ids).delete() team_member_instances = formset.save(commit=False) for team_member in team_member_instances: team_member.team = team team_member.save() messages.success(request, 'Team updated!') return redirect('edit-team') if len(formset) == 0: errors = formset.non_form_errors() formset = team_members_formset(queryset=team.teammember_set.all()) formset.non_form_errors().extend(errors) else: formset = team_members_formset(queryset=team.teammember_set.all()) return render(request, 'edit_team.html', {'team_members_formset': formset}) @require_GET def puzzles(request): if request.context.hunt_has_started: return render(request, 'puzzles.html', {'rounds': render_puzzles(request)}) elif request.context.hunt_has_almost_started: return render(request, 'countdown.html', {'start': request.context.start_time}) else: raise Http404 @require_GET def round(request, slug): round = Round.objects.filter(slug=slug).first() if round: rounds = render_puzzles(request) if slug in rounds: request.context.round = round template_name = 'round_bodies/{}.html'.format(slug) try: return render(request, template_name, {'round': rounds[slug]}) except (TemplateDoesNotExist, IsADirectoryError): return redirect('puzzles') messages.error(request, 'Invalid round name.') return redirect('puzzles') def render_puzzles(request): team = request.context.team solved = {} hints = {} if team is not None: solved = team.solves hints = Counter(hint.puzzle_id for hint in team.asked_hints) correct = defaultdict(int) guesses = defaultdict(int) teams = defaultdict(set) full_stats = request.context.is_superuser or request.context.hunt_is_over if full_stats or INITIAL_STATS_AVAILABLE: for submission in AnswerSubmission.objects.filter( used_free_answer=False, team__is_hidden=False, submitted_datetime__lt=HUNT_END_TIME, ): if submission.is_correct: correct[submission.puzzle_id] += 1 guesses[submission.puzzle_id] += 1 teams[submission.puzzle_id].add(submission.team_id) fields = Survey.fields() survey_averages = dict() if request.context.is_superuser: surveyed_puzzles = Puzzle.objects.annotate(**{ field.name: Avg('survey__' + field.name) for field in fields }).values_list('id', *(field.name for field in fields)) for sp in surveyed_puzzles: if all(a is not None for a in sp[1:]): survey_averages[sp[0]] = sp[1:] rounds = OrderedDict() for puzzle in request.context.unlocks: if puzzle.round.slug not in rounds: rounds[puzzle.round.slug] = { 'round': puzzle.round, 'puzzles': [], 'unlocked_slugs': [], } rounds[puzzle.round.slug]['unlocked_slugs'].append(puzzle.slug) data = {'puzzle': puzzle} if puzzle.id in solved: data['answer'] = puzzle.answer if puzzle.is_meta: rounds[puzzle.round.slug]['meta_answer'] = puzzle.answer if puzzle.id in hints: data['hints'] = hints[puzzle.id] data['full_stats'] = full_stats if puzzle.id in guesses: data['solve_stats'] = { 'correct': correct[puzzle.id], 'guesses': guesses[puzzle.id], 'teams': len(teams[puzzle.id]), } if puzzle.id in survey_averages: data['survey_stats'] = [{ 'average': average, 'adjective': field.adjective, 'max_rating': field.max_rating, } for 
(field, average) in zip(fields, survey_averages[puzzle.id])] data['new'] = (team and puzzle.id in team.db_unlocks and not team.db_unlocks[puzzle.id].view_datetime) rounds[puzzle.round.slug]['puzzles'].append(data) return rounds @require_GET @validate_puzzle() def puzzle(request): team = request.context.team template_name = 'puzzle_bodies/{}'.format(request.context.puzzle.body_template) data = { 'can_view_hints': team and not request.context.hunt_is_closed and ( team.num_hints_total > 0 or team.num_free_answers_total > 0 ), 'can_ask_for_hints': team and not request.context.hunt_is_over and ( team.num_hints_remaining > 0 or team.num_free_answers_remaining > 0 ), } try: return render(request, template_name, data) except (TemplateDoesNotExist, IsADirectoryError): data['template_name'] = template_name return render(request, 'puzzle.html', data) @validate_puzzle(require_team=True) @require_before_hunt_closed_or_admin def solve(request): puzzle = request.context.puzzle team = request.context.team form = None survey = None if request.method == 'POST' and 'answer' in request.POST: if request.context.puzzle_answer: messages.error(request, 'You\u2019ve already solved this puzzle!') return redirect('solve', puzzle.slug) if request.context.guesses_remaining <= 0: messages.error(request, 'You have no more guesses for this puzzle!') return redirect('solve', puzzle.slug) semicleaned_guess = PuzzleMessage.semiclean_guess(request.POST.get('answer')) normalized_answer = Puzzle.normalize_answer(request.POST.get('answer')) puzzle_messages = [ message for message in puzzle.puzzlemessage_set.all() if semicleaned_guess == message.semicleaned_guess ] tried_before = any( normalized_answer == submission.submitted_answer for submission in request.context.puzzle_submissions ) is_correct = normalized_answer == puzzle.normalized_answer form = SubmitAnswerForm(request.POST) if puzzle_messages: for message in puzzle_messages: form.add_error(None, message.response) elif not normalized_answer: form.add_error(None, 'All puzzle answers will have ' 'at least one letter A through Z (case does not matter).') elif tried_before: form.add_error(None, 'You\u2019ve already tried calling in the ' 'answer \u201c%s\u201d for this puzzle.' % normalized_answer) elif form.is_valid(): AnswerSubmission( team=team, puzzle=puzzle, submitted_answer=normalized_answer, is_correct=is_correct, used_free_answer=False, ).save() if is_correct: if not request.context.hunt_is_over: team.last_solve_time = request.context.now team.save() messages.success(request, '%s is correct!' % puzzle.answer) if puzzle.slug == META_META_SLUG: dispatch_victory_alert( 'Team %s has finished the hunt!' % team + '\n**Emails:** <%s>' % request.build_absolute_uri(reverse('finishers'))) return redirect('victory') else: messages.error(request, '%s is incorrect.' 
% normalized_answer) return redirect('solve', puzzle.slug) elif request.method == 'POST': if not request.context.puzzle_answer or not SURVEYS_AVAILABLE: raise Http404 survey = SurveyForm(request.POST) if survey.is_valid(): Survey.objects.update_or_create( puzzle=puzzle, team=team, defaults=survey.cleaned_data) messages.success(request, 'Thanks!') return redirect('solve', puzzle.slug) if survey is None and SURVEYS_AVAILABLE: survey = SurveyForm( instance=Survey.objects.filter(puzzle=puzzle, team=team).first()) return render(request, 'solve.html', { 'form': form or SubmitAnswerForm(), 'survey': survey, }) @validate_puzzle(require_team=True) @require_before_hunt_closed_or_admin def free_answer(request): puzzle = request.context.puzzle team = request.context.team if request.method == 'POST': if puzzle.is_meta: messages.error(request, 'You can\u2019t use a free answer on a metapuzzle.') elif request.context.puzzle_answer: messages.error(request, 'You\u2019ve already solved this puzzle!') elif team.num_free_answers_remaining <= 0: messages.error(request, 'You have no free answers to use.') elif request.POST.get('use') == 'Yes': AnswerSubmission( team=team, puzzle=puzzle, submitted_answer=puzzle.normalized_answer, is_correct=True, used_free_answer=True, ).save() messages.success(request, 'Free answer used!') return redirect('solve', puzzle.slug) return render(request, 'free_answer.html') @validate_puzzle() @require_after_hunt_end_or_admin def post_hunt_solve(request): puzzle = request.context.puzzle answer = Puzzle.normalize_answer(request.GET.get('answer')) is_correct = answer == puzzle.normalized_answer return render(request, 'post_hunt_solve.html', { 'is_correct': answer is not None and is_correct, 'is_wrong': answer is not None and not is_correct, 'form': SubmitAnswerForm(), }) @require_GET @validate_puzzle() @require_admin def survey(request): surveys = [ {'survey': survey, 'ratings': []} for survey in request.context.puzzle.survey_set.select_related('team').order_by('id') ] fields = [ {'field': field, 'total': 0, 'count': 0, 'max': field.max_rating} for field in Survey.fields() ] for field in fields: for survey in surveys: rating = field['field'].value_from_object(survey['survey']) if not survey['survey'].team.is_hidden: field['total'] += rating field['count'] += 1 survey['ratings'].append((rating, field['field'].max_rating)) field['average'] = field['total'] / field['count'] if field['count'] else 0 return render(request, 'survey.html', {'fields': fields, 'surveys': surveys}) @require_GET @require_admin def hint_list(request): if 'team' in request.GET or 'puzzle' in request.GET: hints = ( Hint.objects .select_related() .order_by('-submitted_datetime') ) query_description = "Hints" if 'team' in request.GET: team = Team.objects.get(id=request.GET['team']) hints = hints.filter(team=team) query_description += " from " + team.team_name if 'puzzle' in request.GET: puzzle = Puzzle.objects.get(id=request.GET['puzzle']) hints = hints.filter(puzzle=puzzle) query_description += " on " + puzzle.name return render(request, 'hint_list_query.html', { 'query_description': query_description, 'hints': hints, }) else: unanswered = ( Hint.objects .select_related() .filter(status=Hint.NO_RESPONSE) .order_by('submitted_datetime') ) popular = list( Hint.objects .values('puzzle_id') .annotate(count=Count('team_id', distinct=True)) .order_by('-count') ) claimers = list( Hint.objects .values('claimer') .annotate(count=Count('*')) .order_by('-count') ) puzzles = {puzzle.id: puzzle for puzzle in 
request.context.all_puzzles} for aggregate in popular: aggregate['puzzle'] = puzzles[aggregate['puzzle_id']] return render(request, 'hint_list.html', { 'unanswered': unanswered, 'stats': itertools.zip_longest(popular, claimers), }) @validate_puzzle(require_team=True) @require_before_hunt_closed_or_admin
MIT License
plangrid/flask-rebar
flask_rebar/authenticators/header_api_key.py
HeaderApiKeyAuthenticator.register_key
python
def register_key(self, key, app_name=DEFAULT_APP_NAME):
    self.keys[key] = app_name
Register a client application's shared secret.

:param str app_name:
    Name for the application. Since an application can have multiple
    shared secrets, this does not need to be unique.
:param str key: The shared secret.
https://github.com/plangrid/flask-rebar/blob/9aa839badb97e1048f261d1573a0dc1341b335eb/flask_rebar/authenticators/header_api_key.py#L51-L61
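A minimal usage sketch, importing the class from the module path shown above; the header name, key values and app name are made up. register_key simply maps each shared secret to an application label, so one application can hold several secrets at once.

from flask_rebar.authenticators.header_api_key import HeaderApiKeyAuthenticator

authenticator = HeaderApiKeyAuthenticator(header='X-MyApp-ApiKey')
authenticator.register_key(key='key-for-service-a', app_name='service-a')
authenticator.register_key(key='rotated-key-for-service-a', app_name='service-a')  # same app, second secret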
from flask import request, g from werkzeug.security import safe_str_cmp from flask_rebar import errors, messages from flask_rebar.authenticators.base import Authenticator def get_authenticated_app_name(): return g.authenticated_app_name class HeaderApiKeyAuthenticator(Authenticator): DEFAULT_APP_NAME = "default" def __init__(self, header, name="sharedSecret"): self.header = header self.keys = {} self.name = name @property def authenticated_app_name(self): return get_authenticated_app_name()
MIT License
bitmovin/bitmovin-api-sdk-python
bitmovin_api_sdk/models/akamai_net_storage_input.py
AkamaiNetStorageInput.__eq__
python
def __eq__(self, other):
    if not isinstance(other, AkamaiNetStorageInput):
        return False
    return self.__dict__ == other.__dict__
Returns true if both objects are equal
https://github.com/bitmovin/bitmovin-api-sdk-python/blob/79dd938804197151af7cbe5501c7ec1d97872c15/bitmovin_api_sdk/models/akamai_net_storage_input.py#L187-L192
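A small sketch of the equality semantics, constructing the model directly with made-up credentials: two instances with identical attribute dictionaries compare equal, and any value of another type is never equal.

from bitmovin_api_sdk.models.akamai_net_storage_input import AkamaiNetStorageInput

a = AkamaiNetStorageInput(host='example-nsu.akamaihd.net', username='user', password='secret')
b = AkamaiNetStorageInput(host='example-nsu.akamaihd.net', username='user', password='secret')

assert a == b                    # same __dict__, so equal
assert not (a == 'not a model')  # non-AkamaiNetStorageInput values are never equal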
from enum import Enum from six import string_types, iteritems from bitmovin_api_sdk.common.poscheck import poscheck_model from bitmovin_api_sdk.models.input import Input import pprint import six class AkamaiNetStorageInput(Input): @poscheck_model def __init__(self, id_=None, name=None, description=None, created_at=None, modified_at=None, custom_data=None, host=None, username=None, password=None): super(AkamaiNetStorageInput, self).__init__(id_=id_, name=name, description=description, created_at=created_at, modified_at=modified_at, custom_data=custom_data) self._host = None self._username = None self._password = None self.discriminator = None if host is not None: self.host = host if username is not None: self.username = username if password is not None: self.password = password @property def openapi_types(self): types = {} if hasattr(super(AkamaiNetStorageInput, self), 'openapi_types'): types = getattr(super(AkamaiNetStorageInput, self), 'openapi_types') types.update({ 'host': 'string_types', 'username': 'string_types', 'password': 'string_types' }) return types @property def attribute_map(self): attributes = {} if hasattr(super(AkamaiNetStorageInput, self), 'attribute_map'): attributes = getattr(super(AkamaiNetStorageInput, self), 'attribute_map') attributes.update({ 'host': 'host', 'username': 'username', 'password': 'password' }) return attributes @property def host(self): return self._host @host.setter def host(self, host): if host is not None: if not isinstance(host, string_types): raise TypeError("Invalid type for `host`, type has to be `string_types`") self._host = host @property def username(self): return self._username @username.setter def username(self, username): if username is not None: if not isinstance(username, string_types): raise TypeError("Invalid type for `username`, type has to be `string_types`") self._username = username @property def password(self): return self._password @password.setter def password(self, password): if password is not None: if not isinstance(password, string_types): raise TypeError("Invalid type for `password`, type has to be `string_types`") self._password = password def to_dict(self): result = {} if hasattr(super(AkamaiNetStorageInput, self), "to_dict"): result = super(AkamaiNetStorageInput, self).to_dict() for attr, _ in six.iteritems(self.openapi_types): value = getattr(self, attr) if value is None: continue if isinstance(value, list): if len(value) == 0: continue result[self.attribute_map.get(attr)] = [y.value if isinstance(y, Enum) else y for y in [x.to_dict() if hasattr(x, "to_dict") else x for x in value]] elif hasattr(value, "to_dict"): result[self.attribute_map.get(attr)] = value.to_dict() elif isinstance(value, Enum): result[self.attribute_map.get(attr)] = value.value elif isinstance(value, dict): result[self.attribute_map.get(attr)] = {k: (v.to_dict() if hasattr(v, "to_dict") else v) for (k, v) in value.items()} else: result[self.attribute_map.get(attr)] = value return result def to_str(self): return pprint.pformat(self.to_dict()) def __repr__(self): return self.to_str()
MIT License
yuxixie/rl-for-question-generation
discriminators/src/answerability/pretraining/fairseq/data/iterators.py
EpochBatchIterator.next_epoch_itr
python
def next_epoch_itr(self, shuffle=True, fix_batches_to_gpus=False):
    if self._next_epoch_itr is not None:
        self._cur_epoch_itr = self._next_epoch_itr
        self._next_epoch_itr = None
    else:
        self.epoch += 1
        self._cur_epoch_itr = self._get_iterator_for_epoch(
            self.epoch, shuffle, fix_batches_to_gpus=fix_batches_to_gpus)
    return self._cur_epoch_itr
Return a new iterator over the dataset.

Args:
    shuffle (bool, optional): shuffle batches before returning the
        iterator. Default: ``True``
    fix_batches_to_gpus: ensure that batches are always allocated to the
        same shards across epochs. Requires that :attr:`dataset` supports
        prefetching. Default: ``False``
https://github.com/yuxixie/rl-for-question-generation/blob/188cd7b04528e4f192023a596a072b3245c62838/discriminators/src/answerability/pretraining/fairseq/data/iterators.py#L96-L114
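A hypothetical sketch of the intended call pattern: the iterator is built once and next_epoch_itr is called at the top of every epoch. Here dataset, collate_fn, batch_sampler and num_epochs are placeholders for objects the surrounding training code would already provide.

epoch_itr = EpochBatchIterator(
    dataset=dataset,              # a torch.utils.data.Dataset (placeholder)
    collate_fn=collate_fn,        # placeholder collation function
    batch_sampler=batch_sampler,  # precomputed batches (placeholder)
    seed=1,
)
for _ in range(num_epochs):       # num_epochs is a placeholder
    itr = epoch_itr.next_epoch_itr(shuffle=True)
    for batch in itr:
        pass  # run one training step on `batch`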
import itertools import math import numpy as np import torch from . import data_utils class CountingIterator(object): def __init__(self, iterable): self.iterable = iterable self.count = 0 self.itr = iter(self) def __len__(self): return len(self.iterable) def __iter__(self): for x in self.iterable: self.count += 1 yield x def __next__(self): return next(self.itr) def has_next(self): return self.count < len(self) def skip(self, num_to_skip): next(itertools.islice(self.itr, num_to_skip, num_to_skip), None) return self class EpochBatchIterator(object): def __init__(self, dataset, collate_fn, batch_sampler, seed=1, num_shards=1, shard_id=0): assert isinstance(dataset, torch.utils.data.Dataset) self.dataset = dataset self.collate_fn = collate_fn self.frozen_batches = tuple(batch_sampler) self.seed = seed self.num_shards = num_shards self.shard_id = shard_id self.epoch = 0 self._cur_epoch_itr = None self._next_epoch_itr = None self._supports_prefetch = ( hasattr(dataset, 'supports_prefetch') and dataset.supports_prefetch ) def __len__(self): return len(self.frozen_batches)
MIT License
cebel/pyuniprot
src/pyuniprot/manager/database.py
DbManager.get_alternative_short_names
python
def get_alternative_short_names(cls, entry):
    names = []
    query = "./n:protein/n:alternativeName/n:shortName"
    for name in entry.iterfind(query, namespaces=XN):
        names.append(models.AlternativeShortName(name=name.text))
    return names
get list of models.AlternativeShortName objects from XML node entry

:param entry: XML node entry
:return: list of :class:`pyuniprot.manager.models.AlternativeShortName` objects
https://github.com/cebel/pyuniprot/blob/19cef498bf9cb01955da1673c00361f8e2d4e8f9/src/pyuniprot/manager/database.py#L440-L452
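The XPath query is the core of the method, so here is a self-contained sketch of what it matches, using a made-up UniProt-style XML fragment and the same namespace mapping the module defines as XN (the ORM wrapping is left out).

from lxml import etree

XN = {'n': 'http://uniprot.org/uniprot'}
entry = etree.fromstring(
    '<entry xmlns="http://uniprot.org/uniprot">'
    '<protein><alternativeName>'
    '<fullName>Example protein</fullName>'
    '<shortName>EXP</shortName>'
    '</alternativeName></protein>'
    '</entry>'
)
# The same query the method uses, returning the raw text values.
short_names = [node.text for node in
               entry.iterfind("./n:protein/n:alternativeName/n:shortName", namespaces=XN)]
assert short_names == ['EXP']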
import configparser import gzip import logging import os import re import shutil import sys import time import lxml from configparser import RawConfigParser from datetime import datetime from typing import Iterable import numpy as np import sqlalchemy from sqlalchemy.engine import reflection from sqlalchemy.orm import sessionmaker, scoped_session from sqlalchemy.sql import sqltypes from tqdm import tqdm from lxml.etree import iterparse from . import defaults from . import models from ..constants import PYUNIPROT_DATA_DIR, PYUNIPROT_DIR if sys.version_info[0] == 3: from urllib.request import urlretrieve from requests.compat import urlparse, urlsplit else: from urllib import urlretrieve from urlparse import urlparse, urlsplit log = logging.getLogger(__name__) alchemy_pandas_dytpe_mapper = { sqltypes.Text: np.unicode, sqltypes.String: np.unicode, sqltypes.Integer: np.float, sqltypes.REAL: np.double } LxmlElement = lxml.etree._Element XN_URL = 'http://uniprot.org/uniprot' XN = {'n': XN_URL} def get_connection_string(connection=None): if not connection: config = configparser.ConfigParser() cfp = defaults.config_file_path if os.path.exists(cfp): log.info('fetch database configuration from %s', cfp) config.read(cfp) connection = config['database']['sqlalchemy_connection_string'] log.info('load connection string from %s: %s', cfp, connection) else: with open(cfp, 'w') as config_file: connection = defaults.sqlalchemy_connection_string_default config['database'] = {'sqlalchemy_connection_string': connection} config.write(config_file) log.info('create configuration file %s', cfp) return connection class BaseDbManager(object): def __init__(self, connection=None, echo=False): log.setLevel(logging.INFO) handler = logging.FileHandler(os.path.join(PYUNIPROT_DIR, defaults.TABLE_PREFIX + 'database.log')) handler.setLevel(logging.INFO) formatter = logging.Formatter('%(asctime)s - %(name)s - %(levelname)s - %(message)s') handler.setFormatter(formatter) log.addHandler(handler) try: self.connection = get_connection_string(connection) self.engine = sqlalchemy.create_engine(self.connection, echo=echo) self.inspector = reflection.Inspector.from_engine(self.engine) self.sessionmaker = sessionmaker( bind=self.engine, autoflush=False, autocommit=False, expire_on_commit=True ) self.session = scoped_session(self.sessionmaker) except: log.warning('No valid database connection. 
Execute `pyuniprot connection` on command line') def _create_tables(self, checkfirst=True): log.info('create tables in {}'.format(self.engine.url)) models.Base.metadata.create_all(self.engine, checkfirst=checkfirst) def _drop_tables(self): log.info('drop tables in {}'.format(self.engine.url)) self.session.commit() models.Base.metadata.drop_all(self.engine) self.session.commit() class DbManager(BaseDbManager): pmids = set() keywords = {} subcellular_locations = {} tissues = {} def db_import_xml(self, url: Iterable[str] = None, force_download: bool = False, taxids: Iterable[int] = None, silent: bool = False): log.info('Update UniProt database from {}'.format(url)) self._drop_tables() xml_file_path, version_file_path = self.download_and_extract(url, force_download) self._create_tables() self.import_version(version_file_path) self.import_xml(xml_file_path, taxids, silent) self.session.close() def import_version(self, version_file_path): pattern = "UniProtKB/(?P<knowledgebase>Swiss-Prot|TrEMBL) Release" " (?P<release_name>\\d{4}_\\d{2}) of (?P<release_date>\\d{2}-\\w{3}-\\d{4})" with open(version_file_path) as fd: content = fd.read() for knowledgebase, release_name, release_date_str in re.findall(pattern, content): release_date = datetime.strptime(release_date_str, '%d-%b-%Y') version = models.Version( knowledgebase=knowledgebase, release_name=release_name, release_date=release_date ) self.session.add(version) self.session.commit() def import_xml(self, xml_file_path, taxids=None, silent=False): version = self.session.query(models.Version).filter(models.Version.knowledgebase == 'Swiss-Prot').first() version.import_start_date = datetime.now() log.info('Load gzipped XML from {}'.format(xml_file_path)) doc = iterparse(xml_file_path, events=('start', 'end')) batch_commit_after = 100 counter = 0 for action, elem in tqdm(doc, mininterval=1, disable=silent): if action == 'end' and elem.tag == f'{{{XN_URL}}}entry': counter += 1 self.insert_entry(elem, taxids) if counter%batch_commit_after == 0: self.session.commit() elem.clear() version.import_completed_date = datetime.now() self.session.commit() def insert_entry(self, entry, taxids): entry_dict = dict(entry.attrib) entry_dict['created'] = datetime.strptime(entry_dict['created'], '%Y-%m-%d') entry_dict['modified'] = datetime.strptime(entry_dict['modified'], '%Y-%m-%d') taxid = self.get_taxid(entry) if taxids is None or taxid in taxids: entry_dict = self.update_entry_dict(entry, entry_dict, taxid) entry_obj = models.Entry(**entry_dict) del entry_dict self.session.add(entry_obj) def update_entry_dict(self, entry, entry_dict, taxid): rp_full, rp_short = self.get_recommended_protein_name(entry) pmids = self.get_pmids(entry) accessions = self.get_accessions(entry) sequence = self.get_sequence(entry) name = self.get_entry_name(entry) subcellular_locations = self.get_subcellular_locations(entry) tissue_in_references = self.get_tissue_in_references(entry) organism_hosts = self.get_organism_hosts(entry) db_references = self.get_db_references(entry) other_gene_names = self.get_other_gene_names(entry) features = self.get_features(entry) functions = self.get_functions(entry) gene_name = self.get_gene_name(entry) keywords = self.get_keywords(entry) ec_numbers = self.get_ec_numbers(entry) alternative_full_names = self.get_alternative_full_names(entry) alternative_short_names = self.get_alternative_short_names(entry) disease_comments = self.get_disease_comments(entry) tissue_specificities = self.get_tissue_specificities(entry) entry_dict.update( accessions=accessions, 
sequence=sequence, name=name, pmids=pmids, subcellular_locations=subcellular_locations, tissue_in_references=tissue_in_references, organism_hosts=organism_hosts, recommended_full_name=rp_full, recommended_short_name=rp_short, taxid=taxid, db_references=db_references, other_gene_names=other_gene_names, features=features, functions=functions, gene_name=gene_name, keywords=keywords, ec_numbers=ec_numbers, alternative_full_names=alternative_full_names, alternative_short_names=alternative_short_names, disease_comments=disease_comments, tissue_specificities=tissue_specificities ) return entry_dict @classmethod def get_sequence(cls, entry): seq_tag = entry.find("./n:sequence", namespaces=XN) seq = seq_tag.text seq_tag.clear() return models.Sequence(sequence=seq) def get_tissue_in_references(self, entry): tissue_in_references = [] query = "./n:reference/n:source/n:tissue" tissues = {x.text for x in entry.iterfind(query, namespaces=XN)} for tissue in tissues: if tissue not in self.tissues: self.tissues[tissue] = models.TissueInReference(tissue=tissue) tissue_in_references.append(self.tissues[tissue]) return tissue_in_references @classmethod def get_tissue_specificities(cls, entry): tissue_specificities = [] query = "./n:comment[@type='tissue specificity']/n:text" for ts in entry.iterfind(query, namespaces=XN): tissue_specificities.append(models.TissueSpecificity(comment=ts.text)) return tissue_specificities def get_subcellular_locations(self, entry): subcellular_locations = [] query = './n:comment/n:subcellularLocation/location' sls = {x.text for x in entry.iterfind(query, namespaces=XN)} for sl in sls: if sl not in self.subcellular_locations: self.subcellular_locations[sl] = models.SubcellularLocation(location=sl) subcellular_locations.append(self.subcellular_locations[sl]) return subcellular_locations def get_keywords(self, entry): keyword_objects = [] for keyword in entry.iterfind("./n:keyword", namespaces=XN): identifier = keyword.get('id') name = keyword.text keyword_hash = hash(identifier) if keyword_hash not in self.keywords: self.keywords[keyword_hash] = models.Keyword(**{'identifier': identifier, 'name': name}) keyword_objects.append(self.keywords[keyword_hash]) return keyword_objects @classmethod def get_entry_name(cls, entry): name = entry.find('./n:name', namespaces=XN).text return name def get_disease_comments(self, entry): disease_comments = [] query = "./n:comment[@type='disease']" for disease_comment in entry.iterfind(query, namespaces=XN): value_dict = {'comment': disease_comment.find('./n:text', namespaces=XN).text} disease = disease_comment.find("./n:disease", namespaces=XN) if disease is not None: disease_dict = {'identifier': disease.get('id')} for element in disease: key = element.tag if key in ['acronym', 'description', 'name']: disease_dict[key] = element.text if key == 'dbReference': disease_dict['ref_id'] = element.get('id') disease_dict['ref_type'] = element.get('type') disease_obj = models.get_or_create(self.session, models.Disease, **disease_dict) self.session.add(disease_obj) self.session.flush() value_dict['disease_id'] = disease_obj.id disease_comments.append(models.DiseaseComment(**value_dict)) return disease_comments @classmethod def get_alternative_full_names(cls, entry): names = [] query = "./n:protein/n:alternativeName/n:fullName" for name in entry.iterfind(query, namespaces=XN): names.append(models.AlternativeFullName(name=name.text)) return names @classmethod
Apache License 2.0
intel/openfl
openfl/protocols/director_pb2_grpc.py
FederationDirectorServicer.CollaboratorHealthCheck
python
def CollaboratorHealthCheck(self, request, context):
    context.set_code(grpc.StatusCode.UNIMPLEMENTED)
    context.set_details('Method not implemented!')
    raise NotImplementedError('Method not implemented!')
Missing associated documentation comment in .proto file.
https://github.com/intel/openfl/blob/4bda3850b6bce7c904a5ac3ed56115bec00be2e0/openfl/protocols/director_pb2_grpc.py#L123-L127
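The generated servicer only raises UNIMPLEMENTED; a real director is expected to override the method. Below is a hypothetical sketch of such an override; the response message class comes from the generated stubs, but its fields are defined in director.proto and are deliberately left unfilled here.

from openfl.protocols import director_pb2, director_pb2_grpc

class Director(director_pb2_grpc.FederationDirectorServicer):

    def CollaboratorHealthCheck(self, request, context):
        # A real implementation would record `request` (a CollaboratorStatus
        # message) and populate the response fields defined in director.proto.
        return director_pb2.CollaboratorHealthCheckResponse()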
import grpc from . import director_pb2 as director__pb2 class FederationDirectorStub(object): def __init__(self, channel): self.AcknowledgeShard = channel.unary_unary( '/FederationDirector/AcknowledgeShard', request_serializer=director__pb2.ShardInfo.SerializeToString, response_deserializer=director__pb2.ShardAcknowledgement.FromString, ) self.WaitExperiment = channel.stream_stream( '/FederationDirector/WaitExperiment', request_serializer=director__pb2.WaitExperimentRequest.SerializeToString, response_deserializer=director__pb2.WaitExperimentResponse.FromString, ) self.GetExperimentData = channel.unary_stream( '/FederationDirector/GetExperimentData', request_serializer=director__pb2.GetExperimentDataRequest.SerializeToString, response_deserializer=director__pb2.ExperimentData.FromString, ) self.SetNewExperiment = channel.stream_unary( '/FederationDirector/SetNewExperiment', request_serializer=director__pb2.ExperimentInfo.SerializeToString, response_deserializer=director__pb2.SetNewExperimentResponse.FromString, ) self.GetDatasetInfo = channel.unary_unary( '/FederationDirector/GetDatasetInfo', request_serializer=director__pb2.GetDatasetInfoRequest.SerializeToString, response_deserializer=director__pb2.ShardInfo.FromString, ) self.GetTrainedModel = channel.unary_unary( '/FederationDirector/GetTrainedModel', request_serializer=director__pb2.GetTrainedModelRequest.SerializeToString, response_deserializer=director__pb2.TrainedModelResponse.FromString, ) self.StreamMetrics = channel.unary_stream( '/FederationDirector/StreamMetrics', request_serializer=director__pb2.StreamMetricsRequest.SerializeToString, response_deserializer=director__pb2.StreamMetricsResponse.FromString, ) self.RemoveExperimentData = channel.unary_unary( '/FederationDirector/RemoveExperimentData', request_serializer=director__pb2.RemoveExperimentRequest.SerializeToString, response_deserializer=director__pb2.RemoveExperimentResponse.FromString, ) self.CollaboratorHealthCheck = channel.unary_unary( '/FederationDirector/CollaboratorHealthCheck', request_serializer=director__pb2.CollaboratorStatus.SerializeToString, response_deserializer=director__pb2.CollaboratorHealthCheckResponse.FromString, ) self.GetEnvoys = channel.unary_unary( '/FederationDirector/GetEnvoys', request_serializer=director__pb2.GetEnvoysRequest.SerializeToString, response_deserializer=director__pb2.GetEnvoysResponse.FromString, ) class FederationDirectorServicer(object): def AcknowledgeShard(self, request, context): context.set_code(grpc.StatusCode.UNIMPLEMENTED) context.set_details('Method not implemented!') raise NotImplementedError('Method not implemented!') def WaitExperiment(self, request_iterator, context): context.set_code(grpc.StatusCode.UNIMPLEMENTED) context.set_details('Method not implemented!') raise NotImplementedError('Method not implemented!') def GetExperimentData(self, request, context): context.set_code(grpc.StatusCode.UNIMPLEMENTED) context.set_details('Method not implemented!') raise NotImplementedError('Method not implemented!') def SetNewExperiment(self, request_iterator, context): context.set_code(grpc.StatusCode.UNIMPLEMENTED) context.set_details('Method not implemented!') raise NotImplementedError('Method not implemented!') def GetDatasetInfo(self, request, context): context.set_code(grpc.StatusCode.UNIMPLEMENTED) context.set_details('Method not implemented!') raise NotImplementedError('Method not implemented!') def GetTrainedModel(self, request, context): context.set_code(grpc.StatusCode.UNIMPLEMENTED) context.set_details('Method not 
implemented!') raise NotImplementedError('Method not implemented!') def StreamMetrics(self, request, context): context.set_code(grpc.StatusCode.UNIMPLEMENTED) context.set_details('Method not implemented!') raise NotImplementedError('Method not implemented!') def RemoveExperimentData(self, request, context): context.set_code(grpc.StatusCode.UNIMPLEMENTED) context.set_details('Method not implemented!') raise NotImplementedError('Method not implemented!')
Apache License 2.0
jgurtowski/ectools
nucio.py
getNucmerAlignmentIterator
python
def getNucmerAlignmentIterator(fh):
    return lineRecordIterator(fh, NucRecord, NucRecordTypes)
Get nucmer alignments from show-coords output (Deprecated legacy)
https://github.com/jgurtowski/ectools/blob/031eb0300c82392915d8393a5fedb4d3452b15bf/nucio.py#L68-L72
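A usage sketch, assuming a coordinates file ('alignments.coords' is a made-up name) that contains only data rows, i.e. any show-coords header lines have already been stripped or filtered out. Each yielded record is a NucRecord namedtuple with the typed fields defined in the module.

with open('alignments.coords') as fh:
    for rec in getNucmerAlignmentIterator(fh):
        # Keep long, near-identical alignments; field names come from NucRecord.
        if rec.pctid >= 99.0 and rec.qalen >= 1000:
            print(rec.qname, rec.sname, rec.pctid)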
import sys from collections import namedtuple from itertools import imap, izip, ifilter from misc import trueFunc NucRecord = namedtuple('NucRecord', ["sstart","send","b3","qstart","qend", "b6", "salen","qalen","b9","pctid", "b11","slen","qlen","b14","sname","qname"]) NucRecordTypes = [int, int, str, int,int,str,int,int,str,float,str, int,int,str,str,str] NucSNPRecord = namedtuple('NucSNPRecord', ["spos", "sbase", "qbase", "qpos", "b5", "buf", "dist", "b8", "r", "q", "b9", "frm1", "frm2", "b12", "r1", "r2", "sname", "qname"]) NucSNPRecordTypes = [int, str, str, int, str, int, int, str, int, int, str, int, int, str, int, int, str, str] DeltaAlignment = namedtuple('DeltaAlignment', ["sstart","send","qstart","qend","errors", "simerrors", "stopcodons", "positions"]) DeltaAlignmentTypes = [int, int, int, int ,int ,int ,int, list] DeltaRecord = namedtuple('DeltaRecord', ["sname", "qname", "slen", "qlen", "alignments"]) DeltaRecordTypes = [str, str, int, int, list] M4Record = namedtuple('M4Record', ["qname", "tname", "score", "pctsimilarity", "qstrand", "qstart", "qend", "qseqlength", "tstrand", "tstart" ,"tend", "tseqlength", "mapqv"]) M4RecordTypes = [str, str, float, float, int, int, int, int, int, int,int,int,int] def lineRecordIterator(fh, nt, nt_types, filter_func=trueFunc, delim=None, cleaner_func=None): if cleaner_func == None: cleaner_func = lambda line : line.strip().split(delim) filtered_lines = ifilter(filter_func, fh) split_clean_lines = imap(cleaner_func, filtered_lines) typed = imap(lambda splitline : typeify(splitline, nt_types), split_clean_lines) return imap(nt._make, typed) def lineItemIterator(fh, filter_func=trueFunc): filtered = ifilter( filter_func, fh) return imap(str.split, filtered)
BSD 3-Clause New or Revised License
awused/cynaoko
naoko/lib/database.py
NaokoDB.insertVideo
python
def insertVideo(self, site, vid, title, dur, nick):
    self.logger.debug("Inserting %s into videos", (site, vid, int(dur * 1000), title, 0))
    self.logger.debug("Inserting %s into video_stats", (site, vid, nick))
    self.executeDML("INSERT OR IGNORE INTO videos VALUES(?, ?, ?, ?, ?)",
                    (site, vid, int(dur * 1000), title, 0))
    self.executeDML("INSERT INTO video_stats VALUES(?, ?, ?)", (site, vid, nick))
    self.commit()
    self.unflagVideo(site, vid, 1)
Inserts a video into the database. The video is assumed to be valid so it also removes the invalid flag from the video. dur is supplied in seconds as a float but stored in milliseconds as an integer. nick is the username of the user who added it, with unregistered users using an empty string.
https://github.com/awused/cynaoko/blob/23e3f287814535e80268a0fa8dfb6d415bb4a9a2/naoko/lib/database.py#L465-L480
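A small usage sketch; the database filename, site code, video id, title, duration and username are all made up. The duration is passed in seconds and stored as milliseconds by the method, and NaokoDB supports the context-manager protocol shown in its context above.

with NaokoDB('naoko.db') as db:
    # 212.5 seconds is stored as 212500 ms; 'yt' is the site code.
    db.insertVideo('yt', 'dQw4w9WgXcQ', 'Example Video Title', 212.5, 'someuser')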
import sqlite3 import logging import time try: from settings import LOG_LEVEL except: print "Defaulting to LOG_LEVEL debug [%s]" % (__name__) LOG_LEVEL = logging.DEBUG ProgrammingError = sqlite3.ProgrammingError DatabaseError = sqlite3.DatabaseError def dbopen(fn): def dbopen_func(self, *args, **kwargs): if self._state == "open": return fn(self, *args, **kwargs) elif self._state == "closed": raise DatabaseError("Cannot perform operations on closed database") else: raise DatabaseError("Database must be open to perform operations") return dbopen_func class NaokoCursor(sqlite3.Cursor): _id = 0 def __init__(self, *args, **kwargs): self.logger = logging.getLogger('naokocursor') self.id = NaokoCursor._id NaokoCursor._id += 1 sqlite3.Cursor.__init__(self, *args, **kwargs) def __enter__(self): return self def __str__(self): return "NaokoCursor #%d" % self.id def __exit__(self, exc_type, exc_value, traceback): self.close() if not self.logger: return if exc_type and exc_value: self.logger.error("%s closed %s: %s" % (self, exc_type, exc_value)) else: self.logger.debug("%s closed" % self) class NaokoDB(object): _dbinfo_sql = "SELECT name FROM sqlite_master WHERE type='table'" _version_sql = "SELECT value FROM metadata WHERE key='dbversion'" _required_tables = set(["video_stats", "videos", "user_count", "bans", "chat"]) _foreign_keys = False def __enter__(self): return self def __init__(self, database): self.logger = logging.getLogger("database") self.logger.setLevel(LOG_LEVEL) self.db_file = database self.con = sqlite3.connect(database, timeout=60) self._state = "open" self.initdb() tables = self._getTables() if not self._required_tables <= tables: raise ValueError("Database '%s' is non-empty but " "does not provide required tables %s" % (database, self._required_tables - tables)) def __exit__(self, exc_type, exc_val, exc_tb): self._state = "closed" if self.con: self.con.close() if exc_type and exc_val: self.logger.error("Database '%s' closed due to %s: %s" % (self.db_file, exc_type, exc_val)) else: self.logger.debug("Database '%s' closed" % self.db_file) def _getTables(self): with self.execute(self._dbinfo_sql) as cur: return set([table[0] for table in cur.fetchall()]) def _getVersion(self): tables = self._getTables() if 'metadata' in tables: try: with self.execute(self._version_sql) as cur: version = cur.fetchone()[0] self.logger.debug("Database version is %s" % version) return int(version) except TypeError as e: self.logger.debug(e) self.logger.debug("Database version is 3 (empty metadata table)") self.executeDML("INSERT INTO metadata(key, value) VALUES ('dbversion', '3')") self.commit() return 3 elif tables : self.logger.debug("Database version is 1 (no metadata table)") return 1 def _update(self): version = self._getVersion() if version < 2: stmts = ["CREATE TABLE IF NOT EXISTS videos(type TEXT, id TEXT, duration_ms INTEGER, title TEXT, primary key(type, id))", "CREATE TABLE IF NOT EXISTS video_stats(type TEXT, id TEXT, uname TEXT, FOREIGN KEY(type, id) REFERENCES video(type, id))", "CREATE INDEX IF NOT EXISTS video_stats_idx ON video_stats(type, id)", "CREATE TABLE IF NOT EXISTS bans(reason TEXT, auth INTEGER, uname TEXT, timestamp INTEGER, mod TEXT)", "CREATE TABLE IF NOT EXISTS user_count(timestamp INTEGER, count INTEGER, primary key(timestamp, count))", "CREATE TABLE IF NOT EXISTS chat(timestamp INTEGER, username TEXT, userid TEXT, msg TEXT, protocol TEXT, channel TEXT, flags TEXT)", "CREATE INDEX IF NOT EXISTS chat_ts ON chat(timestamp)", "CREATE INDEX IF NOT EXISTS chat_user ON 
chat(username)", "ALTER TABLE videos ADD COLUMN flags INTEGER DEFAULT 0 NOT NULL", "CREATE TABLE metadata(key TEXT, value TEXT, PRIMARY KEY(key))", "INSERT INTO metadata(key, value) VALUES ('dbversion', '2')"] for stmt in stmts: self.executeDML(stmt) self.commit() if version < 3: stmts = ["UPDATE chat SET timestamp = timestamp * 1000", "UPDATE metadata SET value = '3' WHERE key = 'dbversion'"] for stmt in stmts: self.executeDML(stmt) self.commit() if version < 4: stmts = ["UPDATE user_count SET timestamp = count, count = timestamp WHERE timestamp < 1000", "UPDATE metadata SET value = '4' WHERE key = 'dbversion'"] for stmt in stmts: self.executeDML(stmt) self.commit() if version < 5: stmts = ["CREATE TABLE IF NOT EXISTS playlistmeta(name TEXT, uname TEXT, length INTEGER, timestamp INTEGER, PRIMARY KEY(name))", "CREATE TABLE IF NOT EXISTS playlists(name TEXT, idx INTEGER, type TEXT, id TEXT, PRIMARY KEY(name, idx), FOREIGN KEY(name) " + "REFERENCES playlistmeta(name), FOREIGN KEY(type, id) REFERENCES videos(type, id))", "CREATE TABLE IF NOT EXISTS video_stats2(type TEXT, id TEXT, uname TEXT, FOREIGN KEY(type, id) REFERENCES videos(type, id))", "INSERT INTO video_stats2(type, id, uname) SELECT type, id, uname FROM video_stats", "DROP INDEX video_stats_idx", "ALTER TABLE video_stats RENAME TO video_stats_backup", "ALTER TABLE video_stats2 RENAME TO video_stats", "CREATE INDEX IF NOT EXISTS video_stats_idx ON video_stats(type, id)", "UPDATE metadata SET value = '5' WHERE key = 'dbversion'"] for stmt in stmts: self.executeDML(stmt) self.commit() if version < 6: stmts = ["DROP TABLE video_stats_backup", "UPDATE videos SET flags = flags & ~1 WHERE type = 'yt'", "UPDATE metadata SET value = '6' WHERE key = 'dbversion'"] for stmt in stmts: self.executeDML(stmt) self.commit() self._foreign_keys = True @dbopen def initdb(self): self._update() assert self._getVersion() >= 6 @dbopen def cursor(self): cur = self.con.cursor(NaokoCursor) if self._foreign_keys: cur.execute("PRAGMA foreign_keys = ON"); return cur @dbopen def execute(self, stmt, *args): cur = self.cursor() cur.execute(stmt, *args) return cur @dbopen def executeDML(self, stmt, *args): with self.execute(stmt, *args): pass @dbopen def commit(self): self.con.commit() @dbopen def executescript(self, script): cur = self.cursor() cur.executescript(script) return cur @dbopen def fetch(self, stmt, *args): with self.execute(stmt, *args) as cur: return cur.fetchall() def close(self): self.__exit__(None, None, None) def getVideos(self, num=None, columns=None, orderby=None, duration_s=None, title=None, user=None, blockedFlags=0b11, blockedSites = []): _tables = {'videos' : set(['type', 'id', 'duration_ms', 'title']), 'video_stats' : set(['type', 'id', 'uname'])} legal_cols = set.union(_tables['videos'], _tables['video_stats']) if not columns: columns = legal_cols if not set(columns) <= legal_cols: raise ProgrammingError("Argument columns: %s not a subset of video " "columns %s" % (columns, _tables['videos'])) col_repl = {'id' : 'v.id', 'type' : 'v.type'} sel_cols = [] for col in columns: sel_col = col if col in col_repl: sel_col = col_repl[col] sel_cols.append(sel_col) binds = () sel_list = ', '.join(sel_cols) sel_cls = 'SELECT DISTINCT %s' % (sel_list) from_cls = ' FROM video_stats vs, videos v ' where_cls = ' WHERE vs.type = v.type AND vs.id = v.id ' if isinstance(duration_s, (int, long)): where_cls += " AND v.duration_ms <= ? " binds += (duration_s*1000,) if isinstance(title, (str, unicode)): where_cls += " AND v.title like ? 
COLLATE NOCASE " binds += ("%%%s%%" % (title),) if isinstance(user, (str, unicode)): where_cls += " AND vs.uname like ? COLLATE NOCASE " binds += (user,) if isinstance(blockedFlags, (int, long)): where_cls += " AND v.flags & ? = 0 " binds += (blockedFlags,) if isinstance(blockedSites, (list, tuple)): sites_cls = " AND v.type NOT IN (" flg = False for b in blockedSites: if isinstance(b, (str, unicode)) and len(b) == 2: if flg: sites_cls += "," sites_cls += "?" binds += (b,) flg = True if flg: where_cls += sites_cls + ") " sql = sel_cls + from_cls + where_cls def matchOrderBy(this, other): valid = this == other if not valid: valid = (len(this) == 2) and (len(other) == 2) for i in range(len(this)): valid = valid and (this[i].lower() == other[1].lower()) return valid valid = this and other and (this[0].lower() != other[0].lower()) if valid and (len(this) == 2) and this[1] and other[1]: return valid and (this[1].lower() == other[1].lower()) else: return valid and (this[1] == other[1]) if orderby is None: pass elif matchOrderBy(orderby, ('id', 'ASC')): sql += ' ORDER BY v.id ASC' elif matchOrderBy(orderby, ('id', 'DESC')): sql += ' ORDER BY v.id DESC' elif matchOrderBy(orderby, ('RANDOM()',)): sql += ' ORDER BY RANDOM()' else: raise ProgrammingError("Invalid orderby %s" % (orderby)) if isinstance(num, (int, long)): sql += ' LIMIT ?' binds += (num,) elif num != None: raise ProgrammingError("Invalid num %s" % (num)) self.logger.debug("Generated SQL %s" % (sql)) with self.execute(sql, binds) as cur: return cur.fetchall() def insertChat(self, msg, username, userid=None, timestamp=None, protocol='ST', channel=None, flags=None): if userid is None: userid = username if timestamp is None: timestamp = int(time.time() * 1000) chat = (timestamp, username, userid, msg, protocol, channel, flags) with self.cursor() as cur: self.logger.debug("Inserting chat message %s" % (chat,)) cur.execute("INSERT INTO chat VALUES(?, ?, ?, ?, ?, ?, ?)", chat) self.commit() def getQuote(self, nick, excludes=[], protocol=None): select_cls = "SELECT username, msg, timestamp, protocol FROM chat " where_cls = " WHERE msg NOT LIKE '/me%%' AND msg NOT LIKE '$%%' " limit_cls = " ORDER BY RANDOM() LIMIT 1" binds = () if protocol: where_cls += " AND protocol = ? " binds = (protocol,) if nick: where_cls += " AND username = ? COLLATE NOCASE " binds += (nick,) else: for e in excludes: where_cls += " AND (username != ? or protocol != ?) " binds += e sql = select_cls + where_cls + limit_cls rows = self.fetch(sql, binds) if rows: return rows[0] else: return None def flagVideo(self, site, vid, flags): self.logger.debug("Flagging %s:%s with flags %s", site, vid, bin(flags)) self.executeDML("UPDATE videos SET flags=(flags | ?) WHERE type = ? AND id = ?", (flags, site, vid)) self.commit() def unflagVideo(self, site, vid, flags): self.executeDML("UPDATE videos SET flags=(flags & ?) WHERE type = ? AND id = ?", (~flags, site, vid)) self.commit()
BSD 2-Clause Simplified License
kriaga/health-checker
HealthChecker/venv/Lib/site-packages/nltk/app/wordnet_app.py
page_from_reference
python
def page_from_reference(href):
    word = href.word
    pos_forms = defaultdict(list)
    words = word.split(',')
    words = [w for w in [w.strip().lower().replace(' ', '_') for w in words]
             if w != ""]
    if len(words) == 0:
        return "", "Please specify a word to search for."

    for w in words:
        for pos in [wn.NOUN, wn.VERB, wn.ADJ, wn.ADV]:
            form = wn.morphy(w, pos)
            if form and form not in pos_forms[pos]:
                pos_forms[pos].append(form)
    body = ''
    for pos, pos_str, name in _pos_tuples():
        if pos in pos_forms:
            body += _hlev(3, name) + '\n'
            for w in pos_forms[pos]:
                try:
                    body += _collect_all_synsets(w, pos, href.synset_relations)
                except KeyError:
                    pass
    if not body:
        body = ("The word or words '%s' were not found in the dictionary."
                % word)
    return body, word
Returns a tuple of the HTML page built and the new current word

:param href: The hypertext reference to be solved
:type href: str
:return: A tuple (page,word), where page is the new current HTML page to be
    sent to the browser and word is the new current word
:rtype: A tuple (str,str)
https://github.com/kriaga/health-checker/blob/3d9ce933f131bcbb897103b0f509cc45393cae4a/HealthChecker/venv/Lib/site-packages/nltk/app/wordnet_app.py#L723-L764
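A hypothetical sketch of how the browser code drives this function: a Reference wraps the search word, and the returned body is wrapped by pg() into the full page sent to the client. It assumes the WordNet corpus is available locally (for example via nltk.download('wordnet')).

ref = Reference('green')                 # same path page_from_word() takes
body, current_word = page_from_reference(ref)
page = pg(current_word, body)            # full HTML page sent to the browser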
from __future__ import print_function from sys import path import os import sys from sys import argv from collections import defaultdict import webbrowser import datetime import re import threading import time import getopt import base64 import pickle import copy from six.moves.urllib.parse import unquote_plus from nltk import compat from nltk.corpus import wordnet as wn from nltk.corpus.reader.wordnet import Synset, Lemma if compat.PY3: from http.server import HTTPServer, BaseHTTPRequestHandler else: from BaseHTTPServer import HTTPServer, BaseHTTPRequestHandler firstClient = True server_mode = None logfile = None class MyServerHandler(BaseHTTPRequestHandler): def do_HEAD(self): self.send_head() def do_GET(self): global firstClient sp = self.path[1:] if unquote_plus(sp) == 'SHUTDOWN THE SERVER': if server_mode: page = "Server must be killed with SIGTERM." type = "text/plain" else: print('Server shutting down!') os._exit(0) elif sp == '': type = 'text/html' if not server_mode and firstClient: firstClient = False page = get_static_index_page(True) else: page = get_static_index_page(False) word = 'green' elif sp.endswith('.html'): type = 'text/html' usp = unquote_plus(sp) if usp == 'NLTK Wordnet Browser Database Info.html': word = '* Database Info *' if os.path.isfile(usp): with open(usp, 'r') as infile: page = infile.read() else: page = (html_header % word) + '<p>The database info file:' '<p><b>' + usp + '</b>' + '<p>was not found. Run this:' + '<p><b>python dbinfo_html.py</b>' + '<p>to produce it.' + html_trailer else: word = sp page = get_static_page_by_path(usp) elif sp.startswith("search"): type = 'text/html' parts = (sp.split("?")[1]).split("&") word = [p.split("=")[1].replace("+", " ") for p in parts if p.startswith("nextWord")][0] page, word = page_from_word(word) elif sp.startswith("lookup_"): type = 'text/html' sp = sp[len("lookup_"):] page, word = page_from_href(sp) elif sp == "start_page": type = 'text/html' page, word = page_from_word("wordnet") else: type = 'text/plain' page = "Could not parse request: '%s'" % sp self.send_head(type) self.wfile.write(page.encode('utf8')) def send_head(self, type=None): self.send_response(200) self.send_header('Content-type', type) self.end_headers() def log_message(self, format, *args): global logfile if logfile: logfile.write( "%s - - [%s] %s\n" % (self.address_string(), self.log_date_time_string(), format%args)) def get_unique_counter_from_url(sp): pos = sp.rfind('%23') if pos != -1: return int(sp[(pos + 3):]) else: return None def wnb(port=8000, runBrowser=True, logfilename=None): global server_mode, logfile server_mode = not runBrowser if logfilename: try: logfile = open(logfilename, "a", 1) except IOError as e: sys.stderr.write("Couldn't open %s for writing: %s", logfilename, e) sys.exit(1) else: logfile = None url = 'http://localhost:' + str(port) server_ready = None browser_thread = None if runBrowser: server_ready = threading.Event() browser_thread = startBrowser(url, server_ready) server = HTTPServer(('', port), MyServerHandler) if logfile: logfile.write( 'NLTK Wordnet browser server running serving: %s\n' % url) if runBrowser: server_ready.set() try: server.serve_forever() except KeyboardInterrupt: pass if runBrowser: browser_thread.join() if logfile: logfile.close() def startBrowser(url, server_ready): def run(): server_ready.wait() time.sleep(1) webbrowser.open(url, new = 2, autoraise = 1) t = threading.Thread(target=run) t.start() return t def _pos_tuples(): return [ (wn.NOUN,'N','noun'), (wn.VERB,'V','verb'), (wn.ADJ,'J','adj'), 
(wn.ADV,'R','adv')] def _pos_match(pos_tuple): if pos_tuple[0] == 's': pos_tuple = ('a', pos_tuple[1], pos_tuple[2]) for n,x in enumerate(pos_tuple): if x is not None: break for pt in _pos_tuples(): if pt[n] == pos_tuple[n]: return pt return None HYPONYM = 0 HYPERNYM = 1 CLASS_REGIONAL = 2 PART_HOLONYM = 3 PART_MERONYM = 4 ATTRIBUTE = 5 SUBSTANCE_HOLONYM = 6 SUBSTANCE_MERONYM = 7 MEMBER_HOLONYM = 8 MEMBER_MERONYM = 9 VERB_GROUP = 10 INSTANCE_HYPONYM = 12 INSTANCE_HYPERNYM = 13 CAUSE = 14 ALSO_SEE = 15 SIMILAR = 16 ENTAILMENT = 17 ANTONYM = 18 FRAMES = 19 PERTAINYM = 20 CLASS_CATEGORY = 21 CLASS_USAGE = 22 CLASS_REGIONAL = 23 CLASS_USAGE = 24 CLASS_CATEGORY = 11 DERIVATIONALLY_RELATED_FORM = 25 INDIRECT_HYPERNYMS = 26 def lemma_property(word, synset, func): def flattern(l): if l == []: return [] else: return l[0] + flattern(l[1:]) return flattern([func(l) for l in synset.lemmas if l.name == word]) def rebuild_tree(orig_tree): node = orig_tree[0] children = orig_tree[1:] return (node, [rebuild_tree(t) for t in children]) def get_relations_data(word, synset): if synset.pos() == wn.NOUN: return ((HYPONYM, 'Hyponyms', synset.hyponyms()), (INSTANCE_HYPONYM , 'Instance hyponyms', synset.instance_hyponyms()), (HYPERNYM, 'Direct hypernyms', synset.hypernyms()), (INDIRECT_HYPERNYMS, 'Indirect hypernyms', rebuild_tree(synset.tree(lambda x: x.hypernyms()))[1]), (INSTANCE_HYPERNYM , 'Instance hypernyms', synset.instance_hypernyms()), (PART_HOLONYM, 'Part holonyms', synset.part_holonyms()), (PART_MERONYM, 'Part meronyms', synset.part_meronyms()), (SUBSTANCE_HOLONYM, 'Substance holonyms', synset.substance_holonyms()), (SUBSTANCE_MERONYM, 'Substance meronyms', synset.substance_meronyms()), (MEMBER_HOLONYM, 'Member holonyms', synset.member_holonyms()), (MEMBER_MERONYM, 'Member meronyms', synset.member_meronyms()), (ATTRIBUTE, 'Attributes', synset.attributes()), (ANTONYM, "Antonyms", lemma_property(word, synset, lambda l: l.antonyms())), (DERIVATIONALLY_RELATED_FORM, "Derivationally related form", lemma_property(word, synset, lambda l: l.derivationally_related_forms()))) elif synset.pos() == wn.VERB: return ((ANTONYM, 'Antonym', lemma_property(word, synset, lambda l: l.antonyms())), (HYPONYM, 'Hyponym', synset.hyponyms()), (HYPERNYM, 'Direct hypernyms', synset.hypernyms()), (INDIRECT_HYPERNYMS, 'Indirect hypernyms', rebuild_tree(synset.tree(lambda x: x.hypernyms()))[1]), (ENTAILMENT, 'Entailments', synset.entailments()), (CAUSE, 'Causes', synset.causes()), (ALSO_SEE, 'Also see', synset.also_sees()), (VERB_GROUP, 'Verb Groups', synset.verb_groups()), (DERIVATIONALLY_RELATED_FORM, "Derivationally related form", lemma_property(word, synset, lambda l: l.derivationally_related_forms()))) elif synset.pos() == wn.ADJ or synset.pos == wn.ADJ_SAT: return ((ANTONYM, 'Antonym', lemma_property(word, synset, lambda l: l.antonyms())), (SIMILAR, 'Similar to', synset.similar_tos()), (PERTAINYM, 'Pertainyms', lemma_property(word, synset, lambda l: l.pertainyms())), (ATTRIBUTE, 'Attributes', synset.attributes()), (ALSO_SEE, 'Also see', synset.also_sees())) elif synset.pos() == wn.ADV: return ((ANTONYM, 'Antonym', lemma_property(word, synset, lambda l: l.antonyms())),) else: raise TypeError("Unhandles synset POS type: " + str(synset.pos())) html_header = ''' <!DOCTYPE html PUBLIC '-//W3C//DTD HTML 4.01//EN' 'http://www.w3.org/TR/html4/strict.dtd'> <html> <head> <meta name='generator' content= 'HTML Tidy for Windows (vers 14 February 2006), see www.w3.org'> <meta http-equiv='Content-Type' content= 'text/html; 
charset=us-ascii'> <title>NLTK Wordnet Browser display of: %s</title></head> <body bgcolor='#F5F5F5' text='#000000'> ''' html_trailer = ''' </body> </html> ''' explanation = ''' <h3>Search Help</h3> <ul><li>The display below the line is an example of the output the browser shows you when you enter a search word. The search word was <b>green</b>.</li> <li>The search result shows for different parts of speech the <b>synsets</b> i.e. different meanings for the word.</li> <li>All underlined texts are hypertext links. There are two types of links: word links and others. Clicking a word link carries out a search for the word in the Wordnet database.</li> <li>Clicking a link of the other type opens a display section of data attached to that link. Clicking that link a second time closes the section again.</li> <li>Clicking <u>S:</u> opens a section showing the relations for that synset. </li> <li>Clicking on a relation name opens a section that displays the associated synsets.</li> <li>Type a search word in the <b>Word</b> field and start the search by the <b>Enter/Return</b> key or click the <b>Search</b> button.</li> </ul> <hr width='100%'> ''' def _bold(txt): return '<b>%s</b>' % txt def _center(txt): return '<center>%s</center>' % txt def _hlev(n,txt): return '<h%d>%s</h%d>' % (n,txt,n) def _italic(txt): return '<i>%s</i>' % txt def _li(txt): return '<li>%s</li>' % txt def pg(word, body): return (html_header % word) + body + html_trailer def _ul(txt): return '<ul>' + txt + '</ul>' def _abbc(txt): return _center(_bold('<br>'*10 + '*'*10 + ' ' + txt + ' ' + '*'*10)) full_hyponym_cont_text = _ul(_li(_italic('(has full hyponym continuation)'))) + '\n' def _get_synset(synset_key): return wn.synset(synset_key) def _collect_one_synset(word, synset, synset_relations): if isinstance(synset, tuple): raise NotImplementedError("word not supported by _collect_one_synset") typ = 'S' pos_tuple = _pos_match((synset.pos(), None, None)) assert pos_tuple is not None, "pos_tuple is null: synset.pos(): %s" % synset.pos() descr = pos_tuple[2] ref = copy.deepcopy(Reference(word, synset_relations)) ref.toggle_synset(synset) synset_label = typ + ";" if synset.name() in synset_relations: synset_label = _bold(synset_label) s = '<li>%s (%s) ' % (make_lookup_link(ref, synset_label), descr) def format_lemma(w): w = w.replace('_', ' ') if w.lower() == word: return _bold(w) else: ref = Reference(w) return make_lookup_link(ref, w) s += ', '.join(format_lemma(l.name()) for l in synset.lemmas()) gl = " (%s) <i>%s</i> " % (synset.definition(), "; ".join("\"%s\"" % e for e in synset.examples())) return s + gl + _synset_relations(word, synset, synset_relations) + '</li>\n' def _collect_all_synsets(word, pos, synset_relations=dict()): return '<ul>%s\n</ul>\n' % ''.join((_collect_one_synset(word, synset, synset_relations) for synset in wn.synsets(word, pos))) def _synset_relations(word, synset, synset_relations): if not synset.name() in synset_relations: return "" ref = Reference(word, synset_relations) def relation_html(r): if isinstance(r, Synset): return make_lookup_link(Reference(r.lemma_names()[0]), r.lemma_names()[0]) elif isinstance(r, Lemma): return relation_html(r.synset()) elif isinstance(r, tuple): return "%s\n<ul>%s</ul>\n" % (relation_html(r[0]), ''.join('<li>%s</li>\n' % relation_html(sr) for sr in r[1])) else: raise TypeError("r must be a synset, lemma or list, it was: type(r) = %s, r = %s" % (type(r), r)) def make_synset_html(db_name, disp_name, rels): synset_html = '<i>%s</i>\n' % make_lookup_link( 
copy.deepcopy(ref).toggle_synset_relation(synset, db_name).encode(), disp_name) if db_name in ref.synset_relations[synset.name()]: synset_html += '<ul>%s</ul>\n' % ''.join("<li>%s</li>\n" % relation_html(r) for r in rels) return synset_html html = '<ul>' + '\n'.join(("<li>%s</li>" % make_synset_html(*rel_data) for rel_data in get_relations_data(word, synset) if rel_data[2] != [])) + '</ul>' return html class Reference(object): def __init__(self, word, synset_relations=dict()): self.word = word self.synset_relations = synset_relations def encode(self): string = pickle.dumps((self.word, self.synset_relations), -1) return base64.urlsafe_b64encode(string).decode() @staticmethod def decode(string): string = base64.urlsafe_b64decode(string.encode()) word, synset_relations = pickle.loads(string) return Reference(word, synset_relations) def toggle_synset_relation(self, synset, relation): if relation in self.synset_relations[synset.name()]: self.synset_relations[synset.name()].remove(relation) else: self.synset_relations[synset.name()].add(relation) return self def toggle_synset(self, synset): if synset.name() in self.synset_relations: del self.synset_relations[synset.name()] else: self.synset_relations[synset.name()] = set() return self def make_lookup_link(ref, label): return '<a href="lookup_%s">%s</a>' % (ref.encode(), label) def page_from_word(word): return page_from_reference(Reference(word)) def page_from_href(href): return page_from_reference(Reference.decode(href))
MIT License
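For illustration, a minimal sketch of starting the browser server defined in the context above; the module name wordnet_browser.py is an assumption (save the code above under that name), and the port and log file are arbitrary:

# Hypothetical module name for the server code shown in the context above.
from wordnet_browser import wnb

# Serve on localhost:8000 without auto-launching a browser; append request
# logs to wnb.log. wnb() blocks until the server is interrupted.
wnb(port=8000, runBrowser=False, logfilename="wnb.log")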
centerforthebuiltenvironment/clima
my_project/tab_wind/app_wind.py
sliders
python
def sliders(): return html.Div( className="container-col justify-center", id="slider-container", children=[ html.Div( className="container-row each-slider", children=[ html.P("Month Range"), dcc.RangeSlider( id="month-slider", min=1, max=12, step=1, value=[1, 12], marks={1: "1", 12: "12"}, tooltip={"always_visible": False, "placement": "top"}, allowCross=False, ), ], ), html.Div( className="container-row each-slider", children=[ html.P("Hour Range"), dcc.RangeSlider( id="hour-slider", min=1, max=24, step=1, value=[1, 24], marks={1: "1", 24: "24"}, tooltip={"always_visible": False, "placement": "topLeft"}, allowCross=False, ), ], ), ], )
Returns two range sliders, one for the month range and one for the hour range
https://github.com/centerforthebuiltenvironment/clima/blob/b3bec2839aed4a3766dd31d9369817073ac465cf/my_project/tab_wind/app_wind.py#L11-L50
from dash import dcc, html from my_project.global_scheme import month_lst, container_row_center_full from dash.dependencies import Input, Output, State from my_project.template_graphs import heatmap, wind_rose from my_project.utils import title_with_tooltip, generate_chart_name from my_project.utils import code_timer from app import app, cache, TIMEOUT
MIT License
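A minimal sketch of wiring these sliders into a Dash callback; the output div id ("summary") and the app scaffolding are assumptions, not part of the original project:

from dash import Dash, Input, Output, html

# Import path taken from the record's function_path above.
from my_project.tab_wind.app_wind import sliders

app = Dash(__name__)
app.layout = html.Div([sliders(), html.Div(id="summary")])

@app.callback(
    Output("summary", "children"),
    Input("month-slider", "value"),
    Input("hour-slider", "value"),
)
def describe_selection(month_range, hour_range):
    # Each RangeSlider value arrives as a [start, end] pair.
    return f"Months {month_range[0]}-{month_range[1]}, hours {hour_range[0]}-{hour_range[1]}"

if __name__ == "__main__":
    app.run_server(debug=True)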
mgraffg/evodag
EvoDAG/base.py
EvoDAG.generations
python
def generations(self): return self._generations
Number of generations
https://github.com/mgraffg/evodag/blob/d553444d6505a3885760250955f47383c064f6d8/EvoDAG/base.py#L182-L184
import numpy as np import logging from SparseArray import SparseArray from .node import Variable from .node import Add, Mul, Div, Fabs, Exp, Sqrt, Sin, Cos, Log1p from .node import Sq, Min, Max from .node import Atan2, Hypot, Acos, Asin, Atan, Tan, Cosh, Sinh from .node import Tanh, Acosh, Asinh, Atanh, Expm1, Log, Log2, Log10 from .node import Lgamma, Sign, Ceil, Floor, NaiveBayes, NaiveBayesMN from .node import Centroid from .model import Model from .population import SteadyState from .utils import tonparray from .function_selection import FunctionSelection from .naive_bayes import NaiveBayes as NB from .bagging_fitness import BaggingFitness import time import importlib import inspect class EvoDAG(object): def __init__(self, generations=np.inf, popsize=10000, seed=0, tournament_size=2, early_stopping_rounds=-1, function_set=[Add, Mul, Div, Fabs, Exp, Sqrt, Sin, Cos, Log1p, Sq, Min, Max, Atan2, Hypot, Acos, Asin, Atan, Tan, Cosh, Sinh, Tanh, Acosh, Asinh, Atanh, Expm1, Log, Log2, Log10, Lgamma, Sign, Ceil, Floor, NaiveBayes, NaiveBayesMN, Centroid], tr_fraction=0.5, population_class=SteadyState, negative_selection=True, number_tries_feasible_ind=30, time_limit=None, unique_individuals=True, classifier=True, labels=None, all_inputs=False, random_generations=0, fitness_function='BER', orthogonal_selection=False, orthogonal_dot_selection=False, min_density=0.8, multiple_outputs=False, function_selection=True, fs_tournament_size=2, finite=True, pr_variable=0.33, share_inputs=False, input_functions=None, F1_index=-1, use_all_vars_input_functions=False, remove_raw_inputs=True, probability_calibration=None, **kwargs): self._remove_raw_inputs = remove_raw_inputs self._fitness_function = fitness_function self._bagging_fitness = BaggingFitness(base=self) generations = np.inf if generations is None else generations self._pr_variable = pr_variable self._share_inputs = share_inputs self._finite = finite self._generations = generations self._popsize = popsize self._classifier = classifier self._number_tries_feasible_ind = number_tries_feasible_ind self._unfeasible_counter = 0 self._negative_selection = negative_selection self._number_tries_unique_args = 3 self._tr_fraction = tr_fraction if early_stopping_rounds is not None and early_stopping_rounds < 0: early_stopping_rounds = popsize self._early_stopping_rounds = early_stopping_rounds self._tournament_size = tournament_size self._seed = seed self._labels = labels self._multiclass = False self._function_set = function_set self._function_selection = function_selection self._fs_tournament_size = fs_tournament_size density_safe = [k for k, v in enumerate(function_set) if v.density_safe] self._function_selection_ins = FunctionSelection(nfunctions=len(self._function_set), seed=seed, tournament_size=self._fs_tournament_size, nargs=map(lambda x: x.nargs, function_set), density_safe=density_safe) self._min_density = min_density self._function_selection_ins.min_density = self._min_density self._time_limit = time_limit self._init_time = time.time() self._random_generations = random_generations if not inspect.isclass(population_class): pop = importlib.import_module('EvoDAG.population') population_class = getattr(pop, population_class) self._set_input_functions(input_functions) self._population_class = population_class np.random.seed(self._seed) self._unique_individuals = unique_individuals self._unique_individuals_set = set() self._logger = logging.getLogger('EvoDAG') self._all_inputs = all_inputs if self._generations == np.inf and tr_fraction == 1: raise 
RuntimeError("Infinite evolution, set generations\ or tr_fraction < 1 ") self._multiple_outputs = multiple_outputs self._F1_index = F1_index self._use_all_vars_input_functions = use_all_vars_input_functions self._probability_calibration = probability_calibration self._orthogonal_selection = orthogonal_selection self._orthogonal_dot_selection = orthogonal_dot_selection self._extras = kwargs if self._time_limit is not None: self._logger.info('Time limit: %0.2f' % self._time_limit) def _set_input_functions(self, input_functions): if input_functions is not None: if not isinstance(input_functions, list): input_functions = [input_functions] r = [] for f in input_functions: if not inspect.isclass(f): _ = importlib.import_module('EvoDAG.node') f = getattr(_, f) r.append(f) else: r.append(f) self._input_functions = r else: self._input_functions = input_functions def get_params(self): import inspect a = inspect.getargspec(self.__init__)[0] out = dict() for key in a[1:]: value = getattr(self, "_%s" % key, None) out[key] = value return out def clone(self): return self.__class__(**self.get_params()) @property def classifier(self): return self._classifier @property def signature(self): kw = self.get_params() keys = sorted(kw.keys()) l = [] for k in keys: n = k[0] + k[-1] v = kw[k] if k == 'function_set': v = "_".join([x.__name__[0] + x.__name__[-1] + str(x.nargs) for x in kw[k]]) elif k == 'population_class': v = kw[k].__name__ else: v = str(v) l.append('{0}_{1}'.format(n, v)) return '-'.join(l) @property def popsize(self): return self._popsize @property
Apache License 2.0
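A small sketch of the property in use (constructor defaults are otherwise untouched; no data is fitted here):

from EvoDAG.base import EvoDAG

model = EvoDAG(generations=50)
print(model.generations)                      # 50

# Passing None falls back to an open-ended run, per __init__ above.
print(EvoDAG(generations=None).generations)   # inf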
apiad/sublime-browser-integration
selenium/webdriver/phantomjs/service.py
Service.__init__
python
def __init__(self, executable_path, port=0, service_args=None, log_path=None): self.port = port self.path = executable_path self.service_args= service_args if self.port == 0: self.port = utils.free_port() if self.service_args is None: self.service_args = [] self.service_args.insert(0, self.path) self.service_args.append("--webdriver=%d" % self.port) if not log_path: log_path = "ghostdriver.log" self._log = open(log_path, 'w')
Creates a new instance of the Service :Args: - executable_path : Path to PhantomJS binary - port : Port the service is running on - service_args : A List of other command line options to pass to PhantomJS - log_path: Path for PhantomJS service to log to
https://github.com/apiad/sublime-browser-integration/blob/3914a8cd80ceabe58593a4123dd9e9493c5d5ebd/selenium/webdriver/phantomjs/service.py#L28-L50
import subprocess import time import signal from selenium.common.exceptions import WebDriverException from selenium.webdriver.common import utils class Service(object):
MIT License
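A minimal construction sketch; the binary path is an assumption, and the service_args entries are ordinary PhantomJS command-line flags:

# Import path follows this vendored copy's location in the repository.
from selenium.webdriver.phantomjs.service import Service

service = Service(
    "/usr/local/bin/phantomjs",             # assumed location of the binary
    port=8910,
    service_args=["--ignore-ssl-errors=true"],
    log_path="ghostdriver.log",
)
# __init__ only records settings and opens the log file; the service's
# start/stop methods (not shown in this record) launch and tear down the
# PhantomJS subprocess.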
dcoles/pycurl-requests
pycurl_requests/_pycurl.py
debug_function
python
def debug_function(infotype: int, message: bytes): if infotype > CURLINFO_HEADER_OUT: return message = message.decode('utf-8', 'replace') if infotype == CURLINFO_TEXT: LOGGER_TEXT.debug(message.rstrip()) elif infotype == CURLINFO_HEADER_IN: for line in message.splitlines(): LOGGER_HEADER_IN.debug(line) elif infotype == CURLINFO_HEADER_OUT: for line in message.splitlines(): LOGGER_HEADER_OUT.debug(line)
cURL `DEBUGFUNCTION` callback that writes to a logger
https://github.com/dcoles/pycurl-requests/blob/4365c7797d7897a655e9d7f91081dc7e303d230a/pycurl_requests/_pycurl.py#L195-L210
import datetime import http.client import io from io import BytesIO import logging import pycurl from pycurl_requests import exceptions from pycurl_requests import models from pycurl_requests import structures try: from urllib3.util.timeout import Timeout except ImportError: Timeout = None CURLINFO_TEXT = 0 CURLINFO_HEADER_IN = 1 CURLINFO_HEADER_OUT = 2 LOGGER = logging.getLogger('curl') LOGGER_TEXT = LOGGER.getChild('text') LOGGER_HEADER_IN = LOGGER.getChild('header_in') LOGGER_HEADER_OUT = LOGGER.getChild('header_out') DEBUGFUNCTION_LOGGERS = {LOGGER_TEXT, LOGGER_HEADER_IN, LOGGER_HEADER_OUT} VERSION_INFO = pycurl.version_info() class Request: def __init__(self, prepared, *, curl=None, timeout=None, allow_redirects=True, max_redirects=-1): self.prepared = prepared self.curl = curl or pycurl.Curl() self.timeout = timeout self.allow_redirects = allow_redirects self.max_redirects = max_redirects if timeout is not None: if isinstance(timeout, (int, float)): self.connect_timeout, self.read_timeout = timeout, timeout elif Timeout and isinstance(timeout, Timeout): timeout.start_connect() self.connect_timeout = (0 if timeout.connect_timeout is Timeout.DEFAULT_TIMEOUT else timeout.connect_timeout) self.read_timeout = (0 if timeout.read_timeout is Timeout.DEFAULT_TIMEOUT else timeout.read_timeout) else: self.connect_timeout, self.read_timeout = timeout else: self.connect_timeout, self.read_timeout = (None, None) self.response_buffer = BytesIO() self.reason = None self.headers = http.client.HTTPMessage() self.reset_headers = False def header_function(self, line: bytes): if self.reset_headers: self.headers = http.client.HTTPMessage() self.reset_headers = False try: line = line.decode('utf-8') except UnicodeDecodeError: line = line.decode('iso-8859-1') if self.reason is None: _, _, reason = line.split(' ', 2) self.reason = reason.strip() return if line == '\r\n': self.reset_headers = True return elif ':' not in line: return name, value = line.split(':', 1) self.headers.add_header(name, value.strip()) def send(self): try: scheme, _ = self.prepared.url.split(':', 1) except ValueError: raise exceptions.MissingSchema('Missing scheme for {!r}'.format(self.prepared.url)) supported_protocols = VERSION_INFO[8] if scheme.lower() not in supported_protocols: raise exceptions.InvalidSchema('Unsupported scheme for {!r}'.format(self.prepared.url)) self.curl.setopt(pycurl.URL, self.prepared.url) if self.prepared.method: self.curl.setopt(pycurl.CUSTOMREQUEST, self.prepared.method) if self.prepared.method == 'HEAD': self.curl.setopt(pycurl.NOBODY, 1) self._prepare_http_auth() self.curl.setopt(pycurl.HTTPHEADER, ['{}: {}'.format(n, v) for n, v in self.prepared.headers.items()]) if self.prepared.body is not None: if isinstance(self.prepared.body, str): body = io.BytesIO(self.prepared.body.encode('iso-8859-1')) elif isinstance(self.prepared.body, bytes): body = io.BytesIO(self.prepared.body) else: body = self.prepared.body self.curl.setopt(pycurl.UPLOAD, 1) self.curl.setopt(pycurl.READDATA, body) content_length = self.prepared.headers.get('Content-Length') if content_length is not None: self.curl.setopt(pycurl.INFILESIZE_LARGE, int(content_length)) self.curl.setopt(pycurl.HEADERFUNCTION, self.header_function) self.curl.setopt(pycurl.WRITEDATA, self.response_buffer) if self.connect_timeout is not None: timeout = int(self.connect_timeout * 1000) self.curl.setopt(pycurl.CONNECTTIMEOUT_MS, timeout) if self.read_timeout is not None: timeout = int(self.read_timeout * 1000) self.curl.setopt(pycurl.TIMEOUT_MS, timeout) if 
self.allow_redirects: self.curl.setopt(pycurl.FOLLOWLOCATION, 1) self.curl.setopt(pycurl.POSTREDIR, pycurl.REDIR_POST_ALL) self.curl.setopt(pycurl.MAXREDIRS, self.max_redirects) if any((l.isEnabledFor(logging.DEBUG) for l in DEBUGFUNCTION_LOGGERS)): self.curl.setopt(pycurl.VERBOSE, 1) self.curl.setopt(pycurl.DEBUGFUNCTION, debug_function) return self.perform() def _prepare_http_auth(self): if not self.prepared.curl_auth: return self.prepared.curl_auth.setopts(self.curl) def perform(self): try: start_time = datetime.datetime.now(tz=datetime.timezone.utc) try: self.curl.perform() finally: end_time = datetime.datetime.now(tz=datetime.timezone.utc) self.prepared.url = self.curl.getinfo(pycurl.EFFECTIVE_URL) self.response_buffer.seek(0) response = self.build_response(elapsed=end_time - start_time) except pycurl.error as e: raise exceptions.RequestException.from_pycurl_error( e, request=self.prepared, response=response) from e return response def build_response(self, elapsed=None): status_code = self.curl.getinfo(pycurl.RESPONSE_CODE) if not status_code: return None response = models.Response() response.request = self.prepared response.elapsed = elapsed response.status_code = status_code response.reason = self.reason response.headers = structures.CaseInsensitiveDict( ((k, ', '.join(self.headers.get_all(k))) for k in self.headers.keys())) response.encoding = self.headers.get_content_charset() response.url = self.prepared.url response.raw = self.response_buffer return response
MIT License
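A sketch of surfacing this output: per the context above, Request.send() only sets pycurl.VERBOSE and installs debug_function when one of the curl.* loggers is enabled for DEBUG, so enabling DEBUG logging before a request suffices (the URL is arbitrary, and the requests-style module-level get() is assumed from the library's drop-in API):

import logging
import pycurl_requests as requests

logging.basicConfig(level=logging.DEBUG)
logging.getLogger("curl").setLevel(logging.DEBUG)

# Header and text debug lines from libcurl are now routed through
# debug_function above into curl.text / curl.header_in / curl.header_out.
requests.get("https://example.com")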
mikeiacovacci/axiom-framework
lib/classes.py
AxiomTool.resolve_command
python
def resolve_command(self, number): if number >= 0 and number in range(self.combined_list.__len__()): command_name = self.combined_list[number] return self.resolve_command_name(command_name) else: return None, int(-1)
SUMMARY: determines the object's type (command or action) and finds its ID value INPUT: command/action ID number integer OUTPUT: two-item tuple containing 1) "command", "action", or None and 2) ID value, -1 if unresolved
https://github.com/mikeiacovacci/axiom-framework/blob/2edc8bb1a123eb3c67897b0742050ee6956058bf/lib/classes.py#L953-L962
import lib.config as config from lib.config import print_error from os import devnull, path from pexpect import exceptions, pty_spawn from prompt_toolkit import prompt, PromptSession from prompt_toolkit.auto_suggest import AutoSuggestFromHistory from prompt_toolkit.history import FileHistory from queue import Queue from re import search from shlex import split from subprocess import call, PIPE, Popen, STDOUT from threading import Event from time import sleep class AxiomAction: def __init__(self, name, prompt_type, execution_type, text, output_list, note): self.execution_type = execution_type self.name = name self.note = note self.output_list = output_list self.prompt_type = prompt_type self.text = text def cli_print(self): print() if isinstance(self.text, str): print(self.text) elif isinstance(self.text, list): line = 0 while line < self.text.__len__(): print(self.text[line]) line += 1 print() def confirm_and_execute(self, tool): self.show() response = input("\n[AXIOM] Execute? [Y/n] ") if response not in ["Y", "y", "Yes", "yes"]: return False else: self.run(tool) return True def existing_subprocess(self): i = 0 while i < dispatch.subprocesses.__len__(): if self.prompt_type == dispatch.subprocesses[i].current_prompt: return True i += 1 return False def extract_ending_prompt(self): ending_prompt = str() if self.execution_type != "interactive": return False for x in self.output_list: if isinstance(x, tuple): if x[0] == "PROMPT": ending_prompt = x[1] break return ending_prompt def print_text(self): if isinstance(self.text, str): print("\n TEXT: " + self.text) elif isinstance(self.text, list): print("\n TEXT: ", end="") print(self.text[0]) line = 1 while line < self.text.__len__(): print(" " + self.text[line]) line += 1 def run(self, tool): if self.prompt_type == "bash" and not self.existing_subprocess(): if not tool.platform_matches(): print_error(str("\nERROR: Cannot execute " + tool.name + " (" + tool.platform + ") on " + config.axiom.platform)) dispatch.continue_trigger.set() return if tool.is_installed(): pass else: if tool.install(): self.show() print() else: if tool.proceed_despite_uninstalled(): pass else: dispatch.continue_trigger.set() return elif self.prompt_type != "other" and not self.existing_subprocess(): print_error("\nERROR: Prompt type incompatible with current runtime") dispatch.continue_trigger.set() return multiple_lines = False if isinstance(self, AxiomCommand): if isinstance(self.text[0], list): multiple_lines = True elif isinstance(self, AxiomAction): if isinstance(self.text, list): multiple_lines = True if self.execution_type == "standalone": if multiple_lines: self.run_multiline_standalone() else: self.run_standalone() elif self.execution_type == "autonomous": if multiple_lines: print_error("ERROR: Autonomous multi-line commands are unsupported") else: self.run_autonomous() elif self.execution_type == "interactive": self.run_interactive() elif self.execution_type == "NX": if multiple_lines: self.run_multiline_nx() else: self.run_nx() def run_autonomous(self): if self.prompt_type == "bash" and not self.existing_subprocess(): try: print() call(self.text, shell=True) except OSError: print_error("ERROR: Failed to execute via call()") else: dispatch.tasking.put(AxiomInteractiveTask(self.text, self.prompt_type, self.prompt_type)) dispatch.monitor_task_queue() dispatch.continue_trigger.set() def run_interactive(self): ending_prompt = self.extract_ending_prompt() if ending_prompt is not False: dispatch.tasking.put(AxiomInteractiveTask(self.text, self.prompt_type, 
ending_prompt)) dispatch.monitor_task_queue() dispatch.continue_trigger.set() def run_multiline_nx(self): print() line = 0 while line < self.text.__len__(): print(self.text[line]) line += 1 dispatch.continue_trigger.set() def run_multiline_standalone(self): if self.prompt_type == "bash" and not self.existing_subprocess(): try: print() proc = Popen(["bash", "-i"], shell=True, stdin=PIPE, stdout=PIPE) i = 0 while proc.returncode is None: if i < self.text.__len__(): proc.stdin.write(self.text[i].encode()) proc.stdin.write("\n".encode()) proc.stdin.flush() i += 1 else: proc.stdin.close() proc.poll() except OSError: print_error("ERROR: Failed to execute via Popen()") else: dispatch.tasking.put(AxiomInteractiveTask(self.text, self.prompt_type, self.prompt_type)) dispatch.monitor_task_queue() dispatch.continue_trigger.set() def run_nx(self): print() print(self.text) print() dispatch.continue_trigger.set() def run_standalone(self): if self.prompt_type == "bash" and not self.existing_subprocess(): try: print() call(split(self.text)) except OSError: print_error("ERROR: Failed to execute via call()") else: dispatch.tasking.put(AxiomInteractiveTask(self.text, self.prompt_type, self.prompt_type)) dispatch.monitor_task_queue() dispatch.continue_trigger.set() def show(self): print("\n NAME: " + self.name + "\n TYPE: " + self.execution_type + " action (" + self.prompt_type + ")" "\n NOTE: " + self.note) self.print_text() class AxiomCommand(AxiomAction): def __init__(self, name, prompt_type, execution_type, text, output_list, note, input_list): super().__init__(name, prompt_type, execution_type, text, output_list, note) self.input_list = input_list def build(self): input_count = 0 if isinstance(self.text[0], str): token_count = 0 built_text = str() while token_count < self.text.__len__() or input_count < self.input_list.__len__(): if token_count < self.text.__len__(): built_text += self.text[token_count] token_count += 1 if input_count < self.input_list.__len__(): built_text += self.input_build_prompt(input_count) input_count += 1 else: built_text = [] current_line = 0 while current_line < self.text.__len__(): line_tokens = self.text[current_line].__len__() current_token = 0 line_inputs = line_tokens - 1 current_input = 0 built_line = str() while current_token < line_tokens or current_input < line_inputs: if current_token < line_tokens: built_line += self.text[current_line][current_token] current_token += 1 if current_input < line_inputs: built_line += self.input_build_prompt(input_count) current_input += 1 input_count += 1 built_text.append(built_line) current_line += 1 return built_text def build_with_placeholders(self): input_count = 0 if isinstance(self.text[0], str): token_count = 0 built_text = str() while token_count < self.text.__len__() or input_count < self.input_list.__len__(): if token_count < self.text.__len__(): built_text += self.text[token_count] token_count += 1 if input_count < self.input_list.__len__(): built_text += str("{" + self.input_list[input_count][1] + "}") input_count += 1 else: built_text = [] current_line = 0 while current_line < self.text.__len__(): line_tokens = self.text[current_line].__len__() current_token = 0 line_inputs = line_tokens - 1 current_input = 0 built_line = str() while current_token < line_tokens or current_input < line_inputs: if current_token < line_tokens: built_line += self.text[current_line][current_token] current_token += 1 if current_input < line_inputs: built_line += str("{" + self.input_list[input_count][1] + "}") current_input += 1 input_count += 1 
built_text.append(built_line) current_line += 1 return built_text def cli_print(self): text = self.build() print() if isinstance(text, str): print(text) elif isinstance(text, list): line = 0 while line < text.__len__(): print(text[line]) line += 1 print() def input_build_prompt(self, input_count): input_type = self.input_list[input_count][1] prompt_text = str("[AXIOM] Enter " + self.input_list[input_count][0] + ": ") if input_type in ["STRMENU", "INTMENU"]: option_name = self.input_list[input_count][0] option_list = self.input_list[input_count][2] response = self.option_prompt(option_name, option_list) return response elif input_type in ["STR", "INT", "IPV4", "IPV6", "IPV4RNGE", "IPV6RNGE", "IPV4CIDR", "IPV6CIDR", "MAC", "FILE", "RLATVPTH", "FULLPATH", "DOMAIN", "HTTPURL", "HTTPSURL", "WEBURL"]: if input_type == "HTTPSURL": history_file = str(config.axiom.history_folder + "/WEBURL" + ".axiom") else: history_file = str(config.axiom.history_folder + "/" + input_type + ".axiom") session = PromptSession(history=FileHistory(history_file)) response = session.prompt(prompt_text, auto_suggest=AutoSuggestFromHistory()) return response else: response = prompt(prompt_text) return response @staticmethod def option_prompt(option_name, option_list): while True: print("\n" + option_name + "\n") count = 0 while count < option_list.__len__(): print(" " + str(count + 1) + "\t" + str(option_list[count])) count += 1 number = prompt("\n[AXIOM] Select an option: ") try: number = int(number) number -= 1 except (ValueError, TypeError): number = -1 if 0 <= number < option_list.__len__(): return option_list[number] def print_text(self): text_with_placeholders = self.build_with_placeholders() if isinstance(text_with_placeholders, str): print("\n TEXT: " + text_with_placeholders) elif isinstance(text_with_placeholders, list): print("\n TEXT: ", end="") print(text_with_placeholders[0]) line = 1 while line < text_with_placeholders.__len__(): print(" " + text_with_placeholders[line]) line += 1 def run_autonomous(self): text = self.build() if self.prompt_type == "bash" and not self.existing_subprocess(): try: print() call(text, shell=True) except OSError: print_error("ERROR: Failed to execute via call()") else: dispatch.tasking.put(AxiomInteractiveTask(text, self.prompt_type, self.prompt_type)) dispatch.monitor_task_queue() dispatch.continue_trigger.set() def run_interactive(self): text = self.build() ending_prompt = self.extract_ending_prompt() if ending_prompt is not False: dispatch.tasking.put(AxiomInteractiveTask(text, self.prompt_type, ending_prompt)) dispatch.monitor_task_queue() dispatch.continue_trigger.set() def run_multiline_nx(self): text = self.build() print() line = 0 while line < self.text.__len__(): print(text[line]) line += 1 dispatch.continue_trigger.set() def run_multiline_standalone(self): text = self.build() if self.prompt_type == "bash" and not self.existing_subprocess(): try: print() proc = Popen(["bash", "-i"], shell=True, stdin=PIPE, stdout=PIPE) i = 0 while proc.returncode is None: if i < text.__len__(): proc.stdin.write(text[i].encode()) proc.stdin.write("\n".encode()) proc.stdin.flush() i += 1 else: proc.stdin.close() proc.poll() except OSError: print_error("ERROR: Failed to execute via Popen()") else: dispatch.tasking.put(AxiomInteractiveTask(text, self.prompt_type, self.prompt_type)) dispatch.monitor_task_queue() dispatch.continue_trigger.set() def run_nx(self): text = self.build() print() print(text) print() dispatch.continue_trigger.set() def run_standalone(self): text = self.build() if 
self.prompt_type == "bash" and not self.existing_subprocess(): try: print() call(split(text)) except OSError: print_error("ERROR: Failed to execute via call()") else: dispatch.tasking.put(AxiomInteractiveTask(text, self.prompt_type, self.prompt_type)) dispatch.monitor_task_queue() dispatch.continue_trigger.set() def show(self): print("\n NAME: " + self.name + "\n TYPE: " + self.execution_type + " command (" + self.prompt_type + ")" "\n NOTE: " + self.note) self.print_text() class AxiomDispatcher: def __init__(self): self.continue_trigger = Event() self.subprocesses = [] self.tasking = Queue(maxsize=0) self.trigger = Event() def check_for_ambiguous_target(self, current_task): prompt_type = current_task.ending_prompt for x in self.subprocesses: if x.current_prompt == prompt_type: return True return False @staticmethod def get_subprocess_output_detect_prompt(proc, pattern): timeout = 0 safety_timer = 0 while True: try: print(proc.readline().decode(), end='') except exceptions.TIMEOUT: if search(pattern, proc.before.decode()): if timeout >= config.axiom.pattern_timeout: print(proc.before.decode()) break else: timeout += 1 sleep(1) continue else: safety_timer += 1 sleep(1) if safety_timer >= config.axiom.safety_timeout: proc.sendline() continue else: timeout = 0 safety_timer = 0 def handle_new_tasks(self): if not self.tasking.empty(): current_task = self.tasking.get() if self.matching_subprocess(current_task) >= 0: target = self.matching_subprocess(current_task) if current_task.prompt_change: if self.check_for_ambiguous_target(current_task): print_error("\nERROR: Cannot create subprocess with same prompt type as existing subprocess") self.tasking.task_done() return self.read_and_transmit(target, current_task) self.tasking.task_done() return elif current_task.starting_prompt == "bash": if self.check_for_ambiguous_target(current_task): print_error("\nERROR: Cannot create subprocess with same prompt type as existing subprocess") self.tasking.task_done() return self.spawn_and_transmit(current_task) self.tasking.task_done() return else: print_error("\nERROR: Prompt type incompatible with current runtime") self.tasking.task_done() return def matching_subprocess(self, current_task): i = 0 while i < self.subprocesses.__len__(): if current_task.starting_prompt == self.subprocesses[i].current_prompt: return i else: i += 1 return -1 def monitor_task_queue(self): self.handle_new_tasks() def read_and_transmit(self, target, current_task): proc = self.subprocesses[target].process while True: try: print(proc.readline().decode(), end='') except exceptions.TIMEOUT: break self.transmit_text(current_task, proc) self.subprocesses[target].current_prompt = current_task.ending_prompt self.subprocesses[target].prompt_pattern = current_task.ending_prompt_pattern dispatch.continue_trigger.set() def spawn_and_transmit(self, current_task): try: self.subprocesses.append(AxiomExecutingSubprocess(current_task.starting_prompt, pty_spawn.spawn("/bin/bash -i", timeout=config.axiom.pty_timeout))) except OSError: print_error("ERROR: Failed to spawn /bin/bash subprocess") exit(1) else: target = self.matching_subprocess(current_task) proc = self.subprocesses[target].process self.transmit_text(current_task, proc) self.subprocesses[target].current_prompt = current_task.ending_prompt self.subprocesses[target].prompt_pattern = current_task.ending_prompt_pattern dispatch.continue_trigger.set() def transmit_text(self, current_task, proc): pattern = str(current_task.ending_prompt_pattern + "$") try: if isinstance(current_task.text, str): 
proc.sendline(current_task.text) elif isinstance(current_task.text, list): i = 0 while i < current_task.text.__len__(): proc.sendline(current_task.text[i]) i += 1 except OSError: print_error("ERROR: Failed to transmit command") exit(1) else: self.get_subprocess_output_detect_prompt(proc, pattern) class AxiomExecutingSubprocess: def __init__(self, current_prompt, process): self.current_prompt = current_prompt self.process = process self.prompt_pattern = None class AxiomInteractiveTask: def __init__(self, text, starting_prompt, ending_prompt): self.ending_prompt = ending_prompt self.starting_prompt = starting_prompt self.text = text self.prompt_change = self.detect_prompt_change() self.ending_prompt_pattern = self.resolve_ending_prompt_pattern() def detect_prompt_change(self): if self.starting_prompt == self.ending_prompt: return False else: return True def resolve_ending_prompt_pattern(self): if self.prompt_change: for x in config.axiom.prompts: if x[0] == self.ending_prompt: return x[1] else: for x in config.axiom.prompts: if x[0] == self.starting_prompt: return x[1] class AxiomToolkit: def __init__(self, name, location, tool_name_list): self.location = location self.name = name self.tool_name_list = tool_name_list class AxiomTool: def __init__(self, name, platform, ptf_module, description, action_list, command_list): self.action_list = action_list self.combined_list = [] self.command_list = command_list self.description = description self.name = name self.platform = platform self.ptf_module = ptf_module def initialize_combined_list(self): self.combined_list = [] x = 0 while x < self.action_list.__len__(): self.combined_list.append(self.action_list[x].name) x += 1 y = 0 while y < self.command_list.__len__(): self.combined_list.append(self.command_list[y].name) y += 1 self.combined_list = sorted(self.combined_list, key=str.casefold) def install(self): if self.ptf_module not in ["", None]: answer = input("[AXIOM] Install " + self.name + " via PTF? 
[Y/n] ") if answer not in ["Y", "y", "Yes", "yes"]: return False else: if config.axiom.platform.lower() != "linux": print_error(str("ERROR: Unable to run PTF on " + config.axiom.platform)) return False else: input_text = str("python3 ./ptf --no-network-connection << EOF\n" + str("use " + self.ptf_module + "\n") + "install\n" + "EOF\n") try: call(input_text, shell=True, cwd=config.axiom.ptf_folder) return True except OSError: print_error("ERROR: Failed to execute PTF") exit(1) else: return False def is_installed(self): ptf_config_file = str(config.axiom.ptf_folder + "/config/ptf.config") if self.ptf_module not in ["", None]: tool_module_file = str(config.axiom.ptf_folder + "/" + self.ptf_module + ".py") try: with open(ptf_config_file) as ptf_config: for line in enumerate(ptf_config): if search("^BASE_INSTALL_PATH=", line[1]): install_path = line[1].split("\"")[1] break except OSError: print_error(str("ERROR: Failed to extract PTF base install path from " + ptf_config_file)) exit(1) else: try: with open(tool_module_file) as module_file: for line in enumerate(module_file): if search("^INSTALL_LOCATION=", line[1]): location = line[1].split("\"")[1] break except OSError: print_error(str("ERROR: Failed to extract PTF install location from " + tool_module_file)) exit(1) else: folder = str(self.ptf_module.split("/")[1]) ptf_tool_folder = str(install_path + "/" + folder + "/" + location) if path.exists(ptf_tool_folder): return True else: return False text = str("which \"" + self.name + "\"") try: dev_null = open(devnull, 'w') if call(split(text), stdout=dev_null, stderr=STDOUT) == 0: return True else: return False except OSError: print_error(str("ERROR: Failed to run command " + text)) exit(1) def platform_matches(self): if self.platform.lower() == config.axiom.platform.lower(): return True else: return False def proceed_despite_uninstalled(self): answer = input("[AXIOM] Unable to confirm " + self.name + " is installed. Proceed anyway? [Y/n] ") if answer not in ["Y", "y", "Yes", "yes"]: return False else: return True
Apache License 2.0
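A hedged sketch of consuming the resolved tuple; constructing and populating the AxiomTool instance ("tool") is omitted here, and resolve_command_name is defined elsewhere in the class:

# Build the sorted action/command name list before resolving by number.
tool.initialize_combined_list()

kind, identifier = tool.resolve_command(0)    # first entry in case-insensitive alphabetical order
if kind is None:
    print("Could not resolve entry")          # identifier is -1 in this case
else:
    print(f"Entry 0 is a {kind} with ID {identifier}")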
humancompatibleai/rlsp
src/envs/env.py
Env.step
python
def step(self, action, r_vec=None): self.s = self.state_step(action) self.timestep+=1 obs = self.s_to_f(self.s) reward = 0 if r_vec is None else np.array(obs.T @ r_vec) done = False info = defaultdict(lambda : '') return np.array(obs, dtype='float32'), reward, np.array(done, dtype='bool'), info
given an action, takes a step from self.s, updates self.s and returns: - the observation (features of the next state) - the associated reward - done, the indicator of completed episode - info
https://github.com/humancompatibleai/rlsp/blob/cacae643752a02b2be092870df2ce3de8d674144/src/envs/env.py#L68-L83
import numpy as np from collections import defaultdict from copy import deepcopy from scipy.sparse import lil_matrix class Env(object): def __init__(self): raise ValueError('Cannot instantiate abstract class Env') def is_deterministic(self): return False def get_initial_state_distribution(self, known_initial_state=True): if known_initial_state: p_0 = np.zeros(self.nS) p_0[self.get_num_from_state(self.init_state)] = 1 else: p_0 = np.ones(self.nS) / self.nS return p_0 def make_transition_matrices(self, states_iter, actions_iter, nS, nA): P = {} T_matrix = lil_matrix((nS * nA, nS)) baseline_matrix = lil_matrix((nS, nS)) actions = list(actions_iter) for state in states_iter: state_id = self.get_num_from_state(state) P[state_id] = {} for action in actions: next_s = self.get_next_states(state, action) next_s = [(p, self.get_num_from_state(s), r) for p, s, r in next_s] P[state_id][action] = next_s state_action_index = state_id * nA + action for prob, next_state_id, _ in next_s: T_matrix[state_action_index, next_state_id] = prob if action == self.default_action: baseline_matrix[state_id, next_state_id] = prob self.P = P self.T_matrix = T_matrix.tocsr() self.T_matrix_transpose = T_matrix.transpose().tocsr() self.baseline_matrix_transpose = baseline_matrix.transpose().tocsr() def make_f_matrix(self, nS, num_features): self.f_matrix = np.zeros((nS, num_features)) for state_id in self.P.keys(): state = self.get_state_from_num(state_id) self.f_matrix[state_id, :] = self.s_to_f(state) def reset(self, state=None): if state is None: state = self.init_state self.timestep = 0 self.s = deepcopy(state) def state_step(self, action, state=None): if state == None: state = self.s next_states = self.get_next_states(state, action) probabilities = [p for p, _, _ in next_states] idx = np.random.choice(np.arange(len(next_states)), p=probabilities) return next_states[idx][1]
MIT License
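A rollout sketch built on this step contract; Env itself is abstract (its __init__ raises), so the concrete subclass instance and the policy callable must be supplied by the caller, and both are assumptions here:

def rollout(env, policy, r_vec, horizon=20):
    # Accumulate the linear reward obs @ r_vec that Env.step computes when
    # r_vec is supplied; policy maps the current state to an action.
    env.reset()
    total = 0.0
    for _ in range(horizon):
        obs, reward, done, info = env.step(policy(env.s), r_vec=r_vec)
        total += float(reward)
        if done:
            break
    return total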
instadeepai/mava
mava/wrappers/pettingzoo.py
PettingZooParallelEnvWrapper.extra_spec
python
def extra_spec(self) -> Dict[str, specs.BoundedArray]: return {}
Extra data spec. Returns: Dict[str, specs.BoundedArray]: spec for extra data.
https://github.com/instadeepai/mava/blob/3159ed45bce4936c8298085cad1bdefbac50160f/mava/wrappers/pettingzoo.py#L549-L555
import copy from typing import Any, Dict, Iterator, List, Optional, Union import dm_env import gym import numpy as np from acme import specs from acme.wrappers.gym_wrapper import _convert_to_spec from pettingzoo.utils.env import AECEnv, ParallelEnv from supersuit import black_death_v1 from mava import types from mava.utils.wrapper_utils import ( apply_env_wrapper_preprocessers, convert_dm_compatible_observations, convert_np_type, parameterized_restart, ) from mava.wrappers.env_wrappers import ParallelEnvWrapper, SequentialEnvWrapper class PettingZooAECEnvWrapper(SequentialEnvWrapper): def __init__( self, environment: AECEnv, env_preprocess_wrappers: Optional[List] = [ (black_death_v1, None), ], ): self._environment = environment self._reset_next_step = True if env_preprocess_wrappers: self._environment = apply_env_wrapper_preprocessers( self._environment, env_preprocess_wrappers ) self.correct_agent_name() self.last_turn_agent = None def reset(self) -> dm_env.TimeStep: self._reset_next_step = False self._environment.reset() self._step_types = { agent: dm_env.StepType.FIRST for agent in self.possible_agents } self._first_step_performed = {agent: False for agent in self.possible_agents} observe, _, done, _ = self._environment.last() agent = self.current_agent observation = self._convert_observation(agent, observe, done) self._discount = convert_np_type(self.discount_spec()[agent].dtype, 1) reward = convert_np_type(self.reward_spec()[agent].dtype, 0) return parameterized_restart(reward, self._discount, observation) def step(self, action: Union[int, float]) -> dm_env.TimeStep: if self._reset_next_step: return self.reset() _, _, done, _ = self._environment.last() if done: self._environment.step(None) else: self._environment.step(action) agent = self.current_agent if self.env_done(): self._reset_next_step = True reward = convert_np_type(self.reward_spec()[agent].dtype, 0) observation = self._convert_observation( agent, self._environment.observe(agent), done ) else: observe, reward, done, info = self._environment.last() reward = convert_np_type(self.reward_spec()[agent].dtype, reward) observation = self._convert_observation(agent, observe, done) step_type = dm_env.StepType.LAST if done else dm_env.StepType.MID return dm_env.TimeStep( observation=observation, reward=reward, discount=self._discount, step_type=step_type, ) def env_done(self) -> bool: return not self.agents def agent_iter(self, max_iter: int = 2 ** 63) -> Iterator: return self._environment.agent_iter(max_iter) def _convert_observation( self, agent: str, observe: Union[dict, np.ndarray], done: bool ) -> types.OLT: legals: np.ndarray = None observation: np.ndarray = None if isinstance(observe, dict) and "action_mask" in observe: legals = observe["action_mask"] observation = observe["observation"] else: legals = np.ones( self._environment.action_spaces[agent].shape, dtype=self._environment.action_spaces[agent].dtype, ) observation = observe if observation.dtype == np.int8: observation = np.dtype(np.float32).type( observation ) if legals.dtype == np.int8: legals = np.dtype(np.int64).type(legals) observation = types.OLT( observation=observation, legal_actions=legals, terminal=np.asarray([done], dtype=np.float32), ) return observation def correct_agent_name(self) -> None: self._environment.reset() if "tictactoe" in self._environment.metadata["name"]: corrected_names = ["player_0", "player_1"] self._environment.unwrapped.possible_agents = corrected_names self._environment.unwrapped.agents = corrected_names self._environment.possible_agents 
= corrected_names self._environment.agents = corrected_names previous_names = list(self.observation_spaces.keys()) for corrected_name, prev_name in zip(corrected_names, previous_names): self.observation_spaces[corrected_name] = self.observation_spaces[ prev_name ] self.action_spaces[corrected_name] = self.action_spaces[prev_name] self.rewards[corrected_name] = self.rewards[prev_name] self.dones[corrected_name] = self.dones[prev_name] self.infos[corrected_name] = self.infos[prev_name] del self.observation_spaces[prev_name] del self.action_spaces[prev_name] del self.rewards[prev_name] del self.dones[prev_name] del self.infos[prev_name] def observation_spec(self) -> types.Observation: observation_specs = {} for agent in self._environment.possible_agents: if isinstance(self._environment.observation_spaces[agent], gym.spaces.Dict): obs_space = copy.deepcopy( self._environment.observation_spaces[agent]["observation"] ) legal_actions_space = copy.deepcopy( self._environment.observation_spaces[agent]["action_mask"] ) else: obs_space = copy.deepcopy(self._environment.observation_spaces[agent]) legal_actions_space = copy.deepcopy( self._environment.action_spaces[agent] ) if obs_space.dtype == np.int8: obs_space.dtype = np.dtype(np.float32) if legal_actions_space.dtype == np.int8: legal_actions_space.dtype = np.dtype(np.int64) observation_specs[agent] = types.OLT( observation=_convert_to_spec(obs_space), legal_actions=_convert_to_spec(legal_actions_space), terminal=specs.Array((1,), np.float32), ) return observation_specs def action_spec(self) -> Dict[str, specs.DiscreteArray]: action_specs = {} for agent in self.possible_agents: action_specs[agent] = _convert_to_spec( self._environment.action_spaces[agent] ) return action_specs def reward_spec(self) -> Dict[str, specs.Array]: reward_specs = {} for agent in self.possible_agents: reward_specs[agent] = specs.Array((), np.float32) return reward_specs def discount_spec(self) -> Dict[str, specs.BoundedArray]: discount_specs = {} for agent in self.possible_agents: discount_specs[agent] = specs.BoundedArray( (), np.float32, minimum=0, maximum=1.0 ) return discount_specs def extra_spec(self) -> Dict[str, specs.BoundedArray]: return {} @property def agents(self) -> List: return self._environment.agents @property def possible_agents(self) -> List: return self._environment.possible_agents @property def environment(self) -> AECEnv: return self._environment @property def current_agent(self) -> Any: return self._environment.agent_selection @property def num_agents(self) -> int: return self._environment.num_agents def __getattr__(self, name: str) -> Any: if hasattr(self.__class__, name): return self.__getattribute__(name) else: return getattr(self._environment, name) class PettingZooParallelEnvWrapper(ParallelEnvWrapper): def __init__( self, environment: ParallelEnv, env_preprocess_wrappers: Optional[List] = [ (black_death_v1, None), ], ): self._environment = environment self._reset_next_step = True if env_preprocess_wrappers: self._environment = apply_env_wrapper_preprocessers( self._environment, env_preprocess_wrappers ) def reset(self) -> dm_env.TimeStep: self._reset_next_step = False self._step_type = dm_env.StepType.FIRST discount_spec = self.discount_spec() observe = self._environment.reset() self._discounts = { agent: convert_np_type(discount_spec[agent].dtype, 1) for agent in self.possible_agents } if type(observe) == tuple: observe, env_extras = observe else: env_extras = {} observations = self._convert_observations( observe, {agent: False for agent in 
self.possible_agents} ) rewards_spec = self.reward_spec() rewards = { agent: convert_np_type(rewards_spec[agent].dtype, 0) for agent in self.possible_agents } return parameterized_restart(rewards, self._discounts, observations), env_extras def step(self, actions: Dict[str, np.ndarray]) -> dm_env.TimeStep: if self._reset_next_step: return self.reset() observations, rewards, dones, infos = self._environment.step(actions) rewards = self._convert_reward(rewards) observations = self._convert_observations(observations, dones) if self.env_done(): self._step_type = dm_env.StepType.LAST self._reset_next_step = True else: self._step_type = dm_env.StepType.MID return dm_env.TimeStep( observation=observations, reward=rewards, discount=self._discounts, step_type=self._step_type, ) def env_done(self) -> bool: return not self.agents def _convert_reward(self, rewards: Dict[str, float]) -> Dict[str, float]: rewards_spec = self.reward_spec() if not rewards: rewards = { agent: convert_np_type(rewards_spec[agent].dtype, 0) for agent in self.possible_agents } else: rewards = { agent: convert_np_type(rewards_spec[agent].dtype, reward) for agent, reward in rewards.items() } return rewards def _convert_observations( self, observes: Dict[str, np.ndarray], dones: Dict[str, bool] ) -> types.Observation: return convert_dm_compatible_observations( observes, dones, self._environment.action_spaces, self._environment.observation_spaces, self.env_done(), self.possible_agents, ) def observation_spec(self) -> types.Observation: observation_specs = {} for agent in self.possible_agents: observation_specs[agent] = types.OLT( observation=_convert_to_spec( self._environment.observation_spaces[agent] ), legal_actions=_convert_to_spec(self._environment.action_spaces[agent]), terminal=specs.Array((1,), np.float32), ) return observation_specs def action_spec(self) -> Dict[str, Union[specs.DiscreteArray, specs.BoundedArray]]: action_specs = {} action_spaces = self._environment.action_spaces for agent in self.possible_agents: action_specs[agent] = _convert_to_spec(action_spaces[agent]) return action_specs def reward_spec(self) -> Dict[str, specs.Array]: reward_specs = {} for agent in self.possible_agents: reward_specs[agent] = specs.Array((), np.float32) return reward_specs def discount_spec(self) -> Dict[str, specs.BoundedArray]: discount_specs = {} for agent in self.possible_agents: discount_specs[agent] = specs.BoundedArray( (), np.float32, minimum=0, maximum=1.0 ) return discount_specs
Apache License 2.0
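A minimal wrapping sketch; the concrete PettingZoo environment (simple_spread_v2 from the MPE suite) is an assumption and not part of this record:

from pettingzoo.mpe import simple_spread_v2
from mava.wrappers.pettingzoo import PettingZooParallelEnvWrapper

env = PettingZooParallelEnvWrapper(simple_spread_v2.parallel_env())

# reset() returns a dm_env TimeStep plus an extras dict (see the context above).
timestep, extras = env.reset()
print(env.possible_agents)
print(env.extra_spec())   # {} -- this wrapper defines no extra data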
bbn-q/auspex
src/auspex/stream.py
DataStreamDescriptor.axis_names
python
def axis_names(self, with_metadata=False): vals = [] for a in self.axes: if a.unstructured: for p in a.parameter: vals.append(p.name) else: vals.append(a.name) if with_metadata and a.metadata is not None: if a.unstructured: vals.append("+".join(a.name) + "_metadata") else: vals.append(a.name + "_metadata") return vals
Returns all axis names, including those from unstructured axes
https://github.com/bbn-q/auspex/blob/e9763e1907546ad49210415a6b8c2f6d9999f31a/src/auspex/stream.py#L392-L406
import os import sys if sys.platform == 'win32' or 'NOFORKING' in os.environ: import threading as mp from queue import Queue else: import multiprocessing as mp from multiprocessing import Queue from multiprocessing import Value, RawValue, RawArray import ctypes import logging import numbers import itertools import time import datetime import numpy as np from functools import reduce from auspex.log import logger def cartesian(arrays, out=None, dtype='f'): arrays = [np.asarray(x) for x in arrays] n = np.prod([x.size for x in arrays]) if out is None: out = np.zeros([n, len(arrays)], dtype=dtype) m = int(n / arrays[0].size) out[:,0] = np.repeat(arrays[0], m) if arrays[1:]: cartesian(arrays[1:], out=out[0:m,1:]) for j in range(1, arrays[0].size): out[j*m:(j+1)*m,1:] = out[0:m,1:] return out class DataAxis(object): def __init__(self, name, points=[], unit=None, metadata=None, dtype=np.float32): super(DataAxis, self).__init__() if isinstance(name, list): self.unstructured = True self.name = name else: self.unstructured = False self.name = str(name) self.points = np.array(points) self.unit = unit self.refine_func = None self.metadata = metadata self.done = True self.original_points = self.points self.has_been_extended = False self.num_new_points = 0 self.dtype = dtype if self.unstructured: if unit is not None and len(name) != len(unit): raise ValueError("DataAxis unit length {} and tuples length {} must match.".format(len(unit),len(name))) if self.unstructured and len(name) != len(points[0]): raise ValueError("DataAxis points length {} and names length {} must match.".format(len(points[0]), len(name))) def data_type(self, with_metadata=False): dtype = [] if self.unstructured: name = "+".join(self.name) dtype.extend([(p.name, 'f') for p in self.parameter]) else: name = self.name dtype.append((name, 'f')) if with_metadata and self.metadata is not None: dtype.append((name + "_metadata", str)) return dtype def points_with_metadata(self): if self.metadata is not None: if self.unstructured: return [list(self.original_points[i]) + [self.metadata[i]] for i in range(len(self.original_points))] return [(self.original_points[i], self.metadata[i], ) for i in range(len(self.original_points))] if self.unstructured: return [tuple(self.original_points[i]) for i in range(len(self.original_points))] return [(self.original_points[i],) for i in range(len(self.original_points))] def tuple_width(self): if self.unstructured: width = len(name) else: width = 1 if self.metadata: width += 1 return width def num_points(self): if self.has_been_extended: return len(self.points) else: return len(self.original_points) def add_points(self, points): if self.unstructured and len(self.parameter) != len(points[0]): raise ValueError("Parameter value tuples must be the same length as the number of parameters.") if type(points) in [list, np.ndarray]: points = np.array(points) else: points = np.array([points]) self.num_new_points = len(points) self.points = np.append(self.points, points, axis=0) self.has_been_extended = True def reset(self): self.points = self.original_points self.has_been_extended = False self.num_new_points = 0 def __repr__(self): return "<DataAxis(name={}, start={}, stop={}, num={}, unit={})>".format( self.name, self.points[0], self.points[-1], len(self.points), self.unit) def __str__(self): return "<DataAxis(name={}, start={}, stop={}, num={}, unit={})>".format( self.name, self.points[0], self.points[-1], len(self.points), self.unit) class SweepAxis(DataAxis): def __init__(self, parameter, points = [], metadata=None, 
refine_func=None, callback_func=None): self.unstructured = hasattr(parameter, '__iter__') self.parameter = parameter if self.unstructured: unit = [p.unit for p in parameter] super(SweepAxis, self).__init__([p.name for p in parameter], points=points, unit=unit, metadata=metadata) self.value = points[0] else: super(SweepAxis, self).__init__(parameter.name, points, unit=parameter.unit, metadata=metadata) self.value = points[0] if self.metadata is not None: self.metadata_value = self.metadata[0] self.refine_func = refine_func self.callback_func = callback_func self.step = 0 self.done = False self.experiment = None if self.unstructured and len(parameter) != len(points[0]): raise ValueError("Parameter value tuples must be the same length as the number of parameters.") logger.debug("Created {}".format(self.__repr__())) def update(self): if self.step < self.num_points(): if self.callback_func: self.callback_func(self, self.experiment) self.value = self.points[self.step] if self.metadata is not None: self.metadata_value = self.metadata[self.step] logger.debug("Sweep Axis '{}' at step {} takes value: {}.".format(self.name, self.step,self.value)) self.push() self.step += 1 self.done = False def check_for_refinement(self, output_connectors_dict): if not self.done and self.step==self.num_points(): logger.debug("Refining on axis {}".format(self.name)) if self.refine_func: points = self.refine_func(self, self.experiment) if points is None or points is False: self.step = 0 self.done = True self.reset() logger.debug("Sweep Axis '{}' complete.".format(self.name)) for oc in output_connectors_dict.values(): oc.push_event("refined", (self.name, True, self.original_points)) return False self.add_points(points) self.done = False for oc in output_connectors_dict.values(): oc.push_event("refined", (self.name, False, points)) return True else: self.step = 0 self.done = True logger.debug("Sweep Axis '{}' complete.".format(self.name)) return False def push(self): if self.unstructured: for p, v in zip(self.parameter, self.value): p.value = v p.push() else: self.parameter.value = self.value self.parameter.push() def __repr__(self): return "<SweepAxis(name={},length={},unit={},value={},unstructured={}>".format(self.name, self.num_points(),self.unit,self.value,self.unstructured) class DataStreamDescriptor(object): def __init__(self, dtype=np.float32): super(DataStreamDescriptor, self).__init__() self.data_name = "Data" self.data_unit = "Unit" self.axes = [] self.unit = None self.params = {} self.parent = None self._exp_src = None self.dtype = dtype self.metadata = {} self.buffer_mult_factor = 1 self.visited_tuples = [] def is_adaptive(self): return True in [a.refine_func is not None for a in self.axes] def add_axis(self, axis, position=0): if isinstance(axis, DataAxis): logger.debug("Adding DataAxis into DataStreamDescriptor: {}".format(axis)) self.axes.insert(position, axis) else: raise TypeError("Failed adding axis. 
Object is not DataAxis: {}".format(axis)) def add_param(self, key, value): self.params[key] = value def num_dims(self): return len(self.axes) def extent(self, flip=False): if self.num_dims() == 2: return (self.axes[1].points[0], self.axes[1].points[-1], self.axes[0].points[0], self.axes[0].points[-1]) else: raise Exception("Can't get extent for any number of axes other than two.") def data_dims(self): dims = [] for a in self.axes: if isinstance(a, SweepAxis): dims.append(1) else: dims.append(len(a.points)) return dims def tuple_width(self): return sum([a.tuple_width() for a in self.axes]) def dims(self): dims = [] for a in self.axes: dims.append(len(a.points)) return [a.num_points() for a in self.axes] def axes_done(self): doneness = [a.done for a in self.axes] return [np.all(doneness[i:]) for i in range(len(doneness))] def done(self): return np.all([a.done for a in self.axes]) def num_points(self): if len(self.axes)>0: return reduce(lambda x,y: x*y, [a.num_points() for a in self.axes]) else: return 0 def expected_num_points(self): if len(self.axes)>0: return reduce(lambda x,y: x*y, [len(a.original_points) for a in self.axes]) else: return 0 def last_data_axis(self): data_axes_idx = [i for i, a in enumerate(self.axes) if not isinstance(a,SweepAxis)] if len(data_axes_idx)>0: return data_axes_idx[0] else: logger.warning("DataStreamDescriptor has no pure DataAxis. Return None.") return None def axis_data_type(self, with_metadata=False, excluding_axis=None): dtype = [] for a in self.axes: if a.name != excluding_axis: dtype.extend(a.data_type(with_metadata=with_metadata)) return dtype def tuples(self, as_structured_array=True): if len(self.visited_tuples) == 0: self.visited_tuples = self.expected_tuples(with_metadata=True) if as_structured_array: if type(self.visited_tuples) is np.ndarray and type(self.visited_tuples.dtype.names) is tuple: return self.visited_tuples elif type(self.visited_tuples) is np.ndarray: return np.rec.fromarrays(self.visited_tuples.T, dtype=self.axis_data_type(with_metadata=True)) return np.core.records.fromrecords(self.visited_tuples, dtype=self.axis_data_type(with_metadata=True)) return self.visited_tuples def expected_tuples(self, with_metadata=False, as_structured_array=True): vals = [a.points_with_metadata() for a in self.axes] simple = True if True in [a.unstructured for a in self.axes]: simple = False if True in [a.metadata is not None for a in self.axes]: simple = False if self.axes == []: simple = False if simple: flattened_list = cartesian(vals) else: nested_list = itertools.product(*vals) flattened_list = [tuple((val for sublist in line for val in sublist)) for line in nested_list] if as_structured_array: if simple: return np.rec.fromarrays(flattened_list.T, dtype=self.axis_data_type(with_metadata=True)) return np.rec.fromrecords(flattened_list, dtype=self.axis_data_type(with_metadata=True)) return flattened_list
Apache License 2.0
lsgos/uncertainty-adversarial-paper
ROC_curves_cats.py
create_adv_examples
python
def create_adv_examples(model, input_t, x_to_adv, attack_dict):
    # Map the 'method' key onto the corresponding cleverhans attack class.
    if attack_dict['method'] == 'fgm':
        attack = attacks.FastGradientMethod(model, sess=K.get_session(), back='tf')
    elif attack_dict['method'] == 'bim':
        attack = attacks.BasicIterativeMethod(model, sess=K.get_session(), back='tf')
    elif attack_dict['method'] == 'mim':
        attack = attacks.MomentumIterativeMethod(model, sess=K.get_session(), back='tf')
    else:
        assert False, 'Current attack needs to be added to the create attack fn'
    # Every key other than 'method' is forwarded to generate() as an attack parameter.
    adv_tensor = attack.generate(input_t, **{k: a for k, a in attack_dict.items() if k != 'method'})
    x_adv = batch_eval(adv_tensor, input_t, x_to_adv, batch_size=args.batch_size,
                       verbose="Generating adv examples")
    return x_adv
This function may look like an odd indirection, but it exists so that the entire attack can be specified as a dict from the command line without editing this script, which also makes it convenient to store the exact settings used for an attack
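To make the dict-driven design above concrete, here is a minimal usage sketch (not from the repository): the attack settings arrive as a JSON string on the command line, and everything except the 'method' key is forwarded to the cleverhans generate() call. The flag name and parameter values are illustrative assumptions, and model, input_t, x_test are assumed to be set up elsewhere in the script.

import json

# Hypothetical command-line input, e.g. --attack-spec '{"method": "fgm", "eps": 0.1}'
spec = '{"method": "fgm", "eps": 0.1, "clip_min": 0.0, "clip_max": 1.0}'
attack_dict = json.loads(spec)

# 'method' selects the cleverhans attack class; the remaining keys become
# keyword arguments of attack.generate().
x_adv = create_adv_examples(model, input_t, x_test, attack_dict)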
https://github.com/lsgos/uncertainty-adversarial-paper/blob/7f39d1ebf15061bd9b6c33f9c5fe4afb5bce42cf/ROC_curves_cats.py#L70-L87
import argparse import h5py import json import numpy as np from keras import backend as K from sklearn.metrics import roc_auc_score, roc_curve, precision_recall_curve, average_precision_score from keras.utils import to_categorical import src.utilities as U from cleverhans import attacks from cleverhans.model import CallableModelWrapper from cats_and_dogs import H5PATH, define_model_resnet def load_model(deterministic=False, name='save/cats_dogs_rn50_w_run.h5'): lp = not deterministic K.set_learning_phase(lp) model = define_model_resnet() model.load_weights(name) model.compile(loss='categorical_crossentropy', optimizer='sgd') return model def make_random_targets(y, n_classes=10): labels = y.argmax(axis=1) new = (labels + np.random.randint(1, n_classes - 1)) % n_classes return to_categorical(new, num_classes=n_classes) def get_models(n_mc=10): models = [] model = load_model(deterministic=True) models.append(('Deterministic Model', model)) model = load_model(deterministic=False) input_tensor = model.input mc_model = U.MCModel(model, input_tensor, n_mc=n_mc) models.append(('MC Model', mc_model)) return models def batch_gen(array, batch_size=256): N = array.shape[0] n_batches = N // batch_size + (N % batch_size != 0) bs = batch_size return (array[i * bs:(i + 1) * bs] for i in range(n_batches)) def batch_eval(tensor, input_t, x, batch_size=256, verbose=False): bg = batch_gen(x, batch_size=batch_size) res = [] for i, b in enumerate(bg): res.append(tensor.eval(session=K.get_session(), feed_dict={input_t: b})) if verbose: print(verbose, 'iteration: ', i) return np.concatenate(res, axis=0)
MIT License
sulab/wikidataintegrator
wikidataintegrator/wdi_fastrun.py
FastRunContainer.clear
python
def clear(self):
    self.prop_dt_map = dict()
    self.prop_data = dict()
    self.rev_lookup = defaultdict(set)
    self.rev_lookup_ci = defaultdict(set)
Convenience function to empty this fastrun container
https://github.com/sulab/wikidataintegrator/blob/5feff88d7e97ffad1713a086202efa93ebe49516/wikidataintegrator/wdi_fastrun.py#L585-L592
import copy from collections import defaultdict from functools import lru_cache from itertools import chain from wikidataintegrator.wdi_config import config example_Q14911732 = {'P1057': {'Q14911732-23F268EB-2848-4A82-A248-CF4DF6B256BC': {'v': 'Q847102', 'ref': {'9d96507726508344ef1b8f59092fb350171b3d99': {('P248', 'Q29458763'), ('P594', 'ENSG00000123374')}}, 'qual': {('P659', 'Q21067546'), ('P659', 'Q20966585')}, } } } class FastRunContainer(object): def __init__(self, base_data_type, engine, mediawiki_api_url=None, sparql_endpoint_url=None, wikibase_url=None, concept_base_uri=None, base_filter=None, use_refs=False, ref_handler=None, case_insensitive=False, debug=False): self.prop_data = {} self.loaded_langs = {} self.statements = [] self.base_filter = {} self.base_filter_string = '' self.prop_dt_map = {} self.current_qid = '' self.rev_lookup = defaultdict(set) self.rev_lookup_ci = defaultdict(set) self.base_data_type = base_data_type self.engine = engine self.mediawiki_api_url = config['MEDIAWIKI_API_URL'] if mediawiki_api_url is None else mediawiki_api_url self.sparql_endpoint_url = config['SPARQL_ENDPOINT_URL'] if sparql_endpoint_url is None else sparql_endpoint_url self.wikibase_url = config['WIKIBASE_URL'] if wikibase_url is None else wikibase_url self.concept_base_uri = config['CONCEPT_BASE_URI'] if concept_base_uri is None else concept_base_uri self.case_insensitive = case_insensitive self.debug = debug self.reconstructed_statements = [] self.use_refs = use_refs self.ref_handler = ref_handler if base_filter and any(base_filter): self.base_filter = base_filter for k, v in self.base_filter.items(): if v: self.base_filter_string += '?item wdt:{0} wd:{1} . \n'.format(k, v) else: self.base_filter_string += '?item wdt:{0} ?zz . \n'.format(k) def reconstruct_statements(self, qid): reconstructed_statements = [] if qid not in self.prop_data: self.reconstructed_statements = reconstructed_statements return reconstructed_statements for prop_nr, dt in self.prop_data[qid].items(): q_props = set(chain(*[[x[0] for x in d['qual']] for d in dt.values()])) r_props = set(chain(*[set(chain(*[[y[0] for y in x] for x in d['ref'].values()])) for d in dt.values()])) props = q_props | r_props for prop in props: if prop not in self.prop_dt_map: self.prop_dt_map.update({prop: self.get_prop_datatype(prop)}) for uid, d in dt.items(): qualifiers = [] for q in d['qual']: f = [x for x in self.base_data_type.__subclasses__() if x.DTYPE == self.prop_dt_map[q[0]]][0] qualifiers.append(f(q[1], prop_nr=q[0], is_qualifier=True)) references = [] for ref_id, refs in d['ref'].items(): this_ref = [] for ref in refs: f = [x for x in self.base_data_type.__subclasses__() if x.DTYPE == self.prop_dt_map[ref[0]]][0] this_ref.append(f(ref[1], prop_nr=ref[0], is_reference=True)) references.append(this_ref) f = [x for x in self.base_data_type.__subclasses__() if x.DTYPE == self.prop_dt_map[prop_nr]][0] if self.prop_dt_map[prop_nr] == 'quantity' and d['unit'] != '1': reconstructed_statements.append( f(d['v'], prop_nr=prop_nr, qualifiers=qualifiers, references=references, unit=d['unit'], concept_base_uri=self.concept_base_uri)) else: reconstructed_statements.append( f(d['v'], prop_nr=prop_nr, qualifiers=qualifiers, references=references)) self.reconstructed_statements = reconstructed_statements return reconstructed_statements def load_item(self, data, cqid=None): match_sets = [] for date in data: current_value = date.get_value() if not current_value and not date.data_type: continue prop_nr = date.get_prop_nr() if prop_nr not in 
self.prop_dt_map: if self.debug: print("{} not found in fastrun".format(prop_nr)) self.prop_dt_map.update({prop_nr: self.get_prop_datatype(prop_nr)}) self._query_data(prop_nr) if self.prop_dt_map[prop_nr] == 'time': current_value = current_value[0] elif self.prop_dt_map[prop_nr] == 'wikibase-item': current_value = 'Q{}'.format(current_value) elif self.prop_dt_map[prop_nr] == 'quantity': current_value = self.format_amount(current_value[0]) if self.debug: print(current_value) if current_value in self.rev_lookup: temp_set = set(self.rev_lookup[current_value]) elif self.case_insensitive and current_value.casefold() in self.rev_lookup_ci: temp_set = set(self.rev_lookup_ci[current_value.casefold()]) else: if self.debug: if self.case_insensitive: print('case insensitive enabled') print(self.rev_lookup_ci) else: print(self.rev_lookup) print('no matches for rev lookup') return True match_sets.append(temp_set) if cqid: matching_qids = {cqid} else: matching_qids = match_sets[0].intersection(*match_sets[1:]) if not len(matching_qids) == 1: if self.debug: print('no matches') return True qid = matching_qids.pop() self.current_qid = qid def write_required(self, data, append_props=None, cqid=None): del_props = set() data_props = set() if not append_props: append_props = [] for x in data: if x.value and x.data_type: data_props.add(x.get_prop_nr()) write_required = False self.load_item(data, cqid) reconstructed_statements = self.reconstruct_statements(self.current_qid) tmp_rs = copy.deepcopy(reconstructed_statements) for p in append_props: app_data = [x for x in data if x.get_prop_nr() == p] rec_app_data = [x for x in tmp_rs if x.get_prop_nr() == p] comp = [] for x in app_data: for y in rec_app_data: if x.get_value() == y.get_value(): if self.use_refs and self.ref_handler: to_be = copy.deepcopy(y) self.ref_handler(to_be, x) else: to_be = x if y.equals(to_be, include_ref=self.use_refs): comp.append(True) if len(comp) != len(app_data): if self.debug: print("failed append: {}".format(p)) return True tmp_rs = [x for x in tmp_rs if x.get_prop_nr() not in append_props and x.get_prop_nr() in data_props] for date in data: reconst_props = set([x.get_prop_nr() for x in tmp_rs]) if (not date.value or not date.data_type) and date.get_prop_nr() in reconst_props: if self.debug: print('returned from delete prop handling') return True elif not date.value or not date.data_type: continue if date.get_prop_nr() in append_props: continue if not date.get_value() and not date.data_type: del_props.add(date.get_prop_nr()) bool_vec = [] for x in tmp_rs: if (x.get_value() == date.get_value() or ( self.case_insensitive and x.get_value().casefold() == date.get_value().casefold())) and x.get_prop_nr() not in del_props: if self.use_refs and self.ref_handler: to_be = copy.deepcopy(x) self.ref_handler(to_be, date) else: to_be = date if x.equals(to_be, include_ref=self.use_refs): bool_vec.append(True) else: bool_vec.append(False) else: bool_vec.append(False) """ bool_vec = [x.equals(date, include_ref=self.use_refs, fref=self.ref_comparison_f) and x.get_prop_nr() not in del_props for x in tmp_rs] """ if self.debug: print("bool_vec: {}".format(bool_vec)) print('-----------------------------------') for x in tmp_rs: if date == x and x.get_prop_nr() not in del_props: print(x.get_prop_nr(), x.get_value(), [z.get_value() for z in x.get_qualifiers()]) print(date.get_prop_nr(), date.get_value(), [z.get_value() for z in date.get_qualifiers()]) else: if x.get_prop_nr() == date.get_prop_nr(): print(x.get_prop_nr(), x.get_value(), [z.get_value() for z 
in x.get_qualifiers()]) print(date.get_prop_nr(), date.get_value(), [z.get_value() for z in date.get_qualifiers()]) if not any(bool_vec): if self.debug: print(len(bool_vec)) print('fast run failed at', date.get_prop_nr()) write_required = True else: tmp_rs.pop(bool_vec.index(True)) if len(tmp_rs) > 0: if self.debug: print('failed because not zero') for x in tmp_rs: print('xxx', x.get_prop_nr(), x.get_value(), [z.get_value() for z in x.get_qualifiers()]) print('failed because not zero--END') write_required = True return write_required def init_language_data(self, lang, lang_data_type): if lang not in self.loaded_langs: self.loaded_langs[lang] = {} if lang_data_type not in self.loaded_langs[lang]: result = self._query_lang(lang=lang, lang_data_type=lang_data_type) data = self._process_lang(result) self.loaded_langs[lang].update({lang_data_type: data}) def get_language_data(self, qid, lang, lang_data_type): self.init_language_data(lang, lang_data_type) current_lang_data = self.loaded_langs[lang][lang_data_type] all_lang_strings = current_lang_data.get(qid, []) if not all_lang_strings and lang_data_type in {'label', 'description'}: all_lang_strings = [''] return all_lang_strings def check_language_data(self, qid, lang_data, lang, lang_data_type): all_lang_strings = set(x.strip().lower() for x in self.get_language_data(qid, lang, lang_data_type)) for s in lang_data: if s.strip().lower() not in all_lang_strings: print('fastrun failed at label: {}, string: {}'.format(lang_data_type, s)) return True return False def get_all_data(self): return self.prop_data def format_query_results(self, r, prop_nr): prop_dt = self.get_prop_datatype(prop_nr) for i in r: for value in {'item', 'sid', 'pq', 'pr', 'ref', 'unit'}: if value in i: i[value] = i[value]['value'].split('/')[-1] for value in {'v', 'qval', 'rval'}: if value in i: if i[value].get("datatype") == 'http://www.w3.org/2001/XMLSchema#dateTime' and not i[value]['value'][0] in '+-': i[value]['value'] = '+' + i[value]['value'] if 'v' in i: if i['v']['type'] == 'uri' and prop_dt == 'wikibase-item': i['v'] = i['v']['value'].split('/')[-1] elif i['v']['type'] == 'literal' and prop_dt == 'quantity': i['v'] = self.format_amount(i['v']['value']) else: i['v'] = i['v']['value'] if type(i['v']) is not dict: self.rev_lookup[i['v']].add(i['item']) if self.case_insensitive: self.rev_lookup_ci[i['v'].casefold()].add(i['item']) if 'qval' in i: qual_prop_dt = self.get_prop_datatype(prop_nr=i['pq']) if i['qval']['type'] == 'uri' and qual_prop_dt == 'wikibase-item': i['qval'] = i['qval']['value'].split('/')[-1] else: i['qval'] = i['qval']['value'] if 'rval' in i: ref_prop_dt = self.get_prop_datatype(prop_nr=i['pr']) if i['rval']['type'] == 'uri' and ref_prop_dt == 'wikibase-item': i['rval'] = i['rval']['value'].split('/')[-1] else: i['rval'] = i['rval']['value'] def format_amount(self, amount): if float(amount) % 1 == 0: amount = int(float(amount)) if not str(amount).startswith('+') and float(amount) >= 0: amount = str('+{}'.format(amount)) return str(amount) def update_frc_from_query(self, r, prop_nr): for i in r: qid = i['item'] if qid not in self.prop_data: self.prop_data[qid] = {prop_nr: dict()} if prop_nr not in self.prop_data[qid]: self.prop_data[qid].update({prop_nr: dict()}) if i['sid'] not in self.prop_data[qid][prop_nr]: self.prop_data[qid][prop_nr].update({i['sid']: dict()}) d = {'v': i['v']} self.prop_data[qid][prop_nr][i['sid']].update(d) if 'qual' not in self.prop_data[qid][prop_nr][i['sid']]: self.prop_data[qid][prop_nr][i['sid']]['qual'] = set() if 'pq' 
in i and 'qval' in i: self.prop_data[qid][prop_nr][i['sid']]['qual'].add((i['pq'], i['qval'])) if 'ref' not in self.prop_data[qid][prop_nr][i['sid']]: self.prop_data[qid][prop_nr][i['sid']]['ref'] = dict() if 'ref' in i: if i['ref'] not in self.prop_data[qid][prop_nr][i['sid']]['ref']: self.prop_data[qid][prop_nr][i['sid']]['ref'][i['ref']] = set() self.prop_data[qid][prop_nr][i['sid']]['ref'][i['ref']].add((i['pr'], i['rval'])) if 'unit' not in self.prop_data[qid][prop_nr][i['sid']]: self.prop_data[qid][prop_nr][i['sid']]['unit'] = '1' if 'unit' in i: self.prop_data[qid][prop_nr][i['sid']]['unit'] = i['unit'] def _query_data_refs(self, prop_nr): page_size = 10000 page_count = 0 num_pages = None if self.debug: query = """PREFIX wd: <{0}/entity/> PREFIX wdt: <{0}/prop/direct/> PREFIX p: <{0}/prop/> PREFIX ps: <{0}/prop/statement/> SELECT (COUNT(?item) as ?c) where {{ {1} ?item p:{2} ?sid . }}""".format(self.wikibase_url, self.base_filter_string, prop_nr) if self.debug: print(query) r = self.engine.execute_sparql_query(query, endpoint=self.sparql_endpoint_url)['results']['bindings'] count = int(r[0]['c']['value']) num_pages = (int(count) // page_size) + 1 print("Query {}: {}/{}".format(prop_nr, page_count, num_pages)) while True: query = """ PREFIX wd: <**wikibase_url**/entity/> PREFIX wdt: <**wikibase_url**/prop/direct/> PREFIX p: <**wikibase_url**/prop/> PREFIX ps: <**wikibase_url**/prop/statement/> #Tool: wdi_core fastrun SELECT ?item ?qval ?pq ?sid ?v ?ref ?pr ?rval WHERE { { SELECT ?item ?v ?sid where { **base_filter_string** ?item p:**prop_nr** ?sid . ?sid ps:**prop_nr** ?v . } GROUP BY ?item ?v ?sid ORDER BY ?sid OFFSET **offset** LIMIT **page_size** } OPTIONAL { ?sid ?pq ?qval . [] wikibase:qualifier ?pq } OPTIONAL { ?sid prov:wasDerivedFrom ?ref . ?ref ?pr ?rval . [] wikibase:reference ?pr } }""".replace("**offset**", str(page_count * page_size)). replace("**base_filter_string**", self.base_filter_string). replace("**prop_nr**", prop_nr).replace("**page_size**", str(page_size)). replace("**wikibase_url**", self.wikibase_url) if self.debug: print(query) results = self.engine.execute_sparql_query(query, endpoint=self.sparql_endpoint_url)['results']['bindings'] self.format_query_results(results, prop_nr) self.update_frc_from_query(results, prop_nr) page_count += 1 if num_pages: print("Query {}: {}/{}".format(prop_nr, page_count, num_pages)) if len(results) == 0: break def _query_data(self, prop_nr): if self.use_refs: self._query_data_refs(prop_nr) else: query = ''' PREFIX wd: <{0}/entity/> PREFIX wdt: <{0}/prop/direct/> PREFIX p: <{0}/prop/> PREFIX ps: <{0}/prop/statement/> PREFIX psv: <{0}/prop/statement/value/> #Tool: wdi_core fastrun select ?item ?qval ?pq ?sid ?v ?unit where {{ {1} ?item p:{2} ?sid . ?sid ps:{2} ?v . OPTIONAL {{ ?sid ?pq ?qval . [] wikibase:qualifier ?pq }} OPTIONAL {{ ?sid psv:{2} ?valuenode . 
?valuenode wikibase:quantityUnit ?unit }} }} '''.format(self.wikibase_url, self.base_filter_string, prop_nr) if self.debug: print(query) r = self.engine.execute_sparql_query(query=query, endpoint=self.sparql_endpoint_url)['results']['bindings'] self.format_query_results(r, prop_nr) self.update_frc_from_query(r, prop_nr) def _query_lang(self, lang, lang_data_type): lang_data_type_dict = { 'label': 'rdfs:label', 'description': 'schema:description', 'aliases': 'skos:altLabel' } query = ''' PREFIX wd: <{0}/entity/> PREFIX wdt: <{0}/prop/direct/> PREFIX p: <{0}/prop/> PREFIX ps: <{0}/prop/statement/> #Tool: wdi_core fastrun SELECT ?item ?label WHERE {{ {1} OPTIONAL {{ ?item {2} ?label FILTER (lang(?label) = "{3}") . }} }} '''.format(self.wikibase_url, self.base_filter_string, lang_data_type_dict[lang_data_type], lang) if self.debug: print(query) return self.engine.execute_sparql_query(query=query, endpoint=self.sparql_endpoint_url)['results']['bindings'] @staticmethod def _process_lang(result): data = defaultdict(set) for r in result: qid = r['item']['value'].split("/")[-1] if 'label' in r: data[qid].add(r['label']['value']) return data @lru_cache(maxsize=100000) def get_prop_datatype(self, prop_nr): item = self.engine(wd_item_id=prop_nr, sparql_endpoint_url=self.sparql_endpoint_url, mediawiki_api_url=self.mediawiki_api_url, wikibase_url=self.wikibase_url) return item.entity_metadata['datatype']
MIT License
napari/napari
napari/_qt/widgets/qt_highlight_preview.py
QtTriangle.maximum
python
def maximum(self):
    return self._max_value
Return maximum value.

Returns
-------
int
    Maximum value of triangle widget.
https://github.com/napari/napari/blob/c4c987c880fe125da608edf427767eafe7f2b3f4/napari/_qt/widgets/qt_highlight_preview.py#L239-L247
import numpy as np from qtpy.QtCore import QSize, Qt, Signal from qtpy.QtGui import QColor, QIntValidator, QPainter, QPainterPath, QPen from qtpy.QtWidgets import ( QFrame, QHBoxLayout, QLabel, QLineEdit, QSlider, QVBoxLayout, QWidget, ) from ...utils.translations import translator trans = translator.load() class QtStar(QFrame): def __init__( self, parent: QWidget = None, value: int = None, ): super().__init__(parent) self._value = value def sizeHint(self): return QSize(100, 100) def minimumSizeHint(self): return QSize(100, 100) def paintEvent(self, e): qp = QPainter() qp.begin(self) self.drawStar(qp) qp.end() def value(self): return self._value def setValue(self, value: int): self._value = value self.update() def drawStar(self, qp): width = self.rect().width() height = self.rect().height() col = QColor(135, 206, 235) pen = QPen(col, self._value) pen.setJoinStyle(Qt.PenJoinStyle.MiterJoin) qp.setPen(pen) path = QPainterPath() star_center_x = width / 2 star_center_y = height / 2 if width < height: radius_outer = width * 0.35 else: radius_outer = height * 0.35 golden_ratio = (1 + np.sqrt(5)) / 2 radius_inner = radius_outer / (1 + golden_ratio) theta_start = np.pi / 2 theta_inc = (2 * np.pi) / 10 for n in range(11): theta = theta_start + (n * theta_inc) theta = np.mod(theta, 2 * np.pi) if np.mod(n, 2) == 0: x = radius_outer * np.cos(theta) y = radius_outer * np.sin(theta) else: x = radius_inner * np.cos(theta) y = radius_inner * np.sin(theta) x_adj = star_center_x - x y_adj = star_center_y - y + 3 if n == 0: path.moveTo(x_adj, y_adj) else: path.lineTo(x_adj, y_adj) qp.drawPath(path) class QtTriangle(QFrame): valueChanged = Signal(int) def __init__( self, parent: QWidget = None, value: int = 1, min_value: int = 1, max_value: int = 10, ): super().__init__(parent) self._max_value = max_value self._min_value = min_value self._value = value def mousePressEvent(self, event): perc = event.pos().x() / self.rect().width() value = ((self._max_value - self._min_value) * perc) + self._min_value self.setValue(value) def paintEvent(self, e): qp = QPainter() qp.begin(self) self.drawTriangle(qp) perc = (self._value - self._min_value) / ( self._max_value - self._min_value ) self.drawLine(qp, self.rect().width() * perc) qp.end() def sizeHint(self): return QSize(75, 30) def minimumSizeHint(self): return QSize(75, 30) def drawTriangle(self, qp): width = self.rect().width() height = self.rect().height() col = QColor(135, 206, 235) qp.setPen(QPen(col, 1)) qp.setBrush(col) path = QPainterPath() height = 10 path.moveTo(0, height) path.lineTo(width, height) path.lineTo(width, 0) path.closeSubpath() qp.drawPath(path) def value(self): return self._value def setValue(self, value): self._value = value self.update() def minimum(self): return self._min_value
BSD 3-Clause New or Revised License
aoldoni/tetre
lib/tetre/graph_processing_children.py
Obj.remove_tags
python
def remove_tags(self, root, node_set, spacy_tree):
    is_applied = False
    node_set = set(node_set) - self.tags_to_be_removed
    for child in spacy_tree.children:
        if child.dep_ in self.tags_to_be_removed:
            is_applied = True
            child.no_follow = True
    return root, node_set, spacy_tree, is_applied
1) Consider the following sentence: "2 Related work Learning to rank has been a promising research area which continuously improves web search relevance (Burges et al." In this case, the dependency parser makes the noun, rather than the action "improves" itself, the parent of the word "improves" in the tree, attaching it via the relcl relation. This method adjusts the tree, bringing that parent node down under "improves". 2) Now consider the following sentences: "Evaluation on the ACE 2003 corpus shows that, compared with a baseline coreference resolution system of no explicit anaphoricity determination, their method improves the performance by 2.8, 2.2 and 4.5 to 54.5, 64.0 and 60.8 in F1-measure (due to the gain in precision) on the NWIRE, NPAPER and BNEWS domains, respectively, via careful determination of an anaphoricity threshold with proper constraint-based representation and global optimization." "Adding latent states to the smoothing model further improves the POS tagging accuracy (Huang and Yates, 2012)." In these cases, the "appos" relation carries further information on the noun that is part of the "obj" node (http://universaldependencies.org/u/dep/appos.html). It can be removed, since in all observed cases the extra information was not really relevant (whether it was citations or long subsequent clauses).

Args:
    root: The head of the NLTK tree.
    node_set: The nodes of the NLTK tree.
    spacy_tree: The TreeNode object, rooted at the child node.

Returns:
    root: The modified head of the NLTK tree.
    node_set: The modified nodes of the NLTK tree.
    spacy_tree: The modified TreeNode object.
    is_applied: A boolean marking if the rule was applied or not.
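The removal behaviour of remove_tags is small enough to illustrate with stub objects. The following is a standalone re-implementation for illustration only; it does not import tetre, and the node classes are made up. It mirrors how the rule drops uninteresting dependency tags from the node set and marks matching children so they are not followed.

TAGS_TO_BE_REMOVED = {'det', ' ', ''}

class StubNode:
    def __init__(self, dep_):
        self.dep_ = dep_
        self.no_follow = False

class StubTree:
    def __init__(self, children):
        self.children = children

def prune(node_set, tree):
    node_set = set(node_set) - TAGS_TO_BE_REMOVED
    applied = False
    for child in tree.children:
        if child.dep_ in TAGS_TO_BE_REMOVED:
            child.no_follow = True  # child stays in the tree but is never followed
            applied = True
    return node_set, applied

node_set, applied = prune({'det', 'dobj'}, StubTree([StubNode('det'), StubNode('dobj')]))
# node_set == {'dobj'} and applied is True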
https://github.com/aoldoni/tetre/blob/a8b07aa47a9adf7dce46dff96e20be63a761e9f7/lib/tetre/graph_processing_children.py#L160-L204
from tetre.rule_applier import * from tree_utils import find_in_spacynode class Children(RuleApplier): def __init__(self): RuleApplier.__init__(self) self.tags_to_be_removed = {'det', ' ', ''} def bring_grandchild_prep_or_relcl_up_as_child(self, root, node_set, spacy_tree): bring_up = [ ("relcl", "", "mod"), ("acl", "", "mod"), ("advcl", "", "mod"), ("prep", "by", "prep"), ("prep", "to", "prep"), ("prep", "for", "prep"), ("prep", "with", "prep"), ("prep", "whereby", "prep"), ] is_applied = False node = spacy_tree head = spacy_tree.head for dep_, orth_, dep_new_ in bring_up: changed = True while changed: changed = False prep = find_in_spacynode(node, dep_, orth_) if not prep: break prep_head = prep.head.children[:] for i in range(0, len(prep_head)): if prep.dep_ in prep_head[i].dep_ and prep.orth_ == prep_head[i].orth_ and prep.idx == prep_head[i].idx: is_applied = True prep.head.children.pop(i) head.children.append(prep) prep.head = head prep.dep_ = dep_new_ node_set = list(node_set) node_set.append(dep_new_) changed = True break return root, node_set, spacy_tree, is_applied class Obj(Children): def __init__(self): Children.__init__(self) return @RuleApplier.register_function def remove_duplicates(self, root, node_set, spacy_tree): return root, set(node_set), spacy_tree, False @RuleApplier.register_function
MIT License
mukund109/word-mesh
wordmesh/utils.py
PlotlyVisualizer.__init__
python
def __init__(self, words, fontsizes_norm, height, width, filename='temp-plot.html',
             title=None, textcolors='white', hovertext=None, axis_visible=False,
             bg_color='black', title_fontcolor='white', title_fontsize='auto',
             title_font_family='Courier New, monospace', bb_padding=0.08,
             boundary_padding_factor=1.1):
    self.words = words
    self.fontsizes_norm = fontsizes_norm
    self.height = height
    self.width = width
    self.title = title
    self.textcolors = textcolors
    self.hovertext = hovertext
    self.axis_visible = axis_visible
    self.bg_color = bg_color
    self.title_fontcolor = title_fontcolor
    self.title_fontsize = title_fontsize
    self.title_font_family = title_font_family
    self.padding = bb_padding
    self.boundary_padding = boundary_padding_factor
    self.bounding_box_dimensions, self.real_fontsizes = self.get_bb_dimensions()
Parameters ----------
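Since the parameter list above is left empty, here is a hedged construction example inferred only from the signature shown; the word list, normalised font sizes, and canvas dimensions are made-up values, and the import path is an assumption based on the function_path field.

import numpy as np
from wordmesh.utils import PlotlyVisualizer

viz = PlotlyVisualizer(words=['alpha', 'beta', 'gamma'],
                       fontsizes_norm=np.array([0.5, 0.3, 0.2]),  # assumed normalised weights
                       height=600,
                       width=800,
                       title='Example word mesh')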
https://github.com/mukund109/word-mesh/blob/2be945d988d661bd51afa6bd646f944c9c5d202a/wordmesh/utils.py#L19-L44
import numpy as np import plotly.offline as py import plotly.graph_objs as go PLOTLY_FONTSIZE_BBW = 0.6 PLOTLY_FONTSIZE_BBH = 0.972+0.088 class PlotlyVisualizer():
MIT License
datadotworld/data.world-py
datadotworld/client/api.py
RestApiClient.get_user_data
python
def get_user_data(self):
    try:
        return self._user_api.get_user_data().to_dict()
    except _swagger.rest.ApiException as e:
        raise RestApiError(cause=e)
Retrieve data for the authenticated user

:returns: User data, with all attributes
:rtype: dict
:raises RestApiError: If a server error occurs

Examples
--------
>>> import datadotworld as dw
>>> api_client = dw.api_client()
>>> user_data = api_client.get_user_data()  # doctest: +SKIP
>>> user_data['display_name']  # doctest: +SKIP
'Name User'
https://github.com/datadotworld/data.world-py/blob/7e5f474b655f4f0c88cc6862353e4d52c0e0bb31/datadotworld/client/api.py#L501-L519
from __future__ import absolute_import, division import functools import glob import json import os import shutil import uuid import zipfile from os import path import requests import six from datadotworld.client import _swagger from datadotworld.client.content_negotiating_api_client import ( ContentNegotiatingApiClient ) from datadotworld.util import parse_dataset_key, _user_agent from datadotworld.hosts import API_HOST, DOWNLOAD_HOST class RestApiClient(object): def __init__(self, config): self._config = config self._host = "{}/v0".format(API_HOST) swagger_client = _swagger.ApiClient( host=self._host, header_name='Authorization', header_value='Bearer {}'.format(self._config.auth_token)) swagger_client.user_agent = _user_agent() self._build_api_client = functools.partial( ContentNegotiatingApiClient, host=self._host, header_name='Authorization', header_value='Bearer {}'.format(self._config.auth_token), user_agent=_user_agent()) self._datasets_api = _swagger.DatasetsApi(swagger_client) self._user_api = _swagger.UserApi(swagger_client) self._streams_api = _swagger.StreamsApi(swagger_client) self._projects_api = _swagger.ProjectsApi(swagger_client) self._insights_api = _swagger.InsightsApi(swagger_client) self._files_api = _swagger.FilesApi(swagger_client) self._queries_api = _swagger.QueriesApi(swagger_client) self._search_api = _swagger.SearchApi(swagger_client) self._tables_api = _swagger.TablesApi(swagger_client) self._connections_api = _swagger.ConnectionsApi(swagger_client) def get_dataset(self, dataset_key): try: return self._datasets_api.get_dataset( *(parse_dataset_key(dataset_key))).to_dict() except _swagger.rest.ApiException as e: raise RestApiError(cause=e) def create_dataset(self, owner_id, **kwargs): request = self.__build_dataset_obj( lambda: _swagger.DatasetCreateRequest( title=kwargs.get('title'), visibility=kwargs.get('visibility')), lambda name, url, expand_archive, description, labels: _swagger.FileCreateRequest( name=name, source=_swagger.FileSourceCreateRequest( url=url, expand_archive=expand_archive), description=description, labels=labels), kwargs) try: (_, _, headers) = self._datasets_api.create_dataset_with_http_info( owner_id, request, _return_http_data_only=False) if 'Location' in headers: return headers['Location'] except _swagger.rest.ApiException as e: raise RestApiError(cause=e) def update_dataset(self, dataset_key, **kwargs): request = self.__build_dataset_obj( lambda: _swagger.DatasetPatchRequest(), lambda name, url, expand_archive, description, labels: _swagger.FileCreateOrUpdateRequest( name=name, source=_swagger.FileSourceCreateOrUpdateRequest( url=url, expand_archive=expand_archive) if url is not None else None, description=description, labels=labels), kwargs) owner_id, dataset_id = parse_dataset_key(dataset_key) try: self._datasets_api.patch_dataset(owner_id, dataset_id, request) except _swagger.rest.ApiException as e: raise RestApiError(cause=e) def replace_dataset(self, dataset_key, **kwargs): request = self.__build_dataset_obj( lambda: _swagger.DatasetPutRequest( title=kwargs.get('title'), visibility=kwargs.get('visibility') ), lambda name, url, expand_archive, description, labels: _swagger.FileCreateRequest( name=name, source=_swagger.FileSourceCreateRequest( url=url, expand_archive=expand_archive), description=description, labels=labels), kwargs) owner_id, dataset_id = parse_dataset_key(dataset_key) try: self._datasets_api.replace_dataset(owner_id, dataset_id, request) except _swagger.rest.ApiException as e: raise RestApiError(cause=e) def 
delete_dataset(self, dataset_key): owner_id, dataset_id = parse_dataset_key(dataset_key) try: self._datasets_api.delete_dataset(owner_id, dataset_id) except _swagger.rest.ApiException as e: raise RestApiError(cause=e) def add_files_via_url(self, dataset_key, files={}): file_requests = [_swagger.FileCreateOrUpdateRequest( name=file_name, source=_swagger.FileSourceCreateOrUpdateRequest( url=file_info['url'], expand_archive=file_info.get('expand_archive', False)), description=file_info.get('description'), labels=file_info.get('labels'), ) for file_name, file_info in files.items()] owner_id, dataset_id = parse_dataset_key(dataset_key) try: self._files_api.add_files_by_source( owner_id, dataset_id, _swagger.FileBatchUpdateRequest(files=file_requests)) except _swagger.rest.ApiException as e: raise RestApiError(cause=e) def sync_files(self, dataset_key): try: self._files_api.sync(*(parse_dataset_key(dataset_key))) except _swagger.rest.ApiException as e: raise RestApiError(cause=e) def upload_files(self, dataset_key, files, files_metadata={}, **kwargs): owner_id, dataset_id = parse_dataset_key(dataset_key) try: self._files_api.upload_files(owner_id, dataset_id, files, **kwargs) if files_metadata: self.update_dataset(dataset_key, files=files_metadata) except _swagger.rest.ApiException as e: raise RestApiError(cause=e) def upload_file(self, dataset_key, name, file_metadata={}, **kwargs): owner_id, dataset_id = parse_dataset_key(dataset_key) try: self._files_api.upload_file(owner_id, dataset_id, name, **kwargs) if file_metadata: self.update_dataset(dataset_key, files=file_metadata) except _swagger.rest.ApiException as e: raise RestApiError(cause=e) def delete_files(self, dataset_key, names): owner_id, dataset_id = parse_dataset_key(dataset_key) try: self._files_api.delete_files_and_sync_sources( owner_id, dataset_id, names) except _swagger.rest.ApiException as e: raise RestApiError(cause=e) def download_datapackage(self, dataset_key, dest_dir): if path.isdir(dest_dir): raise ValueError('dest_dir must be a new directory, ' 'but {} already exists'.format(dest_dir)) owner_id, dataset_id = parse_dataset_key(dataset_key) url = "{0}/datapackage/{1}/{2}".format( DOWNLOAD_HOST, owner_id, dataset_id) headers = { 'User-Agent': _user_agent(), 'Authorization': 'Bearer {0}'.format(self._config.auth_token) } try: response = requests.get(url, headers=headers, stream=True) response.raise_for_status() except requests.RequestException as e: raise RestApiError(cause=e) unzip_dir = path.join(self._config.tmp_dir, str(uuid.uuid4())) os.makedirs(unzip_dir) zip_file = path.join(unzip_dir, 'dataset.zip') with open(zip_file, 'wb') as f: for data in response.iter_content(chunk_size=4096): f.write(data) zip_obj = zipfile.ZipFile(zip_file) zip_obj.extractall(path=unzip_dir) unzipped_descriptor = glob.glob( '{}/**/datapackage.json'.format(unzip_dir)) if not unzipped_descriptor: raise RuntimeError( 'Zip file did not contain a datapackage manifest.') unzipped_dir = path.dirname(unzipped_descriptor[0]) shutil.move(unzipped_dir, dest_dir) shutil.rmtree(unzip_dir, ignore_errors=True) return path.join(dest_dir, 'datapackage.json')
Apache License 2.0
engineering-course/lip_jppnet
utils/utils.py
save
python
def save(saver, sess, logdir, step):
    # Create the snapshot directory if it does not exist yet.
    if not os.path.exists(logdir):
        os.makedirs(logdir)
    model_name = 'model.ckpt'
    checkpoint_path = os.path.join(logdir, model_name)
    saver.save(sess, checkpoint_path, global_step=step)
    print('The checkpoint has been created.')
Save weights.

Args:
    saver: TensorFlow Saver object.
    sess: TensorFlow session.
    logdir: path to the snapshots directory.
    step: current training step.
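A minimal usage sketch of save (TensorFlow 1.x style); the graph, session setup, snapshot directory, and checkpoint interval are placeholders and not taken from the repository.

import tensorflow as tf

saver = tf.train.Saver(max_to_keep=5)
with tf.Session() as sess:
    sess.run(tf.global_variables_initializer())
    for step in range(1001):
        # ... run one training step here ...
        if step % 100 == 0:
            save(saver, sess, './snapshots', step)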
https://github.com/engineering-course/lip_jppnet/blob/1899e8d18656312b6f9cea1c908205dcdf6c95e5/utils/utils.py#L84-L100
from PIL import Image import numpy as np import tensorflow as tf import os import scipy.misc from scipy.stats import multivariate_normal import matplotlib.pyplot as plt n_classes = 20 label_colours = [(0,0,0) ,(128,0,0),(255,0,0),(0,85,0),(170,0,51),(255,85,0) ,(0,0,85),(0,119,221),(85,85,0),(0,85,85),(85,51,0) ,(52,86,128),(0,128,0),(0,0,255),(51,170,221),(0,255,255) ,(85,255,170),(170,255,85),(255,255,0),(255,170,0)] IMG_MEAN = np.array((104.00698793,116.66876762,122.67891434), dtype=np.float32) def decode_labels(mask, num_images=1, num_classes=20): n, h, w, c = mask.shape assert(n >= num_images), 'Batch size %d should be greater or equal than number of images to save %d.' % (n, num_images) outputs = np.zeros((num_images, h, w, 3), dtype=np.uint8) for i in range(num_images): img = Image.new('RGB', (len(mask[i, 0]), len(mask[i]))) pixels = img.load() for j_, j in enumerate(mask[i, :, :, 0]): for k_, k in enumerate(j): if k < n_classes: pixels[k_,j_] = label_colours[k] outputs[i] = np.array(img) return outputs def prepare_label(input_batch, new_size, one_hot=True): with tf.name_scope('label_encode'): input_batch = tf.image.resize_nearest_neighbor(input_batch, new_size) input_batch = tf.squeeze(input_batch, squeeze_dims=[3]) if one_hot: input_batch = tf.one_hot(input_batch, depth=n_classes) return input_batch def inv_preprocess(imgs, num_images): n, h, w, c = imgs.shape assert(n >= num_images), 'Batch size %d should be greater or equal than number of images to save %d.' % (n, num_images) outputs = np.zeros((num_images, h, w, c), dtype=np.uint8) for i in range(num_images): outputs[i] = (imgs[i] + IMG_MEAN)[:, :, ::-1].astype(np.uint8) return outputs
MIT License
rajammanabrolu/worldgeneration
evennia-engine/evennia/evennia/contrib/turnbattle/tb_magic.py
is_in_combat
python
def is_in_combat(character):
    return bool(character.db.combat_turnhandler)
Returns True if the given character is in combat.

Args:
    character (obj): Character whose combat status is being checked.

Returns:
    (bool): True if in combat, False otherwise.
https://github.com/rajammanabrolu/worldgeneration/blob/5e97df013399e1a401d0a7ec184c4b9eb3100edd/evennia-engine/evennia/evennia/contrib/turnbattle/tb_magic.py#L265-L275
from random import randint from evennia import DefaultCharacter, Command, default_cmds, DefaultScript, create_object from evennia.commands.default.muxcommand import MuxCommand from evennia.commands.default.help import CmdHelp TURN_TIMEOUT = 30 ACTIONS_PER_TURN = 1 def roll_init(character): return randint(1, 1000) def get_attack(attacker, defender): attack_value = randint(1, 100) return attack_value def get_defense(attacker, defender): defense_value = 50 return defense_value def get_damage(attacker, defender): damage_value = randint(15, 25) return damage_value def apply_damage(defender, damage): defender.db.hp -= damage if defender.db.hp <= 0: defender.db.hp = 0 def at_defeat(defeated): defeated.location.msg_contents("%s has been defeated!" % defeated) def resolve_attack(attacker, defender, attack_value=None, defense_value=None): if not attack_value: attack_value = get_attack(attacker, defender) if not defense_value: defense_value = get_defense(attacker, defender) if attack_value < defense_value: attacker.location.msg_contents("%s's attack misses %s!" % (attacker, defender)) else: damage_value = get_damage(attacker, defender) attacker.location.msg_contents( "%s hits %s for %i damage!" % (attacker, defender, damage_value) ) apply_damage(defender, damage_value) if defender.db.hp <= 0: at_defeat(defender) def combat_cleanup(character): for attr in character.attributes.all(): if attr.key[:7] == "combat_": character.attributes.remove(key=attr.key)
MIT License
hipchat/curler
curler/twisted_gears/client.py
GearmanWorker.doJob
python
def doJob(self):
    return self.getJob().addCallback(self._finishJob)
Do a single job
https://github.com/hipchat/curler/blob/b22bf79ecc4c1985038e0ba183ca7125be4b8ac0/curler/twisted_gears/client.py#L164-L166
import sys import struct from collections import deque from twisted.internet import defer from twisted.protocols import stateful from twisted.python import log from constants import * __all__ = ['GearmanProtocol', 'GearmanWorker', 'GearmanClient'] class GearmanProtocol(stateful.StatefulProtocol): unsolicited = [ WORK_COMPLETE, WORK_FAIL, NOOP, WORK_DATA, WORK_WARNING, WORK_EXCEPTION ] def makeConnection(self, transport): self.receivingCommand = 0 self.deferreds = deque() self.unsolicited_handlers = set() stateful.StatefulProtocol.makeConnection(self, transport) def send_raw(self, cmd, data=''): self.transport.writeSequence([REQ_MAGIC, struct.pack(">II", cmd, len(data)), data]) def send(self, cmd, data=''): self.send_raw(cmd, data) d = defer.Deferred() self.deferreds.append(d) return d def getInitialState(self): return self._headerReceived, HEADER_LEN def connectionLost(self, reason): for d in list(self.deferreds): d.errback(reason) self.deferreds.clear() def _headerReceived(self, header): if header[:4] != RES_MAGIC: log.msg("Invalid header magic returned, failing.") self.transport.loseConnection() return cmd, size = struct.unpack(">II", header[4:]) self.receivingCommand = cmd return self._completed, size def _completed(self, data): if self.receivingCommand in self.unsolicited: self._unsolicited(self.receivingCommand, data) else: d = self.deferreds.popleft() d.callback((self.receivingCommand, data)) self.receivingCommand = 0 return self._headerReceived, HEADER_LEN def _unsolicited(self, cmd, data): for cb in self.unsolicited_handlers: cb(cmd, data) def register_unsolicited(self, cb): self.unsolicited_handlers.add(cb) def unregister_unsolicited(self, cb): self.unsolicited_handlers.discard(cb) def echo(self, data="hello"): return self.send(ECHO_REQ, data) class _GearmanJob(object): def __init__(self, raw_data): self.handle, self.function, self.data = raw_data.split("\0", 2) def __repr__(self): return "<GearmanJob %s func=%s with %d bytes of data>" % (self.handle, self.function, len(self.data)) class GearmanWorker(object): def __init__(self, protocol): self.protocol = protocol self.functions = {} self.sleeping = None self.protocol.register_unsolicited(self._unsolicited) def setId(self, client_id): self.protocol.send_raw(SET_CLIENT_ID, client_id) def registerFunction(self, name, func): self.functions[name] = func self.protocol.send_raw(CAN_DO, name) def _send_job_res(self, cmd, job, data=''): self.protocol.send_raw(cmd, job.handle + "\0" + data) def _sleep(self): if not self.sleeping: self.sleeping = defer.Deferred() self.protocol.send_raw(PRE_SLEEP) return self.sleeping def _unsolicited(self, cmd, data): assert cmd == NOOP if self.sleeping: self.sleeping.callback(None) self.sleeping = None @defer.inlineCallbacks def getJob(self): if self.sleeping: yield self._sleep() stuff = yield self.protocol.send(GRAB_JOB) while stuff[0] == NO_JOB: yield self._sleep() stuff = yield self.protocol.send(GRAB_JOB) defer.returnValue(_GearmanJob(stuff[1])) @defer.inlineCallbacks def _finishJob(self, job): assert job f = self.functions[job.function] assert f try: rv = yield f(job) if rv is None: rv = "" self._send_job_res(WORK_COMPLETE, job, rv) except: etype, emsg, bt = sys.exc_info() self._send_job_res(WORK_EXCEPTION, job, "%s(%s)" % (etype.__name__, emsg)) self._send_job_res(WORK_FAIL, job)
MIT License
xanaduai/strawberryfields
strawberryfields/backends/tfbackend/ops.py
beamsplitter
python
def beamsplitter(theta, phi, mode1, mode2, in_modes, cutoff, pure=True, batched=False,
                 dtype=tf.complex64):
    theta = tf.cast(theta, dtype)
    phi = tf.cast(phi, dtype)
    matrix = beamsplitter_matrix(theta, phi, cutoff, batched, dtype)
    output = two_mode_gate(matrix, mode1, mode2, in_modes, pure, batched)
    return output
Applies the beamsplitter unitary to the two specified modes of the input state and returns the resulting state
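A hedged usage sketch of beamsplitter: a 50:50 beamsplitter (theta = pi/4 in this convention, assumed) acting on the two-mode pure state |1, 0>. The cutoff value and the einsum construction of the joint state are illustrative assumptions; fock_state and beamsplitter are the functions from this module.

import numpy as np
import tensorflow as tf

cutoff = 5
# Joint pure state |1>|0> as a (cutoff, cutoff) tensor.
state = tf.einsum('a,b->ab', fock_state(1, cutoff), fock_state(0, cutoff))
out = beamsplitter(theta=np.pi / 4, phi=0.0, mode1=0, mode2=1,
                   in_modes=state, cutoff=cutoff, pure=True)
# 'out' is the transformed two-mode state; photon-number probabilities are
# given by tf.abs(out) ** 2.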
https://github.com/xanaduai/strawberryfields/blob/c1eed81a93419cb9c28a6ca205925691063722ce/strawberryfields/backends/tfbackend/ops.py#L825-L833
from string import ascii_lowercase as indices import tensorflow as tf import numpy as np from scipy.special import factorial from thewalrus.fock_gradients import displacement as displacement_tw from thewalrus.fock_gradients import grad_displacement as grad_displacement_tw from thewalrus.fock_gradients import squeezing as squeezing_tw from thewalrus.fock_gradients import grad_squeezing as grad_squeezing_tw from thewalrus.fock_gradients import beamsplitter as beamsplitter_tw from thewalrus.fock_gradients import grad_beamsplitter as grad_beamsplitter_tw from thewalrus.fock_gradients import mzgate as mzgate_tw from thewalrus.fock_gradients import grad_mzgate as grad_mzgate_tw from thewalrus.fock_gradients import two_mode_squeezing as two_mode_squeezing_tw from thewalrus.fock_gradients import grad_two_mode_squeezing as grad_two_mode_squeezing_tw try: from tensorflow.python.ops.special_math_ops import _einsum_v1 tf.einsum = _einsum_v1 except ImportError: pass max_num_indices = len(indices) def _numer_safe_power(base, exponent, dtype=tf.complex64): if exponent == 0: return tf.ones_like(base, dtype) return base ** exponent def mix(pure_state, batched=False): if not batched: pure_state = tf.expand_dims(pure_state, 0) batch_offset = 1 num_modes = len(pure_state.shape) - batch_offset max_num = (max_num_indices - batch_offset) // 2 if num_modes > max_num: raise ValueError( "Converting state from pure to mixed currently only supported for {} modes.".format( max_num ) ) batch_index = indices[:batch_offset] bra_indices = indices[batch_offset : batch_offset + num_modes] ket_indices = indices[batch_offset + num_modes : batch_offset + 2 * num_modes] eqn_lhs = batch_index + bra_indices + "," + batch_index + ket_indices eqn_rhs = "".join(bdx + kdx for bdx, kdx in zip(bra_indices, ket_indices)) eqn = eqn_lhs + "->" + batch_index + eqn_rhs mixed_state = tf.einsum(eqn, pure_state, tf.math.conj(pure_state)) if not batched: mixed_state = tf.squeeze(mixed_state, 0) return mixed_state def batchify_indices(idxs, batch_size): return [(bdx,) + idxs[i] for bdx in range(batch_size) for i in range(len(idxs))] def unravel_index(ind, tensor_shape): ind = tf.expand_dims(tf.cast(ind, tf.int64), 0) tensor_shape = tf.expand_dims(tf.cast(tensor_shape, tf.int64), 1) strides = tf.math.cumprod(tensor_shape, reverse=True) strides_shifted = tf.math.cumprod(tensor_shape, exclusive=True, reverse=True) unraveled_coords = (ind % strides) // strides_shifted return tf.transpose(unraveled_coords) def squeezed_vacuum_vector(r, theta, cutoff, batched=False, eps=1e-32, dtype=tf.complex64): if batched: batch_size = r.shape[0] r = tf.cast(r, dtype) theta = tf.cast(theta, dtype) c1 = tf.cast( tf.stack( [ tf.sqrt(1 / tf.cosh(r)) * np.sqrt(factorial(k)) / factorial(k / 2.0) for k in range(0, cutoff, 2) ], axis=-1, ), dtype, ) c2 = tf.stack( [ (-0.5 * tf.exp(1j * theta) * tf.cast(tf.tanh(r + eps), dtype)) ** (k / 2.0) for k in range(0, cutoff, 2) ], axis=-1, ) even_coeffs = c1 * c2 ind = [(k,) for k in np.arange(0, cutoff, 2)] shape = [cutoff] if batched: ind = batchify_indices(ind, batch_size) shape = [batch_size] + shape output = tf.scatter_nd(ind, tf.reshape(even_coeffs, [-1]), shape) return output @tf.custom_gradient def single_squeezing_matrix(r, phi, cutoff, dtype=tf.complex64.as_numpy_dtype): r = r.numpy() phi = phi.numpy() gate = squeezing_tw(r, phi, cutoff, dtype) def grad(dy): Dr, Dphi = grad_squeezing_tw(gate, r, phi) grad_r = tf.math.real(tf.reduce_sum(dy * tf.math.conj(Dr))) grad_phi = tf.math.real(tf.reduce_sum(dy * 
tf.math.conj(Dphi))) return grad_r, grad_phi, None return gate, grad def squeezer_matrix(r, phi, cutoff, batched=False, dtype=tf.complex64): r = tf.cast(r, dtype) phi = tf.cast(phi, dtype) if batched: return tf.stack( [ single_squeezing_matrix(r_, phi_, cutoff, dtype=dtype.as_numpy_dtype) for r_, phi_ in tf.transpose([r, phi]) ] ) return single_squeezing_matrix(r, phi, cutoff, dtype=dtype.as_numpy_dtype) def phase_shifter_matrix(theta, cutoff, batched=False, dtype=tf.complex64): if batched: batch_size = theta.shape[0] theta = tf.cast(theta, dtype) shape = [cutoff, cutoff] if batched: shape = [batch_size] + shape zero_matrix = tf.zeros(shape=shape, dtype=dtype) diag = [tf.exp(1j * theta * k) for k in np.arange(cutoff, dtype=np.complex64)] if batched: diag = tf.stack(diag, axis=1) diag_matrix = tf.linalg.set_diag(zero_matrix, diag) return diag_matrix def kerr_interaction_matrix(kappa, cutoff, batched=False): coeffs = [tf.exp(1j * kappa * n ** 2) for n in range(cutoff)] if batched: coeffs = tf.stack(coeffs, axis=1) output = tf.linalg.diag(coeffs) return output def cross_kerr_interaction_matrix(kappa, cutoff, batched=False): coeffs = [tf.exp(1j * kappa * n1 * n2) for n1 in range(cutoff) for n2 in range(cutoff)] if batched: coeffs = tf.stack(coeffs, axis=1) output = tf.linalg.diag(coeffs) if batched: output = tf.transpose(tf.reshape(output, [-1] + [cutoff] * 4), [0, 1, 3, 2, 4]) else: output = tf.transpose(tf.reshape(output, [cutoff] * 4), [0, 2, 1, 3]) return output def cubic_phase_matrix( gamma, cutoff, hbar, batched=False, method="self_adjoint_eig", dtype=tf.complex64 ): a, ad = ladder_ops(cutoff) x = np.sqrt(hbar / 2) * tf.cast(a + ad, dtype) x3 = x @ x @ x if batched: x3 = tf.expand_dims(x3, 0) gamma = tf.reshape(gamma, [-1, 1, 1]) H0 = gamma / (3 * hbar) * x3 lambdas, U = tf.linalg.eigh(H0) transpose_list = [1, 0] if batched: transpose_list = [0, 2, 1] if method == "self_adjoint_eig": V = U @ tf.linalg.diag(tf.exp(1j * lambdas)) @ tf.math.conj(tf.transpose(U, transpose_list)) else: raise ValueError("'method' must be either 'self_adjoint_eig' or 'expm'.") return V def loss_superop(T, cutoff, batched=False, dtype=tf.complex64): if not batched: T = tf.expand_dims(T, 0) T = tf.reshape(T, [-1, 1, 1, 1, 1, 1]) T = tf.cast(T, tf.float64) rng = np.arange(cutoff) a = np.reshape(rng, [-1, cutoff, 1, 1, 1, 1]) b = np.reshape(rng, [-1, 1, cutoff, 1, 1, 1]) c = np.reshape(rng, [-1, 1, 1, cutoff, 1, 1]) d = np.reshape(rng, [-1, 1, 1, 1, cutoff, 1]) l = np.reshape(rng, [-1, 1, 1, 1, 1, cutoff]) a_mask = np.where(a == b - l, 1, 0) d_mask = np.where(d == c - l, 1, 0) mask = (l <= np.minimum(b, c)) * a_mask * d_mask exponent = tf.abs((b + c) / 2 - l) * mask T_numer = tf.pow((1 - T), l) * tf.pow(T, exponent) fact_numer = np.sqrt(factorial(b) * factorial(c)) fact_denom = np.sqrt(factorial(b - l) * factorial(c - l)) * factorial(l) fact_denom_masked = np.where(fact_denom > 0, fact_denom, 1) factors = mask * fact_numer / fact_denom_masked l_terms = T_numer * factors output = tf.cast(tf.reduce_sum(l_terms, -1), dtype) if not batched: output = tf.squeeze(output, 0) return output @tf.custom_gradient def single_displacement_matrix(r, phi, cutoff, dtype=tf.complex64.as_numpy_dtype): r = r.numpy() phi = phi.numpy() gate = displacement_tw(r, phi, cutoff, dtype) def grad(dy): Dr, Dphi = grad_displacement_tw(gate, r, phi) grad_r = tf.math.real(tf.reduce_sum(dy * tf.math.conj(Dr))) grad_phi = tf.math.real(tf.reduce_sum(dy * tf.math.conj(Dphi))) return grad_r, grad_phi, None return gate, grad def displacement_matrix(r, 
phi, cutoff, batched=False, dtype=tf.complex64): r = tf.cast(r, dtype) phi = tf.cast(phi, dtype) if batched: return tf.stack( [ single_displacement_matrix(r_, phi_, cutoff, dtype=dtype.as_numpy_dtype) for r_, phi_ in tf.transpose([r, phi]) ] ) return single_displacement_matrix(r, phi, cutoff, dtype=dtype.as_numpy_dtype) @tf.custom_gradient def single_beamsplitter_matrix(theta, phi, cutoff, dtype=tf.complex64.as_numpy_dtype): theta = theta.numpy() phi = phi.numpy() gate = beamsplitter_tw(theta, phi, cutoff, dtype) gate = np.transpose(gate, [0, 2, 1, 3]) def grad(dy): Dtheta, Dphi = grad_beamsplitter_tw(np.transpose(gate, [0, 2, 1, 3]), theta, phi) Dtheta = np.transpose(Dtheta, [0, 2, 1, 3]) Dphi = np.transpose(Dphi, [0, 2, 1, 3]) grad_theta = tf.math.real(tf.reduce_sum(dy * tf.math.conj(Dtheta))) grad_phi = tf.math.real(tf.reduce_sum(dy * tf.math.conj(Dphi))) return grad_theta, grad_phi, None return gate, grad def beamsplitter_matrix(theta, phi, cutoff, batched=False, dtype=tf.complex64): theta = tf.cast(theta, dtype) phi = tf.cast(phi, dtype) if batched: return tf.stack( [ single_beamsplitter_matrix(theta_, phi_, cutoff, dtype=dtype.as_numpy_dtype) for theta_, phi_ in tf.transpose([theta, phi]) ] ) return tf.convert_to_tensor( single_beamsplitter_matrix(theta, phi, cutoff, dtype=dtype.as_numpy_dtype) ) @tf.custom_gradient def single_mzgate_matrix(phi_in, phi_ex, cutoff, dtype=tf.complex64.as_numpy_dtype): phi_in = phi_in.numpy() phi_ex = phi_ex.numpy() gate = mzgate_tw(phi_in, phi_ex, cutoff, dtype) gate = np.transpose(gate, [0, 2, 1, 3]) def grad(dy): Dtheta, Dphi = grad_mzgate_tw(np.transpose(gate, [0, 2, 1, 3]), phi_in, phi_ex) Dtheta = np.transpose(Dtheta, [0, 2, 1, 3]) Dphi = np.transpose(Dphi, [0, 2, 1, 3]) grad_theta = tf.math.real(tf.reduce_sum(dy * tf.math.conj(Dtheta))) grad_phi = tf.math.real(tf.reduce_sum(dy * tf.math.conj(Dphi))) return grad_theta, grad_phi, None return gate, grad def mzgate_matrix(phi_in, phi_ex, cutoff, batched=False, dtype=tf.complex64): phi_in = tf.cast(phi_in, dtype) phi_ex = tf.cast(phi_ex, dtype) if batched: return tf.stack( [ single_mzgate_matrix(phi_in_, phi_ex_, cutoff, dtype=dtype.as_numpy_dtype) for phi_in_, phi_ex_ in tf.transpose([phi_in, phi_ex]) ] ) return tf.convert_to_tensor( single_mzgate_matrix(phi_in, phi_ex, cutoff, dtype=dtype.as_numpy_dtype) ) @tf.custom_gradient def single_two_mode_squeezing_matrix(theta, phi, cutoff, dtype=tf.complex64.as_numpy_dtype): theta = theta.numpy() phi = phi.numpy() gate = two_mode_squeezing_tw(theta, phi, cutoff, dtype) gate = np.transpose(gate, [0, 2, 1, 3]) def grad(dy): Dr, Dphi = grad_two_mode_squeezing_tw(np.transpose(gate, [0, 2, 1, 3]), theta, phi) Dr = np.transpose(Dr, [0, 2, 1, 3]) Dphi = np.transpose(Dphi, [0, 2, 1, 3]) grad_r = tf.math.real(tf.reduce_sum(dy * tf.math.conj(Dr))) grad_phi = tf.math.real(tf.reduce_sum(dy * tf.math.conj(Dphi))) return grad_r, grad_phi, None return gate, grad def two_mode_squeezer_matrix(theta, phi, cutoff, batched=False, dtype=tf.complex64): theta = tf.cast(theta, dtype) phi = tf.cast(phi, dtype) if batched: return tf.stack( [ single_two_mode_squeezing_matrix(theta_, phi_, cutoff, dtype=dtype.as_numpy_dtype) for theta_, phi_ in tf.transpose([theta, phi]) ] ) return tf.convert_to_tensor( single_two_mode_squeezing_matrix(theta, phi, cutoff, dtype=dtype.as_numpy_dtype) ) def fock_state(n, cutoff, pure=True, batched=False, dtype=tf.complex64): if not isinstance(n, (np.ndarray, int)): raise ValueError("'n' is expected to be either an int or a numpy array") if batched: 
batch_size = n.shape[0] idxs = list(zip(range(batch_size), n)) values = [1.0] * batch_size shape = [batch_size, cutoff] else: idxs = [(n,)] values = [1.0] shape = [cutoff] fock_sparse = tf.scatter_nd(idxs, values, shape) fock = tf.cast(fock_sparse, dtype) if not pure: fock = mix(fock, batched) return fock def coherent_state(r, phi, cutoff, pure=True, batched=False, dtype=tf.complex64): alpha = tf.cast(r, dtype) * tf.exp(1j * tf.cast(phi, dtype)) coh = tf.stack( [ tf.cast(tf.exp(-0.5 * tf.abs(r) ** 2), dtype) * _numer_safe_power(alpha, n, dtype) / tf.cast(np.sqrt(factorial(n)), dtype) for n in range(cutoff) ], axis=-1, ) if not pure: coh = mix(coh, batched) return coh def squeezed_vacuum(r, theta, cutoff, pure=True, batched=False, dtype=tf.complex64): squeezed = squeezed_vacuum_vector(r, theta, cutoff, batched=batched, dtype=dtype) if not pure: squeezed = mix(squeezed, batched) return squeezed def displaced_squeezed( r_d, phi_d, r_s, phi_s, cutoff, pure=True, batched=False, eps=1e-12, dtype=tf.complex64 ): alpha = tf.cast(r_d, dtype) * tf.exp(1j * tf.cast(phi_d, dtype)) r_s = ( tf.cast(r_s, dtype) + eps ) phi_s = tf.cast(phi_s, dtype) phase = tf.exp(1j * phi_s) sinh = tf.sinh(r_s) cosh = tf.cosh(r_s) tanh = tf.tanh(r_s) gamma = alpha * cosh + tf.math.conj(alpha) * phase * sinh hermite_arg = gamma / tf.sqrt(phase * tf.sinh(2 * r_s)) prefactor = tf.expand_dims( tf.exp(-0.5 * alpha * tf.math.conj(alpha) - 0.5 * tf.math.conj(alpha) ** 2 * phase * tanh), -1, ) coeff = tf.stack( [ _numer_safe_power(0.5 * phase * tanh, n / 2.0, dtype) / tf.sqrt(factorial(n) * cosh) for n in range(cutoff) ], axis=-1, ) hermite_terms = tf.stack( [tf.cast(H(n, hermite_arg, dtype), dtype) for n in range(cutoff)], axis=-1 ) squeezed_coh = prefactor * coeff * hermite_terms if not pure: squeezed_coh = mix(squeezed_coh, batched) return squeezed_coh def thermal_state(nbar, cutoff, dtype=tf.complex64): nbar = tf.cast(nbar, dtype) coeffs = tf.stack( [ _numer_safe_power(nbar, n, dtype) / _numer_safe_power(nbar + 1, n + 1, dtype) for n in range(cutoff) ], axis=-1, ) thermal = tf.linalg.diag(coeffs) return thermal def single_mode_gate(matrix, mode, in_modes, pure=True, batched=False): if batched: batch_offset = 1 else: batch_offset = 0 batch_index = indices[:batch_offset] left_gate_str = indices[batch_offset : batch_offset + 2] num_indices = len(in_modes.shape) if pure: num_modes = num_indices - batch_offset mode_size = 1 else: right_gate_str = indices[batch_offset + 2 : batch_offset + 4] num_modes = (num_indices - batch_offset) // 2 mode_size = 2 max_len = len(indices) - 2 * mode_size - batch_offset if num_modes == 0: raise ValueError("'in_modes' must have at least one mode") if num_modes > max_len: raise NotImplementedError( "The max number of supported modes for this operation is currently {}".format(max_len) ) if mode < 0 or mode >= num_modes: raise ValueError("'mode' argument is not compatible with number of in_modes") other_modes_indices = indices[ batch_offset + 2 * mode_size : batch_offset + (1 + num_modes) * mode_size ] if pure: eqn_lhs = "{},{}{}{}{}".format( batch_index + left_gate_str, batch_index, other_modes_indices[: mode * mode_size], left_gate_str[1], other_modes_indices[mode * mode_size :], ) eqn_rhs = "".join( [ batch_index, other_modes_indices[: mode * mode_size], left_gate_str[0], other_modes_indices[mode * mode_size :], ] ) else: eqn_lhs = "{},{}{}{}{}{},{}".format( batch_index + left_gate_str, batch_index, other_modes_indices[: mode * mode_size], left_gate_str[1], right_gate_str[0], 
other_modes_indices[mode * mode_size :], batch_index + right_gate_str, ) eqn_rhs = "".join( [ batch_index, other_modes_indices[: mode * mode_size], left_gate_str[0], right_gate_str[1], other_modes_indices[mode * mode_size :], ] ) eqn = eqn_lhs + "->" + eqn_rhs einsum_inputs = [matrix, in_modes] if not pure: transposed_axis = [0, 2, 1] if batched else [1, 0] einsum_inputs.append(tf.transpose(tf.math.conj(matrix), transposed_axis)) output = tf.einsum(eqn, *einsum_inputs) return output def two_mode_gate(matrix, mode1, mode2, in_modes, pure=True, batched=False): if batched: batch_offset = 1 else: batch_offset = 0 batch_index = indices[:batch_offset] left_gate_str = indices[batch_offset : batch_offset + 4] num_indices = len(in_modes.shape) if pure: num_modes = num_indices - batch_offset mode_size = 1 else: right_gate_str = indices[batch_offset + 4 : batch_offset + 8] num_modes = (num_indices - batch_offset) // 2 mode_size = 2 max_len = (len(indices) - 4) // mode_size - batch_offset if num_modes == 0: raise ValueError("'in_modes' must have at least one mode") if num_modes > max_len: raise NotImplementedError( "The max number of supported modes for this operation is currently {}".format(max_len) ) min_mode = min(mode1, mode2) max_mode = max(mode1, mode2) if min_mode < 0 or max_mode >= num_modes or mode1 == mode2: raise ValueError("One or more mode numbers are incompatible") other_modes_indices = indices[ batch_offset + 4 * mode_size : batch_offset + 4 * mode_size + mode_size * (num_modes - 2) ] if mode1 == min_mode: lhs_min_mode_indices = left_gate_str[1] lhs_max_mode_indices = left_gate_str[3] rhs_min_mode_indices = left_gate_str[0] rhs_max_mode_indices = left_gate_str[2] else: lhs_min_mode_indices = left_gate_str[3] lhs_max_mode_indices = left_gate_str[1] rhs_min_mode_indices = left_gate_str[2] rhs_max_mode_indices = left_gate_str[0] if not pure: if mode1 == min_mode: lhs_min_mode_indices += right_gate_str[0] lhs_max_mode_indices += right_gate_str[2] rhs_min_mode_indices += right_gate_str[1] rhs_max_mode_indices += right_gate_str[3] else: lhs_min_mode_indices += right_gate_str[2] lhs_max_mode_indices += right_gate_str[0] rhs_min_mode_indices += right_gate_str[3] rhs_max_mode_indices += right_gate_str[1] eqn_lhs = "{},{}{}{}{}{}{}".format( batch_index + left_gate_str, batch_index, other_modes_indices[: min_mode * mode_size], lhs_min_mode_indices, other_modes_indices[min_mode * mode_size : (max_mode - 1) * mode_size], lhs_max_mode_indices, other_modes_indices[(max_mode - 1) * mode_size :], ) if not pure: eqn_lhs += "," + batch_index + right_gate_str eqn_rhs = "".join( [ batch_index, other_modes_indices[: min_mode * mode_size], rhs_min_mode_indices, other_modes_indices[min_mode * mode_size : (max_mode - 1) * mode_size], rhs_max_mode_indices, other_modes_indices[(max_mode - 1) * mode_size :], ] ) eqn = eqn_lhs + "->" + eqn_rhs einsum_inputs = [matrix, in_modes] if not pure: if batched: transpose_list = [0, 2, 1, 4, 3] else: transpose_list = [1, 0, 3, 2] einsum_inputs.append(tf.math.conj(tf.transpose(matrix, transpose_list))) output = tf.einsum(eqn, *einsum_inputs) return output def single_mode_superop(superop, mode, in_modes, pure=True, batched=False): if batched: batch_offset = 1 else: batch_offset = 0 max_len = (len(indices) - 2) // 2 - batch_offset if pure: num_modes = len(in_modes.shape) - batch_offset else: num_modes = (len(in_modes.shape) - batch_offset) // 2 if num_modes > max_len: raise NotImplementedError( "The max number of supported modes for this operation is currently 
{}".format(max_len) ) if pure: in_modes = mix(in_modes, batched) batch_index = indices[:batch_offset] superop_indices = indices[batch_offset : batch_offset + 4] state_indices = indices[batch_offset + 4 : batch_offset + 4 + 2 * num_modes] left_unchanged_indices = state_indices[: 2 * mode] right_unchanged_indices = state_indices[2 * mode : 2 * (num_modes - 1)] eqn_lhs = ",".join( [ batch_index + superop_indices, batch_index + left_unchanged_indices + superop_indices[1:3] + right_unchanged_indices, ] ) eqn_rhs = "".join( [ batch_index, left_unchanged_indices + superop_indices[0] + superop_indices[3] + right_unchanged_indices, ] ) eqn = "->".join([eqn_lhs, eqn_rhs]) new_state = tf.einsum(eqn, superop, in_modes) return new_state def phase_shifter(theta, mode, in_modes, cutoff, pure=True, batched=False, dtype=tf.complex64): matrix = phase_shifter_matrix(theta, cutoff, batched=batched, dtype=dtype) output = single_mode_gate(matrix, mode, in_modes, pure, batched) return output def displacement(r, phi, mode, in_modes, cutoff, pure=True, batched=False, dtype=tf.complex64): r = tf.cast(r, dtype) phi = tf.cast(phi, dtype) matrix = displacement_matrix(r, phi, cutoff, batched, dtype) output = single_mode_gate(matrix, mode, in_modes, pure, batched) return output def squeezer(r, theta, mode, in_modes, cutoff, pure=True, batched=False, dtype=tf.complex64): r = tf.cast(r, dtype) theta = tf.cast(theta, dtype) matrix = squeezer_matrix(r, theta, cutoff, batched, dtype) output = single_mode_gate(matrix, mode, in_modes, pure, batched) return output def kerr_interaction(kappa, mode, in_modes, cutoff, pure=True, batched=False): matrix = kerr_interaction_matrix(kappa, cutoff, batched) output = single_mode_gate(matrix, mode, in_modes, pure, batched) return output def cross_kerr_interaction(kappa, mode1, mode2, in_modes, cutoff, pure=True, batched=False): matrix = cross_kerr_interaction_matrix(kappa, cutoff, batched) output = two_mode_gate(matrix, mode1, mode2, in_modes, pure, batched) return output def cubic_phase( gamma, mode, in_modes, cutoff, hbar=2, pure=True, batched=False, method="self_adjoint_eig", dtype=tf.complex64, ): matrix = cubic_phase_matrix(gamma, cutoff, hbar, batched, method=method, dtype=dtype) output = single_mode_gate(matrix, mode, in_modes, pure, batched) return output
Apache License 2.0
pyglet/pyglet
pyglet/media/drivers/xaudio2/interface.py
XA2SourceVoice.cone_outside_volume
python
def cone_outside_volume(self):
    if self.is_emitter:
        return self._emitter.pCone.contents.OuterVolume
    else:
        return 0
The volume scaler of the sound beyond the outer cone.
https://github.com/pyglet/pyglet/blob/b9a63ea179735c8f252ac31d51751bdf8a741c9d/pyglet/media/drivers/xaudio2/interface.py#L529-L534
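An illustrative sketch, not part of pyglet: it assumes a Windows host where XAudio2 initializes, and uses source and player as stand-ins for a real mono pyglet Source and its Player. Only mono formats get a 3D emitter, so stereo voices report a cone_outside_volume of 0.

driver = XAudio2Driver()
voice = driver.get_source_voice(source, player)    # source/player are hypothetical placeholders

if voice.is_emitter:
    voice.set_cone_angles(1.0, 4.0)                 # inner/outer cone angles
    print(voice.cone_angles)                        # _ConeAngles(inside=1.0, outside=4.0)
    print(voice.cone_outside_volume)                # volume scaler beyond the outer cone
else:
    print("No emitter: cone_outside_volume is", voice.cone_outside_volume)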
import weakref from collections import namedtuple, defaultdict import pyglet from pyglet.libs.win32.types import * from pyglet.util import debug_print from pyglet.media.devices import get_audio_device_manager from . import lib_xaudio2 as lib _debug = debug_print('debug_media') class XAudio2Driver: allow_3d = True processor = lib.XAUDIO2_DEFAULT_PROCESSOR category = lib.AudioCategory_GameEffects restart_on_error = True max_frequency_ratio = 2.0 def __init__(self): assert _debug('Constructing XAudio2Driver') self._listener = None self._xaudio2 = None self._dead = False self._emitting_voices = [] self._voice_pool = defaultdict(list) self._in_use = [] self._players = [] self._create_xa2() if self.restart_on_error: audio_devices = get_audio_device_manager() if audio_devices: assert _debug('Audio device instance found.') audio_devices.push_handlers(self) if audio_devices.get_default_output() is None: raise ImportError("No default audio device found, can not create driver.") pyglet.clock.schedule_interval_soft(self._check_state, 0.5) def _check_state(self, dt): if self._dead is True: if self._xaudio2: self._shutdown_xaudio2() else: if not self._xaudio2: self._create_xa2() for player in self._players: player.dispatch_event('on_driver_reset') self._players.clear() def on_default_changed(self, device): if device is None: assert _debug('Error: Default audio device was removed or went missing.') self._dead = True else: if self._dead: assert _debug('Warning: Default audio device added after going missing.') self._dead = False def _create_xa2(self, device_id=None): self._xaudio2 = lib.IXAudio2() try: lib.XAudio2Create(ctypes.byref(self._xaudio2), 0, self.processor) except OSError: raise ImportError("XAudio2 driver could not be initialized.") if _debug: debug = lib.XAUDIO2_DEBUG_CONFIGURATION() debug.LogThreadID = True debug.TraceMask = lib.XAUDIO2_LOG_ERRORS | lib.XAUDIO2_LOG_WARNINGS debug.BreakMask = lib.XAUDIO2_LOG_WARNINGS self._xaudio2.SetDebugConfiguration(ctypes.byref(debug), None) self._master_voice = lib.IXAudio2MasteringVoice() self._xaudio2.CreateMasteringVoice(byref(self._master_voice), lib.XAUDIO2_DEFAULT_CHANNELS, lib.XAUDIO2_DEFAULT_SAMPLERATE, 0, device_id, None, self.category) if self.allow_3d: self.enable_3d() @property def active_voices(self): return self._in_use @property def pooled_voices(self): return [voice for voices in self._voice_pool.values() for voice in voices] @property def all_voices(self): return self.active_voices + self.all_voices def clear_pool(self): for voice in self.pooled_voices: voice.destroy() for voice_key in self._voice_pool: self._voice_pool[voice_key].clear() def clear_active(self): for voice in self._in_use: voice.destroy() self._in_use.clear() def set_device(self, device): self._shutdown_xaudio2() self._create_xa2(device.id) for player in self._players: player.dispatch_event('on_driver_reset') self._players.clear() def _shutdown_xaudio2(self): for voice in self.active_voices: voice.player.on_driver_destroy() self._players.append(voice.player.player) self._delete_driver() def _delete_driver(self): if self._xaudio2: if self.allow_3d: pyglet.clock.unschedule(self._calculate_3d_sources) self.clear_pool() self.clear_active() self._xaudio2.StopEngine() self._xaudio2.Release() self._xaudio2 = None def enable_3d(self): channel_mask = DWORD() self._master_voice.GetChannelMask(byref(channel_mask)) self._x3d_handle = lib.X3DAUDIO_HANDLE() lib.X3DAudioInitialize(channel_mask.value, lib.X3DAUDIO_SPEED_OF_SOUND, self._x3d_handle) self._mvoice_details = 
lib.XAUDIO2_VOICE_DETAILS() self._master_voice.GetVoiceDetails(byref(self._mvoice_details)) matrix = (FLOAT * self._mvoice_details.InputChannels)() self._dsp_settings = lib.X3DAUDIO_DSP_SETTINGS() self._dsp_settings.SrcChannelCount = 1 self._dsp_settings.DstChannelCount = self._mvoice_details.InputChannels self._dsp_settings.pMatrixCoefficients = matrix pyglet.clock.schedule_interval_soft(self._calculate_3d_sources, 1 / 15.0) @property def volume(self): vol = c_float() self._master_voice.GetVolume(ctypes.byref(vol)) return vol.value @volume.setter def volume(self, value): self._master_voice.SetVolume(value, 0) def _calculate_3d_sources(self, dt): for source_voice in self._emitting_voices: self.apply3d(source_voice) self._xaudio2.CommitChanges(0) def _calculate3d(self, listener, emitter): lib.X3DAudioCalculate( self._x3d_handle, listener, emitter, lib.default_dsp_calculation, self._dsp_settings ) def _apply3d(self, voice, commit): voice.SetOutputMatrix(self._master_voice, 1, self._mvoice_details.InputChannels, self._dsp_settings.pMatrixCoefficients, commit) voice.SetFrequencyRatio(self._dsp_settings.DopplerFactor, commit) def apply3d(self, source_voice, commit=1): self._calculate3d(self._listener.listener, source_voice._emitter) self._apply3d(source_voice._voice, commit) def __del__(self): try: self._delete_driver() pyglet.clock.unschedule(self._check_state) except AttributeError: pass def get_performance(self): pf = lib.XAUDIO2_PERFORMANCE_DATA() self._xaudio2.GetPerformanceData(ctypes.byref(pf)) return pf def create_listener(self): assert self._listener is None, "You can only create one listener." self._listener = XAudio2Listener(self) return self._listener def get_source_voice(self, source, player): voice_key = (source.audio_format.channels, source.audio_format.sample_size) if len(self._voice_pool[voice_key]) > 0: source_voice = self._voice_pool[voice_key].pop(0) source_voice.acquired(player) else: source_voice = self._get_voice(source, player) if source_voice.is_emitter: self._emitting_voices.append(source_voice) self._in_use.append(source_voice) return source_voice def _create_new_voice(self, source, player): voice = lib.IXAudio2SourceVoice() wfx_format = self.create_wave_format(source.audio_format) callback = lib.XA2SourceCallback(player) self._xaudio2.CreateSourceVoice(ctypes.byref(voice), ctypes.byref(wfx_format), 0, self.max_frequency_ratio, callback, None, None) return voice, callback def _get_voice(self, source, player): voice, callback = self._create_new_voice(source, player) return XA2SourceVoice(voice, callback, source.audio_format) def return_voice(self, voice): voice.reset() voice_key = (voice.audio_format.channels, voice.audio_format.sample_size) self._voice_pool[voice_key].append(voice) if voice.is_emitter: self._emitting_voices.remove(voice) @staticmethod def create_buffer(audio_data): if type(audio_data.data) == bytes: data = (ctypes.c_char * audio_data.length)() ctypes.memmove(data, audio_data.data, audio_data.length) else: data = audio_data.data buff = lib.XAUDIO2_BUFFER() buff.AudioBytes = audio_data.length buff.pAudioData = data return buff @staticmethod def create_wave_format(audio_format): wfx = lib.WAVEFORMATEX() wfx.wFormatTag = lib.WAVE_FORMAT_PCM wfx.nChannels = audio_format.channels wfx.nSamplesPerSec = audio_format.sample_rate wfx.wBitsPerSample = audio_format.sample_size wfx.nBlockAlign = wfx.wBitsPerSample * wfx.nChannels // 8 wfx.nAvgBytesPerSec = wfx.nSamplesPerSec * wfx.nBlockAlign return wfx class XA2SourceVoice: def __init__(self, voice, callback, 
audio_format): self._voice_state = lib.XAUDIO2_VOICE_STATE() self._voice = voice self._callback = callback self.audio_format = audio_format if audio_format is not None and audio_format.channels == 1: self._emitter = lib.X3DAUDIO_EMITTER() self._emitter.ChannelCount = audio_format.channels self._emitter.CurveDistanceScaler = 1.0 cone = lib.X3DAUDIO_CONE() cone.InnerVolume = 1.0 self._emitter.pCone = pointer(cone) self._emitter.pVolumeCurve = None else: self._emitter = None @property def player(self): return self._callback.xa2_player def delete(self): self._emitter = None self._voice.Stop(0, 0) self._voice.FlushSourceBuffers() self._voice = None self._callback.xa2_player = None def __del__(self): self.destroy() def destroy(self): self._emitter = None if self._voice is not None: try: self._voice.Stop(0, 0) self._voice.FlushSourceBuffers() self._voice.DestroyVoice() except TypeError: pass self._voice = None self._callback = None def acquired(self, player): self._callback.xa2_player = player def reset(self): if self._emitter is not None: self.position = (0, 0, 0) self._voice.Stop(0, 0) self._voice.FlushSourceBuffers() self._callback.xa2_player = None @property def buffers_queued(self): self._voice.GetState(ctypes.byref(self._voice_state), lib.XAUDIO2_VOICE_NOSAMPLESPLAYED) return self._voice_state.BuffersQueued @property def volume(self): vol = c_float() self._voice.GetVolume(ctypes.byref(vol)) return vol.value @volume.setter def volume(self, value): self._voice.SetVolume(value, 0) @property def is_emitter(self): return self._emitter is not None @property def position(self): if self.is_emitter: return self._emitter.Position.x, self._emitter.Position.y, self._emitter.Position.z else: return 0, 0, 0 @position.setter def position(self, position): if self.is_emitter: x, y, z = position self._emitter.Position.x = x self._emitter.Position.y = y self._emitter.Position.z = z @property def min_distance(self): if self.is_emitter: return self._emitter.CurveDistanceScaler else: return 0 @min_distance.setter def min_distance(self, value): if self.is_emitter: if self._emitter.CurveDistanceScaler != value: self._emitter.CurveDistanceScaler = min(value, lib.FLT_MAX) @property def frequency(self): value = c_float() self._voice.GetFrequencyRatio(byref(value)) return value.value @frequency.setter def frequency(self, value): if self.frequency == value: return self._voice.SetFrequencyRatio(value, 0) @property def cone_orientation(self): if self.is_emitter: return self._emitter.OrientFront.x, self._emitter.OrientFront.y, self._emitter.OrientFront.z else: return 0, 0, 0 @cone_orientation.setter def cone_orientation(self, value): if self.is_emitter: x, y, z = value self._emitter.OrientFront.x = x self._emitter.OrientFront.y = y self._emitter.OrientFront.z = z _ConeAngles = namedtuple('_ConeAngles', ['inside', 'outside']) @property def cone_angles(self): if self.is_emitter: return self._ConeAngles(self._emitter.pCone.contents.InnerAngle, self._emitter.pCone.contents.OuterAngle) else: return self._ConeAngles(0, 0) def set_cone_angles(self, inside, outside): if self.is_emitter: self._emitter.pCone.contents.InnerAngle = inside self._emitter.pCone.contents.OuterAngle = outside @property
BSD 3-Clause New or Revised License
sassoftware/python-pipefitter
pipefitter/estimator/regression.py
LogisticRegression.fit
python
def fit(self, table, *args, **kwargs):
    params = self.get_combined_params(*args, **kwargs)
    return self._get_super(table).fit(table, **params)
Fit function for logistic regression

Parameters
----------
*args : dicts or two-element tuples or consecutive key/value pairs, optional
    The following types are allowed:

    * Dictionaries contain key/value pairs of parameters.
    * Two-element tuples must contain the name of the parameter in the
      first element and the value in the second element.
    * Consecutive key/value pairs are also allowed.

**kwargs : keyword arguments, optional
    These keyword arguments are the same as on the constructor.

Examples
--------
>>> log = LogisticRegression(target='Origin',
...                          inputs=['MPG_City', 'MPG_Highway', 'Length',
...                                  'Weight', 'Type', 'Cylinders'],
...                          nominals = ['Type', 'Cylinders', 'Origin'])
>>> model = log.fit(training_data)

Returns
-------
:class:`LogisticRegressionModel`
https://github.com/sassoftware/python-pipefitter/blob/d3199b72dfd66729753da50e9e15eb303361ee8a/pipefitter/estimator/regression.py#L96-L125
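A hedged sketch of the calling conventions the docstring describes; training_data stands in for a data table bound to a supported backend and is not defined here.

log = LogisticRegression(target='Origin',
                         inputs=['MPG_City', 'MPG_Highway', 'Length',
                                 'Weight', 'Type', 'Cylinders'],
                         nominals=['Type', 'Cylinders', 'Origin'])

# The parameter forms below are equivalent ways to override `selection` at fit time:
model = log.fit(training_data, {'selection': 'forward'})    # dict of parameters
model = log.fit(training_data, ('selection', 'forward'))    # two-element tuple
model = log.fit(training_data, 'selection', 'forward')      # consecutive key/value pair
model = log.fit(training_data, selection='forward')         # keyword argument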
from __future__ import print_function, division, absolute_import, unicode_literals import functools from ..base import BaseEstimator, BaseModel from ..utils.params import (param_def, check_int, check_string, check_boolean, check_float, check_variable, check_variable_list) class LogisticRegression(BaseEstimator): param_defs = dict( intercept=param_def(True, check_boolean), max_effects=param_def(0, functools.partial(check_int, minimum=0)), selection=param_def('none', functools.partial(check_string, valid_values=['none', 'backward', 'forward', 'stepwise'])), sig_level=param_def(0.05, functools.partial(check_float, minimum=0.0, maximum=1.0)), criterion=param_def(None, functools.partial(check_string, allow_none=True, valid_values=['sl', 'aic', 'aicc', 'sbc'])), target=param_def(None, check_variable), nominals=param_def(None, check_variable_list), inputs=param_def(None, check_variable_list), ) def __init__(self, intercept=True, max_effects=0, selection='none', sig_level=0.05, criterion=None, target=None, nominals=None, inputs=None): BaseEstimator.__init__(self, intercept=intercept, max_effects=max_effects, selection=selection, sig_level=sig_level, criterion=criterion, target=target, nominals=nominals, inputs=inputs) if self.params['criterion'] == 'sl' and self.params['selection'] in ['backward', 'lasso']: raise ValueError("criterion='sl' is not valid with " "selection='backward' | 'lasso'")
Apache License 2.0
pybricks/pybricksdev
pybricksdev/ble/lwp3/messages.py
ErrorMessage.code
python
def code(self) -> ErrorCode:
    return ErrorCode(self._data[4])
Gets an error code describing the error.
https://github.com/pybricks/pybricksdev/blob/3a89ec32d0abc484b898ca4fd8b4b3079112aa70/pybricksdev/ble/lwp3/messages.py#L593-L595
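A minimal sketch, not from the library, of constructing and inspecting an error message; the ErrorCode member used below is assumed to exist in the bytecodes module.

# Hub reported that a HUB_PROPERTY request could not be handled (assumed error code).
msg = ErrorMessage(MessageKind.HUB_PROPERTY, ErrorCode.INVALID_USE)

assert msg.length == 5 and msg.kind == MessageKind.ERROR
print(msg.command)   # MessageKind.HUB_PROPERTY - the command that triggered the error
print(msg.code)      # the ErrorCode describing the failure
raw = bytes(msg)     # 5-byte payload as it would appear on the LWP3 characteristic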
import abc import struct from enum import IntEnum from typing import Any, Dict, List, NamedTuple, Optional, Tuple, Type, Union, overload from .bytecodes import ( Feedback, MAX_NAME_SIZE, AlertKind, AlertOperation, AlertStatus, BatteryKind, BluetoothAddress, DataFormat, EndInfo, ErrorCode, HubAction, HubKind, HubProperty, HubPropertyOperation, HwNetCmd, HwNetExtFamily, HwNetFamily, HwNetSubfamily, InfoKind, IODeviceCapabilities, IODeviceKind, IODeviceMapping, IOEvent, LastNetwork, LWPVersion, MessageKind, ModeCapabilities, ModeInfoKind, PortID, PortInfoFormatSetupCommand, PortOutputCommand, StartInfo, Version, VirtualPortSetupCommand, ) from ...tools.checksum import xor_bytes class AbstractMessage(abc.ABC): @abc.abstractmethod def __init__(self, length: int, kind: MessageKind) -> None: super().__init__() if not isinstance(length, int): raise TypeError("length must be int") if not isinstance(kind, MessageKind): raise TypeError("kind must be MessageKind") self._data = bytearray(length) self._data[0] = length self._data[2] = kind def __bytes__(self) -> bytes: return bytes(self._data) @property def length(self) -> int: return self._data[0] @property def kind(self) -> MessageKind: return MessageKind(self._data[2]) def __repr__(self) -> str: return f"{self.__class__.__name__}()" class AbstractHubPropertyMessage(AbstractMessage): @abc.abstractmethod def __init__( self, length: int, prop: HubProperty, op: HubPropertyOperation ) -> None: super().__init__(length, MessageKind.HUB_PROPERTY) if not isinstance(prop, HubProperty): raise TypeError("prop must be HubProperty") if op not in _HUB_PROPERTY_OPS_MAP[prop]: raise ValueError(f"cannot perform {op} on {prop}") if not isinstance(op, HubPropertyOperation): raise TypeError("op must be HubPropertyOperation") self._data[3] = prop self._data[4] = op @property def prop(self) -> HubProperty: return HubProperty(self._data[3]) @property def op(self) -> HubPropertyOperation: return HubPropertyOperation(self._data[4]) def __repr__(self) -> str: return f"{self.__class__.__name__}({repr(self.prop)})" class _HubPropertyType(NamedTuple): type: type fmt: str max_size: Optional[int] = None _HUB_PROPERTY_TYPE_MAP = { HubProperty.NAME: _HubPropertyType(str, "s", MAX_NAME_SIZE), HubProperty.BUTTON: _HubPropertyType(bool, "?"), HubProperty.FW_VERSION: _HubPropertyType(Version, "i"), HubProperty.HW_VERSION: _HubPropertyType(Version, "i"), HubProperty.RSSI: _HubPropertyType(int, "b"), HubProperty.BATTERY_VOLTAGE: _HubPropertyType(int, "B"), HubProperty.BATTERY_KIND: _HubPropertyType(BatteryKind, "B"), HubProperty.MFG_NAME: _HubPropertyType(str, "s", 15), HubProperty.RADIO_FW_VERSION: _HubPropertyType(str, "s", 15), HubProperty.LWP_VERSION: _HubPropertyType(LWPVersion, "H"), HubProperty.HUB_KIND: _HubPropertyType(HubKind, "B"), HubProperty.HW_NET_ID: _HubPropertyType(LastNetwork, "B"), HubProperty.BDADDR: _HubPropertyType(BluetoothAddress, "6s"), HubProperty.BOOTLOADER_BDADDR: _HubPropertyType(BluetoothAddress, "6s"), HubProperty.HW_NET_FAMILY: _HubPropertyType(HwNetFamily, "B"), HubProperty.VOLUME: _HubPropertyType(int, "B"), } Op = HubPropertyOperation _HUB_PROPERTY_OPS_MAP = { HubProperty.NAME: [ Op.SET, Op.ENABLE_UPDATES, Op.DISABLE_UPDATES, Op.RESET, Op.REQUEST_UPDATE, Op.UPDATE, ], HubProperty.BUTTON: [ Op.ENABLE_UPDATES, Op.DISABLE_UPDATES, Op.REQUEST_UPDATE, Op.UPDATE, ], HubProperty.FW_VERSION: [Op.REQUEST_UPDATE, Op.UPDATE], HubProperty.HW_VERSION: [Op.REQUEST_UPDATE, Op.UPDATE], HubProperty.RSSI: [ Op.ENABLE_UPDATES, Op.DISABLE_UPDATES, Op.REQUEST_UPDATE, 
Op.UPDATE, ], HubProperty.BATTERY_VOLTAGE: [ Op.ENABLE_UPDATES, Op.DISABLE_UPDATES, Op.REQUEST_UPDATE, Op.UPDATE, ], HubProperty.BATTERY_KIND: [Op.REQUEST_UPDATE, Op.UPDATE], HubProperty.MFG_NAME: [Op.REQUEST_UPDATE, Op.UPDATE], HubProperty.RADIO_FW_VERSION: [Op.REQUEST_UPDATE, Op.UPDATE], HubProperty.LWP_VERSION: [Op.REQUEST_UPDATE, Op.UPDATE], HubProperty.HUB_KIND: [Op.REQUEST_UPDATE, Op.UPDATE], HubProperty.HW_NET_ID: [Op.SET, Op.RESET, Op.REQUEST_UPDATE, Op.UPDATE], HubProperty.BDADDR: [Op.REQUEST_UPDATE, Op.UPDATE], HubProperty.BOOTLOADER_BDADDR: [Op.REQUEST_UPDATE, Op.UPDATE], HubProperty.HW_NET_FAMILY: [Op.SET, Op.REQUEST_UPDATE, Op.UPDATE], HubProperty.VOLUME: [ Op.SET, Op.ENABLE_UPDATES, Op.DISABLE_UPDATES, Op.RESET, Op.REQUEST_UPDATE, Op.UPDATE, ], } del Op class AbstractHubPropertyValueMessage(AbstractHubPropertyMessage): _MAX_VALUE_SIZE = 15 @abc.abstractmethod def __init__(self, prop: HubProperty, op: HubPropertyOperation, value: Any) -> None: super().__init__(5 + self._MAX_VALUE_SIZE, prop, op) meta = _HUB_PROPERTY_TYPE_MAP[self.prop] if not isinstance(value, meta.type): raise TypeError( f"expecting value of type {meta.type} but received {type(value)}" ) if meta.max_size is None: fmt = meta.fmt else: if isinstance(value, str): value = value.encode() if len(value) > meta.max_size: raise ValueError("length of value is too long") fmt = f"{len(value)}{meta.fmt}" self._data[0] = 5 + struct.calcsize(fmt) self._data = memoryview(self._data)[: self.length] struct.pack_into(fmt, self._data, 5, value) @property def value(self) -> Any: meta = _HUB_PROPERTY_TYPE_MAP[self.prop] if meta.max_size is None: fmt = meta.fmt else: fmt = f"{self.length - 5}{meta.fmt}" (result,) = struct.unpack_from(fmt, self._data, 5) if meta.type == str: return result.decode().strip("\0") return meta.type(result) def __repr__(self) -> str: return f"{self.__class__.__name__}({repr(self.prop)}, {repr(self.value)})" class HubPropertySet(AbstractHubPropertyValueMessage): def __init__(self, prop: HubProperty, value: Any) -> None: super().__init__(prop, HubPropertyOperation.SET, value) class HubPropertyEnableUpdates(AbstractHubPropertyMessage): def __init__(self, prop: HubProperty) -> None: super().__init__(5, prop, HubPropertyOperation.ENABLE_UPDATES) class HubPropertyDisableUpdates(AbstractHubPropertyMessage): def __init__(self, prop: HubProperty) -> None: super().__init__(5, prop, HubPropertyOperation.DISABLE_UPDATES) class HubPropertyReset(AbstractHubPropertyMessage): def __init__(self, prop: HubProperty) -> None: super().__init__(5, prop, HubPropertyOperation.RESET) class HubPropertyRequestUpdate(AbstractHubPropertyMessage): def __init__(self, prop: HubProperty) -> None: super().__init__(5, prop, HubPropertyOperation.REQUEST_UPDATE) class HubPropertyUpdate(AbstractHubPropertyValueMessage): def __init__(self, prop: HubProperty, value: Any) -> None: super().__init__(prop, HubPropertyOperation.UPDATE, value) class HubActionMessage(AbstractMessage): def __init__(self, action: HubAction) -> None: super().__init__(4, MessageKind.HUB_ACTION) self._data[3] = action @property def action(self) -> HubAction: return HubAction(self._data[3]) def __repr__(self) -> str: return f"{self.__class__.__name__}({repr(self.action)})" class AbstractHubAlertMessage(AbstractMessage): @abc.abstractmethod def __init__(self, length: int, alert: AlertKind, op: AlertOperation) -> None: super().__init__(length, MessageKind.HUB_ALERT) self._data[3] = alert self._data[4] = op @property def alert(self) -> AlertKind: return AlertKind(self._data[3]) 
@property def op(self) -> AlertOperation: return AlertOperation(self._data[4]) def __repr__(self) -> str: return f"{self.__class__.__name__}({repr(self.alert)})" class HubAlertEnableUpdatesMessage(AbstractHubAlertMessage): def __init__(self, alert: AlertKind) -> None: super().__init__(5, alert, AlertOperation.ENABLE_UPDATES) class HubAlertDisableUpdatesMessage(AbstractHubAlertMessage): def __init__(self, alert: AlertKind) -> None: super().__init__(5, alert, AlertOperation.DISABLE_UPDATES) class HubAlertRequestUpdateMessage(AbstractHubAlertMessage): def __init__(self, alert: AlertKind) -> None: super().__init__(5, alert, AlertOperation.REQUEST_UPDATE) class HubAlertUpdateMessage(AbstractHubAlertMessage): def __init__(self, alert: AlertKind, status: AlertStatus) -> None: super().__init__(6, alert, AlertOperation.UPDATE) self._data[5] = status @property def status(self) -> AlertStatus: return AlertStatus(self._data[5]) def __repr__(self) -> str: return f"{self.__class__.__name__}({repr(self.alert)}, {repr(self.status)})" class AbstractHubAttachedIOMessage(AbstractMessage): @abc.abstractmethod def __init__(self, length: int, port: PortID, event: IOEvent) -> None: super().__init__(length, MessageKind.HUB_ATTACHED_IO) self._data[3] = port self._data[4] = event @property def port(self) -> PortID: return PortID(self._data[3]) @property def event(self) -> IOEvent: return IOEvent(self._data[4]) class HubIODetachedMessage(AbstractHubAttachedIOMessage): def __init__(self, port: PortID) -> None: super().__init__(5, port, IOEvent.DETACHED) def __repr__(self) -> str: return f"{self.__class__.__name__}({repr(self.port)})" class HubIOAttachedMessage(AbstractHubAttachedIOMessage): def __init__( self, port: PortID, device: IODeviceKind, hw_ver: Version, fw_ver: Version ) -> None: super().__init__(15, port, IOEvent.ATTACHED) struct.pack_into("<Hii", self._data, 5, device, hw_ver, fw_ver) @property def device(self) -> IODeviceKind: (result,) = struct.unpack_from("<H", self._data, 5) return IODeviceKind(result) @property def hw_ver(self) -> Version: (result,) = struct.unpack_from("<i", self._data, 7) return Version(result) @property def fw_ver(self) -> Version: (result,) = struct.unpack_from("<i", self._data, 11) return Version(result) def __repr__(self) -> str: return f"{self.__class__.__name__}({repr(self.port)}, {repr(self.device)}, {repr(self.hw_ver)}, {repr(self.fw_ver)})" class HubIOAttachedVirtualMessage(AbstractHubAttachedIOMessage): def __init__( self, port: PortID, device: IODeviceKind, port_a: PortID, port_b: PortID ) -> None: super().__init__(9, port, IOEvent.ATTACHED_VIRTUAL) struct.pack_into("<HBB", self._data, 5, device, port_a, port_b) @property def device(self) -> IODeviceKind: (result,) = struct.unpack_from("<H", self._data, 5) return IODeviceKind(result) @property def port_a(self) -> Version: return PortID(self._data[7]) @property def port_b(self) -> Version: return PortID(self._data[8]) def __repr__(self) -> str: return f"{self.__class__.__name__}({repr(self.port)}, {repr(self.device)}, {repr(self.port_a)}, {repr(self.port_b)})" class ErrorMessage(AbstractMessage): def __init__(self, command: MessageKind, code: ErrorCode) -> None: super().__init__(5, MessageKind.ERROR) self._data[3] = command self._data[4] = code @property def command(self) -> MessageKind: return MessageKind(self._data[3]) @property
MIT License
opennetworkingfoundation/tapi
RI/flask_server/tapi_server/models/tapi_oam_oam_service.py
TapiOamOamService.__init__
python
def __init__(self, operational_state=None, lifecycle_state=None, administrative_state=None,
             name=None, uuid=None, layer_protocol_name=None, meg_level=None, direction=None,
             oam_profile=None, end_point=None, meg=None):
    self.openapi_types = {
        'operational_state': TapiCommonOperationalState,
        'lifecycle_state': TapiCommonLifecycleState,
        'administrative_state': TapiCommonAdministrativeState,
        'name': List[TapiCommonNameAndValue],
        'uuid': str,
        'layer_protocol_name': TapiCommonLayerProtocolName,
        'meg_level': int,
        'direction': TapiCommonForwardingDirection,
        'oam_profile': TapiOamOamProfileRef,
        'end_point': List[TapiOamOamServiceEndPoint],
        'meg': TapiOamMegRef
    }

    self.attribute_map = {
        'operational_state': 'operational-state',
        'lifecycle_state': 'lifecycle-state',
        'administrative_state': 'administrative-state',
        'name': 'name',
        'uuid': 'uuid',
        'layer_protocol_name': 'layer-protocol-name',
        'meg_level': 'meg-level',
        'direction': 'direction',
        'oam_profile': 'oam-profile',
        'end_point': 'end-point',
        'meg': 'meg'
    }

    self._operational_state = operational_state
    self._lifecycle_state = lifecycle_state
    self._administrative_state = administrative_state
    self._name = name
    self._uuid = uuid
    self._layer_protocol_name = layer_protocol_name
    self._meg_level = meg_level
    self._direction = direction
    self._oam_profile = oam_profile
    self._end_point = end_point
    self._meg = meg
TapiOamOamService - a model defined in OpenAPI

:param operational_state: The operational_state of this TapiOamOamService.  # noqa: E501
:type operational_state: TapiCommonOperationalState
:param lifecycle_state: The lifecycle_state of this TapiOamOamService.  # noqa: E501
:type lifecycle_state: TapiCommonLifecycleState
:param administrative_state: The administrative_state of this TapiOamOamService.  # noqa: E501
:type administrative_state: TapiCommonAdministrativeState
:param name: The name of this TapiOamOamService.  # noqa: E501
:type name: List[TapiCommonNameAndValue]
:param uuid: The uuid of this TapiOamOamService.  # noqa: E501
:type uuid: str
:param layer_protocol_name: The layer_protocol_name of this TapiOamOamService.  # noqa: E501
:type layer_protocol_name: TapiCommonLayerProtocolName
:param meg_level: The meg_level of this TapiOamOamService.  # noqa: E501
:type meg_level: int
:param direction: The direction of this TapiOamOamService.  # noqa: E501
:type direction: TapiCommonForwardingDirection
:param oam_profile: The oam_profile of this TapiOamOamService.  # noqa: E501
:type oam_profile: TapiOamOamProfileRef
:param end_point: The end_point of this TapiOamOamService.  # noqa: E501
:type end_point: List[TapiOamOamServiceEndPoint]
:param meg: The meg of this TapiOamOamService.  # noqa: E501
:type meg: TapiOamMegRef
https://github.com/opennetworkingfoundation/tapi/blob/1f3fd9483d5674552c5a31206c97399c8c151897/RI/flask_server/tapi_server/models/tapi_oam_oam_service.py#L30-L94
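A hedged construction example; the enum members, the NameAndValue keyword names, and the to_dict() call are assumptions about the generated tapi_server classes rather than details taken from the source above.

svc = TapiOamOamService(
    uuid='oam-svc-0001',                                        # hypothetical identifier
    meg_level=3,
    layer_protocol_name=TapiCommonLayerProtocolName.ETH,        # assumed enum member
    direction=TapiCommonForwardingDirection.BIDIRECTIONAL,      # assumed enum member
    name=[TapiCommonNameAndValue(value_name='SERVICE_NAME',     # assumed keyword names
                                 value='oam-1')],
)
body = svc.to_dict()   # attribute_map renames meg_level to 'meg-level', etc. (assumed base-class helper)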
from __future__ import absolute_import from datetime import date, datetime from typing import List, Dict from tapi_server.models.base_model_ import Model from tapi_server.models.tapi_common_admin_state_pac import TapiCommonAdminStatePac from tapi_server.models.tapi_common_administrative_state import TapiCommonAdministrativeState from tapi_server.models.tapi_common_forwarding_direction import TapiCommonForwardingDirection from tapi_server.models.tapi_common_global_class import TapiCommonGlobalClass from tapi_server.models.tapi_common_layer_protocol_name import TapiCommonLayerProtocolName from tapi_server.models.tapi_common_lifecycle_state import TapiCommonLifecycleState from tapi_server.models.tapi_common_name_and_value import TapiCommonNameAndValue from tapi_server.models.tapi_common_operational_state import TapiCommonOperationalState from tapi_server.models.tapi_oam_meg_ref import TapiOamMegRef from tapi_server.models.tapi_oam_oam_constraint import TapiOamOamConstraint from tapi_server.models.tapi_oam_oam_profile_ref import TapiOamOamProfileRef from tapi_server.models.tapi_oam_oam_service_end_point import TapiOamOamServiceEndPoint from tapi_server import util class TapiOamOamService(Model):
Apache License 2.0
liberai/nspm
gsoc/zheyuan/pipeline/paraphrase_questions.py
paraphrase_questions
python
def paraphrase_questions(tokenizer, device, model, sentence):
    sentence = sentence.replace("<A>", "XYZ")
    text = "paraphrase: " + sentence + " </s>"
    max_len = 256
    encoding = tokenizer.encode_plus(text, pad_to_max_length=True, return_tensors="pt")
    input_ids, attention_masks = encoding["input_ids"].to(device), encoding["attention_mask"].to(device)

    beam_outputs = model.generate(
        input_ids=input_ids,
        attention_mask=attention_masks,
        do_sample=True,
        max_length=256,
        top_k=120,
        top_p=0.98,
        early_stopping=True,
        num_return_sequences=10
    )

    print("\nOriginal Question ::")
    print(sentence)
    print("\n")
    print("Paraphrased Questions :: ")
    final_outputs = []
    for beam_output in beam_outputs:
        sent = tokenizer.decode(beam_output, skip_special_tokens=True, clean_up_tokenization_spaces=True)
        if sent.replace("?", " ?").lower() != sentence.lower() and sent.replace("?", " ?") not in final_outputs:
            if has_NNP(sent.replace("?", " ?"), count_NNP(sent.replace("?", " ?"))):
                sent = re.sub('XYZ', '<A>', sent, flags=re.IGNORECASE)
                final_outputs.append(sent.replace("?", " ?"))
            else:
                print("******************", sent.replace("?", " ?"))
    sentence = sentence.replace("XYZ", "<A>")

    return final_outputs
@param tokenizer: Tokenizer in charge of preparing the inputs for the model
@param device: Device the model will be run on
@param model: The pre-trained paraphrasing model
@param sentence: The sentence to be turned into template questions
@return: final_outputs: the candidate template questions
https://github.com/liberai/nspm/blob/cc352dbbda6751e8cf19769c9440c03e31687829/gsoc/zheyuan/pipeline/paraphrase_questions.py#L61-L101
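A usage sketch composed from the helpers defined alongside this function; the question is an illustrative placeholder, and downloading the pretrained T5 paraphraser requires network access.

folder_path = get_pretrained_model(const.URL)        # download and unzip the T5 paraphraser
tokenizer, device, model = prepare_model(folder_path)
set_seed(42)                                          # make sampling reproducible

candidates = paraphrase_questions(tokenizer, device, model,
                                  "what is the population of <A> ?")
for question in candidates:
    print(question)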
import tensorflow_hub as hub import tensorflow as tf import zipfile import requests, zipfile, io import os import re import argparse import torch from transformers import T5ForConditionalGeneration,T5Tokenizer from constant import Constant from textual_similarity import similarities, minDistance, words_distance, tags_distance, has_NNP, count_NNP from bert_classifier import load_model, predict const = Constant() const.URL = "https://datascience-models-ramsri.s3.amazonaws.com/t5_paraphraser.zip" const.MODEL_DIR = "~/Downloads/model_save1/" def get_pretrained_model(zip_file_url): model_name = zip_file_url.split("/")[-1].replace(".zip", "") folder_path = './{}'.format(model_name) print('Getting pretained model {}'.format(model_name)) if not os.path.exists(folder_path): os.system("curl --output ./t5_paraphraser.zip https://datascience-models-ramsri.s3.amazonaws.com/t5_paraphraser.zip") os.system("unzip ./t5_paraphraser.zip -d ./t5_paraphraser") else: print("Folder available: ", folder_path) print('Finish {}'.format(model_name)) return folder_path def prepare_model(folder_path): model = T5ForConditionalGeneration.from_pretrained(folder_path) tokenizer = T5Tokenizer.from_pretrained('t5-base') device = torch.device("cuda" if torch.cuda.is_available() else "cpu") print("device ", device) model = model.to(device) return tokenizer, device, model def set_seed(seed): torch.manual_seed(seed) if torch.cuda.is_available(): torch.cuda.manual_seed_all(seed)
MIT License
openstack/keystone
keystone/federation/backends/base.py
FederationDriverBase.list_mappings
python
def list_mappings(self):
    raise exception.NotImplemented()
List all mappings.

:returns: list of mapping refs
:rtype: list of dicts
https://github.com/openstack/keystone/blob/1e7ecca881a51144d61ae8026e1a77d6669997e2/keystone/federation/backends/base.py#L218-L225
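A hypothetical in-memory backend fragment, not part of keystone, showing the contract this abstract method implies: each mapping ref is a dict and list_mappings returns them as a list. The remaining abstract methods are omitted, so this fragment is illustrative only.

class InMemoryFederationDriver(FederationDriverBase):
    def __init__(self):
        self._mappings = {}                    # mapping_id -> mapping ref (dict)

    def create_mapping(self, mapping_id, mapping):
        ref = dict(mapping, id=mapping_id)
        self._mappings[mapping_id] = ref
        return ref

    def list_mappings(self):
        return list(self._mappings.values())   # list of mapping refs (dicts)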
import abc from keystone import exception class FederationDriverBase(object, metaclass=abc.ABCMeta): @abc.abstractmethod def create_idp(self, idp_id, idp): raise exception.NotImplemented() @abc.abstractmethod def delete_idp(self, idp_id): raise exception.NotImplemented() @abc.abstractmethod def get_idp(self, idp_id): raise exception.NotImplemented() @abc.abstractmethod def get_idp_from_remote_id(self, remote_id): raise exception.NotImplemented() @abc.abstractmethod def update_idp(self, idp_id, idp): raise exception.NotImplemented() @abc.abstractmethod def create_protocol(self, idp_id, protocol_id, protocol): raise exception.NotImplemented() @abc.abstractmethod def update_protocol(self, idp_id, protocol_id, protocol): raise exception.NotImplemented() @abc.abstractmethod def get_protocol(self, idp_id, protocol_id): raise exception.NotImplemented() @abc.abstractmethod def list_protocols(self, idp_id): raise exception.NotImplemented() @abc.abstractmethod def delete_protocol(self, idp_id, protocol_id): raise exception.NotImplemented() @abc.abstractmethod def create_mapping(self, mapping_id, mapping): raise exception.NotImplemented() @abc.abstractmethod def delete_mapping(self, mapping_id): raise exception.NotImplemented() @abc.abstractmethod def update_mapping(self, mapping_id, mapping_ref): raise exception.NotImplemented() @abc.abstractmethod
Apache License 2.0