Dataset columns:
    repository_name      string, lengths 7 to 107
    function_path        string, lengths 4 to 190
    function_identifier  string, lengths 1 to 236
    language             string, 1 distinct value
    function             string, lengths 9 to 647k
    docstring            string, lengths 5 to 488k
    function_url         string, lengths 71 to 285
    context              string, lengths 0 to 2.51M
    license              string, 5 distinct values
fangshi1991/zipline_chstock
zipline/pipeline/engine.py
SimplePipelineEngine._mask_and_dates_for_term
python
def _mask_and_dates_for_term(self, term, workspace, graph, dates):
    mask = term.mask
    offset = graph.extra_rows[mask] - graph.extra_rows[term]
    return workspace[mask][offset:], dates[offset:]
Load mask and mask row labels for term.
https://github.com/fangshi1991/zipline_chstock/blob/7911642780fa57f92e1705b9c0acaeb837b3d98f/zipline/pipeline/engine.py#L240-L246
from abc import ( ABCMeta, abstractmethod, ) from uuid import uuid4 from six import ( iteritems, with_metaclass, ) from numpy import array from pandas import ( DataFrame, date_range, MultiIndex, ) from toolz import groupby, juxt from toolz.curried.operator import getitem from zipline.lib.adjusted_array import ensure_ndarray from zipline.errors import NoFurtherDataError from zipline.utils.numpy_utils import repeat_first_axis, repeat_last_axis from zipline.utils.pandas_utils import explode from .term import AssetExists, LoadableTerm class PipelineEngine(with_metaclass(ABCMeta)): @abstractmethod def run_pipeline(self, pipeline, start_date, end_date): raise NotImplementedError("run_pipeline") class NoOpPipelineEngine(PipelineEngine): def run_pipeline(self, pipeline, start_date, end_date): return DataFrame( index=MultiIndex.from_product( [date_range(start=start_date, end=end_date, freq='D'), ()], ), columns=sorted(pipeline.columns.keys()), ) class SimplePipelineEngine(object): __slots__ = ( '_get_loader', '_calendar', '_finder', '_root_mask_term', '__weakref__', ) def __init__(self, get_loader, calendar, asset_finder): self._get_loader = get_loader self._calendar = calendar self._finder = asset_finder self._root_mask_term = AssetExists() def run_pipeline(self, pipeline, start_date, end_date): if end_date < start_date: raise ValueError( "start_date must be before or equal to end_date \n" "start_date=%s, end_date=%s" % (start_date, end_date) ) screen_name = uuid4().hex graph = pipeline.to_graph(screen_name, self._root_mask_term) extra_rows = graph.extra_rows[self._root_mask_term] root_mask = self._compute_root_mask(start_date, end_date, extra_rows) dates, assets, root_mask_values = explode(root_mask) outputs = self.compute_chunk( graph, dates, assets, initial_workspace={self._root_mask_term: root_mask_values}, ) out_dates = dates[extra_rows:] screen_values = outputs.pop(screen_name) return self._to_narrow(outputs, screen_values, out_dates, assets) def _compute_root_mask(self, start_date, end_date, extra_rows): calendar = self._calendar finder = self._finder start_idx, end_idx = self._calendar.slice_locs(start_date, end_date) if start_idx < extra_rows: raise NoFurtherDataError( msg="Insufficient data to compute Pipeline mask: " "start date was %s, " "earliest known date was %s, " "and %d extra rows were requested." % ( start_date, calendar[0], extra_rows, ), ) lifetimes = finder.lifetimes( calendar[start_idx - extra_rows:end_idx], include_start_date=False ) assert lifetimes.index[extra_rows] == start_date assert lifetimes.index[-1] == end_date if not lifetimes.columns.unique: columns = lifetimes.columns duplicated = columns[columns.duplicated()].unique() raise AssertionError("Duplicated sids: %d" % duplicated) existed = lifetimes.iloc[extra_rows:].any() ret = lifetimes.loc[:, existed] shape = ret.shape assert shape[0] * shape[1] != 0, 'root mask cannot be empty' return ret
Apache License 2.0
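A small sketch of the offset-slicing idea behind `_mask_and_dates_for_term` in the row above, using plain NumPy stand-ins for the pipeline workspace (all names, shapes and values below are invented for illustration):

import numpy as np

# Invented stand-ins: the workspace maps each term to its computed array, and
# extra_rows records how many extra leading rows each term was computed with.
workspace = {'mask_term': np.ones((7, 3), dtype=bool)}    # dates x assets
extra_rows = {'mask_term': 2, 'term': 0}
dates = np.arange(7)                                      # stand-in for a DatetimeIndex

offset = extra_rows['mask_term'] - extra_rows['term']
mask_for_term = workspace['mask_term'][offset:]           # drop the extra leading rows
dates_for_term = dates[offset:]
print(mask_for_term.shape, dates_for_term)                # (5, 3) [2 3 4 5 6]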
olitheolix/aiokubernetes
aiokubernetes/models/v1_stateful_set_condition.py
V1StatefulSetCondition.__init__
python
def __init__(self, last_transition_time=None, message=None, reason=None, status=None, type=None):
    self._last_transition_time = None
    self._message = None
    self._reason = None
    self._status = None
    self._type = None
    self.discriminator = None

    if last_transition_time is not None:
        self.last_transition_time = last_transition_time
    if message is not None:
        self.message = message
    if reason is not None:
        self.reason = reason
    self.status = status
    self.type = type
V1StatefulSetCondition - a model defined in Swagger
https://github.com/olitheolix/aiokubernetes/blob/266718b210dff2a9b2212183261ea89adf89115e/aiokubernetes/models/v1_stateful_set_condition.py#L48-L65
import pprint import re class V1StatefulSetCondition(object): """ Attributes: swagger_types (dict): The key is attribute name and the value is attribute type. attribute_map (dict): The key is attribute name and the value is json key in definition. """ swagger_types = { 'last_transition_time': 'datetime', 'message': 'str', 'reason': 'str', 'status': 'str', 'type': 'str' } attribute_map = { 'last_transition_time': 'lastTransitionTime', 'message': 'message', 'reason': 'reason', 'status': 'status', 'type': 'type' }
Apache License 2.0
gobot1234/steam.py
steam/abc.py
BaseUser.favourite_badge
python
async def favourite_badge(self) -> FavouriteBadge | None:
    badge = await self._state.fetch_user_favourite_badge(self.id64)
    if not badge.has_favorite_badge:
        return
    return FavouriteBadge(
        id=UserBadge.try_value(badge.badgeid),
        item_id=badge.communityitemid,
        type=badge.item_type,
        border_colour=badge.border_color,
        game=StatefulGame(self._state, id=badge.appid) if badge.appid else None,
        level=badge.level,
    )
The user's favourite badge
https://github.com/gobot1234/steam.py/blob/73fcb94eeba7d0d318252b899af80516efc17b0a/steam/abc.py#L583-L596
from __future__ import annotations import abc import asyncio import re from collections.abc import Coroutine from datetime import datetime from typing import TYPE_CHECKING, Any, TypeVar import attr from typing_extensions import Final, Protocol, TypedDict, runtime_checkable from .badge import FavouriteBadge, UserBadges from .comment import Comment from .enums import * from .errors import WSException from .game import Game, StatefulGame, UserGame, WishlistGame from .iterators import AsyncIterator, CommentsIterator from .models import URL, Ban from .profile import * from .trade import Inventory from .utils import ( _INVITE_HEX, _INVITE_MAPPING, InstanceType, Intable, TypeType, UniverseType, cached_slot_property, id64_from_url, make_id64, ) if TYPE_CHECKING: from _typeshed import Self from aiohttp import ClientSession from .clan import Clan from .group import Group from .http import StrOrURL from .image import Image from .message import Authors from .protobufs.chat import Mentions from .state import ConnectionState from .user import User __all__ = ( "SteamID", "Message", "Channel", ) C = TypeVar("C", bound="Commentable") M_co = TypeVar("M_co", bound="Message", covariant=True) class UserDict(TypedDict): steamid: str personaname: str primaryclanid: str profileurl: str realname: str communityvisibilitystate: int profilestate: int commentpermission: int personastate: int personastateflags: int avatar: str avatarmedium: str avatarfull: str avatarhash: str loccountrycode: str locstatecode: int loccityid: int gameid: str gameextrainfo: str timecreated: int lastlogoff: int last_logon: int last_seen_online: int class SteamID(metaclass=abc.ABCMeta): __slots__ = ("__BASE",) def __init__( self, id: Intable = 0, type: TypeType | None = None, universe: UniverseType | None = None, instance: InstanceType | None = None, ): self.__BASE: Final = make_id64(id, type, universe, instance) def __int__(self) -> int: return self.__BASE def __eq__(self, other: Any) -> bool: try: return self.__BASE == int(other) except (TypeError, ValueError): return NotImplemented def __str__(self) -> str: return str(self.__BASE) def __hash__(self) -> int: return hash(self.__BASE) def __repr__(self) -> str: return f"SteamID(id={self.id}, type={self.type}, universe={self.universe}, instance={self.instance})" @property def instance(self) -> int: return (self.__BASE >> 32) & 0xFFFFF @property def type(self) -> Type: return Type((self.__BASE >> 52) & 0xF) @property def universe(self) -> Universe: return Universe((self.__BASE >> 56) & 0xFF) @property def id64(self) -> int: return self.__BASE @property def id(self) -> int: return self.__BASE & 0xFFFFFFFF @property def id2(self) -> str: return f"STEAM_{self.universe.value}:{self.id % 2}:{self.id >> 1}" @property def id2_zero(self) -> str: return self.id2.replace("_1", "_0") @property def id3(self) -> str: type_char = TypeChar(self.type).name instance = None if self.type in (Type.AnonGameServer, Type.Multiseat): instance = self.instance elif self.type == Type.Individual: if self.instance != 1: instance = self.instance elif self.type == Type.Chat: if self.instance & InstanceFlag.Clan: type_char = "c" elif self.instance & InstanceFlag.Lobby: type_char = "L" else: type_char = "T" parts = [type_char, str(self.universe.value), str(self.id)] if instance is not None: parts.append(str(instance)) return f"[{':'.join(parts)}]" @property def invite_code(self) -> str | None: if self.type == Type.Individual and self.is_valid(): invite_code = re.sub(f"[{_INVITE_HEX}]", lambda x: _INVITE_MAPPING[x.group()], 
f"{self.id:x}") split_idx = len(invite_code) // 2 return invite_code if split_idx == 0 else f"{invite_code[:split_idx]}-{invite_code[split_idx:]}" @property def invite_url(self) -> str | None: code = self.invite_code return f"https://s.team/p/{code}" if code else None @property def community_url(self) -> str | None: suffix = { Type.Individual: "profiles", Type.Clan: "gid", } try: return f"https://steamcommunity.com/{suffix[self.type]}/{self.id64}" except KeyError: return None def is_valid(self) -> bool: if self.type == Type.Invalid or self.type >= Type.Max: return False if self.universe == Universe.Invalid or self.universe >= Universe.Max: return False if self.type == Type.Individual and (self.id == 0 or self.instance > 4): return False if self.type == Type.Clan and (self.id == 0 or self.instance != 0): return False if self.type == Type.GameServer and self.id == 0: return False if self.type == Type.AnonGameServer and self.id == 0 and self.instance == 0: return False return True @staticmethod async def from_url(url: StrOrURL, session: ClientSession | None = None) -> SteamID | None: id64 = await id64_from_url(url, session) return SteamID(id64) if id64 else None class Commentable(Protocol): __slots__ = () _state: ConnectionState @property @abc.abstractmethod def _commentable_kwargs(self) -> dict[str, Any]: raise NotImplementedError async def comment(self: Self, content: str, *, subscribe: bool = True) -> Comment[Self]: return await self._state.post_comment(self, content, subscribe) def comments( self: Self, oldest_first: bool = False, limit: int | None = None, before: datetime | None = None, after: datetime | None = None, ) -> CommentsIterator[Self]: return CommentsIterator( oldest_first=oldest_first, owner=self, state=self._state, limit=limit, before=before, after=after ) class BaseUser(SteamID, Commentable): __slots__ = ( "name", "game", "state", "flags", "country", "primary_clan", "trade_url", "real_name", "avatar_url", "last_seen_online", "created_at", "last_logoff", "last_logon", "privacy_state", "community_url", "_is_commentable", "_setup_profile", "_level", "_state", ) def __init__(self, state: ConnectionState, data: UserDict): super().__init__(data["steamid"]) self._state = state self.name: str self.real_name: str | None = None self.community_url: str | None = None self.avatar_url: str | None = None self.primary_clan: Clan | None = None self.country: str | None = None self.created_at: datetime | None = None self.last_logoff: datetime | None = None self.last_logon: datetime | None = None self.last_seen_online: datetime | None = None self.game: StatefulGame | None = None self.state: PersonaState | None = None self.flags: PersonaStateFlag | None = None self.privacy_state: CommunityVisibilityState | None = None self._update(data) def _update(self, data: UserDict) -> None: self.name = data["personaname"] self.real_name = data.get("realname") or self.real_name self.community_url = data.get("profileurl") or super().community_url self.avatar_url = data.get("avatarfull") or self.avatar_url self.trade_url = URL.COMMUNITY / f"tradeoffer/new/?partner={self.id}" from .clan import Clan self.primary_clan = ( Clan(self._state, data["primaryclanid"]) if "primaryclanid" in data else self.primary_clan ) self.country = data.get("loccountrycode") or self.country self.created_at = datetime.utcfromtimestamp(data["timecreated"]) if "timecreated" in data else self.created_at self.last_logoff = datetime.utcfromtimestamp(data["lastlogoff"]) if "lastlogoff" in data else self.last_logoff self.last_logon = 
datetime.utcfromtimestamp(data["last_logon"]) if "last_logon" in data else self.last_logon self.last_seen_online = ( datetime.utcfromtimestamp(data["last_seen_online"]) if "last_seen_online" in data else self.last_seen_online ) self.game = ( StatefulGame(self._state, name=data["gameextrainfo"], id=data["gameid"]) if "gameid" in data else self.game ) self.state = PersonaState.try_value(data.get("personastate", 0)) or self.state self.flags = PersonaStateFlag.try_value(data.get("personastateflags", 0)) or self.flags self.privacy_state = CommunityVisibilityState.try_value(data.get("communityvisibilitystate", 0)) self._is_commentable = bool(data.get("commentpermission")) self._setup_profile = bool(data.get("profilestate")) def __repr__(self) -> str: attrs = ("name", "state", "id", "type", "universe", "instance") resolved = [f"{attr}={getattr(self, attr)!r}" for attr in attrs] return f"<{self.__class__.__name__} {' '.join(resolved)}>" def __str__(self) -> str: return self.name def __del__(self): self._state._users.pop(self.id64, None) @property def _commentable_kwargs(self) -> dict[str, Any]: return { "id64": self.id64, "thread_type": 10, } @property def mention(self) -> str: return f"[mention={self.id}]@{self.name}[/mention]" async def inventory(self, game: Game) -> Inventory: resp = await self._state.http.get_user_inventory(self.id64, game.id, game.context_id) return Inventory(state=self._state, data=resp, owner=self, game=game) async def friends(self) -> list[User]: friends = await self._state.http.get_friends(self.id64) return [self._state._store_user(friend) for friend in friends] async def games(self) -> list[UserGame]: data = await self._state.http.get_user_games(self.id64) games = data["response"].get("games", []) return [UserGame(self._state, game) for game in games] async def clans(self) -> list[Clan]: clans = [] async def getter(gid: int) -> None: try: clan = await self._state.client.fetch_clan(gid) except WSException as exc: if exc.code == Result.RateLimitExceeded: await asyncio.sleep(20) return await getter(gid) raise else: clans.append(clan) resp = await self._state.http.get_user_clans(self.id64) for clan in resp["response"]["groups"]: await getter(int(clan["gid"])) return clans async def bans(self) -> Ban: resp = await self._state.http.get_user_bans(self.id64) resp = resp["players"][0] resp["EconomyBan"] = resp["EconomyBan"] != "none" return Ban(data=resp) async def badges(self) -> UserBadges: resp = await self._state.http.get_user_badges(self.id64) return UserBadges(self._state, data=resp["response"]) async def level(self) -> int: if self._state.http.api_key is not None: resp = await self._state.http.get_user_level(self.id64) return resp["response"]["player_level"] return self._level async def wishlist(self) -> list[WishlistGame]: data = await self._state.http.get_wishlist(self.id64) return [WishlistGame(self._state, id=id, data=game_info) for id, game_info in data.items()]
MIT License
albertogeniola/merossiot
meross_iot/utilities/limiter.py
RateLimitChecker.__init__
python
def __init__(self,
             global_burst_rate: int = 6,
             global_time_window: timedelta = timedelta(seconds=1),
             global_tokens_per_interval: int = 2,
             device_burst_rate: int = 1,
             device_time_window: timedelta = timedelta(seconds=1),
             device_tokens_per_interval: int = 1,
             device_max_command_queue: int = 5):
    self._global_limiter = TokenBucketRateLimiterWithBackoff(
        window_interval=global_time_window,
        tokens_per_interval=global_tokens_per_interval,
        max_burst_size=global_burst_rate,
        backoff_logic=ExponentialBackoff(start_backoff_seconds=0.5, max_backoff_seconds=10))
    self._devices_limiters = {}
    self._device_burst_rate = device_burst_rate
    self._device_time_window = device_time_window
    self._device_tokens_per_interval = device_tokens_per_interval
    self._max_command_queue = device_max_command_queue
    self._current_command_queue_size = {}
Constructor
:param global_burst_rate: Global burst rate, max number of commands that can be executed within the global_time_window
:param global_time_window: Time window in seconds that is used to aggregate the API counting
:param global_tokens_per_interval: Number of calls allowed within the time interval at run time (globally)
:param device_burst_rate: Per device burst rate, max number of commands that can be executed on a specific device within the device_time_window
:param device_time_window: Time window in seconds that is used to aggregate the API counting for a given device
:param device_tokens_per_interval: Number of calls allowed within the time interval at run time (per device)
:param device_max_command_queue: Maximum number of commands that can be delayed for a given device, after which commands are dropped
https://github.com/albertogeniola/merossiot/blob/4522d6822edbc7dfc454bd3b278e006565d842ff/meross_iot/utilities/limiter.py#L158-L190
from abc import ABC, abstractmethod from datetime import timedelta from enum import Enum from time import time import logging from typing import Dict, Tuple from meross_iot.model.enums import Namespace _LIMITER = logging.getLogger("meross_iot.manager.apilimiter") class BackoffLogic(ABC): @abstractmethod def reset(self) -> None: pass @abstractmethod def wait_interval(self) -> float: pass class ExponentialBackoff(BackoffLogic): def __init__(self, start_backoff_seconds: float, max_backoff_seconds: float): self._baseline = start_backoff_seconds self._cap = max_backoff_seconds self._failures = 0 def wait_interval(self) -> float: current = min(self._baseline * pow(2, self._failures), self._cap) self._failures += 1 return current def reset(self) -> None: self._failures = 0 class TokenBucketRateLimiterWithBackoff(object): def __init__(self, window_interval: timedelta, tokens_per_interval: int, max_burst_size: int, backoff_logic: BackoffLogic): self._window_interval_seconds = window_interval.total_seconds() self._tokens_per_interval = tokens_per_interval self._max_burst = max_burst_size self._backoff_logic = backoff_logic self._limit_hits_in_window = 0 self._current_window_end = time() - self._window_interval_seconds self._remaining_tokens = 0 def update_tokens(self): return self._add_tokens() def _add_tokens(self): now = time() if now < self._current_window_end: return n_intervals = (now - self._current_window_end) // self._window_interval_seconds + 1 n_tokens = n_intervals * self._tokens_per_interval self._remaining_tokens = min(self._remaining_tokens + n_tokens, self._max_burst) self._current_window_end = now + self._window_interval_seconds self._limit_hits_in_window = 0 @property def current_over_limit_hits(self) -> int: self._add_tokens() return self._limit_hits_in_window @property def over_limit_percentace(self): self._add_tokens() return (self._limit_hits_in_window / self._max_burst) * 100 @property def current_window_hitrate(self) -> int: self._add_tokens() return self._max_burst - self._remaining_tokens @property def current_window_capacity(self): self._add_tokens() return (self._limit_hits_in_window / self._max_burst) * 100 def check_limit_reached(self) -> Tuple[bool, float]: self._add_tokens() if self._remaining_tokens > 0: self._remaining_tokens -= 1 self._backoff_logic.reset() return False, 0 self._limit_hits_in_window += 1 wait_interval = self._backoff_logic.wait_interval() return True, wait_interval class RateLimitResult(Enum): NotLimited = 0, GlobalLimitReached = 1, PerDeviceLimitReached = 2 class RateLimitResultStrategy(Enum): PerformCall = 0, DelayCall = 1, DropCall = 2 class RateLimitChecker(object):
MIT License
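To make the burst-rate and tokens-per-interval parameters in the row above concrete, here is a stripped-down token-bucket sketch. It is not the library's implementation; the class name and the numbers are illustrative only:

import time

class TinyTokenBucket:
    """Allow at most `burst` calls, refilling `tokens_per_interval` every `interval` seconds."""
    def __init__(self, burst=6, tokens_per_interval=2, interval=1.0):
        self.burst = burst
        self.tokens = burst
        self.tokens_per_interval = tokens_per_interval
        self.interval = interval
        self.last_refill = time.monotonic()

    def acquire(self) -> bool:
        now = time.monotonic()
        elapsed_intervals = int((now - self.last_refill) / self.interval)
        if elapsed_intervals:
            # Refill, but never above the burst size.
            self.tokens = min(self.burst, self.tokens + elapsed_intervals * self.tokens_per_interval)
            self.last_refill = now
        if self.tokens > 0:
            self.tokens -= 1
            return True
        return False    # caller should back off, which is what the ExponentialBackoff logic above handles

bucket = TinyTokenBucket()
print([bucket.acquire() for _ in range(8)])   # first 6 True, then False until a refill happens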
ngageoint/sarpy
sarpy/io/phase_history/cphd1_elements/Dwell.py
DwellType.NumCODTimes
python
def NumCODTimes(self):
    if self.CODTimes is None:
        return 0
    else:
        return len(self.CODTimes)
int: The number of cod time polynomial elements.
https://github.com/ngageoint/sarpy/blob/91405721a7e6ffe7c76dd7b143915fee4bee1e82/sarpy/io/phase_history/cphd1_elements/Dwell.py#L125-L133
__classification__ = "UNCLASSIFIED" __author__ = "Thomas McCullough" from typing import List from sarpy.io.xml.base import Serializable from sarpy.io.xml.descriptors import StringDescriptor, SerializableDescriptor, SerializableListDescriptor from sarpy.io.complex.sicd_elements.blocks import Poly2DType from .base import DEFAULT_STRICT class CODTimeType(Serializable): _fields = ('Identifier', 'CODTimePoly') _required = _fields Identifier = StringDescriptor( 'Identifier', _required, strict=DEFAULT_STRICT, docstring='String that uniquely identifies this COD Time ' 'polynomial.') CODTimePoly = SerializableDescriptor( 'CODTimePoly', Poly2DType, _required, strict=DEFAULT_STRICT, docstring='The polynomial.') def __init__(self, Identifier=None, CODTimePoly=None, **kwargs): if '_xml_ns' in kwargs: self._xml_ns = kwargs['_xml_ns'] if '_xml_ns_key' in kwargs: self._xml_ns_key = kwargs['_xml_ns_key'] self.Identifier = Identifier self.CODTimePoly = CODTimePoly super(CODTimeType, self).__init__(**kwargs) class DwellTimeType(Serializable): _fields = ('Identifier', 'DwellTimePoly') _required = _fields Identifier = StringDescriptor( 'Identifier', _required, strict=DEFAULT_STRICT, docstring='String that uniquely identifies this Dwell Time ' 'polynomial.') DwellTimePoly = SerializableDescriptor( 'DwellTimePoly', Poly2DType, _required, strict=DEFAULT_STRICT, docstring='The polynomial.') def __init__(self, Identifier=None, DwellTimePoly=None, **kwargs): if '_xml_ns' in kwargs: self._xml_ns = kwargs['_xml_ns'] if '_xml_ns_key' in kwargs: self._xml_ns_key = kwargs['_xml_ns_key'] self.Identifier = Identifier self.DwellTimePoly = DwellTimePoly super(DwellTimeType, self).__init__(**kwargs) class DwellType(Serializable): _fields = ('NumCODTimes', 'CODTimes', 'NumDwellTimes', 'DwellTimes') _required = ('CODTimes', 'DwellTimes') _collections_tags = { 'CODTimes': {'array': False, 'child_tag': 'CODTime'}, 'DwellTimes': {'array': False, 'child_tag': 'DwellTime'}} CODTimes = SerializableListDescriptor( 'CODTimes', CODTimeType, _collections_tags, _required, strict=DEFAULT_STRICT, docstring='The Center of Dwell (COD) time polynomials.') DwellTimes = SerializableListDescriptor( 'DwellTimes', DwellTimeType, _collections_tags, _required, strict=DEFAULT_STRICT, docstring='The dwell time polynomials.') def __init__(self, CODTimes=None, DwellTimes=None, **kwargs): if '_xml_ns' in kwargs: self._xml_ns = kwargs['_xml_ns'] if '_xml_ns_key' in kwargs: self._xml_ns_key = kwargs['_xml_ns_key'] self.CODTimes = CODTimes self.DwellTimes = DwellTimes super(DwellType, self).__init__(**kwargs) @property
MIT License
galarzaa90/tibia.py
tibiapy/abc.py
BaseNews.url
python
def url(self):
    return self.get_url(self.id)
:class:`str`: The URL to the Tibia.com page of the news entry.
https://github.com/galarzaa90/tibia.py/blob/babcb1648fb99bf5ac0fd0162b38244cbcd21b9d/tibiapy/abc.py#L449-L451
from __future__ import annotations import abc import datetime import enum import json from collections import OrderedDict from typing import Callable, TYPE_CHECKING from tibiapy.utils import get_tibia_url if TYPE_CHECKING: from tibiapy import PvpType, WorldLocation class Serializable: _serializable_properties = () @classmethod def __slots_inherited__(cls): slots = [] for base in cls.__bases__: try: slots.extend(base.__slots_inherited__()) except AttributeError: continue slots.extend(getattr(cls, "__slots__", [])) slots.extend(getattr(cls, "_serializable_properties", [])) return tuple(OrderedDict.fromkeys(slots)) def keys(self): return list(self.__slots_inherited__()) def __getitem__(self, item): if item in self.keys(): try: return getattr(self, item) except AttributeError: return None else: raise KeyError(item) def __setitem__(self, key, value): if key in self.keys(): setattr(self, key, value) else: raise KeyError(key) @staticmethod def _try_dict(obj): try: if isinstance(obj, datetime.datetime): return obj.isoformat() if isinstance(obj, datetime.timedelta): return int(obj.total_seconds()) if isinstance(obj, enum.Flag): return [str(i) for i in obj] if isinstance(obj, enum.Enum): return str(obj) return {k: v for k, v in dict(obj).items() if v is not None} except TypeError: return str(obj) def to_json(self, *, indent=None, sort_keys=False): return json.dumps({k: v for k, v in dict(self).items() if v is not None}, indent=indent, sort_keys=sort_keys, default=self._try_dict) class BaseAnnouncement(metaclass=abc.ABCMeta): announcement_id: int __slots__ = ( "announcement_id", ) def __eq__(self, other): if isinstance(other, self.__class__): return other.announcement_id == self.announcement_id return False @property def url(self): return self.get_url(self.announcement_id) @classmethod def get_url(cls, announcement_id): return get_tibia_url("forum", None, action="announcement", announcementid=announcement_id) class BaseBoard(metaclass=abc.ABCMeta): __slots__ = ( "board_id", ) def __eq__(self, o: object) -> bool: if isinstance(o, self.__class__): return self.board_id == o.board_id return False def __repr__(self): return f"<{self.__class__.__name__} board_id={self.board_id!r}>" @property def url(self): return self.get_url(self.board_id) @classmethod def get_url(cls, board_id, page=1, age=30): return get_tibia_url("forum", None, action="board", boardid=board_id, pagenumber=page, threadage=age) @classmethod def get_world_boards_url(cls): return get_tibia_url("forum", "worldboards") @classmethod def get_trade_boards_url(cls): return get_tibia_url("forum", "tradeboards") @classmethod def get_community_boards_url(cls): return get_tibia_url("forum", "communityboards") @classmethod def get_support_boards_url(cls): return get_tibia_url("forum", "supportboards") class BaseCharacter(metaclass=abc.ABCMeta): __slots__ = ( "name", ) def __eq__(self, o: object) -> bool: if isinstance(o, self.__class__): return self.name.lower() == o.name.lower() return False def __repr__(self): return f"<{self.__class__.__name__} name={self.name!r}>" @property def url(self): return self.get_url(self.name) @classmethod def get_url(cls, name): return get_tibia_url("community", "characters", name=name) class BaseGuild(metaclass=abc.ABCMeta): __slots__ = ( "name", ) def __repr__(self): return f"<{self.__class__.__name__} name={self.name!r}>" def __eq__(self, other): if isinstance(other, self.__class__): return self.name == other.name return False @property def url(self): return self.get_url(self.name) @property def url_wars(self): 
return self.get_url_wars(self.name) @classmethod def get_url(cls, name): return get_tibia_url("community", "guilds", page="view", GuildName=name) @classmethod def get_url_wars(cls, name): return get_tibia_url("community", "guilds", page="guildwars", action="view", GuildName=name) class BaseHouse(metaclass=abc.ABCMeta): __slots__ = ( "name", ) def __repr__(self): return f"<{self.__class__.__name__} name={self.name!r}>" def __eq__(self, o: object) -> bool: if isinstance(o, self.__class__): return self.name.lower() == o.name.lower() return False @classmethod def get_url(cls, house_id, world): return get_tibia_url("community", "houses", page="view", houseid=house_id, world=world) class BaseNews(metaclass=abc.ABCMeta): __slots__ = ( "id", ) def __eq__(self, o: object) -> bool: if isinstance(o, self.__class__): return self.id == o.id return False @property
Apache License 2.0
demisto/demisto-sdk
demisto_sdk/commands/common/hook_validations/script.py
ScriptValidator._get_arg_to_required_dict
python
def _get_arg_to_required_dict(cls, script_json):
    arg_to_required = {}
    args = script_json.get('args', [])
    for arg in args:
        arg_to_required[arg.get('name')] = arg.get('required', False)
    return arg_to_required
Get a dictionary of arg name to its required status.

Args:
    script_json (dict): Dictionary of the examined script.

Returns:
    dict. arg name to its required status.
https://github.com/demisto/demisto-sdk/blob/8d8767c2dfec77b67c35f4e1022e30ed2893e864/demisto_sdk/commands/common/hook_validations/script.py#L94-L107
import os import re from typing import Optional from demisto_sdk.commands.common.constants import (API_MODULES_PACK, DEPRECATED_REGEXES, PYTHON_SUBTYPES, TYPE_PWSH) from demisto_sdk.commands.common.errors import Errors from demisto_sdk.commands.common.hook_validations.content_entity_validator import ContentEntityValidator from demisto_sdk.commands.common.hook_validations.docker import DockerImageValidator from demisto_sdk.commands.common.tools import ( get_core_pack_list, get_file_version_suffix_if_exists, get_files_in_dir, get_pack_name, server_version_compare) class ScriptValidator(ContentEntityValidator): def is_valid_version(self) -> bool: if self.current_file.get('commonfields', {}).get('version') != self.DEFAULT_VERSION: error_message, error_code = Errors.wrong_version() if self.handle_error(error_message, error_code, file_path=self.file_path, suggested_fix=Errors.suggest_fix(self.file_path)): return False return True @classmethod def _is_sub_set(cls, supposed_bigger_list, supposed_smaller_list): for check_item in supposed_smaller_list: if check_item not in supposed_bigger_list: return False return True def is_backward_compatible(self): if not self.old_file: return True is_breaking_backwards = [ self.is_context_path_changed(), self.is_added_required_args(), self.is_arg_changed(), self.is_there_duplicates_args(), self.is_changed_subtype() ] if self.file_path == 'Scripts/SaneDocReport/SaneDocReport.yml': return not any(is_breaking_backwards[1:]) return not any(is_breaking_backwards) def is_valid_file(self, validate_rn=True): is_script_valid = all([ super().is_valid_file(validate_rn), self.is_valid_subtype(), self.is_id_equals_name(), self.is_docker_image_valid(), self.is_valid_pwsh(), self.is_valid_script_file_path(), self.is_there_separators_in_names(), self.name_not_contain_the_type() ]) if not self.old_file: is_script_valid = all([ is_script_valid, self.is_valid_name() ]) core_packs_list = get_core_pack_list() pack = get_pack_name(self.file_path) is_core = True if pack in core_packs_list else False if is_core: is_script_valid = all([ is_script_valid, self.no_incident_in_core_pack() ]) return is_script_valid @classmethod
MIT License
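A quick, self-contained illustration of what `_get_arg_to_required_dict` produces for a typical script definition (the script dict below is made up):

script_json = {
    'args': [
        {'name': 'entry_id', 'required': True},
        {'name': 'timeout'},                      # 'required' missing, defaults to False
    ]
}

arg_to_required = {}
for arg in script_json.get('args', []):
    arg_to_required[arg.get('name')] = arg.get('required', False)

print(arg_to_required)   # {'entry_id': True, 'timeout': False}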
thesadru/animethemes-dl
animethemes_dl/parsers/dldata.py
get_formatter
python
def get_formatter(**kwargs) -> Dict[str, str]:
    formatter = {}
    for t, d in kwargs.items():
        for k, v in d.items():
            if (not isinstance(v, (list, dict, bool)) and
                    not k.endswith('ated_at')):
                formatter[t + '_' + k] = v
    formatter['video_filetype'] = 'webm'
    formatter['anime_filename'] = formatter['video_filename'].split('-')[0]
    return formatter
Generates a formatter dict used for formatting filenames.
Takes in kwargs of Dict[str,Any]. Does not keep lists, dicts and bools.
Automatically filters out keys matching `.endswith('ated_at')` for animethemes-dl.
Also adds `{video_filetype: webm, anime_filename: ...}`.
https://github.com/thesadru/animethemes-dl/blob/059afa407d4e07e7420a2e08cc6019b51ced7770/animethemes_dl/parsers/dldata.py#L88-L107
import logging import re import string from os import PathLike from os.path import join, realpath, splitext from typing import Dict, List, Optional, Tuple from ..models import (AnimeListSite, AnimeThemeAnime, AnimeThemeEntry, AnimeThemeTheme, AnimeThemeVideo, DownloadData) from ..options import OPTIONS from .parser import get_animethemes from .utils import Measure logger = logging.getLogger('animethemes-dl') FILENAME_BAD = set('#%&{}\\<>*?/$!\'":@+`|') FILENAME_BANNED = set('<>:"/\\|?*') FILENAME_ALLOWEDASCII = set(string.printable).difference(FILENAME_BANNED) FEATURED_RE = re.compile(r"""^ (.*?) # song name (?: \ \(?feat\.\ ( [\w\ ]+ # artist name (?:\([\w\ ]+\))? # artists second name )\)? | \(([\w\ ]+)\) # comment enclosed in "()" (?:\ (.+))? # after comment details )? $""",re.VERBOSE) def is_entry_wanted(entry: AnimeThemeEntry) -> bool: for k in ('spoiler','nsfw'): v = OPTIONS['filter'][k] if v is not None and entry[k] ^ v: return False return True def is_video_wanted(video: AnimeThemeVideo) -> bool: for k in ('nc','subbed','lyrics','uncen'): v = OPTIONS['filter'][k] if v is not None and video[k] ^ v: return False if video['resolution'] < OPTIONS['filter']['resolution']: return False if OPTIONS['filter']['source'] is not None and video['source'] != OPTIONS['filter']['source']: return False if OPTIONS['filter']['overlap'] is not None and video['overlap'] not in OPTIONS['filter']['overlap']: return False return True def get_amount_episodes(episodes: str) -> int: a = 0 for ep in episodes.split(', '): if '-' in ep: start,end = ep.split('-') a += int(end)-int(start) else: a += int(ep) return a def strip_illegal_chars(filename: str) -> str: if OPTIONS['download']['ascii']: return ''.join(i for i in filename if i in FILENAME_ALLOWEDASCII) else: return ''.join(i for i in filename if i not in FILENAME_BANNED)
MIT License
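A rough sketch of the flattening behaviour described in the `get_formatter` docstring above, applied to invented metadata dicts (real animethemes-dl records carry many more fields):

def flatten(**kwargs):
    # Same idea as get_formatter: prefix each key with its group name and
    # drop lists, dicts, bools and *ated_at timestamp keys.
    formatter = {}
    for group, data in kwargs.items():
        for key, value in data.items():
            if not isinstance(value, (list, dict, bool)) and not key.endswith('ated_at'):
                formatter[f'{group}_{key}'] = value
    return formatter

print(flatten(
    anime={'name': 'Cowboy Bebop', 'year': 1998, 'updated_at': '2021-01-01'},
    video={'filename': 'CowboyBebop-OP1', 'resolution': 1080, 'nc': True},
))
# {'anime_name': 'Cowboy Bebop', 'anime_year': 1998,
#  'video_filename': 'CowboyBebop-OP1', 'video_resolution': 1080}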
magenta/ddsp
ddsp/core.py
safe_log
python
def safe_log(x, eps=1e-5):
    safe_x = tf.where(x <= eps, eps, x)
    return tf.math.log(safe_x)
Avoid taking the log of a non-positive number.
https://github.com/magenta/ddsp/blob/56266e9c255019df050a3c20255caa2beaa912ac/ddsp/core.py#L179-L182
import collections import copy from typing import Any, Dict, Optional, Sequence, Text, TypeVar import gin import numpy as np from scipy import fftpack import tensorflow.compat.v2 as tf Number = TypeVar('Number', int, float, np.ndarray, tf.Tensor) def tf_float32(x): if isinstance(x, tf.Tensor): return tf.cast(x, dtype=tf.float32) else: return tf.convert_to_tensor(x, tf.float32) def make_iterable(x): if x is None: return [] elif isinstance(x, (np.ndarray, tf.Tensor)): return [x] else: return x if isinstance(x, collections.Iterable) else [x] def to_dict(x, keys): if isinstance(x, dict): return x else: x = make_iterable(x) if len(keys) != len(x): raise ValueError(f'Keys: {keys} must be the same length as {x}') return dict(zip(keys, x)) def copy_if_tf_function(x): return copy.copy(x) if not tf.executing_eagerly() else x def nested_keys(nested_dict: Dict[Text, Any], delimiter: Text = '/', prefix: Text = '') -> Sequence[Text]: keys = [] for k, v in nested_dict.items(): key = k if not prefix else f'{prefix}{delimiter}{k}' if not isinstance(v, dict): keys.append(key) else: dict_keys = nested_keys(v, prefix=key) keys += dict_keys return keys def nested_lookup(nested_key: Text, nested_dict: Dict[Text, Any], delimiter: Text = '/') -> tf.Tensor: keys = nested_key.split(delimiter) value = nested_dict for key in keys: try: value = value[key] except KeyError: raise KeyError(f'Key \'{key}\' as a part of nested key \'{nested_key}\' ' 'not found during nested dictionary lookup, out of ' f'available keys: {nested_keys(nested_dict)}') return value def leaf_key(nested_key: Text, delimiter: Text = '/') -> tf.Tensor: keys = nested_key.split(delimiter) return keys[-1] def map_shape(x: Dict[Text, tf.Tensor]) -> Dict[Text, Sequence[int]]: return tf.nest.map_structure(lambda t: list(tf.shape(t).numpy()), x) def pad_axis(x, padding=(0, 0), axis=0, **pad_kwargs): n_end_dims = len(x.shape) - axis - 1 n_end_dims *= n_end_dims > 0 paddings = [[0, 0]] * axis + [list(padding)] + [[0, 0]] * n_end_dims return tf.pad(x, paddings, **pad_kwargs) def safe_divide(numerator, denominator, eps=1e-7): safe_denominator = tf.where(denominator == 0.0, eps, denominator) return numerator / safe_denominator
Apache License 2.0
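The same clamp-before-log guard as `safe_log` above, sketched with NumPy so it runs without TensorFlow (the eps default is copied from the row above):

import numpy as np

def safe_log_np(x, eps=1e-5):
    # Replace non-positive (or tiny) values with eps before taking the log,
    # so the result never contains -inf or NaN.
    x = np.asarray(x, dtype=np.float64)
    return np.log(np.where(x <= eps, eps, x))

print(safe_log_np([0.0, 1e-7, 1.0]))   # [-11.5129..., -11.5129..., 0.0]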
usi-systems/p4benchmark
p4gen/p4template.py
control
python
def control(fwd_tbl, applies):
    d = {'fwd_tbl': fwd_tbl, 'applies': applies}
    return read_template('template/controls/ingress.txt', d)
This method returns the apply statement and apply forward_table used in the control flow

:param tbl_name: the name of the table
:type tbl_name: str
:param applies: the apply statement for other table
:type applies: str
:returns: str -- the code in plain text
:raises: None
https://github.com/usi-systems/p4benchmark/blob/e1b22c106c3458f757a362f57027670cee286c47/p4gen/p4template.py#L170-L183
from string import Template from pkg_resources import resource_string def read_template(filename, binding={}): src = Template(resource_string(__name__, filename)) return src.substitute(binding) def p4_define(): p4_define = read_template('template/define.txt') return p4_define def ethernet_header(): return read_template('template/headers/ethernet.txt') def ethernet(): ethernet_hdr = read_template('template/headers/ethernet.txt') parse_eth = read_template('template/parsers/parse_ethernet.txt') return (ethernet_hdr + parse_eth) def ipv4(checksum=False): ipv4_hdr = read_template('template/headers/ipv4.txt') if checksum: parse_ipv4 = read_template('template/parsers/parse_ipv4_checksum.txt') else: parse_ipv4 = read_template('template/parsers/parse_ipv4.txt') return (ipv4_hdr + parse_ipv4) def tcp(): tcp_hdr = read_template('template/headers/tcp.txt') parse_tcp = read_template('template/parsers/parse_tcp.txt') return (tcp_hdr + parse_tcp) def nop_action(): return read_template('template/actions/nop.txt') def forward_table(): d = { 'tbl_name': 'forward_table' } return read_template('template/tables/forward_table.txt', d) def add_table_no_match(tbl_name, actions='_nop;', tbl_size=1): binding = {'tbl_name': tbl_name, 'actions': actions, 'tbl_size': tbl_size} return read_template('template/tables/table_no_match.txt', binding) def add_default_rule(tbl_name, default_action): binding = {'tbl_name': tbl_name, 'default_action': default_action} return read_template('template/commands/default_action.txt', binding) def add_table(tbl_name, matches='', actions='', tbl_size=1): binding = { 'tbl_name': tbl_name, 'matches' : matches, 'actions' : actions, 'tbl_size': tbl_size} return read_template('template/tables/table.txt', binding) def apply_table(tbl_name): return read_template('template/controls/apply_table.txt', {'tbl_name': tbl_name})
Apache License 2.0
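The helpers in the row above all reduce to `string.Template` substitution. A minimal standalone sketch of that mechanism follows; the template text here is invented, not the real `template/controls/ingress.txt`:

from string import Template

ingress_template = Template("""control ingress {
    apply($fwd_tbl);
    $applies
}""")

print(ingress_template.substitute({
    'fwd_tbl': 'forward_table',
    'applies': 'apply(table_0);',
}))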
huychau/drf-registration
drf_registration/api/register.py
ActivateView.get
python
def get(self, request, uidb64, token):
    user = get_user_from_uid(uidb64)

    if user and activation_token.check_token(user, token):
        set_user_verified(user)
        send_email_welcome(user)
        if drfr_settings.USER_ACTIVATE_SUCCESS_TEMPLATE:
            return render(request, drfr_settings.USER_ACTIVATE_SUCCESS_TEMPLATE)
        return HttpResponse(_('Your account has been activate successfully.'))

    if drfr_settings.USER_ACTIVATE_FAILED_TEMPLATE:
        return render(request, drfr_settings.USER_ACTIVATE_FAILED_TEMPLATE)
    return HttpResponse(
        _('Either the provided activation token is invalid or this account has already been activated.')
    )
Override to get the activation uid and token

Args:
    request (object): Request object
    uidb64 (string): The uid
    token (string): The user token
https://github.com/huychau/drf-registration/blob/5327a3373306280f4e114a181b02ecc177cf602f/drf_registration/api/register.py#L91-L117
from django.contrib.auth import password_validation from django.http import HttpResponse from django.shortcuts import render from django.utils.translation import gettext as _ from django.views import View from rest_framework import status from rest_framework.generics import CreateAPIView from rest_framework.response import Response from rest_framework.views import APIView from drf_registration.settings import drfr_settings from drf_registration.tokens import activation_token from drf_registration.utils.common import import_string, import_string_list from drf_registration.utils.domain import get_current_domain from drf_registration.utils.email import send_verify_email, send_email_welcome from drf_registration.utils.users import ( get_user_profile_data, get_user_serializer, has_user_activate_token, has_user_verify_code, set_user_verified, get_user_from_uid, ) class RegisterSerializer(get_user_serializer()): def validate_password(self, value): password_validation.validate_password(value, self.instance) return value def create(self, validated_data): user = super().create(validated_data) user.set_password(validated_data['password']) user_verified = not (has_user_activate_token() or has_user_verify_code()) set_user_verified(user, user_verified) user.save() return user class RegisterView(CreateAPIView): permission_classes = import_string_list(drfr_settings.REGISTER_PERMISSION_CLASSES) serializer_class = import_string(drfr_settings.REGISTER_SERIALIZER) def create(self, request, *args, **kwargs): serializer = self.serializer_class(data=request.data) serializer.is_valid(raise_exception=True) user = serializer.save() data = get_user_profile_data(user) domain = get_current_domain(request) if has_user_activate_token() or has_user_verify_code(): send_verify_email(user, domain) else: send_email_welcome(user) return Response(data, status=status.HTTP_201_CREATED) class VerifyView(APIView): class ActivateView(View):
MIT License
miyuchina/mistletoe
mistletoe/base_renderer.py
BaseRenderer.render_raw_text
python
def render_raw_text(self, token):
    return token.content
Default render method for RawText. Simply return token.content.
https://github.com/miyuchina/mistletoe/blob/c6cfd1a615cd4907ab37c2e653fade7613fe979a/mistletoe/base_renderer.py#L141-L145
import re import sys from mistletoe import block_token, span_token class BaseRenderer(object): _parse_name = re.compile(r"([A-Z][a-z]+|[A-Z]+(?![a-z]))") def __init__(self, *extras): self.render_map = { 'Strong': self.render_strong, 'Emphasis': self.render_emphasis, 'InlineCode': self.render_inline_code, 'RawText': self.render_raw_text, 'Strikethrough': self.render_strikethrough, 'Image': self.render_image, 'Link': self.render_link, 'AutoLink': self.render_auto_link, 'EscapeSequence': self.render_escape_sequence, 'Heading': self.render_heading, 'SetextHeading': self.render_heading, 'Quote': self.render_quote, 'Paragraph': self.render_paragraph, 'CodeFence': self.render_block_code, 'BlockCode': self.render_block_code, 'List': self.render_list, 'ListItem': self.render_list_item, 'Table': self.render_table, 'TableRow': self.render_table_row, 'TableCell': self.render_table_cell, 'ThematicBreak': self.render_thematic_break, 'LineBreak': self.render_line_break, 'Document': self.render_document, } self._extras = extras for token in extras: if issubclass(token, span_token.SpanToken): token_module = span_token else: token_module = block_token token_module.add_token(token) render_func = getattr(self, self._cls_to_func(token.__name__)) self.render_map[token.__name__] = render_func self.footnotes = {} def render(self, token): return self.render_map[token.__class__.__name__](token) def render_inner(self, token): return ''.join(map(self.render, token.children)) def __enter__(self): return self def __exit__(self, exception_type, exception_val, traceback): block_token.reset_tokens() span_token.reset_tokens() @classmethod def _cls_to_func(cls, cls_name): snake = '_'.join(map(str.lower, cls._parse_name.findall(cls_name))) return 'render_{}'.format(snake) @staticmethod def _tokens_from_module(module): return [getattr(module, name) for name in module.__all__]
MIT License
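The render_map dispatch used by `BaseRenderer` in the row above (token class name mapped to a bound method) can be reproduced with a tiny standalone renderer; the token class below is a stand-in, not mistletoe's:

class RawText:
    def __init__(self, content):
        self.content = content

class TinyRenderer:
    def __init__(self):
        # Map token class names to render methods, like BaseRenderer.render_map.
        self.render_map = {'RawText': self.render_raw_text}

    def render(self, token):
        return self.render_map[token.__class__.__name__](token)

    def render_raw_text(self, token):
        return token.content

print(TinyRenderer().render(RawText('hello')))   # hello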
surfriderfoundationeurope/mot
src/mot/object_detection/modeling/model_fpn.py
fpn_map_rois_to_levels
python
def fpn_map_rois_to_levels(boxes):
    sqrtarea = tf.sqrt(tf_area(boxes))
    level = tf.cast(tf.floor(
        4 + tf.log(sqrtarea * (1. / 224) + 1e-6) * (1.0 / np.log(2))), tf.int32)

    level_ids = [
        tf.where(level <= 2),
        tf.where(tf.equal(level, 3)),
        tf.where(tf.equal(level, 4)),
        tf.where(level >= 5)]
    level_ids = [tf.reshape(x, [-1], name='roi_level{}_id'.format(i + 2))
                 for i, x in enumerate(level_ids)]
    num_in_levels = [tf.size(x, name='num_roi_level{}'.format(i + 2))
                     for i, x in enumerate(level_ids)]
    add_moving_summary(*num_in_levels)

    level_boxes = [tf.gather(boxes, ids) for ids in level_ids]
    return level_ids, level_boxes
Assign boxes to level 2~5.

Args:
    boxes (nx4):

Returns:
    [tf.Tensor]: 4 tensors for level 2-5. Each tensor is a vector of indices of boxes in its level.
    [tf.Tensor]: 4 tensors, the gathered boxes in each level.

Be careful that the returned tensor could be empty.
https://github.com/surfriderfoundationeurope/mot/blob/3434955863767736486bdd45caf33656b594face/src/mot/object_detection/modeling/model_fpn.py#L72-L102
import itertools import numpy as np import tensorflow as tf from tensorpack.models import Conv2D, FixedUnPooling, MaxPooling, layer_register from tensorpack.tfutils.argscope import argscope from tensorpack.tfutils.scope_utils import under_name_scope from tensorpack.tfutils.summary import add_moving_summary from tensorpack.tfutils.tower import get_current_tower_context from tensorpack.utils.argtools import memoized from mot.object_detection.config import config as cfg from mot.object_detection.utils.box_ops import area as tf_area from mot.object_detection.modeling.backbone import GroupNorm from mot.object_detection.modeling.model_box import roi_align from mot.object_detection.modeling.model_rpn import generate_rpn_proposals, rpn_losses, get_all_anchors @layer_register(log_shape=True) def fpn_model(features): assert len(features) == 4, features num_channel = cfg.FPN.NUM_CHANNEL use_gn = cfg.FPN.NORM == 'GN' def upsample2x(name, x): try: resize = tf.compat.v2.image.resize_images with tf.name_scope(name): shp2d = tf.shape(x)[2:] x = tf.transpose(x, [0, 2, 3, 1]) x = resize(x, shp2d * 2, 'nearest') x = tf.transpose(x, [0, 3, 1, 2]) return x except AttributeError: return FixedUnPooling( name, x, 2, unpool_mat=np.ones((2, 2), dtype='float32'), data_format='channels_first') with argscope(Conv2D, data_format='channels_first', activation=tf.identity, use_bias=True, kernel_initializer=tf.variance_scaling_initializer(scale=1.)): lat_2345 = [Conv2D('lateral_1x1_c{}'.format(i + 2), c, num_channel, 1) for i, c in enumerate(features)] if use_gn: lat_2345 = [GroupNorm('gn_c{}'.format(i + 2), c) for i, c in enumerate(lat_2345)] lat_sum_5432 = [] for idx, lat in enumerate(lat_2345[::-1]): if idx == 0: lat_sum_5432.append(lat) else: lat = lat + upsample2x('upsample_lat{}'.format(6 - idx), lat_sum_5432[-1]) lat_sum_5432.append(lat) p2345 = [Conv2D('posthoc_3x3_p{}'.format(i + 2), c, num_channel, 3) for i, c in enumerate(lat_sum_5432[::-1])] if use_gn: p2345 = [GroupNorm('gn_p{}'.format(i + 2), c) for i, c in enumerate(p2345)] p6 = MaxPooling('maxpool_p6', p2345[-1], pool_size=1, strides=2, data_format='channels_first', padding='VALID') return p2345 + [p6] @under_name_scope()
MIT License
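The level assignment in `fpn_map_rois_to_levels` above is just floor(4 + log2(sqrt(area) / 224)). A NumPy sketch with made-up boxes shows which FPN level each box size lands on (the clip to [2, 5] mirrors the level <= 2 and level >= 5 buckets):

import numpy as np

def rois_to_levels(boxes):
    # boxes: (n, 4) as x1, y1, x2, y2
    w = boxes[:, 2] - boxes[:, 0]
    h = boxes[:, 3] - boxes[:, 1]
    sqrtarea = np.sqrt(w * h)
    level = np.floor(4 + np.log2(sqrtarea / 224 + 1e-6)).astype(int)
    return np.clip(level, 2, 5)      # levels below 2 / above 5 fold into the end buckets

boxes = np.array([[0, 0, 32, 32], [0, 0, 112, 112], [0, 0, 224, 224], [0, 0, 900, 900]], float)
print(rois_to_levels(boxes))         # [2 3 4 5]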
rhyssiyan/der-classil.pytorch
inclearn/convnet/resnet.py
resnet50
python
def resnet50(pretrained=False, **kwargs):
    model = ResNet(Bottleneck, [3, 4, 6, 3], **kwargs)
    if pretrained:
        model.load_state_dict(model_zoo.load_url(model_urls['resnet50']))
    return model
Constructs a ResNet-50 model.
https://github.com/rhyssiyan/der-classil.pytorch/blob/d711034c550bcac40a6ec7dfa1c65a79589efe93/inclearn/convnet/resnet.py#L215-L222
import torch.nn as nn import torch.utils.model_zoo as model_zoo from torch.nn import functional as F __all__ = ['ResNet', 'resnet18', 'resnet34', 'resnet50', 'resnet101', 'resnet152'] model_urls = { 'resnet18': 'https://download.pytorch.org/models/resnet18-5c106cde.pth', 'resnet34': 'https://download.pytorch.org/models/resnet34-333f7ec4.pth', 'resnet50': 'https://download.pytorch.org/models/resnet50-19c8e357.pth', 'resnet101': 'https://download.pytorch.org/models/resnet101-5d3b4d8f.pth', 'resnet152': 'https://download.pytorch.org/models/resnet152-b121ed2d.pth', } def conv3x3(in_planes, out_planes, stride=1): return nn.Conv2d(in_planes, out_planes, kernel_size=3, stride=stride, padding=1, bias=False) def conv1x1(in_planes, out_planes, stride=1): return nn.Conv2d(in_planes, out_planes, kernel_size=1, stride=stride, bias=False) class BasicBlock(nn.Module): expansion = 1 def __init__(self, inplanes, planes, stride=1, downsample=None, remove_last_relu=False): super(BasicBlock, self).__init__() self.conv1 = conv3x3(inplanes, planes, stride) self.bn1 = nn.BatchNorm2d(planes) self.relu = nn.ReLU(inplace=True) self.conv2 = conv3x3(planes, planes) self.bn2 = nn.BatchNorm2d(planes) self.downsample = downsample self.stride = stride self.remove_last_relu = remove_last_relu def forward(self, x): identity = x out = self.conv1(x) out = self.bn1(out) out = self.relu(out) out = self.conv2(out) out = self.bn2(out) if self.downsample is not None: identity = self.downsample(x) out += identity if not self.remove_last_relu: out = self.relu(out) return out class Bottleneck(nn.Module): expansion = 4 def __init__(self, inplanes, planes, stride=1, downsample=None): super(Bottleneck, self).__init__() self.conv1 = conv1x1(inplanes, planes) self.bn1 = nn.BatchNorm2d(planes) self.conv2 = conv3x3(planes, planes, stride) self.bn2 = nn.BatchNorm2d(planes) self.conv3 = conv1x1(planes, planes * self.expansion) self.bn3 = nn.BatchNorm2d(planes * self.expansion) self.relu = nn.ReLU(inplace=True) self.downsample = downsample self.stride = stride def forward(self, x): identity = x out = self.conv1(x) out = self.bn1(out) out = self.relu(out) out = self.conv2(out) out = self.bn2(out) out = self.relu(out) out = self.conv3(out) out = self.bn3(out) if self.downsample is not None: identity = self.downsample(x) out += identity out = self.relu(out) return out class ResNet(nn.Module): def __init__(self, block, layers, nf=64, zero_init_residual=True, dataset='cifar', start_class=0, remove_last_relu=False): super(ResNet, self).__init__() self.remove_last_relu = remove_last_relu self.inplanes = nf if 'cifar' in dataset: self.conv1 = nn.Sequential(nn.Conv2d(3, nf, kernel_size=3, stride=1, padding=1, bias=False), nn.BatchNorm2d(nf), nn.ReLU(inplace=True)) elif 'imagenet' in dataset: if start_class == 0: self.conv1 = nn.Sequential( nn.Conv2d(3, nf, kernel_size=7, stride=2, padding=3, bias=False), nn.BatchNorm2d(nf), nn.ReLU(inplace=True), nn.MaxPool2d(kernel_size=3, stride=2, padding=1), ) else: self.conv1 = nn.Sequential( nn.Conv2d(3, nf, kernel_size=3, stride=1, padding=1, bias=False), nn.BatchNorm2d(nf), nn.ReLU(inplace=True), nn.MaxPool2d(kernel_size=3, stride=2, padding=1), ) self.layer1 = self._make_layer(block, 1 * nf, layers[0]) self.layer2 = self._make_layer(block, 2 * nf, layers[1], stride=2) self.layer3 = self._make_layer(block, 4 * nf, layers[2], stride=2) self.layer4 = self._make_layer(block, 8 * nf, layers[3], stride=2, remove_last_relu=remove_last_relu) self.avgpool = nn.AdaptiveAvgPool2d((1, 1)) self.out_dim = 8 * nf * 
block.expansion for m in self.modules(): if isinstance(m, nn.Conv2d): nn.init.kaiming_normal_(m.weight, mode='fan_out', nonlinearity='relu') elif isinstance(m, nn.BatchNorm2d): nn.init.constant_(m.weight, 1) nn.init.constant_(m.bias, 0) if zero_init_residual: for m in self.modules(): if isinstance(m, Bottleneck): nn.init.constant_(m.bn3.weight, 0) elif isinstance(m, BasicBlock): nn.init.constant_(m.bn2.weight, 0) def _make_layer(self, block, planes, blocks, remove_last_relu=False, stride=1): downsample = None if stride != 1 or self.inplanes != planes * block.expansion: downsample = nn.Sequential( conv1x1(self.inplanes, planes * block.expansion, stride), nn.BatchNorm2d(planes * block.expansion), ) layers = [] layers.append(block(self.inplanes, planes, stride, downsample)) self.inplanes = planes * block.expansion if remove_last_relu: for i in range(1, blocks - 1): layers.append(block(self.inplanes, planes)) layers.append(block(self.inplanes, planes, remove_last_relu=True)) else: for _ in range(1, blocks): layers.append(block(self.inplanes, planes)) return nn.Sequential(*layers) def reset_bn(self): for m in self.modules(): if isinstance(m, nn.BatchNorm2d): m.reset_running_stats() def forward(self, x): x = self.conv1(x) x = self.layer1(x) x = self.layer2(x) x = self.layer3(x) x = self.layer4(x) x = self.avgpool(x) x = x.view(x.size(0), -1) return x def resnet18(pretrained=False, **kwargs): model = ResNet(BasicBlock, [2, 2, 2, 2], **kwargs) if pretrained: model.load_state_dict(model_zoo.load_url(model_urls['resnet18'])) return model def resnet34(pretrained=False, **kwargs): model = ResNet(BasicBlock, [3, 4, 6, 3], **kwargs) if pretrained: model.load_state_dict(model_zoo.load_url(model_urls['resnet34'])) return model
MIT License
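A hedged usage sketch for the `resnet50` constructor above. It assumes PyTorch is installed and that the file is importable as `inclearn.convnet.resnet`; with the defaults shown in the context, the network is a headless backbone whose output width is `model.out_dim`:

import torch
from inclearn.convnet.resnet import resnet50   # import path assumed from the row above

model = resnet50(pretrained=False, dataset='imagenet')
model.eval()
with torch.no_grad():
    features = model(torch.randn(2, 3, 224, 224))   # dummy batch of two images
print(features.shape, model.out_dim)                # expected: torch.Size([2, 2048]) 2048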
marcelm/xopen
src/xopen/__init__.py
PipedCompressionReader.__init__
python
def __init__(
    self,
    path,
    program_args: List[str],
    mode: str = "r",
    threads_flag: Optional[str] = None,
    threads: Optional[int] = None,
):
    if mode not in ('r', 'rt', 'rb'):
        raise ValueError("Mode is '{}', but it must be 'r', 'rt' or 'rb'".format(mode))
    self._program_args = program_args
    program_args = program_args + ['-cd', path]
    if threads_flag is not None:
        if threads is None:
            threads = 1
        program_args += [f"{threads_flag}{threads}"]
    self._threads = threads
    self.process = Popen(program_args, stdout=PIPE, stderr=PIPE)
    self.name = path
    assert self.process.stdout is not None
    _set_pipe_size_to_max(self.process.stdout.fileno())
    self._mode = mode
    if 'b' not in mode:
        self._file: IO = io.TextIOWrapper(self.process.stdout)
    else:
        self._file = self.process.stdout
    self.closed = False
    self._wait_for_output_or_process_exit()
    self._raise_if_error()
Raise an OSError when pigz could not be found.
https://github.com/marcelm/xopen/blob/793018655f41642a9037129338ce6ae8db289feb/src/xopen/__init__.py#L260-L300
__all__ = [ "xopen", "PipedGzipReader", "PipedGzipWriter", "PipedIGzipReader", "PipedIGzipWriter", "PipedPigzReader", "PipedPigzWriter", "PipedPBzip2Reader", "PipedPBzip2Writer", "PipedPythonIsalReader", "PipedPythonIsalWriter", "__version__", ] import gzip import sys import io import os import bz2 import lzma import stat import signal import pathlib import subprocess import tempfile import time from abc import ABC, abstractmethod from subprocess import Popen, PIPE, DEVNULL from typing import Optional, TextIO, AnyStr, IO, List, Set from ._version import version as __version__ try: from isal import igzip, isal_zlib except ImportError: igzip = None isal_zlib = None try: import fcntl if not hasattr(fcntl, "F_SETPIPE_SZ") and sys.platform == "linux": setattr(fcntl, "F_SETPIPE_SZ", 1031) except ImportError: fcntl = None _MAX_PIPE_SIZE_PATH = pathlib.Path("/proc/sys/fs/pipe-max-size") try: _MAX_PIPE_SIZE = int(_MAX_PIPE_SIZE_PATH.read_text()) except OSError: _MAX_PIPE_SIZE = None def _available_cpu_count() -> int: try: return len(os.sched_getaffinity(0)) except AttributeError: pass import re try: with open('/proc/self/status') as f: status = f.read() m = re.search(r'(?m)^Cpus_allowed:\s*(.*)$', status) if m: res = bin(int(m.group(1).replace(',', ''), 16)).count('1') if res > 0: return res except OSError: pass try: import multiprocessing return multiprocessing.cpu_count() except (ImportError, NotImplementedError): return 1 def _set_pipe_size_to_max(fd: int) -> None: if not hasattr(fcntl, "F_SETPIPE_SZ") or not _MAX_PIPE_SIZE: return try: fcntl.fcntl(fd, fcntl.F_SETPIPE_SZ, _MAX_PIPE_SIZE) except OSError: pass def _can_read_concatenated_gz(program: str) -> bool: fd, temp_path = tempfile.mkstemp(suffix=".gz", prefix="xopen.") try: with open(temp_path, "wb") as temp_file: temp_file.write(gzip.compress(b"AB") + gzip.compress(b"CD")) try: result = subprocess.run([program, "-c", "-d", temp_path], check=True, stderr=PIPE, stdout=PIPE) return result.stdout == b"ABCD" except subprocess.CalledProcessError: return False finally: os.close(fd) os.remove(temp_path) class Closing(ABC): def __enter__(self): return self def __exit__(self, *exc_info): self.close() def __del__(self): try: self.close() except Exception: pass @abstractmethod def close(self): class PipedCompressionWriter(Closing): def __init__(self, path, program_args: List[str], mode='wt', compresslevel: Optional[int] = None, threads_flag: Optional[str] = None, threads: Optional[int] = None): if mode not in ('w', 'wt', 'wb', 'a', 'at', 'ab'): raise ValueError( "Mode is '{}', but it must be 'w', 'wt', 'wb', 'a', 'at' or 'ab'".format(mode)) self.outfile = open(path, mode) self.closed: bool = False self.name: str = path self._mode: str = mode self._program_args: List[str] = program_args self._threads_flag: Optional[str] = threads_flag if threads is None: threads = min(_available_cpu_count(), 4) self._threads = threads try: self.process = self._open_process( mode, compresslevel, threads, self.outfile) except OSError: self.outfile.close() raise assert self.process.stdin is not None _set_pipe_size_to_max(self.process.stdin.fileno()) if 'b' not in mode: self._file = io.TextIOWrapper(self.process.stdin) else: self._file = self.process.stdin def __repr__(self): return "{}('{}', mode='{}', program='{}', threads={})".format( self.__class__.__name__, self.name, self._mode, " ".join(self._program_args), self._threads, ) def _open_process( self, mode: str, compresslevel: Optional[int], threads: int, outfile: TextIO, ) -> Popen: program_args: List[str] = 
self._program_args[:] if threads != 0 and self._threads_flag is not None: program_args += [f"{self._threads_flag}{threads}"] extra_args = [] if 'w' in mode and compresslevel is not None: extra_args += ['-' + str(compresslevel)] kwargs = dict(stdin=PIPE, stdout=outfile, stderr=DEVNULL) if sys.platform != 'win32': kwargs['close_fds'] = True process = Popen(program_args + extra_args, **kwargs) return process def write(self, arg: AnyStr) -> None: self._file.write(arg) def close(self) -> None: if self.closed: return self.closed = True self._file.close() retcode = self.process.wait() self.outfile.close() if retcode != 0: raise OSError( "Output {} process terminated with exit code {}".format( " ".join(self._program_args), retcode)) def __iter__(self): return self def __next__(self): raise io.UnsupportedOperation('not readable') class PipedCompressionReader(Closing): _allowed_exit_code: Optional[int] = -signal.SIGTERM _allowed_exit_message: Optional[bytes] = None
MIT License
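The context above contains the full PipedCompressionWriter implementation, whose constructor signature is visible there. A minimal usage sketch, assuming an external gzip executable is on PATH; the output path is illustrative:

# Hedged sketch: pipe data through an external compressor via PipedCompressionWriter.
# Assumes a `gzip` binary on PATH; "example.txt.gz" is a made-up path.
from xopen import PipedCompressionWriter

with PipedCompressionWriter("example.txt.gz", ["gzip"], mode="wt") as writer:
    writer.write("hello world\n")
# Closing (here via the Closing base class's context manager) waits for the compressor
# process and raises OSError if it exited with a non-zero status.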
fichtefoll/filehistory
file_history.py
FileHistory.__init__
python
def __init__(self):
    self.__load_settings()
    self.__load_history()
    self.__clear_context()

    if self.DELETE_ALL_ON_STARTUP:
        sublime.set_timeout_async(lambda: self.delete_all_history(), 0)
    elif self.CLEANUP_ON_STARTUP:
        sublime.set_timeout_async(lambda: self.clean_history(False), 0)
Class to manage the file-access history
https://github.com/fichtefoll/filehistory/blob/afb0fdeaeb21ee5dd87b384c2fafabd7d206319f/file_history.py#L33-L42
import os
import hashlib
import json
import time
import re
import shutil
import glob
from textwrap import dedent

import sublime
import sublime_plugin


class Singleton(type):
    _instance = None

    def __call__(cls, *args, **kwargs):
        if not cls._instance:
            cls._instance = super(Singleton, cls).__call__(*args, **kwargs)
        return cls._instance


class FileHistory(metaclass=Singleton):
    SETTINGS_CALLBACK_KEY = 'FileHistory-reload'
    PRINT_DEBUG = False
    SETTINGS_FILE = 'FileHistory.sublime-settings'
    INDENT_SIZE = 2
    DEFAULT_TIMESTAMP_FORMAT = '%Y-%m-%d @ %H:%M:%S'
    OLD_DEFAULT_TIMESTAMP_FORMAT = '%Y-%m-%d %H:%M:%S'
MIT License
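FileHistory is built on the Singleton metaclass shown in the context, so repeated construction always returns the same instance. A standalone sketch of that pattern, using a stand-in class because FileHistory itself needs the Sublime Text API:

# Stand-in demonstration of the Singleton metaclass used by FileHistory.
# `Example` is hypothetical; FileHistory itself requires the `sublime` module.
class Singleton(type):
    _instance = None

    def __call__(cls, *args, **kwargs):
        if not cls._instance:
            cls._instance = super(Singleton, cls).__call__(*args, **kwargs)
        return cls._instance


class Example(metaclass=Singleton):
    pass


assert Example() is Example()  # every call yields the one shared instance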
datastax/python-driver
tests/unit/test_types.py
DateRangeDeserializationTests._deserialize_date_range
python
def _deserialize_date_range(self, truncate_kwargs, precision,
                            round_up_truncated_upper_value, increment_loop_variable):
    def truncate_date(number):
        dt = datetime.datetime.fromtimestamp(number / 1000.0, tz=utc_timezone)
        dt = dt.replace(**truncate_kwargs)
        return round((dt - self.epoch).total_seconds() * 1000.0)

    for i in range(1000):
        lower_value = increment_loop_variable(self.starting_lower_value, i)
        upper_value = increment_loop_variable(self.starting_upper_value, i)
        dr = DateRange(DateRangeBound(lower_value, precision),
                       DateRangeBound(upper_value, precision))
        self.assertEqual(truncate_date(lower_value), dr.lower_bound.milliseconds)
        upper_value = round_up_truncated_upper_value(truncate_date(upper_value))
        self.assertEqual(upper_value, dr.upper_bound.milliseconds)
This function iterates over several DateRange objects determined by lower_value and upper_value,
which are given as values representing milliseconds since the epoch. We want to make sure the
lower_value is correctly rounded down and the upper_value is correctly rounded up.

In the case of rounding down, we verify that the rounded-down value has the appropriate fields set
to the minimum they could possibly have, that is 1 for months, 1 for days, 0 for hours, 0 for
minutes, 0 for seconds, 0 for microseconds. We use the generic function truncate_date, which
depends on truncate_kwargs, for this.

In the case of rounding up, we verify that the rounded-up value has the appropriate fields set to
the maximum they could possibly have. This is calculated by round_up_truncated_upper_value, whose
input is the truncated value from before. It is passed as an argument because the way of
calculating it is different for every precision.

:param truncate_kwargs: determine what values to truncate in truncate_date
:param precision: :class:`~util.DateRangePrecision`
:param round_up_truncated_upper_value: a function that gets a truncated date and returns a new
    date with some fields set to the maximum possible value
:param increment_loop_variable: a function that, given a starting value and the iteration value,
    returns a new date to serve as lower_bound/upper_bound. We need this because the value by
    which dates are incremented depends on whether the precision is seconds, minutes, hours,
    days or months
:return:
https://github.com/datastax/python-driver/blob/12a8adce943fe37a05ad6580e8bd302b65c2d93a/tests/unit/test_types.py#L766-L816
try: import unittest2 as unittest except ImportError: import unittest import datetime import tempfile import time from binascii import unhexlify import six import cassandra from cassandra import util from cassandra.cqltypes import ( CassandraType, DateRangeType, DateType, DecimalType, EmptyValue, LongType, SetType, UTF8Type, cql_typename, int8_pack, int64_pack, lookup_casstype, lookup_casstype_simple, parse_casstype_args, int32_pack, Int32Type, ListType, MapType ) from cassandra.encoder import cql_quote from cassandra.pool import Host from cassandra.metadata import Token from cassandra.policies import ConvictionPolicy, SimpleConvictionPolicy from cassandra.protocol import ( read_inet, read_longstring, read_string, read_stringmap, write_inet, write_longstring, write_string, write_stringmap ) from cassandra.query import named_tuple_factory from cassandra.util import ( OPEN_BOUND, Date, DateRange, DateRangeBound, DateRangePrecision, Time, ms_timestamp_from_datetime, datetime_from_timestamp ) from tests.unit.util import check_sequence_consistency class TypeTests(unittest.TestCase): def test_lookup_casstype_simple(self): self.assertEqual(lookup_casstype_simple('AsciiType'), cassandra.cqltypes.AsciiType) self.assertEqual(lookup_casstype_simple('LongType'), cassandra.cqltypes.LongType) self.assertEqual(lookup_casstype_simple('BytesType'), cassandra.cqltypes.BytesType) self.assertEqual(lookup_casstype_simple('BooleanType'), cassandra.cqltypes.BooleanType) self.assertEqual(lookup_casstype_simple('CounterColumnType'), cassandra.cqltypes.CounterColumnType) self.assertEqual(lookup_casstype_simple('DecimalType'), cassandra.cqltypes.DecimalType) self.assertEqual(lookup_casstype_simple('DoubleType'), cassandra.cqltypes.DoubleType) self.assertEqual(lookup_casstype_simple('FloatType'), cassandra.cqltypes.FloatType) self.assertEqual(lookup_casstype_simple('InetAddressType'), cassandra.cqltypes.InetAddressType) self.assertEqual(lookup_casstype_simple('Int32Type'), cassandra.cqltypes.Int32Type) self.assertEqual(lookup_casstype_simple('UTF8Type'), cassandra.cqltypes.UTF8Type) self.assertEqual(lookup_casstype_simple('DateType'), cassandra.cqltypes.DateType) self.assertEqual(lookup_casstype_simple('SimpleDateType'), cassandra.cqltypes.SimpleDateType) self.assertEqual(lookup_casstype_simple('ByteType'), cassandra.cqltypes.ByteType) self.assertEqual(lookup_casstype_simple('ShortType'), cassandra.cqltypes.ShortType) self.assertEqual(lookup_casstype_simple('TimeUUIDType'), cassandra.cqltypes.TimeUUIDType) self.assertEqual(lookup_casstype_simple('TimeType'), cassandra.cqltypes.TimeType) self.assertEqual(lookup_casstype_simple('UUIDType'), cassandra.cqltypes.UUIDType) self.assertEqual(lookup_casstype_simple('IntegerType'), cassandra.cqltypes.IntegerType) self.assertEqual(lookup_casstype_simple('MapType'), cassandra.cqltypes.MapType) self.assertEqual(lookup_casstype_simple('ListType'), cassandra.cqltypes.ListType) self.assertEqual(lookup_casstype_simple('SetType'), cassandra.cqltypes.SetType) self.assertEqual(lookup_casstype_simple('CompositeType'), cassandra.cqltypes.CompositeType) self.assertEqual(lookup_casstype_simple('ColumnToCollectionType'), cassandra.cqltypes.ColumnToCollectionType) self.assertEqual(lookup_casstype_simple('ReversedType'), cassandra.cqltypes.ReversedType) self.assertEqual(lookup_casstype_simple('DurationType'), cassandra.cqltypes.DurationType) self.assertEqual(lookup_casstype_simple('DateRangeType'), cassandra.cqltypes.DateRangeType) self.assertEqual(str(lookup_casstype_simple('unknown')), 
str(cassandra.cqltypes.mkUnrecognizedType('unknown'))) def test_lookup_casstype(self): self.assertEqual(lookup_casstype('AsciiType'), cassandra.cqltypes.AsciiType) self.assertEqual(lookup_casstype('LongType'), cassandra.cqltypes.LongType) self.assertEqual(lookup_casstype('BytesType'), cassandra.cqltypes.BytesType) self.assertEqual(lookup_casstype('BooleanType'), cassandra.cqltypes.BooleanType) self.assertEqual(lookup_casstype('CounterColumnType'), cassandra.cqltypes.CounterColumnType) self.assertEqual(lookup_casstype('DateType'), cassandra.cqltypes.DateType) self.assertEqual(lookup_casstype('DecimalType'), cassandra.cqltypes.DecimalType) self.assertEqual(lookup_casstype('DoubleType'), cassandra.cqltypes.DoubleType) self.assertEqual(lookup_casstype('FloatType'), cassandra.cqltypes.FloatType) self.assertEqual(lookup_casstype('InetAddressType'), cassandra.cqltypes.InetAddressType) self.assertEqual(lookup_casstype('Int32Type'), cassandra.cqltypes.Int32Type) self.assertEqual(lookup_casstype('UTF8Type'), cassandra.cqltypes.UTF8Type) self.assertEqual(lookup_casstype('DateType'), cassandra.cqltypes.DateType) self.assertEqual(lookup_casstype('TimeType'), cassandra.cqltypes.TimeType) self.assertEqual(lookup_casstype('ByteType'), cassandra.cqltypes.ByteType) self.assertEqual(lookup_casstype('ShortType'), cassandra.cqltypes.ShortType) self.assertEqual(lookup_casstype('TimeUUIDType'), cassandra.cqltypes.TimeUUIDType) self.assertEqual(lookup_casstype('UUIDType'), cassandra.cqltypes.UUIDType) self.assertEqual(lookup_casstype('IntegerType'), cassandra.cqltypes.IntegerType) self.assertEqual(lookup_casstype('MapType'), cassandra.cqltypes.MapType) self.assertEqual(lookup_casstype('ListType'), cassandra.cqltypes.ListType) self.assertEqual(lookup_casstype('SetType'), cassandra.cqltypes.SetType) self.assertEqual(lookup_casstype('CompositeType'), cassandra.cqltypes.CompositeType) self.assertEqual(lookup_casstype('ColumnToCollectionType'), cassandra.cqltypes.ColumnToCollectionType) self.assertEqual(lookup_casstype('ReversedType'), cassandra.cqltypes.ReversedType) self.assertEqual(lookup_casstype('DurationType'), cassandra.cqltypes.DurationType) self.assertEqual(lookup_casstype('DateRangeType'), cassandra.cqltypes.DateRangeType) self.assertEqual(str(lookup_casstype('unknown')), str(cassandra.cqltypes.mkUnrecognizedType('unknown'))) self.assertRaises(ValueError, lookup_casstype, 'AsciiType~') def test_casstype_parameterized(self): self.assertEqual(LongType.cass_parameterized_type_with(()), 'LongType') self.assertEqual(LongType.cass_parameterized_type_with((), full=True), 'org.apache.cassandra.db.marshal.LongType') self.assertEqual(SetType.cass_parameterized_type_with([DecimalType], full=True), 'org.apache.cassandra.db.marshal.SetType(org.apache.cassandra.db.marshal.DecimalType)') self.assertEqual(LongType.cql_parameterized_type(), 'bigint') subtypes = (cassandra.cqltypes.UTF8Type, cassandra.cqltypes.UTF8Type) self.assertEqual('map<text, text>', cassandra.cqltypes.MapType.apply_parameters(subtypes).cql_parameterized_type()) def test_datetype_from_string(self): for format in cassandra.cqltypes.cql_timestamp_formats: date_string = str(datetime.datetime.now().strftime(format)) cassandra.cqltypes.DateType.interpret_datestring(date_string) def test_cql_typename(self): self.assertEqual(cql_typename('DateType'), 'timestamp') self.assertEqual(cql_typename('org.apache.cassandra.db.marshal.ListType(IntegerType)'), 'list<varint>') def test_named_tuple_colname_substitution(self): colnames = ("func(abc)", "[applied]", 
"func(func(abc))", "foo_bar", "foo_bar_") rows = [(1, 2, 3, 4, 5)] result = named_tuple_factory(colnames, rows)[0] self.assertEqual(result[0], result.func_abc) self.assertEqual(result[1], result.applied) self.assertEqual(result[2], result.func_func_abc) self.assertEqual(result[3], result.foo_bar) self.assertEqual(result[4], result.foo_bar_) def test_parse_casstype_args(self): class FooType(CassandraType): typename = 'org.apache.cassandra.db.marshal.FooType' def __init__(self, subtypes, names): self.subtypes = subtypes self.names = names @classmethod def apply_parameters(cls, subtypes, names): return cls(subtypes, [unhexlify(six.b(name)) if name is not None else name for name in names]) class BarType(FooType): typename = 'org.apache.cassandra.db.marshal.BarType' ctype = parse_casstype_args(''.join(( 'org.apache.cassandra.db.marshal.FooType(', '63697479:org.apache.cassandra.db.marshal.UTF8Type,', 'BarType(61646472657373:org.apache.cassandra.db.marshal.UTF8Type),', '7a6970:org.apache.cassandra.db.marshal.UTF8Type', ')'))) self.assertEqual(FooType, ctype.__class__) self.assertEqual(UTF8Type, ctype.subtypes[0]) self.assertIsInstance(ctype.subtypes[1], BarType) self.assertEqual([UTF8Type], ctype.subtypes[1].subtypes) self.assertEqual([b"address"], ctype.subtypes[1].names) self.assertEqual(UTF8Type, ctype.subtypes[2]) self.assertEqual([b'city', None, b'zip'], ctype.names) def test_empty_value(self): self.assertEqual(str(EmptyValue()), 'EMPTY') def test_datetype(self): now_time_seconds = time.time() now_datetime = datetime.datetime.utcfromtimestamp(now_time_seconds) now_timestamp = now_time_seconds * 1e3 self.assertEqual(DateType.serialize(now_datetime, 0), DateType.serialize(now_timestamp, 0)) expected = 0 self.assertEqual(DateType.deserialize(int64_pack(1000 * expected), 0), datetime.datetime.utcfromtimestamp(expected)) expected = 2 ** 33 self.assertEqual(DateType.deserialize(int64_pack(1000 * expected), 0), datetime.datetime(2242, 3, 16, 12, 56, 32)) expected = -770172256 self.assertEqual(DateType.deserialize(int64_pack(1000 * expected), 0), datetime.datetime(1945, 8, 5, 23, 15, 44)) expected = 1424817268.274 self.assertEqual(DateType.deserialize(int64_pack(int(1000 * expected)), 0), datetime.datetime(2015, 2, 24, 22, 34, 28, 274000)) expected = 2177403010.123 self.assertEqual(DateType.deserialize(int64_pack(int(1000 * expected)), 0), datetime.datetime(2038, 12, 31, 10, 10, 10, 123000)) def test_collection_null_support(self): int_list = ListType.apply_parameters([Int32Type]) value = ( int32_pack(2) + int32_pack(-1) + int32_pack(4) + int32_pack(42) ) self.assertEqual( [None, 42], int_list.deserialize(value, 3) ) set_list = SetType.apply_parameters([Int32Type]) self.assertEqual( {None, 42}, set(set_list.deserialize(value, 3)) ) value = ( int32_pack(2) + int32_pack(4) + int32_pack(42) + int32_pack(-1) + int32_pack(-1) + int32_pack(4) + int32_pack(42) ) map_list = MapType.apply_parameters([Int32Type, Int32Type]) self.assertEqual( [(42, None), (None, 42)], map_list.deserialize(value, 3)._items ) def test_write_read_string(self): with tempfile.TemporaryFile() as f: value = u'test' write_string(f, value) f.seek(0) self.assertEqual(read_string(f), value) def test_write_read_longstring(self): with tempfile.TemporaryFile() as f: value = u'test' write_longstring(f, value) f.seek(0) self.assertEqual(read_longstring(f), value) def test_write_read_stringmap(self): with tempfile.TemporaryFile() as f: value = {'key': 'value'} write_stringmap(f, value) f.seek(0) self.assertEqual(read_stringmap(f), value) def 
test_write_read_inet(self): with tempfile.TemporaryFile() as f: value = ('192.168.1.1', 9042) write_inet(f, value) f.seek(0) self.assertEqual(read_inet(f), value) with tempfile.TemporaryFile() as f: value = ('2001:db8:0:f101::1', 9042) write_inet(f, value) f.seek(0) self.assertEqual(read_inet(f), value) def test_cql_quote(self): self.assertEqual(cql_quote(u'test'), "'test'") self.assertEqual(cql_quote('test'), "'test'") self.assertEqual(cql_quote(0), '0') ZERO = datetime.timedelta(0) class UTC(datetime.tzinfo): def utcoffset(self, dt): return ZERO def tzname(self, dt): return "UTC" def dst(self, dt): return ZERO try: utc_timezone = datetime.timezone.utc except AttributeError: utc_timezone = UTC() class DateRangeTypeTests(unittest.TestCase): dt = datetime.datetime(1990, 2, 3, 13, 58, 45, 777777) timestamp = 1485963732404 def test_month_rounding_creation_failure(self): feb_stamp = ms_timestamp_from_datetime( datetime.datetime(2018, 2, 25, 18, 59, 59, 0) ) dr = DateRange(OPEN_BOUND, DateRangeBound(feb_stamp, DateRangePrecision.MONTH)) dt = datetime_from_timestamp(dr.upper_bound.milliseconds / 1000) self.assertEqual(dt.day, 28) feb_stamp_leap_year = ms_timestamp_from_datetime( datetime.datetime(2016, 2, 25, 18, 59, 59, 0) ) dr = DateRange(OPEN_BOUND, DateRangeBound(feb_stamp_leap_year, DateRangePrecision.MONTH)) dt = datetime_from_timestamp(dr.upper_bound.milliseconds / 1000) self.assertEqual(dt.day, 29) def test_decode_precision(self): self.assertEqual(DateRangeType._decode_precision(6), 'MILLISECOND') def test_decode_precision_error(self): with self.assertRaises(ValueError): DateRangeType._decode_precision(-1) def test_encode_precision(self): self.assertEqual(DateRangeType._encode_precision('SECOND'), 5) def test_encode_precision_error(self): with self.assertRaises(ValueError): DateRangeType._encode_precision('INVALID') def test_deserialize_single_value(self): serialized = (int8_pack(0) + int64_pack(self.timestamp) + int8_pack(3)) self.assertEqual( DateRangeType.deserialize(serialized, 5), util.DateRange(value=util.DateRangeBound( value=datetime.datetime(2017, 2, 1, 15, 42, 12, 404000), precision='HOUR') ) ) def test_deserialize_closed_range(self): serialized = (int8_pack(1) + int64_pack(self.timestamp) + int8_pack(2) + int64_pack(self.timestamp) + int8_pack(6)) self.assertEqual( DateRangeType.deserialize(serialized, 5), util.DateRange( lower_bound=util.DateRangeBound( value=datetime.datetime(2017, 2, 1, 0, 0), precision='DAY' ), upper_bound=util.DateRangeBound( value=datetime.datetime(2017, 2, 1, 15, 42, 12, 404000), precision='MILLISECOND' ) ) ) def test_deserialize_open_high(self): serialized = (int8_pack(2) + int64_pack(self.timestamp) + int8_pack(3)) deserialized = DateRangeType.deserialize(serialized, 5) self.assertEqual( deserialized, util.DateRange( lower_bound=util.DateRangeBound( value=datetime.datetime(2017, 2, 1, 15, 0), precision='HOUR' ), upper_bound=util.OPEN_BOUND ) ) def test_deserialize_open_low(self): serialized = (int8_pack(3) + int64_pack(self.timestamp) + int8_pack(4)) deserialized = DateRangeType.deserialize(serialized, 5) self.assertEqual( deserialized, util.DateRange( lower_bound=util.OPEN_BOUND, upper_bound=util.DateRangeBound( value=datetime.datetime(2017, 2, 1, 15, 42, 20, 1000), precision='MINUTE' ) ) ) def test_deserialize_single_open(self): self.assertEqual( util.DateRange(value=util.OPEN_BOUND), DateRangeType.deserialize(int8_pack(5), 5) ) def test_serialize_single_value(self): serialized = (int8_pack(0) + int64_pack(self.timestamp) + int8_pack(5)) 
deserialized = DateRangeType.deserialize(serialized, 5) self.assertEqual( deserialized, util.DateRange( value=util.DateRangeBound( value=datetime.datetime(2017, 2, 1, 15, 42, 12), precision='SECOND' ) ) ) def test_serialize_closed_range(self): serialized = (int8_pack(1) + int64_pack(self.timestamp) + int8_pack(5) + int64_pack(self.timestamp) + int8_pack(0)) deserialized = DateRangeType.deserialize(serialized, 5) self.assertEqual( deserialized, util.DateRange( lower_bound=util.DateRangeBound( value=datetime.datetime(2017, 2, 1, 15, 42, 12), precision='SECOND' ), upper_bound=util.DateRangeBound( value=datetime.datetime(2017, 12, 31), precision='YEAR' ) ) ) def test_serialize_open_high(self): serialized = (int8_pack(2) + int64_pack(self.timestamp) + int8_pack(2)) deserialized = DateRangeType.deserialize(serialized, 5) self.assertEqual( deserialized, util.DateRange( lower_bound=util.DateRangeBound( value=datetime.datetime(2017, 2, 1), precision='DAY' ), upper_bound=util.OPEN_BOUND ) ) def test_serialize_open_low(self): serialized = (int8_pack(2) + int64_pack(self.timestamp) + int8_pack(3)) deserialized = DateRangeType.deserialize(serialized, 5) self.assertEqual( deserialized, util.DateRange( lower_bound=util.DateRangeBound( value=datetime.datetime(2017, 2, 1, 15), precision='HOUR' ), upper_bound=util.OPEN_BOUND ) ) def test_deserialize_both_open(self): serialized = (int8_pack(4)) deserialized = DateRangeType.deserialize(serialized, 5) self.assertEqual( deserialized, util.DateRange( lower_bound=util.OPEN_BOUND, upper_bound=util.OPEN_BOUND ) ) def test_serialize_single_open(self): serialized = DateRangeType.serialize(util.DateRange( value=util.OPEN_BOUND, ), 5) self.assertEqual(int8_pack(5), serialized) def test_serialize_both_open(self): serialized = DateRangeType.serialize(util.DateRange( lower_bound=util.OPEN_BOUND, upper_bound=util.OPEN_BOUND ), 5) self.assertEqual(int8_pack(4), serialized) def test_failure_to_serialize_no_value_object(self): self.assertRaises(ValueError, DateRangeType.serialize, object(), 5) def test_failure_to_serialize_no_bounds_object(self): class no_bounds_object(object): value = lower_bound = None self.assertRaises(ValueError, DateRangeType.serialize, no_bounds_object, 5) def test_serialized_value_round_trip(self): vals = [six.b('\x01\x00\x00\x01%\xe9a\xf9\xd1\x06\x00\x00\x01v\xbb>o\xff\x00'), six.b('\x01\x00\x00\x00\xdcm\x03-\xd1\x06\x00\x00\x01v\xbb>o\xff\x00')] for serialized in vals: self.assertEqual( serialized, DateRangeType.serialize(DateRangeType.deserialize(serialized, 0), 0) ) def test_serialize_zero_datetime(self): DateRangeType.serialize(util.DateRange( lower_bound=(datetime.datetime(1970, 1, 1), 'YEAR'), upper_bound=(datetime.datetime(1970, 1, 1), 'YEAR') ), 5) def test_deserialize_zero_datetime(self): DateRangeType.deserialize( (int8_pack(1) + int64_pack(0) + int8_pack(0) + int64_pack(0) + int8_pack(0)), 5 ) class DateRangeDeserializationTests(unittest.TestCase): starting_lower_value = 1514744108923 starting_upper_value = 2148761288922 epoch = datetime.datetime(1970, 1, 1, tzinfo=utc_timezone) def test_deserialize_date_range_milliseconds(self): for i in range(1000): lower_value = self.starting_lower_value + i upper_value = self.starting_upper_value + i dr = DateRange(DateRangeBound(lower_value, DateRangePrecision.MILLISECOND), DateRangeBound(upper_value, DateRangePrecision.MILLISECOND)) self.assertEqual(lower_value, dr.lower_bound.milliseconds) self.assertEqual(upper_value, dr.upper_bound.milliseconds) def test_deserialize_date_range_seconds(self): def 
truncate_last_figures(number, n=3): return int(str(number)[:-n] + '0' * n) for i in range(1000): lower_value = self.starting_lower_value + i * 900 upper_value = self.starting_upper_value + i * 900 dr = DateRange(DateRangeBound(lower_value, DateRangePrecision.SECOND), DateRangeBound(upper_value, DateRangePrecision.SECOND)) self.assertEqual(truncate_last_figures(lower_value), dr.lower_bound.milliseconds) upper_value = truncate_last_figures(upper_value) + 999 self.assertEqual(upper_value, dr.upper_bound.milliseconds) def test_deserialize_date_range_minutes(self): self._deserialize_date_range({"second": 0, "microsecond": 0}, DateRangePrecision.MINUTE, lambda x: x + 59 * 1000 + 999, lambda original_value, i: original_value + i * 900 * 50) def test_deserialize_date_range_hours(self): self._deserialize_date_range({"minute": 0, "second": 0, "microsecond": 0}, DateRangePrecision.HOUR, lambda x: x + 59 * 60 * 1000 + 59 * 1000 + 999, lambda original_value, i: original_value + i * 900 * 50 * 60) def test_deserialize_date_range_day(self): self._deserialize_date_range({"hour": 0, "minute": 0, "second": 0, "microsecond": 0}, DateRangePrecision.DAY, lambda x: x + 23 * 60 * 60 * 1000 + 59 * 60 * 1000 + 59 * 1000 + 999, lambda original_value, i: original_value + i * 900 * 50 * 60 * 24) @unittest.skip("This is currently failig, see PYTHON-912") def test_deserialize_date_range_month(self): def get_upper_bound(seconds): dt = datetime.datetime.fromtimestamp(seconds / 1000.0, tz=utc_timezone) dt = dt + datetime.timedelta(days=32) dt = dt.replace(day=1) - datetime.timedelta(microseconds=1) return int((dt - self.epoch).total_seconds() * 1000) self._deserialize_date_range({"day": 1, "hour": 0, "minute": 0, "second": 0, "microsecond": 0}, DateRangePrecision.MONTH, get_upper_bound, lambda original_value, i: original_value + i * 900 * 50 * 60 * 24 * 30) def test_deserialize_date_range_year(self): def get_upper_bound(seconds): dt = datetime.datetime.fromtimestamp(seconds / 1000.0, tz=utc_timezone) dt = dt + datetime.timedelta(days=370) dt = dt.replace(day=1) - datetime.timedelta(microseconds=1) diff = time.mktime(dt.timetuple()) - time.mktime(self.epoch.timetuple()) return diff * 1000 + 999 self._deserialize_date_range({"month": 1, "day": 1, "hour": 0, "minute": 0, "second": 0, "microsecond": 0}, DateRangePrecision.YEAR, get_upper_bound, lambda original_value, i: original_value + i * 900 * 50 * 60 * 24 * 30 * 12 * 7)
Apache License 2.0
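A standalone sketch of the rounding behaviour that this helper asserts, here for MINUTE precision; the two starting values are taken from the test class above, and the exact rounded results depend on which minute they fall into:

from cassandra.util import DateRange, DateRangeBound, DateRangePrecision

# Illustration of what the helper checks for MINUTE precision: the lower bound is
# truncated to the start of its minute, the upper bound is rounded up to :59.999.
lower = 1514744108923   # instants in milliseconds since the epoch (from the test class)
upper = 2148761288922

dr = DateRange(DateRangeBound(lower, DateRangePrecision.MINUTE),
               DateRangeBound(upper, DateRangePrecision.MINUTE))

dr.lower_bound.milliseconds   # -> lower, rounded down to :00.000 of its minute
dr.upper_bound.milliseconds   # -> upper, rounded up to :59.999 of its minute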
tylerbutler/engineer
engineer/plugins/core.py
JinjaEnvironmentPlugin.get_globals
python
def get_globals(cls):
    return cls.globals
If required, subclasses can override this method to return a dict of functions to add to the Jinja environment globally. The default implementation simply returns :attr:`~engineer.plugins.JinjaEnvironmentPlugin.globals`.
https://github.com/tylerbutler/engineer/blob/1fdcae512a828ea681be8c469f6863b974260614/engineer/plugins/core.py#L307-L313
import logging __author__ = 'Tyler Butler <[email protected]>' def find_plugins(entrypoint): try: import pkg_resources except ImportError: pkg_resources = None if pkg_resources is None: return for entrypoint in pkg_resources.iter_entry_points(entrypoint): yield entrypoint.name, entrypoint.load() def load_plugins(): from engineer.plugins import bundled for name, module in find_plugins('engineer.plugins'): pass def get_all_plugin_types(): return ThemeProvider, PostProcessor, CommandPlugin, JinjaEnvironmentPlugin class PluginMount(type): def __init__(cls, name, bases, attrs): if not hasattr(cls, 'plugins'): cls.plugins = [] else: cls.plugins.append(cls) class PluginMixin(object): @classmethod def get_name(cls): return '.'.join([cls.__module__, cls.__name__]) @classmethod def get_logger(cls): return logging.getLogger(cls.get_name()) @classmethod def handle_settings(cls, config_dict, settings): return config_dict class ThemeProvider(PluginMixin): __metaclass__ = PluginMount paths = () class PostProcessor(PluginMixin): __metaclass__ = PluginMount @classmethod def preprocess(cls, post, metadata): return post, metadata @classmethod def postprocess(cls, post): return post class CommandPlugin(PluginMixin): __metaclass__ = PluginMount @classmethod def active(cls): return True @classmethod def add_command(cls, subparser, main_parser, common_parser): raise NotImplementedError() class JinjaEnvironmentPlugin(PluginMixin): __metaclass__ = PluginMount filters = {} globals = {} @classmethod def _add_filters(cls, jinja_env): logger = cls.get_logger() filters = cls.get_filters() for filter_name, filter_function in filters.iteritems(): if filter_name in jinja_env.filters: logger.warning("Jinja filter name conflict. " "A plugin is trying to add a filter with a name that conflicts with an existing filter. " "Filter name: %s" % filter_name) else: jinja_env.filters[filter_name] = filter_function logger.debug("Registered Jinja filter: %s" % filter_name) @classmethod def _add_globals(cls, jinja_env): logger = cls.get_logger() global_list = cls.get_globals() for global_name, the_global in global_list.iteritems(): if global_name in jinja_env.globals: logger.warning("Jinja global name conflict. " "A plugin is trying to add a global with a name that conflicts with an existing global. " "Global name: %s" % global_name) else: jinja_env.globals[global_name] = the_global logger.debug("Registered Jinja global: %s" % global_name) @classmethod def update_environment(cls, jinja_env): cls._add_filters(jinja_env) cls._add_globals(jinja_env) @classmethod def get_filters(cls): return cls.filters @classmethod
MIT License
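A plugin opts into this hook simply by subclassing JinjaEnvironmentPlugin and populating `globals` (or overriding get_globals). A hedged sketch with a made-up plugin name; the import path mirrors engineer/plugins/core.py from this record:

import datetime

from engineer.plugins.core import JinjaEnvironmentPlugin  # module path assumed from this record


class CurrentYearPlugin(JinjaEnvironmentPlugin):
    # Hypothetical plugin: exposes a `current_year` callable to every template.
    globals = {'current_year': lambda: datetime.date.today().year}

# update_environment(jinja_env) then registers these globals via _add_globals,
# unless the name conflicts with an existing Jinja global.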
openschc/openschc
src/stats/statsct.py
Statsct.initialize
python
def initialize(init_time=None):
    dprint('Init statsct module')
    if init_time is None:
        init_time = time.time()
    Statsct.results['init_time'] = init_time
    Statsct.results['packet_list'] = []
    Statsct.sender_packets['packet_list'] = []
    Statsct.receiver_packets['packet_list'] = []
    Statsct.src_id = None
    Statsct.dst_id = None
    Statsct.device_rule = dict()
    Statsct.gw_rule = dict()
    Statsct.gw_rule['fragSender'] = []
    Statsct.channel_occupancy = 0
    Statsct.goodput = 0
    Statsct.total_delay = 0
    Statsct.reliability = 0
    Statsct.total_packet_send = dict()
    Statsct.msg_type = ""
    Statsct.packet_info = dict()
    Statsct.last_msg_type = ""
    Statsct.succ_packets = 0
    Statsct.fail_packets = 0
    Statsct.total_data_send = 0
    Statsct.msg_type_queue = []
    Statsct.channel_occupancy_sender = 0
    Statsct.channel_occupancy_receiver = 0
Initializes the static class; creates the file to write and the instance of the class.
https://github.com/openschc/openschc/blob/7b0c165a27936d8f2732a90844a00c5ade23eea5/src/stats/statsct.py#L75-L103
try: from ucollections import defaultdict except ImportError: from collections import defaultdict try: from ucollections import OrderedDict except ImportError: from collections import OrderedDict try: import utime as time except ImportError: import time import sys from .toa_calculator import get_toa import frag_msg from gen_utils import dprint, dpprint sys.path.append("..") from gen_base_import import * from gen_utils import dprint, dpprint SCHC_FRAG = "SCHC_FRAG" SCHC_ACK_OK = "SCHC_ACK_OK" SCHC_ACK_KO = "SCHC_ACK_KO" SCHC_SENDER_ABORT = "SCHC_SENDER_ABORT" SCHC_RECEIVER_ABORT = "SCHC_RECEIVER_ABORT" SCHC_ALL_1 = "SCHC_ALL_1" class Statsct(object): src_id = None dst_id = None results = OrderedDict() sender_packets = OrderedDict() receiver_packets = OrderedDict() SF = 8 dc = 1 device_rule = dict() gw_rule = dict() channel_occupancy = 0 goodput = 0 total_delay = 0 reliability = 0 packet_info = dict() total_packet_send = dict() total_data_send = 0 msg_type = "" packet_length = 0 succ_packets = 0 fail_packets = 0 last_msg_type = '' msg_type_queue = [] channel_occupancy_sender = 0 channel_occupancy_receiver = 0 background_traffic = None position = None current_time = None @staticmethod
MIT License
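A minimal sketch of priming the collector before a simulation run; the import path is an assumption based on src/stats/statsct.py in this record:

import time

from stats.statsct import Statsct  # assumed import path (with src/ on sys.path)

# Reset every counter and list before a run; init_time defaults to time.time().
Statsct.initialize(init_time=time.time())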
morgan-stanley/treadmill
lib/python/treadmill/yamlwrapper.py
_repr_none
python
def _repr_none(dumper, _data):
    return dumper.represent_scalar(u'tag:yaml.org,2002:null', '~')
Fix yaml None representation (use ~).
https://github.com/morgan-stanley/treadmill/blob/f18267c665baf6def4374d21170198f63ff1cde4/lib/python/treadmill/yamlwrapper.py#L56-L59
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals

import six
import yaml

from yaml import YAMLError

try:
    from yaml import CSafeLoader as Loader
    from yaml import CSafeDumper as Dumper
except ImportError:
    from yaml import SafeLoader as Loader
    from yaml import SafeDumper as Dumper


def _repr_bytes(dumper, data):
    unicode_data = data.decode()
    return _repr_unicode(dumper, unicode_data)


def _repr_unicode(dumper, data):
    if u'\n' in data:
        return dumper.represent_scalar(u'tag:yaml.org,2002:str', data, style='|')
    else:
        return dumper.represent_scalar(u'tag:yaml.org,2002:str', data)


if six.PY2:
    yaml.add_representer(str, _repr_bytes)
    yaml.add_representer(unicode, _repr_unicode)
else:
    yaml.add_representer(str, _repr_unicode)


def _repr_tuple(dumper, data):
    return dumper.represent_list(list(data))


yaml.add_representer(tuple, _repr_tuple)
Apache License 2.0
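A short standalone sketch of what the representer changes: once registered for NoneType (the same way the module registers its str and tuple representers above), None values are emitted as `~` rather than `null`:

import yaml


# Copy of the one-line representer from this record, registered on the default dumper.
def _repr_none(dumper, _data):
    return dumper.represent_scalar(u'tag:yaml.org,2002:null', '~')


yaml.add_representer(type(None), _repr_none)
yaml.dump({'key': None})   # -> 'key: ~\n' instead of 'key: null\n'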
annoviko/pyclustering
pyclustering/cluster/cure.py
cure.__process_by_python
python
def __process_by_python(self):
    self.__create_queue()
    self.__create_kdtree()

    while len(self.__queue) > self.__number_cluster:
        cluster1 = self.__queue[0]
        cluster2 = cluster1.closest

        self.__queue.remove(cluster1)
        self.__queue.remove(cluster2)

        self.__delete_represented_points(cluster1)
        self.__delete_represented_points(cluster2)

        merged_cluster = self.__merge_clusters(cluster1, cluster2)

        self.__insert_represented_points(merged_cluster)

        cluster_relocation_requests = []
        if len(self.__queue) > 0:
            merged_cluster.closest = self.__queue[0]
            merged_cluster.distance = self.__cluster_distance(merged_cluster, merged_cluster.closest)

            for item in self.__queue:
                distance = self.__cluster_distance(merged_cluster, item)
                if distance < merged_cluster.distance:
                    merged_cluster.closest = item
                    merged_cluster.distance = distance

                if (item.closest is cluster1) or (item.closest is cluster2):
                    if item.distance < distance:
                        (item.closest, item.distance) = self.__closest_cluster(item, distance)

                        if item.closest is None:
                            item.closest = merged_cluster
                            item.distance = distance
                    else:
                        item.closest = merged_cluster
                        item.distance = distance

                    cluster_relocation_requests.append(item)

        self.__insert_cluster(merged_cluster)
        for item in cluster_relocation_requests:
            self.__relocate_cluster(item)

    self.__clusters = [cure_cluster_unit.indexes for cure_cluster_unit in self.__queue]
    self.__representors = [cure_cluster_unit.rep for cure_cluster_unit in self.__queue]
    self.__means = [cure_cluster_unit.mean for cure_cluster_unit in self.__queue]
! @brief Performs cluster analysis using python code.
https://github.com/annoviko/pyclustering/blob/bf4f51a472622292627ec8c294eb205585e50f52/pyclustering/cluster/cure.py#L165-L230
import numpy from pyclustering.cluster.encoder import type_encoding from pyclustering.utils import euclidean_distance_square from pyclustering.container.kdtree import kdtree from pyclustering.core.wrapper import ccore_library import pyclustering.core.cure_wrapper as wrapper class cure_cluster: def __init__(self, point, index): self.points = [ ] self.indexes = -1 self.mean = None self.rep = [ ] if point is not None: self.points = [ point ] self.indexes = [ index ] self.mean = point self.rep = [ point ] self.closest = None self.distance = float('inf') def __repr__(self): return "%s, %s" % (self.distance, self.points) class cure: def __init__(self, data, number_cluster, number_represent_points = 5, compression = 0.5, ccore = True): self.__pointer_data = self.__prepare_data_points(data) self.__clusters = None self.__representors = None self.__means = None self.__number_cluster = number_cluster self.__number_represent_points = number_represent_points self.__compression = compression self.__ccore = ccore if self.__ccore: self.__ccore = ccore_library.workable() self.__validate_arguments() def process(self): if self.__ccore is True: self.__process_by_ccore() else: self.__process_by_python() return self def __process_by_ccore(self): cure_data_pointer = wrapper.cure_algorithm(self.__pointer_data, self.__number_cluster, self.__number_represent_points, self.__compression) self.__clusters = wrapper.cure_get_clusters(cure_data_pointer) self.__representors = wrapper.cure_get_representors(cure_data_pointer) self.__means = wrapper.cure_get_means(cure_data_pointer) wrapper.cure_data_destroy(cure_data_pointer)
BSD 3-Clause New or Revised License
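A hedged usage sketch of the surrounding cure class, forcing the pure-Python path implemented by __process_by_python; the toy data set is made up, and the get_clusters/get_representors accessors are assumed from pyclustering's public API:

from pyclustering.cluster.cure import cure

# Toy 2-D data: two well-separated groups of four points each.
data = [[0.0, 0.0], [0.1, 0.1], [0.2, 0.0], [0.1, -0.1],
        [5.0, 5.0], [5.1, 4.9], [4.9, 5.1], [5.0, 5.2]]

instance = cure(data, number_cluster=2, number_represent_points=2, ccore=False)
instance.process()                      # ccore=False runs __process_by_python

clusters = instance.get_clusters()      # lists of point indexes, one per cluster
representors = instance.get_representors()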
ucy-linc-lab/fogify
connectors/materialized_connectors/DockerBasedConnectors.py
SwarmConnector.down
python
def down(self, timeout=60):
    try:
        subprocess.check_output(['docker', 'stack', 'rm', 'fogify'])
    except Exception as e:
        print(e)
    finished = False
    for i in range(int(timeout / 5)):
        sleep(5)
        if self.count_services() == 0:
            finished = True
            break
    if not finished:
        raise Exception("The deployment is not down")
Undeploys a running infrastructure.

:param timeout: The duration that the system will wait before it raises an exception
https://github.com/ucy-linc-lab/fogify/blob/80dee9e2079ef45c49a6cd6629a3bf0b31461afb/connectors/materialized_connectors/DockerBasedConnectors.py#L347-L364
import copy import json import logging import os import socket import subprocess from time import sleep import docker from flask_api import exceptions from utils.host_info import HostInfo from FogifyModel.base import Node, FogifyModel from connectors.base import BasicConnector class CommonDockerSuperclass(BasicConnector): def __init__(self, model: FogifyModel = None, path=os.getcwd() + os.environ['UPLOAD_FOLDER'] if 'UPLOAD_FOLDER' in os.environ else "", frequency=int(os.environ['CPU_FREQ']) if 'CPU_FREQ' in os.environ else 2400, cpu_oversubscription= int(os.environ[ 'CPU_OVERSUBSCRIPTION_PERCENTAGE']) if 'CPU_OVERSUBSCRIPTION_PERCENTAGE' in os.environ else 0, ram_oversubscription= int(os.environ[ 'RAM_OVERSUBSCRIPTION_PERCENTAGE']) if 'RAM_OVERSUBSCRIPTION_PERCENTAGE' in os.environ else 0, node_name=os.environ['MANAGER_NAME'] if 'MANAGER_NAME' in os.environ else 'localhost', host_ip=os.environ['HOST_IP'] if 'HOST_IP' in os.environ else None ): self.model = model self.frequency = frequency self.path = path self.file = "fogified-swarm.yaml" self.cpu_oversubscription = cpu_oversubscription self.ram_oversubscription = ram_oversubscription self.node_name = node_name self.host_ip = host_ip @classmethod def check_status(cls, *_args, **_kwargs): CurrentClass = cls def decorator(func): def wrapper(*args, **kwargs): options = ['available', 'running'] if not len(_args) > 0: raise exceptions.APIException('You have to select at least one option:' + str(options)) option = str(_args[0]) if option not in options: raise exceptions.APIException('You have to select an option from:' + str(options)) if option == 'available': if int(CurrentClass.count_services(status=None)) < 1: return func(*args, **kwargs) raise exceptions.APIException('The system has a deployed instance.') if option == 'running': if int(CurrentClass.count_services(status=None)) > 0: return func(*args, **kwargs) raise exceptions.APIException('The system is available.') return wrapper return decorator def generate_files(self): res = {'version': '3.7'} res['networks'] = {i.name: {'external': True} for i in self.model.networks} res['services'] = {} for blueprint in self.model.topology: if blueprint.service not in self.model.services: raise Exception("Model error: There is no service with name %s" % blueprint.service) service = copy.deepcopy(self.model.services[blueprint.service]) if 'networks' not in service: service['networks'] = {} else: service['networks'] = {i: {} for i in service['networks']} for network in blueprint.networks: if type(network) == str: res['networks'][network] = {'external': True} service['networks'][network] = {} elif type(network) == dict and 'name' in network: res['networks'][network['name']] = {'external': True} service['networks'][network['name']] = {} temp_node = self.model.node_object(blueprint.node) service['deploy'] = self.node_representation(temp_node) service['deploy']['replicas'] = blueprint.replicas res['services'][blueprint.service_name] = service return res def node_representation(self, node: Node): real_specs = list({"node.labels." 
+ i for i in node.get_specifications()}) res = {"placement": {"constraints": real_specs if len(real_specs) > 0 else []}} if "node.labels.main_cluster_node!=True" in real_specs: return res if "node.labels.main_cluster_node==False" in real_specs: return res res["placement"]["constraints"].append("node.labels.main_cluster_node==True") caps = self.__node_capabilities(node) res['resources'] = { 'limits': { 'cpus': "{0:.1f}".format(caps['upper_cpu_bound']), 'memory': str(caps['upper_memory_bound']) + "G" }, 'reservations': { 'cpus': "{0:.1f}".format(caps['lower_cpu_bound']), 'memory': str(caps['lower_memory_bound']) + "G" } } return res def count_networks(self): count = subprocess.getoutput('docker network ls | grep fogify | wc -l') return int(count) if count.isnumeric() else -1 def __node_capabilities(self, node: Node): memory = node.get_memory_value_in_gb() lower_memory_bound = memory - memory * self.ram_oversubscription / 100 cpu = node.get_processor_cores() * node.get_processor_clock_speed() / self.frequency lower_cpu_bound = cpu - cpu * self.cpu_oversubscription / 100 return { 'upper_cpu_bound': cpu, 'lower_cpu_bound': lower_cpu_bound, 'upper_memory_bound': memory, 'lower_memory_bound': lower_memory_bound } def inject_labels(self, labels={}, **kwargs): pass def get_container_ips(self, container_id): nets = json.loads( subprocess.getoutput("docker inspect --format '{{json .NetworkSettings.Networks}}' %s" % container_id)) return {network: nets[network]['IPAddress'] for network in nets} def get_host_data_path(self, container_id): try: return subprocess.getoutput("docker inspect --format='{{.GraphDriver.Data.MergedDir}}' %s" % container_id) except Exception: logging.error("The system did not find the host's docker disk space (that is used for user-defined metrics).", exc_info=True) return None class DockerComposeConnector(CommonDockerSuperclass): @classmethod def count_services(cls, service_name: str = None, status: str = "Running") -> int: com = "docker ps --format '{{.Names}}' | grep fogify_" if service_name: com += ' | grep fogify_' + str(service_name) res = subprocess.getoutput(com + ' | wc -l') if len(res) > 0 and res.split(" ")[-1].isnumeric(): return int(res.split(" ")[-1]) return 0 def deploy(self, timeout=60): count = self.model.service_count() subprocess.check_output( ['docker-compose', '-f', self.path + self.file, '-p', 'fogify', '--compatibility', 'up', '-d']) if count is None: return finished = False for i in range(int(timeout / 5)): sleep(5) finished = self.count_services() == count if finished: return if not finished: raise Exception("The process does not finish") def scale(self, service, instances): return subprocess.getoutput( 'docker-compose -f ' + self.path + self.file + ' -p fogify --compatibility up --scale ' + service + "=" + str( instances) + " -d" ) def get_all_instances(self): try: rows = subprocess.getoutput("""docker ps --format '{{.Names}}'""").split("\n") node_name = self.node_name fin_res = {node_name: []} for name in rows: if name.startswith("fogify_"): fin_res[node_name].append(name) return fin_res except Exception: logging.error("The connector could not return the docker instances.", exc_info=True) return {} def down(self, timeout=60): try: subprocess.check_output( ['docker-compose', '-f', self.path + self.file, '-p', 'fogify', 'down', '--remove-orphans'] ) except Exception as e: logging.error("The undeploy failed. Please undeploy the stack manually (e.g. 
docker stop $(docker ps -q) )", exc_info=True) finished = False for i in range(int(timeout / 5)): sleep(5) finished = self.count_services() == 0 if finished: return if not finished: logging.error("The services did not removed yet. Please check the issue manually.", exc_info=True) def get_nodes(self): name = os.environ['MANAGER_NAME'] if 'MANAGER_NAME' in os.environ else 'localhost' return {name: socket.gethostbyname(name)} def create_network(self, network): com = ['docker', 'network', 'create', '-d', 'bridge', '--attachable', network['name']] subprocess.check_output(com) @classmethod def return_deployment(cls): client = docker.from_env() containers = client.containers.list() res = {} for container in containers: if container.name.startswith('fogify_'): service = container.attrs['Config']['Labels']["com.docker.compose.service"] if service not in res: res[service] = [] res[service].append(container.name) return res @classmethod def event_attr_to_information(cls, event): attrs = event['Actor']['Attributes'] service_name, container_id, container_name = None, None, None if 'com.docker.compose.project' in attrs and attrs['com.docker.compose.project'] == 'fogify': client = docker.from_env() container_id = event['id'] service_name = attrs['com.docker.compose.service'] container = client.containers.get(container_id) container_name = container.attrs['Name'].replace("/", "") client.close() return dict( service_name=service_name, container_id=container_id, container_name=container_name ) @classmethod def instance_name(cls, alias: str) -> str: if alias.startswith("fogify_"): return alias[len("fogify_"):] else: return alias def get_running_container_processing(self, service): try: com = "docker inspect fogify_%s --format '{{.HostConfig.NanoCPUs}}'" % service return int(subprocess.getoutput(com)) / 1000000000 except Exception as ex: print(ex) return None def get_container_ip_for_network(self, container_id, network): nets = self.get_container_ips(container_id) if network not in nets: return None return nets[network] def get_ips_for_service(self, service): res = {} if not service.startswith("fogify_"): service = "fogify_" + service com = """docker ps --format '{ "{{ .Names }}": "{{.ID}}" }' | grep %s""" % service name_id_pairs = subprocess.getoutput(com).split("\n") containers = [] for name_id_pair in name_id_pairs: try: containers.append(json.loads(name_id_pair)) except Exception as ex: logging.warning('The service %s returns invalid Container-ip %s' % (service, name_id_pair)) for container in containers: for name in container: for net, ip in self.get_container_ips(container[name]).items(): if net not in res: res[net] = [] res[net].append(ip) return res class SwarmConnector(CommonDockerSuperclass): def scale(self, service, instances): client = docker.from_env() for instance_service in client.services.list(): is_fogifyed = instance_service.name.startswith("fogify_") contains_service_name = str(instance_service.name).find(service) > -1 if is_fogifyed and contains_service_name: return instance_service.scale(instances) def get_running_container_processing(self, service): try: com = "docker service inspect fogify_%s --format '{{.Spec.TaskTemplate.Resources.Limits.NanoCPUs}}'" % service return int(subprocess.getoutput(com))/1000000000 except Exception as ex: print(ex) return None def get_all_instances(self): try: name_node_pairs = [json.loads(s) for s in subprocess.getoutput( """docker stack ps -f "desired-state=running" --format '{ "{{.Name}}": "{{.Node}}" }' fogify""").split( "\n")] res = {} for pair in 
name_node_pairs: for name, node in pair.items(): if node not in res: res[node] = [] res[node].append(name) return res except Exception: return {} @classmethod def count_services(cls, service_name: str = None, status: str = "Running"): com = 'docker stack ps fogify' if status: com += ' | grep ' + status if service_name: com += ' | grep ' + service_name res = subprocess.getoutput(com + ' | wc -l') if len(res) > 0 and res.split(" ")[-1].isnumeric(): return int(res.split(" ")[-1]) return 0
Apache License 2.0
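A hedged sketch of tearing a deployment down through the Swarm connector; the constructor falls back to environment-driven defaults from CommonDockerSuperclass, so no model is needed just to undeploy, and the import path is taken from this record's file path:

from connectors.materialized_connectors.DockerBasedConnectors import SwarmConnector  # path assumed

connector = SwarmConnector()        # model=None is acceptable for teardown only
connector.down(timeout=120)         # polls every 5 s until `docker stack ps fogify` is empty;
                                    # raises if services are still listed after ~120 s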
samschott/maestral
src/maestral/utils/__init__.py
chunks
python
def chunks(lst: list, n: int, consume: bool = False) -> Iterator[list]:
    if consume:
        while lst:
            chunk = lst[0:n]
            del lst[0:n]
            yield chunk
    else:
        for i in range(0, len(lst), n):
            yield lst[i : i + n]
Partitions an iterable into chunks of length ``n``.

:param lst: Iterable to partition.
:param n: Chunk size.
:param consume: If True, the list will be consumed (emptied) during the iteration.
    This can be used to free memory in case of large lists.
:returns: Iterator over chunks.
https://github.com/samschott/maestral/blob/a0cd0ebbfecae65d71337fc35a54d1f3fab7ab5a/src/maestral/utils/__init__.py#L35-L53
import os
from types import TracebackType

from packaging.version import Version
from typing import Iterator, TypeVar, Optional, Iterable, Tuple, Type

_N = TypeVar("_N", float, int)

ExecInfoType = Tuple[Type[BaseException], BaseException, Optional[TracebackType]]


def natural_size(num: float, unit: str = "B", sep: bool = True) -> str:
    sep_char = " " if sep else ""
    for prefix in ("", "K", "M", "G"):
        if abs(num) < 1000.0:
            return f"{num:3.1f}{sep_char}{prefix}{unit}"
        num /= 1000.0
    prefix = "T"
    return f"{num:.1f}{sep_char}{prefix}{unit}"
MIT License
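A quick usage sketch of both modes (values are illustrative):

from maestral.utils import chunks

list(chunks([1, 2, 3, 4, 5], 2))          # -> [[1, 2], [3, 4], [5]]

data = list(range(6))
consumed = list(chunks(data, 4, consume=True))
# consumed == [[0, 1, 2, 3], [4, 5]] and `data` is now [], freed during iteration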
hpe-container-platform-community/hpecp-python-library
hpecp/k8s_cluster.py
K8sClusterHostConfig.to_dict
python
def to_dict(self):
    return {"node": self.node, "role": self.role}
Returns a dict representation of the object.

Returns
-------
dict

Example
-------
>>> .to_dict()
{
    'node': '/api/v2/worker/k8shost/12',
    'role': 'master'
}
https://github.com/hpe-container-platform-community/hpecp-python-library/blob/625fb25c99698a2203b394ef39a253e2b4f0d7c9/hpecp/k8s_cluster.py#L241-L256
from __future__ import absolute_import import re from distutils.version import LooseVersion from enum import Enum from requests.structures import CaseInsensitiveDict from .base_resource import AbstractResource, AbstractWaitableResourceController try: basestring except NameError: basestring = str class K8sClusterStatus(Enum): ready = 1 creating = 2 updating = 3 upgrading = 4 deleting = 5 error = 6 warning = 7 class K8sCluster(AbstractResource): all_fields = [ "id", "name", "description", "k8s_version", "addons", "created_by_user_id", "created_by_user_name", "created_time", "k8shosts_config", "admin_kube_config", "dashboard_token", "api_endpoint_access", "dashboard_endpoint_access", "cert_data", "status", "status_message", "_links", ] default_display_fields = [ "id", "name", "description", "k8s_version", "status", ] @property def name(self): return self.json["label"]["name"] @property def description(self): return self.json["label"]["description"] @property def k8s_version(self): return self.json["k8s_version"] @property def addons(self): if "addons" in self.json: return self.json["addons"] else: return "" @property def created_by_user_id(self): return self.json["created_by_user_id"] @property def created_by_user_name(self): return self.json["created_by_user_name"] @property def created_time(self): return self.json["created_time"] @property def k8shosts_config(self): return self.json["k8shosts_config"] @property def admin_kube_config(self): if "admin_kube_config" in self.json: return self.json["admin_kube_config"] else: return "" @property def dashboard_token(self): if "dashboard_token" in self.json: return self.json["dashboard_token"] else: return "" @property def api_endpoint_access(self): if "api_endpoint_access" in self.json: return self.json["api_endpoint_access"] else: return "" @property def dashboard_endpoint_access(self): if "dashboard_endpoint_access" in self.json: return self.json["dashboard_endpoint_access"] else: return "" @property def cert_data(self): try: return self.json["cert_data"] except KeyError: return None @property def status(self): return self.json["status"] @property def status_message(self): if "status_message" in self.json: return self.json["status_message"] else: return "" class K8sClusterHostConfig: @classmethod def create_from_list(cls, noderole): assert ( len(noderole) == 2 ), "'noderole' list must have two values [ node, role ]" return K8sClusterHostConfig(node=noderole[0], role=noderole[1]) def __init__(self, node, role): assert isinstance(node, basestring), "'node' must be an string" assert re.match( r"\/api\/v2\/worker\/k8shost\/[0-9]+", node ), "'node' must have format '/api/v2/worker/k8shost/[0-9]+'" assert role in [ "master", "worker", ], "'role' must one of ['master, worker']" self.node = node self.role = role
MIT License
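A usage sketch built directly from the docstring's example values:

from hpecp.k8s_cluster import K8sClusterHostConfig

host = K8sClusterHostConfig(node='/api/v2/worker/k8shost/12', role='master')
host.to_dict()   # -> {'node': '/api/v2/worker/k8shost/12', 'role': 'master'}

# Equivalent construction from a two-item [node, role] list, via the classmethod above:
K8sClusterHostConfig.create_from_list(['/api/v2/worker/k8shost/12', 'worker']).to_dict()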
team-ocean/veros
veros/tools/setup.py
interpolate
python
def interpolate(coords, var, interp_coords, missing_value=None, fill=True, kind="linear"):
    if len(coords) != len(interp_coords) or len(coords) != var.ndim:
        raise ValueError("Dimensions of coordinates and values do not match")

    if missing_value is not None:
        invalid_mask = npx.isclose(var, missing_value)
        var = npx.where(invalid_mask, npx.nan, var)

    if var.ndim > 1 and coords[0].ndim == 1:
        interp_grid = npx.rollaxis(npx.array(npx.meshgrid(*interp_coords, indexing="ij")), 0, len(interp_coords) + 1)
    else:
        interp_grid = interp_coords

    coords = [onp.array(c) for c in coords]
    var = scipy.interpolate.interpn(
        coords, onp.array(var), interp_grid, bounds_error=False, fill_value=npx.nan, method=kind
    )
    var = npx.asarray(var)

    if fill:
        var = fill_holes(var)

    return var
Interpolate globally defined data to a different (regular) grid.

Arguments:
    coords: Tuple of coordinate arrays for each dimension.
    var (:obj:`ndarray` of dim (nx1, ..., nxd)): Variable data to interpolate.
    interp_coords: Tuple of coordinate arrays to interpolate to.
    missing_value (optional): Value denoting cells of missing data in ``var``.
        Is replaced by `NaN` before interpolating. Defaults to `None`, which means
        no replacement is taking place.
    fill (bool, optional): Whether `NaN` values should be replaced by the nearest
        finite value after interpolating. Defaults to ``True``.
    kind (str, optional): Order of interpolation. Supported are `nearest` and
        `linear` (default).

Returns:
    :obj:`ndarray` containing the interpolated values on the grid spanned by
    ``interp_coords``.
https://github.com/team-ocean/veros/blob/db4bbf20118d4608cf0dd1f571d5274a4b3f012a/veros/tools/setup.py#L8-L49
from veros.core.operators import numpy as npx

import numpy as onp
import scipy.interpolate
import scipy.spatial
MIT License
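A hedged sketch of interpolating 1-D data with a flagged missing value onto a finer grid; the grids and flag value are made up, and the example assumes veros is installed with its default NumPy backend:

import numpy as np

from veros.tools.setup import interpolate  # module path taken from this record

# Source data on a coarse 1-D grid, with one cell flagged as missing.
src_x = np.linspace(0.0, 10.0, 11)
src_values = np.sin(src_x)
src_values[4] = -9999.0

# Interpolate onto a grid twice as fine; the flagged cell is turned into NaN first.
target_x = np.linspace(0.0, 10.0, 21)
out = interpolate((src_x,), src_values, (target_x,), missing_value=-9999.0, fill=False)
out.shape   # -> (21,)
# With fill=True (the default), NaNs in the result would instead be replaced by the
# nearest finite value.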
jyveapp/django-pgclone
pgclone/database.py
get_url
python
def get_url(db_config):
    return (
        f'postgresql://{db_config["USER"]}:{db_config["PASSWORD"]}'
        f'@{db_config["HOST"]}:{db_config["PORT"]}/{db_config["NAME"]}'
    )
Convert a database dictionary config to a url
https://github.com/jyveapp/django-pgclone/blob/89b26372201e50393cb6a0b0dbb2bbc707d01c22/pgclone/database.py#L30-L35
import copy

from django.conf import settings


def get_default_config():
    return copy.deepcopy(settings.DATABASES['default'])


def make_config(db_name):
    for db in settings.DATABASES.values():
        if db.get('NAME') == db_name:
            raise RuntimeError(
                f'pgclone cannot use temporary database named "{db_name}"'
                ' since it is already configured in settings.DATABASES.'
            )

    db_config = get_default_config()
    db_config['NAME'] = db_name
    return db_config
BSD 3-Clause New or Revised License
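A quick sketch with made-up credentials:

from pgclone.database import get_url

db_config = {
    'USER': 'postgres',
    'PASSWORD': 'secret',      # illustrative values only
    'HOST': 'localhost',
    'PORT': '5432',
    'NAME': 'mydb',
}
get_url(db_config)   # -> 'postgresql://postgres:secret@localhost:5432/mydb'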
azure/azure-devops-cli-extension
azure-devops/azext_devops/devops_sdk/v5_1/tfvc/tfvc_client.py
TfvcClient.get_labels
python
def get_labels(self, request_data, project=None, top=None, skip=None):
    route_values = {}
    if project is not None:
        route_values['project'] = self._serialize.url('project', project, 'str')
    query_parameters = {}
    if request_data is not None:
        if request_data.label_scope is not None:
            query_parameters['requestData.labelScope'] = request_data.label_scope
        if request_data.name is not None:
            query_parameters['requestData.name'] = request_data.name
        if request_data.owner is not None:
            query_parameters['requestData.owner'] = request_data.owner
        if request_data.item_label_filter is not None:
            query_parameters['requestData.itemLabelFilter'] = request_data.item_label_filter
        if request_data.max_item_count is not None:
            query_parameters['requestData.maxItemCount'] = request_data.max_item_count
        if request_data.include_links is not None:
            query_parameters['requestData.includeLinks'] = request_data.include_links
    if top is not None:
        query_parameters['$top'] = self._serialize.query('top', top, 'int')
    if skip is not None:
        query_parameters['$skip'] = self._serialize.query('skip', skip, 'int')
    response = self._send(http_method='GET',
                          location_id='a5d9bd7f-b661-4d0e-b9be-d9c16affae54',
                          version='5.1',
                          route_values=route_values,
                          query_parameters=query_parameters)
    return self._deserialize('[TfvcLabelRef]', self._unwrap_collection(response))
GetLabels.
Get a collection of shallow label references.

:param :class:`<TfvcLabelRequestData> <azure.devops.v5_1.tfvc.models.TfvcLabelRequestData>` request_data: labelScope, name, owner, and itemLabelFilter
:param str project: Project ID or project name
:param int top: Max number of labels to return
:param int skip: Number of labels to skip
:rtype: [TfvcLabelRef]
https://github.com/azure/azure-devops-cli-extension/blob/5f33f7d81a9c2d2990044fbd9ffa6b535cbda528/azure-devops/azext_devops/devops_sdk/v5_1/tfvc/tfvc_client.py#L628-L663
 from msrest import Serializer, Deserializer from ...client import Client from . import models class TfvcClient(Client): def __init__(self, base_url=None, creds=None): super(TfvcClient, self).__init__(base_url, creds) client_models = {k: v for k, v in models.__dict__.items() if isinstance(v, type)} self._serialize = Serializer(client_models) self._deserialize = Deserializer(client_models) resource_area_identifier = '8aa40520-446d-40e6-89f6-9c9f9ce44c48' def get_branch(self, path, project=None, include_parent=None, include_children=None): route_values = {} if project is not None: route_values['project'] = self._serialize.url('project', project, 'str') query_parameters = {} if path is not None: query_parameters['path'] = self._serialize.query('path', path, 'str') if include_parent is not None: query_parameters['includeParent'] = self._serialize.query('include_parent', include_parent, 'bool') if include_children is not None: query_parameters['includeChildren'] = self._serialize.query('include_children', include_children, 'bool') response = self._send(http_method='GET', location_id='bc1f417e-239d-42e7-85e1-76e80cb2d6eb', version='5.1', route_values=route_values, query_parameters=query_parameters) return self._deserialize('TfvcBranch', response) def get_branches(self, project=None, include_parent=None, include_children=None, include_deleted=None, include_links=None): route_values = {} if project is not None: route_values['project'] = self._serialize.url('project', project, 'str') query_parameters = {} if include_parent is not None: query_parameters['includeParent'] = self._serialize.query('include_parent', include_parent, 'bool') if include_children is not None: query_parameters['includeChildren'] = self._serialize.query('include_children', include_children, 'bool') if include_deleted is not None: query_parameters['includeDeleted'] = self._serialize.query('include_deleted', include_deleted, 'bool') if include_links is not None: query_parameters['includeLinks'] = self._serialize.query('include_links', include_links, 'bool') response = self._send(http_method='GET', location_id='bc1f417e-239d-42e7-85e1-76e80cb2d6eb', version='5.1', route_values=route_values, query_parameters=query_parameters) return self._deserialize('[TfvcBranch]', self._unwrap_collection(response)) def get_branch_refs(self, scope_path, project=None, include_deleted=None, include_links=None): route_values = {} if project is not None: route_values['project'] = self._serialize.url('project', project, 'str') query_parameters = {} if scope_path is not None: query_parameters['scopePath'] = self._serialize.query('scope_path', scope_path, 'str') if include_deleted is not None: query_parameters['includeDeleted'] = self._serialize.query('include_deleted', include_deleted, 'bool') if include_links is not None: query_parameters['includeLinks'] = self._serialize.query('include_links', include_links, 'bool') response = self._send(http_method='GET', location_id='bc1f417e-239d-42e7-85e1-76e80cb2d6eb', version='5.1', route_values=route_values, query_parameters=query_parameters) return self._deserialize('[TfvcBranchRef]', self._unwrap_collection(response)) def get_changeset_changes(self, id=None, skip=None, top=None, continuation_token=None): route_values = {} if id is not None: route_values['id'] = self._serialize.url('id', id, 'int') query_parameters = {} if skip is not None: query_parameters['$skip'] = self._serialize.query('skip', skip, 'int') if top is not None: query_parameters['$top'] = self._serialize.query('top', top, 'int') if 
continuation_token is not None: query_parameters['continuationToken'] = self._serialize.query('continuation_token', continuation_token, 'str') response = self._send(http_method='GET', location_id='f32b86f2-15b9-4fe6-81b1-6f8938617ee5', version='5.1', route_values=route_values, query_parameters=query_parameters) response_value = self._deserialize('[TfvcChange]', self._unwrap_collection(response)) continuation_token = self._get_continuation_token(response) return self.GetChangesetChangesResponseValue(response_value, continuation_token) class GetChangesetChangesResponseValue(object): def __init__(self, value, continuation_token): self.value = value self.continuation_token = continuation_token def create_changeset(self, changeset, project=None): route_values = {} if project is not None: route_values['project'] = self._serialize.url('project', project, 'str') content = self._serialize.body(changeset, 'TfvcChangeset') response = self._send(http_method='POST', location_id='0bc8f0a4-6bfb-42a9-ba84-139da7b99c49', version='5.1', route_values=route_values, content=content) return self._deserialize('TfvcChangesetRef', response) def get_changeset(self, id, project=None, max_change_count=None, include_details=None, include_work_items=None, max_comment_length=None, include_source_rename=None, skip=None, top=None, orderby=None, search_criteria=None): route_values = {} if project is not None: route_values['project'] = self._serialize.url('project', project, 'str') if id is not None: route_values['id'] = self._serialize.url('id', id, 'int') query_parameters = {} if max_change_count is not None: query_parameters['maxChangeCount'] = self._serialize.query('max_change_count', max_change_count, 'int') if include_details is not None: query_parameters['includeDetails'] = self._serialize.query('include_details', include_details, 'bool') if include_work_items is not None: query_parameters['includeWorkItems'] = self._serialize.query('include_work_items', include_work_items, 'bool') if max_comment_length is not None: query_parameters['maxCommentLength'] = self._serialize.query('max_comment_length', max_comment_length, 'int') if include_source_rename is not None: query_parameters['includeSourceRename'] = self._serialize.query('include_source_rename', include_source_rename, 'bool') if skip is not None: query_parameters['$skip'] = self._serialize.query('skip', skip, 'int') if top is not None: query_parameters['$top'] = self._serialize.query('top', top, 'int') if orderby is not None: query_parameters['$orderby'] = self._serialize.query('orderby', orderby, 'str') if search_criteria is not None: if search_criteria.item_path is not None: query_parameters['searchCriteria.itemPath'] = search_criteria.item_path if search_criteria.author is not None: query_parameters['searchCriteria.author'] = search_criteria.author if search_criteria.from_date is not None: query_parameters['searchCriteria.fromDate'] = search_criteria.from_date if search_criteria.to_date is not None: query_parameters['searchCriteria.toDate'] = search_criteria.to_date if search_criteria.from_id is not None: query_parameters['searchCriteria.fromId'] = search_criteria.from_id if search_criteria.to_id is not None: query_parameters['searchCriteria.toId'] = search_criteria.to_id if search_criteria.follow_renames is not None: query_parameters['searchCriteria.followRenames'] = search_criteria.follow_renames if search_criteria.include_links is not None: query_parameters['searchCriteria.includeLinks'] = search_criteria.include_links if search_criteria.mappings is not 
None: query_parameters['searchCriteria.mappings'] = search_criteria.mappings response = self._send(http_method='GET', location_id='0bc8f0a4-6bfb-42a9-ba84-139da7b99c49', version='5.1', route_values=route_values, query_parameters=query_parameters) return self._deserialize('TfvcChangeset', response) def get_changesets(self, project=None, max_comment_length=None, skip=None, top=None, orderby=None, search_criteria=None): route_values = {} if project is not None: route_values['project'] = self._serialize.url('project', project, 'str') query_parameters = {} if max_comment_length is not None: query_parameters['maxCommentLength'] = self._serialize.query('max_comment_length', max_comment_length, 'int') if skip is not None: query_parameters['$skip'] = self._serialize.query('skip', skip, 'int') if top is not None: query_parameters['$top'] = self._serialize.query('top', top, 'int') if orderby is not None: query_parameters['$orderby'] = self._serialize.query('orderby', orderby, 'str') if search_criteria is not None: if search_criteria.item_path is not None: query_parameters['searchCriteria.itemPath'] = search_criteria.item_path if search_criteria.author is not None: query_parameters['searchCriteria.author'] = search_criteria.author if search_criteria.from_date is not None: query_parameters['searchCriteria.fromDate'] = search_criteria.from_date if search_criteria.to_date is not None: query_parameters['searchCriteria.toDate'] = search_criteria.to_date if search_criteria.from_id is not None: query_parameters['searchCriteria.fromId'] = search_criteria.from_id if search_criteria.to_id is not None: query_parameters['searchCriteria.toId'] = search_criteria.to_id if search_criteria.follow_renames is not None: query_parameters['searchCriteria.followRenames'] = search_criteria.follow_renames if search_criteria.include_links is not None: query_parameters['searchCriteria.includeLinks'] = search_criteria.include_links if search_criteria.mappings is not None: query_parameters['searchCriteria.mappings'] = search_criteria.mappings response = self._send(http_method='GET', location_id='0bc8f0a4-6bfb-42a9-ba84-139da7b99c49', version='5.1', route_values=route_values, query_parameters=query_parameters) return self._deserialize('[TfvcChangesetRef]', self._unwrap_collection(response)) def get_batched_changesets(self, changesets_request_data): content = self._serialize.body(changesets_request_data, 'TfvcChangesetsRequestData') response = self._send(http_method='POST', location_id='b7e7c173-803c-4fea-9ec8-31ee35c5502a', version='5.1', content=content) return self._deserialize('[TfvcChangesetRef]', self._unwrap_collection(response)) def get_changeset_work_items(self, id=None): route_values = {} if id is not None: route_values['id'] = self._serialize.url('id', id, 'int') response = self._send(http_method='GET', location_id='64ae0bea-1d71-47c9-a9e5-fe73f5ea0ff4', version='5.1', route_values=route_values) return self._deserialize('[AssociatedWorkItem]', self._unwrap_collection(response)) def get_items_batch(self, item_request_data, project=None): route_values = {} if project is not None: route_values['project'] = self._serialize.url('project', project, 'str') content = self._serialize.body(item_request_data, 'TfvcItemRequestData') response = self._send(http_method='POST', location_id='fe6f827b-5f64-480f-b8af-1eca3b80e833', version='5.1', route_values=route_values, content=content) return self._deserialize('[[TfvcItem]]', self._unwrap_collection(response)) def get_items_batch_zip(self, item_request_data, project=None, **kwargs): 
route_values = {} if project is not None: route_values['project'] = self._serialize.url('project', project, 'str') content = self._serialize.body(item_request_data, 'TfvcItemRequestData') response = self._send(http_method='POST', location_id='fe6f827b-5f64-480f-b8af-1eca3b80e833', version='5.1', route_values=route_values, content=content, accept_media_type='application/zip') if "callback" in kwargs: callback = kwargs["callback"] else: callback = None return self._client.stream_download(response, callback=callback) def get_item(self, path, project=None, file_name=None, download=None, scope_path=None, recursion_level=None, version_descriptor=None, include_content=None): route_values = {} if project is not None: route_values['project'] = self._serialize.url('project', project, 'str') query_parameters = {} if path is not None: query_parameters['path'] = self._serialize.query('path', path, 'str') if file_name is not None: query_parameters['fileName'] = self._serialize.query('file_name', file_name, 'str') if download is not None: query_parameters['download'] = self._serialize.query('download', download, 'bool') if scope_path is not None: query_parameters['scopePath'] = self._serialize.query('scope_path', scope_path, 'str') if recursion_level is not None: query_parameters['recursionLevel'] = self._serialize.query('recursion_level', recursion_level, 'str') if version_descriptor is not None: if version_descriptor.version_option is not None: query_parameters['versionDescriptor.versionOption'] = version_descriptor.version_option if version_descriptor.version_type is not None: query_parameters['versionDescriptor.versionType'] = version_descriptor.version_type if version_descriptor.version is not None: query_parameters['versionDescriptor.version'] = version_descriptor.version if include_content is not None: query_parameters['includeContent'] = self._serialize.query('include_content', include_content, 'bool') response = self._send(http_method='GET', location_id='ba9fc436-9a38-4578-89d6-e4f3241f5040', version='5.1', route_values=route_values, query_parameters=query_parameters) return self._deserialize('TfvcItem', response) def get_item_content(self, path, project=None, file_name=None, download=None, scope_path=None, recursion_level=None, version_descriptor=None, include_content=None, **kwargs): route_values = {} if project is not None: route_values['project'] = self._serialize.url('project', project, 'str') query_parameters = {} if path is not None: query_parameters['path'] = self._serialize.query('path', path, 'str') if file_name is not None: query_parameters['fileName'] = self._serialize.query('file_name', file_name, 'str') if download is not None: query_parameters['download'] = self._serialize.query('download', download, 'bool') if scope_path is not None: query_parameters['scopePath'] = self._serialize.query('scope_path', scope_path, 'str') if recursion_level is not None: query_parameters['recursionLevel'] = self._serialize.query('recursion_level', recursion_level, 'str') if version_descriptor is not None: if version_descriptor.version_option is not None: query_parameters['versionDescriptor.versionOption'] = version_descriptor.version_option if version_descriptor.version_type is not None: query_parameters['versionDescriptor.versionType'] = version_descriptor.version_type if version_descriptor.version is not None: query_parameters['versionDescriptor.version'] = version_descriptor.version if include_content is not None: query_parameters['includeContent'] = self._serialize.query('include_content', 
include_content, 'bool') response = self._send(http_method='GET', location_id='ba9fc436-9a38-4578-89d6-e4f3241f5040', version='5.1', route_values=route_values, query_parameters=query_parameters, accept_media_type='application/octet-stream') if "callback" in kwargs: callback = kwargs["callback"] else: callback = None return self._client.stream_download(response, callback=callback) def get_items(self, project=None, scope_path=None, recursion_level=None, include_links=None, version_descriptor=None): route_values = {} if project is not None: route_values['project'] = self._serialize.url('project', project, 'str') query_parameters = {} if scope_path is not None: query_parameters['scopePath'] = self._serialize.query('scope_path', scope_path, 'str') if recursion_level is not None: query_parameters['recursionLevel'] = self._serialize.query('recursion_level', recursion_level, 'str') if include_links is not None: query_parameters['includeLinks'] = self._serialize.query('include_links', include_links, 'bool') if version_descriptor is not None: if version_descriptor.version_option is not None: query_parameters['versionDescriptor.versionOption'] = version_descriptor.version_option if version_descriptor.version_type is not None: query_parameters['versionDescriptor.versionType'] = version_descriptor.version_type if version_descriptor.version is not None: query_parameters['versionDescriptor.version'] = version_descriptor.version response = self._send(http_method='GET', location_id='ba9fc436-9a38-4578-89d6-e4f3241f5040', version='5.1', route_values=route_values, query_parameters=query_parameters) return self._deserialize('[TfvcItem]', self._unwrap_collection(response)) def get_item_text(self, path, project=None, file_name=None, download=None, scope_path=None, recursion_level=None, version_descriptor=None, include_content=None, **kwargs): route_values = {} if project is not None: route_values['project'] = self._serialize.url('project', project, 'str') query_parameters = {} if path is not None: query_parameters['path'] = self._serialize.query('path', path, 'str') if file_name is not None: query_parameters['fileName'] = self._serialize.query('file_name', file_name, 'str') if download is not None: query_parameters['download'] = self._serialize.query('download', download, 'bool') if scope_path is not None: query_parameters['scopePath'] = self._serialize.query('scope_path', scope_path, 'str') if recursion_level is not None: query_parameters['recursionLevel'] = self._serialize.query('recursion_level', recursion_level, 'str') if version_descriptor is not None: if version_descriptor.version_option is not None: query_parameters['versionDescriptor.versionOption'] = version_descriptor.version_option if version_descriptor.version_type is not None: query_parameters['versionDescriptor.versionType'] = version_descriptor.version_type if version_descriptor.version is not None: query_parameters['versionDescriptor.version'] = version_descriptor.version if include_content is not None: query_parameters['includeContent'] = self._serialize.query('include_content', include_content, 'bool') response = self._send(http_method='GET', location_id='ba9fc436-9a38-4578-89d6-e4f3241f5040', version='5.1', route_values=route_values, query_parameters=query_parameters, accept_media_type='text/plain') if "callback" in kwargs: callback = kwargs["callback"] else: callback = None return self._client.stream_download(response, callback=callback) def get_item_zip(self, path, project=None, file_name=None, download=None, scope_path=None, 
recursion_level=None, version_descriptor=None, include_content=None, **kwargs): route_values = {} if project is not None: route_values['project'] = self._serialize.url('project', project, 'str') query_parameters = {} if path is not None: query_parameters['path'] = self._serialize.query('path', path, 'str') if file_name is not None: query_parameters['fileName'] = self._serialize.query('file_name', file_name, 'str') if download is not None: query_parameters['download'] = self._serialize.query('download', download, 'bool') if scope_path is not None: query_parameters['scopePath'] = self._serialize.query('scope_path', scope_path, 'str') if recursion_level is not None: query_parameters['recursionLevel'] = self._serialize.query('recursion_level', recursion_level, 'str') if version_descriptor is not None: if version_descriptor.version_option is not None: query_parameters['versionDescriptor.versionOption'] = version_descriptor.version_option if version_descriptor.version_type is not None: query_parameters['versionDescriptor.versionType'] = version_descriptor.version_type if version_descriptor.version is not None: query_parameters['versionDescriptor.version'] = version_descriptor.version if include_content is not None: query_parameters['includeContent'] = self._serialize.query('include_content', include_content, 'bool') response = self._send(http_method='GET', location_id='ba9fc436-9a38-4578-89d6-e4f3241f5040', version='5.1', route_values=route_values, query_parameters=query_parameters, accept_media_type='application/zip') if "callback" in kwargs: callback = kwargs["callback"] else: callback = None return self._client.stream_download(response, callback=callback) def get_label_items(self, label_id, top=None, skip=None): route_values = {} if label_id is not None: route_values['labelId'] = self._serialize.url('label_id', label_id, 'str') query_parameters = {} if top is not None: query_parameters['$top'] = self._serialize.query('top', top, 'int') if skip is not None: query_parameters['$skip'] = self._serialize.query('skip', skip, 'int') response = self._send(http_method='GET', location_id='06166e34-de17-4b60-8cd1-23182a346fda', version='5.1', route_values=route_values, query_parameters=query_parameters) return self._deserialize('[TfvcItem]', self._unwrap_collection(response)) def get_label(self, label_id, request_data, project=None): route_values = {} if project is not None: route_values['project'] = self._serialize.url('project', project, 'str') if label_id is not None: route_values['labelId'] = self._serialize.url('label_id', label_id, 'str') query_parameters = {} if request_data is not None: if request_data.label_scope is not None: query_parameters['requestData.labelScope'] = request_data.label_scope if request_data.name is not None: query_parameters['requestData.name'] = request_data.name if request_data.owner is not None: query_parameters['requestData.owner'] = request_data.owner if request_data.item_label_filter is not None: query_parameters['requestData.itemLabelFilter'] = request_data.item_label_filter if request_data.max_item_count is not None: query_parameters['requestData.maxItemCount'] = request_data.max_item_count if request_data.include_links is not None: query_parameters['requestData.includeLinks'] = request_data.include_links response = self._send(http_method='GET', location_id='a5d9bd7f-b661-4d0e-b9be-d9c16affae54', version='5.1', route_values=route_values, query_parameters=query_parameters) return self._deserialize('TfvcLabel', response)
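A hedged usage sketch for the client shown in the context above. The import path, organization URL, and personal access token below are assumptions (placeholders); in practice this low-level client is usually obtained through the SDK's connection object rather than constructed directly.

from msrest.authentication import BasicAuthentication
from azext_devops.devops_sdk.v5_1.tfvc.tfvc_client import TfvcClient  # assumed module path

# Placeholder values: substitute a real organization URL and PAT.
organization_url = "https://dev.azure.com/your-organization"
credentials = BasicAuthentication("", "your-personal-access-token")

client = TfvcClient(base_url=organization_url, creds=credentials)

# get_branch is defined in the context above; the optional flags are
# serialized as boolean query parameters.
branch = client.get_branch(path="$/MyProject/MainBranch",
                           project="MyProject",
                           include_parent=True,
                           include_children=False)
print(branch)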
MIT License
deanmalmgren/flo
flo/tasks/graph.py
TaskGraph._run_helper
python
def _run_helper(self, starting_tasks, do_run_func, mock_run):
        self.logger.info(self.duration_message(starting_tasks))
        for task in self.iter_tasks(starting_tasks):
            if do_run_func(task):
                if mock_run:
                    task.mock_run()
                else:
                    try:
                        task.timed_run()
                    except (KeyboardInterrupt, ShellError), error:
                        self.save_state(
                            override_resource_states={task.name: ''},
                        )
                        sys.exit(getattr(error, 'exit_code', 1))
        if not mock_run:
            self.save_state()
This is a convenience method that is used to slightly modify the behavior of running a workflow depending on the circumstances.
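The standalone sketch below mirrors the control flow of _run_helper with stub tasks: run each task unless mocking, and persist state before exiting on interruption. None of the names here come from flo itself, and the state-saving step is reduced to a comment.

# Illustrative sketch only; not flo's API.
import sys


class StubTask(object):
    def __init__(self, name):
        self.name = name

    def timed_run(self):
        print("running %s" % self.name)

    def mock_run(self):
        print("would run %s" % self.name)


def run_tasks(tasks, do_run_func, mock_run):
    for task in tasks:
        if not do_run_func(task):
            continue
        if mock_run:
            task.mock_run()
        else:
            try:
                task.timed_run()
            except KeyboardInterrupt as error:
                # flo also saves workflow state here before exiting
                sys.exit(getattr(error, 'exit_code', 1))


run_tasks([StubTask('a'), StubTask('b')],
          do_run_func=lambda task: True,
          mock_run=True)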
https://github.com/deanmalmgren/flo/blob/40ba3ce29a03cecb74bf809e40061e5e5c9d6a6b/flo/tasks/graph.py#L368-L386
import sys import os import time import csv import collections import datetime import glob from distutils.util import strtobool import json import networkx as nx from ..exceptions import NonUniqueTask, ShellError, CommandLineException from .. import colors from .. import shell from .. import resources from .. import logger from .task import Task class TaskGraph(object): internals_path = ".flo" state_path = os.path.join(internals_path, "state.csv") duration_path = os.path.join(internals_path, "duration.csv") log_path = os.path.join(internals_path, "flo.log") archive_dir = os.path.join(internals_path, "archive") def __init__(self, config_path, task_kwargs_list): self.task_list = [] self.task_dict = {} self.config_path = config_path self.root_directory = os.path.dirname(config_path) directory = os.path.dirname(self.abs_state_path) if not os.path.exists(directory): os.makedirs(directory) if not os.path.exists(self.abs_archive_dir): os.makedirs(self.abs_archive_dir) self.resource_dict = {} self.task_durations = {} self.logger = logger.configure(self) self.successful = False for task_kwargs in task_kwargs_list: task = Task(self, **task_kwargs) self._link_dependencies() self._load_state() nx_graph = self.get_networkx_graph(task_id_only=True) if not nx.is_directed_acyclic_graph(nx_graph): msg = "This task graph has the following dependency cycles:\n\n" cycles = nx.simple_cycles(nx_graph) for cycle in cycles: msg += ' %s\n' % cycle[0] for task_id in cycle[1:]: msg += ' -> %s\n' % task_id raise CommandLineException(msg) def iter_tasks(self, tasks=None): source_tasks = tasks or self.get_source_tasks() distances = {} horizon = list(source_tasks) done, horizon_set = set(), set(source_tasks) source_tasks = set(source_tasks) while horizon: found = False for task in horizon: if ( task in source_tasks or not task.upstream_tasks.difference(done) ): found = True break if not found: raise Exception("NOT FOUND %s" % ([t.id for t in horizon], )) if task in source_tasks: distance = 0 else: distance = max(map(distances.get, task.upstream_tasks)) + 1 distances[task] = distance horizon.remove(task) horizon_set.discard(task) done.add(task) for downstream_task in task.downstream_tasks.difference(done): if downstream_task not in horizon_set: horizon.append(downstream_task) horizon_set.add(downstream_task) decorated_list = [] for task, distance in distances.iteritems(): decorated_list.append(( distance, self.task_list.index(task), task, )) decorated_list.sort() for distance, index, task in decorated_list: yield task def get_source_tasks(self): source_tasks = [] for task in self.task_list: if not task.upstream_tasks: source_tasks.append(task) return source_tasks def get_sink_tasks(self): sink_tasks = [] for task in self.task_list: if not task.downstream_tasks: sink_tasks.append(task) return sink_tasks def get_out_of_sync_tasks(self): out_of_sync_tasks = [] for task in self.iter_tasks(): if not task.in_sync(): out_of_sync_tasks.append(task) return out_of_sync_tasks def add(self, task): self.task_list.append(task) if task.creates in self.task_dict: raise NonUniqueTask( "task `creates` '%s' is not unique" % task.creates ) self.task_dict[task.creates] = task def remove_node_substituting_dependencies(self, task_id): task = self.task_dict.pop(task_id) self.task_list.remove(task) if task_id in self.task_durations: self.task_durations.pop(task_id) task.disconnect_resources() task.substitute_dependencies() del task def subgraph_needed_for(self, start_at, end_at): assert start_at or end_at, "one of {start_at,end_at} must be a 
task id" start, end = map(self.task_dict.get, [start_at, end_at]) if None in [start, end]: graph = self.get_networkx_graph() if start: task_subset = nx.descendants(graph, start) task_subset.add(start) elif end: task_subset = nx.ancestors(graph, end) task_subset.add(end) elif start == end: task_subset = set([start]) else: graph = self.get_networkx_graph() task_subset = set() for path in nx.all_simple_paths(graph, start, end): task_subset.update(path) tasks_kwargs_list = [task.yaml_data for task in self.task_list if task in task_subset] subgraph = TaskGraph(self.config_path, tasks_kwargs_list) return subgraph def get_networkx_graph(self, task_id_only=False): graph = nx.DiGraph() if task_id_only: graph.add_nodes_from([task.id for task in self.task_list]) for node in self.task_list: for child in node.downstream_tasks: graph.add_edge(node.id, child.id) else: graph.add_nodes_from(self.task_list) for node in graph: for child in node.downstream_tasks: graph.add_edge(node, child) return graph def _dereference_alias_helper(self, name): if name is None: return None for task in self.task_list: if task.alias == name: return task.creates def _dereference_depends_aliases(self): for task in self.task_list: if isinstance(task.depends, (list, tuple)): for i, d in enumerate(task.depends): dd = self._dereference_alias_helper(d) if dd is not None: task.depends[i] = dd else: dd = self._dereference_alias_helper(task.depends) if dd is not None: task.depends = dd def _link_dependencies(self): for task in self.task_list: for resource in task.depends_resources: if isinstance(resource.creates_task, Task): task.add_task_dependency(resource.creates_task) def get_user_clean_confirmation(self, task_list=None, include_internals=False): self.logger.info(colors.red( "Please confirm that you want to delete the following files:" )) time.sleep(0.5) task_list = task_list or self.task_list if include_internals: self.logger.info(green(self.internals_path)) for task in task_list: self.logger.info(task.creates_message()) yesno = raw_input(colors.red("Delete aforementioned files? 
[Y/n] ")) if yesno == '': yesno = 'y' return strtobool(yesno) def clean(self, task_list=None, include_internals=False): if os.path.exists(self.abs_state_path) and task_list is None: os.remove(self.abs_state_path) if include_internals: shell.run(self.root_directory, "rm -rf %s" % self.internals_path) self.logger.info( "removed %s" % colors.green(self.internals_path) ) task_list = task_list or self.task_list for task in task_list: task.clean() def status_json(self): result = {"nodes": [], "links": []} node_index = {} for i, task in enumerate(self.iter_tasks()): node_index[task] = i result["nodes"].append({ "task_id": task.id, "duration": self.task_durations.get(task.id, None), "in_sync": task.in_sync(), }) for task in node_index: for child in task.downstream_tasks: result["links"].append({ "source": node_index[task], "target": node_index[child], }) return json.dumps(result) def duration_string(self, duration): if duration < 10 * 60: return "%.2f" % (duration) + " s" elif duration < 2 * 60 * 60: return "%.2f" % (duration / 60) + " m" elif duration < 2 * 60 * 60 * 24: return "%.2f" % (duration / 60 / 60) + " h" else: return "%.2f" % (duration / 60 / 60 / 24) + " d" def duration_message(self, tasks, color=colors.blue): if tasks is None: tasks = list(self.iter_tasks()) if len(tasks) == 0: return "No tasks are out of sync in this workflow (%s)" % ( os.path.relpath(self.config_path, os.getcwd()) ) min_duration = 0.0 for task in tasks: min_duration += self.task_durations.get(task.id, 0.0) max_duration, n_unknown, n_tasks = 0.0, 0, 0 for task in self.iter_tasks(tasks): n_tasks += 1 try: max_duration += self.task_durations[task.id] except KeyError: n_unknown += 1 msg = '' if n_unknown > 0: msg += "There are %d new tasks with unknown durations.\n" % ( n_unknown, ) if len(tasks) == n_tasks: msg += "The remaining %d tasks need to be executed,\n" % n_tasks else: msg += "The remaining %d to %d tasks need to be executed,\n" % ( len(tasks), n_tasks, ) if max_duration == min_duration == 0.0: msg += "which will take an indeterminate amount of time." elif max_duration == min_duration: msg += "which will take approximately %s." % ( self.duration_string(min_duration), ) else: msg += "which will take between %s and %s." % ( self.duration_string(min_duration), self.duration_string(max_duration), ) if color: msg = color(msg) return msg
MIT License
2ndwatch/cloudendure-python
cloudendure/cloudendure_api/models/cloud_endure_subnet.py
CloudEndureSubnet.__init__
python
def __init__(self, subnet_id=None, network_id=None, name=None):
        self._subnet_id = None
        self._network_id = None
        self._name = None
        self.discriminator = None
        if subnet_id is not None:
            self.subnet_id = subnet_id
        if network_id is not None:
            self.network_id = network_id
        if name is not None:
            self.name = name
CloudEndureSubnet - a model defined in Swagger
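A minimal construction sketch, assuming the package is importable under the path shown in the record and that the usual swagger-generated property getters exist (only __init__ appears above). All values are placeholders.

from cloudendure.cloudendure_api.models.cloud_endure_subnet import CloudEndureSubnet

# Keyword arguments match the swagger_types declared in the context below.
subnet = CloudEndureSubnet(
    subnet_id="subnet-0123456789abcdef0",   # placeholder
    network_id="vpc-0123456789abcdef0",     # placeholder
    name="private-subnet-a",
)

# Assumes the generated property getters; only __init__ is shown in the record.
print(subnet.name)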
https://github.com/2ndwatch/cloudendure-python/blob/f81d1be1422b7c19adedb06c584803eaaa811919/cloudendure/cloudendure_api/models/cloud_endure_subnet.py#L36-L47
import pprint
import re

import six


class CloudEndureSubnet:
    """
    Attributes:
      swagger_types (dict): The key is attribute name
                            and the value is attribute type.
      attribute_map (dict): The key is attribute name
                            and the value is json key in definition.
    """

    swagger_types = {"subnet_id": "str", "network_id": "str", "name": "str"}

    attribute_map = {"subnet_id": "subnetId", "network_id": "networkId", "name": "name"}
MIT License
maximtrp/ranger-archives
compress.py
compress.tab
python
def tab(self, tabnum):
        extension = ['.7z', '.zip', '.tar.gz', '.tar.bz2', '.tar.xz']
        return ['compress ' + os.path.basename(self.fm.thisdir.path) + ext
                for ext in extension]
Complete with current folder name
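A standalone sketch of what this completion produces, with a placeholder directory standing in for self.fm.thisdir.path; no ranger import is needed.

import os.path

current_dir = "/home/user/projects/my-folder"   # stand-in for self.fm.thisdir.path
extensions = ['.7z', '.zip', '.tar.gz', '.tar.bz2', '.tar.xz']

# Same list comprehension as tab(): one "compress <folder><ext>" candidate per extension.
completions = ['compress ' + os.path.basename(current_dir) + ext
               for ext in extensions]
print(completions)
# ['compress my-folder.7z', 'compress my-folder.zip', 'compress my-folder.tar.gz', ...]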
https://github.com/maximtrp/ranger-archives/blob/f19bdd4190997f29bad52a5584d6490a988fbfda/compress.py#L53-L57
import os.path
from re import search

from ranger.api.commands import Command
from ranger.core.loader import CommandLoader

from .archives_utils import parse_escape_args, get_compression_command


class compress(Command):
    def execute(self):
        cwd = self.fm.thisdir
        marked_files = cwd.get_selection()
        files_num = len(marked_files)
        if not marked_files:
            return

        filenames = [os.path.relpath(f.path, cwd.path) for f in marked_files]
        flags = parse_escape_args(self.line.strip())[1:]

        archive_name = None
        if flags:
            flags_last = flags.pop()
            if search(r".*?\.\w+", flags_last) is None:
                flags += [flags_last]
            else:
                archive_name = flags_last
        if not archive_name:
            archive_name = os.path.basename(self.fm.thisdir.path) + '.zip'

        command = get_compression_command(archive_name, flags, filenames)

        files_num_str = f'{files_num} objects' if files_num > 1 else '1 object'
        descr = f"Compressing {files_num_str} -> " + os.path.basename(archive_name)
        obj = CommandLoader(args=command, descr=descr, read=True)

        def refresh(_):
            _cwd = self.fm.get_directory(cwd.path)
            _cwd.load_content()

        obj.signal_bind('after', refresh)
        self.fm.loader.add(obj)
MIT License
docusign/docusign-python-client
docusign_esign/models/tab_account_settings.py
TabAccountSettings.note_tabs_enabled
python
def note_tabs_enabled(self, note_tabs_enabled):
        self._note_tabs_enabled = note_tabs_enabled
Sets the note_tabs_enabled of this TabAccountSettings.  # noqa: E501

:param note_tabs_enabled: The note_tabs_enabled of this TabAccountSettings.  # noqa: E501
:type: str
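A minimal usage sketch, assuming the docusign_esign package is installed and that the module path mirrors the repository layout. The value is a placeholder; DocuSign account settings are transported as strings, so "true"/"false" rather than booleans.

from docusign_esign.models.tab_account_settings import TabAccountSettings

settings = TabAccountSettings()
# The setter shown above simply stores the value on the model.
settings.note_tabs_enabled = "true"
print(settings.note_tabs_enabled)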
https://github.com/docusign/docusign-python-client/blob/c6aeafff0d046fa6c10a398be83ba9e24b05d4ea/docusign_esign/models/tab_account_settings.py#L626-L635
import pprint import re import six from docusign_esign.client.configuration import Configuration class TabAccountSettings(object): """ Attributes: swagger_types (dict): The key is attribute name and the value is attribute type. attribute_map (dict): The key is attribute name and the value is json key in definition. """ swagger_types = { 'allow_tab_order': 'str', 'allow_tab_order_metadata': 'SettingsMetadata', 'approve_decline_tabs_enabled': 'str', 'approve_decline_tabs_metadata': 'SettingsMetadata', 'calculated_fields_enabled': 'str', 'calculated_fields_metadata': 'SettingsMetadata', 'checkbox_tabs_enabled': 'str', 'check_box_tabs_metadata': 'SettingsMetadata', 'data_field_regex_enabled': 'str', 'data_field_regex_metadata': 'SettingsMetadata', 'data_field_size_enabled': 'str', 'data_field_size_metadata': 'SettingsMetadata', 'draw_tabs_enabled': 'str', 'draw_tabs_metadata': 'SettingsMetadata', 'first_last_email_tabs_enabled': 'str', 'first_last_email_tabs_metadata': 'SettingsMetadata', 'list_tabs_enabled': 'str', 'list_tabs_metadata': 'SettingsMetadata', 'note_tabs_enabled': 'str', 'note_tabs_metadata': 'SettingsMetadata', 'prefill_tabs_enabled': 'str', 'prefill_tabs_metadata': 'SettingsMetadata', 'radio_tabs_enabled': 'str', 'radio_tabs_metadata': 'SettingsMetadata', 'saving_custom_tabs_enabled': 'str', 'saving_custom_tabs_metadata': 'SettingsMetadata', 'sender_to_change_tab_assignments_enabled': 'str', 'sender_to_change_tab_assignments_metadata': 'SettingsMetadata', 'shared_custom_tabs_enabled': 'str', 'shared_custom_tabs_metadata': 'SettingsMetadata', 'tab_data_label_enabled': 'str', 'tab_data_label_metadata': 'SettingsMetadata', 'tab_location_enabled': 'str', 'tab_location_metadata': 'SettingsMetadata', 'tab_locking_enabled': 'str', 'tab_locking_metadata': 'SettingsMetadata', 'tab_scale_enabled': 'str', 'tab_scale_metadata': 'SettingsMetadata', 'tab_text_formatting_enabled': 'str', 'tab_text_formatting_metadata': 'SettingsMetadata', 'text_tabs_enabled': 'str', 'text_tabs_metadata': 'SettingsMetadata' } attribute_map = { 'allow_tab_order': 'allowTabOrder', 'allow_tab_order_metadata': 'allowTabOrderMetadata', 'approve_decline_tabs_enabled': 'approveDeclineTabsEnabled', 'approve_decline_tabs_metadata': 'approveDeclineTabsMetadata', 'calculated_fields_enabled': 'calculatedFieldsEnabled', 'calculated_fields_metadata': 'calculatedFieldsMetadata', 'checkbox_tabs_enabled': 'checkboxTabsEnabled', 'check_box_tabs_metadata': 'checkBoxTabsMetadata', 'data_field_regex_enabled': 'dataFieldRegexEnabled', 'data_field_regex_metadata': 'dataFieldRegexMetadata', 'data_field_size_enabled': 'dataFieldSizeEnabled', 'data_field_size_metadata': 'dataFieldSizeMetadata', 'draw_tabs_enabled': 'drawTabsEnabled', 'draw_tabs_metadata': 'drawTabsMetadata', 'first_last_email_tabs_enabled': 'firstLastEmailTabsEnabled', 'first_last_email_tabs_metadata': 'firstLastEmailTabsMetadata', 'list_tabs_enabled': 'listTabsEnabled', 'list_tabs_metadata': 'listTabsMetadata', 'note_tabs_enabled': 'noteTabsEnabled', 'note_tabs_metadata': 'noteTabsMetadata', 'prefill_tabs_enabled': 'prefillTabsEnabled', 'prefill_tabs_metadata': 'prefillTabsMetadata', 'radio_tabs_enabled': 'radioTabsEnabled', 'radio_tabs_metadata': 'radioTabsMetadata', 'saving_custom_tabs_enabled': 'savingCustomTabsEnabled', 'saving_custom_tabs_metadata': 'savingCustomTabsMetadata', 'sender_to_change_tab_assignments_enabled': 'senderToChangeTabAssignmentsEnabled', 'sender_to_change_tab_assignments_metadata': 'senderToChangeTabAssignmentsMetadata', 
'shared_custom_tabs_enabled': 'sharedCustomTabsEnabled', 'shared_custom_tabs_metadata': 'sharedCustomTabsMetadata', 'tab_data_label_enabled': 'tabDataLabelEnabled', 'tab_data_label_metadata': 'tabDataLabelMetadata', 'tab_location_enabled': 'tabLocationEnabled', 'tab_location_metadata': 'tabLocationMetadata', 'tab_locking_enabled': 'tabLockingEnabled', 'tab_locking_metadata': 'tabLockingMetadata', 'tab_scale_enabled': 'tabScaleEnabled', 'tab_scale_metadata': 'tabScaleMetadata', 'tab_text_formatting_enabled': 'tabTextFormattingEnabled', 'tab_text_formatting_metadata': 'tabTextFormattingMetadata', 'text_tabs_enabled': 'textTabsEnabled', 'text_tabs_metadata': 'textTabsMetadata' } def __init__(self, _configuration=None, **kwargs): if _configuration is None: _configuration = Configuration() self._configuration = _configuration self._allow_tab_order = None self._allow_tab_order_metadata = None self._approve_decline_tabs_enabled = None self._approve_decline_tabs_metadata = None self._calculated_fields_enabled = None self._calculated_fields_metadata = None self._checkbox_tabs_enabled = None self._check_box_tabs_metadata = None self._data_field_regex_enabled = None self._data_field_regex_metadata = None self._data_field_size_enabled = None self._data_field_size_metadata = None self._draw_tabs_enabled = None self._draw_tabs_metadata = None self._first_last_email_tabs_enabled = None self._first_last_email_tabs_metadata = None self._list_tabs_enabled = None self._list_tabs_metadata = None self._note_tabs_enabled = None self._note_tabs_metadata = None self._prefill_tabs_enabled = None self._prefill_tabs_metadata = None self._radio_tabs_enabled = None self._radio_tabs_metadata = None self._saving_custom_tabs_enabled = None self._saving_custom_tabs_metadata = None self._sender_to_change_tab_assignments_enabled = None self._sender_to_change_tab_assignments_metadata = None self._shared_custom_tabs_enabled = None self._shared_custom_tabs_metadata = None self._tab_data_label_enabled = None self._tab_data_label_metadata = None self._tab_location_enabled = None self._tab_location_metadata = None self._tab_locking_enabled = None self._tab_locking_metadata = None self._tab_scale_enabled = None self._tab_scale_metadata = None self._tab_text_formatting_enabled = None self._tab_text_formatting_metadata = None self._text_tabs_enabled = None self._text_tabs_metadata = None self.discriminator = None setattr(self, "_{}".format('allow_tab_order'), kwargs.get('allow_tab_order', None)) setattr(self, "_{}".format('allow_tab_order_metadata'), kwargs.get('allow_tab_order_metadata', None)) setattr(self, "_{}".format('approve_decline_tabs_enabled'), kwargs.get('approve_decline_tabs_enabled', None)) setattr(self, "_{}".format('approve_decline_tabs_metadata'), kwargs.get('approve_decline_tabs_metadata', None)) setattr(self, "_{}".format('calculated_fields_enabled'), kwargs.get('calculated_fields_enabled', None)) setattr(self, "_{}".format('calculated_fields_metadata'), kwargs.get('calculated_fields_metadata', None)) setattr(self, "_{}".format('checkbox_tabs_enabled'), kwargs.get('checkbox_tabs_enabled', None)) setattr(self, "_{}".format('check_box_tabs_metadata'), kwargs.get('check_box_tabs_metadata', None)) setattr(self, "_{}".format('data_field_regex_enabled'), kwargs.get('data_field_regex_enabled', None)) setattr(self, "_{}".format('data_field_regex_metadata'), kwargs.get('data_field_regex_metadata', None)) setattr(self, "_{}".format('data_field_size_enabled'), kwargs.get('data_field_size_enabled', None)) setattr(self, 
"_{}".format('data_field_size_metadata'), kwargs.get('data_field_size_metadata', None)) setattr(self, "_{}".format('draw_tabs_enabled'), kwargs.get('draw_tabs_enabled', None)) setattr(self, "_{}".format('draw_tabs_metadata'), kwargs.get('draw_tabs_metadata', None)) setattr(self, "_{}".format('first_last_email_tabs_enabled'), kwargs.get('first_last_email_tabs_enabled', None)) setattr(self, "_{}".format('first_last_email_tabs_metadata'), kwargs.get('first_last_email_tabs_metadata', None)) setattr(self, "_{}".format('list_tabs_enabled'), kwargs.get('list_tabs_enabled', None)) setattr(self, "_{}".format('list_tabs_metadata'), kwargs.get('list_tabs_metadata', None)) setattr(self, "_{}".format('note_tabs_enabled'), kwargs.get('note_tabs_enabled', None)) setattr(self, "_{}".format('note_tabs_metadata'), kwargs.get('note_tabs_metadata', None)) setattr(self, "_{}".format('prefill_tabs_enabled'), kwargs.get('prefill_tabs_enabled', None)) setattr(self, "_{}".format('prefill_tabs_metadata'), kwargs.get('prefill_tabs_metadata', None)) setattr(self, "_{}".format('radio_tabs_enabled'), kwargs.get('radio_tabs_enabled', None)) setattr(self, "_{}".format('radio_tabs_metadata'), kwargs.get('radio_tabs_metadata', None)) setattr(self, "_{}".format('saving_custom_tabs_enabled'), kwargs.get('saving_custom_tabs_enabled', None)) setattr(self, "_{}".format('saving_custom_tabs_metadata'), kwargs.get('saving_custom_tabs_metadata', None)) setattr(self, "_{}".format('sender_to_change_tab_assignments_enabled'), kwargs.get('sender_to_change_tab_assignments_enabled', None)) setattr(self, "_{}".format('sender_to_change_tab_assignments_metadata'), kwargs.get('sender_to_change_tab_assignments_metadata', None)) setattr(self, "_{}".format('shared_custom_tabs_enabled'), kwargs.get('shared_custom_tabs_enabled', None)) setattr(self, "_{}".format('shared_custom_tabs_metadata'), kwargs.get('shared_custom_tabs_metadata', None)) setattr(self, "_{}".format('tab_data_label_enabled'), kwargs.get('tab_data_label_enabled', None)) setattr(self, "_{}".format('tab_data_label_metadata'), kwargs.get('tab_data_label_metadata', None)) setattr(self, "_{}".format('tab_location_enabled'), kwargs.get('tab_location_enabled', None)) setattr(self, "_{}".format('tab_location_metadata'), kwargs.get('tab_location_metadata', None)) setattr(self, "_{}".format('tab_locking_enabled'), kwargs.get('tab_locking_enabled', None)) setattr(self, "_{}".format('tab_locking_metadata'), kwargs.get('tab_locking_metadata', None)) setattr(self, "_{}".format('tab_scale_enabled'), kwargs.get('tab_scale_enabled', None)) setattr(self, "_{}".format('tab_scale_metadata'), kwargs.get('tab_scale_metadata', None)) setattr(self, "_{}".format('tab_text_formatting_enabled'), kwargs.get('tab_text_formatting_enabled', None)) setattr(self, "_{}".format('tab_text_formatting_metadata'), kwargs.get('tab_text_formatting_metadata', None)) setattr(self, "_{}".format('text_tabs_enabled'), kwargs.get('text_tabs_enabled', None)) setattr(self, "_{}".format('text_tabs_metadata'), kwargs.get('text_tabs_metadata', None)) @property def allow_tab_order(self): return self._allow_tab_order @allow_tab_order.setter def allow_tab_order(self, allow_tab_order): self._allow_tab_order = allow_tab_order @property def allow_tab_order_metadata(self): return self._allow_tab_order_metadata @allow_tab_order_metadata.setter def allow_tab_order_metadata(self, allow_tab_order_metadata): self._allow_tab_order_metadata = allow_tab_order_metadata @property def approve_decline_tabs_enabled(self): return 
self._approve_decline_tabs_enabled @approve_decline_tabs_enabled.setter def approve_decline_tabs_enabled(self, approve_decline_tabs_enabled): self._approve_decline_tabs_enabled = approve_decline_tabs_enabled @property def approve_decline_tabs_metadata(self): return self._approve_decline_tabs_metadata @approve_decline_tabs_metadata.setter def approve_decline_tabs_metadata(self, approve_decline_tabs_metadata): self._approve_decline_tabs_metadata = approve_decline_tabs_metadata @property def calculated_fields_enabled(self): return self._calculated_fields_enabled @calculated_fields_enabled.setter def calculated_fields_enabled(self, calculated_fields_enabled): self._calculated_fields_enabled = calculated_fields_enabled @property def calculated_fields_metadata(self): return self._calculated_fields_metadata @calculated_fields_metadata.setter def calculated_fields_metadata(self, calculated_fields_metadata): self._calculated_fields_metadata = calculated_fields_metadata @property def checkbox_tabs_enabled(self): return self._checkbox_tabs_enabled @checkbox_tabs_enabled.setter def checkbox_tabs_enabled(self, checkbox_tabs_enabled): self._checkbox_tabs_enabled = checkbox_tabs_enabled @property def check_box_tabs_metadata(self): return self._check_box_tabs_metadata @check_box_tabs_metadata.setter def check_box_tabs_metadata(self, check_box_tabs_metadata): self._check_box_tabs_metadata = check_box_tabs_metadata @property def data_field_regex_enabled(self): return self._data_field_regex_enabled @data_field_regex_enabled.setter def data_field_regex_enabled(self, data_field_regex_enabled): self._data_field_regex_enabled = data_field_regex_enabled @property def data_field_regex_metadata(self): return self._data_field_regex_metadata @data_field_regex_metadata.setter def data_field_regex_metadata(self, data_field_regex_metadata): self._data_field_regex_metadata = data_field_regex_metadata @property def data_field_size_enabled(self): return self._data_field_size_enabled @data_field_size_enabled.setter def data_field_size_enabled(self, data_field_size_enabled): self._data_field_size_enabled = data_field_size_enabled @property def data_field_size_metadata(self): return self._data_field_size_metadata @data_field_size_metadata.setter def data_field_size_metadata(self, data_field_size_metadata): self._data_field_size_metadata = data_field_size_metadata @property def draw_tabs_enabled(self): return self._draw_tabs_enabled @draw_tabs_enabled.setter def draw_tabs_enabled(self, draw_tabs_enabled): self._draw_tabs_enabled = draw_tabs_enabled @property def draw_tabs_metadata(self): return self._draw_tabs_metadata @draw_tabs_metadata.setter def draw_tabs_metadata(self, draw_tabs_metadata): self._draw_tabs_metadata = draw_tabs_metadata @property def first_last_email_tabs_enabled(self): return self._first_last_email_tabs_enabled @first_last_email_tabs_enabled.setter def first_last_email_tabs_enabled(self, first_last_email_tabs_enabled): self._first_last_email_tabs_enabled = first_last_email_tabs_enabled @property def first_last_email_tabs_metadata(self): return self._first_last_email_tabs_metadata @first_last_email_tabs_metadata.setter def first_last_email_tabs_metadata(self, first_last_email_tabs_metadata): self._first_last_email_tabs_metadata = first_last_email_tabs_metadata @property def list_tabs_enabled(self): return self._list_tabs_enabled @list_tabs_enabled.setter def list_tabs_enabled(self, list_tabs_enabled): self._list_tabs_enabled = list_tabs_enabled @property def list_tabs_metadata(self): return 
self._list_tabs_metadata @list_tabs_metadata.setter def list_tabs_metadata(self, list_tabs_metadata): self._list_tabs_metadata = list_tabs_metadata @property def note_tabs_enabled(self): return self._note_tabs_enabled @note_tabs_enabled.setter
MIT License
cleverhans-lab/cleverhans
cleverhans_v3.1.0/cleverhans/attack_bundling.py
AttackGoal.print_progress
python
def print_progress(self, criteria, run_counts):
        print("Working on a " + self.__class__.__name__ + " goal.")
Prints a progress message about how much has been done toward the goal.

:param criteria: dict, of the format returned by get_criteria
:param run_counts: dict mapping each AttackConfig to a numpy array
    specifying how many times it has been run for each example
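As a hedged illustration of how a subclass might extend this hook, here is a standalone sketch. It does not import cleverhans; in the real API run_counts is keyed by AttackConfig objects, and strings are used here only for demonstration.

import numpy as np


class VerboseGoal(object):
    def print_progress(self, criteria, run_counts):
        # Mirrors the base hook, then adds accuracy and run-count summaries.
        print("Working on a " + self.__class__.__name__ + " goal.")
        print("current accuracy: %.4f" % criteria["correctness"].mean())
        total_runs = sum(int(counts.sum()) for counts in run_counts.values())
        print("attack runs so far: %d" % total_runs)


criteria = {"correctness": np.array([1, 0, 0, 1]),
            "confidence": np.array([0.90, 0.80, 0.60, 0.99])}
run_counts = {"pgd_0": np.array([3, 3, 3, 3])}   # string key for illustration only
VerboseGoal().print_progress(criteria, run_counts)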
https://github.com/cleverhans-lab/cleverhans/blob/4aed4be702be5ce13d5017b8a3c6a2cdc4fc0009/cleverhans_v3.1.0/cleverhans/attack_bundling.py#L694-L701
import copy import logging import time import numpy as np import six from six.moves import range import tensorflow as tf from cleverhans.attacks import Noise from cleverhans.attacks import ProjectedGradientDescent from cleverhans.attacks import SPSA from cleverhans.evaluation import correctness_and_confidence from cleverhans.evaluation import batch_eval_multi_worker, run_attack from cleverhans.model import Model from cleverhans import serial from cleverhans.utils import create_logger, deep_copy, safe_zip from cleverhans.utils_tf import infer_devices from cleverhans.confidence_report import ConfidenceReport from cleverhans.confidence_report import ConfidenceReportEntry from cleverhans.confidence_report import print_stats _logger = create_logger("attack_bundling") _logger.setLevel(logging.INFO) devices = infer_devices() num_devices = len(devices) DEFAULT_EXAMPLES_PER_DEVICE = 128 BATCH_SIZE = DEFAULT_EXAMPLES_PER_DEVICE * num_devices REPORT_TIME_INTERVAL = 60 def single_run_max_confidence_recipe( sess, model, x, y, nb_classes, eps, clip_min, clip_max, eps_iter, nb_iter, report_path, batch_size=BATCH_SIZE, eps_iter_small=None, ): noise_attack = Noise(model, sess) pgd_attack = ProjectedGradientDescent(model, sess) threat_params = {"eps": eps, "clip_min": clip_min, "clip_max": clip_max} noise_attack_config = AttackConfig(noise_attack, threat_params, "noise") attack_configs = [noise_attack_config] pgd_attack_configs = [] pgd_params = copy.copy(threat_params) pgd_params["eps_iter"] = eps_iter pgd_params["nb_iter"] = nb_iter assert batch_size % num_devices == 0 dev_batch_size = batch_size // num_devices ones = tf.ones(dev_batch_size, tf.int32) expensive_pgd = [] if eps_iter_small is None: eps_iter_small = eps_iter / 25.0 for cls in range(nb_classes): cls_params = copy.copy(pgd_params) cls_params["y_target"] = tf.to_float(tf.one_hot(ones * cls, nb_classes)) cls_attack_config = AttackConfig(pgd_attack, cls_params, "pgd_" + str(cls)) pgd_attack_configs.append(cls_attack_config) expensive_params = copy.copy(cls_params) expensive_params["eps_iter"] = eps_iter_small expensive_params["nb_iter"] *= 25.0 expensive_config = AttackConfig( pgd_attack, expensive_params, "expensive_pgd_" + str(cls) ) expensive_pgd.append(expensive_config) attack_configs = [noise_attack_config] + pgd_attack_configs + expensive_pgd new_work_goal = {config: 1 for config in attack_configs} goals = [MaxConfidence(t=1.0, new_work_goal=new_work_goal)] bundle_attacks( sess, model, x, y, attack_configs, goals, report_path, attack_batch_size=batch_size, eval_batch_size=batch_size, ) def basic_max_confidence_recipe( sess, model, x, y, nb_classes, eps, clip_min, clip_max, eps_iter, nb_iter, report_path, batch_size=BATCH_SIZE, eps_iter_small=None, ): noise_attack = Noise(model, sess) pgd_attack = ProjectedGradientDescent(model, sess) threat_params = {"eps": eps, "clip_min": clip_min, "clip_max": clip_max} noise_attack_config = AttackConfig(noise_attack, threat_params) attack_configs = [noise_attack_config] pgd_attack_configs = [] pgd_params = copy.copy(threat_params) pgd_params["eps_iter"] = eps_iter pgd_params["nb_iter"] = nb_iter assert batch_size % num_devices == 0 dev_batch_size = batch_size // num_devices ones = tf.ones(dev_batch_size, tf.int32) expensive_pgd = [] if eps_iter_small is None: eps_iter_small = eps_iter / 25.0 for cls in range(nb_classes): cls_params = copy.copy(pgd_params) cls_params["y_target"] = tf.to_float(tf.one_hot(ones * cls, nb_classes)) cls_attack_config = AttackConfig(pgd_attack, cls_params, "pgd_" + str(cls)) 
pgd_attack_configs.append(cls_attack_config) expensive_params = copy.copy(cls_params) expensive_params["eps_iter"] = eps_iter_small expensive_params["nb_iter"] *= 25.0 expensive_config = AttackConfig( pgd_attack, expensive_params, "expensive_pgd_" + str(cls) ) expensive_pgd.append(expensive_config) attack_configs = [noise_attack_config] + pgd_attack_configs + expensive_pgd new_work_goal = {config: 5 for config in attack_configs} pgd_work_goal = {config: 5 for config in pgd_attack_configs} goals = [ Misclassify(new_work_goal={noise_attack_config: 50}), Misclassify(new_work_goal=pgd_work_goal), MaxConfidence(t=0.5, new_work_goal=new_work_goal), MaxConfidence(t=0.75, new_work_goal=new_work_goal), MaxConfidence(t=0.875, new_work_goal=new_work_goal), MaxConfidence(t=0.9375, new_work_goal=new_work_goal), MaxConfidence(t=0.96875, new_work_goal=new_work_goal), MaxConfidence(t=0.984375, new_work_goal=new_work_goal), MaxConfidence(t=1.0), ] bundle_attacks(sess, model, x, y, attack_configs, goals, report_path) def fixed_max_confidence_recipe( sess, model, x, y, nb_classes, eps, clip_min, clip_max, eps_iter, nb_iter, report_path, batch_size=BATCH_SIZE, eps_iter_small=None, ): noise_attack = Noise(model, sess) pgd_attack = ProjectedGradientDescent(model, sess) threat_params = {"eps": eps, "clip_min": clip_min, "clip_max": clip_max} noise_attack_config = AttackConfig(noise_attack, threat_params) attack_configs = [noise_attack_config] pgd_attack_configs = [] pgd_params = copy.copy(threat_params) pgd_params["eps_iter"] = eps_iter pgd_params["nb_iter"] = nb_iter assert batch_size % num_devices == 0 dev_batch_size = batch_size // num_devices ones = tf.ones(dev_batch_size, tf.int32) if eps_iter_small is None: eps_iter_small = eps_iter / 25.0 expensive_pgd = [] for cls in range(nb_classes): cls_params = copy.copy(pgd_params) cls_params["y_target"] = tf.to_float(tf.one_hot(ones * cls, nb_classes)) cls_attack_config = AttackConfig(pgd_attack, cls_params, "pgd_" + str(cls)) pgd_attack_configs.append(cls_attack_config) expensive_params = copy.copy(cls_params) expensive_params["eps_iter"] = eps_iter_small expensive_params["nb_iter"] *= 25.0 expensive_config = AttackConfig( pgd_attack, expensive_params, "expensive_pgd_" + str(cls) ) expensive_pgd.append(expensive_config) attack_configs = [noise_attack_config] + pgd_attack_configs + expensive_pgd new_work_goal = {config: 5 for config in attack_configs} pgd_work_goal = {config: 5 for config in pgd_attack_configs} goals = [ Misclassify(new_work_goal={noise_attack_config: 50}), Misclassify(new_work_goal=pgd_work_goal), MaxConfidence(t=0.5, new_work_goal=new_work_goal), MaxConfidence(t=0.75, new_work_goal=new_work_goal), MaxConfidence(t=0.875, new_work_goal=new_work_goal), MaxConfidence(t=0.9375, new_work_goal=new_work_goal), MaxConfidence(t=0.96875, new_work_goal=new_work_goal), MaxConfidence(t=0.984375, new_work_goal=new_work_goal), MaxConfidence(t=1.0, new_work_goal=new_work_goal), ] bundle_attacks(sess, model, x, y, attack_configs, goals, report_path) def random_search_max_confidence_recipe( sess, model, x, y, eps, clip_min, clip_max, report_path, batch_size=BATCH_SIZE, num_noise_points=10000, ): noise_attack = Noise(model, sess) threat_params = {"eps": eps, "clip_min": clip_min, "clip_max": clip_max} noise_attack_config = AttackConfig(noise_attack, threat_params) attack_configs = [noise_attack_config] assert batch_size % num_devices == 0 new_work_goal = {noise_attack_config: num_noise_points} goals = [MaxConfidence(t=1.0, new_work_goal=new_work_goal)] 
bundle_attacks(sess, model, x, y, attack_configs, goals, report_path) class AttackConfig(object): def __init__(self, attack, params=None, name=None, pass_y=False): self.attack = attack self.params = params self.name = name if params is not None: assert isinstance(params, dict) for key in params: assert isinstance(key, six.string_types), type(key) self.pass_y = pass_y def __str__(self): if self.name is not None: return self.name return "AttackConfig(" + str(self.attack) + ", " + str(self.params) + ")" def __repr__(self): return self.__str__() def bundle_attacks( sess, model, x, y, attack_configs, goals, report_path, attack_batch_size=BATCH_SIZE, eval_batch_size=BATCH_SIZE, ): assert isinstance(sess, tf.Session) assert isinstance(model, Model) assert all( isinstance(attack_config, AttackConfig) for attack_config in attack_configs ) assert all(isinstance(goal, AttackGoal) for goal in goals) assert isinstance(report_path, six.string_types) if x.shape[0] != y.shape[0]: raise ValueError("Number of input examples does not match number of labels") run_counts = {} for attack_config in attack_configs: run_counts[attack_config] = np.zeros(x.shape[0], dtype=np.int64) _logger.info("Running on clean data to initialize the report...") packed = correctness_and_confidence( sess, model, x, y, batch_size=eval_batch_size, devices=devices ) _logger.info("...done") correctness, confidence = packed _logger.info("Accuracy: " + str(correctness.mean())) report = ConfidenceReport() report["clean"] = ConfidenceReportEntry(correctness, confidence) adv_x = x.copy() for goal in goals: bundle_attacks_with_goal( sess, model, x, y, adv_x, attack_configs, run_counts, goal, report, report_path, attack_batch_size=attack_batch_size, eval_batch_size=eval_batch_size, ) return adv_x, run_counts def bundle_attacks_with_goal( sess, model, x, y, adv_x, attack_configs, run_counts, goal, report, report_path, attack_batch_size=BATCH_SIZE, eval_batch_size=BATCH_SIZE, ): goal.start(run_counts) _logger.info("Running criteria for new goal...") criteria = goal.get_criteria(sess, model, adv_x, y, batch_size=eval_batch_size) assert "correctness" in criteria _logger.info("Accuracy: " + str(criteria["correctness"].mean())) assert "confidence" in criteria while not goal.is_satisfied(criteria, run_counts): run_batch_with_goal( sess, model, x, y, adv_x, criteria, attack_configs, run_counts, goal, report, report_path, attack_batch_size=attack_batch_size, ) report.completed = True save(criteria, report, report_path, adv_x) def run_batch_with_goal( sess, model, x, y, adv_x_val, criteria, attack_configs, run_counts, goal, report, report_path, attack_batch_size=BATCH_SIZE, ): attack_config = goal.get_attack_config(attack_configs, run_counts, criteria) idxs = goal.request_examples(attack_config, criteria, run_counts, attack_batch_size) x_batch = x[idxs] assert x_batch.shape[0] == attack_batch_size y_batch = y[idxs] assert y_batch.shape[0] == attack_batch_size adv_x_batch = run_attack( sess, model, x_batch, y_batch, attack_config.attack, attack_config.params, attack_batch_size, devices, pass_y=attack_config.pass_y, ) criteria_batch = goal.get_criteria( sess, model, adv_x_batch, y_batch, batch_size=min(attack_batch_size, BATCH_SIZE) ) cur_run_counts = run_counts[attack_config] for batch_idx, orig_idx in enumerate(idxs): cur_run_counts[orig_idx] += 1 should_copy = goal.new_wins(criteria, orig_idx, criteria_batch, batch_idx) if should_copy: adv_x_val[orig_idx] = adv_x_batch[batch_idx] for key in criteria: criteria[key][orig_idx] = 
criteria_batch[key][batch_idx] assert np.allclose(y[orig_idx], y_batch[batch_idx]) report["bundled"] = ConfidenceReportEntry( criteria["correctness"], criteria["confidence"] ) should_save = False new_time = time.time() if hasattr(report, "time"): if new_time - report.time > REPORT_TIME_INTERVAL: should_save = True else: should_save = True if should_save: report.time = new_time goal.print_progress(criteria, run_counts) save(criteria, report, report_path, adv_x_val) def save(criteria, report, report_path, adv_x_val): print_stats(criteria["correctness"], criteria["confidence"], "bundled") print("Saving to " + report_path) serial.save(report_path, report) assert report_path.endswith(".joblib") adv_x_path = report_path[: -len(".joblib")] + "_adv.npy" np.save(adv_x_path, adv_x_val) class AttackGoal(object): def start(self, run_counts): def get_criteria(self, sess, model, advx, y, batch_size=BATCH_SIZE): names, factory = self.extra_criteria() factory = _CriteriaFactory(model, factory) results = batch_eval_multi_worker( sess, factory, [advx, y], batch_size=batch_size, devices=devices ) names = ["correctness", "confidence"] + names out = dict(safe_zip(names, results)) return out def extra_criteria(self): return [], None def request_examples(self, attack_config, criteria, run_counts, batch_size): raise NotImplementedError( str(type(self)) + "needs to implement request_examples" ) def is_satisfied(self, criteria, run_counts): raise NotImplementedError(str(type(self)) + " needs to implement is_satisfied.")
MIT License
plusmultiply/mprm
datasets/Scannet_subcloud.py
ScannetDataset.load_evaluation_points
python
def load_evaluation_points(self, file_path):
        mesh_path = file_path.split('/')
        mesh_path[-2] = mesh_path[-2][:-6] + 'meshes'
        mesh_path = '/'.join(mesh_path)
        vertex_data, faces = read_ply(mesh_path[:-4] + '_mesh.ply', triangular_mesh=True)
        return np.vstack((vertex_data['x'], vertex_data['y'], vertex_data['z'])).T
Load points (from test or validation split) on which the metrics should be evaluated
https://github.com/plusmultiply/mprm/blob/9783dc179f0bfca8ca7316b638269769f11027aa/datasets/Scannet_subcloud.py#L982-L992
import json import os import tensorflow as tf import numpy as np import time import pickle from sklearn.neighbors import KDTree from utils.ply import read_ply, write_ply from utils.mesh import rasterize_mesh from os import makedirs, listdir from os.path import exists, join, isfile, isdir from datasets.common import Dataset import cpp_wrappers.cpp_subsampling.grid_subsampling as cpp_subsampling def grid_subsampling(points, features=None, labels=None, sampleDl=0.1, verbose=0): if (features is None) and (labels is None): return cpp_subsampling.compute(points, sampleDl=sampleDl, verbose=verbose) elif (labels is None): return cpp_subsampling.compute(points, features=features, sampleDl=sampleDl, verbose=verbose) elif (features is None): return cpp_subsampling.compute(points, classes=labels, sampleDl=sampleDl, verbose=verbose) else: return cpp_subsampling.compute(points, features=features, classes=labels, sampleDl=sampleDl, verbose=verbose) class ScannetDataset(Dataset): def __init__(self, input_threads=8, load_test=False): Dataset.__init__(self, 'Scannet') self.label_to_names = {0: 'unclassified', 1: 'wall', 2: 'floor', 3: 'cabinet', 4: 'bed', 5: 'chair', 6: 'sofa', 7: 'table', 8: 'door', 9: 'window', 10: 'bookshelf', 11: 'picture', 12: 'counter', 14: 'desk', 16: 'curtain', 24: 'refridgerator', 28: 'shower curtain', 33: 'toilet', 34: 'sink', 36: 'bathtub', 39: 'otherfurniture'} self.init_labels() self.ignored_labels = np.sort([0]) self.network_model = 'cloud_segmentation' self.num_threads = input_threads self.path = '' self.train_path = join(self.path, 'training_points') self.test_path = join(self.path, 'test_points') self.validation_clouds = np.loadtxt(join(self.path, 'scannetv2_val.txt'), dtype=np.str) self.validation_split = 1 self.training_split = 0 self.all_splits = [] self.load_test = load_test def prepare_pointcloud_ply(self): print('\nPreparing ply files') t0 = time.time() paths = [join(self.path, 'scans'), join(self.path, 'scans_test')] new_paths = [self.train_path, self.test_path] mesh_paths = [join(self.path, 'training_meshes'), join(self.path, 'test_meshes')] label_files = join(self.path, 'scannetv2-labels.combined.tsv') with open(label_files, 'r') as f: lines = f.readlines() names1 = [line.split('\t')[1] for line in lines[1:]] IDs = [int(line.split('\t')[4]) for line in lines[1:]] annot_to_nyuID = {n: id for n, id in zip(names1, IDs)} for path, new_path, mesh_path in zip(paths, new_paths, mesh_paths): if not exists(new_path): makedirs(new_path) if not exists(mesh_path): makedirs(mesh_path) scenes = np.sort([f for f in listdir(path)]) N = len(scenes) for i, scene in enumerate(scenes): if exists(join(new_path, scene + '.ply')): continue t1 = time.time() vertex_data, faces = read_ply(join(path, scene, scene + '_vh_clean_2.ply'), triangular_mesh=True) vertices = np.vstack((vertex_data['x'], vertex_data['y'], vertex_data['z'])).T vertices_colors = np.vstack((vertex_data['red'], vertex_data['green'], vertex_data['blue'])).T vertices_labels = np.zeros(vertices.shape[0], dtype=np.int32) if new_path == self.train_path: with open(join(path, scene, scene + '_vh_clean_2.0.010000.segs.json'), 'r') as f: segmentations = json.load(f) segIndices = np.array(segmentations['segIndices']) with open(join(path, scene, scene + '_vh_clean.aggregation.json'), 'r') as f: aggregation = json.load(f) for segGroup in aggregation['segGroups']: c_name = segGroup['label'] if c_name in names1: nyuID = annot_to_nyuID[c_name] if nyuID in self.label_values: for segment in segGroup['segments']: 
vertices_labels[segIndices == segment] = nyuID write_ply(join(mesh_path, scene + '_mesh.ply'), [vertices, vertices_colors, vertices_labels], ['x', 'y', 'z', 'red', 'green', 'blue', 'class'], triangular_faces=faces) else: write_ply(join(mesh_path, scene + '_mesh.ply'), [vertices, vertices_colors], ['x', 'y', 'z', 'red', 'green', 'blue'], triangular_faces=faces) points, associated_vert_inds = rasterize_mesh(vertices, faces, 0.003) sub_points, sub_vert_inds = grid_subsampling(points, labels=associated_vert_inds, sampleDl=0.01) sub_colors = vertices_colors[sub_vert_inds.ravel(), :] if new_path == self.train_path: sub_labels = vertices_labels[sub_vert_inds.ravel()] write_ply(join(new_path, scene + '.ply'), [sub_points, sub_colors, sub_labels, sub_vert_inds], ['x', 'y', 'z', 'red', 'green', 'blue', 'class', 'vert_ind']) else: write_ply(join(new_path, scene + '.ply'), [sub_points, sub_colors, sub_vert_inds], ['x', 'y', 'z', 'red', 'green', 'blue', 'vert_ind']) print('{:s} {:.1f} sec / {:.1f}%'.format(scene, time.time() - t1, 100 * i / N)) print('Done in {:.1f}s'.format(time.time() - t0)) def load_subsampled_clouds(self, subsampling_parameter): if 0 < subsampling_parameter <= 0.01: raise ValueError('subsampling_parameter too low (should be over 1 cm') tree_path = join(self.path, 'input_{:.3f}'.format(subsampling_parameter)) if not exists(tree_path): makedirs(tree_path) self.train_files = np.sort([join(self.train_path, f) for f in listdir(self.train_path) if f[-4:] == '.ply']) self.test_files = np.sort([join(self.test_path, f) for f in listdir(self.test_path) if f[-4:] == '.ply']) files = np.hstack((self.train_files, self.test_files)) self.input_trees = {'training': [], 'validation': [], 'test': []} self.input_colors = {'training': [], 'validation': [], 'test': []} self.input_vert_inds = {'training': [], 'validation': [], 'test': []} self.input_labels = {'training': [], 'validation': []} N = len(files) progress_n = 30 fmt_str = '[{:<' + str(progress_n) + '}] {:5.1f}%' print('\nPreparing KDTree for all scenes, subsampled at {:.3f}'.format(subsampling_parameter)) for i, file_path in enumerate(files): t0 = time.time() cloud_name = file_path.split('/')[-1][:-4] cloud_folder = file_path.split('/')[-2] if 'train' in cloud_folder: if cloud_name in self.validation_clouds: self.all_splits += [1] cloud_split = 'validation' else: self.all_splits += [0] cloud_split = 'training' else: cloud_split = 'test' if (cloud_split != 'test' and self.load_test) or (cloud_split == 'test' and not self.load_test): continue KDTree_file = join(tree_path, '{:s}_KDTree.pkl'.format(cloud_name)) sub_ply_file = join(tree_path, '{:s}.ply'.format(cloud_name)) if isfile(KDTree_file): data = read_ply(sub_ply_file) sub_colors = np.vstack((data['red'], data['green'], data['blue'])).T sub_vert_inds = data['vert_ind'] if cloud_split == 'test': sub_labels = None else: sub_labels = data['class'] with open(KDTree_file, 'rb') as f: search_tree = pickle.load(f) else: data = read_ply(file_path) points = np.vstack((data['x'], data['y'], data['z'])).T colors = np.vstack((data['red'], data['green'], data['blue'])).T if cloud_split == 'test': int_features = data['vert_ind'] else: int_features = np.vstack((data['vert_ind'], data['class'])).T sub_points, sub_colors, sub_int_features = grid_subsampling(points, features=colors, labels=int_features, sampleDl=subsampling_parameter) sub_colors = sub_colors / 255 if cloud_split == 'test': sub_vert_inds = np.squeeze(sub_int_features) sub_labels = None else: sub_vert_inds = sub_int_features[:, 0] sub_labels = 
sub_int_features[:, 1] search_tree = KDTree(sub_points, leaf_size=50) with open(KDTree_file, 'wb') as f: pickle.dump(search_tree, f) if cloud_split == 'test': write_ply(sub_ply_file, [sub_points, sub_colors, sub_vert_inds], ['x', 'y', 'z', 'red', 'green', 'blue', 'vert_ind']) else: write_ply(sub_ply_file, [sub_points, sub_colors, sub_labels, sub_vert_inds], ['x', 'y', 'z', 'red', 'green', 'blue', 'class', 'vert_ind']) self.input_trees[cloud_split] += [search_tree] self.input_colors[cloud_split] += [sub_colors] self.input_vert_inds[cloud_split] += [sub_vert_inds] if cloud_split in ['training', 'validation']: self.input_labels[cloud_split] += [sub_labels] print('', end='\r') print(fmt_str.format('#' * ((i * progress_n) // N), 100 * i / N), end='', flush=True) self.input_trees['validation'] = self.input_trees['training'] self.input_colors['validation'] = self.input_colors['training'] self.input_vert_inds['validation'] = self.input_colors['training'] self.input_labels['validation'] = self.input_labels['training'] self.num_training = len(self.input_trees['training']) self.num_validation = len(self.input_trees['validation']) self.num_test = len(self.input_trees['test']) print('number of training sample:', self.num_training) print('number of validation sample:', self.num_validation) print('number of test sample:', self.num_test) self.validation_proj = [] self.validation_labels = [] self.test_proj = [] self.test_labels = [] i_val = 0 i_test = 0 N = self.num_validation + self.num_test print('', end='\r') print(fmt_str.format('#' * progress_n, 100), flush=True) print('\nPreparing reprojection indices for validation and test') for i, file_path in enumerate(files): cloud_name = file_path.split('/')[-1][:-4] cloud_folder = file_path.split('/')[-2] if (not self.load_test) and 'train' in cloud_folder and cloud_name not in self.validation_clouds: proj_file = join(tree_path, '{:s}_proj_train.pkl'.format(cloud_name)) if isfile(proj_file): with open(proj_file, 'rb') as f: proj_inds, labels = pickle.load(f) else: tree_path = join(self.path, 'input_{:.3f}'.format(subsampling_parameter)) data = read_ply(join(tree_path, '{:s}.ply'.format(cloud_name))) vertices = np.vstack((data['x'], data['y'], data['z'])).T labels = data['class'] proj_inds = np.squeeze(self.input_trees['validation'][i_val].query(vertices, return_distance=False)) proj_inds = proj_inds.astype(np.int32) with open(proj_file, 'wb') as f: pickle.dump([proj_inds, labels], f) self.validation_proj += [proj_inds] self.validation_labels += [labels] i_val += 1 if self.load_test and 'test' in cloud_folder: proj_file = join(tree_path, '{:s}_proj.pkl'.format(cloud_name)) if isfile(proj_file): with open(proj_file, 'rb') as f: proj_inds, labels = pickle.load(f) else: mesh_path = file_path.split('/') mesh_path[-2] = 'test_meshes' mesh_path = '/'.join(mesh_path) vertex_data, faces = read_ply(mesh_path[:-4] + '_mesh.ply', triangular_mesh=True) vertices = np.vstack((vertex_data['x'], vertex_data['y'], vertex_data['z'])).T labels = np.zeros(vertices.shape[0], dtype=np.int32) proj_inds = np.squeeze(self.input_trees['test'][i_test].query(vertices, return_distance=False)) proj_inds = proj_inds.astype(np.int32) with open(proj_file, 'wb') as f: pickle.dump([proj_inds, labels], f) self.test_proj += [proj_inds] self.test_labels += [labels] i_test += 1 print('', end='\r') print(fmt_str.format('#' * (((i_val + i_test) * progress_n) // N), 100 * (i_val + i_test) / N), end='', flush=True) print('\n') return def get_batch_gen(self, split, config): if split == 'training': 
epoch_n = config.epoch_steps * config.batch_num random_pick_n = None elif split == 'validation': epoch_n = config.validation_size * config.batch_num elif split == 'test': epoch_n = config.validation_size * config.batch_num elif split == 'ERF': epoch_n = 1000000 self.batch_limit = 1 np.random.seed(42) else: raise ValueError('Split argument in data generator should be "training", "validation" or "test"') if not hasattr(self, 'potentials'): self.potentials = {} self.min_potentials = {} if not hasattr(self, 'anchors'): self.anchors = [] def get_anchors(points): n_anchors = [] x_max = points[:, 0].max() x_min = points[:, 0].min() y_max = points[:, 1].max() y_min = points[:, 1].min() z_max = points[:, 2].max() z_min = points[:, 2].min() x_step = np.floor((x_max - x_min) / config.in_radius) + 1 y_step = np.floor((y_max - y_min) / config.in_radius) + 1 z_step = np.floor((z_max - z_min) / config.in_radius) + 1 x_num = np.linspace(x_min, x_max, x_step) y_num = np.linspace(y_min, y_max, y_step) z_num = np.linspace(z_min, z_max, z_step) for x in x_num: for y in y_num: for z in z_num: n_anchors.append([x, y, z]) return np.array(n_anchors) if split == 'training': self.anchors = [] self.potentials[split] = [] self.min_potentials[split] = [] data_split = split if split == 'ERF': data_split = 'test' if split == 'training': for i, tree in enumerate(self.input_trees[data_split]): points = np.array(tree.data) anchor = get_anchors(points) self.anchors += [anchor] self.potentials[split] += [np.random.rand(len(anchor)) * 1e-3] self.min_potentials[split] += [float(np.min(self.potentials[split][-1]))] print(len(self.anchors)) print(len(self.potentials[split])) print(len(self.min_potentials[split])) else: for i, tree in enumerate(self.input_trees[data_split]): self.potentials[split] += [np.random.rand(tree.data.shape[0]) * 1e-3] self.min_potentials[split] += [float(np.min(self.potentials[split][-1]))] def get_random_epoch_inds(): all_epoch_inds = np.zeros((2, 0), dtype=np.int32) for cloud_ind, cloud_labels in enumerate(self.input_labels[split]): epoch_indices = np.empty((0,), dtype=np.int32) for label_ind, label in enumerate(self.label_values): if label not in self.ignored_labels: label_indices = np.where(np.equal(cloud_labels, label))[0] if len(label_indices) <= random_pick_n: epoch_indices = np.hstack((epoch_indices, label_indices)) elif len(label_indices) < 50 * random_pick_n: new_randoms = np.random.choice(label_indices, size=random_pick_n, replace=False) epoch_indices = np.hstack((epoch_indices, new_randoms.astype(np.int32))) else: rand_inds = [] while len(rand_inds) < random_pick_n: rand_inds = np.unique(np.random.choice(label_indices, size=5 * random_pick_n, replace=True)) epoch_indices = np.hstack((epoch_indices, rand_inds[:random_pick_n].astype(np.int32))) epoch_indices = np.vstack((np.full(epoch_indices.shape, cloud_ind, dtype=np.int32), epoch_indices)) all_epoch_inds = np.hstack((all_epoch_inds, epoch_indices)) return all_epoch_inds def random_balanced_gen(): if split == 'training': all_epoch_inds = get_random_epoch_inds() elif split == 'validation': all_epoch_inds = get_random_epoch_inds() else: raise ValueError('generator to be defined for test split.') p_list = [] c_list = [] pl_list = [] pi_list = [] ci_list = [] cl_list = [] cla_list = [] batch_n = 0 for i, rand_i in enumerate(np.random.permutation(all_epoch_inds.shape[1])): cloud_ind = all_epoch_inds[0, rand_i] point_ind = all_epoch_inds[1, rand_i] points = np.array(self.input_trees[split][cloud_ind].data, copy=False) center_point = 
points[point_ind, :].reshape(1, -1) noise = np.random.normal(scale=config.in_radius/10, size=center_point.shape) pick_point = center_point + noise.astype(center_point.dtype) input_inds = self.input_trees[split][cloud_ind].query_radius(pick_point, r=config.in_radius)[0] n = input_inds.shape[0] if n > self.batch_limit: input_inds = np.random.choice(input_inds, size=int(self.batch_limit)-1, replace=False) n = input_inds.shape[0] input_points = (points[input_inds] - pick_point).astype(np.float32) input_colors = self.input_colors[split][cloud_ind][input_inds] input_labels = self.input_labels[split][cloud_ind][input_inds] input_labels = np.array([self.label_to_idx[l] for l in input_labels]) cloud_labels_idx = np.unique(input_labels) cloud_labels = np.zeros((1, config.num_classes)) cloud_labels[0][cloud_labels_idx] = 1 cloud_labels_all = np.ones((len(input_labels), config.num_classes)) cloud_labels_all = cloud_labels_all * cloud_labels if batch_n + n > self.batch_limit and batch_n > 0: yield (np.concatenate(p_list, axis=0), np.concatenate(c_list, axis=0), np.concatenate(pl_list, axis=0), np.array([tp.shape[0] for tp in p_list]), np.concatenate(pi_list, axis=0), np.array(ci_list, dtype=np.int32), np.concatenate(cl_list, axis=0), np.concatenate(cla_list, axis=0)) p_list = [] c_list = [] pl_list = [] pi_list = [] ci_list = [] cl_list = [] cla_list = [] batch_n = 0 if n > 0: p_list += [input_points] c_list += [np.hstack((input_colors, input_points + pick_point))] pl_list += [input_labels] pi_list += [input_inds] ci_list += [cloud_ind] cl_list += [cloud_labels] cla_list += [cloud_labels_all] batch_n += n if batch_n > 0: yield (np.concatenate(p_list, axis=0), np.concatenate(c_list, axis=0), np.concatenate(pl_list, axis=0), np.array([tp.shape[0] for tp in p_list]), np.concatenate(pi_list, axis=0), np.array(ci_list, dtype=np.int32), np.concatenate(cl_list, axis=0), np.concatenate(cla_list, axis=0) ) def spatially_regular_gen(): p_list = [] c_list = [] pl_list = [] pi_list = [] ci_list = [] cl_list = [] cla_list = [] batch_n = 0 for i in range(epoch_n): cloud_ind = int(np.argmin(self.min_potentials[split])) point_ind = np.argmin(self.potentials[split][cloud_ind]) points = np.array(self.input_trees[data_split][cloud_ind].data, copy=False) if split=='training': center_point = self.anchors[cloud_ind][point_ind].reshape(1, -1) else: center_point = points[point_ind, :].reshape(1, -1) if split != 'ERF': noise = np.random.normal(scale=config.in_radius/10, size=center_point.shape) pick_point = center_point + noise.astype(center_point.dtype) else: pick_point = center_point input_inds = self.input_trees[data_split][cloud_ind].query_radius(pick_point, r=config.in_radius)[0] n = input_inds.shape[0] if n == 0: self.potentials[split][cloud_ind][point_ind] += 1 self.min_potentials[split][cloud_ind] = float(np.min(self.potentials[split][cloud_ind])) continue if split != 'ERF': dists = np.sum(np.square((points[input_inds] - pick_point).astype(np.float32)), axis=1) tukeys = np.square(1 - dists / np.square(config.in_radius)) tukeys[dists > np.square(config.in_radius)] = 0 if split != 'training': self.potentials[split][cloud_ind][input_inds] += tukeys self.min_potentials[split][cloud_ind] = float(np.min(self.potentials[split][cloud_ind])) else: self.potentials[split][cloud_ind][point_ind] += 0.01 self.min_potentials[split][cloud_ind] = float(np.min(self.potentials[split][cloud_ind])) if n > self.batch_limit: input_inds = np.random.choice(input_inds, size=int(self.batch_limit)-1, replace=False) n = input_inds.shape[0] 
input_points = (points[input_inds] - pick_point).astype(np.float32) input_colors = self.input_colors[data_split][cloud_ind][input_inds] if split in ['test', 'ERF']: input_labels = np.zeros(input_points.shape[0]) else: input_labels = self.input_labels[data_split][cloud_ind][input_inds] input_labels = np.array([self.label_to_idx[l] for l in input_labels]) cloud_labels_idx = np.unique(input_labels) cloud_labels_idx = cloud_labels_idx[cloud_labels_idx!=0].astype('int32') cloud_labels = np.zeros((1, config.num_classes)) cloud_labels[0][cloud_labels_idx-1] = 1 cloud_labels_all = np.ones((len(input_labels), config.num_classes)) cloud_labels_all = cloud_labels_all * cloud_labels if batch_n + n > self.batch_limit and batch_n > 0: yield (np.concatenate(p_list, axis=0), np.concatenate(c_list, axis=0), np.concatenate(pl_list, axis=0), np.array([tp.shape[0] for tp in p_list]), np.concatenate(pi_list, axis=0), np.array(ci_list, dtype=np.int32), np.concatenate(cl_list, axis=0), np.concatenate(cla_list, axis=0)) p_list = [] c_list = [] pl_list = [] pi_list = [] ci_list = [] cl_list = [] cla_list = [] batch_n = 0 if n > 0: p_list += [input_points] c_list += [np.hstack((input_colors, input_points + pick_point))] pl_list += [input_labels] pi_list += [input_inds] ci_list += [cloud_ind] cl_list += [cloud_labels] cla_list += [cloud_labels_all] batch_n += n if batch_n > 0: yield (np.concatenate(p_list, axis=0), np.concatenate(c_list, axis=0), np.concatenate(pl_list, axis=0), np.array([tp.shape[0] for tp in p_list]), np.concatenate(pi_list, axis=0), np.array(ci_list, dtype=np.int32), np.concatenate(cl_list, axis=0), np.concatenate(cla_list, axis=0)) if split == 'training': gen_func = spatially_regular_gen elif split == 'validation': gen_func = spatially_regular_gen elif split in ['test', 'ERF']: gen_func = spatially_regular_gen else: raise ValueError('Split argument in data generator should be "training", "validation" or "test"') gen_types = (tf.float32, tf.float32, tf.int32, tf.int32, tf.int32, tf.int32, tf.float32, tf.float32) gen_shapes = ([None, 3], [None, 6], [None], [None], [None], [None], [None, 20], [None, 20]) return gen_func, gen_types, gen_shapes def get_tf_mapping(self, config): def tf_map(stacked_points, stacked_colors, point_labels, stacks_lengths, point_inds, cloud_inds, cloud_labels, cloud_labels_all): batch_inds = self.tf_get_batch_inds(stacks_lengths) stacked_points, scales, rots = self.tf_augment_input(stacked_points, batch_inds, config) stacked_features = tf.ones((tf.shape(stacked_points)[0], 1), dtype=tf.float32) stacked_original_coordinates = stacked_colors[:, 3:] stacked_colors = stacked_colors[:, :3] if config.in_features_dim in [4, 5]: num_batches = batch_inds[-1] + 1 s = tf.cast(tf.less(tf.random_uniform((num_batches,)), config.augment_color), tf.float32) stacked_s = tf.gather(s, batch_inds) stacked_colors = stacked_colors * tf.expand_dims(stacked_s, axis=1) if config.in_features_dim == 1: pass elif config.in_features_dim == 2: stacked_features = tf.concat((stacked_features, stacked_original_coordinates[:, 2:]), axis=1) elif config.in_features_dim == 3: stacked_features = stacked_colors elif config.in_features_dim == 4: stacked_features = tf.concat((stacked_features, stacked_colors), axis=1) elif config.in_features_dim == 5: stacked_features = tf.concat((stacked_features, stacked_colors, stacked_original_coordinates[:, 2:]), axis=1) elif config.in_features_dim == 7: stacked_features = tf.concat((stacked_features, stacked_colors, stacked_points), axis=1) else: raise ValueError('Only 
accepted input dimensions are 1, 3, 4 and 7 (without and with rgb/xyz)') input_list = self.tf_segmentation_attention_inputs(config, stacked_points, stacked_features, point_labels, stacks_lengths, batch_inds) input_list += [scales, rots] input_list += [point_inds, cloud_inds] input_list += [cloud_labels] input_list += [batch_inds] input_list += [cloud_labels_all] input_list += [stacks_lengths] return input_list return tf_map
MIT License
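The load_evaluation_points entry above is mostly path surgery: it rewrites a "*_points" directory into the matching "*_meshes" directory and swaps the ".ply" suffix for "_mesh.ply" before loading vertices. A minimal, self-contained sketch of that rewriting (the example path and scene name are illustrative assumptions):

file_path = 'Data/Scannet/training_points/scene0011_00.ply'

parts = file_path.split('/')
parts[-2] = parts[-2][:-6] + 'meshes'     # 'training_points' -> 'training_meshes'
mesh_path = '/'.join(parts)
mesh_ply = mesh_path[:-4] + '_mesh.ply'   # drop '.ply', append '_mesh.ply'

print(mesh_ply)  # Data/Scannet/training_meshes/scene0011_00_mesh.ply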
escorciav/deep-action-proposals
daps/model.py
forward_pass
python
def forward_pass(network, input_data):
    l_pred_var, y_pred_var = lasagne.layers.get_output(network, input_data, deterministic=True)
    loc = l_pred_var.eval().reshape((-1, 2))
    return loc, y_pred_var.eval()
Forward pass input_data over network
https://github.com/escorciav/deep-action-proposals/blob/c14f512febc1abd0ec40bd3188a83e4ee3913535/daps/model.py#L77-L83
import lasagne import numpy as np import theano.tensor as T from daps.c3d_encoder import Feature from daps.utils.segment import format as segment_format EPSILON = 10e-8 def build_lstm(input_var=None, seq_length=256, depth=2, width=512, input_size=4096, grad_clip=100, forget_bias=5.0): network = lasagne.layers.InputLayer(shape=(None, seq_length, input_size), input_var=input_var) nonlin = lasagne.nonlinearities.tanh gate = lasagne.layers.Gate for _ in range(depth): network = lasagne.layers.LSTMLayer( network, width, grad_clipping=grad_clip, nonlinearity=nonlin, forgetgate=gate(b=lasagne.init.Constant(forget_bias))) network = lasagne.layers.SliceLayer(network, -1, 1) return network def build_mlp(input_var=None, depth=2, width=1024, drop_input=.2, drop_hidden=.5, input_size=4096): network = lasagne.layers.InputLayer(shape=(None, input_size), input_var=input_var) if drop_input: network = lasagne.layers.dropout(network, p=drop_input) nonlin = lasagne.nonlinearities.rectify for _ in range(depth): network = lasagne.layers.DenseLayer( network, width, nonlinearity=nonlin) if drop_hidden: network = lasagne.layers.dropout(network, p=drop_hidden) return network def build_model(model_prm=None, input_var=None, input_size=4096, grad_clip=100, forget_bias=1.0): if model_prm.startswith('mlp:'): user_prm = model_prm.split(':', 1)[1].split(',') n_outputs, depth, width, drop_in, drop_hid = user_prm network = build_mlp(input_var, int(depth), int(width), float(drop_in), input_size=input_size) elif model_prm.startswith('lstm:'): user_prm = model_prm.split(':', 1)[1].split(',') n_outputs, seq_length, width, depth = user_prm network = build_lstm(input_var, int(seq_length), int(depth), int(width), input_size=input_size, grad_clip=grad_clip, forget_bias=forget_bias) else: raise ValueError("Unrecognized model type " + model_prm) nonlin, n_outputs = lasagne.nonlinearities.sigmoid, int(n_outputs) localization = lasagne.layers.DenseLayer(network, n_outputs * 2) conf = lasagne.layers.DenseLayer(network, n_outputs, nonlinearity=nonlin) return localization, conf
MIT License
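In forward_pass above, the localization head emits two values per proposal, so a (batch, 2 * n_outputs) prediction is reshaped into one row of two localization values per proposal. A small NumPy sketch of that reshape; the batch size and n_outputs below are illustrative assumptions, not values from the repository.

import numpy as np

batch, n_outputs = 4, 16
l_pred = np.random.rand(batch, 2 * n_outputs).astype('float32')

loc = l_pred.reshape((-1, 2))   # one (value, value) pair per proposal
print(loc.shape)                # (64, 2)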
tresamigossd/smv
src/main/python/smv/smvappinfo.py
SmvAppInfo._common_prefix
python
def _common_prefix(self, fqn_list):
    if not fqn_list:
        return ''
    parsed = [s.split(".") for s in fqn_list]
    s1 = min(parsed)
    s2 = max(parsed)
    for i, c in enumerate(s1):
        if c != s2[i]:
            return ".".join(s1[:i])
    return ".".join(s1)
Given a list of fqns, return the longest common prefix
https://github.com/tresamigossd/smv/blob/e12257b5b07113d805e7fdd8de41cbcf72120ed7/src/main/python/smv/smvappinfo.py#L41-L54
from smv.utils import scala_seq_to_list
import json

from smv.modulesvisitor import ModulesVisitor

class SmvAppInfo(object):
    def __init__(self, smvApp):
        self.smvApp = smvApp
        self.dsm = smvApp.dsm
        self.stages = smvApp.stages()

    def _graph(self):
        nodes = self.dsm.allDataSets()
        edges = []
        for ds in nodes:
            from_ds = ds.resolvedRequiresDS
            edges.extend([(n, ds) for n in from_ds if n in nodes])
        return (nodes, edges)
Apache License 2.0
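Because _common_prefix above only compares the lexicographically smallest and largest split FQNs, it never needs the SmvApp instance. A standalone re-sketch of the same logic, runnable without constructing an SmvAppInfo; the module FQNs are made up.

def common_prefix(fqn_list):
    if not fqn_list:
        return ''
    parsed = [s.split(".") for s in fqn_list]
    s1, s2 = min(parsed), max(parsed)          # only the extremes can disagree first
    for i, c in enumerate(s1):
        if c != s2[i]:
            return ".".join(s1[:i])
    return ".".join(s1)

print(common_prefix(["com.acme.etl.ModA", "com.acme.etl.ModB", "com.acme.report.ModC"]))
# -> com.acme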
vforgione/logging2
logging2/loggers.py
Logger.warning
python
def warning(self, message: str, **context) -> None:
    self._log(message=message, level=LogLevel.warning, **context)
Calls each registered ``Handler``'s ``write`` method to produce a warning log entry.

:param message: the user message to be written
:param context: additional key-value pairs to override template context during interpolation
https://github.com/vforgione/logging2/blob/9d620c14d9b5f67e3dc285082330296cf1ecbfcd/logging2/loggers.py#L134-L140
import inspect import os import re import sys import traceback from datetime import datetime, tzinfo from datetime import timezone as _tz from typing import Callable, Dict, Iterable, List, Optional, Set, Union from logging2 import LogRegister from logging2.handlers.abc import Handler from logging2.handlers.streaming import StdOutHandler from logging2.levels import LogLevel class Logger: TEMPLATE_KEYS_REGEX = re.compile("\{(?P<key>\w+)\}") BASE_TEMPLATE_KEYS = { "timestamp", "level", "name", "message", "source", "line", "function", "process", } DEFAULT_TEMPLATE: str = "{timestamp} {level} {name}: {message}" DEFAULT_TIMEZONE: tzinfo = _tz.utc DEFAULT_HANDLER_CLASS: type = StdOutHandler DEFAULT_LOG_LEVEL: LogLevel = LogLevel.info def __init__( self, name: str, template: Optional[str] = None, ensure_new_line: Optional[bool] = True, timezone: Optional[tzinfo] = None, additional_context: Optional[Dict[str, Union[object, Callable]]] = None, handler: Optional[Handler] = None, handlers: Optional[Iterable[Handler]] = None, level: Optional[LogLevel] = None, ): if name not in LogRegister: self.name: str = name self.ensure_new_line: bool = ensure_new_line self.timezone: tzinfo = timezone or self.DEFAULT_TIMEZONE self.additional_context: Optional[ Dict[str, Union[object, Callable]] ] = additional_context or {} self._level: LogLevel = level self._template: str = None self._keys: Set[str] = None self._setup_template(template=template or self.DEFAULT_TEMPLATE) self._handlers: Dict[str, Handler] = {} if handler: self.add_handler(handler) if handlers: for handler in handlers: self.add_handler(handler) LogRegister.register_logger(self) else: registered = LogRegister.get_logger(name=name) self.__dict__ = registered.__dict__ @property def template(self) -> str: return self._template @template.setter def template(self, new_template: str) -> None: self._setup_template(template=new_template) @property def keys(self) -> Set[str]: return self._keys @property def handlers(self) -> List[Handler]: return [handler for handler in self._handlers.values()] def add_handler(self, handler: Handler) -> None: name = handler.name if name not in self._handlers: self._handlers[name] = handler def remove_handler(self, name: str) -> None: if name in self._handlers: del self._handlers[name] def debug(self, message: str, **context) -> None: self._log(message=message, level=LogLevel.debug, **context) def info(self, message: str, **context) -> None: self._log(message=message, level=LogLevel.info, **context)
MIT License
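A hedged usage sketch for Logger.warning above. It assumes the logging2 package is installed and that StdOutHandler can be constructed without arguments; neither assumption is shown in this excerpt, but the import paths follow the module's own imports.

from logging2.loggers import Logger
from logging2.levels import LogLevel
from logging2.handlers.streaming import StdOutHandler

log = Logger("billing", handler=StdOutHandler(), level=LogLevel.info)
log.warning("retrying failed charge")   # extra keyword arguments would be passed
                                        # through as template context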
genialis/resolwe
resolwe/test_helpers/test_runner.py
_manager_setup
python
def _manager_setup():
    if TESTING_CONTEXT.get("manager_reset", False):
        return
    TESTING_CONTEXT["manager_reset"] = True
    state.update_constants()
    manager.drain_messages()
Execute setup operations common to serial and parallel testing. This mostly means state cleanup, such as resetting database connections and clearing the shared state.
https://github.com/genialis/resolwe/blob/dc8a70979ae9722e6c60ae0e3935c6542c637f48/resolwe/test_helpers/test_runner.py#L67-L77
import asyncio import contextlib import errno import logging import os import re import shutil import subprocess import sys from pathlib import Path from unittest.mock import patch import yaml import zmq import zmq.asyncio from channels.db import database_sync_to_async from django.conf import settings from django.core.management.base import CommandError from django.test import override_settings from django.test.runner import DiscoverRunner, ParallelTestSuite, RemoteTestRunner from django.utils.crypto import get_random_string import resolwe.test.testcases.setting_overrides as resolwe_settings from resolwe.flow.finders import get_finders from resolwe.flow.management.commands.prepare_runtime import Command as PrepareRuntime from resolwe.flow.managers import listener, manager, state from resolwe.storage.connectors import connectors from resolwe.test.utils import generate_process_tag logger = logging.getLogger(__name__) SPAWN_PROCESS_REGEX = re.compile( r'run\s+\{.*?["\']process["\']\s*:\s*["\'](.+?)["\'].*?\}' ) TESTING_CONTEXT = { "is_testing": False, } class TestingContext: def __enter__(self): TESTING_CONTEXT["is_testing"] = True def __exit__(self, *args, **kwargs): TESTING_CONTEXT["is_testing"] = False return False
Apache License 2.0
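The guard in _manager_setup above makes the reset idempotent: a flag in shared module state records that the work already ran, so serial and parallel test paths can both call it safely. A generic, framework-free re-sketch of the same run-once pattern (the names here are illustrative, not Resolwe's):

_SETUP_STATE = {"done": False}

def setup_once(do_setup):
    if _SETUP_STATE.get("done", False):
        return
    _SETUP_STATE["done"] = True
    do_setup()

setup_once(lambda: print("expensive reset"))  # runs
setup_once(lambda: print("expensive reset"))  # skipped: already done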
googleads/google-ads-python
google/ads/googleads/v8/services/services/domain_category_service/transports/grpc.py
DomainCategoryServiceGrpcTransport.get_domain_category
python
def get_domain_category(
    self,
) -> Callable[
    [domain_category_service.GetDomainCategoryRequest],
    domain_category.DomainCategory,
]:
    if "get_domain_category" not in self._stubs:
        self._stubs["get_domain_category"] = self.grpc_channel.unary_unary(
            "/google.ads.googleads.v8.services.DomainCategoryService/GetDomainCategory",
            request_serializer=domain_category_service.GetDomainCategoryRequest.serialize,
            response_deserializer=domain_category.DomainCategory.deserialize,
        )
    return self._stubs["get_domain_category"]
r"""Return a callable for the get domain category method over gRPC. Returns the requested domain category. List of thrown errors: `AuthenticationError <>`__ `AuthorizationError <>`__ `HeaderError <>`__ `InternalError <>`__ `QuotaError <>`__ `RequestError <>`__ Returns: Callable[[~.GetDomainCategoryRequest], ~.DomainCategory]: A function that, when called, will call the underlying RPC on the server.
https://github.com/googleads/google-ads-python/blob/6794993e146abcfe21292677144c66cb546446bc/google/ads/googleads/v8/services/services/domain_category_service/transports/grpc.py#L213-L243
import warnings from typing import Callable, Dict, Optional, Sequence, Tuple from google.api_core import grpc_helpers from google.api_core import gapic_v1 import google.auth from google.auth import credentials as ga_credentials from google.auth.transport.grpc import SslCredentials import grpc from google.ads.googleads.v8.resources.types import domain_category from google.ads.googleads.v8.services.types import domain_category_service from .base import DomainCategoryServiceTransport, DEFAULT_CLIENT_INFO class DomainCategoryServiceGrpcTransport(DomainCategoryServiceTransport): def __init__( self, *, host: str = "googleads.googleapis.com", credentials: ga_credentials.Credentials = None, credentials_file: str = None, scopes: Sequence[str] = None, channel: grpc.Channel = None, api_mtls_endpoint: str = None, client_cert_source: Callable[[], Tuple[bytes, bytes]] = None, ssl_channel_credentials: grpc.ChannelCredentials = None, quota_project_id: Optional[str] = None, client_info: gapic_v1.client_info.ClientInfo = DEFAULT_CLIENT_INFO, ) -> None: self._ssl_channel_credentials = ssl_channel_credentials if channel: credentials = False self._grpc_channel = channel self._ssl_channel_credentials = None elif api_mtls_endpoint: warnings.warn( "api_mtls_endpoint and client_cert_source are deprecated", DeprecationWarning, ) host = ( api_mtls_endpoint if ":" in api_mtls_endpoint else api_mtls_endpoint + ":443" ) if credentials is None: credentials, _ = google.auth.default( scopes=self.AUTH_SCOPES, quota_project_id=quota_project_id ) if client_cert_source: cert, key = client_cert_source() ssl_credentials = grpc.ssl_channel_credentials( certificate_chain=cert, private_key=key ) else: ssl_credentials = SslCredentials().ssl_credentials self._grpc_channel = type(self).create_channel( host, credentials=credentials, credentials_file=credentials_file, ssl_credentials=ssl_credentials, scopes=scopes or self.AUTH_SCOPES, quota_project_id=quota_project_id, options=[ ("grpc.max_send_message_length", -1), ("grpc.max_receive_message_length", -1), ], ) self._ssl_channel_credentials = ssl_credentials else: host = host if ":" in host else host + ":443" if credentials is None: credentials, _ = google.auth.default(scopes=self.AUTH_SCOPES) self._grpc_channel = type(self).create_channel( host, credentials=credentials, ssl_credentials=ssl_channel_credentials, scopes=self.AUTH_SCOPES, options=[ ("grpc.max_send_message_length", -1), ("grpc.max_receive_message_length", -1), ], ) self._stubs = {} super().__init__( host=host, credentials=credentials, client_info=client_info, ) @classmethod def create_channel( cls, host: str = "googleads.googleapis.com", credentials: ga_credentials.Credentials = None, scopes: Optional[Sequence[str]] = None, **kwargs, ) -> grpc.Channel: return grpc_helpers.create_channel( host, credentials=credentials, scopes=scopes or cls.AUTH_SCOPES, **kwargs, ) @property def grpc_channel(self) -> grpc.Channel: return self._grpc_channel @property
Apache License 2.0
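get_domain_category above creates its gRPC stub lazily and memoizes it in self._stubs, so repeated property accesses reuse one callable. A minimal, framework-free re-sketch of that caching pattern; the fake channel below stands in for a real grpc.Channel and is purely illustrative.

class _FakeChannel:
    def unary_unary(self, method, **kwargs):
        # A real channel would return a serialized RPC callable here.
        return lambda request: "response for %s" % method

class StubCache:
    def __init__(self, channel):
        self._channel = channel
        self._stubs = {}

    @property
    def get_domain_category(self):
        if "get_domain_category" not in self._stubs:
            self._stubs["get_domain_category"] = self._channel.unary_unary(
                "/google.ads.googleads.v8.services.DomainCategoryService/GetDomainCategory")
        return self._stubs["get_domain_category"]

transport = StubCache(_FakeChannel())
assert transport.get_domain_category is transport.get_domain_category  # same cached callable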
commvault/cvpysdk
cvpysdk/activitycontrol.py
ActivityControl.is_enabled
python
def is_enabled(self, activity_type):
    self._get_activity_control_status()
    for each_activity in self._activity_control_properties_list:
        if int(each_activity['activityType']) == self._activity_type_dict[activity_type]:
            self._reEnableTime = each_activity['reEnableTime']
            self._noSchedEnable = each_activity['noSchedEnable']
            self._reenableTimeZone = each_activity['reenableTimeZone']
            return each_activity['enabled']
    o_str = 'Failed to find activity type:"{0}" in the response'.format(
        activity_type)
    raise SDKException('Client', '102', o_str)
Returns True/False based on the enabled flag and also sets other relevant properties for a given activity type.

Args:
    activity_type (str) -- Activity Type to be Enabled or Disabled

        Values:
            "ALL ACTIVITY",
            "DATA MANAGEMENT",
            "DATA RECOVERY",
            "DATA AGING",
            "AUX COPY",
            "DATA VERIFICATION",
            "DDB ACTIVITY",
            "SCHEDULER",
            "OFFLINE CONTENT INDEXING",
https://github.com/commvault/cvpysdk/blob/66df30e6e31d619812b7756cb4f7e130b220a08f/cvpysdk/activitycontrol.py#L233-L261
from __future__ import absolute_import from __future__ import unicode_literals from .exception import SDKException class ActivityControl(object): def __init__(self, commcell_object): self._commcell_object = commcell_object self._activity_type_dict = { "ALL ACTIVITY": 128, "DATA MANAGEMENT": 1, "DATA RECOVERY": 2, "DATA AGING": 16, "AUX COPY": 4, "DATA VERIFICATION": 8192, "DDB ACTIVITY": 512, "SCHEDULER": 256, "OFFLINE CONTENT INDEXING": 1024, } self._get_activity_control_status() def __repr__(self): representation_string = 'ActivityControl class instance' return representation_string def _request_json_(self, activity_type, enable_time): request_json = { "commCellInfo": { "commCellActivityControlInfo": { "activityControlOptions": [ { "activityType": self._activity_type_dict[activity_type], "enableAfterADelay": True, "enableActivityType": False, "dateTime": { "time": enable_time}}]}}} return request_json def set(self, activity_type, action): set_request = self._commcell_object._services['SET_ACTIVITY_CONTROL'] % ( str(self._activity_type_dict[activity_type]), str(action)) flag, response = self._commcell_object._cvpysdk_object.make_request( 'POST', set_request ) if flag: if response.json(): error_code = str(response.json()['errorCode']) if error_code == '0': self._get_activity_control_status() return else: raise SDKException( 'CVPySDK', '102', response.json()['errorMessage']) else: raise SDKException('Response', '102') else: response_string = self._commcell_object._update_response_( response.text) raise SDKException('Response', '101', response_string) def enable_after_delay(self, activity_type, enable_time): request_json = self._request_json_(activity_type, enable_time) set_request = self._commcell_object._services['SET_COMMCELL_PROPERTIES'] flag, response = self._commcell_object._cvpysdk_object.make_request( 'PUT', set_request, request_json ) if flag: if response.json() and 'response' in response.json(): error_code = response.json()['response'][0]['errorCode'] if error_code == 0: self._get_activity_control_status() return elif 'errorMessage' in response.json()['response'][0]: error_message = response.json( )['response'][0]['errorMessage'] o_str = 'Failed to enable activity control \ after a delay\nError: "{0}"'.format( error_message) raise SDKException('CVPySDK', '102', o_str) else: raise SDKException('Response', '102') else: response_string = self._commcell_object._update_response_( response.text) raise SDKException('Response', '101', response_string) def _get_activity_control_status(self): get_request = self._commcell_object._services['GET_ACTIVITY_CONTROL'] flag, response = self._commcell_object._cvpysdk_object.make_request( 'GET', get_request ) if flag: if response.json() and 'acObjects' in response.json(): self._activity_control_properties_list = response.json()[ 'acObjects'] else: raise SDKException('Response', '102') else: response_string = self._commcell_object._update_response_( response.text) raise SDKException('Response', '101', response_string)
Apache License 2.0
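A hedged usage sketch for ActivityControl.is_enabled above. The constructor shown in the context takes a Commcell object, so the sketch assumes `commcell` is an already-authenticated cvpysdk Commcell instance; it is not constructed here and the sketch will not run without one.

from cvpysdk.activitycontrol import ActivityControl

activity_control = ActivityControl(commcell)   # `commcell` assumed to exist
if activity_control.is_enabled('DATA MANAGEMENT'):
    print('data management activity is currently enabled')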
leonjza/hogar
hogar/Plugins/Learn/main.py
commands
python
def commands ():
    return ['learn', 'forget', 'show']
Commands

In the case of text plugins, returns the commands that this plugin should trigger for. For other message types, an empty list should be returned.

--
@return list
https://github.com/leonjza/hogar/blob/a8cf4b6a6b508e5e86d26dd5cbd55add560d26a5/hogar/Plugins/Learn/main.py#L60-L72
from hogar.Models.LearnKey import LearnKey
from hogar.Models.LearnValue import LearnValue
from hogar.Utils.StringUtils import ignore_case_replace

import peewee
import logging

logger = logging.getLogger(__name__)

def enabled ():
    return True

def applicable_types ():
    return ['text']
MIT License
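The commands() entry above simply advertises the trigger words the plugin responds to. A sketch of how a text message might be matched against those triggers; the dispatch loop itself is an illustrative assumption, not Hogar's actual router.

triggers = ['learn', 'forget', 'show']

def matches(text):
    first_word = text.strip().split(' ', 1)[0].lower()
    return first_word in triggers

print(matches('learn python is fun'))  # True
print(matches('hello there'))          # False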
openstack/cinder
cinder/api/api_utils.py
validate_integer
python
def validate_integer(value, name, min_value=None, max_value=None):
    try:
        value = strutils.validate_integer(value, name, min_value, max_value)
        return value
    except ValueError as e:
        raise webob.exc.HTTPBadRequest(explanation=str(e))
Make sure that value is a valid integer, potentially within range.

:param value: the value of the integer
:param name: the name of the integer
:param min_value: the minimum value of the integer
:param max_value: the maximum value of the integer
:returns: integer
https://github.com/openstack/cinder/blob/4558e4b53a7e41dc1263417a4824f39bb6fd30e1/cinder/api/api_utils.py#L128-L141
from keystoneauth1 import exceptions as ks_exc from keystoneauth1 import identity from keystoneauth1 import loading as ka_loading from keystoneclient import client from oslo_config import cfg from oslo_log import log as logging from oslo_utils import strutils import webob from webob import exc from cinder import exception from cinder.i18n import _ CONF = cfg.CONF CONF.import_group('keystone_authtoken', 'keystonemiddleware.auth_token.__init__') LOG = logging.getLogger(__name__) def _parse_is_public(is_public): if is_public is None: return True elif is_none_string(is_public): return None else: try: return strutils.bool_from_string(is_public, strict=True) except ValueError: msg = _('Invalid is_public filter [%s]') % is_public raise exc.HTTPBadRequest(explanation=msg) def is_none_string(val): if not isinstance(val, str): return False return val.lower() == 'none' def remove_invalid_filter_options(context, filters, allowed_search_options): if context.is_admin: return unknown_options = [opt for opt in filters if opt not in allowed_search_options] bad_options = ", ".join(unknown_options) LOG.debug("Removing options '%s' from query.", bad_options) for opt in unknown_options: del filters[opt] _visible_admin_metadata_keys = ['readonly', 'attached_mode'] def add_visible_admin_metadata(volume): visible_admin_meta = {} if volume.get('volume_admin_metadata'): if isinstance(volume['volume_admin_metadata'], dict): volume_admin_metadata = volume['volume_admin_metadata'] for key in volume_admin_metadata: if key in _visible_admin_metadata_keys: visible_admin_meta[key] = volume_admin_metadata[key] else: for item in volume['volume_admin_metadata']: if item['key'] in _visible_admin_metadata_keys: visible_admin_meta[item['key']] = item['value'] elif (volume.get('admin_metadata') and isinstance(volume.get('admin_metadata'), dict)): for key in _visible_admin_metadata_keys: if key in volume['admin_metadata'].keys(): visible_admin_meta[key] = volume['admin_metadata'][key] if not visible_admin_meta: return if volume.get('volume_metadata'): orig_meta = list(volume.get('volume_metadata')) for item in orig_meta: if item['key'] in visible_admin_meta.keys(): item['value'] = visible_admin_meta.pop(item['key']) for key, value in visible_admin_meta.items(): orig_meta.append({'key': key, 'value': value}) volume['volume_metadata'] = orig_meta elif (volume.get('metadata') and isinstance(volume.get('metadata'), dict)): volume['metadata'].update(visible_admin_meta) else: volume['metadata'] = visible_admin_meta
Apache License 2.0
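A hedged usage sketch for validate_integer above; it assumes cinder and its webob dependency are importable, as in the module this excerpt comes from. Valid input is returned as an int, while invalid input is re-raised as an HTTPBadRequest carrying the oslo strutils error message.

import webob.exc
from cinder.api.api_utils import validate_integer

print(validate_integer("10", "volume_size", min_value=1, max_value=1000))  # 10

try:
    validate_integer("abc", "volume_size", min_value=1)
except webob.exc.HTTPBadRequest as exc:
    print("rejected:", exc)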
osmr/imgclsmob
pytorch/pytorchcv/models/sepreresnet_cifar.py
sepreresnet542bn_svhn
python
def sepreresnet542bn_svhn(num_classes=10, **kwargs):
    return get_sepreresnet_cifar(num_classes=num_classes, blocks=542, bottleneck=True,
                                 model_name="sepreresnet542bn_svhn", **kwargs)
SE-PreResNet-542(BN) model for SVHN from 'Squeeze-and-Excitation Networks,' https://arxiv.org/abs/1709.01507.

Parameters:
----------
num_classes : int, default 10
    Number of classification classes.
pretrained : bool, default False
    Whether to load the pretrained weights for model.
root : str, default '~/.torch/models'
    Location for keeping the model parameters.
https://github.com/osmr/imgclsmob/blob/ea5f784eea865ce830f3f97c5c1d1f6491d9cbb2/pytorch/pytorchcv/models/sepreresnet_cifar.py#L443-L457
__all__ = ['CIFARSEPreResNet', 'sepreresnet20_cifar10', 'sepreresnet20_cifar100', 'sepreresnet20_svhn', 'sepreresnet56_cifar10', 'sepreresnet56_cifar100', 'sepreresnet56_svhn', 'sepreresnet110_cifar10', 'sepreresnet110_cifar100', 'sepreresnet110_svhn', 'sepreresnet164bn_cifar10', 'sepreresnet164bn_cifar100', 'sepreresnet164bn_svhn', 'sepreresnet272bn_cifar10', 'sepreresnet272bn_cifar100', 'sepreresnet272bn_svhn', 'sepreresnet542bn_cifar10', 'sepreresnet542bn_cifar100', 'sepreresnet542bn_svhn', 'sepreresnet1001_cifar10', 'sepreresnet1001_cifar100', 'sepreresnet1001_svhn', 'sepreresnet1202_cifar10', 'sepreresnet1202_cifar100', 'sepreresnet1202_svhn'] import os import torch.nn as nn import torch.nn.init as init from .common import conv3x3_block from .sepreresnet import SEPreResUnit class CIFARSEPreResNet(nn.Module): def __init__(self, channels, init_block_channels, bottleneck, in_channels=3, in_size=(32, 32), num_classes=10): super(CIFARSEPreResNet, self).__init__() self.in_size = in_size self.num_classes = num_classes self.features = nn.Sequential() self.features.add_module("init_block", conv3x3_block( in_channels=in_channels, out_channels=init_block_channels)) in_channels = init_block_channels for i, channels_per_stage in enumerate(channels): stage = nn.Sequential() for j, out_channels in enumerate(channels_per_stage): stride = 2 if (j == 0) and (i != 0) else 1 stage.add_module("unit{}".format(j + 1), SEPreResUnit( in_channels=in_channels, out_channels=out_channels, stride=stride, bottleneck=bottleneck, conv1_stride=False)) in_channels = out_channels self.features.add_module("stage{}".format(i + 1), stage) self.features.add_module("final_pool", nn.AvgPool2d( kernel_size=8, stride=1)) self.output = nn.Linear( in_features=in_channels, out_features=num_classes) self._init_params() def _init_params(self): for name, module in self.named_modules(): if isinstance(module, nn.Conv2d): init.kaiming_uniform_(module.weight) if module.bias is not None: init.constant_(module.bias, 0) def forward(self, x): x = self.features(x) x = x.view(x.size(0), -1) x = self.output(x) return x def get_sepreresnet_cifar(num_classes, blocks, bottleneck, model_name=None, pretrained=False, root=os.path.join("~", ".torch", "models"), **kwargs): assert (num_classes in [10, 100]) if bottleneck: assert ((blocks - 2) % 9 == 0) layers = [(blocks - 2) // 9] * 3 else: assert ((blocks - 2) % 6 == 0) layers = [(blocks - 2) // 6] * 3 channels_per_layers = [16, 32, 64] init_block_channels = 16 channels = [[ci] * li for (ci, li) in zip(channels_per_layers, layers)] if bottleneck: channels = [[cij * 4 for cij in ci] for ci in channels] net = CIFARSEPreResNet( channels=channels, init_block_channels=init_block_channels, bottleneck=bottleneck, num_classes=num_classes, **kwargs) if pretrained: if (model_name is None) or (not model_name): raise ValueError("Parameter `model_name` should be properly initialized for loading pretrained model.") from .model_store import download_model download_model( net=net, model_name=model_name, local_model_store_dir_path=root) return net def sepreresnet20_cifar10(num_classes=10, **kwargs): return get_sepreresnet_cifar(num_classes=num_classes, blocks=20, bottleneck=False, model_name="sepreresnet20_cifar10", **kwargs) def sepreresnet20_cifar100(num_classes=100, **kwargs): return get_sepreresnet_cifar(num_classes=num_classes, blocks=20, bottleneck=False, model_name="sepreresnet20_cifar100", **kwargs) def sepreresnet20_svhn(num_classes=10, **kwargs): return get_sepreresnet_cifar(num_classes=num_classes, 
blocks=20, bottleneck=False, model_name="sepreresnet20_svhn", **kwargs) def sepreresnet56_cifar10(num_classes=10, **kwargs): return get_sepreresnet_cifar(num_classes=num_classes, blocks=56, bottleneck=False, model_name="sepreresnet56_cifar10", **kwargs) def sepreresnet56_cifar100(num_classes=100, **kwargs): return get_sepreresnet_cifar(num_classes=num_classes, blocks=56, bottleneck=False, model_name="sepreresnet56_cifar100", **kwargs) def sepreresnet56_svhn(num_classes=10, **kwargs): return get_sepreresnet_cifar(num_classes=num_classes, blocks=56, bottleneck=False, model_name="sepreresnet56_svhn", **kwargs) def sepreresnet110_cifar10(num_classes=10, **kwargs): return get_sepreresnet_cifar(num_classes=num_classes, blocks=110, bottleneck=False, model_name="sepreresnet110_cifar10", **kwargs) def sepreresnet110_cifar100(num_classes=100, **kwargs): return get_sepreresnet_cifar(num_classes=num_classes, blocks=110, bottleneck=False, model_name="sepreresnet110_cifar100", **kwargs) def sepreresnet110_svhn(num_classes=10, **kwargs): return get_sepreresnet_cifar(num_classes=num_classes, blocks=110, bottleneck=False, model_name="sepreresnet110_svhn", **kwargs) def sepreresnet164bn_cifar10(num_classes=10, **kwargs): return get_sepreresnet_cifar(num_classes=num_classes, blocks=164, bottleneck=True, model_name="sepreresnet164bn_cifar10", **kwargs) def sepreresnet164bn_cifar100(num_classes=100, **kwargs): return get_sepreresnet_cifar(num_classes=num_classes, blocks=164, bottleneck=True, model_name="sepreresnet164bn_cifar100", **kwargs) def sepreresnet164bn_svhn(num_classes=10, **kwargs): return get_sepreresnet_cifar(num_classes=num_classes, blocks=164, bottleneck=True, model_name="sepreresnet164bn_svhn", **kwargs) def sepreresnet272bn_cifar10(num_classes=10, **kwargs): return get_sepreresnet_cifar(num_classes=num_classes, blocks=272, bottleneck=True, model_name="sepreresnet272bn_cifar10", **kwargs) def sepreresnet272bn_cifar100(num_classes=100, **kwargs): return get_sepreresnet_cifar(num_classes=num_classes, blocks=272, bottleneck=True, model_name="sepreresnet272bn_cifar100", **kwargs) def sepreresnet272bn_svhn(num_classes=10, **kwargs): return get_sepreresnet_cifar(num_classes=num_classes, blocks=272, bottleneck=True, model_name="sepreresnet272bn_svhn", **kwargs) def sepreresnet542bn_cifar10(num_classes=10, **kwargs): return get_sepreresnet_cifar(num_classes=num_classes, blocks=542, bottleneck=True, model_name="sepreresnet542bn_cifar10", **kwargs) def sepreresnet542bn_cifar100(num_classes=100, **kwargs): return get_sepreresnet_cifar(num_classes=num_classes, blocks=542, bottleneck=True, model_name="sepreresnet542bn_cifar100", **kwargs)
MIT License
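A hedged usage sketch for sepreresnet542bn_svhn above; it assumes the pytorchcv package is installed and that the builder is importable from pytorchcv.models.sepreresnet_cifar (the installed counterpart of the path in this record). SVHN inputs are 32x32 RGB and the head has 10 classes, per the builder's defaults; note the 542-layer network is large, so a CPU forward pass is slow.

import torch
from pytorchcv.models.sepreresnet_cifar import sepreresnet542bn_svhn

net = sepreresnet542bn_svhn()          # pretrained weights not loaded by default
net.eval()
x = torch.randn(1, 3, 32, 32)          # SVHN-sized RGB input
with torch.no_grad():
    y = net(x)
print(y.shape)                         # torch.Size([1, 10])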
fish-quant/big-fish
bigfish/classification/input_preparation.py
_get_centrosome_distance_map
python
def _get_centrosome_distance_map(centrosome_coord, cell_mask):
    if centrosome_coord.size == 3:
        centrosome_coord_2d = centrosome_coord[1:]
    else:
        centrosome_coord_2d = centrosome_coord.copy()
    mask_centrosome = np.zeros_like(cell_mask)
    mask_centrosome[centrosome_coord_2d[:, 0], centrosome_coord_2d[:, 1]] = True
    distance_map = ndi.distance_transform_edt(~mask_centrosome)
    distance_map[cell_mask == 0] = 0
    distance_map = distance_map.astype(np.float32)
    return distance_map
Build distance map from a centrosome localisation.

Parameters
----------
centrosome_coord : np.ndarray, np.int64
    Coordinates of the detected centrosome with shape (nb_elements, 3) or (nb_elements, 2). One coordinate per dimension (zyx or yx dimensions).
cell_mask : np.ndarray, bool
    Binary surface of the cell with shape (y, x).

Returns
-------
distance_map : np.ndarray, np.float32
    Distance map from the centrosome with shape (y, x).
https://github.com/fish-quant/big-fish/blob/5512b6e3274872793ef4365a6dc423c72add91f9/bigfish/classification/input_preparation.py#L288-L320
import numpy as np from scipy import ndimage as ndi import bigfish.stack as stack from skimage.measure import regionprops def prepare_extracted_data(cell_mask, nuc_mask=None, ndim=None, rna_coord=None, centrosome_coord=None): stack.check_parameter(ndim=(int, type(None))) if rna_coord is not None and ndim is None: raise ValueError("'ndim' should be specified (2 or 3).") stack.check_array(cell_mask, ndim=2, dtype=[np.uint8, np.uint16, np.int64, bool]) cell_mask = cell_mask.astype(bool) if nuc_mask is not None: stack.check_array(nuc_mask, ndim=2, dtype=[np.uint8, np.uint16, np.int64, bool]) nuc_mask = nuc_mask.astype(bool) if rna_coord is not None: stack.check_array(rna_coord, ndim=2, dtype=np.int64) if centrosome_coord is not None: stack.check_array(centrosome_coord, ndim=2, dtype=np.int64) distance_cell = ndi.distance_transform_edt(cell_mask) distance_cell = distance_cell.astype(np.float32) distance_cell_normalized = distance_cell / distance_cell.max() centroid_cell = _get_centroid_surface(cell_mask) distance_centroid_cell = _get_centroid_distance_map(centroid_cell, cell_mask) if nuc_mask is not None: cell_mask_out_nuc = cell_mask.copy() cell_mask_out_nuc[nuc_mask] = False distance_nuc_ = ndi.distance_transform_edt(~nuc_mask) distance_nuc = cell_mask * distance_nuc_ distance_nuc = distance_nuc.astype(np.float32) distance_nuc_normalized = distance_nuc / distance_nuc.max() centroid_nuc = _get_centroid_surface(nuc_mask) distance_centroid_nuc = _get_centroid_distance_map(centroid_nuc, cell_mask) else: cell_mask_out_nuc = None distance_nuc = None distance_nuc_normalized = None centroid_nuc = None distance_centroid_nuc = None if rna_coord is not None: if len(rna_coord) == 0: centroid_rna = np.array([0] * ndim, dtype=np.int64) else: centroid_rna = _get_centroid_rna(rna_coord, ndim) distance_centroid_rna = _get_centroid_distance_map( centroid_rna, cell_mask) if nuc_mask is not None: mask_rna_in_nuc = nuc_mask[rna_coord[:, ndim - 2], rna_coord[:, ndim - 1]] rna_coord_out_nuc = rna_coord[~mask_rna_in_nuc] if len(rna_coord_out_nuc) == 0: centroid_rna_out_nuc = np.array([0] * ndim, dtype=np.int64) else: centroid_rna_out_nuc = _get_centroid_rna(rna_coord_out_nuc, ndim) distance_centroid_rna_out_nuc = _get_centroid_distance_map( centroid_rna_out_nuc, cell_mask) else: rna_coord_out_nuc = None centroid_rna_out_nuc = None distance_centroid_rna_out_nuc = None else: centroid_rna = None distance_centroid_rna = None rna_coord_out_nuc = None centroid_rna_out_nuc = None distance_centroid_rna_out_nuc = None if centrosome_coord is not None: if len(centrosome_coord) == 0: distance_centrosome = distance_cell.copy() else: distance_centrosome = _get_centrosome_distance_map( centrosome_coord, cell_mask) else: distance_centrosome = None prepared_inputs = (cell_mask, distance_cell, distance_cell_normalized, centroid_cell, distance_centroid_cell, nuc_mask, cell_mask_out_nuc, distance_nuc, distance_nuc_normalized, centroid_nuc, distance_centroid_nuc, rna_coord_out_nuc, centroid_rna, distance_centroid_rna, centroid_rna_out_nuc, distance_centroid_rna_out_nuc, distance_centrosome) return prepared_inputs def _get_centroid_surface(mask): region = regionprops(mask.astype(np.uint8))[0] centroid = np.array(region.centroid, dtype=np.int64) return centroid def _get_centroid_rna(rna_coord, ndim): centroid_rna = np.mean(rna_coord[:, :ndim], axis=0, dtype=np.int64) return centroid_rna def _get_centroid_distance_map(centroid, cell_mask): if centroid.size == 3: centroid_2d = centroid[1:] else: centroid_2d = centroid.copy() mask_centroid = 
np.zeros_like(cell_mask) mask_centroid[centroid_2d[0], centroid_2d[1]] = True distance_map = ndi.distance_transform_edt(~mask_centroid) distance_map[cell_mask == 0] = 0 distance_map = distance_map.astype(np.float32) return distance_map
BSD 3-Clause New or Revised License
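The distance map above is an Euclidean distance transform to the centrosome pixels, zeroed outside the cell. A self-contained re-sketch on a toy 5x5 mask with one "centrosome" pixel; the shapes and values are illustrative, not from the big-fish test data.

import numpy as np
from scipy import ndimage as ndi

cell_mask = np.ones((5, 5), dtype=bool)
centrosome_coord = np.array([[2, 2]], dtype=np.int64)   # (nb_elements, 2), yx order

mask_centrosome = np.zeros_like(cell_mask)
mask_centrosome[centrosome_coord[:, 0], centrosome_coord[:, 1]] = True
distance_map = ndi.distance_transform_edt(~mask_centrosome)  # distance to centrosome
distance_map[cell_mask == 0] = 0                             # keep the cell surface only

print(distance_map[2, 2], distance_map[0, 0])  # 0.0 and ~2.83 (sqrt(8))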
docusign/docusign-python-client
docusign_esign/models/bulk_sending_list_summary.py
BulkSendingListSummary.__eq__
python
def __eq__(self, other):
    if not isinstance(other, BulkSendingListSummary):
        return False

    return self.to_dict() == other.to_dict()
Returns true if both objects are equal
https://github.com/docusign/docusign-python-client/blob/c6aeafff0d046fa6c10a398be83ba9e24b05d4ea/docusign_esign/models/bulk_sending_list_summary.py#L193-L198
import pprint import re import six from docusign_esign.client.configuration import Configuration class BulkSendingListSummary(object): """ Attributes: swagger_types (dict): The key is attribute name and the value is attribute type. attribute_map (dict): The key is attribute name and the value is json key in definition. """ swagger_types = { 'bulk_send_list_id': 'str', 'created_by_user': 'str', 'created_date': 'str', 'name': 'str' } attribute_map = { 'bulk_send_list_id': 'bulkSendListId', 'created_by_user': 'createdByUser', 'created_date': 'createdDate', 'name': 'name' } def __init__(self, _configuration=None, **kwargs): if _configuration is None: _configuration = Configuration() self._configuration = _configuration self._bulk_send_list_id = None self._created_by_user = None self._created_date = None self._name = None self.discriminator = None setattr(self, "_{}".format('bulk_send_list_id'), kwargs.get('bulk_send_list_id', None)) setattr(self, "_{}".format('created_by_user'), kwargs.get('created_by_user', None)) setattr(self, "_{}".format('created_date'), kwargs.get('created_date', None)) setattr(self, "_{}".format('name'), kwargs.get('name', None)) @property def bulk_send_list_id(self): return self._bulk_send_list_id @bulk_send_list_id.setter def bulk_send_list_id(self, bulk_send_list_id): self._bulk_send_list_id = bulk_send_list_id @property def created_by_user(self): return self._created_by_user @created_by_user.setter def created_by_user(self, created_by_user): self._created_by_user = created_by_user @property def created_date(self): return self._created_date @created_date.setter def created_date(self, created_date): self._created_date = created_date @property def name(self): return self._name @name.setter def name(self, name): self._name = name def to_dict(self): result = {} for attr, _ in six.iteritems(self.swagger_types): value = getattr(self, attr) if isinstance(value, list): result[attr] = list(map( lambda x: x.to_dict() if hasattr(x, "to_dict") else x, value )) elif hasattr(value, "to_dict"): result[attr] = value.to_dict() elif isinstance(value, dict): result[attr] = dict(map( lambda item: (item[0], item[1].to_dict()) if hasattr(item[1], "to_dict") else item, value.items() )) else: result[attr] = value if issubclass(BulkSendingListSummary, dict): for key, value in self.items(): result[key] = value return result def to_str(self): return pprint.pformat(self.to_dict()) def __repr__(self): return self.to_str()
MIT License
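Illustrative comparison using the __eq__ method above; the attribute values are invented and the import path follows the module path listed in this record.

from docusign_esign.models.bulk_sending_list_summary import BulkSendingListSummary

a = BulkSendingListSummary(bulk_send_list_id='42', name='Quarterly batch')
b = BulkSendingListSummary(bulk_send_list_id='42', name='Quarterly batch')
c = BulkSendingListSummary(bulk_send_list_id='99', name='Other batch')

print(a == b)                # True  -- identical to_dict() contents
print(a == c)                # False -- attributes differ
print(a == 'not a summary')  # False -- wrong type fails the isinstance check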
rlworkgroup/garage
src/garage/torch/modules/multi_headed_mlp_module.py
MultiHeadedMLPModule._check_parameter_for_output_layer
python
def _check_parameter_for_output_layer(cls, var_name, var, n_heads):
    if isinstance(var, (list, tuple)):
        if len(var) == 1:
            return list(var) * n_heads
        if len(var) == n_heads:
            return var
        msg = ('{} should be either an integer or a collection of length '
               'n_heads ({}), but {} provided.')
        raise ValueError(msg.format(var_name, n_heads, var))
    return [copy.deepcopy(var) for _ in range(n_heads)]
Check input parameters for output layer are valid. Args: var_name (str): variable name var (any): variable to be checked n_heads (int): number of head Returns: list: list of variables (length of n_heads) Raises: ValueError: if the variable is a list but length of the variable is not equal to n_heads
https://github.com/rlworkgroup/garage/blob/3a578852c392cecde5b7c9786aa182d74f6df1d4/src/garage/torch/modules/multi_headed_mlp_module.py#L109-L133
import copy import torch import torch.nn as nn from garage.torch import NonLinearity class MultiHeadedMLPModule(nn.Module): def __init__(self, n_heads, input_dim, output_dims, hidden_sizes, hidden_nonlinearity=torch.relu, hidden_w_init=nn.init.xavier_normal_, hidden_b_init=nn.init.zeros_, output_nonlinearities=None, output_w_inits=nn.init.xavier_normal_, output_b_inits=nn.init.zeros_, layer_normalization=False): super().__init__() self._layers = nn.ModuleList() output_dims = self._check_parameter_for_output_layer( 'output_dims', output_dims, n_heads) output_w_inits = self._check_parameter_for_output_layer( 'output_w_inits', output_w_inits, n_heads) output_b_inits = self._check_parameter_for_output_layer( 'output_b_inits', output_b_inits, n_heads) output_nonlinearities = self._check_parameter_for_output_layer( 'output_nonlinearities', output_nonlinearities, n_heads) self._layers = nn.ModuleList() prev_size = input_dim for size in hidden_sizes: hidden_layers = nn.Sequential() if layer_normalization: hidden_layers.add_module('layer_normalization', nn.LayerNorm(prev_size)) linear_layer = nn.Linear(prev_size, size) hidden_w_init(linear_layer.weight) hidden_b_init(linear_layer.bias) hidden_layers.add_module('linear', linear_layer) if hidden_nonlinearity: hidden_layers.add_module('non_linearity', NonLinearity(hidden_nonlinearity)) self._layers.append(hidden_layers) prev_size = size self._output_layers = nn.ModuleList() for i in range(n_heads): output_layer = nn.Sequential() linear_layer = nn.Linear(prev_size, output_dims[i]) output_w_inits[i](linear_layer.weight) output_b_inits[i](linear_layer.bias) output_layer.add_module('linear', linear_layer) if output_nonlinearities[i]: output_layer.add_module('non_linearity', NonLinearity(output_nonlinearities[i])) self._output_layers.append(output_layer) @classmethod
MIT License
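A small sketch of how the broadcasting in _check_parameter_for_output_layer behaves, assuming it is called directly on the class (it is a classmethod in the surrounding module); the argument values are illustrative.

import torch.nn as nn

n_heads = 3

# A scalar is deep-copied once per head.
dims = MultiHeadedMLPModule._check_parameter_for_output_layer('output_dims', 2, n_heads)
# dims == [2, 2, 2]

# A length-1 collection is repeated; a length-n_heads collection is returned unchanged.
inits = MultiHeadedMLPModule._check_parameter_for_output_layer(
    'output_w_inits', [nn.init.xavier_normal_], n_heads)
# inits == [xavier_normal_, xavier_normal_, xavier_normal_]

# Any other length (e.g. 2 values for 3 heads) raises ValueError.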
docusign/docusign-python-client
docusign_esign/models/commission_county.py
CommissionCounty.anchor_tab_processor_version_metadata
python
def anchor_tab_processor_version_metadata(self, anchor_tab_processor_version_metadata):
    self._anchor_tab_processor_version_metadata = anchor_tab_processor_version_metadata
Sets the anchor_tab_processor_version_metadata of this CommissionCounty. :param anchor_tab_processor_version_metadata: The anchor_tab_processor_version_metadata of this CommissionCounty. # noqa: E501 :type: PropertyMetadata
https://github.com/docusign/docusign-python-client/blob/c6aeafff0d046fa6c10a398be83ba9e24b05d4ea/docusign_esign/models/commission_county.py#L748-L756
import pprint import re import six from docusign_esign.client.configuration import Configuration class CommissionCounty(object): """ Attributes: swagger_types (dict): The key is attribute name and the value is attribute type. attribute_map (dict): The key is attribute name and the value is json key in definition. """ swagger_types = { 'anchor_allow_white_space_in_characters': 'str', 'anchor_allow_white_space_in_characters_metadata': 'PropertyMetadata', 'anchor_case_sensitive': 'str', 'anchor_case_sensitive_metadata': 'PropertyMetadata', 'anchor_horizontal_alignment': 'str', 'anchor_horizontal_alignment_metadata': 'PropertyMetadata', 'anchor_ignore_if_not_present': 'str', 'anchor_ignore_if_not_present_metadata': 'PropertyMetadata', 'anchor_match_whole_word': 'str', 'anchor_match_whole_word_metadata': 'PropertyMetadata', 'anchor_string': 'str', 'anchor_string_metadata': 'PropertyMetadata', 'anchor_tab_processor_version': 'str', 'anchor_tab_processor_version_metadata': 'PropertyMetadata', 'anchor_units': 'str', 'anchor_units_metadata': 'PropertyMetadata', 'anchor_x_offset': 'str', 'anchor_x_offset_metadata': 'PropertyMetadata', 'anchor_y_offset': 'str', 'anchor_y_offset_metadata': 'PropertyMetadata', 'bold': 'str', 'bold_metadata': 'PropertyMetadata', 'conceal_value_on_document': 'str', 'conceal_value_on_document_metadata': 'PropertyMetadata', 'conditional_parent_label': 'str', 'conditional_parent_label_metadata': 'PropertyMetadata', 'conditional_parent_value': 'str', 'conditional_parent_value_metadata': 'PropertyMetadata', 'custom_tab_id': 'str', 'custom_tab_id_metadata': 'PropertyMetadata', 'disable_auto_size': 'str', 'disable_auto_size_metadata': 'PropertyMetadata', 'document_id': 'str', 'document_id_metadata': 'PropertyMetadata', 'error_details': 'ErrorDetails', 'font': 'str', 'font_color': 'str', 'font_color_metadata': 'PropertyMetadata', 'font_metadata': 'PropertyMetadata', 'font_size': 'str', 'font_size_metadata': 'PropertyMetadata', 'form_order': 'str', 'form_order_metadata': 'PropertyMetadata', 'form_page_label': 'str', 'form_page_label_metadata': 'PropertyMetadata', 'form_page_number': 'str', 'form_page_number_metadata': 'PropertyMetadata', 'height': 'str', 'height_metadata': 'PropertyMetadata', 'italic': 'str', 'italic_metadata': 'PropertyMetadata', 'locale_policy': 'LocalePolicyTab', 'locked': 'str', 'locked_metadata': 'PropertyMetadata', 'max_length': 'str', 'max_length_metadata': 'PropertyMetadata', 'merge_field': 'MergeField', 'merge_field_xml': 'str', 'name': 'str', 'name_metadata': 'PropertyMetadata', 'original_value': 'str', 'original_value_metadata': 'PropertyMetadata', 'page_number': 'str', 'page_number_metadata': 'PropertyMetadata', 'recipient_id': 'str', 'recipient_id_guid': 'str', 'recipient_id_guid_metadata': 'PropertyMetadata', 'recipient_id_metadata': 'PropertyMetadata', 'required': 'str', 'required_metadata': 'PropertyMetadata', 'smart_contract_information': 'SmartContractInformation', 'source': 'str', 'status': 'str', 'status_metadata': 'PropertyMetadata', 'tab_group_labels': 'list[str]', 'tab_group_labels_metadata': 'PropertyMetadata', 'tab_id': 'str', 'tab_id_metadata': 'PropertyMetadata', 'tab_label': 'str', 'tab_label_metadata': 'PropertyMetadata', 'tab_order': 'str', 'tab_order_metadata': 'PropertyMetadata', 'tab_type': 'str', 'tab_type_metadata': 'PropertyMetadata', 'template_locked': 'str', 'template_locked_metadata': 'PropertyMetadata', 'template_required': 'str', 'template_required_metadata': 'PropertyMetadata', 'tooltip': 'str', 'tool_tip_metadata': 
'PropertyMetadata', 'underline': 'str', 'underline_metadata': 'PropertyMetadata', 'value': 'str', 'value_metadata': 'PropertyMetadata', 'width': 'str', 'width_metadata': 'PropertyMetadata', 'x_position': 'str', 'x_position_metadata': 'PropertyMetadata', 'y_position': 'str', 'y_position_metadata': 'PropertyMetadata' } attribute_map = { 'anchor_allow_white_space_in_characters': 'anchorAllowWhiteSpaceInCharacters', 'anchor_allow_white_space_in_characters_metadata': 'anchorAllowWhiteSpaceInCharactersMetadata', 'anchor_case_sensitive': 'anchorCaseSensitive', 'anchor_case_sensitive_metadata': 'anchorCaseSensitiveMetadata', 'anchor_horizontal_alignment': 'anchorHorizontalAlignment', 'anchor_horizontal_alignment_metadata': 'anchorHorizontalAlignmentMetadata', 'anchor_ignore_if_not_present': 'anchorIgnoreIfNotPresent', 'anchor_ignore_if_not_present_metadata': 'anchorIgnoreIfNotPresentMetadata', 'anchor_match_whole_word': 'anchorMatchWholeWord', 'anchor_match_whole_word_metadata': 'anchorMatchWholeWordMetadata', 'anchor_string': 'anchorString', 'anchor_string_metadata': 'anchorStringMetadata', 'anchor_tab_processor_version': 'anchorTabProcessorVersion', 'anchor_tab_processor_version_metadata': 'anchorTabProcessorVersionMetadata', 'anchor_units': 'anchorUnits', 'anchor_units_metadata': 'anchorUnitsMetadata', 'anchor_x_offset': 'anchorXOffset', 'anchor_x_offset_metadata': 'anchorXOffsetMetadata', 'anchor_y_offset': 'anchorYOffset', 'anchor_y_offset_metadata': 'anchorYOffsetMetadata', 'bold': 'bold', 'bold_metadata': 'boldMetadata', 'conceal_value_on_document': 'concealValueOnDocument', 'conceal_value_on_document_metadata': 'concealValueOnDocumentMetadata', 'conditional_parent_label': 'conditionalParentLabel', 'conditional_parent_label_metadata': 'conditionalParentLabelMetadata', 'conditional_parent_value': 'conditionalParentValue', 'conditional_parent_value_metadata': 'conditionalParentValueMetadata', 'custom_tab_id': 'customTabId', 'custom_tab_id_metadata': 'customTabIdMetadata', 'disable_auto_size': 'disableAutoSize', 'disable_auto_size_metadata': 'disableAutoSizeMetadata', 'document_id': 'documentId', 'document_id_metadata': 'documentIdMetadata', 'error_details': 'errorDetails', 'font': 'font', 'font_color': 'fontColor', 'font_color_metadata': 'fontColorMetadata', 'font_metadata': 'fontMetadata', 'font_size': 'fontSize', 'font_size_metadata': 'fontSizeMetadata', 'form_order': 'formOrder', 'form_order_metadata': 'formOrderMetadata', 'form_page_label': 'formPageLabel', 'form_page_label_metadata': 'formPageLabelMetadata', 'form_page_number': 'formPageNumber', 'form_page_number_metadata': 'formPageNumberMetadata', 'height': 'height', 'height_metadata': 'heightMetadata', 'italic': 'italic', 'italic_metadata': 'italicMetadata', 'locale_policy': 'localePolicy', 'locked': 'locked', 'locked_metadata': 'lockedMetadata', 'max_length': 'maxLength', 'max_length_metadata': 'maxLengthMetadata', 'merge_field': 'mergeField', 'merge_field_xml': 'mergeFieldXml', 'name': 'name', 'name_metadata': 'nameMetadata', 'original_value': 'originalValue', 'original_value_metadata': 'originalValueMetadata', 'page_number': 'pageNumber', 'page_number_metadata': 'pageNumberMetadata', 'recipient_id': 'recipientId', 'recipient_id_guid': 'recipientIdGuid', 'recipient_id_guid_metadata': 'recipientIdGuidMetadata', 'recipient_id_metadata': 'recipientIdMetadata', 'required': 'required', 'required_metadata': 'requiredMetadata', 'smart_contract_information': 'smartContractInformation', 'source': 'source', 'status': 'status', 
'status_metadata': 'statusMetadata', 'tab_group_labels': 'tabGroupLabels', 'tab_group_labels_metadata': 'tabGroupLabelsMetadata', 'tab_id': 'tabId', 'tab_id_metadata': 'tabIdMetadata', 'tab_label': 'tabLabel', 'tab_label_metadata': 'tabLabelMetadata', 'tab_order': 'tabOrder', 'tab_order_metadata': 'tabOrderMetadata', 'tab_type': 'tabType', 'tab_type_metadata': 'tabTypeMetadata', 'template_locked': 'templateLocked', 'template_locked_metadata': 'templateLockedMetadata', 'template_required': 'templateRequired', 'template_required_metadata': 'templateRequiredMetadata', 'tooltip': 'tooltip', 'tool_tip_metadata': 'toolTipMetadata', 'underline': 'underline', 'underline_metadata': 'underlineMetadata', 'value': 'value', 'value_metadata': 'valueMetadata', 'width': 'width', 'width_metadata': 'widthMetadata', 'x_position': 'xPosition', 'x_position_metadata': 'xPositionMetadata', 'y_position': 'yPosition', 'y_position_metadata': 'yPositionMetadata' } def __init__(self, _configuration=None, **kwargs): if _configuration is None: _configuration = Configuration() self._configuration = _configuration self._anchor_allow_white_space_in_characters = None self._anchor_allow_white_space_in_characters_metadata = None self._anchor_case_sensitive = None self._anchor_case_sensitive_metadata = None self._anchor_horizontal_alignment = None self._anchor_horizontal_alignment_metadata = None self._anchor_ignore_if_not_present = None self._anchor_ignore_if_not_present_metadata = None self._anchor_match_whole_word = None self._anchor_match_whole_word_metadata = None self._anchor_string = None self._anchor_string_metadata = None self._anchor_tab_processor_version = None self._anchor_tab_processor_version_metadata = None self._anchor_units = None self._anchor_units_metadata = None self._anchor_x_offset = None self._anchor_x_offset_metadata = None self._anchor_y_offset = None self._anchor_y_offset_metadata = None self._bold = None self._bold_metadata = None self._conceal_value_on_document = None self._conceal_value_on_document_metadata = None self._conditional_parent_label = None self._conditional_parent_label_metadata = None self._conditional_parent_value = None self._conditional_parent_value_metadata = None self._custom_tab_id = None self._custom_tab_id_metadata = None self._disable_auto_size = None self._disable_auto_size_metadata = None self._document_id = None self._document_id_metadata = None self._error_details = None self._font = None self._font_color = None self._font_color_metadata = None self._font_metadata = None self._font_size = None self._font_size_metadata = None self._form_order = None self._form_order_metadata = None self._form_page_label = None self._form_page_label_metadata = None self._form_page_number = None self._form_page_number_metadata = None self._height = None self._height_metadata = None self._italic = None self._italic_metadata = None self._locale_policy = None self._locked = None self._locked_metadata = None self._max_length = None self._max_length_metadata = None self._merge_field = None self._merge_field_xml = None self._name = None self._name_metadata = None self._original_value = None self._original_value_metadata = None self._page_number = None self._page_number_metadata = None self._recipient_id = None self._recipient_id_guid = None self._recipient_id_guid_metadata = None self._recipient_id_metadata = None self._required = None self._required_metadata = None self._smart_contract_information = None self._source = None self._status = None self._status_metadata = None self._tab_group_labels = 
None self._tab_group_labels_metadata = None self._tab_id = None self._tab_id_metadata = None self._tab_label = None self._tab_label_metadata = None self._tab_order = None self._tab_order_metadata = None self._tab_type = None self._tab_type_metadata = None self._template_locked = None self._template_locked_metadata = None self._template_required = None self._template_required_metadata = None self._tooltip = None self._tool_tip_metadata = None self._underline = None self._underline_metadata = None self._value = None self._value_metadata = None self._width = None self._width_metadata = None self._x_position = None self._x_position_metadata = None self._y_position = None self._y_position_metadata = None self.discriminator = None setattr(self, "_{}".format('anchor_allow_white_space_in_characters'), kwargs.get('anchor_allow_white_space_in_characters', None)) setattr(self, "_{}".format('anchor_allow_white_space_in_characters_metadata'), kwargs.get('anchor_allow_white_space_in_characters_metadata', None)) setattr(self, "_{}".format('anchor_case_sensitive'), kwargs.get('anchor_case_sensitive', None)) setattr(self, "_{}".format('anchor_case_sensitive_metadata'), kwargs.get('anchor_case_sensitive_metadata', None)) setattr(self, "_{}".format('anchor_horizontal_alignment'), kwargs.get('anchor_horizontal_alignment', None)) setattr(self, "_{}".format('anchor_horizontal_alignment_metadata'), kwargs.get('anchor_horizontal_alignment_metadata', None)) setattr(self, "_{}".format('anchor_ignore_if_not_present'), kwargs.get('anchor_ignore_if_not_present', None)) setattr(self, "_{}".format('anchor_ignore_if_not_present_metadata'), kwargs.get('anchor_ignore_if_not_present_metadata', None)) setattr(self, "_{}".format('anchor_match_whole_word'), kwargs.get('anchor_match_whole_word', None)) setattr(self, "_{}".format('anchor_match_whole_word_metadata'), kwargs.get('anchor_match_whole_word_metadata', None)) setattr(self, "_{}".format('anchor_string'), kwargs.get('anchor_string', None)) setattr(self, "_{}".format('anchor_string_metadata'), kwargs.get('anchor_string_metadata', None)) setattr(self, "_{}".format('anchor_tab_processor_version'), kwargs.get('anchor_tab_processor_version', None)) setattr(self, "_{}".format('anchor_tab_processor_version_metadata'), kwargs.get('anchor_tab_processor_version_metadata', None)) setattr(self, "_{}".format('anchor_units'), kwargs.get('anchor_units', None)) setattr(self, "_{}".format('anchor_units_metadata'), kwargs.get('anchor_units_metadata', None)) setattr(self, "_{}".format('anchor_x_offset'), kwargs.get('anchor_x_offset', None)) setattr(self, "_{}".format('anchor_x_offset_metadata'), kwargs.get('anchor_x_offset_metadata', None)) setattr(self, "_{}".format('anchor_y_offset'), kwargs.get('anchor_y_offset', None)) setattr(self, "_{}".format('anchor_y_offset_metadata'), kwargs.get('anchor_y_offset_metadata', None)) setattr(self, "_{}".format('bold'), kwargs.get('bold', None)) setattr(self, "_{}".format('bold_metadata'), kwargs.get('bold_metadata', None)) setattr(self, "_{}".format('conceal_value_on_document'), kwargs.get('conceal_value_on_document', None)) setattr(self, "_{}".format('conceal_value_on_document_metadata'), kwargs.get('conceal_value_on_document_metadata', None)) setattr(self, "_{}".format('conditional_parent_label'), kwargs.get('conditional_parent_label', None)) setattr(self, "_{}".format('conditional_parent_label_metadata'), kwargs.get('conditional_parent_label_metadata', None)) setattr(self, "_{}".format('conditional_parent_value'), 
kwargs.get('conditional_parent_value', None)) setattr(self, "_{}".format('conditional_parent_value_metadata'), kwargs.get('conditional_parent_value_metadata', None)) setattr(self, "_{}".format('custom_tab_id'), kwargs.get('custom_tab_id', None)) setattr(self, "_{}".format('custom_tab_id_metadata'), kwargs.get('custom_tab_id_metadata', None)) setattr(self, "_{}".format('disable_auto_size'), kwargs.get('disable_auto_size', None)) setattr(self, "_{}".format('disable_auto_size_metadata'), kwargs.get('disable_auto_size_metadata', None)) setattr(self, "_{}".format('document_id'), kwargs.get('document_id', None)) setattr(self, "_{}".format('document_id_metadata'), kwargs.get('document_id_metadata', None)) setattr(self, "_{}".format('error_details'), kwargs.get('error_details', None)) setattr(self, "_{}".format('font'), kwargs.get('font', None)) setattr(self, "_{}".format('font_color'), kwargs.get('font_color', None)) setattr(self, "_{}".format('font_color_metadata'), kwargs.get('font_color_metadata', None)) setattr(self, "_{}".format('font_metadata'), kwargs.get('font_metadata', None)) setattr(self, "_{}".format('font_size'), kwargs.get('font_size', None)) setattr(self, "_{}".format('font_size_metadata'), kwargs.get('font_size_metadata', None)) setattr(self, "_{}".format('form_order'), kwargs.get('form_order', None)) setattr(self, "_{}".format('form_order_metadata'), kwargs.get('form_order_metadata', None)) setattr(self, "_{}".format('form_page_label'), kwargs.get('form_page_label', None)) setattr(self, "_{}".format('form_page_label_metadata'), kwargs.get('form_page_label_metadata', None)) setattr(self, "_{}".format('form_page_number'), kwargs.get('form_page_number', None)) setattr(self, "_{}".format('form_page_number_metadata'), kwargs.get('form_page_number_metadata', None)) setattr(self, "_{}".format('height'), kwargs.get('height', None)) setattr(self, "_{}".format('height_metadata'), kwargs.get('height_metadata', None)) setattr(self, "_{}".format('italic'), kwargs.get('italic', None)) setattr(self, "_{}".format('italic_metadata'), kwargs.get('italic_metadata', None)) setattr(self, "_{}".format('locale_policy'), kwargs.get('locale_policy', None)) setattr(self, "_{}".format('locked'), kwargs.get('locked', None)) setattr(self, "_{}".format('locked_metadata'), kwargs.get('locked_metadata', None)) setattr(self, "_{}".format('max_length'), kwargs.get('max_length', None)) setattr(self, "_{}".format('max_length_metadata'), kwargs.get('max_length_metadata', None)) setattr(self, "_{}".format('merge_field'), kwargs.get('merge_field', None)) setattr(self, "_{}".format('merge_field_xml'), kwargs.get('merge_field_xml', None)) setattr(self, "_{}".format('name'), kwargs.get('name', None)) setattr(self, "_{}".format('name_metadata'), kwargs.get('name_metadata', None)) setattr(self, "_{}".format('original_value'), kwargs.get('original_value', None)) setattr(self, "_{}".format('original_value_metadata'), kwargs.get('original_value_metadata', None)) setattr(self, "_{}".format('page_number'), kwargs.get('page_number', None)) setattr(self, "_{}".format('page_number_metadata'), kwargs.get('page_number_metadata', None)) setattr(self, "_{}".format('recipient_id'), kwargs.get('recipient_id', None)) setattr(self, "_{}".format('recipient_id_guid'), kwargs.get('recipient_id_guid', None)) setattr(self, "_{}".format('recipient_id_guid_metadata'), kwargs.get('recipient_id_guid_metadata', None)) setattr(self, "_{}".format('recipient_id_metadata'), kwargs.get('recipient_id_metadata', None)) setattr(self, 
"_{}".format('required'), kwargs.get('required', None)) setattr(self, "_{}".format('required_metadata'), kwargs.get('required_metadata', None)) setattr(self, "_{}".format('smart_contract_information'), kwargs.get('smart_contract_information', None)) setattr(self, "_{}".format('source'), kwargs.get('source', None)) setattr(self, "_{}".format('status'), kwargs.get('status', None)) setattr(self, "_{}".format('status_metadata'), kwargs.get('status_metadata', None)) setattr(self, "_{}".format('tab_group_labels'), kwargs.get('tab_group_labels', None)) setattr(self, "_{}".format('tab_group_labels_metadata'), kwargs.get('tab_group_labels_metadata', None)) setattr(self, "_{}".format('tab_id'), kwargs.get('tab_id', None)) setattr(self, "_{}".format('tab_id_metadata'), kwargs.get('tab_id_metadata', None)) setattr(self, "_{}".format('tab_label'), kwargs.get('tab_label', None)) setattr(self, "_{}".format('tab_label_metadata'), kwargs.get('tab_label_metadata', None)) setattr(self, "_{}".format('tab_order'), kwargs.get('tab_order', None)) setattr(self, "_{}".format('tab_order_metadata'), kwargs.get('tab_order_metadata', None)) setattr(self, "_{}".format('tab_type'), kwargs.get('tab_type', None)) setattr(self, "_{}".format('tab_type_metadata'), kwargs.get('tab_type_metadata', None)) setattr(self, "_{}".format('template_locked'), kwargs.get('template_locked', None)) setattr(self, "_{}".format('template_locked_metadata'), kwargs.get('template_locked_metadata', None)) setattr(self, "_{}".format('template_required'), kwargs.get('template_required', None)) setattr(self, "_{}".format('template_required_metadata'), kwargs.get('template_required_metadata', None)) setattr(self, "_{}".format('tooltip'), kwargs.get('tooltip', None)) setattr(self, "_{}".format('tool_tip_metadata'), kwargs.get('tool_tip_metadata', None)) setattr(self, "_{}".format('underline'), kwargs.get('underline', None)) setattr(self, "_{}".format('underline_metadata'), kwargs.get('underline_metadata', None)) setattr(self, "_{}".format('value'), kwargs.get('value', None)) setattr(self, "_{}".format('value_metadata'), kwargs.get('value_metadata', None)) setattr(self, "_{}".format('width'), kwargs.get('width', None)) setattr(self, "_{}".format('width_metadata'), kwargs.get('width_metadata', None)) setattr(self, "_{}".format('x_position'), kwargs.get('x_position', None)) setattr(self, "_{}".format('x_position_metadata'), kwargs.get('x_position_metadata', None)) setattr(self, "_{}".format('y_position'), kwargs.get('y_position', None)) setattr(self, "_{}".format('y_position_metadata'), kwargs.get('y_position_metadata', None)) @property def anchor_allow_white_space_in_characters(self): return self._anchor_allow_white_space_in_characters @anchor_allow_white_space_in_characters.setter def anchor_allow_white_space_in_characters(self, anchor_allow_white_space_in_characters): self._anchor_allow_white_space_in_characters = anchor_allow_white_space_in_characters @property def anchor_allow_white_space_in_characters_metadata(self): return self._anchor_allow_white_space_in_characters_metadata @anchor_allow_white_space_in_characters_metadata.setter def anchor_allow_white_space_in_characters_metadata(self, anchor_allow_white_space_in_characters_metadata): self._anchor_allow_white_space_in_characters_metadata = anchor_allow_white_space_in_characters_metadata @property def anchor_case_sensitive(self): return self._anchor_case_sensitive @anchor_case_sensitive.setter def anchor_case_sensitive(self, anchor_case_sensitive): self._anchor_case_sensitive = 
anchor_case_sensitive @property def anchor_case_sensitive_metadata(self): return self._anchor_case_sensitive_metadata @anchor_case_sensitive_metadata.setter def anchor_case_sensitive_metadata(self, anchor_case_sensitive_metadata): self._anchor_case_sensitive_metadata = anchor_case_sensitive_metadata @property def anchor_horizontal_alignment(self): return self._anchor_horizontal_alignment @anchor_horizontal_alignment.setter def anchor_horizontal_alignment(self, anchor_horizontal_alignment): self._anchor_horizontal_alignment = anchor_horizontal_alignment @property def anchor_horizontal_alignment_metadata(self): return self._anchor_horizontal_alignment_metadata @anchor_horizontal_alignment_metadata.setter def anchor_horizontal_alignment_metadata(self, anchor_horizontal_alignment_metadata): self._anchor_horizontal_alignment_metadata = anchor_horizontal_alignment_metadata @property def anchor_ignore_if_not_present(self): return self._anchor_ignore_if_not_present @anchor_ignore_if_not_present.setter def anchor_ignore_if_not_present(self, anchor_ignore_if_not_present): self._anchor_ignore_if_not_present = anchor_ignore_if_not_present @property def anchor_ignore_if_not_present_metadata(self): return self._anchor_ignore_if_not_present_metadata @anchor_ignore_if_not_present_metadata.setter def anchor_ignore_if_not_present_metadata(self, anchor_ignore_if_not_present_metadata): self._anchor_ignore_if_not_present_metadata = anchor_ignore_if_not_present_metadata @property def anchor_match_whole_word(self): return self._anchor_match_whole_word @anchor_match_whole_word.setter def anchor_match_whole_word(self, anchor_match_whole_word): self._anchor_match_whole_word = anchor_match_whole_word @property def anchor_match_whole_word_metadata(self): return self._anchor_match_whole_word_metadata @anchor_match_whole_word_metadata.setter def anchor_match_whole_word_metadata(self, anchor_match_whole_word_metadata): self._anchor_match_whole_word_metadata = anchor_match_whole_word_metadata @property def anchor_string(self): return self._anchor_string @anchor_string.setter def anchor_string(self, anchor_string): self._anchor_string = anchor_string @property def anchor_string_metadata(self): return self._anchor_string_metadata @anchor_string_metadata.setter def anchor_string_metadata(self, anchor_string_metadata): self._anchor_string_metadata = anchor_string_metadata @property def anchor_tab_processor_version(self): return self._anchor_tab_processor_version @anchor_tab_processor_version.setter def anchor_tab_processor_version(self, anchor_tab_processor_version): self._anchor_tab_processor_version = anchor_tab_processor_version @property def anchor_tab_processor_version_metadata(self): return self._anchor_tab_processor_version_metadata @anchor_tab_processor_version_metadata.setter
MIT License
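A short sketch of the setter above; it simply stores the given PropertyMetadata value on the tab. The import via docusign_esign.models and the anchor values are assumptions for illustration.

from docusign_esign.models import CommissionCounty, PropertyMetadata

tab = CommissionCounty(anchor_string='/county/', anchor_tab_processor_version='v1_3')
tab.anchor_tab_processor_version_metadata = PropertyMetadata()  # stored as-is on the tab
print(tab.anchor_tab_processor_version_metadata)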
red-hat-storage/ocs-ci
ocs_ci/ocs/resources/bucketclass.py
bucket_class_factory
python
def bucket_class_factory(
    request, mcg_obj, backingstore_factory, namespace_store_factory
):
    interfaces = {
        "oc": mcg_obj.oc_create_bucketclass,
        "cli": mcg_obj.cli_create_bucketclass,
    }
    created_bucket_classes = []

    def _create_bucket_class(bucket_class_dict):
        if "interface" in bucket_class_dict:
            interface = bucket_class_dict["interface"]
            if interface.lower() not in interfaces.keys():
                raise RuntimeError(
                    f"Invalid interface type received: {interface}. "
                    f'available types: {", ".join(interfaces)}'
                )
        else:
            interface = "OC"

        namespace_policy = {}
        backingstores = None
        namespacestores = None
        placement_policy = None

        if "namespace_policy_dict" in bucket_class_dict:
            if "namespacestore_dict" in bucket_class_dict["namespace_policy_dict"]:
                nss_dict = bucket_class_dict["namespace_policy_dict"]["namespacestore_dict"]
                namespacestores = namespace_store_factory(interface, nss_dict)
                namespace_policy["type"] = bucket_class_dict["namespace_policy_dict"]["type"]
                if namespace_policy["type"] == "Cache":
                    namespace_policy["cache"] = {
                        "hubResource": namespacestores[0].name,
                        "caching": {
                            "ttl": bucket_class_dict["namespace_policy_dict"]["ttl"]
                        },
                    }
                else:
                    namespace_policy["read_resources"] = [
                        nss.name for nss in namespacestores
                    ]
                    namespace_policy["write_resource"] = namespacestores[0].name

            elif "namespacestores" in bucket_class_dict["namespace_policy_dict"]:
                namespacestores = bucket_class_dict["namespace_policy_dict"]["namespacestores"]
                namespace_policy["type"] = bucket_class_dict["namespace_policy_dict"]["type"]
                if namespace_policy["type"] == "Cache":
                    namespace_policy["cache"] = {
                        "hubResource": namespacestores[0].name,
                        "caching": {
                            "ttl": bucket_class_dict["namespace_policy_dict"]["ttl"]
                        },
                    }
                else:
                    namespace_policy["read_resources"] = [
                        nss.name for nss in namespacestores
                    ]
                    namespace_policy["write_resource"] = namespacestores[0].name

        elif "backingstore_dict" in bucket_class_dict:
            backingstores = [
                backingstore
                for backingstore in backingstore_factory(
                    interface, bucket_class_dict["backingstore_dict"]
                )
            ]
        else:
            backingstores = [
                BackingStore(constants.DEFAULT_NOOBAA_BACKINGSTORE, method="oc")
            ]

        if "placement_policy" in bucket_class_dict:
            placement_policy = bucket_class_dict["placement_policy"]
        else:
            placement_policy = "Spread"

        bucket_class_name = create_unique_resource_name(
            resource_description="bucketclass", resource_type=interface.lower()
        )
        interfaces[interface.lower()](
            name=bucket_class_name,
            backingstores=backingstores,
            placement=placement_policy,
            namespace_policy=namespace_policy,
        )
        bucket_class_object = BucketClass(
            bucket_class_name,
            backingstores,
            namespacestores,
            placement_policy,
            namespace_policy,
        )
        created_bucket_classes.append(bucket_class_object)
        return bucket_class_object

    def bucket_class_cleanup():
        for bucket_class in created_bucket_classes:
            try:
                bucket_class.delete()
            except CommandFailed as e:
                if "NotFound" in str(e):
                    log.warning(f"{bucket_class.name} could not be found in cleanup")
                else:
                    raise

    request.addfinalizer(bucket_class_cleanup)
    return _create_bucket_class
Create a bucket class factory. Calling this fixture creates a new custom bucket class. For a custom backingstore(s), provide the 'backingstore_dict' parameter. Args: request (object): Pytest built-in fixture mcg_obj (MCG): An MCG object containing the MCG S3 connection credentials backingstore_factory: Factory for backing store creation
https://github.com/red-hat-storage/ocs-ci/blob/81bc3dd3c2bccbf875ffa8fa5fa2eb0ac9d52b7e/ocs_ci/ocs/resources/bucketclass.py#L41-L199
import logging

from ocs_ci.ocs import constants
from ocs_ci.ocs.resources.backingstore import BackingStore
from ocs_ci.ocs.exceptions import CommandFailed
from ocs_ci.framework import config
from ocs_ci.ocs.ocp import OCP
from ocs_ci.helpers.helpers import create_unique_resource_name

log = logging.getLogger(__name__)


class BucketClass:
    def __init__(
        self, name, backingstores, namespacestores, placement_policy, namespace_policy
    ):
        self.name = name
        self.backingstores = backingstores
        self.namespacestores = namespacestores
        self.placement_policy = placement_policy
        self.namespace_policy = namespace_policy

    def delete(self):
        log.info(f"Cleaning up bucket class {self.name}")
        OCP(namespace=config.ENV_DATA["cluster_namespace"]).exec_oc_cmd(
            command=f"delete bucketclass {self.name}", out_yaml_format=False
        )
        log.info(f"Verifying whether bucket class {self.name} exists after deletion")
MIT License
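A hypothetical pytest test using the bucket_class_factory fixture above; the dictionary keys mirror the ones the factory inspects ('interface', 'backingstore_dict', 'placement_policy'), while the backing store specification itself is illustrative.

def test_custom_bucketclass(bucket_class_factory):
    bucketclass = bucket_class_factory(
        {
            "interface": "OC",
            "backingstore_dict": {"aws": [(1, "eu-central-1")]},  # illustrative spec
            "placement_policy": "Spread",
        }
    )
    # The factory registers the bucket class for cleanup and returns a BucketClass object.
    assert bucketclass.placement_policy == "Spread"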
csyben/pyro-nn
pyronn/ct_reconstruction/geometry/geometry_fan_2d.py
GeometryFan2D.set_trajectory
python
def set_trajectory(self, central_ray_vectors):
    self.central_ray_vectors = np.array(central_ray_vectors, self.np_dtype)
Sets the member central_ray_vectors. Args: central_ray_vectors: np.array defining the trajectory central_ray_vectors.
https://github.com/csyben/pyro-nn/blob/726b62b57d7093ff0f3e675e66d976d989eebc0a/pyronn/ct_reconstruction/geometry/geometry_fan_2d.py#L38-L44
import numpy as np

from .geometry_base import GeometryBase


class GeometryFan2D(GeometryBase):

    def __init__(self,
                 volume_shape, volume_spacing,
                 detector_shape, detector_spacing,
                 number_of_projections, angular_range,
                 source_detector_distance, source_isocenter_distance):
        super().__init__(volume_shape, volume_spacing,
                         [detector_shape], [detector_spacing],
                         number_of_projections, angular_range,
                         source_detector_distance, source_isocenter_distance)

        self.fan_angle = np.arctan(
            ((self.detector_shape[0] - 1) / 2.0 * self.detector_spacing[0])
            / self.source_detector_distance)
Apache License 2.0
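A sketch of set_trajectory in use, assuming GeometryBase stores the constructor arguments and defines np_dtype as in the library; the scan parameters below are made up.

import numpy as np

number_of_projections = 360
angular_range = 2 * np.pi

geometry = GeometryFan2D([256, 256], [1.0, 1.0], 512, 1.0,
                         number_of_projections, angular_range,
                         source_detector_distance=1200.0,
                         source_isocenter_distance=750.0)

# One unit central-ray vector per projection angle.
angles = np.linspace(0, angular_range, number_of_projections, endpoint=False)
central_ray_vectors = np.stack([np.cos(angles), np.sin(angles)], axis=-1)
geometry.set_trajectory(central_ray_vectors)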
anymesh/anymesh-python
example/urwid/graphics.py
PythonLogo.__init__
python
def __init__(self):
    blu = AttrSpec('light blue', 'default')
    yel = AttrSpec('yellow', 'default')
    width = 17
    self._canvas = Text([
        (blu, " ______\n"),
        (blu, " _|_o__ |"), (yel, "__\n"),
        (blu, " | _____|"), (yel, " |\n"),
        (blu, " |__| "), (yel, "______|\n"),
        (yel, " |____o_|")]).render((width,))
Create canvas containing an ASCII version of the Python Logo and store it.
https://github.com/anymesh/anymesh-python/blob/017b7808f2fbdc765604488d325678c28be438c0/example/urwid/graphics.py#L884-L897
from urwid.util import decompose_tagmarkup, get_encoding_mode from urwid.canvas import CompositeCanvas, CanvasJoin, TextCanvas, CanvasCombine, SolidCanvas from urwid.widget import WidgetMeta, Widget, BOX, FIXED, FLOW, nocache_widget_render, nocache_widget_render_instance, fixed_size, WidgetWrap, Divider, SolidFill, Text, CENTER, CLIP from urwid.container import Pile, Columns from urwid.display_common import AttrSpec from urwid.decoration import WidgetDecoration class BigText(Widget): _sizing = frozenset([FIXED]) def __init__(self, markup, font): self.set_font(font) self.set_text(markup) def set_text(self, markup): self.text, self.attrib = decompose_tagmarkup(markup) self._invalidate() def get_text(self): return self.text, self.attrib def set_font(self, font): self.font = font self._invalidate() def pack(self, size=None, focus=False): rows = self.font.height cols = 0 for c in self.text: cols += self.font.char_width(c) return cols, rows def render(self, size, focus=False): fixed_size(size) a = None ai = ak = 0 o = [] rows = self.font.height attrib = self.attrib + [(None, len(self.text))] for ch in self.text: if not ak: a, ak = attrib[ai] ai += 1 ak -= 1 width = self.font.char_width(ch) if not width: continue c = self.font.render(ch) if a is not None: c = CompositeCanvas(c) c.fill_attr(a) o.append((c, None, False, width)) if o: canv = CanvasJoin(o) else: canv = TextCanvas([""] * rows, maxcol=0, check_width=False) canv = CompositeCanvas(canv) canv.set_depends([]) return canv class LineBox(WidgetDecoration, WidgetWrap): def __init__(self, original_widget, title="", tlcorner=u'┌', tline=u'─', lline=u'│', trcorner=u'┐', blcorner=u'└', rline=u'│', bline=u'─', brcorner=u'┘'): tline, bline = Divider(tline), Divider(bline) lline, rline = SolidFill(lline), SolidFill(rline) tlcorner, trcorner = Text(tlcorner), Text(trcorner) blcorner, brcorner = Text(blcorner), Text(brcorner) self.title_widget = Text(self.format_title(title)) self.tline_widget = Columns([ tline, ('flow', self.title_widget), tline, ]) top = Columns([ ('fixed', 1, tlcorner), self.tline_widget, ('fixed', 1, trcorner) ]) middle = Columns([ ('fixed', 1, lline), original_widget, ('fixed', 1, rline), ], box_columns=[0, 2], focus_column=1) bottom = Columns([ ('fixed', 1, blcorner), bline, ('fixed', 1, brcorner) ]) pile = Pile([('flow', top), middle, ('flow', bottom)], focus_item=1) WidgetDecoration.__init__(self, original_widget) WidgetWrap.__init__(self, pile) def format_title(self, text): if len(text) > 0: return " %s " % text else: return "" def set_title(self, text): self.title_widget.set_text(self.format_title(text)) self.tline_widget._invalidate() class BarGraphMeta(WidgetMeta): def __init__(cls, name, bases, d): super(BarGraphMeta, cls).__init__(name, bases, d) if "get_data" in d: cls.render = nocache_widget_render(cls) cls._get_data = cls.get_data cls.get_data = property( lambda self: self._get_data, nocache_bargraph_get_data) def nocache_bargraph_get_data(self, get_data_fn): self.render = nocache_widget_render_instance(self) self._get_data = get_data_fn class BarGraphError(Exception): pass class BarGraph(Widget): __metaclass__ = BarGraphMeta _sizing = frozenset([BOX]) ignore_focus = True eighths = u' ▁▂▃▄▅▆▇' hlines = u'_⎺⎻─⎼⎽' def __init__(self, attlist, hatt=None, satt=None): self.set_segment_attributes(attlist, hatt, satt) self.set_data([], 1, None) self.set_bar_width(None) def set_segment_attributes(self, attlist, hatt=None, satt=None): self.attr = [] self.char = [] if len(attlist) < 2: raise BarGraphError("attlist must include at 
least background and seg1: %r" % (attlist,)) assert len(attlist) >= 2, 'must at least specify bg and fg!' for a in attlist: if type(a) != tuple: self.attr.append(a) self.char.append(' ') else: attr, ch = a self.attr.append(attr) self.char.append(ch) self.hatt = [] if hatt is None: hatt = [self.attr[0]] elif type(hatt) != list: hatt = [hatt] self.hatt = hatt if satt is None: satt = {} for i in satt.items(): try: (fg, bg), attr = i except ValueError: raise BarGraphError("satt not in (fg,bg:attr) form: %r" % (i,)) if type(fg) != int or fg >= len(attlist): raise BarGraphError("fg not valid integer: %r" % (fg,)) if type(bg) != int or bg >= len(attlist): raise BarGraphError("bg not valid integer: %r" % (fg,)) if fg <= bg: raise BarGraphError("fg (%s) not > bg (%s)" % (fg, bg)) self.satt = satt def set_data(self, bardata, top, hlines=None): if hlines is not None: hlines = hlines[:] hlines.sort() self.data = bardata, top, hlines self._invalidate() def _get_data(self, size): (maxcol, maxrow) = size bardata, top, hlines = self.data widths = self.calculate_bar_widths((maxcol, maxrow), bardata) if len(bardata) > len(widths): return bardata[:len(widths)], top, hlines return bardata, top, hlines def set_bar_width(self, width): assert width is None or width > 0 self.bar_width = width self._invalidate() def calculate_bar_widths(self, size, bardata): (maxcol, maxrow) = size if self.bar_width is not None: return [self.bar_width] * min( len(bardata), maxcol / self.bar_width) if len(bardata) >= maxcol: return [1] * maxcol widths = [] grow = maxcol remain = len(bardata) for row in bardata: w = int(float(grow) / remain + 0.5) widths.append(w) grow -= w remain -= 1 return widths def selectable(self): return False def use_smoothed(self): return self.satt and get_encoding_mode() == "utf8" def calculate_display(self, size): (maxcol, maxrow) = size bardata, top, hlines = self.get_data((maxcol, maxrow)) widths = self.calculate_bar_widths((maxcol, maxrow), bardata) if self.use_smoothed(): disp = calculate_bargraph_display(bardata, top, widths, maxrow * 8) disp = self.smooth_display(disp) else: disp = calculate_bargraph_display(bardata, top, widths, maxrow) if hlines: disp = self.hlines_display(disp, top, hlines, maxrow) return disp def hlines_display(self, disp, top, hlines, maxrow): if self.use_smoothed(): shiftr = 0 r = [(0.2, 1), (0.4, 2), (0.6, 3), (0.8, 4), (1.0, 5), ] else: shiftr = 0.5 r = [(1.0, 0), ] rhl = [] for h in hlines: rh = float(top - h) * maxrow / top - shiftr if rh < 0: continue rhl.append(rh) hrows = [] last_i = -1 for rh in rhl: i = int(rh) if i == last_i: continue f = rh - i for spl, chnum in r: if f < spl: hrows.append((i, chnum)) break last_i = i def fill_row(row, chnum): rout = [] for bar_type, width in row: if (type(bar_type) == int and len(self.hatt) > bar_type): rout.append(((bar_type, chnum), width)) continue rout.append((bar_type, width)) return rout o = [] k = 0 rnum = 0 for y_count, row in disp: if k >= len(hrows): o.append((y_count, row)) continue end_block = rnum + y_count while k < len(hrows) and hrows[k][0] < end_block: i, chnum = hrows[k] if i - rnum > 0: o.append((i - rnum, row)) o.append((1, fill_row(row, chnum))) rnum = i + 1 k += 1 if rnum < end_block: o.append((end_block - rnum, row)) rnum = end_block return o def smooth_display(self, disp): o = [] r = 0 def seg_combine((bt1, w1), (bt2, w2)): if (bt1, w1) == (bt2, w2): return (bt1, w1), None, None wmin = min(w1, w2) l1 = l2 = None if w1 > w2: l1 = (bt1, w1 - w2) elif w2 > w1: l2 = (bt2, w2 - w1) if type(bt1) == tuple: return 
(bt1, wmin), l1, l2 if (bt2, bt1) not in self.satt: if r < 4: return (bt2, wmin), l1, l2 return (bt1, wmin), l1, l2 return ((bt2, bt1, 8 - r), wmin), l1, l2 def row_combine_last(count, row): o_count, o_row = o[-1] row = row[:] o_row = o_row[:] l = [] while row: (bt, w), l1, l2 = seg_combine( o_row.pop(0), row.pop(0)) if l and l[-1][0] == bt: l[-1] = (bt, l[-1][1] + w) else: l.append((bt, w)) if l1: o_row = [l1] + o_row if l2: row = [l2] + row assert not o_row o[-1] = (o_count + count, l) for y_count, row in disp: if r: count = min(8 - r, y_count) row_combine_last(count, row) y_count -= count r += count r = r % 8 if not y_count: continue assert r == 0 if y_count > 7: o.append((y_count // 8 * 8, row)) y_count = y_count % 8 if not y_count: continue o.append((y_count, row)) r = y_count return [(y // 8, row) for (y, row) in o] def render(self, size, focus=False): (maxcol, maxrow) = size disp = self.calculate_display((maxcol, maxrow)) combinelist = [] for y_count, row in disp: l = [] for bar_type, width in row: if type(bar_type) == tuple: if len(bar_type) == 3: fg, bg, k = bar_type a = self.satt[(fg, bg)] t = self.eighths[k] * width else: bg, k = bar_type a = self.hatt[bg] t = self.hlines[k] * width else: a = self.attr[bar_type] t = self.char[bar_type] * width l.append((a, t)) c = Text(l).render((maxcol,)) assert c.rows() == 1, "Invalid characters in BarGraph!" combinelist += [(c, None, False)] * y_count canv = CanvasCombine(combinelist) return canv def calculate_bargraph_display(bardata, top, bar_widths, maxrow): assert len(bardata) == len(bar_widths) maxcol = sum(bar_widths) rows = [None] * maxrow def add_segment(seg_num, col, row, width, rows=rows): if rows[row]: last_seg, last_col, last_end = rows[row][-1] if last_end > col: if last_col >= col: del rows[row][-1] else: rows[row][-1] = (last_seg, last_col, col) elif last_seg == seg_num and last_end == col: rows[row][-1] = (last_seg, last_col, last_end + width) return elif rows[row] is None: rows[row] = [] rows[row].append((seg_num, col, col + width)) col = 0 barnum = 0 for bar in bardata: width = bar_widths[barnum] if width < 1: continue tallest = maxrow segments = scale_bar_values(bar, top, maxrow) for k in range(len(bar) - 1, -1, -1): s = segments[k] if s >= maxrow: continue if s < 0: s = 0 if s < tallest: tallest = s add_segment(k + 1, col, s, width) col += width barnum += 1 rowsets = [] y_count = 0 last = [(0, maxcol)] for r in rows: if r is None: y_count = y_count + 1 continue if y_count: rowsets.append((y_count, last)) y_count = 0 i = 0 la, ln = last[i] c = 0 o = [] for seg_num, start, end in r: while start > c + ln: o.append((la, ln)) i += 1 c += ln la, ln = last[i] if la == seg_num: o.append((la, end - c)) else: if start - c > 0: o.append((la, start - c)) o.append((seg_num, end - start)) if end == maxcol: i = len(last) break while end >= c + ln: i += 1 c += ln la, ln = last[i] if la != seg_num: ln = c + ln - end c = end continue oa, on = o[-1] on += c + ln - end o[-1] = oa, on i += 1 c += ln if c == maxcol: break assert i < len(last), repr((on, maxcol)) la, ln = last[i] if i < len(last): o += [(la, ln)] + last[i + 1:] last = o y_count += 1 if y_count: rowsets.append((y_count, last)) return rowsets class GraphVScale(Widget): _sizing = frozenset([BOX]) def __init__(self, labels, top): self.set_scale(labels, top) def set_scale(self, labels, top): labels = labels[:] labels.sort() labels.reverse() self.pos = [] self.txt = [] for y, markup in labels: self.pos.append(y) self.txt.append(Text(markup)) self.top = top def selectable(self): return 
False def render(self, size, focus=False): (maxcol, maxrow) = size pl = scale_bar_values(self.pos, self.top, maxrow) combinelist = [] rows = 0 for p, t in zip(pl, self.txt): p -= 1 if p >= maxrow: break if p < rows: continue c = t.render((maxcol,)) if p > rows: run = p - rows c = CompositeCanvas(c) c.pad_trim_top_bottom(run, 0) rows += c.rows() combinelist.append((c, None, False)) if not combinelist: return SolidCanvas(" ", size[0], size[1]) c = CanvasCombine(combinelist) if maxrow - rows: c.pad_trim_top_bottom(0, maxrow - rows) return c def scale_bar_values( bar, top, maxrow ): return [maxrow - int(float(v) * maxrow / top + 0.5) for v in bar] class ProgressBar(Widget): _sizing = frozenset([FLOW]) eighths = u' ▏▎▍▌▋▊▉' text_align = CENTER def __init__(self, normal, complete, current=0, done=100, satt=None): self.normal = normal self.complete = complete self._current = current self._done = done self.satt = satt def set_completion(self, current): self._current = current self._invalidate() current = property(lambda self: self._current, set_completion) def _set_done(self, done): self._done = done self._invalidate() done = property(lambda self: self._done, _set_done) def rows(self, size, focus=False): return 1 def get_text(self): percent = min(100, max(0, int(self.current * 100 / self.done))) return str(percent) + " %" def render(self, size, focus=False): (maxcol,) = size txt = Text(self.get_text(), self.text_align, CLIP) c = txt.render((maxcol,)) cf = float(self.current) * maxcol / self.done ccol = int(cf) cs = 0 if self.satt is not None: cs = int((cf - ccol) * 8) if ccol < 0 or (ccol == 0 and cs == 0): c._attr = [[(self.normal, maxcol)]] elif ccol >= maxcol: c._attr = [[(self.complete, maxcol)]] elif cs and c._text[0][ccol] == " ": t = c._text[0] cenc = self.eighths[cs].encode("utf-8") c._text[0] = t[:ccol] + cenc + t[ccol + 1:] a = [] if ccol > 0: a.append((self.complete, ccol)) a.append((self.satt, len(cenc))) if maxcol - ccol - 1 > 0: a.append((self.normal, maxcol - ccol - 1)) c._attr = [a] c._cs = [[(None, len(c._text[0]))]] else: c._attr = [[(self.complete, ccol), (self.normal, maxcol - ccol)]] return c class PythonLogo(Widget): _sizing = frozenset([FIXED])
MIT License
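A tiny sketch showing that PythonLogo pre-renders its canvas in __init__; inspecting the private _canvas attribute here is purely illustrative.

logo = PythonLogo()
canvas = logo._canvas          # TextCanvas built once from the two-colour ASCII art
print(canvas.cols(), canvas.rows())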
deepmind/acme
acme/utils/loggers/terminal.py
serialize
python
def serialize(values: base.LoggingData) -> str:
    return ' | '.join(f'{_format_key(k)} = {_format_value(v)}'
                      for k, v in sorted(values.items()))
Converts `values` to a pretty-printed string. This takes a dictionary `values` whose keys are strings and returns a formatted string such that each [key, value] pair is separated by ' = ' and each entry is separated by ' | '. The keys are sorted alphabetically to ensure a consistent order, and snake case is split into words. For example: values = {'a': 1, 'b' = 2.33333333, 'c': 'hello', 'big_value': 10} # Returns 'A = 1 | B = 2.333 | Big Value = 10 | C = hello' values_string = serialize(values) Args: values: A dictionary with string keys. Returns: A formatted string.
https://github.com/deepmind/acme/blob/39232315e1761219bcc98e7a4ecdd308a42b00e4/acme/utils/loggers/terminal.py#L38-L59
import logging
import time
from typing import Any, Callable

from acme.utils.loggers import base

import numpy as np


def _format_key(key: str) -> str:
    return key.replace('_', ' ').title()


def _format_value(value: Any) -> str:
    value = base.to_numpy(value)
    if isinstance(value, (float, np.number)):
        return f'{value:0.3f}'
    return f'{value}'
Apache License 2.0
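The docstring's own example, written out as a runnable call to serialize:

values = {'a': 1, 'b': 2.33333333, 'c': 'hello', 'big_value': 10}
print(serialize(values))
# A = 1 | B = 2.333 | Big Value = 10 | C = hello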
dopefishh/pympi
pympi/Praat.py
TextGrid.get_tier
python
def get_tier(self, name_num):
    return self.tiers[name_num - 1] if isinstance(name_num, int) else \
        [i for i in self.tiers if i.name == name_num][0]
Gives a tier, when multiple tiers exist with that name only the first is returned. :param name_num: Name or number of the tier to return. :type name_num: int or str :returns: The tier. :raises IndexError: If the tier doesn't exist.
https://github.com/dopefishh/pympi/blob/c17292c21dacb747a20fc1069450792b52c8a6f8/pympi/Praat.py#L175-L185
import codecs import re import struct VERSION = '1.70.2' class TextGrid: def __init__(self, file_path=None, xmin=0, xmax=None, codec='utf-8'): self.tiers = [] self.codec = codec if not file_path: if xmax is None: raise Exception('No xmax specified') self.tier_num = 0 self.xmin = xmin self.xmax = xmax else: with open(file_path, 'rb') as f: self.from_file(f, codec) def from_file(self, ifile, codec='ascii'): if ifile.read(12) == b'ooBinaryFile': def bin2str(ifile): textlen = struct.unpack('>h', ifile.read(2))[0] if textlen >= 0: return ifile.read(textlen).decode('ascii') elif textlen == -1: textlen = struct.unpack('>h', ifile.read(2))[0] data = ifile.read(textlen*2) charlist = (data[i:i+2] for i in range(0, len(data), 2)) return ''.join( chr(struct.unpack('>h', i)[0]) for i in charlist) ifile.read(ord(ifile.read(1))) self.xmin = struct.unpack('>d', ifile.read(8))[0] self.xmax = struct.unpack('>d', ifile.read(8))[0] ifile.read(1) self.tier_num = struct.unpack('>i', ifile.read(4))[0] for i in range(self.tier_num): tier_type = ifile.read(ord(ifile.read(1))).decode('ascii') name = bin2str(ifile) tier = Tier(0, 0, name=name, tier_type=tier_type) self.tiers.append(tier) tier.xmin = struct.unpack('>d', ifile.read(8))[0] tier.xmax = struct.unpack('>d', ifile.read(8))[0] nint = struct.unpack('>i', ifile.read(4))[0] for i in range(nint): x1 = struct.unpack('>d', ifile.read(8))[0] if tier.tier_type == 'IntervalTier': x2 = struct.unpack('>d', ifile.read(8))[0] text = bin2str(ifile) if tier.tier_type == 'IntervalTier': tier.intervals.append((x1, x2, text)) elif tier.tier_type == 'TextTier': tier.intervals.append((x1, text)) else: raise Exception('Tiertype does not exist.') else: def nn(ifile, pat): line = next(ifile).decode(codec) return pat.search(line).group(1) regfloat = re.compile(r'([\d.]+)\s*$', flags=re.UNICODE) regint = re.compile(r'([\d]+)\s*$', flags=re.UNICODE) regstr = re.compile(r'"(.*)"\s*$', flags=re.UNICODE) next(ifile), next(ifile), next(ifile) self.xmin = float(nn(ifile, regfloat)) self.xmax = float(nn(ifile, regfloat)) line = next(ifile) short = line.strip() == b'<exists>' self.tier_num = int(nn(ifile, regint)) not short and next(ifile) for i in range(self.tier_num): not short and next(ifile) tier_type = nn(ifile, regstr) name = nn(ifile, regstr) tier = Tier(0, 0, name=name, tier_type=tier_type) self.tiers.append(tier) tier.xmin = float(nn(ifile, regfloat)) tier.xmax = float(nn(ifile, regfloat)) for i in range(int(nn(ifile, regint))): not short and next(ifile) x1 = float(nn(ifile, regfloat)) if tier.tier_type == 'IntervalTier': x2 = float(nn(ifile, regfloat)) t = nn(ifile, regstr) tier.intervals.append((x1, x2, t)) elif tier.tier_type == 'TextTier': t = nn(ifile, regstr) tier.intervals.append((x1, t)) def sort_tiers(self, key=lambda x: x.name): self.tiers.sort(key=key) def add_tier(self, name, tier_type='IntervalTier', number=None): if number is None: number = 1 if not self.tiers else len(self.tiers)+1 elif number < 1 or number > len(self.tiers): raise ValueError('Number not in [1..{}]'.format(len(self.tiers))) elif tier_type not in Tier.P_TIERS: raise ValueError('tier_type has to be in {}'.format(Tier.P_TIERS)) self.tiers.insert(number-1, Tier(self.xmin, self.xmax, name, tier_type)) return self.tiers[number-1] def remove_tier(self, name_num): if isinstance(name_num, int): del(self.tiers[name_num-1]) else: self.tiers = [i for i in self.tiers if i.name != name_num]
MIT License
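A minimal sketch of get_tier on an in-memory TextGrid; the tier name is arbitrary.

tg = TextGrid(xmax=10)                  # empty grid covering 0-10 seconds
words = tg.add_tier('words')            # IntervalTier by default

assert tg.get_tier(1) is words          # lookup by 1-based position
assert tg.get_tier('words') is words    # lookup by name returns the first match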
krassowski/nbpipeline
nbpipeline/rules.py
NotebookRule.__init__
python
def __init__(
    self, *args, notebook,
    diff=True,
    deduce_io=True,
    deduce_io_from_data_vault=True,
    execute=True,
    **kwargs
):
    super().__init__(*args, **kwargs)
    self.todos = []
    self.notebook = notebook
    self.absolute_notebook_path = Path(notebook).absolute()
    self.generate_diff = diff
    self.diff = None
    self.text_diff = None
    self.fidelity = None
    self.images = []
    self.headers = []
    self.status = None
    self.execute = execute

    from datetime import datetime, timedelta
    month_ago = (datetime.today() - timedelta(days=30)).timestamp()
    self.changes = run_command(
        f'git rev-list --max-age {month_ago} HEAD --count {self.notebook}'
    )

    if deduce_io:
        self.deduce_io_from_tags()

    if deduce_io_from_data_vault:
        self.deduce_io_from_data_vault()
Rule for Jupyter Notebooks Args: deduce_io: whether to automatically deduce inputs and outputs from the code cells tagged "inputs" and "outputs"; local variables defined in the cell will be evaluated and used as inputs or outputs. If you want to generate paths with a helper function for brevity, assign a dict of {variable: path} to `__inputs__`/`__outputs__` in the tagged cell using `io.create_paths()` helper. diff: whether to generate diffs against the current state of the notebook deduce_io_from_data_vault: whether to deduce the inputs and outputs from `data_vault` magics (`%vault store` and `%vault import`), see https://github.com/krassowski/data-vault execute: if False, the notebook will note be run; useful to include final "leaf" notebooks which may take too long to run, but are not essential to the overall results
https://github.com/krassowski/nbpipeline/blob/c2337db2b19767b2cdfcc9bf019e2bf687bb4423/nbpipeline/rules.py#L247-L289
import json import pickle import re from copy import copy, deepcopy from functools import lru_cache from json import JSONDecodeError from os import system, walk, sep from abc import ABC, abstractmethod from pathlib import Path import time from subprocess import check_output from tempfile import NamedTemporaryFile from warnings import warn from .utils import subset_dict_preserving_order, run_command, nice_time class no_quotes(str): def __repr__(self): original = super().__repr__() return original[1:-1] class Rule(ABC): cache_dir: Path tmp_dir: Path is_setup = False rules = {} def __init__(self, name, **kwargs): assert name not in self.rules self.name = name self.execution_time = None self.rules[name] = self extra_kwargs = set(kwargs) - {'output', 'input', 'group', 'parameters'} if extra_kwargs: raise Exception(f'Unrecognized keyword arguments to {self.__class__.__name__}: {extra_kwargs}') self.arguments = subset_dict_preserving_order( kwargs, {'input', 'output', 'parameters'} ) self.group = kwargs.get('group', None) self.outputs = {} self.inputs = {} self.parameters = {} if 'output' in kwargs: output = kwargs['output'] self.outputs = output if isinstance(output, dict) else {'': output} if 'input' in kwargs: input = kwargs['input'] self.inputs = input if isinstance(input, dict) else {'': input} if 'parameters' in kwargs: self.parameters = kwargs['parameters'] @property def has_inputs(self): return len(self.inputs) != 0 @property def has_outputs(self): return len(self.outputs) != 0 @abstractmethod def run(self, use_cache: bool) -> int: if not self.is_setup: raise ValueError('Please set up the rules class settings with Rule.setup() first!') @classmethod def setup(cls, cache_dir: Path, tmp_dir: Path): cls.cache_dir = Path(cache_dir) cls.tmp_dir = Path(tmp_dir) cls.is_setup = True @abstractmethod def to_json(self): pass def __repr__(self): fragments = [repr(self.name)] if self.group: fragments.append(f'({self.group})') if self.has_inputs or self.has_outputs: fragments.append('with') if self.has_inputs: fragments.append(f'{len(self.inputs)} inputs') if self.has_inputs and self.has_outputs: fragments.append('and') if self.has_outputs: fragments.append(f'{len(self.outputs)} outputs') fragments = ' '.join(fragments) return f'<{self.__class__.__name__} {fragments}>' class Group: groups = {} def __init__(self, id: str, name: str, color='#cccccc', parent=None): assert name not in self.groups self.name = name self.id = id self.color = color self.groups[id] = self self.parent = parent def to_json(self): return { 'label': self.name, 'id': self.id, 'color': self.color, 'parent': self.parent } class ShellRule(Rule): def __init__(self, name, command, **kwargs): super().__init__(self, name, **kwargs) self.command = command def serialize(self, arguments_group): if isinstance(arguments_group, dict): return ' '.join( ( ( ('-' + key if len(key) == 1 else '--' + key) + ' ' ) if len(key) else '' ) + ( repr(value) ) for key, value in arguments_group.items() ) else: return repr(arguments_group) @property def serialized_arguments(self): return ' '.join({ self.serialize(arguments_group) for arguments_group in self.arguments.values() }) def run(self, use_cache=False) -> int: super().run(use_cache) start_time = time.time() status = system(f'{self.command} {self.serialized_arguments}') self.execution_time = time.time() - start_time return status def to_json(self): return { 'name': self.command, 'arguments': self.serialized_arguments, 'execution_time': self.execution_time, 'type': 'shell' } def expand_run_magics(notebook): 
out_notebook = copy(notebook) new_cells = [] for cell in notebook['cells']: if cell['cell_type'] != 'code': new_cells.append(cell) continue if any(line.startswith('%run') for line in cell['source']): other_code = [] for line in cell['source']: if line.startswith('%run'): if other_code: split_cell = copy(cell) split_cell['source'] = other_code new_cells.append(split_cell) other_code = [] to_include = line[5:].strip() with open(to_include) as o: nb_run = json.load(o) new_cells.extend(nb_run['cells']) else: other_code.append(line) if other_code: split_cell = copy(cell) split_cell['source'] = other_code new_cells.append(split_cell) else: new_cells.append(cell) out_notebook['cells'] = new_cells return out_notebook class NotebookRule(Rule): options: None @property def output_nb_dir(self) -> Path: return self.tmp_dir / 'out' @property def reference_nb_dir(self) -> Path: return self.tmp_dir / 'ref' @property def stripped_nb_dir(self) -> Path: return self.tmp_dir / 'stripped'
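The ShellRule.serialize logic above turns a dict of arguments into shell flags: one-character keys become -k, longer keys become --key, and an empty key yields a bare positional value. A minimal, self-contained sketch of that rule (the helper is re-declared locally and the file names are invented for illustration):

def serialize(arguments_group):
    # mirrors ShellRule.serialize: dicts become flag/value pairs, anything else is repr()-ed
    if isinstance(arguments_group, dict):
        return ' '.join(
            ((('-' + key if len(key) == 1 else '--' + key) + ' ') if key else '')
            + repr(value)
            for key, value in arguments_group.items()
        )
    return repr(arguments_group)

print(serialize({'i': 'input.vcf', 'output': 'out.tsv', '': 'positional'}))
# -i 'input.vcf' --output 'out.tsv' 'positional'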
MIT License
seldonio/alibi
alibi/utils/distributed.py
DistributedExplainer.actor_index
python
def actor_index(self) -> int:
    return self._actor_index
Returns the index of the actor for which state is returned.
https://github.com/seldonio/alibi/blob/ef757b9579f85ef2e3dfc7088211969616ee3fdb/alibi/utils/distributed.py#L550-L554
import copy import logging import numpy as np from functools import partial from scipy import sparse from typing import Any, Dict, Generator, List, Optional, Tuple, Union logger = logging.getLogger(__name__) def check_ray() -> bool: import importlib spec = importlib.util.find_spec('ray') if spec: return True return False RAY_INSTALLED = check_ray() class ActorPool(object): if RAY_INSTALLED: import ray ray = ray def __init__(self, actors): self._idle_actors = list(actors) self._future_to_actor = {} self._index_to_future = {} self._next_task_index = 0 self._next_return_index = 0 self._pending_submits = [] def map(self, fn, values, chunksize=1): values = self._chunk(values, chunksize=chunksize) for v in values: self.submit(fn, v) while self.has_next(): yield self.get_next() def map_unordered(self, fn, values, chunksize=1): values = self._chunk(values, chunksize=chunksize) for v in values: self.submit(fn, v) while self.has_next(): yield self.get_next_unordered() def submit(self, fn, value): if self._idle_actors: actor = self._idle_actors.pop() future = fn(actor, value) self._future_to_actor[future] = (self._next_task_index, actor) self._index_to_future[self._next_task_index] = future self._next_task_index += 1 else: self._pending_submits.append((fn, value)) def has_next(self): return bool(self._future_to_actor) def get_next(self, timeout=None): if not self.has_next(): raise StopIteration("No more results to get") if self._next_return_index >= self._next_task_index: raise ValueError("It is not allowed to call get_next() after " "get_next_unordered().") future = self._index_to_future[self._next_return_index] if timeout is not None: res, _ = self.ray.wait([future], timeout=timeout) if not res: raise TimeoutError("Timed out waiting for result") del self._index_to_future[self._next_return_index] self._next_return_index += 1 i, a = self._future_to_actor.pop(future) self._return_actor(a) return self.ray.get(future) def get_next_unordered(self, timeout=None): if not self.has_next(): raise StopIteration("No more results to get") res, _ = self.ray.wait( list(self._future_to_actor), num_returns=1, timeout=timeout) if res: [future] = res else: raise TimeoutError("Timed out waiting for result") i, a = self._future_to_actor.pop(future) self._return_actor(a) del self._index_to_future[i] self._next_return_index = max(self._next_return_index, i + 1) return self.ray.get(future) def _return_actor(self, actor): self._idle_actors.append(actor) if self._pending_submits: self.submit(*self._pending_submits.pop(0)) @staticmethod def _chunk(values: list, chunksize: int) -> Generator[List, None, None]: for i in range(0, len(values), chunksize): yield values[i:i + chunksize] def batch(X: np.ndarray, batch_size: Optional[int] = None, n_batches: int = 4) -> List[np.ndarray]: n_records = X.shape[0] if isinstance(X, sparse.spmatrix): logger.warning("Batching function received sparse matrix input. 
Converting to dense matrix first...") X = X.toarray() if batch_size: n_batches = n_records // batch_size if n_records % batch_size != 0: n_batches += 1 slices = [batch_size * i for i in range(1, n_batches)] batches = np.array_split(X, slices) else: batches = np.array_split(X, n_batches) return batches def default_target_fcn(actor: Any, instances: tuple, kwargs: Optional[Dict] = None): if kwargs is None: kwargs = {} return actor.get_explanation.remote(instances, **kwargs) def concatenate_minibatches(minibatch_results: Union[List[np.ndarray], List[List[np.ndarray]]]) -> Union[np.ndarray, List[np.ndarray]]: if isinstance(minibatch_results[0], np.ndarray): return np.concatenate(minibatch_results, axis=0) elif isinstance(minibatch_results[0], list) and isinstance(minibatch_results[0][0], np.ndarray): return _array_list_concatenator(minibatch_results) else: raise TypeError( "Minibatch concatenation function is defined only for List[np.ndarray] and List[List[np.ndarray]]" ) def _array_list_concatenator(minibatch_results: List[List[np.ndarray]]) -> List[np.ndarray]: n_classes = len(minibatch_results[0]) to_concatenate = [list(zip(*minibatch_results))[idx] for idx in range(n_classes)] concatenated = [np.concatenate(arrays, axis=0) for arrays in to_concatenate] return concatenated def invert_permutation(p: list) -> np.ndarray: s = np.empty_like(p) s[p] = np.arange(len(p)) return s def order_result(unordered_result: Generator[Tuple[int, Any], None, None]) -> List: result_order, results = list(zip(*[(idx, res) for idx, res in unordered_result])) orig_order = invert_permutation(list(result_order)) ordered_result = [results[idx] for idx in orig_order] return ordered_result class ResourceError(Exception): pass class DistributedExplainer: if RAY_INSTALLED: import ray ray = ray def __init__(self, distributed_opts: Dict[str, Any], explainer_type: Any, explainer_init_args: Tuple, explainer_init_kwargs: dict, concatenate_results: bool = True, return_generator: bool = False): if not RAY_INSTALLED: raise ModuleNotFoundError("Module requires ray to be installed. pip install alibi[ray] ") self.n_processes = distributed_opts['n_cpus'] self.batch_size = distributed_opts['batch_size'] self.return_generator = return_generator self.concatenate_results = concatenate_results algorithm = distributed_opts.get('algorithm', 'default') if 'algorithm' == 'default': logger.warning( "No algorithm specified in distributed option, default target function will be selected." ) self.target_fcn = default_target_fcn self.concatenate = None if concatenate_results: self.concatenate = concatenate_minibatches if f"{algorithm}_target_fcn" in globals(): self.target_fcn = globals()[f"{algorithm}_target_fcn"] if not DistributedExplainer.ray.is_initialized(): logger.info(f"Initialising ray on {self.n_processes} processes!") DistributedExplainer.ray.init(num_cpus=self.n_processes) self.pool = self.create_parallel_pool( explainer_type, explainer_init_args, explainer_init_kwargs ) self._actor_index = 0 def __getattr__(self, item: str) -> Any: if self._actor_index > self.n_processes - 1: raise ValueError(f"Index of actor should be less than or equal to {self.n_processes - 1}!") actor = self.pool._idle_actors[self._actor_index] return DistributedExplainer.ray.get(actor.return_attribute.remote(item)) @property
Apache License 2.0
mikidown/mikidown
mikidown/mikiwindow.py
MikiWindow.updateRecentViewedNotes
python
def updateRecentViewedNotes(self):
    self.viewedList.clear()
    self.viewedListActions = []
    viewedNotes = self.settings.recentViewedNotes()
    existedNotes = []
    i = 0
    for f in viewedNotes:
        if self.notesTree.pageExists(f):
            existedNotes.append(f)
            names = f.split('/')
            if self.altPressed and i in range(1, 10):
                action = self.act(names[-1], self.openFunction(f),
                                  'Alt+'+str(i), True, ViewedNoteIcon(i),
                                  'Alt+'+str(i))
            else:
                action = self.act(names[-1], self.openFunction(f), None, True)
            self.viewedListActions.append(action)
            i += 1

    if not self.altPressed:
        self.settings.updateRecentViewedNotes(existedNotes)
    for action in self.viewedListActions:
        self.viewedList.addAction(action)
    if len(self.viewedListActions):
        self.viewedListActions[0].setChecked(True)
Switching notes will trigger this. When Alt is pressed, show note numbers.
https://github.com/mikidown/mikidown/blob/70568eff44e4b8bc718dcdf35f81a31baebb9b74/mikidown/mikiwindow.py#L973-L1003
import os import shutil import re from threading import Thread from PyQt5.QtCore import Qt from PyQt5 import QtCore, QtGui, QtWidgets, QtWebKitWidgets, QtPrintSupport from whoosh.index import create_in, open_dir from whoosh.qparser import QueryParser, RegexPlugin from whoosh.writing import AsyncWriter import mikidown.mikidown_rc from .slashpleter import SlashPleter from .config import __appname__, __version__ from .mikibook import NotebookListDialog, NotebookSettingsDialog, Mikibook, MikidownCfgDialog from .mikitree import MikiTree, TocTree from .mikiedit import MikiEdit from .mikiview import MikiView from .mikisearch import MikiSearch from .mikitemplate import ManageTemplatesDialog from .attachment import AttachmentView from .highlighter import MikiHighlighter from .findreplacedialog import FindReplaceDialog from .utils import Event, LineEditDialog, ViewedNoteIcon, parseHeaders, parseTitle, METADATA_CHECKER, JSCRIPT_TPL class MikiSepNote(QtWidgets.QDockWidget): def __init__(self, settings, name, filename, plain_text=False, parent=None): super().__init__(parent=parent) splitty = QtWidgets.QSplitter(self) self.setWidget(splitty) self.setWindowTitle(os.path.basename(name)) self.setFloating(True) self.setAttribute(Qt.WA_DeleteOnClose) self.plain_text = plain_text self.notePath = settings.notePath fh = QtCore.QFile(filename) try: if not fh.open(QtCore.QIODevice.ReadOnly): raise IOError(fh.errorString()) except IOError as e: QtWidgets.QMessageBox.warning(self, self.tr("Read Error"), self.tr("Failed to open %s: %s") % (filename, e)) finally: if fh is not None: notestream = QtCore.QTextStream(fh) notestream.setCodec("UTF-8") noteBody = notestream.readAll() fh.close() self.tocw = TocTree(self) splitty.addWidget(self.tocw) strip_math_for_header_parsing = False strip_fence_for_header_parsing = False self.tocw.itemClicked.connect(self.tocNavigate) if 'asciimathml' in settings.extensions: stuff=JSCRIPT_TPL.format(settings.mathjax) strip_math_for_header_parsing = True else: stuff='' if 'fenced_code' in settings.extensions or 'extra' in settings.extensions: strip_fence_for_header_parsing = True if plain_text: note_view = QtWidgets.QPlainTextEdit(self) qfnt = QtGui.QFont() qfnt.setFamily('monospace') note_view.setFont(qfnt) note_view.setPlainText(noteBody) else: note_view = QtWebKitWidgets.QWebView(self) note_view.setHtml(settings.md.reset().convert(noteBody)+stuff) note_view.page().setLinkDelegationPolicy(QtWebKitWidgets.QWebPage.DelegateAllLinks) note_view.linkClicked.connect(self.linkClicked) note_view.settings().setUserStyleSheetUrl( QtCore.QUrl('file://'+self.parent().settings.cssfile)) self.note_view = note_view splitty.addWidget(note_view) self.tocw.updateToc(os.path.basename(name), parseHeaders(noteBody, strip_fenced_block=strip_fence_for_header_parsing, strip_ascii_math=strip_math_for_header_parsing)) def tocNavigate(self, current): if current is None: return pos = int(current.text(1)) if self.plain_text: self.note_view.moveCursor(QtGui.QTextCursor.End) cur = self.note_view.textCursor() cur.setPosition(pos, QtGui.QTextCursor.MoveAnchor) self.note_view.setTextCursor(cur) else: self.note_view.page().mainFrame().scrollToAnchor(current.text(2)) def findItemByAnchor(self, anchor): return self.tocw.findItems(anchor, Qt.MatchExactly|Qt.MatchRecursive, column=2) def linkClicked(self, qurl): name = qurl.toString() http = re.compile('https?://') if http.match(name): QtGui.QDesktopServices.openUrl(qurl) return name = name.replace('file://', '') name = name.replace(self.notePath, '').split('#') item = 
self.parent().notesTree.pageToItem(name[0]) if not item or item == self.parent().notesTree.currentItem(): return else: if self.plain_text: if len(name) == 2: self.parent().newPlainTextNoteDisplay(item, anchor=name[1]) else: self.parent().newPlainTextNoteDisplay(item) else: if len(name) == 2: self.parent().newNoteDisplay(item, anchor=name[1]) else: self.parent().newNoteDisplay(item) class MikiWindow(QtWidgets.QMainWindow): postInit = Event() postClose = Event() def __init__(self, settings, parent=None): super(MikiWindow, self).__init__(parent) self.tray = None self.alwaysClose = False self.setObjectName("mikiWindow") self.settings = settings self.notePath = settings.notePath self.lockPath = os.path.join(settings.notebookPath, '.mikidown_lock') print("Path: ", self.lockPath) print("existst: ", os.path.exists(self.lockPath)) if not os.path.exists(self.lockPath): self.lockPathFH = os.open(self.lockPath, os.O_CREAT | os.O_EXCL | os.O_RDWR) self.notesTree = MikiTree(self) self.quickNoteNav = QtWidgets.QLineEdit() self.notesTab = QtWidgets.QWidget() self.completer = SlashPleter() self.completer.setModel(self.notesTree.model()) self.quickNoteNav.setCompleter(self.completer) self.notesTree.setObjectName("notesTree") self.initTree(self.notePath, self.notesTree) self.notesTree.sortItems(0, Qt.AscendingOrder) self.ix = None self.setupWhoosh() self.viewedList = QtWidgets.QToolBar(self.tr('Recently Viewed'), self) self.viewedList.setIconSize(QtCore.QSize(16, 16)) self.viewedList.setToolButtonStyle(Qt.ToolButtonTextBesideIcon) self.viewedListActions = [] self.noteSplitter = QtWidgets.QSplitter(Qt.Horizontal) self.dockIndex = QtWidgets.QDockWidget(self.tr("Index")) self.dockSearch = QtWidgets.QDockWidget(self.tr("Search")) self.searchEdit = QtWidgets.QLineEdit() self.searchView = MikiSearch(self) self.searchTab = QtWidgets.QWidget() self.dockToc = QtWidgets.QDockWidget(self.tr("TOC")) self.tocTree = TocTree() self.dockAttachment = QtWidgets.QDockWidget(self.tr("Attachment")) self.attachmentView = AttachmentView(self) self.notesEdit = MikiEdit(self) self.notesEdit.setObjectName(self.tr("notesEdit")) self.loadHighlighter() self.notesView = MikiView(self) self.findBar = QtWidgets.QToolBar(self.tr('Find'), self) self.findBar.setFixedHeight(30) self.findEdit = QtWidgets.QLineEdit(self.findBar) self.checkBox = QtWidgets.QCheckBox(self.tr('Match case'), self.findBar) self.statusBar = QtWidgets.QStatusBar(self) self.statusLabel = QtWidgets.QLabel(self) self.altPressed = False self.actions = dict() self.setupActions() self.setupMainWindow() if self.settings.version < __version__ or Mikibook.settings.value("version", defaultValue="0") < __version__: self.changelogHelp() self.settings.qsettings.setValue("version", __version__) Mikibook.settings.setValue("version", __version__) self.postInit(self) def loadHighlighter(self): fnt = Mikibook.settings.value('editorFont', defaultValue=None) fntsize = Mikibook.settings.value('editorFontSize', type=int, defaultValue=12) header_scales_font = Mikibook.settings.value('headerScaleFont', type=bool, defaultValue=True) if fnt is not None: self.notesEdit.setFontFamily(fnt) self.notesEdit.setFontPointSize(fntsize) h = MikiHighlighter(parent=self.notesEdit, scale_font_sizes=header_scales_font) tw = Mikibook.settings.value('tabWidth', type=int, defaultValue=4) qfm = QtGui.QFontMetrics(h.patterns[0][1].font()) self.notesEdit.setTabStopWidth(tw * qfm.width(' ')) def setupActions(self): actTabIndex = self.act(self.tr('Switch to Index Tab'), lambda: self.raiseDock(self.dockIndex), 
self.tr('Ctrl+Shift+I')) actTabSearch = self.act(self.tr('Switch to Search Tab'), lambda: self.raiseDock(self.dockSearch), self.tr('Ctrl+Shift+F')) self.addAction(actTabIndex) self.addAction(actTabSearch) actionNewPage = self.act(self.tr('&New Page...'), self.notesTree.newPage, QtGui.QKeySequence.New) self.actions.update(newPage=actionNewPage) actionNewSubpage = self.act(self.tr('New Sub&page...'), self.notesTree.newSubpage, self.tr('Ctrl+Shift+N')) self.actions.update(newSubpage=actionNewSubpage) actionImportPage = self.act(self.tr('&Import Page...'), self.importPage) self.actions.update(importPage=actionImportPage) actionNBSettings = self.act(self.tr('Notebook Set&tings...'), self.notebookSettings) self.actions.update(NBSettings=actionNBSettings) actionNBTemplates = self.act(self.tr('Notebook Temp&lates...'), self.notebookTemplates) self.actions.update(NBTemplates=actionNBTemplates) actionMDSettings = self.act(self.tr('&Mikidown Settings...'), self.mikidownSettings) self.actions.update(MDSettings=actionMDSettings) actionOpenNotebook = self.act(self.tr('&Open Notebook...'), self.openNotebook, QtGui.QKeySequence.Open) self.actions.update(openNotebook=actionOpenNotebook) actionReIndex = self.act(self.tr('Re-index'), self.reIndex) self.actions.update(reIndex=actionReIndex) actionSave = self.act(self.tr('&Save'), self.saveCurrentNote, QtGui.QKeySequence.Save) actionSave.setEnabled(False) self.actions.update(save=actionSave) actionSaveAs = self.act(self.tr('Save &As...'), self.saveNoteAs, QtGui.QKeySequence.SaveAs) self.actions.update(saveAs=actionSaveAs) actionHtml = self.act(self.tr('to &HTML'), self.notesEdit.saveAsHtml) self.actions.update(html=actionHtml) actionPrint = self.act(self.tr('&Print'), self.printNote, QtGui.QKeySequence.Print) self.actions.update(print_=actionPrint) actionRenamePage = self.act(self.tr('&Rename Page...'), self.notesTree.renamePage, 'F2') self.actions.update(renamePage=actionRenamePage) actionDelPage = self.act(self.tr('&Delete Page'), self.notesTree.delPageWrapper, QtGui.QKeySequence.Delete) self.actions.update(delPage=actionDelPage) actionQuit = self.act(self.tr('&Quit'), self.forceClose, QtGui.QKeySequence.Quit) actionQuit.setMenuRole(QtWidgets.QAction.QuitRole) self.actions.update(quit=actionQuit) actionUndo = self.act(self.tr('&Undo'), lambda: self.notesEdit.undo(), QtGui.QKeySequence.Undo) actionUndo.setEnabled(False) self.notesEdit.undoAvailable.connect(actionUndo.setEnabled) self.actions.update(undo=actionUndo) actionRedo = self.act(self.tr('&Redo'), lambda: self.notesEdit.redo(), QtGui.QKeySequence.Redo) actionRedo.setEnabled(False) self.notesEdit.redoAvailable.connect(actionRedo.setEnabled) self.actions.update(redo=actionRedo) actionFindText = self.act(self.tr('&Find Text'), self.findBar.setVisible, QtGui.QKeySequence.Find, True) self.actions.update(findText=actionFindText) actionFindRepl = self.act(self.tr('Find and Replace'), FindReplaceDialog(self.notesEdit).open, QtGui.QKeySequence.Replace) self.actions.update(findRepl=actionFindRepl) actionFind = self.act(self.tr('Next'), self.findText, QtGui.QKeySequence.FindNext) self.actions.update(find=actionFind) actionFindPrev = self.act(self.tr('Previous'), lambda: self.findText(back=True), QtGui.QKeySequence.FindPrevious) self.actions.update(findPrev=actionFindPrev) actionSortLines = self.act(self.tr('&Sort Lines'), self.sortLines) self.actions.update(sortLines=actionSortLines) actionQuickNav = self.act(self.tr("&Quick Open Note"), self.quickNoteNav.setFocus, self.tr('Ctrl+G')) 
self.addAction(actionQuickNav) actionInsertImage = self.act(self.tr('&Insert Attachment'), self.notesEdit.insertAttachmentWrapper, self.tr('Ctrl+I')) actionInsertImage.setEnabled(False) self.actions.update(insertImage=actionInsertImage) QtGui.QIcon.setThemeName(Mikibook.settings.value('iconTheme', QtGui.QIcon.themeName())) actionEdit = self.act(self.tr('Edit'), self.edit, self.tr('Ctrl+E'), True, QtGui.QIcon.fromTheme('document-edit'), self.tr('Edit mode (Ctrl+E)')) self.actions.update(edit=actionEdit) actionSplit = self.act(self.tr('Split'), self.liveView, self.tr('Ctrl+R'), True, QtGui.QIcon.fromTheme('view-split-left-right'), self.tr('Split mode (Ctrl+R)')) self.actions.update(split=actionSplit) actionFlipEditAndView = self.act(self.tr('Flip Edit and View'), self.flipEditAndView) actionFlipEditAndView.setEnabled(False) self.actions.update(flipEditAndView=actionFlipEditAndView) actionReadme = self.act(self.tr('README'), self.readmeHelp) self.actions.update(readme=actionReadme) actionChangelog = self.act(self.tr('Changelog'), self.changelogHelp) self.actions.update(changelog=actionChangelog) actionAboutQt = self.act(self.tr('About Qt'), QtWidgets.qApp.aboutQt) self.actions.update(aboutQt=actionAboutQt) def setupMainWindow(self): self.resize(800, 600) screen = QtWidgets.QDesktopWidget().screenGeometry() size = self.geometry() self.move(( screen.width()-size.width())/2, (screen.height()-size.height())/2) self.setWindowTitle( '{} - {}'.format(self.settings.notebookName, __appname__)) self.viewedList.setFixedHeight(25) self.noteSplitter.addWidget(self.notesEdit) self.noteSplitter.addWidget(self.notesView) mainSplitter = QtWidgets.QSplitter(Qt.Vertical) mainSplitter.setChildrenCollapsible(False) mainSplitter.addWidget(self.viewedList) mainSplitter.addWidget(self.noteSplitter) mainSplitter.addWidget(self.findBar) self.setCentralWidget(mainSplitter) self.searchEdit.returnPressed.connect(self.searchNote) self.quickNoteNav.returnPressed.connect(self.openFuncWrapper) searchLayout = QtWidgets.QVBoxLayout() searchLayout.addWidget(self.searchEdit) searchLayout.addWidget(self.searchView) self.searchTab.setLayout(searchLayout) indexLayout = QtWidgets.QVBoxLayout(self.notesTab) indexLayout.addWidget(self.quickNoteNav) indexLayout.addWidget(self.notesTree) self.dockIndex.setObjectName("Index") self.dockIndex.setWidget(self.notesTab) self.dockSearch.setObjectName("Search") self.dockSearch.setWidget(self.searchTab) self.dockToc.setObjectName("TOC") self.dockToc.setWidget(self.tocTree) self.dockAttachment.setObjectName("Attachment") self.dockAttachment.setWidget(self.attachmentView) self.setDockOptions(QtWidgets.QMainWindow.VerticalTabs) self.addDockWidget(Qt.LeftDockWidgetArea, self.dockIndex) self.addDockWidget(Qt.LeftDockWidgetArea, self.dockSearch) self.addDockWidget(Qt.LeftDockWidgetArea, self.dockToc) self.addDockWidget(Qt.LeftDockWidgetArea, self.dockAttachment) self.tabifyDockWidget(self.dockIndex, self.dockSearch) self.tabifyDockWidget(self.dockSearch, self.dockToc) self.tabifyDockWidget(self.dockToc, self.dockAttachment) self.setTabPosition(Qt.LeftDockWidgetArea, QtWidgets.QTabWidget.North) self.dockIndex.raise_() menuBar = QtWidgets.QMenuBar(self) self.setMenuBar(menuBar) menuFile = menuBar.addMenu(self.tr('&File')) menuEdit = menuBar.addMenu(self.tr('&Edit')) menuView = menuBar.addMenu(self.tr('&View')) menuHelp = menuBar.addMenu(self.tr('&Help')) menuFile.addAction(self.actions['newPage']) menuFile.addAction(self.actions['newSubpage']) menuFile.addAction(self.actions['NBSettings']) 
menuFile.addAction(self.actions['NBTemplates']) menuFile.addAction(self.actions['MDSettings']) menuFile.addAction(self.actions['importPage']) menuFile.addAction(self.actions['openNotebook']) menuFile.addAction(self.actions['reIndex']) menuFile.addSeparator() menuFile.addAction(self.actions['save']) menuFile.addAction(self.actions['saveAs']) menuFile.addAction(self.actions['print_']) menuExport = menuFile.addMenu(self.tr('&Export')) menuExport.addAction(self.actions['html']) menuFile.addSeparator() menuFile.addAction(self.actions['renamePage']) menuFile.addAction(self.actions['delPage']) menuFile.addSeparator() menuFile.addAction(self.actions['quit']) menuEdit.addAction(self.actions['undo']) menuEdit.addAction(self.actions['redo']) menuEdit.addAction(self.actions['findText']) menuEdit.addAction(self.actions['findRepl']) menuEdit.addSeparator() menuEdit.addAction(self.actions['sortLines']) menuEdit.addAction(self.actions['insertImage']) menuView.addAction(self.actions['edit']) menuView.addAction(self.actions['split']) menuView.addAction(self.actions['flipEditAndView']) menuShowHide = menuView.addMenu(self.tr('Show/Hide')) menuShowHide.addAction(self.dockIndex.toggleViewAction()) menuShowHide.addAction(self.dockSearch.toggleViewAction()) menuShowHide.addAction(self.dockToc.toggleViewAction()) menuShowHide.addAction(self.dockAttachment.toggleViewAction()) menuHelp.addAction(self.actions['readme']) menuHelp.addAction(self.actions['changelog']) menuHelp.addAction(self.actions['aboutQt']) toolBar = QtWidgets.QToolBar(self.tr("toolbar"), self) toolBar.setObjectName("toolbar") toolBar.setToolButtonStyle(Qt.ToolButtonTextBesideIcon) self.addToolBar(Qt.TopToolBarArea, toolBar) toolBar.addAction(self.actions['edit']) toolBar.addAction(self.actions['split']) self.findEdit.returnPressed.connect(self.findText) self.findBar.addWidget(self.findEdit) self.findBar.addWidget(self.checkBox) self.findBar.addAction(self.actions['findPrev']) self.findBar.addAction(self.actions['find']) self.findBar.setVisible(False) self.findBar.visibilityChanged.connect(self.findBarVisibilityChanged) self.setStatusBar(self.statusBar) self.statusBar.addWidget(self.statusLabel, 1) self.notesTree.currentItemChanged.connect( self.currentItemChangedWrapper) self.notesTree.nvwCallback = self.newNoteDisplay self.notesTree.nvwtCallback = self.newPlainTextNoteDisplay self.tocTree.itemClicked.connect(self.tocNavigate) self.notesEdit.textChanged.connect(self.noteEditted) self.notesEdit.document( ).modificationChanged.connect(self.modificationChanged) self.updateRecentViewedNotes() notes = self.settings.recentViewedNotes() if len(notes) != 0: item = self.notesTree.pageToItem(notes[0]) self.notesTree.setCurrentItem(item) def newNoteDisplay(self, item, anchor=None): msn = MikiSepNote(self.settings, item.text(0), self.notesTree.itemToFile(item), plain_text=False, parent=self) if anchor: msn.note_view.page().mainFrame().scrollToAnchor(anchor) msn.show() def newPlainTextNoteDisplay(self, item, anchor=None): msn = MikiSepNote(self.settings, item.text(0), self.notesTree.itemToFile(item), plain_text=True, parent=self) if anchor: item = msn.findItemByAnchor(anchor)[0] msn.tocNavigate(item) msn.show() def openFuncWrapper(self): self.openFunction(self.quickNoteNav.text())() def setupWhoosh(self): indexdir = self.settings.indexdir try: self.ix = open_dir(indexdir) except: QtCore.QDir().mkpath(indexdir) self.ix = create_in(indexdir, self.settings.schema) p = Thread(target=self.whoosh_index, args=()) p.start() def restore(self): if 
self.settings.geometry: self.restoreGeometry(self.settings.geometry) if self.settings.windowstate: self.restoreState(self.settings.windowstate) def initTree(self, notePath, parent): if not QtCore.QDir(notePath).exists(): return notebookDir = QtCore.QDir(notePath) notesList = notebookDir.entryInfoList(['*.md', '*.mkd', '*.markdown'], QtCore.QDir.NoFilter, QtCore.QDir.Name|QtCore.QDir.IgnoreCase) nl = [note.completeBaseName() for note in notesList] noduplicate = list(set(nl)) for name in noduplicate: item = QtWidgets.QTreeWidgetItem(parent, [name]) path = notePath + '/' + name self.initTree(path, item) def updateToc(self): root = self.notesTree.currentPage() strip_math_for_header_parsing = False strip_fence_for_header_parsing = False if 'asciimathml' in self.settings.extensions: strip_math_for_header_parsing = True if 'fenced_code' in self.settings.extensions or 'extra' in self.settings.extensions: strip_fence_for_header_parsing = True self.tocTree.updateToc(root, parseHeaders(self.notesEdit.toPlainText(), strip_fenced_block=strip_fence_for_header_parsing, strip_ascii_math=strip_math_for_header_parsing)) def updateAttachmentView(self): item = self.notesTree.currentItem() path = self.notesTree.itemToAttachmentDir(item) self.attachmentView.model.setRootPath(path) index = self.attachmentView.model.index(path) self.attachmentView.setRootIndex(index) def openFile(self, filename): fh = QtCore.QFile(filename) try: if not fh.open(QtCore.QIODevice.ReadOnly): raise IOError(fh.errorString()) except IOError as e: QtWidgets.QMessageBox.warning(self, self.tr('Read Error'), self.tr('Failed to open %s: %s') % (filename, e)) finally: if fh is not None: notestream = QtCore.QTextStream(fh) notestream.setCodec("UTF-8") noteBody = notestream.readAll() fh.close() self.notesEdit.setPlainText(noteBody) self.notesView.scrollPosition = QtCore.QPoint(0, 0) self.notesEdit.document().setModified(False) self.notesView.updateView() self.setCurrentNote() self.updateRecentViewedNotes() def currentItemChangedWrapper(self, current, previous): if current is None: return prev = self.notesTree.itemToPage(previous) if self.notesTree.pageExists(prev): self.saveNote(previous) currentFile = self.notesTree.itemToFile(current) self.openFile(currentFile) self.updateAttachmentView() def tocNavigate(self, current): if current is None: return pos = int(current.text(1)) link = "file://" + self.notePath + "/#" + current.text(2) self.notesEdit.moveCursor(QtGui.QTextCursor.End) cur = self.notesEdit.textCursor() cur.setPosition(pos, QtGui.QTextCursor.MoveAnchor) self.notesEdit.setTextCursor(cur) self.notesView.load(QtCore.QUrl(link)) def switchNote(self, num): if num < len(self.viewedListActions): self.viewedListActions[num].trigger() def saveCurrentNote(self): item = self.notesTree.currentItem() self.saveNote(item) def saveNote(self, item): if self.notesEdit.document().isModified(): self.notesEdit.document().setModified(False) else: return self.notesEdit.save(item) def saveNoteAs(self): self.saveCurrentNote() fileName = QtWidgets.QFileDialog.getSaveFileName(self, self.tr('Save as'), '', '(*.md *.mkd *.markdown);;'+self.tr('All files(*)')) if fileName == '': return if not QtCore.QFileInfo(fileName).suffix(): fileName += '.md' fh = QtCore.QFile(fileName) fh.open(QtCore.QIODevice.WriteOnly) savestream = QtCore.QTextStream(fh) savestream.setCodec("UTF-8") savestream << self.notesEdit.toPlainText() fh.close() def printNote(self): printer = QtPrintSupport.QPrinter(QtPrintSupport.QPrinter.HighResolution) printer.setCreator(__appname__ + ' ' + 
__version__) printer.setDocName(self.notesTree.currentItem().text(0)) printdialog = QtPrintSupport.QPrintDialog(printer, self) if printdialog.exec() == QtWidgets.QDialog.Accepted: self.notesView.print_(printer) def noteEditted(self): self.updateToc() self.notesView.updateLiveView() def modificationChanged(self, changed): self.actions['save'].setEnabled(changed) name = self.notesTree.currentPage() self.statusBar.clearMessage() if changed: self.statusLabel.setText(name + '*') else: self.statusLabel.setText(name) def importPage(self): filename = QtWidgets.QFileDialog.getOpenFileName( self, self.tr('Import file'), '', '(*.md *.mkd *.markdown *.txt);;'+self.tr('All files(*)')) if filename == '': return self.importPageCore(filename) def importPageCore(self, filename): fh = QtCore.QFile(filename) fh.open(QtCore.QIODevice.ReadOnly) filestream = QtCore.QTextStream(fh) filestream.setCodec("UTF-8") fileBody = filestream.readAll() fh.close() page = QtCore.QFileInfo(filename).completeBaseName() fh = QtCore.QFile(self.notesTree.pageToFile(page)) if fh.exists(): QtWidgets.QMessageBox.warning(self, self.tr("Import Error"), self.tr("Page already exists: %s") % page) dialog = LineEditDialog(self.notePath, self) if dialog.exec_(): page = dialog.editor.text() fh.close() fh = QtCore.QFile(self.notesTree.pageToFile(page)) else: return fh.open(QtCore.QIODevice.WriteOnly) savestream = QtCore.QTextStream(fh) savestream.setCodec("UTF-8") savestream << fileBody fh.close() item = QtWidgets.QTreeWidgetItem(self.notesTree, [page]) self.notesTree.sortItems(0, Qt.AscendingOrder) self.notesTree.setCurrentItem(item) def openNotebook(self): dialog = NotebookListDialog(self) if dialog.exec_(): pass def notebookSettings(self): dialog = NotebookSettingsDialog(self) if dialog.exec_(): pass def notebookTemplates(self): dialog = ManageTemplatesDialog(self.settings, parent=self) if dialog.exec_(): pass def mikidownSettings(self): dialog = MikidownCfgDialog(self) if dialog.exec_(): pass def reIndex(self): shutil.rmtree(self.settings.indexdir) self.setupWhoosh() def act(self, name, trig, shortcut=None, checkable=False, icon=None, tooltip=None): if icon: action = QtWidgets.QAction(icon, name, self) else: action = QtWidgets.QAction(name, self) if shortcut: action.setShortcut(QtGui.QKeySequence(shortcut)) action.setCheckable(checkable) if tooltip: action.setToolTip(tooltip) action.triggered.connect(trig) return action def edit(self, viewmode): if self.actions['split'].isChecked(): self.actions['split'].setChecked(False) self.notesView.setVisible(not viewmode) self.notesEdit.setVisible(viewmode) if viewmode: self.notesEdit.setFocus() else: self.notesView.setFocus() self.saveCurrentNote() self.actions['insertImage'].setEnabled(viewmode) self.notesView.updateView() def liveView(self, viewmode): self.actions['split'].setChecked(viewmode) sizes = self.noteSplitter.sizes() if self.actions['edit'].isChecked(): self.actions['edit'].setChecked(False) self.notesView.setVisible(viewmode) splitSize = [sizes[0]*0.45, sizes[0]*0.55] else: self.notesEdit.setVisible(viewmode) splitSize = [sizes[1]*0.45, sizes[1]*0.55] if viewmode: self.notesEdit.setFocus() else: self.notesView.setFocus() self.actions['flipEditAndView'].setEnabled(viewmode) self.actions['insertImage'].setEnabled(viewmode) self.noteSplitter.setSizes(splitSize) self.saveCurrentNote() self.notesView.updateView() def findBarVisibilityChanged(self, visible): self.actions['findText'].setChecked(visible) if visible: self.findEdit.setFocus(Qt.ShortcutFocusReason) def findText(self, 
back=False): flags = 0 if back: flags = QtGui.QTextDocument.FindBackward if self.checkBox.isChecked(): flags = flags | QtGui.QTextDocument.FindCaseSensitively text = self.findEdit.text() if not self.findMain(text, flags): if text in self.notesEdit.toPlainText(): cursor = self.notesEdit.textCursor() if back: cursor.movePosition(QtGui.QTextCursor.End) else: cursor.movePosition(QtGui.QTextCursor.Start) self.notesEdit.setTextCursor(cursor) self.findMain(text, flags) def findMain(self, text, flags): viewFlags = QtWebKitWidgets.QWebPage.FindFlags( flags) | QtWebKitWidgets.QWebPage.FindWrapsAroundDocument if flags: self.notesView.findText(text, viewFlags) return self.notesEdit.find(text, flags) else: self.notesView.findText(text) return self.notesEdit.find(text) def sortLines(self): cursor = self.notesEdit.textCursor() start = cursor.selectionStart() end = cursor.selectionEnd() cursor.setPosition(start) cursor.movePosition(QtGui.QTextCursor.StartOfLine) cursor.setPosition(end, mode=QtGui.QTextCursor.KeepAnchor) cursor.movePosition(QtGui.QTextCursor.EndOfLine, mode=QtGui.QTextCursor.KeepAnchor) text = cursor.selectedText() lines = text.split('\u2029') sortedLines = sorted(lines) cursor.insertText('\n'.join(sortedLines)) def notesEditInFocus(self, e): if e.gotFocus: self.actions['insertImage'].setEnabled(True) def searchNote(self): pattern = self.searchEdit.text() if not pattern: return results = [] print("Searching using", pattern) with self.ix.searcher() as searcher: matches = [] queryp = QueryParser("content", self.ix.schema) queryp.add_plugin(RegexPlugin(expr=r'r"(?P<text>[^"\\]*(\\.[^"\\]*)*)"')) query = queryp.parse(pattern) ms = searcher.search(query, limit=None) for m in ms: matches.append(m) for r in matches: title = r['title'] path = r['path'] term = r.highlights("content") results.append([title, path, term]) html = "" for title, path, hi in results: html += ("<p><a href='" + path + "'>" + title + "</a><br/><span class='path'>" + path + "</span><br/>" + hi + "</p>") self.searchView.setHtml(html) print("Finished searching", pattern) def whoosh_index(self): it = QtWidgets.QTreeWidgetItemIterator( self.notesTree, QtWidgets.QTreeWidgetItemIterator.All) print("Starting complete indexing.") writer = AsyncWriter(self.ix) while it.value(): treeItem = it.value() name = self.notesTree.itemToPage(treeItem) path = os.path.join(self.notesTree.pageToFile(name)).replace(os.sep, '/') print(path) fileobj = open(path, 'r', encoding='utf-8') content = fileobj.read() fileobj.close() if METADATA_CHECKER.match(content) and 'meta' in self.settings.extensions: no_metadata_content = METADATA_CHECKER.sub("", content, count=1).lstrip() self.settings.md.reset().convert(content) writer.update_document( path=name, title=parseTitle(content, name), content=no_metadata_content, tags=','.join(self.settings.md.Meta.get('tags', [])).strip()) else: writer.add_document(path=name, title=parseTitle(content, name), content=content, tags='') it += 1 writer.commit() print("Finished completely reindexing.") def listItemChanged(self, row): if row != -1: item = self.searchList.currentItem().data(Qt.UserRole) self.notesTree.setCurrentItem(item) flags = QtWebKitWidgets.QWebPage.HighlightAllOccurrences self.notesView.findText(self.searchEdit.text(), flags) def setCurrentNote(self): item = self.notesTree.currentItem() name = self.notesTree.itemToPage(item) notes = self.settings.recentViewedNotes() for f in notes: if f == name: notes.remove(f) notes.insert(0, name) recent_notes_n = Mikibook.settings.value('recentNotesNumber',type=int, 
defaultValue=20) if len(notes) > recent_notes_n: del notes[recent_notes_n:] self.settings.updateRecentViewedNotes(notes)
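setupWhoosh, whoosh_index and searchNote above follow the standard Whoosh flow: open or create an index, add documents with a writer, then query it with a QueryParser. A minimal sketch of that flow with a plain writer; mikidown's real schema comes from its settings, so the fields below are assumptions:

import tempfile
from whoosh.fields import ID, TEXT, Schema
from whoosh.index import create_in
from whoosh.qparser import QueryParser

indexdir = tempfile.mkdtemp()
schema = Schema(path=ID(stored=True, unique=True), title=TEXT(stored=True), content=TEXT)
ix = create_in(indexdir, schema)

writer = ix.writer()                      # mikidown uses AsyncWriter(ix) for the same purpose
writer.add_document(path='notes/todo', title='todo', content='buy milk, fix the indexer')
writer.commit()

with ix.searcher() as searcher:
    query = QueryParser('content', ix.schema).parse('indexer')
    for hit in searcher.search(query, limit=None):
        print(hit['path'], hit['title'])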
MIT License
jobovy/galpy
galpy/util/leung_dop853.py
dense_output
python
def dense_output(t_current, t_old, h_current, rcont):
    s = (t_current - t_old) / h_current
    s1 = 1.0 - s
    return rcont[0] + s * (rcont[1] + s1 * (
        rcont[2] + s * (rcont[3] + s1 * (rcont[4] + s * (
            rcont[5] + s1 * (rcont[6] + s * rcont[7]))))))
Dense output function, basically extrapolating
https://github.com/jobovy/galpy/blob/0470fa3e990f44319e9340497f669699d1bf1008/galpy/util/leung_dop853.py#L239-L248
import numpy c2 = 0.526001519587677318785587544488e-1 c3 = 0.789002279381515978178381316732e-1 c4 = 0.118350341907227396726757197510 c5 = 0.281649658092772603273242802490 c6 = 0.333333333333333333333333333333 c7 = 0.25 c8 = 0.307692307692307692307692307692 c9 = 0.651282051282051282051282051282 c10 = 0.6 c11 = 0.857142857142857142857142857142 c12 = 1 c13 = 1 c14 = 0.1 c15 = 0.2 c16 = 0.777777777777777777777777777778 a21 = 5.26001519587677318785587544488e-2 a31 = 1.97250569845378994544595329183e-2 a32 = 5.91751709536136983633785987549e-2 a41 = 2.95875854768068491816892993775e-2 a43 = 8.87627564304205475450678981324e-2 a51 = 2.41365134159266685502369798665e-1 a53 = -8.84549479328286085344864962717e-1 a54 = 9.24834003261792003115737966543e-1 a61 = 3.7037037037037037037037037037e-2 a64 = 1.70828608729473871279604482173e-1 a65 = 1.25467687566822425016691814123e-1 a71 = 3.7109375e-2 a74 = 1.70252211019544039314978060272e-1 a75 = 6.02165389804559606850219397283e-2 a76 = -1.7578125e-2 a81 = 3.70920001185047927108779319836e-2 a84 = 1.70383925712239993810214054705e-1 a85 = 1.07262030446373284651809199168e-1 a86 = -1.53194377486244017527936158236e-2 a87 = 8.27378916381402288758473766002e-3 a91 = 6.24110958716075717114429577812e-1 a94 = -3.36089262944694129406857109825e0 a95 = -8.68219346841726006818189891453e-1 a96 = 2.75920996994467083049415600797e1 a97 = 2.01540675504778934086186788979e1 a98 = -4.34898841810699588477366255144e1 a101 = 4.77662536438264365890433908527e-1 a104 = -2.48811461997166764192642586468e0 a105 = -5.90290826836842996371446475743e-1 a106 = 2.12300514481811942347288949897e1 a107 = 1.52792336328824235832596922938e1 a108 = -3.32882109689848629194453265587e1 a109 = -2.03312017085086261358222928593e-2 a111 = -9.3714243008598732571704021658e-1 a114 = 5.18637242884406370830023853209e0 a115 = 1.09143734899672957818500254654e0 a116 = -8.14978701074692612513997267357e0 a117 = -1.85200656599969598641566180701e1 a118 = 2.27394870993505042818970056734e1 a119 = 2.49360555267965238987089396762e0 a1110 = -3.0467644718982195003823669022e0 a121 = 2.27331014751653820792359768449e0 a124 = -1.05344954667372501984066689879e1 a125 = -2.00087205822486249909675718444e0 a126 = -1.79589318631187989172765950534e1 a127 = 2.79488845294199600508499808837e1 a128 = -2.85899827713502369474065508674e0 a129 = -8.87285693353062954433549289258e0 a1210 = 1.23605671757943030647266201528e1 a1211 = 6.43392746015763530355970484046e-1 a141 = 5.61675022830479523392909219681e-2 a147 = 2.53500210216624811088794765333e-1 a148 = -2.46239037470802489917441475441e-1 a149 = -1.24191423263816360469010140626e-1 a1410 = 1.5329179827876569731206322685e-1 a1411 = 8.20105229563468988491666602057e-3 a1412 = 7.56789766054569976138603589584e-3 a1413 = -8.298e-3 a151 = 3.18346481635021405060768473261e-2 a156 = 2.83009096723667755288322961402e-2 a157 = 5.35419883074385676223797384372e-2 a158 = -5.49237485713909884646569340306e-2 a1511 = -1.08347328697249322858509316994e-4 a1512 = 3.82571090835658412954920192323e-4 a1513 = -3.40465008687404560802977114492e-4 a1514 = 1.41312443674632500278074618366e-1 a161 = -4.28896301583791923408573538692e-1 a166 = -4.69762141536116384314449447206e0 a167 = 7.68342119606259904184240953878e0 a168 = 4.06898981839711007970213554331e0 a169 = 3.56727187455281109270669543021e-1 a1613 = -1.39902416515901462129418009734e-3 a1614 = 2.9475147891527723389556272149e0 a1615 = -9.15095847217987001081870187138e0 b1 = 5.42937341165687622380535766363e-2 b6 = 4.45031289275240888144113950566 b7 = 1.89151789931450038304281599044 
b8 = -5.8012039600105847814672114227 b9 = 3.1116436695781989440891606237e-1 b10 = -1.52160949662516078556178806805e-1 b11 = 2.01365400804030348374776537501e-1 b12 = 4.47106157277725905176885569043e-2 bhh1 = 0.244094488188976377952755905512 bhh2 = 0.733846688281611857341361741547 bhh3 = 0.220588235294117647058823529412e-1 d41 = -0.84289382761090128651353491142e+1 d46 = 0.56671495351937776962531783590 d47 = -0.30689499459498916912797304727e+1 d48 = 0.23846676565120698287728149680e+1 d49 = 0.21170345824450282767155149946e+1 d410 = -0.87139158377797299206789907490 d411 = 0.22404374302607882758541771650e+1 d412 = 0.63157877876946881815570249290 d413 = -0.88990336451333310820698117400e-1 d414 = 0.18148505520854727256656404962e+2 d415 = -0.91946323924783554000451984436e+1 d416 = -0.44360363875948939664310572000e+1 d51 = 0.10427508642579134603413151009e+2 d56 = 0.24228349177525818288430175319e+3 d57 = 0.16520045171727028198505394887e+3 d58 = -0.37454675472269020279518312152e+3 d59 = -0.22113666853125306036270938578e+2 d510 = 0.77334326684722638389603898808e+1 d511 = -0.30674084731089398182061213626e+2 d512 = -0.93321305264302278729567221706e+1 d513 = 0.15697238121770843886131091075e+2 d514 = -0.31139403219565177677282850411e+2 d515 = -0.93529243588444783865713862664e+1 d516 = 0.35816841486394083752465898540e+2 d61 = 0.19985053242002433820987653617e+2 d66 = -0.38703730874935176555105901742e+3 d67 = -0.18917813819516756882830838328e+3 d68 = 0.52780815920542364900561016686e+3 d69 = -0.11573902539959630126141871134e+2 d610 = 0.68812326946963000169666922661e+1 d611 = -0.10006050966910838403183860980e+1 d612 = 0.77771377980534432092869265740 d613 = -0.27782057523535084065932004339e+1 d614 = -0.60196695231264120758267380846e+2 d615 = 0.84320405506677161018159903784e+2 d616 = 0.11992291136182789328035130030e+2 d71 = -0.25693933462703749003312586129e+2 d76 = -0.15418974869023643374053993627e+3 d77 = -0.23152937917604549567536039109e+3 d78 = 0.35763911791061412378285349910e+3 d79 = 0.93405324183624310003907691704e+2 d710 = -0.37458323136451633156875139351e+2 d711 = 0.10409964950896230045147246184e+3 d712 = 0.29840293426660503123344363579e+2 d713 = -0.43533456590011143754432175058e+2 d714 = 0.96324553959188282948394950600e+2 d715 = -0.39177261675615439165231486172e+2 d716 = -0.14972683625798562581422125276e+3 er1 = 0.1312004499419488073250102996e-1 er6 = -0.1225156446376204440720569753e+1 er7 = -0.4957589496572501915214079952 er8 = 0.1664377182454986536961530415e+1 er9 = -0.3503288487499736816886487290 er10 = 0.3341791187130174790297318841 er11 = 0.8192320648511571246570742613e-1 er12 = -0.2235530786388629525884427845e-1 unsigned_int_max = numpy.iinfo(numpy.int32).max uround = numpy.finfo(numpy.float).eps def custom_sign(a, b): return numpy.fabs(a) if b > 0.0 else -numpy.fabs(a) def hinit(func, x, t, pos_neg, f0, iord, hmax, rtol, atol, args): sk = atol + rtol * numpy.fabs(x) dnf = numpy.sum(numpy.square(f0 / sk), axis=0) dny = numpy.sum(numpy.square(x / sk), axis=0) h = numpy.sqrt(dny / dnf) * 0.01 h = numpy.min([h, numpy.fabs(hmax)]) h = custom_sign(h, pos_neg) xx1 = x + h * f0 f1 = numpy.array(func(xx1, t[0] + h, *args)) der2 = numpy.sum(numpy.square((f1 - f0) / sk), axis=0) der2 = numpy.sqrt(der2) / h der12 = numpy.max([numpy.fabs(der2), numpy.sqrt(dnf)]) h1 = numpy.power(0.01 / der12, 1.0 / iord) h = numpy.min([100.0 * numpy.fabs(h), numpy.min([numpy.fabs(h1), numpy.fabs(hmax)])]) return custom_sign(h, pos_neg), f0, f1, xx1
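dense_output above evaluates DOP853's degree-7 interpolating polynomial between two accepted steps. A quick numeric sanity check (the function is re-declared for self-containment and the rcont coefficients are dummy values; inside the integrator they are built from the d-coefficients listed above): with s = (t - t_old)/h the polynomial equals rcont[0] at the left endpoint and rcont[0] + rcont[1] at the right one.

import numpy

def dense_output(t_current, t_old, h_current, rcont):
    s = (t_current - t_old) / h_current
    s1 = 1.0 - s
    return rcont[0] + s * (rcont[1] + s1 * (rcont[2] + s * (rcont[3] + s1 * (
        rcont[4] + s * (rcont[5] + s1 * (rcont[6] + s * rcont[7]))))))

rcont = numpy.arange(1.0, 9.0)   # stand-in coefficients [1., 2., ..., 8.]
t_old, h = 0.0, 0.5
print(dense_output(t_old, t_old, h, rcont))      # 1.0  -> rcont[0]
print(dense_output(t_old + h, t_old, h, rcont))  # 3.0  -> rcont[0] + rcont[1]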
BSD 3-Clause New or Revised License
tensorflow/fold
tensorflow_fold/loom/loom.py
Weaver.named_tensor
python
def named_tensor(self, name):
    return self._tensor_name_to_result[name]
Return a LoomResult which stands in for the named Tensor input.
https://github.com/tensorflow/fold/blob/0e7ca14832a14a5f2009d4e0424783a80e7d7a2c/tensorflow_fold/loom/loom.py#L929-L931
from __future__ import absolute_import from __future__ import division from __future__ import print_function import abc import collections import functools import numbers import re import numpy as np import six from six.moves import xrange import tensorflow as tf from tensorflow_fold.loom import deserializing_weaver_op from tensorflow_fold.loom import loom_pb2 from tensorflow_fold.loom import pywrapweaver TENSOR_IDX_T = tf.int32 _TypeShape = collections.namedtuple('TypeShape', ['dtype', 'shape', 'tag']) @functools.total_ordering class TypeShape(_TypeShape): def __new__(cls, dtype=None, shape=None, tag='', tensor=None): if tensor is not None: if dtype is not None: raise TypeError('Specify only one of tensor and dtype.') if shape is not None: raise TypeError('Specify only one of tensor and shape.') dtype = tensor.dtype shape = tensor.get_shape().as_list() elif not (isinstance(dtype, tf.DType) or isinstance(dtype, six.string_types)): raise TypeError('%r is not a tf.DType or string' % (dtype,)) dtype = tf.as_dtype(dtype).base_dtype.name if not all(isinstance(s, numbers.Integral) and s >= 0 for s in shape): raise TypeError('shape must be non-negative integers: %s' % shape) shape = tuple(int(s) for s in shape) if not isinstance(tag, six.string_types): raise TypeError('A TypeShape tag must be a string; type of %r is %s' % (tag, type(tag))) return _TypeShape.__new__(cls, dtype, shape, tag) @property def dtype_enum(self): return tf.as_dtype(self.dtype).as_datatype_enum def tensor_flow_name(self): tag = re.sub(r'\W+', '', self.tag) if tag: tag = '_'+tag return '%s_%s%s' % (self.dtype, '_'.join(str(x) for x in self.shape), tag) def __str__(self): return 'TypeShape{dtype:%s shape:%s tag:"%s"}' % ( self.dtype, self.shape, self.tag) @six.add_metaclass(abc.ABCMeta) class LoomOp(object): def __init__(self, input_type_shapes, output_type_shapes): if not input_type_shapes: raise TypeError('Every LoomOp must have at least one input.') if not output_type_shapes: raise TypeError('Every LoomOp must have at least one output.') if not all(isinstance(ts, TypeShape) for ts in input_type_shapes): raise TypeError('All of input_type_shapes must be TypeShapes.') if not all(isinstance(ts, TypeShape) for ts in output_type_shapes): raise TypeError('All of output_type_shapes must be TypeShapes.') self._input_type_shapes = input_type_shapes self._output_type_shapes = output_type_shapes @property def input_type_shapes(self): return self._input_type_shapes @property def output_type_shapes(self): return self._output_type_shapes @abc.abstractmethod def instantiate_batch(self, inputs): _ = inputs raise NotImplementedError( 'LoomOp needs a definition for instantiate_batch.') class PassThroughLoomOp(LoomOp): def __init__(self, type_shape): super(PassThroughLoomOp, self).__init__([type_shape], [type_shape]) def instantiate_batch(self, inputs): return inputs class Loom(object): def __init__(self, max_depth=None, named_tensors=None, named_ops=None, batch_inputs=None, extra_type_shapes=None, dry_run=False, parallel_iterations=None, back_prop=None, swap_memory=None, direct_feed_dict=False, loom_input_tensor=None, weaver_op=None): if named_ops is None: raise TypeError('named_ops is a mandatory argument.') if max_depth is None: max_depth = -1 self._max_depth = max_depth if named_tensors is None: named_tensors = {} if batch_inputs is None: batch_inputs = {} if parallel_iterations is None: parallel_iterations = 10 if back_prop is None: back_prop = True if swap_memory is None: swap_memory = False self._batch_inputs = batch_inputs 
self._dry_run = dry_run self._parallel_iterations = parallel_iterations self._back_prop = back_prop self._swap_memory = swap_memory self._direct_feed_dict = direct_feed_dict if direct_feed_dict: if loom_input_tensor is not None: raise TypeError( 'direct_feed_dict and loom_input_tensor are incompatible.') if weaver_op is not None: raise TypeError('direct_feed_dict and weaver_op are incompatible.') if not direct_feed_dict: if weaver_op is None: if loom_input_tensor is None: loom_input_tensor = tf.placeholder( 'string', name='LoomInput') def weaver_from_input_tensor(**kwargs): return deserializing_weaver_op.deserializing_weaver( self._loom_input_tensor, **kwargs) weaver_op = weaver_from_input_tensor else: if loom_input_tensor is not None: raise TypeError('You can specify at most one of loom_input_tensor ' 'or weaver_op.') self._loom_input_tensor = loom_input_tensor self._weaver_op = weaver_op self._setup_type_shapes(named_ops, extra_type_shapes) self._setup_named_tensors(named_tensors) self._setup_loom_ops(named_ops) self._setup_metadata() self._setup_network() def _setup_type_shapes(self, named_ops, extra_type_shapes): type_shape_set = set() for op in six.itervalues(named_ops): type_shape_set.update(op.input_type_shapes) type_shape_set.update(op.output_type_shapes) if extra_type_shapes is not None: type_shape_set.update(extra_type_shapes) self._type_shapes = sorted(type_shape_set) non_empty_tags = set() for ts in self._type_shapes: if ts.tag: if ts.tag in non_empty_tags: raise TypeError('Tags on tagged TypeShapes must be unique; ' '%s occured more than once.' % (ts.tag,)) else: non_empty_tags.add(ts.tag) self._type_shape_to_idx = {ts: idx for idx, ts in enumerate(self._type_shapes)} def _setup_named_tensors(self, named_tensors): self._ts_idx_to_named_tensors = [[] for _ in self._type_shapes] self._ts_idx_to_tensor_names = [[] for _ in self._type_shapes] for name, tensor in sorted(six.iteritems(named_tensors)): if isinstance(tensor, tuple): tensor, tag = tensor ts = TypeShape(tensor=tensor, tag=tag) else: ts = TypeShape(tensor=tensor) ts_idx = self._type_shape_to_idx[ts] self._ts_idx_to_named_tensors[ts_idx].append(tensor) self._ts_idx_to_tensor_names[ts_idx].append(name) def _pass_through_name(self, ts): return '_pass_through_' + ts.tensor_flow_name() def _setup_loom_ops(self, named_ops): pass_through_ops = [PassThroughLoomOp(ts) for ts in self._type_shapes] non_passthrough_op_names = sorted(six.iterkeys(named_ops)) self._loom_op_names = ( [self._pass_through_name(ts) for ts in self._type_shapes] + non_passthrough_op_names) self._loom_ops = ( pass_through_ops + [named_ops[k] for k in non_passthrough_op_names]) self._loom_total_args = sum( len(op.input_type_shapes) for op in self._loom_ops) self._loom_op_name_to_idx = { name: idx for idx, name in enumerate(self._loom_op_names)} def _setup_metadata(self): loom_metadata = loom_pb2.LoomMetadata() loom_metadata.max_depth = self._max_depth for ts, tensor_names in zip( self._type_shapes, self._ts_idx_to_tensor_names): type_shape_metadata = loom_metadata.type_shape_metadata.add() type_shape_metadata.dtype = ts.dtype_enum type_shape_metadata.shape.extend(ts.shape) type_shape_metadata.tag = ts.tag type_shape_metadata.name = str(ts) type_shape_metadata.tensor_names.extend(tensor_names) type_shape_metadata.is_batch_input = ( (ts in self._batch_inputs) or self._direct_feed_dict) for op_name, op in zip(self._loom_op_names, self._loom_ops): op_metadata = loom_metadata.op_metadata.add() op_metadata.name = op_name op_metadata.input_ts_idx.extend( 
self._type_shape_to_idx[ts] for ts in op.input_type_shapes) op_metadata.output_ts_idx.extend( self._type_shape_to_idx[ts] for ts in op.output_type_shapes) self._loom_metadata_str = ( loom_metadata.SerializeToString()) def _setup_network(self): if self._dry_run: self._output = [tf.constant(np.zeros((1,)+ts.shape, dtype=ts.dtype)) for ts in self._type_shapes] return if self._direct_feed_dict: self._arg_wiring_concat = tf.placeholder( TENSOR_IDX_T, name='arg_wiring_concat') self._arg_wiring_slice_starts = tf.placeholder( TENSOR_IDX_T, name='arg_wiring_slice_starts') self._arg_wiring_slice_sizes = tf.placeholder( TENSOR_IDX_T, name='arg_wiring_slice_sizes') self._output_wirings = [ tf.placeholder(TENSOR_IDX_T, name='output_wirings_%d' % ts_idx) for ts_idx in xrange(len(self._type_shapes))] self._constants = [ tf.placeholder(ts.dtype, name='constants_%d' % ts_idx) for ts_idx, ts in enumerate(self._type_shapes)] else: (self._arg_wiring_concat, self._arg_wiring_slice_starts, self._arg_wiring_slice_sizes, self._output_wirings, self._constants) = self._weaver_op( metadata=self._loom_metadata_str, constant_types=[tf.as_dtype(ts.dtype) for ts in self._type_shapes], num_type_shapes=len(self._type_shapes)) inputs = self._constants for ts_idx, ts in enumerate(self._type_shapes): if ts in self._batch_inputs: inputs[ts_idx] = self._batch_inputs[ts] state = [] for inputs_tensor, named_tensors in ( zip(inputs, self._ts_idx_to_named_tensors)): if not named_tensors: state.append(inputs_tensor) else: state.append(tf.concat([tf.stack(named_tensors), inputs_tensor], 0)) if self._max_depth == -1: current_max_depth = ( tf.size(self._arg_wiring_slice_starts) // self._loom_total_args) def loop_conditional(depth, *unused_state): return tf.less_equal(depth, current_max_depth) def loop_body(depth, *state): new_depth = tf.add(depth, 1, name='increment_depth') new_state = self._construct_loom_layer(depth, state) return [new_depth] + new_state initial_depth = tf.constant(1, name='initial_depth') state = tf.while_loop(loop_conditional, loop_body, [initial_depth] + state, parallel_iterations=self._parallel_iterations, back_prop=self._back_prop, swap_memory=self._swap_memory)[1:] else: for depth in xrange(1, self._max_depth+1): with tf.name_scope('loom_depth_%03d' % depth): state = self._construct_loom_layer(depth, state) with tf.name_scope('output_gathers'): self._output = [ tf.gather(s, w, name=self._type_shapes[ts_idx].tensor_flow_name()) for ts_idx, (s, w) in enumerate(zip(state, self._output_wirings))] for type_shape, output in zip(self._type_shapes, self._output): output.set_shape((None,) + type_shape.shape) def _construct_loom_layer(self, depth, state): new_state_segments = [[] for _ in state] start_wire_pos = (depth - 1) * self._loom_total_args wire_pos_offset = 0 for op_idx, op in enumerate(self._loom_ops): with tf.name_scope(self._loom_op_names[op_idx]): arg_inputs = [] for arg_idx, arg_ts in enumerate(op.input_type_shapes): with tf.name_scope('arg_%d' % arg_idx): wire_pos = start_wire_pos + wire_pos_offset wire_pos_offset += 1 slice_start = tf.slice( self._arg_wiring_slice_starts, [wire_pos], [1]) slice_size = tf.slice( self._arg_wiring_slice_sizes, [wire_pos], [1]) arg_wiring = tf.slice( self._arg_wiring_concat, slice_start, slice_size) arg_ts_idx = self._type_shape_to_idx[arg_ts] arg_input = tf.gather(state[arg_ts_idx], arg_wiring) arg_input.set_shape((None,) + arg_ts.shape) arg_inputs.append(arg_input) op_outputs = op.instantiate_batch(arg_inputs) for output_idx, (output, output_ts) in enumerate( 
zip(op_outputs, op.output_type_shapes)): if not isinstance(output, tf.Tensor): raise TypeError('Op %s returns non-Tensor output %r' % (self._loom_op_names[op_idx], output)) try: output.set_shape((None,) + output_ts.shape) except ValueError as e: raise ValueError('Op %s output %d: %s' % ( self._loom_op_names[op_idx], output_idx, e)) if output.dtype.base_dtype.name != output_ts.dtype: raise ValueError('Op %s output %d: expected dtype %s got %s' % ( self._loom_op_names[op_idx], output_idx, output_ts.dtype, output.dtype.base_dtype.name)) output_ts_idx = self._type_shape_to_idx[output_ts] new_state_segments[output_ts_idx].append(output) with tf.name_scope('concat'): return [ tf.concat( s, 0, name=self._type_shapes[ts_idx].tensor_flow_name()) for ts_idx, s in enumerate(new_state_segments) ] def output_tensor(self, type_shape): return self._output[self._type_shape_to_idx[type_shape]] @property def input_tensor(self): if self._direct_feed_dict: raise TypeError('This loom has direct_feed_dict set, ' 'so it has no input tensor') return self._loom_input_tensor @property def type_shapes(self): return self._type_shapes def make_weaver(self): return Weaver(self) def deserialize_weaver(self, serialized_weaver): deserialized = self.make_weaver() deserialized._deserialize(serialized_weaver) return deserialized class Weaver(object): def __init__(self, loom): if not isinstance(loom, Loom): raise TypeError('A weaver must be passed a Loom on construction.') self._loom = loom self._weaver = pywrapweaver.Weaver(self._loom._loom_metadata_str) if self._weaver.error_string(): raise AssertionError('Failed to create weaver: ', self._weaver.error_string()) self._constants = [[] for _ in self._loom._type_shapes] self._tensor_name_to_result = {} for ts_idx, names in enumerate(self._loom._ts_idx_to_tensor_names): for name_idx, name in enumerate(names): named_tensor = self._weaver.GetNamedTensor(ts_idx, name_idx) if named_tensor == -1: raise AssertionError( 'Failed to GetNamedTensor in Weaver wrapper for %s error: %s.' % (name, self._weaver.error_string())) self._tensor_name_to_result[name] = named_tensor for name, result in six.iteritems(self._tensor_name_to_result): self._safe_set_attr(name, result) for op_idx, op_name in enumerate(self._loom._loom_op_names): self._safe_set_attr(op_name, functools.partial(self._call_op_sugar, op_idx)) def _safe_set_attr(self, name, value): if hasattr(self, name): print('Warning: op or named tensor has the same name as a Weaver', 'attribute:', name) else: setattr(self, name, value) def _call_op_sugar(self, op_idx, *args): if not all(isinstance(a, six.integer_types) for a in args): raise TypeError('All args passed to call_op must be integers ' '(LoomResult ids.) 
Did you forget to call constant?') result = self._weaver.CallOp(op_idx, args) if not result: raise AssertionError('Weaver op call failed: %s' % self._weaver.error_string()) if len(result) == 1: return result[0] return result def _deserialize(self, weaver_message_str): if not self._weaver.Deserialize(weaver_message_str): raise AssertionError( 'Weaver Deserialization failed: %s' % self._weaver.error_string()) def serialize(self): serialization = self._weaver.Serialize() if not serialization: raise AssertionError( 'Weaver Serialization failed: %s' % self._weaver.error_string()) return serialization @property def deepest(self): return self._weaver.Deepest() def depth(self, result): depth = self._weaver.Depth(result) if depth == -1: raise AssertionError('Invalid LoomResult ID passed to depth.') return depth def get_type_shape(self, result): ts_idx = self._weaver.GetTypeShape(result) if ts_idx == -1: raise AssertionError('Invalid LoomResult ID passed to get_type_shape.') return self._loom._type_shapes[ts_idx]
Apache License 2.0
mpi4jax/mpi4jax
mpi4jax/_src/collective_ops/sendrecv.py
sendrecv
python
def sendrecv( sendbuf, recvbuf, source, dest, *, sendtag=0, recvtag=_MPI.ANY_TAG, comm=None, status=None, token=None, ): if token is None: token = create_token(sendbuf) if comm is None: comm = get_default_comm() comm = wrap_as_hashable(comm) if status is not None: status = wrap_as_hashable(status) return tuple( mpi_sendrecv_p.bind( sendbuf, recvbuf, token, source=source, dest=dest, sendtag=sendtag, recvtag=recvtag, comm=comm, status=status, _must_transpose=False, ) )
Perform a sendrecv operation. .. warning:: Unlike mpi4py's sendrecv, this returns a *new* array with the received data. Arguments: sendbuf: Array or scalar input to send. recvbuf: Array or scalar input with the correct shape and dtype. This can contain arbitrary data and will not be overwritten. source (int): Rank of the source MPI process. dest (int): Rank of the destination MPI process. sendtag (int): Tag of this message for sending. recvtag (int): Tag of this message for receiving. comm (mpi4py.MPI.Comm): The MPI communicator to use (defaults to a clone of :obj:`COMM_WORLD`). status (mpi4py.MPI.Status): Status object, can be used for introspection. token (Token): XLA token to use to ensure correct execution order. If not given, a new token is generated. Returns: Tuple[DeviceArray, Token]: - Received data. - A new, modified token, that depends on this operation.
https://github.com/mpi4jax/mpi4jax/blob/e3ed6f00a5552099f260c6b1f68588917461403b/mpi4jax/_src/collective_ops/sendrecv.py#L40-L102
import numpy as _np from mpi4py import MPI as _MPI from jax import abstract_arrays, core from jax.core import Primitive from jax.interpreters import ad, xla, batching from jax.lax import create_token from jax.lib import xla_client from ..utils import ( HashableMPIType, default_primitive_impl, to_dtype_handle, to_mpi_handle, to_mpi_ptr, unpack_hashable, wrap_as_hashable, xla_constant_intc, xla_constant_uintptr, ) from ..decorators import translation_rule_cpu, translation_rule_gpu from ..validation import enforce_types from ..comm import get_default_comm mpi_sendrecv_p = Primitive("sendrecv_mpi") mpi_sendrecv_impl = default_primitive_impl(mpi_sendrecv_p) @enforce_types( source=_np.integer, dest=_np.integer, sendtag=_np.integer, recvtag=_np.integer, comm=(type(None), _MPI.Intracomm, HashableMPIType), status=(type(None), _MPI.Status, HashableMPIType), token=(type(None), xla.Token, core.Tracer), )
MIT License
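A minimal usage sketch for the `sendrecv` record above, assuming mpi4jax and mpi4py are installed and the script is launched with exactly two MPI ranks (e.g. `mpirun -n 2 python demo.py`); the buffer shape and the 0/1 exchange pattern are illustrative.

from mpi4py import MPI
import jax.numpy as jnp
import mpi4jax

comm = MPI.COMM_WORLD
rank = comm.Get_rank()
other = 1 - rank                        # rank 0 exchanges with rank 1 and vice versa

sendbuf = jnp.full((3,), float(rank))   # data this rank sends
recvbuf = jnp.zeros((3,))               # only shape/dtype matter; contents are ignored

# Returns the received data as a new array plus a token that orders the MPI calls.
received, token = mpi4jax.sendrecv(sendbuf, recvbuf, source=other, dest=other, comm=comm)
print("rank", rank, "received", received)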
netmanaiops/logclass
decorators.py
print_step
python
def print_step(func): @functools.wraps(func) def wrapper_print_name(*args, **kwargs): print(f"Calling {func.__qualname__}") value = func(*args, **kwargs) return value return wrapper_print_name
Print the name of the function being called
https://github.com/netmanaiops/logclass/blob/62c1c9c61294625bdb3d99dc01b6adc7b735c4ab/decorators.py#L19-L26
import functools def debug(func): @functools.wraps(func) def wrapper_debug(*args, **kwargs): args_repr = [repr(a) for a in args] kwargs_repr = [f"{k}={v!r}" for k, v in kwargs.items()] signature = ", ".join(args_repr + kwargs_repr) print(f"Calling {func.__name__}({signature})") value = func(*args, **kwargs) print(f"{func.__name__!r} returned {value!r}") return value return wrapper_debug
MIT License
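A self-contained sketch of the decorator above in use; `extract_features` and its body are made-up names for illustration (the decorator is restated here so the snippet runs on its own).

import functools

def print_step(func):
    """Print the name of the function being called."""
    @functools.wraps(func)
    def wrapper_print_name(*args, **kwargs):
        print(f"Calling {func.__qualname__}")
        return func(*args, **kwargs)
    return wrapper_print_name

@print_step
def extract_features(lines):
    return [len(line) for line in lines]

extract_features(["error: disk full", "ok"])   # prints "Calling extract_features"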
shunichi09/pythonlinearnonlinearcontrol
PythonLinearNonlinearControl/envs/cartpole.py
CartPoleEnv.step
python
def step(self, u): if self.config["input_lower_bound"] is not None: u = np.clip(u, self.config["input_lower_bound"], self.config["input_upper_bound"]) d_x0 = self.curr_x[1] d_x1 = (u[0] + self.config["mp"] * np.sin(self.curr_x[2]) * (self.config["l"] * (self.curr_x[3]**2) + self.config["g"] * np.cos(self.curr_x[2]))) / (self.config["mc"] + self.config["mp"] * (np.sin(self.curr_x[2])**2)) d_x2 = self.curr_x[3] d_x3 = (-u[0] * np.cos(self.curr_x[2]) - self.config["mp"] * self.config["l"] * (self.curr_x[3]**2) * np.cos(self.curr_x[2]) * np.sin(self.curr_x[2]) - (self.config["mc"] + self.config["mp"]) * self.config["g"] * np.sin(self.curr_x[2])) / (self.config["l"] * (self.config["mc"] + self.config["mp"] * (np.sin(self.curr_x[2])**2))) next_x = self.curr_x + np.array([d_x0, d_x1, d_x2, d_x3]) * self.config["dt"] costs = 0. costs += 0.1 * np.sum(u**2) costs += 6. * self.curr_x[0]**2 + 12. * (np.cos(self.curr_x[2]) + 1.)**2 + 0.1 * self.curr_x[1]**2 + 0.1 * self.curr_x[3]**2 self.history_x.append(next_x.flatten()) self.history_g_x.append(self.g_x.flatten()) self.curr_x = next_x.flatten().copy() self.step_count += 1 return next_x.flatten(), costs, self.step_count > self.config["max_step"], {"goal_state": self.g_x}
Step the environment forward by one timestep. Args: u (numpy.ndarray) : input, shape(input_size, ) Returns: next_x (numpy.ndarray): next state, shape(state_size, ) cost (float): cost incurred at this step done (bool): whether the simulation has ended info (dict): additional information (contains the goal state)
https://github.com/shunichi09/pythonlinearnonlinearcontrol/blob/eb0bf0c78251e372a9db9fa6a888583a11d0ee12/PythonLinearNonlinearControl/envs/cartpole.py#L60-L120
import numpy as np from matplotlib.axes import Axes from .env import Env from ..plotters.plot_objs import square class CartPoleEnv(Env): def __init__(self): self.config = {"state_size": 4, "input_size": 1, "dt": 0.02, "max_step": 500, "input_lower_bound": [-3.], "input_upper_bound": [3.], "mp": 0.2, "mc": 1., "l": 0.5, "g": 9.81, "cart_size": (0.15, 0.1), } super(CartPoleEnv, self).__init__(self.config) def reset(self, init_x=None): self.step_count = 0 theta = np.random.randn(1) self.curr_x = np.array([0., 0., theta[0], 0.]) if init_x is not None: self.curr_x = init_x self.g_x = np.array([0., 0., -np.pi, 0.]) self.history_x = [] self.history_g_x = [] return self.curr_x, {"goal_state": self.g_x}
MIT License
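A short rollout sketch based on the `reset()`/`step()` API shown in the record above; the import path mirrors the repository layout and the zero-input policy is purely illustrative.

import numpy as np
from PythonLinearNonlinearControl.envs.cartpole import CartPoleEnv

env = CartPoleEnv()
state, info = env.reset()
total_cost, done = 0.0, False

while not done:
    u = np.zeros(1)                        # stand-in for a real controller
    state, cost, done, info = env.step(u)  # inputs are clipped to [-3, 3] internally
    total_cost += cost

print("episode cost:", total_cost, "goal state:", info["goal_state"])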
readthedocs/readthedocs.org
readthedocs/doc_builder/base.py
BaseBuilder.run
python
def run(self, *args, **kwargs): return self.build_env.run(*args, **kwargs)
Proxy run to build environment.
https://github.com/readthedocs/readthedocs.org/blob/2cff8376f0ef8f25ae6d8763bdbec86f47e33ab9/readthedocs/doc_builder/base.py#L142-L144
import logging import os import shutil from functools import wraps from readthedocs.projects.models import Feature log = logging.getLogger(__name__) def restoring_chdir(fn): @wraps(fn) def decorator(*args, **kw): try: path = os.getcwd() return fn(*args, **kw) finally: os.chdir(path) return decorator class BaseBuilder: _force = False ignore_patterns = [] old_artifact_path = None def __init__(self, build_env, python_env, force=False): self.build_env = build_env self.python_env = python_env self.version = build_env.version self.project = build_env.project self.config = python_env.config if python_env else None self._force = force self.project_path = self.project.checkout_path(self.version.slug) self.target = self.project.artifact_path( version=self.version.slug, type_=self.type, ) def get_final_doctype(self): return self.config.doctype def force(self, **__): log.info('Forcing a build') self._force = True def append_conf(self): pass def build(self): raise NotImplementedError def move(self, **__): if os.path.exists(self.old_artifact_path): if os.path.exists(self.target): shutil.rmtree(self.target) log.info('Copying %s on the local filesystem', self.type) log.debug('Ignoring patterns %s', self.ignore_patterns) shutil.copytree( self.old_artifact_path, self.target, ignore=shutil.ignore_patterns(*self.ignore_patterns), ) else: log.warning('Not moving docs, because the build dir is unknown.') def clean(self, **__): if os.path.exists(self.old_artifact_path): shutil.rmtree(self.old_artifact_path) log.info('Removing old artifact path: %s', self.old_artifact_path) def docs_dir(self, docs_dir=None, **__): if docs_dir: return docs_dir for doc_dir_name in ['docs', 'doc', 'Doc', 'book']: possible_path = os.path.join(self.project_path, doc_dir_name) if os.path.exists(possible_path): return possible_path return self.project_path def create_index(self, extension='md', **__): docs_dir = self.docs_dir() index_filename = os.path.join( docs_dir, 'index.{ext}'.format(ext=extension), ) if not os.path.exists(index_filename): readme_filename = os.path.join( docs_dir, 'README.{ext}'.format(ext=extension), ) if os.path.exists(readme_filename): return 'README' if not self.project.has_feature(Feature.DONT_CREATE_INDEX): index_text = """ Welcome to Read the Docs ------------------------ This is an autogenerated index file. Please create an ``index.{ext}`` or ``README.{ext}`` file with your own content under the root (or ``/docs``) directory in your repository. If you want to use another markup, choose a different builder in your settings. Check out our `Getting Started Guide <https://docs.readthedocs.io/en/latest/getting_started.html>`_ to become more familiar with Read the Docs. """ with open(index_filename, 'w+') as index_file: index_file.write(index_text.format(dir=docs_dir, ext=extension)) return 'index'
MIT License
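A self-contained toy illustrating the delegation pattern behind `BaseBuilder.run` above: the builder simply forwards command execution to whatever build environment it was given. The class names and the `echo` command are assumptions for the sketch, not Read the Docs APIs.

import subprocess

class LocalBuildEnvironment:
    """Stand-in for the real build environment object."""
    def run(self, *cmd, **kwargs):
        return subprocess.run(cmd, check=True, **kwargs)

class EchoBuilder:
    def __init__(self, build_env):
        self.build_env = build_env

    def run(self, *args, **kwargs):
        """Proxy run to the build environment."""
        return self.build_env.run(*args, **kwargs)

EchoBuilder(LocalBuildEnvironment()).run('echo', 'building docs')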
ganeti/ganeti
lib/storage/container.py
_LvmBase._RunListCommand
python
def _RunListCommand(args): result = utils.RunCmd(args) if result.failed: raise errors.StorageError("Failed to run %r, command output: %s" % (args[0], result.output)) return result.stdout
Run LVM command.
https://github.com/ganeti/ganeti/blob/4d21019c72cba4d746f5d17ca22098f4c7682e9c/lib/storage/container.py#L314-L324
import logging from ganeti import errors from ganeti import constants from ganeti import utils def _ParseSize(value): return int(round(float(value), 0)) class _Base(object): def List(self, name, fields): raise NotImplementedError() def Modify(self, name, changes): if changes: raise errors.ProgrammerError("Unable to modify the following" "fields: %r" % (list(changes), )) def Execute(self, name, op): raise NotImplementedError() class FileStorage(_Base): def __init__(self, paths): super(FileStorage, self).__init__() self._paths = paths def List(self, name, fields): rows = [] if name is None: paths = self._paths else: paths = [name] for path in paths: rows.append(self._ListInner(path, fields)) return rows @staticmethod def _ListInner(path, fields): values = [] if constants.SF_USED in fields: dirsize = utils.CalculateDirectorySize(path) else: dirsize = None if constants.SF_FREE in fields or constants.SF_SIZE in fields: fsstats = utils.GetFilesystemStats(path) else: fsstats = None for field_name in fields: if field_name == constants.SF_NAME: values.append(path) elif field_name == constants.SF_USED: values.append(dirsize) elif field_name == constants.SF_FREE: values.append(fsstats[1]) elif field_name == constants.SF_SIZE: values.append(fsstats[0]) elif field_name == constants.SF_ALLOCATABLE: values.append(True) else: raise errors.StorageError("Unknown field: %r" % field_name) return values class _LvmBase(_Base): LIST_SEP = "|" LIST_COMMAND = None LIST_FIELDS = None def List(self, name, wanted_field_names): lvm_fields = self._GetLvmFields(self.LIST_FIELDS, wanted_field_names) cmd_args = self._BuildListCommand(self.LIST_COMMAND, self.LIST_SEP, lvm_fields, name) cmd_result = self._RunListCommand(cmd_args) return self._BuildList(self._SplitList(cmd_result, self.LIST_SEP, len(lvm_fields)), self.LIST_FIELDS, wanted_field_names, lvm_fields) @staticmethod def _GetLvmFields(fields_def, wanted_field_names): field_to_idx = dict([(field_name, idx) for (idx, (field_name, _, _)) in enumerate(fields_def)]) lvm_fields = [] for field_name in wanted_field_names: try: idx = field_to_idx[field_name] except IndexError: raise errors.StorageError("Unknown field: %r" % field_name) (_, lvm_names, _) = fields_def[idx] lvm_fields.extend(lvm_names) return utils.UniqueSequence(lvm_fields) @classmethod def _BuildList(cls, cmd_result, fields_def, wanted_field_names, lvm_fields): lvm_name_to_idx = dict([(lvm_name, idx) for (idx, lvm_name) in enumerate(lvm_fields)]) field_to_idx = dict([(field_name, idx) for (idx, (field_name, _, _)) in enumerate(fields_def)]) data = [] for raw_data in cmd_result: row = [] for field_name in wanted_field_names: (_, lvm_names, mapper) = fields_def[field_to_idx[field_name]] values = [raw_data[lvm_name_to_idx[i]] for i in lvm_names] if callable(mapper): val = mapper(*values) elif len(values) == 1: assert mapper is None, ("Invalid mapper value (neither callable" " nor None) for one-element fields") val = values[0] else: assert not values, "LVM storage has multi-fields without a function" val = mapper row.append(val) data.append(row) return data @staticmethod def _BuildListCommand(cmd, sep, options, name): args = [cmd, "--noheadings", "--units=m", "--nosuffix", "--separator", sep, "--options", ",".join(options)] if name is not None: args.append(name) return args @staticmethod
BSD 2-Clause Simplified License
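A generic, standard-library sketch of the same run-and-raise-on-failure pattern as `_RunListCommand` above; it swaps ganeti's `utils.RunCmd` wrapper for `subprocess`, and the `vgs` invocation in the comment is only an example.

import subprocess

class StorageError(Exception):
    """Raised when an external storage command fails."""

def run_list_command(args):
    # Run the command, capture its output, and surface failures as StorageError.
    result = subprocess.run(args, capture_output=True, text=True)
    if result.returncode != 0:
        raise StorageError("Failed to run %r, command output: %s"
                           % (args[0], result.stderr))
    return result.stdout

# e.g. run_list_command(["vgs", "--noheadings", "--units=m", "--nosuffix",
#                        "--separator", "|", "--options", "vg_name,vg_size,vg_free"])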
flapjax/flapjack-cogs
blizzard/blizzard.py
Blizzard.battletag
python
async def battletag(self, ctx): pass
Change your battletag settings.
https://github.com/flapjax/flapjack-cogs/blob/8a1a20f86fec36f60899dbbf91f2f77eee191786/blizzard/blizzard.py#L199-L201
import asyncio import re from copy import copy from numbers import Number import aiohttp import bleach import discord from bs4 import BeautifulSoup from redbot.core import Config, checks, commands from discord.ext.commands import formatter class Blizzard(commands.Cog): default_global_settings = { "notes_format": "paged", "notes_timeout": 60 } def __init__(self, bot: commands.Bot): self.bot = bot self.conf = Config.get_conf(self, identifier=62869734) self.conf.register_global( **self.default_global_settings ) self.session = aiohttp.ClientSession() self.base_url = 'https://us.battle.net/connect/en/app/' self.product_url = '/patch-notes?productType=' self.wowtoken_url = 'http://wowtokenprices.com' self.patch_urls = { 'hearthstone': 'https://us.battle.net/hearthstone/en/blog/', 'overwatch': 'https://playoverwatch.com/en-us/game/patch-notes/pc/', 'starcraft2': 'https://us.battle.net/sc2/en/game/patch-notes/', 'warcraft': 'https://us.battle.net/wow/en/game/patch-notes/', 'diablo3': 'https://us.battle.net/d3/en/game/patch-notes/', 'hots': 'https://us.battle.net/heroes/en/blog/' } self.header = {"User-Agent": "flapjackcogs/1.0"} self.patch_header = {'User-Agent': 'Battle.net/1.0.8.4217'} self.abbr = { 'hearthstone': 'wtcg', 'overwatch': 'Pro', 'starcraft2': 'sc2', 'warcraft': 'WoW', 'diablo3': 'd3', 'hots': 'heroes' } self.thumbs = { 'hearthstone': 'http://i.imgur.com/uK0AlGb.png', 'overwatch': 'https://i.imgur.com/YZ4w2ey.png', 'starcraft2': 'https://i.imgur.com/ErDVIMO.png', 'warcraft': 'https://i.imgur.com/nrGZdB7.png', 'diablo3': 'https://i.imgur.com/5WYDHHZ.png', 'hots': 'https://i.imgur.com/NSMkOsA.png' } self.emoji = { "next": "\N{BLACK RIGHTWARDS ARROW}", "back": "\N{LEFTWARDS BLACK ARROW}", "no": "\N{CROSS MARK}" } self.expired_embed = discord.Embed(title="This menu has exipred due " "to inactivity.") def cog_unload(self): self.bot.loop.create_task(self.session.close()) async def red_delete_data_for_user(self, **kwargs): return async def show_menu(self, ctx, message, messages, page): if message: await message.edit(content=messages[page]) return message else: return await ctx.send(messages[page]) async def _info_menu(self, ctx, messages, **kwargs): page = kwargs.get("page", 0) timeout = kwargs.get("timeout", 60) emoji = kwargs.get("emoji", self.emoji) message = kwargs.get("message", None) choices = len(messages) reactions_needed = True if message is None else False message = await self.show_menu(ctx, message, messages, page) if reactions_needed: await message.add_reaction(str(emoji['back'])) await message.add_reaction(str(emoji['no'])) await message.add_reaction(str(emoji['next'])) def check(r, u): return r.message.id == message.id and u == ctx.message.author try: (r, u) = await self.bot.wait_for('reaction_add', check=check, timeout=timeout) except asyncio.TimeoutError: return [None, message] reacts = {v: k for k, v in emoji.items()} react = reacts[r.emoji] if react == "next": page += 1 elif react == "back": page -= 1 elif react == "no": return ["no", message] if page < 0: page = choices - 1 if page == choices: page = 0 try: await message.remove_reaction(emoji[react], u) except discord.errors.Forbidden: await ctx.send('I require the "manage messages" permission ' 'to make these menus work.') return ["no", message] return await self._info_menu( ctx, messages, page=page, timeout=timeout, emoji=emoji, message=message) def dictgrab(self, my_dict, *keys): temp_dict = copy(my_dict) for key in keys: temp_dict = temp_dict.get(key) if temp_dict is None: return '-' if isinstance(temp_dict, Number): 
return str(round(temp_dict)) else: return '-' @commands.group(name="blizzard") async def blizzard(self, ctx): pass @blizzard.command(name="apikey") @checks.is_owner() async def _apikey_blizzard(self, ctx, key: str): await self.conf.apikey.set(key) await ctx.send('API key set.') @blizzard.command(name="noteformat") @checks.is_owner() async def _noteformat_blizzard(self, ctx, form: str): accept = ['paged', 'full', 'embed'] if form in accept: await self.conf.notes_format.set(form) await ctx.send("Patch notes format set to `{}`.".format(form)) else: await ctx.send("`{}` is not a valid format. Please choose " "`{}`, `{}`, or `{}`.".format(form, accept[0], accept[1], accept[2])) @blizzard.command(name="notetimeout") @checks.is_owner() async def _notetimeout_blizzard(self, ctx, timeout: int): min_max = (5, 3600) if min_max[0] <= timeout <= min_max[1]: await self.conf.notes_timeout.set(timeout) await ctx.send("Timeout period set to `{} sec`.".format(timeout)) else: await ctx.send("Please choose a duration between " "{} and {} seconds.".format(min_max[0], min_max[1])) @commands.group(name="battletag")
MIT License
project-rig/nengo_spinnaker
nengo_spinnaker/operators/lif.py
EnsembleLIF.make_vertices
python
def make_vertices(self, model, n_steps): params = model.params[self.ensemble] self.regions = ens_regions = dict() incoming = model.get_signals_to_object(self) assert EnsembleInputPort.neurons not in incoming incoming_modulatory = {port: signal for (port, signal) in iteritems(incoming) if isinstance(port, LearningRule)} (ens_regions[Regions.input_filters], ens_regions[Regions.input_routing]) = make_filter_regions( incoming[InputPort.standard], model.dt, True, model.keyspaces.filter_routing_tag, width=self.ensemble.size_in ) (ens_regions[Regions.inhibition_filters], ens_regions[Regions.inhibition_routing]) = make_filter_regions( incoming[EnsembleInputPort.global_inhibition], model.dt, True, model.keyspaces.filter_routing_tag, width=1 ) outgoing = model.get_signals_from_object(self) assert EnsembleOutputPort.neurons not in outgoing if OutputPort.standard in outgoing: decoders, output_keys = get_decoders_and_keys(outgoing[OutputPort.standard], True) else: decoders = np.array([]) output_keys = list() size_out = decoders.shape[0] ens_regions[Regions.decoders] = regions.MatrixRegion( tp.np_to_fix(decoders / model.dt), sliced_dimension=regions.MatrixPartitioning.rows) ens_regions[Regions.keys] = regions.KeyspacesRegion( output_keys, fields=[regions.KeyField({'cluster': 'cluster'})], partitioned_by_atom=True ) encoders_with_gain = params.scaled_encoders size_in = encoders_with_gain.shape[1] ens_regions[Regions.filtered_activity] = FilteredActivityRegion(model.dt) ens_regions[Regions.pes] = PESRegion(self.ensemble.n_neurons) mod_filters = list() mod_keyspace_routes = list() learnt_decoders = np.array([]) learnt_output_keys = list() for sig, t_params in outgoing[EnsembleOutputPort.learnt]: l_rule = t_params.learning_rule l_rule_type = t_params.learning_rule.learning_rule_type if isinstance(l_rule_type, PES): if l_rule in incoming_modulatory: e = incoming_modulatory[l_rule] error_filter_index = len(mod_filters) decoder_start = learnt_decoders.shape[0] rule_decoders, rule_output_keys = get_decoders_and_keys([(sig, t_params)], False) if decoder_start == 0: learnt_decoders = rule_decoders else: learnt_decoders = np.vstack( (learnt_decoders, rule_decoders)) decoder_stop = learnt_decoders.shape[0] learnt_output_keys.extend(rule_output_keys) mod_filters, mod_keyspace_routes = add_filters( mod_filters, mod_keyspace_routes, e, minimise=False) activity_filter_index = -1 ens_regions[Regions.pes].learning_rules.append( PESLearningRule( learning_rate=l_rule_type.learning_rate / model.dt, error_filter_index=error_filter_index, decoder_start=decoder_start, decoder_stop=decoder_stop, activity_filter_index=activity_filter_index)) else: raise ValueError( "Ensemble %s has outgoing connection with PES " "learning, but no corresponding modulatory " "connection" % self.ensemble.label ) else: raise NotImplementedError( "SpiNNaker does not support %s learning rule." 
% l_rule_type ) size_learnt_out = learnt_decoders.shape[0] ens_regions[Regions.learnt_decoders] = regions.MatrixRegion( tp.np_to_fix(learnt_decoders / model.dt), sliced_dimension=regions.MatrixPartitioning.rows) ens_regions[Regions.learnt_keys] = regions.KeyspacesRegion( learnt_output_keys, fields=[regions.KeyField({'cluster': 'cluster'})], partitioned_by_atom=True ) ens_regions[Regions.voja] = VojaRegion(1.0 / self.ensemble.radius) learnt_encoder_filters = list() learnt_encoder_routes = list() for sig, t_params in incoming[EnsembleInputPort.learnt]: l_rule = t_params.learning_rule l_rule_type = t_params.learning_rule.learning_rule_type if isinstance(l_rule_type, Voja): if l_rule in incoming_modulatory: l = incoming_modulatory[l_rule] learn_sig_filter_index = len(mod_filters) mod_filters, mod_keyspace_routes = add_filters( mod_filters, mod_keyspace_routes, l, minimise=False) else: learn_sig_filter_index = -1 decoded_input_filter_index = len(learnt_encoder_filters) encoder_offset = encoders_with_gain.shape[1] base_encoders = encoders_with_gain[:, :self.ensemble.size_in] encoders_with_gain = np.hstack((encoders_with_gain, base_encoders)) learnt_encoder_filters, learnt_encoder_routes = add_filters( learnt_encoder_filters, learnt_encoder_routes, [(sig, t_params)], minimise=False) activity_filter_index = -1 ens_regions[Regions.voja].learning_rules.append( VojaLearningRule( learning_rate=l_rule_type.learning_rate, learning_signal_filter_index=learn_sig_filter_index, encoder_offset=encoder_offset, decoded_input_filter_index=decoded_input_filter_index, activity_filter_index=activity_filter_index)) else: raise NotImplementedError( "SpiNNaker does not support %s learning rule." % l_rule_type ) ens_regions[Regions.encoders] = regions.MatrixRegion( tp.np_to_fix(encoders_with_gain), sliced_dimension=regions.MatrixPartitioning.rows) tiled_direct_input = np.tile( self.direct_input, encoders_with_gain.shape[1] // self.ensemble.size_in) bias_with_di = params.bias + np.dot(encoders_with_gain, tiled_direct_input) assert bias_with_di.ndim == 1 ens_regions[Regions.bias] = regions.MatrixRegion( tp.np_to_fix(bias_with_di), sliced_dimension=regions.MatrixPartitioning.rows) ens_regions[Regions.gain] = regions.MatrixRegion( tp.np_to_fix(params.gain), sliced_dimension=regions.MatrixPartitioning.rows) ens_regions[Regions.modulatory_filters] = FilterRegion(mod_filters, model.dt) ens_regions[Regions.modulatory_routing] = FilterRoutingRegion(mod_keyspace_routes, model.keyspaces.filter_routing_tag) ens_regions[Regions.learnt_encoder_filters] = FilterRegion(learnt_encoder_filters, model.dt) ens_regions[Regions.learnt_encoder_routing] = FilterRoutingRegion(learnt_encoder_routes, model.keyspaces.filter_routing_tag) ens_regions[Regions.population_length] = regions.ListRegion("I") n_learnt_input_signals = len(learnt_encoder_filters) ens_regions[Regions.ensemble] = EnsembleRegion( model.machine_timestep, self.ensemble.size_in, encoders_with_gain.shape[1], n_learnt_input_signals) ens_regions[Regions.neuron] = LIFRegion( model.dt, self.ensemble.neuron_type.tau_rc, self.ensemble.neuron_type.tau_ref ) n_profiler_samples = 0 self.profiled = getconfig(model.config, self.ensemble, "profile", False) if self.profiled: n_profiler_samples = getconfig(model.config, self.ensemble, "profile_num_samples") if n_profiler_samples is None: n_profiler_samples = (len(EnsembleSlice.profiler_tag_names) * n_steps * 2) ens_regions[Regions.profiler] = regions.Profiler(n_profiler_samples) ens_regions[Regions.ensemble].n_profiler_samples = 
n_profiler_samples for probe in self.local_probes: if probe.attr in ("output", "spikes"): self.record_spikes = True elif probe.attr == "voltage": self.record_voltages = True elif probe.attr == "scaled_encoders": self.record_encoders = True else: raise NotImplementedError( "Cannot probe {} on Ensembles".format(probe.attr) ) ens_regions[Regions.ensemble].record_spikes = self.record_spikes ens_regions[Regions.ensemble].record_voltages = self.record_voltages ens_regions[Regions.ensemble].record_encoders = self.record_encoders self.learnt_enc_dims = (encoders_with_gain.shape[1] - self.ensemble.size_in) ens_regions[Regions.spike_recording] = regions.SpikeRecordingRegion(n_steps if self.record_spikes else 0) ens_regions[Regions.voltage_recording] = regions.VoltageRecordingRegion(n_steps if self.record_voltages else 0) ens_regions[Regions.encoder_recording] = regions.EncoderRecordingRegion(n_steps if self.record_encoders else 0, self.learnt_enc_dims) cycles = 200 * model.machine_timestep cpu_constraint = partition.Constraint(cycles, 0.4) dtcm_constraint = partition.Constraint(56*2**10, 0.75) cluster_usage = ClusterResourceUsage( size_in, size_out, size_learnt_out ) partition_constraints = {dtcm_constraint: cluster_usage.dtcm_usage, cpu_constraint: cluster_usage.cpu_usage} self.clusters = list() vertices = list() constraints = list() for sl in partition.partition(slice(0, self.ensemble.n_neurons), partition_constraints): cluster = EnsembleCluster(sl, self.ensemble.size_in, encoders_with_gain.shape[1], size_out, size_learnt_out, n_learnt_input_signals, ens_regions) self.clusters.append(cluster) cluster_vertices = cluster.make_vertices(cycles) vertices.extend(cluster_vertices) constraints.append(SameChipConstraint(cluster_vertices)) return netlistspec(vertices, self.load_to_machine, after_simulation_function=self.after_simulation, constraints=constraints)
Construct the data which can be loaded into the memory of a SpiNNaker machine.
https://github.com/project-rig/nengo_spinnaker/blob/8afde11ee265c070e7003f25d3c06f5138ec3b05/nengo_spinnaker/operators/lif.py#L86-L450
import collections import enum import itertools import math from nengo.base import ObjView from nengo.connection import LearningRule from nengo.learning_rules import PES, Voja import numpy as np from rig.place_and_route import Cores, SDRAM from rig.place_and_route.constraints import SameChipConstraint from six import iteritems, itervalues import struct from nengo_spinnaker.builder.netlist import netlistspec from nengo_spinnaker.builder.ports import ( EnsembleInputPort, EnsembleOutputPort, InputPort, OutputPort ) from nengo_spinnaker.regions.filters import (FilterRegion, FilterRoutingRegion, add_filters, make_filter_regions) from nengo_spinnaker.regions.utils import Args from .. import regions from nengo_spinnaker.netlist import Vertex from nengo_spinnaker import partition from nengo_spinnaker.utils.application import get_application from nengo_spinnaker.utils.config import getconfig from nengo_spinnaker.utils import type_casts as tp from nengo_spinnaker.utils import neurons as neuron_utils class Regions(enum.IntEnum): ensemble = 1 neuron = 2 encoders = 3 bias = 4 gain = 5 decoders = 6 learnt_decoders = 7 keys = 8 learnt_keys = 9 population_length = 10 input_filters = 11 input_routing = 12 inhibition_filters = 13 inhibition_routing = 14 modulatory_filters = 15 modulatory_routing = 16 learnt_encoder_filters = 17 learnt_encoder_routing = 18 pes = 19 voja = 20 filtered_activity = 21 profiler = 22 spike_recording = 23 voltage_recording = 24 encoder_recording = 25 RoutingRegions = (Regions.input_routing, Regions.inhibition_routing, Regions.modulatory_routing, Regions.learnt_encoder_routing) class EnsembleLIF(object): def __init__(self, ensemble): self.ensemble = ensemble self.direct_input = np.zeros(ensemble.size_in) self.local_probes = list() self.profiled = False self.record_spikes = False self.record_voltages = False self.record_encoders = False
MIT License
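An illustrative, standalone sketch (not nengo_spinnaker's API) of the constraint-driven partitioning used at the end of `make_vertices`: the neuron range is split into slices whose DTCM and CPU usage stay within a target fraction of the per-core budget. All names and numbers below are assumptions.

def partition_neurons(n_neurons, usage_per_neuron, budgets, target_fraction=0.75):
    """Yield slices of range(n_neurons) whose per-slice usage fits the budgets."""
    # Largest slice size permitted by the tightest resource constraint.
    max_per_slice = min(int(target_fraction * budgets[k] / usage_per_neuron[k])
                        for k in budgets)
    max_per_slice = max(1, max_per_slice)
    for start in range(0, n_neurons, max_per_slice):
        yield slice(start, min(start + max_per_slice, n_neurons))

# Hypothetical per-neuron costs against a 56 KiB DTCM / 200k-cycle budget:
slices = list(partition_neurons(
    1000,
    usage_per_neuron={"dtcm": 400, "cpu": 150},
    budgets={"dtcm": 56 * 2**10, "cpu": 200000}))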
neuralensemble/python-neo
neo/io/klustakwikio.py
FilenameParser.read_filenames
python
def read_filenames(self, typestring='fet'): all_filenames = glob.glob(os.path.join(self.dirname, '*')) d = {} for v in all_filenames: split_fn = os.path.split(v)[1] m = glob.re.search((r'^(\w+)\.%s\.(\d+)$' % typestring), split_fn) if m is not None: if self.basename is None: self.basename = m.group(1) if self.basename == m.group(1): tetn = int(m.group(2)) d[tetn] = v return d
Returns filenames in the data directory matching the type. Generally, `typestring` is one of the following: 'fet', 'clu', 'spk', 'res' Returns a dict {group_number: filename}, e.g.: { 0: 'basename.fet.0', 1: 'basename.fet.1', 2: 'basename.fet.2'} 'basename' can be any string not containing whitespace. Only filenames that begin with "basename.typestring." and end with a sequence of digits are valid. The digits are converted to an integer and used as the group number.
https://github.com/neuralensemble/python-neo/blob/889060c022a56b9c3122afee68cbd5d83e4abe78/neo/io/klustakwikio.py#L426-L464
import glob import logging import os.path import shutil import numpy as np try: import matplotlib.mlab as mlab except ImportError as err: HAVE_MLAB = False MLAB_ERR = err else: HAVE_MLAB = True MLAB_ERR = None from neo.io.baseio import BaseIO from neo.core import Block, Segment, Group, SpikeTrain class KlustaKwikIO(BaseIO): is_readable = True is_writable = True supported_objects = [Block, SpikeTrain] readable_objects = [Block] writeable_objects = [Block] has_header = False is_streameable = False read_params = {} write_params = {} name = 'KlustaKwik' extensions = ['fet', 'clu', 'res', 'spk'] mode = 'file' def __init__(self, filename, sampling_rate=30000.): if not HAVE_MLAB: raise MLAB_ERR BaseIO.__init__(self) self.filename, self.basename = os.path.split(os.path.abspath(filename)) self.sampling_rate = float(sampling_rate) if not os.path.isdir(self.filename): raise ValueError("filename must be a directory") self._fp = FilenameParser(dirname=self.filename, basename=self.basename) def read_block(self, lazy=False): assert not lazy, 'Do not support lazy' block = Block() self._fetfiles = self._fp.read_filenames('fet') self._clufiles = self._fp.read_filenames('clu') if len(self._fetfiles) == 0: return block seg = Segment(name='seg0', index=0, file_origin=self.filename) block.segments.append(seg) self.spiketrains = dict() for group in sorted(self._fetfiles.keys()): fetfile = self._fetfiles[group] spks, features = self._load_spike_times(fetfile) if group in self._clufiles: clufile = self._clufiles[group] uids = self._load_unit_id(clufile) else: uids = np.zeros(spks.shape, dtype=np.int32) if len(spks) != len(uids): raise ValueError("lengths of fet and clu files are different") unique_unit_ids = np.unique(uids) for unit_id in sorted(unique_unit_ids): u = Group(name=('unit %d from group %d' % (unit_id, group)), index=unit_id, group=group) st = SpikeTrain( times=spks[uids == unit_id] / self.sampling_rate, units='sec', t_start=0.0, t_stop=spks.max() / self.sampling_rate, name=('unit %d from group %d' % (unit_id, group))) st.annotations['cluster'] = unit_id st.annotations['group'] = group if len(features) != 0: st.annotations['waveform_features'] = features u.add(st) seg.spiketrains.append(st) block.create_many_to_one_relationship() return block def _load_spike_times(self, fetfilename): with open(fetfilename, mode='r') as f: nbFeatures = int(f.readline().strip()) names = ['fet%d' % n for n in range(nbFeatures)] names.append('spike_time') data = np.recfromtxt(fetfilename, names=names, skip_header=1, delimiter=' ') features = np.array([data['fet%d' % n] for n in range(nbFeatures)]) return data['spike_time'], features.transpose() def _load_unit_id(self, clufilename): with open(clufilename, mode='r') as f: nbClusters = int(f.readline().strip()) cluster_names = f.readlines() try: cluster_ids = [int(name) for name in cluster_names] except ValueError: raise ValueError( "Could not convert cluster name to integer in %s" % clufilename) cluster_ids = np.array(cluster_ids, dtype=np.int32) if len(np.unique(cluster_ids)) != nbClusters: logging.warning("warning: I got %d clusters instead of %d in %s" % ( len(np.unique(cluster_ids)), nbClusters, clufilename)) return cluster_ids def write_block(self, block): if self.basename is None: logging.warning("warning: no basename provided, using `basename`") self.basename = 'basename' self._make_all_file_handles(block) self._group2features = {} for seg in block.segments: for st in seg.spiketrains: group = self.st2group(st) fetfilehandle = self._fetfilehandles[group] clufilehandle 
= self._clufilehandles[group] cluster = self.st2cluster(st) try: sr = st.annotations['sampling_rate'] except KeyError: sr = self.sampling_rate spike_times_in_samples = np.rint( np.array(st) * sr).astype(np.int64) try: all_features = st.annotations['waveform_features'] except KeyError: all_features = [ [] for _ in range(len(spike_times_in_samples))] all_features = np.asarray(all_features) if all_features.ndim != 2: raise ValueError("waveform features should be 2d array") try: n_features = self._group2features[group] except KeyError: n_features = all_features.shape[1] self._group2features[group] = n_features fetfilehandle.write("%d\n" % n_features) if n_features != all_features.shape[1]: raise ValueError("inconsistent number of features: " + "supposed to be %d but I got %d" % (n_features, all_features.shape[1])) for stt, features in zip(spike_times_in_samples, all_features): for val in features: fetfilehandle.write(str(val)) fetfilehandle.write(" ") fetfilehandle.write("%d\n" % stt) clufilehandle.write("%d\n" % cluster) self._close_all_files() def st2group(self, st): try: return st.annotations['group'] except KeyError: return 0 def st2cluster(self, st): try: return st.annotations['cluster'] except KeyError: return 0 def _make_all_file_handles(self, block): group2clusters = {} for seg in block.segments: for st in seg.spiketrains: group = self.st2group(st) cluster = self.st2cluster(st) if group in group2clusters: if cluster not in group2clusters[group]: group2clusters[group].append(cluster) else: group2clusters[group] = [cluster] self._fetfilehandles, self._clufilehandles = {}, {} for group, clusters in group2clusters.items(): self._new_group(group, nbClusters=len(clusters)) def _new_group(self, id_group, nbClusters): fetfilename = os.path.join(self.filename, self.basename + ('.fet.%d' % id_group)) clufilename = os.path.join(self.filename, self.basename + ('.clu.%d' % id_group)) if os.path.exists(fetfilename): shutil.copyfile(fetfilename, fetfilename + '~') if os.path.exists(clufilename): shutil.copyfile(clufilename, clufilename + '~') self._fetfilehandles[id_group] = open(fetfilename, mode='w') self._clufilehandles[id_group] = open(clufilename, mode='w') self._clufilehandles[id_group].write("%d\n" % nbClusters) def _close_all_files(self): for val in self._fetfilehandles.values(): val.close() for val in self._clufilehandles.values(): val.close() class FilenameParser: def __init__(self, dirname, basename=None): self.dirname = os.path.normpath(dirname) self.basename = basename if not os.path.isdir(self.dirname): raise ValueError("filename must be a directory")
BSD 3-Clause New or Revised License
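A usage sketch for the filename convention documented above: create a few empty `basename.fet.N` files and let `FilenameParser` map group numbers to paths. The temporary directory and the `session1` basename are made up; the import assumes the module path given in the record.

import os
import tempfile

from neo.io.klustakwikio import FilenameParser

tmpdir = tempfile.mkdtemp()
for group in (0, 1, 2):
    open(os.path.join(tmpdir, 'session1.fet.%d' % group), 'w').close()

parser = FilenameParser(dirname=tmpdir)   # basename is inferred from the files
print(parser.read_filenames('fet'))       # {0: '.../session1.fet.0', 1: ..., 2: ...}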
paddlepaddle/paddle
python/paddle/fluid/contrib/mixed_precision/fp16_utils.py
fp16_guard
python
def fp16_guard(): with framework.name_scope(prefix=_fp16_guard_pattern): yield
For pure fp16 training, if users set `use_fp16_guard` to True, only the ops created inside the `fp16_guard` context manager will be cast to float16. Examples: .. code-block:: python import numpy as np import paddle import paddle.nn.functional as F paddle.enable_static() data = paddle.static.data(name='X', shape=[None, 1, 28, 28], dtype='float32') conv2d = paddle.static.nn.conv2d(input=data, num_filters=6, filter_size=3) with paddle.static.amp.fp16_guard(): bn = paddle.static.nn.batch_norm(input=conv2d, act="relu") pool = F.max_pool2d(bn, kernel_size=2, stride=2) hidden = paddle.static.nn.fc(pool, size=10) loss = paddle.mean(hidden)
https://github.com/paddlepaddle/paddle/blob/056b87414880e0520bb4560fc40d5b62db9c5175/python/paddle/fluid/contrib/mixed_precision/fp16_utils.py#L334-L357
from __future__ import print_function from ... import core from ... import framework from ... import layers from ... import global_scope from ...log_helper import get_logger from ...wrapped_decorator import signature_safe_contextmanager from .fp16_lists import AutoMixedPrecisionLists import collections import logging import numpy as np __all__ = ["fp16_guard", "cast_model_to_fp16", "cast_parameters_to_fp16"] _logger = get_logger( __name__, logging.INFO, fmt='%(asctime)s-%(levelname)s: %(message)s') _valid_types = [ core.VarDesc.VarType.LOD_TENSOR, core.VarDesc.VarType.SELECTED_ROWS, core.VarDesc.VarType.LOD_TENSOR_ARRAY ] _fp16_guard_pattern = "__use_fp16__" def _rename_arg(op, old_name, new_name): op_desc = op.desc if isinstance(op_desc, tuple): op_desc = op_desc[0] op_desc._rename_input(old_name, new_name) op_desc._rename_output(old_name, new_name) def _rename_op_input(program, op_var_rename_map, origin_ops, keep_fp32_ops): for block in program.blocks: ops = block.ops block_id = block.idx for op in ops: if op not in origin_ops or op in keep_fp32_ops: continue for name in op.input_arg_names: if name in op_var_rename_map[block_id]: op._rename_input(name, op_var_rename_map[block_id][name]) def _dtype_to_str(dtype): if dtype == core.VarDesc.VarType.FP16: return 'fp16' else: return 'fp32' def _keep_fp32_input(op, in_name): op_type = op.type if op_type in ['batch_norm', 'layer_norm']: return in_name != 'X' if op_type == 'fused_bn_add_activation': return in_name not in {'X', 'Z'} if op_type == 'resnet_unit': return in_name not in {'X', 'FilterX', 'Z', 'FilterZ'} return False def _keep_fp32_output(op, out_name): op_type = op.type if op_type in ['batch_norm', 'fused_bn_add_activation', 'layer_norm']: return out_name != 'Y' if op_type == 'resnet_unit': return out_name not in {'Y', 'ConvX', 'ConvZ'} return False def _insert_cast_op(block, op, idx, src_dtype, dest_dtype): num_cast_ops = 0 for in_name in op.input_names: if src_dtype == core.VarDesc.VarType.FP32 and _keep_fp32_input(op, in_name): continue for in_var_name in op.input(in_name): in_var = block._find_var_recursive(in_var_name) if in_var.type not in _valid_types or in_var.dtype == dest_dtype: continue if in_var.dtype == src_dtype: cast_name = in_var.name + '.cast_' + _dtype_to_str(dest_dtype) out_var = block.vars.get(cast_name) if out_var is None or out_var.dtype != dest_dtype: op_device = op.attr('op_device') if src_dtype == core.VarDesc.VarType.FP32 and in_var.stop_gradient: prev_op = None if in_var.op is op: prev_op = find_true_prev_op(block.ops, op, in_var_name) elif in_var.op is not None: prev_op = in_var.op prev_op_device = None if prev_op is not None: prev_op_device = prev_op.attr('op_device') if prev_op_device is not None and 'all' in prev_op_device: op_device = prev_op_device out_var = block.create_var( name=cast_name, dtype=dest_dtype, persistable=False, stop_gradient=in_var.stop_gradient) block._insert_op_without_sync( idx, type="cast", inputs={"X": in_var}, outputs={"Out": out_var}, attrs={ "in_dtype": in_var.dtype, "out_dtype": out_var.dtype, "op_device": op_device }) num_cast_ops += 1 _rename_arg(op, in_var.name, out_var.name) else: if op.has_attr('in_dtype'): op._set_attr('in_dtype', dest_dtype) if src_dtype == core.VarDesc.VarType.FP32 and dest_dtype == core.VarDesc.VarType.FP16: for out_name in op.output_names: if _keep_fp32_output(op, out_name): continue for out_var_name in op.output(out_name): out_var = block.var(out_var_name) if out_var.type not in _valid_types: continue if out_var.dtype == core.VarDesc.VarType.FP32: 
out_var.desc.set_dtype(core.VarDesc.VarType.FP16) if op.has_attr('out_dtype'): op._set_attr('out_dtype', core.VarDesc.VarType.FP16) return num_cast_ops def _insert_cast_post_op(block, op, idx, src_dtype, dest_dtype, target_name, op_var_rename_map): num_cast_ops = 0 target_var = block.var(target_name) if target_var.type not in _valid_types or target_var.dtype == dest_dtype: return num_cast_ops assert target_var.dtype == src_dtype, "The real dtype({}) is not equal to the src dtype({})".format( _dtype_to_str(target_var.dtype), _dtype_to_str(src_dtype)) cast_name = target_var.name + '.cast_' + _dtype_to_str(dest_dtype) cast_var = block.vars.get(cast_name) if cast_var is None or cast_var.dtype != dest_dtype: cast_var = block.create_var( name=cast_name, dtype=dest_dtype, persistable=False, stop_gradient=target_var.stop_gradient) block._insert_op( idx, type="cast", inputs={"X": target_var}, outputs={"Out": cast_var}, attrs={ "in_dtype": target_var.dtype, "out_dtype": cast_var.dtype, "op_device": op.attr("op_device") }) num_cast_ops += 1 op_var_rename_map[block.idx][target_var.name] = cast_var.name return num_cast_ops def find_true_prev_op(ops, cur_op, var_name): prev_op = [] for op in ops: if op == cur_op: break for out_name in op.output_names: for out_var_name in op.output(out_name): if out_var_name == var_name: prev_op.append(op) if prev_op: if not len(prev_op) == 1: raise ValueError("There must be only one previous op " "that outputs {0} variable".format(var_name)) else: return prev_op[0] return None def find_true_post_op(ops, cur_op, var_name, search_all=False): post_op = [] if search_all: idx = -1 else: for idx, op in enumerate(ops): if op == cur_op: break for i in range(idx + 1, len(ops)): op = ops[i] for in_name in op.input_names: for in_var_name in op.input(in_name): if in_var_name == var_name: post_op.append(op) return post_op def find_op_index(block_desc, cur_op_desc): for idx in range(block_desc.op_size()): if cur_op_desc == block_desc.op(idx): return idx return -1 def _is_in_black_varnames(op, amp_lists): for in_name in op.input_arg_names: if in_name in amp_lists.black_varnames: return True for out_name in op.output_arg_names: if out_name in amp_lists.black_varnames: return True return False def _need_keep_fp32(op, unsupported_op_list, use_fp16_guard): if op.type in unsupported_op_list: return True in_out_arg_names = [] in_out_arg_names.extend(list(op.input_arg_names)) in_out_arg_names.extend(list(op.output_arg_names)) for name in in_out_arg_names: if "learning_rate" in name: return True if use_fp16_guard: if op.has_attr("op_namescope") and (_fp16_guard_pattern in op.attr("op_namescope")): return False else: return True else: return False @signature_safe_contextmanager
Apache License 2.0
mklan/nx-rom-market
lib/python3.5/enum.py
EnumMeta.__call__
python
def __call__(cls, value, names=None, *, module=None, qualname=None, type=None, start=1): if names is None: return cls.__new__(cls, value) return cls._create_(value, names, module=module, qualname=qualname, type=type, start=start)
Either returns an existing member, or creates a new enum class. This method is used both when an enum class is given a value to match to an enumeration member (i.e. Color(3)) and for the functional API (i.e. Color = Enum('Color', names='red green blue')). When used for the functional API: `value` will be the name of the new class. `names` should be either a string of white-space/comma delimited names (values will start at `start`), or an iterator/mapping of name, value pairs. `module` should be set to the module this class is being created in; if it is not set, an attempt to find that module will be made, but if it fails the class will not be picklable. `qualname` should be set to the actual location this class can be found at in its module; by default it is set to the global scope. If this is not correct, unpickling will fail in some circumstances. `type`, if set, will be mixed in as the first base class.
https://github.com/mklan/nx-rom-market/blob/33613d2177b63df9e0568038ffdf1dd91ad334d8/lib/python3.5/enum.py#L215-L243
import sys from collections import OrderedDict from types import MappingProxyType, DynamicClassAttribute __all__ = ['Enum', 'IntEnum', 'unique'] def _is_descriptor(obj): return ( hasattr(obj, '__get__') or hasattr(obj, '__set__') or hasattr(obj, '__delete__')) def _is_dunder(name): return (name[:2] == name[-2:] == '__' and name[2:3] != '_' and name[-3:-2] != '_' and len(name) > 4) def _is_sunder(name): return (name[0] == name[-1] == '_' and name[1:2] != '_' and name[-2:-1] != '_' and len(name) > 2) def _make_class_unpicklable(cls): def _break_on_call_reduce(self, proto): raise TypeError('%r cannot be pickled' % self) cls.__reduce_ex__ = _break_on_call_reduce cls.__module__ = '<unknown>' class _EnumDict(dict): def __init__(self): super().__init__() self._member_names = [] def __setitem__(self, key, value): if _is_sunder(key): raise ValueError('_names_ are reserved for future Enum use') elif _is_dunder(key): pass elif key in self._member_names: raise TypeError('Attempted to reuse key: %r' % key) elif not _is_descriptor(value): if key in self: raise TypeError('Key already defined as: %r' % self[key]) self._member_names.append(key) super().__setitem__(key, value) Enum = None class EnumMeta(type): @classmethod def __prepare__(metacls, cls, bases): return _EnumDict() def __new__(metacls, cls, bases, classdict): member_type, first_enum = metacls._get_mixins_(bases) __new__, save_new, use_args = metacls._find_new_(classdict, member_type, first_enum) members = {k: classdict[k] for k in classdict._member_names} for name in classdict._member_names: del classdict[name] invalid_names = set(members) & {'mro', } if invalid_names: raise ValueError('Invalid enum member name: {0}'.format( ','.join(invalid_names))) if '__doc__' not in classdict: classdict['__doc__'] = 'An enumeration.' 
enum_class = super().__new__(metacls, cls, bases, classdict) enum_class._member_names_ = [] enum_class._member_map_ = OrderedDict() enum_class._member_type_ = member_type base_attributes = {a for b in enum_class.mro() for a in b.__dict__} enum_class._value2member_map_ = {} if '__reduce_ex__' not in classdict: if member_type is not object: methods = ('__getnewargs_ex__', '__getnewargs__', '__reduce_ex__', '__reduce__') if not any(m in member_type.__dict__ for m in methods): _make_class_unpicklable(enum_class) for member_name in classdict._member_names: value = members[member_name] if not isinstance(value, tuple): args = (value, ) else: args = value if member_type is tuple: args = (args, ) if not use_args: enum_member = __new__(enum_class) if not hasattr(enum_member, '_value_'): enum_member._value_ = value else: enum_member = __new__(enum_class, *args) if not hasattr(enum_member, '_value_'): enum_member._value_ = member_type(*args) value = enum_member._value_ enum_member._name_ = member_name enum_member.__objclass__ = enum_class enum_member.__init__(*args) for name, canonical_member in enum_class._member_map_.items(): if canonical_member._value_ == enum_member._value_: enum_member = canonical_member break else: enum_class._member_names_.append(member_name) if member_name not in base_attributes: setattr(enum_class, member_name, enum_member) enum_class._member_map_[member_name] = enum_member try: enum_class._value2member_map_[value] = enum_member except TypeError: pass for name in ('__repr__', '__str__', '__format__', '__reduce_ex__'): class_method = getattr(enum_class, name) obj_method = getattr(member_type, name, None) enum_method = getattr(first_enum, name, None) if obj_method is not None and obj_method is class_method: setattr(enum_class, name, enum_method) if Enum is not None: if save_new: enum_class.__new_member__ = __new__ enum_class.__new__ = Enum.__new__ return enum_class def __bool__(self): return True
MIT License
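The two call forms described in the `EnumMeta.__call__` docstring above, demonstrated with the standard-library `enum` module (the `Color` enumeration is just an example):

from enum import Enum

# Functional API: calling the Enum class creates a brand-new enumeration.
Color = Enum('Color', names='red green blue')   # values start at 1 by default

# Value lookup: calling an enumeration with a value returns the existing member.
assert Color(3) is Color.blue
assert Color['red'] is Color.red
print(list(Color))   # [<Color.red: 1>, <Color.green: 2>, <Color.blue: 3>]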
michaelaquilina/python-tools
lib/jedi/api/classes.py
Completion.description
python
def description(self): if self._definition is None: return '' t = self.type if t == 'statement' or t == 'import': desc = self._definition.get_code() else: desc = '.'.join(unicode(p) for p in self._path()) line = '' if self.in_builtin_module else '@%s' % self.line return '%s: %s%s' % (t, desc, line)
Provide a description of the completion object.
https://github.com/michaelaquilina/python-tools/blob/2fbee20f9ce286ba55050adafcea8bb43c0922b3/lib/jedi/api/classes.py#L421-L432
import warnings from itertools import chain import re from jedi._compatibility import unicode, use_metaclass from jedi import settings from jedi import common from jedi.parser import tree from jedi.evaluate.cache import memoize_default, CachedMetaClass from jedi.evaluate import representation as er from jedi.evaluate import iterable from jedi.evaluate import imports from jedi.evaluate import compiled from jedi.api import keywords from jedi.evaluate.finder import filter_definition_names def defined_names(evaluator, scope): dct = scope.names_dict names = list(chain.from_iterable(dct.values())) names = filter_definition_names(names, scope) return [Definition(evaluator, d) for d in sorted(names, key=lambda s: s.start_pos)] class BaseDefinition(object): _mapping = { 'posixpath': 'os.path', 'riscospath': 'os.path', 'ntpath': 'os.path', 'os2emxpath': 'os.path', 'macpath': 'os.path', 'genericpath': 'os.path', 'posix': 'os', '_io': 'io', '_functools': 'functools', '_sqlite3': 'sqlite3', '__builtin__': '', 'builtins': '', } _tuple_mapping = dict((tuple(k.split('.')), v) for (k, v) in { 'argparse._ActionsContainer': 'argparse.ArgumentParser', '_sre.SRE_Match': 're.MatchObject', '_sre.SRE_Pattern': 're.RegexObject', }.items()) def __init__(self, evaluator, name): self._evaluator = evaluator self._name = name self._definition = evaluator.wrap(self._name.get_definition()) self.is_keyword = isinstance(self._definition, keywords.Keyword) self._module = name.get_parent_until() if self.in_builtin_module(): self.module_path = None else: self.module_path = self._module.path @property def name(self): return unicode(self._name) @property def start_pos(self): warnings.warn("Use line/column instead.", DeprecationWarning) return self._name.start_pos @property def type(self): stripped = self._definition if isinstance(stripped, er.InstanceElement): stripped = stripped.var if isinstance(stripped, compiled.CompiledObject): return stripped.api_type() elif isinstance(stripped, iterable.Array): return 'instance' elif isinstance(stripped, tree.Import): return 'import' string = type(stripped).__name__.lower().replace('wrapper', '') if string == 'exprstmt': return 'statement' else: return string def _path(self): path = [] par = self._definition while par is not None: if isinstance(par, tree.Import): path += imports.ImportWrapper(self._evaluator, self._name).import_path break try: name = par.name except AttributeError: pass else: if isinstance(par, er.ModuleWrapper): path[0:0] = par.py__name__().split('.') break else: path.insert(0, unicode(name)) par = par.parent return path @property def module_name(self): return str(self._module.name) def in_builtin_module(self): return isinstance(self._module, compiled.CompiledObject) @property def line(self): if self.in_builtin_module(): return None return self._name.start_pos[0] @property def column(self): if self.in_builtin_module(): return None return self._name.start_pos[1] def docstring(self, raw=False): if raw: return _Help(self._definition).raw() else: return _Help(self._definition).full() @property def doc(self): warnings.warn("Use docstring() instead.", DeprecationWarning) return self.docstring() @property def raw_doc(self): warnings.warn("Use docstring() instead.", DeprecationWarning) return self.docstring(raw=True) @property def description(self): return unicode(self._name) @property def full_name(self): path = [unicode(p) for p in self._path()] if not path: return None with common.ignored(KeyError): path[0] = self._mapping[path[0]] for key, repl in 
self._tuple_mapping.items(): if tuple(path[:len(key)]) == key: path = [repl] + path[len(key):] return '.'.join(path if path[0] else path[1:]) def goto_assignments(self): defs = self._evaluator.goto(self._name) return [Definition(self._evaluator, d) for d in defs] @memoize_default() def _follow_statements_imports(self): if self._definition.isinstance(tree.ExprStmt): return self._evaluator.eval_statement(self._definition) elif self._definition.isinstance(tree.Import): return imports.ImportWrapper(self._evaluator, self._name).follow() else: return [self._definition] @property @memoize_default() def params(self): followed = self._follow_statements_imports() if not followed or not hasattr(followed[0], 'py__call__'): raise AttributeError() followed = followed[0] if followed.type == 'funcdef': if isinstance(followed, er.InstanceElement): params = followed.params[1:] else: params = followed.params elif followed.isinstance(er.compiled.CompiledObject): params = followed.params else: try: sub = followed.get_subscope_by_name('__init__') params = sub.params[1:] except KeyError: return [] return [_Param(self._evaluator, p.name) for p in params] def parent(self): scope = self._definition.get_parent_scope() scope = self._evaluator.wrap(scope) return Definition(self._evaluator, scope.name) def __repr__(self): return "<%s %s>" % (type(self).__name__, self.description) class Completion(BaseDefinition): def __init__(self, evaluator, name, needs_dot, like_name_length): super(Completion, self).__init__(evaluator, name) self._needs_dot = needs_dot self._like_name_length = like_name_length self._same_name_completions = [] def _complete(self, like_name): dot = '.' if self._needs_dot else '' append = '' if settings.add_bracket_after_function and self.type == 'Function': append = '(' if settings.add_dot_after_module: if isinstance(self._definition, tree.Module): append += '.' if isinstance(self._definition, tree.Param): append += '=' name = str(self._name) if like_name: name = name[self._like_name_length:] return dot + name + append @property def complete(self): return self._complete(True) @property def name_with_symbols(self): return self._complete(False) @property
MIT License
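A hedged usage sketch for `Completion.description` above: with the pre-0.10 jedi API vendored in this repository, a `Script` is built from source text plus a cursor position, and each returned `Completion` exposes the `description` property. Newer jedi releases changed this signature, so treat the call below as illustrative only.

import jedi

source = "import json\njson.lo"
script = jedi.Script(source, 2, len("json.lo"), 'example.py')   # cursor at end of line 2
for completion in script.completions():
    print(completion.name, '->', completion.description)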
tensorflow/transform
tensorflow_transform/graph_context.py
TFGraphContext.get_module_to_export
python
def get_module_to_export(cls) -> Optional[tf.Module]: return cls._get_current_state().module_to_export
Retrieves the value of module_to_export. None if called outside a TFGraphContext scope. Returns: A tf.Module object
https://github.com/tensorflow/transform/blob/6349d7f6d847cb8979f31b9b315981d79ffba3e5/tensorflow_transform/graph_context.py#L121-L129
import os import threading from typing import Any, Dict, Optional import tensorflow as tf from tfx_bsl.types import tfx_namedtuple class TFGraphContext: class _State( tfx_namedtuple.namedtuple('_State', [ 'module_to_export', 'temp_dir', 'evaluated_replacements', ])): @classmethod def make_empty(cls): return cls(*(None,) * len(cls._fields)) _TEMP_SUBDIR = 'analyzer_temporary_assets' _thread_local = threading.local() def __init__(self, module_to_export: tf.Module, temp_dir: Optional[str] = None, evaluated_replacements: Optional[Dict[str, Any]] = None): self._module_to_export = module_to_export self._temp_dir = temp_dir self._evaluated_replacements = evaluated_replacements def __enter__(self): assert getattr(self._thread_local, 'current_state', None) is None self._thread_local.current_state = self._State( module_to_export=self._module_to_export, temp_dir=self._temp_dir, evaluated_replacements=self._evaluated_replacements) def __exit__(self, *exn_info): self._thread_local.current_state = None @property def module_to_export(self): return self._module_to_export @classmethod def _get_current_state(cls) -> 'TFGraphContext._State': if hasattr(cls._thread_local, 'current_state'): return cls._thread_local.current_state return cls._State.make_empty() @classmethod def get_or_create_temp_dir(cls) -> Optional[str]: current_state = cls._get_current_state() if current_state.temp_dir is None: return None if not current_state.temp_dir: raise ValueError('A temp dir was requested, but empty temp_dir was set. ' 'Use the TFGraphContext context manager.') result = os.path.join(current_state.temp_dir, cls._TEMP_SUBDIR) tf.io.gfile.makedirs(result) return result @classmethod def get_evaluated_replacements(cls) -> Optional[Dict[str, Any]]: return cls._get_current_state().evaluated_replacements @classmethod
Apache License 2.0
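A minimal usage sketch for get_module_to_export, based on the TFGraphContext constructor shown in the context above; the tf.Module instance and the temp directory path are placeholders:

import tensorflow as tf
from tensorflow_transform.graph_context import TFGraphContext

module = tf.Module()

# Outside any TFGraphContext scope the classmethod returns None.
assert TFGraphContext.get_module_to_export() is None

# Inside the scope it returns the module handed to the context manager.
with TFGraphContext(module_to_export=module, temp_dir='/tmp/tft_tmp'):
    assert TFGraphContext.get_module_to_export() is module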
verashira/tspnet
fairseq/models/transformer_from_sign.py
TransformerEncoderSign.upgrade_state_dict_named
python
def upgrade_state_dict_named(self, state_dict, name):
    if isinstance(self.embed_positions, SinusoidalPositionalEmbedding):
        weights_key = '{}.embed_positions.weights'.format(name)
        if weights_key in state_dict:
            logger.info('deleting {0}'.format(weights_key))
            del state_dict[weights_key]
        state_dict['{}.embed_positions._float_tensor'.format(name)] = torch.FloatTensor(1)
    for i in range(len(self.layers)):
        self.layers[i].upgrade_state_dict_named(state_dict, "{}.layers.{}".format(name, i))
    version_key = '{}.version'.format(name)
    if utils.item(state_dict.get(version_key, torch.Tensor([1]))[0]) < 2:
        self.layer_norm = None
        self.normalize = False
        state_dict[version_key] = torch.Tensor([1])
    return state_dict
Upgrade a (possibly old) state dict for new versions of fairseq.
https://github.com/verashira/tspnet/blob/ee454165dcc61cdbbff19565364e2221727ed2b8/fairseq/models/transformer_from_sign.py#L434-L452
from collections import namedtuple import logging import math import torch import torch.nn as nn import torch.nn.functional as F from fairseq import options, utils from fairseq.models import ( FairseqEncoder, FairseqIncrementalDecoder, FairseqEncoderDecoderModel, register_model, register_model_architecture, FairseqDecoder) from fairseq.modules import ( AdaptiveSoftmax, LayerNorm, PositionalEmbedding, SinusoidalPositionalEmbedding, TransformerDecoderLayer, TransformerEncoderLayer, ) import random DEFAULT_MAX_SOURCE_POSITIONS = 1024 DEFAULT_MAX_TARGET_POSITIONS = 1024 logger = logging.getLogger(__name__) @register_model('transformer_sign') class TransformerModel(FairseqEncoderDecoderModel): @classmethod def hub_models(cls): def moses_subword(path): return { 'path': path, 'tokenizer': 'moses', 'bpe': 'subword_nmt', } def moses_fastbpe(path): return { 'path': path, 'tokenizer': 'moses', 'bpe': 'fastbpe', } return { 'transformer.wmt14.en-fr': moses_subword( 'https://dl.fbaipublicfiles.com/fairseq/models/wmt14.en-fr.joined-dict.transformer.tar.bz2'), 'transformer.wmt16.en-de': 'https://dl.fbaipublicfiles.com/fairseq/models/wmt16.en-de.joined-dict.transformer.tar.bz2', 'transformer.wmt18.en-de': moses_subword( 'https://dl.fbaipublicfiles.com/fairseq/models/wmt18.en-de.ensemble.tar.gz'), 'transformer.wmt19.en-de': moses_fastbpe( 'https://dl.fbaipublicfiles.com/fairseq/models/wmt19.en-de.joined-dict.ensemble.tar.gz'), 'transformer.wmt19.en-ru': moses_fastbpe( 'https://dl.fbaipublicfiles.com/fairseq/models/wmt19.en-ru.ensemble.tar.gz'), 'transformer.wmt19.de-en': moses_fastbpe( 'https://dl.fbaipublicfiles.com/fairseq/models/wmt19.de-en.joined-dict.ensemble.tar.gz'), 'transformer.wmt19.ru-en': moses_fastbpe( 'https://dl.fbaipublicfiles.com/fairseq/models/wmt19.ru-en.ensemble.tar.gz'), 'transformer.wmt19.en-de.single_model': moses_fastbpe( 'https://dl.fbaipublicfiles.com/fairseq/models/wmt19.en-de.joined-dict.single_model.tar.gz'), 'transformer.wmt19.en-ru.single_model': moses_fastbpe( 'https://dl.fbaipublicfiles.com/fairseq/models/wmt19.en-ru.single_model.tar.gz'), 'transformer.wmt19.de-en.single_model': moses_fastbpe( 'https://dl.fbaipublicfiles.com/fairseq/models/wmt19.de-en.joined-dict.single_model.tar.gz'), 'transformer.wmt19.ru-en.single_model': moses_fastbpe( 'https://dl.fbaipublicfiles.com/fairseq/models/wmt19.ru-en.single_model.tar.gz'), } def __init__(self, args, encoder, decoder): super().__init__(encoder, decoder) self.args = args @staticmethod def add_args(parser): parser.add_argument('--activation-fn', choices=utils.get_available_activation_fns(), help='activation function to use') parser.add_argument('--dropout', type=float, metavar='D', help='dropout probability') parser.add_argument('--attention-dropout', type=float, metavar='D', help='dropout probability for attention weights') parser.add_argument('--activation-dropout', '--relu-dropout', type=float, metavar='D', help='dropout probability after activation in FFN.') parser.add_argument('--encoder-embed-path', type=str, metavar='STR', help='path to pre-trained encoder embedding') parser.add_argument('--encoder-embed-dim', type=int, metavar='N', help='encoder embedding dimension') parser.add_argument('--encoder-ffn-embed-dim', type=int, metavar='N', help='encoder embedding dimension for FFN') parser.add_argument('--encoder-layers', type=int, metavar='N', help='num encoder layers') parser.add_argument('--encoder-attention-heads', type=int, metavar='N', help='num encoder attention heads') parser.add_argument('--encoder-normalize-before', 
action='store_true', help='apply layernorm before each encoder block') parser.add_argument('--encoder-learned-pos', action='store_true', help='use learned positional embeddings in the encoder') parser.add_argument('--decoder-embed-path', type=str, metavar='STR', help='path to pre-trained decoder embedding') parser.add_argument('--decoder-embed-dim', type=int, metavar='N', help='decoder embedding dimension') parser.add_argument('--decoder-ffn-embed-dim', type=int, metavar='N', help='decoder embedding dimension for FFN') parser.add_argument('--decoder-layers', type=int, metavar='N', help='num decoder layers') parser.add_argument('--decoder-attention-heads', type=int, metavar='N', help='num decoder attention heads') parser.add_argument('--decoder-learned-pos', action='store_true', help='use learned positional embeddings in the decoder') parser.add_argument('--decoder-normalize-before', action='store_true', help='apply layernorm before each decoder block') parser.add_argument('--share-decoder-input-output-embed', action='store_true', help='share decoder input and output embeddings') parser.add_argument('--share-all-embeddings', action='store_true', help='share encoder, decoder and output embeddings' ' (requires shared dictionary and embed dim)') parser.add_argument('--no-token-positional-embeddings', default=False, action='store_true', help='if set, disables positional embeddings (outside self attention)') parser.add_argument('--adaptive-softmax-cutoff', metavar='EXPR', help='comma separated list of adaptive softmax cutoff points. ' 'Must be used with adaptive_loss criterion'), parser.add_argument('--adaptive-softmax-dropout', type=float, metavar='D', help='sets adaptive softmax dropout for the tail projections') parser.add_argument('--no-cross-attention', default=False, action='store_true', help='do not perform cross-attention') parser.add_argument('--cross-self-attention', default=False, action='store_true', help='perform cross+self-attention') parser.add_argument('--layer-wise-attention', default=False, action='store_true', help='perform layer-wise attention (cross-attention or cross+self-attention)') parser.add_argument('--encoder-layerdrop', type=float, metavar='D', default=0, help='LayerDrop probability for encoder') parser.add_argument('--decoder-layerdrop', type=float, metavar='D', default=0, help='LayerDrop probability for decoder') parser.add_argument('--encoder-layers-to-keep', default=None, help='which layers to *keep* when pruning as a comma-separated list') parser.add_argument('--decoder-layers-to-keep', default=None, help='which layers to *keep* when pruning as a comma-separated list') parser.add_argument('--layernorm-embedding', action='store_true', help='add layernorm to embedding') parser.add_argument('--no-scale-embedding', action='store_true', help='if True, dont scale embeddings') parser.add_argument('--src-embed-dim', type=int, metavar='N', help='source input feature dimension') parser.add_argument('--early-proj', action='store_true', help='if True, apply a linear layer on the input embedding before feeding into encoder.') @classmethod def build_model(cls, args, task): base_architecture(args) if args.encoder_layers_to_keep: args.encoder_layers = len(args.encoder_layers_to_keep.split(",")) if args.decoder_layers_to_keep: args.decoder_layers = len(args.decoder_layers_to_keep.split(",")) if getattr(args, 'max_source_positions', None) is None: args.max_source_positions = DEFAULT_MAX_SOURCE_POSITIONS if getattr(args, 'max_target_positions', None) is None: 
args.max_target_positions = DEFAULT_MAX_TARGET_POSITIONS tgt_dict = task.target_dictionary def build_embedding(dictionary, embed_dim, path=None): num_embeddings = len(dictionary) padding_idx = dictionary.pad() emb = Embedding(num_embeddings, embed_dim, padding_idx) if path: embed_dict = utils.parse_embedding(path) utils.load_embedding(embed_dict, dictionary, emb) logging.info('Loaded pretrained embeddings from {}'.format(path)) return emb decoder_embed_tokens = build_embedding( tgt_dict, args.decoder_embed_dim, args.decoder_embed_path ) encoder = cls.build_encoder(args) decoder = cls.build_decoder(args, tgt_dict, decoder_embed_tokens) return cls(args, encoder, decoder) @classmethod def build_encoder(cls, args): return TransformerEncoderSign(args) @classmethod def build_decoder(cls, args, tgt_dict, embed_tokens): return TransformerDecoder( args, tgt_dict, embed_tokens, no_encoder_attn=getattr(args, 'no_cross_attention', False), ) EncoderOut = namedtuple('TransformerEncoderOut', [ 'encoder_out', 'encoder_padding_mask', 'encoder_embedding', 'encoder_states', ]) class TransformerEncoderSign(FairseqEncoder): def __init__(self, args): super().__init__(dict()) self.register_buffer('version', torch.Tensor([3])) self.dropout = args.dropout self.encoder_layerdrop = args.encoder_layerdrop embed_dim = args.encoder_embed_dim self.max_source_positions = args.max_source_positions self.embed_scale = 1.0 if args.no_scale_embedding else math.sqrt(embed_dim) self.embed_positions = PositionalEmbedding( args.max_source_positions, embed_dim, padding_idx=0, learned=args.encoder_learned_pos, ) if not args.no_token_positional_embeddings else None self.layer_wise_attention = getattr(args, 'layer_wise_attention', False) self.early_proj = args.early_proj if self.early_proj: self.inp_fc = Linear(args.src_embed_dim, embed_dim) self.layers = nn.ModuleList([]) self.layers.extend([ TransformerEncoderLayer(args) for i in range(args.encoder_layers) ]) if args.encoder_normalize_before: self.layer_norm = LayerNorm(embed_dim) else: self.layer_norm = None if getattr(args, 'layernorm_embedding', False): self.layernorm_embedding = LayerNorm(embed_dim) else: self.layernorm_embedding = None def forward_embedding(self, src_embed, pseudo_src_tokens): x = embed = self.embed_scale * src_embed if self.embed_positions is not None: x = embed + self.embed_positions(pseudo_src_tokens) if self.layernorm_embedding: x = self.layernorm_embedding(x) x = F.dropout(x, p=self.dropout, training=self.training) return x, embed def forward(self, src_tokens, src_lengths, cls_input=None, return_all_hiddens=False, **kwargs): if self.layer_wise_attention: return_all_hiddens = True if self.early_proj: src_tokens = self.inp_fc(src_tokens) encoder_padding_mask = kwargs['encoder_padding_mask'] A = kwargs['adjacent_matrix'] As = kwargs['candidate_matrices'] encoder_states = [] if return_all_hiddens else None x_lvls, encoder_padding_mask_lvls = [], [] for src_token_lv, encoder_padding_mask_lv in zip(src_tokens, encoder_padding_mask): pseudo_src_tokens_lv = 1 - encoder_padding_mask_lv.int() x_lv, encoder_embedding_lv = self.forward_embedding(src_embed=src_token_lv, pseudo_src_tokens=pseudo_src_tokens_lv) x_lvls.append(x_lv) encoder_padding_mask_lvls.append(encoder_padding_mask_lv) x = torch.cat(x_lvls, dim=1) x = x.transpose(0, 1) encoder_padding_mask = torch.cat(encoder_padding_mask_lvls, dim=1) reverse_link = 1 - A for layer in self.layers: dropout_probability = random.uniform(0, 1) if not self.training or (dropout_probability > self.encoder_layerdrop): x = 
layer(x, encoder_padding_mask=encoder_padding_mask, attn_mask=reverse_link, ) if return_all_hiddens: encoder_states.append(x) x = x[:src_tokens[0].shape[1]] encoder_embedding = x.transpose(0, 1) encoder_padding_mask = encoder_padding_mask_lvls[0] if self.layer_norm: x = self.layer_norm(x) if return_all_hiddens: encoder_states[-1] = x return EncoderOut( encoder_out=x, encoder_padding_mask=encoder_padding_mask, encoder_embedding=encoder_embedding, encoder_states=encoder_states, ) def reorder_encoder_out(self, encoder_out, new_order): if encoder_out.encoder_out is not None: encoder_out = encoder_out._replace( encoder_out=encoder_out.encoder_out.index_select(1, new_order) ) if encoder_out.encoder_padding_mask is not None: encoder_out = encoder_out._replace( encoder_padding_mask=encoder_out.encoder_padding_mask.index_select(0, new_order) ) if encoder_out.encoder_embedding is not None: encoder_out = encoder_out._replace( encoder_embedding=encoder_out.encoder_embedding.index_select(0, new_order) ) if encoder_out.encoder_states is not None: for idx, state in enumerate(encoder_out.encoder_states): encoder_out.encoder_states[idx] = state.index_select(1, new_order) return encoder_out def max_positions(self): if self.embed_positions is None: return self.max_source_positions return min(self.max_source_positions, self.embed_positions.max_positions) def buffered_future_mask(self, tensor): dim = tensor.size(0) if not hasattr(self, '_future_mask') or self._future_mask is None or self._future_mask.device != tensor.device: self._future_mask = torch.triu(utils.fill_with_neg_inf(tensor.new(dim, dim)), 1) if self._future_mask.size(0) < dim: self._future_mask = torch.triu(utils.fill_with_neg_inf(self._future_mask.resize_(dim, dim)), 1) return self._future_mask[:dim, :dim]
MIT License
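An illustrative sketch of the key migration this method performs when the encoder uses sinusoidal positional embeddings; the 'encoder' prefix and the tensors below are stand-ins, not part of the repository:

import torch

# Old-style checkpoint entry: a persisted sinusoidal table under the encoder prefix.
state_dict = {'encoder.embed_positions.weights': torch.zeros(10, 4)}

weights_key = 'encoder.embed_positions.weights'
if weights_key in state_dict:
    del state_dict[weights_key]                    # drop the stale weights
state_dict['encoder.embed_positions._float_tensor'] = torch.FloatTensor(1)

print(sorted(state_dict))  # ['encoder.embed_positions._float_tensor']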
drexly/openhgsenti
lib/django/contrib/gis/gdal/prototypes/geom.py
pnt_func
python
def pnt_func(f):
    return double_output(f, [c_void_p, c_int])
For accessing point information.
https://github.com/drexly/openhgsenti/blob/d7806f58c81127d32091d9875a99ac13aef94a8a/lib/django/contrib/gis/gdal/prototypes/geom.py#L21-L23
from ctypes import POINTER, c_char_p, c_double, c_int, c_void_p from django.contrib.gis.gdal.envelope import OGREnvelope from django.contrib.gis.gdal.libgdal import lgdal from django.contrib.gis.gdal.prototypes.errcheck import check_envelope from django.contrib.gis.gdal.prototypes.generation import ( const_string_output, double_output, geom_output, int_output, srs_output, string_output, void_output, ) def env_func(f, argtypes): f.argtypes = argtypes f.restype = None f.errcheck = check_envelope return f
Apache License 2.0
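In the vendored Django GDAL prototypes this factory is typically applied to the OGR point accessors; a sketch along those lines (the lgdal symbol names are assumed from the GDAL C API and may vary by version):

from django.contrib.gis.gdal.libgdal import lgdal
from django.contrib.gis.gdal.prototypes.geom import pnt_func

# Wrap the point accessors so each takes (geometry ptr, coordinate index)
# and returns a C double.
getx = pnt_func(lgdal.OGR_G_GetX)
gety = pnt_func(lgdal.OGR_G_GetY)
getz = pnt_func(lgdal.OGR_G_GetZ)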
swd-bits-goa/swd_django
swd/main/models.py
VacationDatesFill.check_start_end_dates_in_range
python
def check_start_end_dates_in_range(self, dateTimeStart, dateTimeEnd):
    first_cond = self.check_date_in_range(dateTimeStart) and self.check_date_in_range(dateTimeEnd)
    return first_cond and dateTimeStart < dateTimeEnd
Checks whether both the start and end datetime objects are in range and the start date is earlier than the end date.

Params:
    dateTimeStart, dateTimeEnd: datetime objects
https://github.com/swd-bits-goa/swd_django/blob/1fafb657f0a7cb7f7ac66bde789eeb336f8df065/swd/main/models.py#L580-L590
from django.db import models from django.contrib.auth.models import User import os import hashlib import re from django.utils import timezone from datetime import datetime from datetime import date try: from tools.dev_info import SALT_IMG as SALT except ModuleNotFoundError: SALT = '1234567890' MESS_CHOICES = ( ('A','Dining Hall A'), ('C','Dining Hall C'), ('D','Dining Hall D')) CONSENT_CHOICES = ( ('Letter', 'Letter'), ('Fax', 'Fax'), ('Email', 'Email') ) BONAFIDE_REASON_CHOICES = ( ('Bank Loan', 'Bank Loan'), ('Passport', 'Passport'), ('Other', 'Other')) BONAFIDE_STATUS_CHOICES = ( ('Approved', 'Approved'), ('Pending', 'Pending'), ('Rejected', 'Rejected') ) BRANCH = { 'A1': 'B.E. Chemical Engineering', 'A3': 'B.E. Electrical and Electronics Engineering', 'A4': 'B.E. Mechanical Engineering', 'A7': 'B.E. Computer Science', 'A8': 'B.E. Electronics and Instrumentation Engineering', 'B1': 'M.Sc. Biology', 'B2': 'M.Sc. Chemistry', 'B3': 'M.Sc. Economics', 'B4': 'M.Sc. Mathematics', 'B5': 'M.Sc. Physics', 'AA': 'B.E. Electronics and Communication Engineering', 'PH': 'PhD.', 'H1': 'M.E. Computer Science', } ME = { 'H101':'M.E. Chemical', 'H103':'M.E. Computer Science', 'H112':'M.E. Software Systems', 'H123':'M.E. Microelectronics', 'H129':'M.E. biotechnology', 'H140':'M.E. Embedded Systems', 'H141':'M.E. Design Engineering', 'H106':'M.E. Mechanical', 'H151':'M.E. Sanitation Science, Technology and Management', 'H152':'M.Phil. In Liberal Studies', } YEARNAMES = { 1: 'first', 2: 'second', 3: 'third', 4: 'fourth', 5: 'fifth', 6: 'sixth', 7: 'seventh', 8: 'eighth', 9: 'ninth', } STUDENT_STATUS = ( ('Student', 'Student'), ('Thesis', 'Thesis'), ('PS2', 'PS2'), ('Graduate', 'Graduate')) HOSTELS = ( ('AH1', 'AH1'), ('AH2', 'AH2'), ('AH3', 'AH3'), ('AH4', 'AH4'), ('AH5', 'AH5'), ('AH6', 'AH6'), ('AH7', 'AH7'), ('AH8', 'AH8'), ('AH9', 'AH9'), ('CH1', 'CH1'), ('CH2', 'CH2'), ('CH3', 'CH3'), ('CH4', 'CH4'), ('CH5', 'CH5'), ('CH6', 'CH6'), ('CH7', 'CH7'), ('DH1', 'DH1'), ('DH2', 'DH2'), ('DH3', 'DH3'), ('DH4', 'DH4'), ) class Warden(models.Model): user = models.OneToOneField(User, on_delete=models.CASCADE) name = models.CharField(max_length=50, null=True, blank=True) chamber = models.CharField(max_length=15, null=True, blank=True) residence = models.CharField(max_length=10, null=True, blank=True) phone_off = models.CharField(max_length=15, null=True, blank=True) phone_res = models.CharField(max_length=15, null=True, blank=True) email = models.EmailField(null=True, blank=True) hostel = models.CharField(max_length=5, choices=HOSTELS, null=True, blank=True) def __str__(self): return self.hostel + ' ' + self.name + ' ' + self.email + ' ' + self.chamber class HostelSuperintendent(models.Model): user = models.ForeignKey(User, on_delete=models.CASCADE) name = models.CharField(max_length=50, null=True, blank=True) email = models.EmailField(null=True, blank=True) hostel = models.TextField(null=True, blank=True) chamber = models.CharField(max_length = 10, null=True, blank=True) office_ph = models.CharField(max_length = 12, null = True, blank=True) residence_ph = models.CharField(max_length = 12, null = True, blank=True) chamber = models.CharField(max_length=15, null=True, blank=True) phone_off = models.CharField(max_length=15, null=True, blank=True) phone_res = models.CharField(max_length=15, null=True, blank=True) def __str__(self): return self.hostel + ' ' + self.name + ' ' + self.email class Staff(models.Model): user = models.OneToOneField(User, on_delete=models.CASCADE) name = models.CharField(max_length=50) 
staffType = models.CharField(max_length=20) phone = models.CharField(max_length=15) def __str__(self): return self.staffType + ' ' + self.name class Student(models.Model): def hash_upload(instance, filename): ext = filename.split('.')[-1] tempname = (SALT+instance.bitsId).encode('utf-8') return '{}.{}'.format(hashlib.md5(tempname).hexdigest(), ext) user = models.OneToOneField(User, on_delete=models.CASCADE) name = models.CharField(max_length=50) bitsId = models.CharField(max_length=15) gender = models.CharField(max_length=1, blank=True) bDay = models.DateField(blank=True, null=True) profile_picture=models.FileField(upload_to=hash_upload, blank=True, null=True) phone = models.CharField(max_length=15, blank=True, null=True) email = models.EmailField(blank=True, null=True) address = models.TextField(blank=True, null=True) bloodGroup = models.CharField(max_length=10, blank=True, null=True) cgpa = models.FloatField(blank=True, null=True) admit = models.DateField(blank=True, null=True) parentName = models.CharField(max_length=50, blank=True, null=True) parentPhone = models.CharField(max_length=20, blank=True, null=True) parentEmail = models.CharField(max_length=50, blank=True, null=True) bank_account_no = models.CharField(max_length=30, blank=True, null=True) def nophd(self): return re.match(r"^20\d{2}(PHX[PF]|PH\d{2})\d{3,4}G$", self.bitsId, flags=re.IGNORECASE) def __str__(self): return self.bitsId + ' (' + self.name + ')' def render_parentPhone(self): if self.parentPhone: s = self.parentPhone for wild in ['/', ',']: s = (s).split(wild) if len(s) > 1: s = ' / '.join(s) else: s = s[0] return s else: return '' def change_cgpa(self, new_cg): if ((new_cg > 0.0) and (new_cg <= 10.0)): self.cgpa = new_cg self.save() return True else: return False class DayScholar(models.Model): student = models.OneToOneField('Student', on_delete = models.CASCADE) def __str__(self): return self.student.bitsId + ' (' + self.student.name + ')' class HostelPS(models.Model): student = models.OneToOneField('Student', on_delete = models.CASCADE, related_name='hostelps') acadstudent = models.BooleanField() status = models.CharField(max_length=10, choices=STUDENT_STATUS) psStation = models.TextField(null=True, blank=True) hostel = models.TextField(null=True, blank=True) room = models.CharField(max_length = 7, null=True, blank=True) def __str__(self): return self.student.bitsId + ' (' + self.student.name + ')' def save(self, *args, **kwargs): if self.acadstudent == True: self.status = 'Student' super(HostelPS, self).save(*args, **kwargs) class CSA(models.Model): student = models.OneToOneField('Student', on_delete = models.CASCADE) title = models.CharField(max_length=20) email = models.EmailField() pic = models.ImageField(blank=True, null=True) priority=models.IntegerField(blank=True, null=True) def __str__(self): return self.title + ' ' + self.student.name + ' ' + self.email class MessOption(models.Model): student = models.ForeignKey('Student', on_delete = models.CASCADE) monthYear = models.DateField() mess = models.CharField(max_length=1, choices=MESS_CHOICES) def __str__(self): return self.mess + ' ' + self.student.bitsId class Bonafide(models.Model): student = models.ForeignKey('Student', on_delete = models.CASCADE) reason = models.CharField(max_length=20, choices=BONAFIDE_REASON_CHOICES) otherReason = models.TextField(null=True, blank=True) reqDate = models.DateField() printed = models.BooleanField(default=0, blank=True) status = models.CharField(max_length=20, choices=BONAFIDE_STATUS_CHOICES, default= 'Pending') text = 
models.TextField(default='', blank=True) rejectedReason = models.TextField(default='', blank=True) def createText(self): gender = "Mr. " if self.student.gender.lower() == 'm' else "Ms. " pronoun = "He " if gender=="Mr. " else "She " firstDeg=self.student.bitsId[4:6] secondDeg=self.student.bitsId[6:8] res=HostelPS.objects.get(student=self.student) branch = BRANCH[firstDeg] if secondDeg != 'PS' and firstDeg != 'H1' and firstDeg != 'PH': branch = branch +' and '+ BRANCH[secondDeg] if firstDeg == 'H1': branch = ME[self.student.bitsId[4:8]] yearNum=self.reqDate.year-int(self.student.bitsId[0:4]) + 1 if(self.reqDate.month <8): yearNum=yearNum-1 yearName=YEARNAMES[yearNum] date_admit = res.student.admit.strftime('%d/%m/%Y') today = date.today() if (today.month<8): year = today.year - 1 else: year = today.year reason = self.otherReason if self.reason.lower()=='other' else self.reason if(res.status == "Student"): return '''&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;This is to certify that <i style="font-family: Monotype Corsiva">''' + gender + self.student.name.upper() + '''</i>, ID No. <i style="font-family: Monotype Corsiva">''' + self.student.bitsId + '''</i> is a bonafide student of '''+ yearName + ''' year class. ''' + pronoun+ ''' was admitted to the institute on ''' + str(date_admit) + ''', for pursuing the <i style="font-family: Monotype Corsiva">'''+ branch + '''</i> programme of studies. ''' +pronoun+'''is residing in the Hostel <i style="font-family: Monotype Corsiva">'''+res.hostel+'''-'''+res.room+'''</i> of this institute. Date of joining the current academic session is 1 August '''+str(year)+'''.<br>&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;This certificate is issued for the purpose of applying for ''' + reason + '''.''' elif(res.status == "Thesis" or res.status == "PS2"): return '''&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;This is to certify that <i>''' + gender + self.student.name.upper() + '''</i>, ID No. <i>''' + self.student.bitsId + '''</i> is a bonafide student of '''+ yearName + ''' year class. ''' + pronoun +''' was admitted to the Institute on ''' + str(date_admit) + ''', for pursuing the <i>'''+ branch +'''</i> programme of studies. 
'''+ pronoun+ ''' is pursuing <i>''' + res.status + '''</i> at <i>'''+ res.psStation +'''</i> as a part of the academic requirement of BITS-Pilani, Deemed University.<br>This certificate is issued for the purpose of applying for ''' + reason + '''.''' else: return 'Bonafide is invalid for Graduate students' def save(self, *args, **kwargs): if self.text == '': self.text = self.createText() super(Bonafide, self).save(*args, **kwargs) def __str__(self): return self.student.bitsId + ' (' + self.student.name + ') ' + self.reason class Leave(models.Model): student = models.ForeignKey('Student', on_delete = models.CASCADE) dateTimeStart = models.DateTimeField() dateTimeEnd = models.DateTimeField() reason = models.TextField() consent = models.CharField(max_length=10, choices=CONSENT_CHOICES) corrAddress = models.TextField() corrPhone = models.CharField(max_length=15) approvedBy = models.ForeignKey('Warden', blank=True, null=True, on_delete="PROTECT") approved = models.BooleanField(default=0, blank=True) disapproved = models.BooleanField(default=0, blank=True) inprocess = models.BooleanField(default=1, blank=True) comment = models.TextField(default='', blank=True) def __str__(self): return self.student.bitsId + ' '+ self.student.name + ' ' + str(self.id) class DayPass(models.Model): def document_path(instance, filename): ext = filename.split('.')[-1] tempname = (SALT+instance.student.bitsId+str(datetime)).encode('utf-8') return 'documents/{}.{}'.format( hashlib.md5(tempname).hexdigest(), ext) student = models.ForeignKey('Student', on_delete = models.CASCADE) reason = models.TextField() dateTime = models.DateTimeField(null=True, blank=False) inTime = models.DateTimeField(null=True, blank=False) corrAddress = models.TextField() approvedBy = models.ForeignKey('HostelSuperintendent', blank=True, null=True, on_delete="PROTECT") approved = models.BooleanField(default=0, blank=True) disapproved = models.BooleanField(default=0, blank=True) inprocess = models.BooleanField(default=1, blank=True) comment = models.TextField() document = models.FileField(upload_to=document_path, null=True, blank=True) def __str__(self): return self.student.bitsId + ' (' + self.student.name + ')' class LateComer(models.Model): student = models.ForeignKey('Student', on_delete = models.CASCADE) dateTime = models.DateTimeField() def __str__(self): return self.student.bitsId + ' (' + self.student.name + ')' class InOut(models.Model): student = models.ForeignKey('Student', on_delete = models.CASCADE) place = models.CharField(max_length=20) outDateTime = models.DateTimeField() inDateTime = models.DateTimeField() onCampus = models.BooleanField() onLeave = models.BooleanField() def __str__(self): return self.student.bitsId + ' (' + self.student.name + ')' class Disco(models.Model): student = models.ForeignKey('Student', on_delete = models.CASCADE) dateOfViolation = models.DateField(blank = True, null = True) subject = models.TextField() action = models.TextField() def __str__(self): return self.student.bitsId + ' (' + self.student.name + ')' class MessOptionOpen(models.Model): monthYear = models.DateField() dateOpen = models.DateField() dateClose = models.DateField() def __str__(self): return str(self.monthYear.month) + ' Open: ' + str(self.dateOpen) + ' Close: ' + str(self.dateClose) class Transaction(models.Model): student = models.ForeignKey('Student', on_delete = models.CASCADE) timestamp = models.DateTimeField(auto_now=True) def __str__(self): return self.timestamp + ' ' + student.user class MessBill(models.Model): month = 
models.DateField() amount = models.FloatField() rebate = models.FloatField() class Meta: get_latest_by = "month" def __str__(self): return str(self.month) + ' ' + str(self.amount) + ' ' + str(self.rebate) class TeeAdd(models.Model): title = models.CharField(max_length=30) desc = models.TextField() pic = models.ImageField(blank=True, null=True) price = models.FloatField() nick_price = models.FloatField(blank = True, null = True) nick = models.BooleanField(blank=True) colors = models.CharField(max_length=100, blank=True, null=True) sizes = models.CharField(max_length=100, blank=True, null=True) available = models.BooleanField(default=False) def __str__(self): return self.title + ' - Rs.' + str(self.price) class ItemAdd(models.Model): title = models.CharField(max_length=30) desc = models.TextField() pic = models.ImageField(blank=True, null=True) price = models.FloatField() available = models.BooleanField(default=False) def __str__(self): return self.title + ' - Rs.' + str(self.price) class TeeBuy(models.Model): student = models.ForeignKey('Student', on_delete = models.CASCADE) tee = models.ForeignKey('TeeAdd', on_delete = models.CASCADE) qty = models.IntegerField() nick = models.CharField(max_length=100, blank=True, null=True) color = models.CharField(max_length=10, blank=True, null=True) size = models.CharField(max_length=10, blank=True, null=True) created = models.DateTimeField(auto_now_add=True) totamt = models.FloatField() def __str__(self): return self.student.bitsId + ' ' + self.tee.title def save(self, *args, **kwargs): if self.nick == "": self.totamt = float(self.qty) * float(self.tee.price) else: self.totamt = float(self.qty) * float(self.tee.nick_price) super(TeeBuy, self).save(*args, **kwargs) class ItemBuy(models.Model): student = models.ForeignKey('Student', on_delete = models.CASCADE) item = models.ForeignKey('ItemAdd', on_delete = models.CASCADE) created = models.DateTimeField(auto_now_add=True) def __str__(self): return self.student.bitsId + ' ' + self.item.title class DueCategory(models.Model): name = models.CharField(max_length=100) description = models.CharField(max_length=500) def __str__(self): return "Due category with name {} and description '{}'".format(self.name, self.description) class Due(models.Model): student = models.ForeignKey(Student, on_delete=models.CASCADE) amount = models.FloatField() due_category = models.ForeignKey(DueCategory, on_delete=models.CASCADE) description = models.CharField(max_length=500) date_added = models.DateField() class Meta: verbose_name_plural = "Dues" def __str__(self): return self.student.bitsId + "'s due entry with amount " + str(self.amount) class DuesPublished(models.Model): date_published = models.DateTimeField(auto_now_add=True) def __str__(self): return str(self.date_published) class FileAdd(models.Model): file = models.FileField() link = models.CharField(max_length=200, blank=True, null=True, editable = False, default='/') def __str__(self): return self.link def save(self, *args, **kwargs): self.link = '/media/' + self.file.name super().save(*args, **kwargs) class Notice(models.Model): date = models.DateField(editable=False) title = models.CharField(max_length=100) desc = models.TextField() file = models.ForeignKey(FileAdd, on_delete=models.CASCADE, null=True, blank=True) def __str__(self): return self.desc def save(self, *args, **kwargs): self.date = timezone.now() super().save(*args, **kwargs) class Document(models.Model): title = models.CharField(max_length=100) file = models.ForeignKey(FileAdd, on_delete=models.CASCADE, 
null=True, blank=True) def __str__(self): return self.title class AntiRagging(models.Model): title = models.CharField(max_length=100) link = models.CharField(max_length=100) def __str__(self): return self.title class VacationDatesFill(models.Model): description = models.CharField(max_length=50) dateOpen = models.DateField( help_text="Students can start filling details from this date (inclusive)") dateClose = models.DateField( help_text="Students can fill details only before this date (inclusive)") allowDateAfter = models.DateTimeField( help_text="Allowed Vacation Dates start from this date (inclusive)") allowDateBefore = models.DateTimeField( help_text="Allowed Vacation Dates end before this (inclusive)") messOption = models.ForeignKey( MessOptionOpen, on_delete=models.CASCADE, default=None, help_text="Mess Option for the months near corresponding Vacation") class Meta: verbose_name = "Vacation Dates Option" verbose_name_plural = "Vacation Dates Option" def __str__(self): return str(self.description) + ' Open: ' + str(self.dateOpen) + ' Close: ' + str(self.dateClose) def check_student_valid(self, student): leaves_count = Leave.objects.filter( student=student, dateTimeStart__gte=self.allowDateAfter, dateTimeEnd__lte=self.allowDateBefore ).count() if leaves_count == 0: return True else: return False def create_vacation(self, student, dateTimeStart, dateTimeEnd): try: leave = Leave(student=student, reason=self.description) leave.dateTimeStart = dateTimeStart leave.dateTimeEnd = dateTimeEnd leave.approved = True leave.disapproved = False leave.inprocess = False leave.comment = "Vacation" leave.save() return True, leave except Exception as e: return False, str(e) def check_date_in_range(self, date): if date.date() <= self.allowDateBefore.date() and date.date() >= self.allowDateAfter.date(): return True else: return False
MIT License
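A self-contained illustration of the same range-and-ordering check using plain datetimes (no Django models involved); the window bounds and requested dates are made up:

from datetime import datetime

allow_after = datetime(2020, 5, 1)    # stand-in for allowDateAfter
allow_before = datetime(2020, 5, 31)  # stand-in for allowDateBefore

def in_range(dt):
    return allow_after.date() <= dt.date() <= allow_before.date()

start = datetime(2020, 5, 10, 9, 0)
end = datetime(2020, 5, 20, 18, 0)

# Mirrors check_start_end_dates_in_range: both endpoints in range, start < end.
print(in_range(start) and in_range(end) and start < end)  # True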
yfauser/planespotter
app-server/app/lib/python2.7/site-packages/click/termui.py
echo_via_pager
python
def echo_via_pager(text, color=None):
    color = resolve_color_default(color)
    if not isinstance(text, string_types):
        text = text_type(text)
    from ._termui_impl import pager
    return pager(text + '\n', color)
This function takes a text and shows it via an environment specific pager on stdout.

.. versionchanged:: 3.0
   Added the `color` flag.

:param text: the text to page.
:param color: controls if the pager supports ANSI colors or not.  The default is autodetection.
https://github.com/yfauser/planespotter/blob/d400216502b6b5592a4889eb9fa277b2ddb75f9b/app-server/app/lib/python2.7/site-packages/click/termui.py#L198-L213
import os import sys import struct from ._compat import raw_input, text_type, string_types, isatty, strip_ansi, get_winterm_size, DEFAULT_COLUMNS, WIN from .utils import echo from .exceptions import Abort, UsageError from .types import convert_type from .globals import resolve_color_default visible_prompt_func = raw_input _ansi_colors = ('black', 'red', 'green', 'yellow', 'blue', 'magenta', 'cyan', 'white', 'reset') _ansi_reset_all = '\033[0m' def hidden_prompt_func(prompt): import getpass return getpass.getpass(prompt) def _build_prompt(text, suffix, show_default=False, default=None): prompt = text if default is not None and show_default: prompt = '%s [%s]' % (prompt, default) return prompt + suffix def prompt(text, default=None, hide_input=False, confirmation_prompt=False, type=None, value_proc=None, prompt_suffix=': ', show_default=True, err=False): result = None def prompt_func(text): f = hide_input and hidden_prompt_func or visible_prompt_func try: echo(text, nl=False, err=err) return f('') except (KeyboardInterrupt, EOFError): if hide_input: echo(None, err=err) raise Abort() if value_proc is None: value_proc = convert_type(type, default) prompt = _build_prompt(text, prompt_suffix, show_default, default) while 1: while 1: value = prompt_func(prompt) if value: break elif default is not None: return default try: result = value_proc(value) except UsageError as e: echo('Error: %s' % e.message, err=err) continue if not confirmation_prompt: return result while 1: value2 = prompt_func('Repeat for confirmation: ') if value2: break if value == value2: return result echo('Error: the two entered values do not match', err=err) def confirm(text, default=False, abort=False, prompt_suffix=': ', show_default=True, err=False): prompt = _build_prompt(text, prompt_suffix, show_default, default and 'Y/n' or 'y/N') while 1: try: echo(prompt, nl=False, err=err) value = visible_prompt_func('').lower().strip() except (KeyboardInterrupt, EOFError): raise Abort() if value in ('y', 'yes'): rv = True elif value in ('n', 'no'): rv = False elif value == '': rv = default else: echo('Error: invalid input', err=err) continue break if abort and not rv: raise Abort() return rv def get_terminal_size(): if sys.version_info >= (3, 3): import shutil shutil_get_terminal_size = getattr(shutil, 'get_terminal_size', None) if shutil_get_terminal_size: sz = shutil_get_terminal_size() return sz.columns, sz.lines if get_winterm_size is not None: return get_winterm_size() def ioctl_gwinsz(fd): try: import fcntl import termios cr = struct.unpack( 'hh', fcntl.ioctl(fd, termios.TIOCGWINSZ, '1234')) except Exception: return return cr cr = ioctl_gwinsz(0) or ioctl_gwinsz(1) or ioctl_gwinsz(2) if not cr: try: fd = os.open(os.ctermid(), os.O_RDONLY) try: cr = ioctl_gwinsz(fd) finally: os.close(fd) except Exception: pass if not cr or not cr[0] or not cr[1]: cr = (os.environ.get('LINES', 25), os.environ.get('COLUMNS', DEFAULT_COLUMNS)) return int(cr[1]), int(cr[0])
MIT License
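A typical use of echo_via_pager from a Click command, paging a long listing instead of flooding the terminal; the command itself is a made-up example (newer Click releases also accept a generator of text chunks):

import click

@click.command()
def listing():
    click.echo_via_pager('\n'.join('Line %d' % n for n in range(200)))

if __name__ == '__main__':
    listing()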
tuturto/pyherc
src/pyherc/test/builders/item.py
ItemBuilder.with_required_ammunition_type
python
def with_required_ammunition_type(self, ammunition_type):
    if self.weapon_data is None:
        self.weapon_data = WeaponData()
    self.weapon_data.ammunition_type = ammunition_type
    return self
Configure type of ammunition this weapon requires

:param ammunition_type: type of ammunition this weapon requires
:type ammunition_type: string
https://github.com/tuturto/pyherc/blob/4e7c72a4d80d335f7d3c48cecac96cd7105acac4/src/pyherc/test/builders/item.py#L135-L145
from pyherc.data import Item from pyherc.data.effects import EffectsCollection from pyherc.data.item import (AmmunitionData, ArmourData, WeaponData, TrapData, BootsData) class ItemBuilder(): def __init__(self): super().__init__() self.name = 'prototype' self.appearance = '' self.effect_handles = [] self.effects = [] self.location = () self.icon = 0 self.weapon_data = None self.armour_data = None self.ammunition_data = None self.trap_data = None self.boots_data = None self.tags = [] def with_name(self, name): self.name = name return self def with_appearance(self, appearance): self.appearance = appearance return self def with_effect_handle(self, handle): if hasattr(handle, 'build'): self.effect_handles.append(handle.build()) else: self.effect_handles.append(handle) return self def with_effect(self, effect): if hasattr(effect, 'build'): self.effects.append(effect.build()) else: self.effects.append(effect) return self def with_location(self, location): self.location = location return self def with_icon(self, icon): self.icon = icon return self def with_damage(self, damage, damage_type): if self.weapon_data is None: self.weapon_data = WeaponData() self.weapon_data.damage.append((damage, damage_type)) return self
MIT License
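A hypothetical builder chain configuring a ranged weapon that requires 'arrow' ammunition; it assumes the builder also exposes the usual build() method, which is not shown in the excerpt above:

from pyherc.test.builders.item import ItemBuilder

bow = (ItemBuilder()
       .with_name('short bow')
       .with_damage(2, 'piercing')
       .with_required_ammunition_type('arrow')
       .build())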
oscaar/oscaar
oscaar/dataBank.py
dataBank.outOfTransit
python
def outOfTransit(self):
    return (self.getTimes() < self.ingress) + (self.getTimes() > self.egress)
Boolean array where `True` are the times in `getTimes()` that are before ingress or after egress.

Returns
-------
List of bools
https://github.com/oscaar/oscaar/blob/5c953570d870c8b855ee388436aa360bde70869a/oscaar/dataBank.py#L436-L446
import numpy as np import pyfits from matplotlib import pyplot as plt from scipy import optimize from glob import glob import os import re import oscaar import mathMethods import sys import systematics oscaarpath = os.path.dirname(os.path.abspath(oscaar.__file__)) oscaarpathplus = os.path.join(oscaarpath,'extras') class dataBank: def __init__(self, initParFilePath=None): self.dict = {} self.parseInit(initParFilePath) self.flatPath = self.dict["flatPath"] self.rawRegionsList = self.dict["regPaths"] self.ingress = self.dict["ingress"] self.egress = self.dict["egress"] self.apertureRadii = self.dict["apertureRadius"] self.trackingZoom = self.dict["trackingZoom"] self.ccdGain = self.dict["ccdGain"] self.trackPlots = self.dict["trackPlots"] self.photPlots = self.dict["photPlots"] self.smoothConst = self.dict ["smoothConst"] self.darksPath = self.dict["darksPath"] self.imagesPaths = self.dict["imagesPaths"] self.timeKeyword = self.dict["timeKeyword"] if self.timeKeyword == 'JD': self.convertToJD = lambda x: x elif self.timeKeyword == 'DATE-OBS': self.convertToJD = mathMethods.ut2jdSplitAtT if not hasattr(sys, 'real_prefix'): self.masterFlat = np.ones_like(pyfits.getdata(self.imagesPaths[0])) elif self.flatPath != '': self.masterFlat = pyfits.getdata(self.flatPath) self.masterFlatPath = self.flatPath elif self.flatPath == '': self.masterFlat = np.ones_like(pyfits.getdata(self.imagesPaths[0])) self.allStarsDict = {} self.regionsFileList, self.regionsFITSrefsList = self.parseRawRegionsList(self.rawRegionsList) init_x_list,init_y_list = self.parseRegionsFile(self.regionsFileList[0]) zeroArray = np.zeros_like(self.imagesPaths,dtype=np.float32) self.times = np.zeros_like(self.imagesPaths,dtype=np.float64) self.keys = [] self.targetKey = '000' Nradii = len(self.apertureRadii) for i in range(0,len(init_x_list)): self.allStarsDict[str(i).zfill(3)] = {'x-pos':np.copy(zeroArray), 'y-pos':np.copy(zeroArray), 'rawFlux':[np.copy(zeroArray) for j in range(Nradii)], 'rawError':[np.copy(zeroArray) for j in range(Nradii)],'flag':False, 'scaledFlux':[np.copy(zeroArray) for j in range(Nradii)], 'scaledError':[np.copy(zeroArray) for j in range(Nradii)], 'chisq':np.zeros_like(self.apertureRadii)} self.allStarsDict[str(i).zfill(3)]['x-pos'][0] = init_x_list[i] self.allStarsDict[str(i).zfill(3)]['y-pos'][0] = init_y_list[i] self.keys.append(str(i).zfill(3)) def getDict(self): return self.allStarsDict def getMeanDarkFrame(self): if type(self.darksPath) == str and self.darksPath == "": return np.zeros_like(pyfits.getdata(self.imagesPaths[0])) else: return systematics.meanDarkFrame(self.darksPath) def centroidInitialGuess(self,expNumber,star): if expNumber == 0: est_x = self.allStarsDict[star]['x-pos'][0] est_y = self.allStarsDict[star]['y-pos'][0] elif self.imagesPaths[expNumber] in self.regionsFITSrefsList: refIndex = self.regionsFITSrefsList.index(self.imagesPaths[expNumber]) init_x_list, init_y_list = self.parseRegionsFile(self.regionsFileList[refIndex]) est_x = init_x_list[int(star)] est_y = init_y_list[int(star)] else: est_x = self.allStarsDict[star]['x-pos'][expNumber-1] est_y = self.allStarsDict[star]['y-pos'][expNumber-1] return est_x, est_y def storeCentroid(self,star,exposureNumber,xCentroid,yCentroid): self.allStarsDict[star]['x-pos'][exposureNumber] = xCentroid self.allStarsDict[star]['y-pos'][exposureNumber] = yCentroid def storeFlux(self,star,exposureNumber,rawFlux,rawError): self.allStarsDict[star]['rawFlux'][exposureNumber] = rawFlux self.allStarsDict[star]['rawError'][exposureNumber] = rawError def 
storeFluxes(self,star,exposureNumber,rawFluxes,rawErrors): for apertureRadiusIndex in range(len(self.apertureRadii)): self.allStarsDict[star]['rawFlux'][apertureRadiusIndex][exposureNumber] = rawFluxes[apertureRadiusIndex] self.allStarsDict[star]['rawError'][apertureRadiusIndex][exposureNumber] = rawErrors[apertureRadiusIndex] def getPaths(self): return self.imagesPaths def getFluxes(self,star): return self.allStarsDict[star]['rawFlux'] def getErrors(self,star): return self.allStarsDict[star]['rawError'] def storeTime(self,expNumber): timeStamp = pyfits.getheader(self.getPaths()[expNumber])[self.timeKeyword] self.times[expNumber] = self.convertToJD(timeStamp) def getTimes(self): return self.times def getFlag(self,star): return self.allStarsDict[star]['flag'] def getAllFlags(self): flags = [] for star in self.allStarsDict: flags.append(self.allStarsDict[star]['flag']) self.flags = flags return flags def setFlag(self,star,setting): self.allStarsDict[star]['flag'] = setting def getKeys(self): return self.keys def scaleFluxes(self): for star in self.allStarsDict: if star != self.targetKey: self.allStarsDict[star]['scaledFlux'], m = mathMethods.regressionScale(self.getFluxes(star),self.getFluxes(self.targetKey),self.getTimes(),self.ingress,self.egress,returncoeffs=True) print m self.allStarsDict[star]['scaledError'] = np.abs(m)*self.getErrors(star) if star == self.targetKey: self.allStarsDict[star]['scaledFlux'] = self.allStarsDict[star]['rawFlux'] self.allStarsDict[star]['scaledError'] = self.allStarsDict[star]['rawError'] def getFluxes_multirad(self,star,apertureRadiusIndex): return self.allStarsDict[star]['rawFlux'][apertureRadiusIndex] def getErrors_multirad(self,star,apertureRadiusIndex): return self.allStarsDict[star]['rawError'][apertureRadiusIndex] def scaleFluxes_multirad(self): for star in self.allStarsDict: for apertureRadiusIndex in range(len(self.apertureRadii)): if star != self.targetKey: print self.getFluxes_multirad(star,apertureRadiusIndex)[0] self.allStarsDict[star]['scaledFlux'][apertureRadiusIndex], m = mathMethods.regressionScale(self.getFluxes_multirad(star,apertureRadiusIndex),self.getFluxes_multirad(self.targetKey,apertureRadiusIndex),self.getTimes(),self.ingress,self.egress,returncoeffs=True) self.allStarsDict[star]['scaledError'][apertureRadiusIndex] = np.abs(m)*self.getErrors_multirad(star,apertureRadiusIndex) if star == self.targetKey: self.allStarsDict[star]['scaledFlux'][apertureRadiusIndex] = self.allStarsDict[star]['rawFlux'][apertureRadiusIndex] self.allStarsDict[star]['scaledError'][apertureRadiusIndex] = self.allStarsDict[star]['rawError'][apertureRadiusIndex] def getScaledFluxes(self,star): return np.array(self.allStarsDict[star]['scaledFlux']) def getScaledErrors(self,star): return np.array(self.allStarsDict[star]['scaledError']) def getScaledFluxes_multirad(self,star,apertureRadiusIndex): return np.array(self.allStarsDict[star]['scaledFlux'][apertureRadiusIndex]) def getScaledErrors_multirad(self,star,apertureRadiusIndex): return np.array(self.allStarsDict[star]['scaledError'][apertureRadiusIndex]) def calcChiSq(self): for star in self.allStarsDict: self.allStarsDict[star]['chisq'] = mathMethods.chiSquared(self.getFluxes(self.targetKey),self.getFluxes(star)) chisq = [] for star in self.allStarsDict: chisq.append(self.allStarsDict[star]['chisq']) self.chisq = np.array(chisq) self.meanChisq = np.mean(chisq) self.stdChisq = np.std(chisq) def calcChiSq_multirad(self,apertureRadiusIndex): for star in self.allStarsDict: print 
self.getFluxes_multirad(self.targetKey,apertureRadiusIndex),self.getFluxes_multirad(star,apertureRadiusIndex) self.allStarsDict[star]['chisq'][apertureRadiusIndex] = mathMethods.chiSquared(self.getFluxes_multirad(self.targetKey,apertureRadiusIndex),self.getFluxes_multirad(star,apertureRadiusIndex)) chisq = [] for star in self.allStarsDict: chisq.append(self.allStarsDict[star]['chisq'][apertureRadiusIndex]) self.chisq = np.array(chisq) self.meanChisq = np.mean(chisq) self.stdChisq = np.std(chisq) def calcMeanComparison_multirad(self,ccdGain=1): self.meanComparisonStars = [] self.meanComparisonStarErrors = [] self.comparisonStarWeights = [] for apertureRadiusIndex in range(len(self.apertureRadii)): chisq = [] for star in self.allStarsDict: chisq.append(self.allStarsDict[star]['chisq']) chisq = np.array(chisq) if (chisq==0).all(): self.calcChiSq_multirad(apertureRadiusIndex) numCompStars = len(self.allStarsDict) - 1 targetFullLength = len(self.getScaledFluxes_multirad(self.targetKey,apertureRadiusIndex)) print "Aperture rad:", apertureRadiusIndex print "Target raw flux:",self.getFluxes_multirad(self.targetKey,apertureRadiusIndex) print "Target scaled flux:",self.getScaledFluxes_multirad(self.targetKey,apertureRadiusIndex) target = self.getFluxes_multirad(self.targetKey,apertureRadiusIndex)[self.outOfTransit()] compStars = np.zeros([targetFullLength,numCompStars]) compStarsOOT = np.zeros([len(target),numCompStars]) compErrors = np.copy(compStars) columnCounter = 0 acceptedCompStarKeys = [] compStarKeys = [] for star in self.allStarsDict: if star != self.targetKey and (np.abs(self.meanChisq - self.allStarsDict[star]['chisq']) < 2*self.stdChisq).any(): compStars[:,columnCounter] = self.getScaledFluxes_multirad(star,apertureRadiusIndex).astype(np.float64) compStarsOOT[:,columnCounter] = self.getScaledFluxes_multirad(star,apertureRadiusIndex)[self.outOfTransit()].astype(np.float64) compErrors[:,columnCounter] = self.getScaledErrors_multirad(star,apertureRadiusIndex).astype(np.float64) compStarKeys.append(int(star)) columnCounter += 1 elif star != self.targetKey and (np.abs(self.meanChisq - self.allStarsDict[star]['chisq']) > 2*self.stdChisq): print 'Star '+str(star)+' excluded from regression' compStarKeys.append(int(star)) columnCounter += 1 initP = np.zeros([numCompStars])+ 1./numCompStars def errfunc(p,target): if all(p >=0.0): return np.dot(p,compStarsOOT.T) - target bestFitP = optimize.leastsq(errfunc,initP[:],args=(target.astype(np.float64)),maxfev=10000000,epsfcn=np.finfo(np.float32).eps)[0] print '\nBest fit regression coefficients:',bestFitP print 'Default weight:',1./numCompStars self.comparisonStarWeights_i = np.vstack([compStarKeys,bestFitP]) self.meanComparisonStar = np.dot(bestFitP,compStars.T) self.meanComparisonStarError = np.sqrt(np.dot(bestFitP**2,compErrors.T**2)) self.meanComparisonStars.append(self.meanComparisonStar) self.meanComparisonStarErrors.append(self.meanComparisonStarError) self.comparisonStarWeights.append(self.comparisonStarWeights_i) return self.meanComparisonStars, self.meanComparisonStarErrors def getAllChiSq(self): return self.chisq
MIT License
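A self-contained NumPy illustration of the out-of-transit boolean mask with made-up Julian dates; in oscaar the times come from dataBank.getTimes() and the ingress/egress values from the init file:

import numpy as np

times = np.linspace(2455000.40, 2455000.60, 9)
ingress, egress = 2455000.45, 2455000.55

# '+' on boolean arrays acts as an element-wise OR, exactly as in outOfTransit().
out_of_transit = (times < ingress) + (times > egress)
print(out_of_transit)
print(times[out_of_transit])  # only the baseline points before ingress / after egress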
derfies/panda3d-editor
src/pandaEditor/game/nodes/manager.py
Manager.get_type_string
python
def get_type_string(self, comp):
    if hasattr(comp.__class__, 'cType'):
        return comp.cType
    type_str = type(comp).__name__
    if type_str == 'NodePath':
        type_str = comp.node().get_tag(TAG_NODE_TYPE)
        if not type_str:
            type_str = type(comp.node()).__name__
    return type_str
Return the type of the component as a string. Components are identified in the following manner (in order):

- If the component has the class variable 'cType', then this string will be used as the type.
- Otherwise, use the component's type name as the type.
- If this is 'NodePath', then look for an overriding tag on the node for the type.
- If this tag is missing, use the type of the NodePath's node as the type.
https://github.com/derfies/panda3d-editor/blob/a50939bd4bfa5c22d27a9ddee090717e8d95f404/src/pandaEditor/game/nodes/manager.py#L115-L135
from game.nodes.actor import Actor from game.nodes.base import Base from game.nodes.bullet import ( BulletBoxShape, BulletCapsuleShape, BulletDebugNode, BulletPlaneShape, BulletRigidBodyNode, BulletSphereShape, BulletWorld, ) from game.nodes.camera import Camera from game.nodes.collision import ( CollisionBox, CollisionCapsule, CollisionInvSphere, CollisionNode, CollisionRay, CollisionSphere, ) from game.nodes.constants import TAG_NODE_TYPE from game.nodes.fog import Fog from game.nodes.lensnode import LensNode from game.nodes.lights import ( AmbientLight, DirectionalLight, PointLight, Spotlight ) from game.nodes.modelnode import ModelNode from game.nodes.modelroot import ModelRoot from game.nodes.nodepath import NodePath from game.nodes.nongraphobject import NonGraphObject from game.nodes.pandanode import PandaNode from game.nodes.particleeffect import ParticleEffect from game.nodes.sceneroot import SceneRoot from game.nodes.showbasedefaults import ( Aspect2d, BaseCam, BaseCamera, Cam2d, Camera2d, Pixel2d, Render, Render2d, ) from game.nodes.texture import Texture class Manager: def __init__(self): self.wrappers = { 'Actor': Actor, 'AmbientLight': AmbientLight, 'Aspect2d': Aspect2d, 'Base': Base, 'BaseCam': BaseCam, 'BaseCamera': BaseCamera, 'BulletBoxShape': BulletBoxShape, 'BulletCapsuleShape': BulletCapsuleShape, 'BulletDebugNode': BulletDebugNode, 'BulletPlaneShape': BulletPlaneShape, 'BulletSphereShape': BulletSphereShape, 'BulletRigidBodyNode': BulletRigidBodyNode, 'BulletWorld': BulletWorld, 'Cam2d': Cam2d, 'Camera': Camera, 'Camera2d': Camera2d, 'CollisionBox': CollisionBox, 'CollisionCapsule': CollisionCapsule, 'CollisionInvSphere': CollisionInvSphere, 'CollisionNode': CollisionNode, 'CollisionRay': CollisionRay, 'CollisionSphere': CollisionSphere, 'DirectionalLight': DirectionalLight, 'Fog': Fog, 'LensNode': LensNode, 'ModelNode': ModelNode, 'ModelRoot': ModelRoot, 'NodePath': NodePath, 'NonGraphObject': NonGraphObject, 'PandaNode': PandaNode, 'ParticleEffect': ParticleEffect, 'Pixel2d': Pixel2d, 'PointLight': PointLight, 'Render': Render, 'Render2d': Render2d, 'SceneRoot': SceneRoot, 'Spotlight': Spotlight, 'Texture': Texture, } def wrap(self, obj): comp_cls = self.get_wrapper(obj) if comp_cls is not None: return comp_cls(obj) else: comp_cls = self.get_default_wrapper(obj) return comp_cls(obj) def get_wrapper(self, obj): type_ = self.get_type_string(obj) return self.wrappers.get(type_) def get_component_by_name(self, c_type): return self.wrappers.get(c_type)
MIT License
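A minimal stand-in sketch of the first two steps of the resolution order (no panda3d required); the Widget/Plain classes are invented for illustration and the NodePath tag lookup is omitted:

class Widget:
    cType = 'MyWidget'   # explicit override, as a wrapped component class might set

class Plain:
    pass

def type_string(comp):
    if hasattr(comp.__class__, 'cType'):
        return comp.cType
    return type(comp).__name__

print(type_string(Widget()))  # MyWidget
print(type_string(Plain()))   # Plain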
sony/nnabla
python/src/nnabla/backward_function/r_div_scalar.py
r_div_scalar_backward
python
def r_div_scalar_backward(inputs, val=1):
    dy = inputs[0]
    x0 = inputs[1]
    dx0 = - dy * val / x0 ** 2
    return dx0
Args:
    inputs (list of nn.Variable): Incoming grads/inputs to/of the forward function.
    kwargs (dict of arguments): Dictionary of the corresponding function arguments.

Return:
    list of Variable: Return the gradients w.r.t. the inputs of the corresponding function.
https://github.com/sony/nnabla/blob/fef9b6bca02a002de880a13f3196df14369445f4/python/src/nnabla/backward_function/r_div_scalar.py#L16-L28
Apache License 2.0
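A quick numerical check of the analytic gradient d(val/x)/dx = -val/x**2 that this backward computes, using plain NumPy stand-ins for the upstream gradient and the input:

import numpy as np

val = 3.0
x0 = np.array([0.5, 1.0, 2.0])
dy = np.ones_like(x0)                  # upstream gradient of ones

analytic = -dy * val / x0 ** 2         # same expression as r_div_scalar_backward

eps = 1e-6
numeric = (val / (x0 + eps) - val / (x0 - eps)) / (2 * eps)

print(np.allclose(analytic, numeric, atol=1e-4))  # True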
cihai/cihai
cihai/config.py
Configurator.write
python
def write(self, **updates):
    if updates:
        self._data.update(**updates)
    pass
If no delta is created from DEFAULT, it does not write. If the file doesn't exist, it will be created.
https://github.com/cihai/cihai/blob/650e865655c0c0b15f39a44a8b24d69761acbb11/cihai/config.py#L112-L120
import os from appdirs import AppDirs from ._compat import string_types def expand_config(d, dirs): context = { 'user_cache_dir': dirs.user_cache_dir, 'user_config_dir': dirs.user_config_dir, 'user_data_dir': dirs.user_data_dir, 'user_log_dir': dirs.user_log_dir, 'site_config_dir': dirs.site_config_dir, 'site_data_dir': dirs.site_data_dir, } for k, v in d.items(): if isinstance(v, dict): expand_config(v, dirs) if isinstance(v, string_types): d[k] = os.path.expanduser(os.path.expandvars(d[k])) d[k] = d[k].format(**context) class Configurator(object): def __init__(self, namespace=''): self.dirs = AppDirs("cihai", "cihai team") self.namespace = namespace def get_names(self): pass @property def file(self): return def read(self): return def get_delta(self, **updates): pass
MIT License
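The class above is still largely a stub, so the following is only a sketch of the delta-only write behaviour the docstring describes: keys equal to the defaults are skipped and the file is created on first write. The DEFAULT mapping, path and keys are invented for illustration:

import json
import os

DEFAULT = {'debug': False, 'database': 'unihan.db'}

def write_config(path, data):
    delta = {k: v for k, v in data.items() if DEFAULT.get(k) != v}
    if not delta:
        return                        # nothing differs from DEFAULT: do not write
    os.makedirs(os.path.dirname(path), exist_ok=True)
    with open(path, 'w') as f:        # creates the file if it does not exist
        json.dump(delta, f)

write_config('/tmp/cihai/config.json', {'debug': True, 'database': 'unihan.db'})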
jest-community/jest-pytest
src/__tests__/integration/home-assistant/homeassistant/components/lock/verisure.py
VerisureDoorlock.available
python
def available(self):
    return hub.get_first(
        "$.doorLockStatusList[?(@.deviceLabel=='%s')]",
        self._device_label) is not None
Return True if entity is available.
https://github.com/jest-community/jest-pytest/blob/b197b0b31e3ca5c411202d97583cbd2d2b0b92e9/src/__tests__/integration/home-assistant/homeassistant/components/lock/verisure.py#L56-L60
import logging from time import sleep from time import time from homeassistant.components.verisure import HUB as hub from homeassistant.components.verisure import (CONF_LOCKS, CONF_CODE_DIGITS) from homeassistant.components.lock import LockDevice from homeassistant.const import ( ATTR_CODE, STATE_LOCKED, STATE_UNKNOWN, STATE_UNLOCKED) _LOGGER = logging.getLogger(__name__) def setup_platform(hass, config, add_devices, discovery_info=None): locks = [] if int(hub.config.get(CONF_LOCKS, 1)): hub.update_overview() locks.extend([ VerisureDoorlock(device_label) for device_label in hub.get( "$.doorLockStatusList[*].deviceLabel")]) add_devices(locks) class VerisureDoorlock(LockDevice): def __init__(self, device_label): self._device_label = device_label self._state = STATE_UNKNOWN self._digits = hub.config.get(CONF_CODE_DIGITS) self._changed_by = None self._change_timestamp = 0 @property def name(self): return hub.get_first( "$.doorLockStatusList[?(@.deviceLabel=='%s')].area", self._device_label) @property def state(self): return self._state @property
MIT License
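A stand-in sketch of the availability check: the lock counts as available only while a doorLockStatusList entry matches its device label. The overview data and the tiny get_first helper below replace the real Verisure hub and its JSONPath query:

overview = {'doorLockStatusList': [{'deviceLabel': 'ABCD 1234'}]}

def get_first(device_label):
    matches = [d for d in overview['doorLockStatusList']
               if d['deviceLabel'] == device_label]
    return matches[0] if matches else None

print(get_first('ABCD 1234') is not None)  # True  -> entity available
print(get_first('ZZZZ 0000') is not None)  # False -> entity unavailable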
tyohei/chainerkfac
chainerkfac/communicators/pure_nccl_communicator.py
PureNcclCommunicator.all_gather_v_arrays
python
def all_gather_v_arrays(self, arrays, stream=None, debug=False):
    if stream is None:
        stream = chainer.cuda.Stream.null
    local_rank = self.rank
    self._init_comms()
    nccl_comm = self.nccl_comm
    nelems = _get_divideable_nelems(nccl_comm, _utility.get_nelems(arrays))
    needs_sync = False
    nbytes = nelems * self._agv_comm_dtype.itemsize
    needs_sync |= _utility.assign(self.gpu_buf_a, nbytes)
    needs_sync |= _utility.assign(self.gpu_buf_b, nbytes)
    if needs_sync:
        stream.synchronize()
        chainer.cuda.Stream.null.synchronize()
    if self._agv_memset_kernel is None:
        self._agv_memset_kernel = _get_memset_kernel(self._agv_comm_dtype)
    self._agv_memset_kernel(
        self.gpu_buf_a.array(nelems, dtype=self._agv_comm_dtype),
        stream=stream)
    target = local_rank
    self._pack_arrays_to_buffer(arrays, self.gpu_buf_a,
                                self._agv_comm_dtype, target, stream, debug)
    sendbuf = self.gpu_buf_a.ptr()
    recvbuf = self.gpu_buf_b.ptr()
    nccl_dtype = _get_nccl_dtype(self._agv_comm_dtype)
    nccl_comm.allReduce(sendbuf, recvbuf, nelems, nccl_dtype,
                        nccl.NCCL_SUM, stream.ptr)
    divisor = 1
    target = -1
    self._unpack_arrays_from_buffer(arrays, self.gpu_buf_b,
                                    self._agv_comm_dtype, target, divisor,
                                    stream, debug)
Executes All-Gather-V.

0. memset: gbuf_A <- (zero)
1. pack:   gbuf_A <- arrays
2. send:   ....   <- gbuf_A
3. recv:   gbuf_B <- ....
4. unpack: arrays <- gbuf_B
https://github.com/tyohei/chainerkfac/blob/99e88396268e8b7d099fdb6bbf54e309e98293c8/chainerkfac/communicators/pure_nccl_communicator.py#L150-L218
import math import cupy import numpy as np import chainer from chainermn.communicators import _communication_utility from chainermn.communicators import _memory_utility from chainermn import nccl from chainerkfac.communicators import _utility from chainerkfac.communicators import base class PureNcclCommunicator(base.KfacCommunicatorBase): def __init__(self, mpi_comm): super(PureNcclCommunicator, self).__init__(mpi_comm) self.nccl_comm = None self.gpu_buf_a = _memory_utility.DeviceMemory() self.gpu_buf_b = _memory_utility.DeviceMemory() self._rsv_comm_dtype = np.dtype(np.float32) self._agv_comm_dtype = np.dtype(np.float32) self._rsv_memset_kernel = None self._agv_memset_kernel = None self._ainfo = None def _init_comms(self): if self.nccl_comm is not None: return self.nccl_comm = _communication_utility.init_nccl_comm(self.mpi_comm) def reduce_scatter_v_arrays(self, arrays, stream=None, debug=False): if stream is None: stream = chainer.cuda.Stream.null self._init_comms() nccl_comm = self.nccl_comm local_rank = self.rank nelems = _utility.get_nelems(arrays) needs_sync = False nbytes = nelems * self._rsv_comm_dtype.itemsize needs_sync |= _utility.assign(self.gpu_buf_a, nbytes) needs_sync |= _utility.assign(self.gpu_buf_b, nbytes) if needs_sync: stream.synchronize() chainer.cuda.Stream.null.synchronize() target = -1 self._pack_arrays_to_buffer(arrays, self.gpu_buf_a, self._rsv_comm_dtype, target, stream, debug) sendbuf = self.gpu_buf_a.ptr() recvbuf = self.gpu_buf_b.ptr() nccl_dtype = _get_nccl_dtype(self._rsv_comm_dtype) nccl_comm.allReduce(sendbuf, recvbuf, nelems, nccl_dtype, nccl.NCCL_SUM, stream.ptr) divisor = self.size target = local_rank self._unpack_arrays_from_buffer(arrays, self.gpu_buf_b, self._rsv_comm_dtype, target, divisor, stream, debug) def _pack_arrays_to_buffer(self, arrays, gpu_buf, buf_dtype, target, stream, debug=False): self._ainfo = _ArraysInfo(arrays) ainfo = self._ainfo if debug and self.rank == 0: ainfo.show() n_arrays = ainfo.n_arrays buf_dtype = _get_nccl_dtype(buf_dtype) total_threads = ainfo.size_total n_threads = 128 n_blocks = (total_threads + n_threads - 1) // n_threads _cupy_batched_pack_arrays()( (n_blocks,), (n_threads,), (gpu_buf.memory.ptr, buf_dtype, ainfo.buf_size_csum, ainfo.dptr, ainfo.dtype, ainfo.size_csum, ainfo.triangle_size, ainfo.rank, target, total_threads, n_arrays), stream=stream) def _unpack_arrays_from_buffer(self, arrays, gpu_buf, buf_dtype, target, divisor, stream, debug=False): if self._ainfo is None: self._ainfo = _ArraysInfo(arrays) ainfo = self._ainfo if debug and self.rank == 0: ainfo.show() n_arrays = ainfo.n_arrays buf_dtype = _get_nccl_dtype(buf_dtype) total_threads = ainfo.size_total n_threads = 128 n_blocks = (total_threads + n_threads - 1) // n_threads _cupy_batched_unpack_arrays()( (n_blocks,), (n_threads,), (ainfo.dptr, ainfo.dtype, ainfo.size_csum, gpu_buf.memory.ptr, buf_dtype, ainfo.buf_size_csum, ainfo.triangle_size, ainfo.rank, target, divisor, total_threads, n_arrays), stream=stream) self._ainfo = None
MIT License
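The docstring above describes emulating All-Gather-V with a sum all-reduce over zero-initialized buffers (memset, pack your own slot, allReduce, unpack). A CPU-only NumPy sketch of that idea, with ranks simulated in-process and no NCCL or GPUs involved:

import numpy as np

def simulated_allgather_via_allreduce(per_rank_chunks):
    # Illustrative only: emulate All-Gather-V with a sum all-reduce.
    sizes = [len(c) for c in per_rank_chunks]
    offsets = np.concatenate(([0], np.cumsum(sizes)))
    total = offsets[-1]

    sendbufs = []
    for rank, chunk in enumerate(per_rank_chunks):
        buf = np.zeros(total, dtype=np.float32)           # step 0: memset
        buf[offsets[rank]:offsets[rank + 1]] = chunk      # step 1: pack own slot
        sendbufs.append(buf)

    recvbuf = np.sum(sendbufs, axis=0)                    # steps 2-3: sum all-reduce
    return [recvbuf[offsets[r]:offsets[r + 1]]            # step 4: unpack
            for r in range(len(sizes))]

chunks = [np.array([1.0, 2.0]), np.array([3.0]), np.array([4.0, 5.0, 6.0])]
print(simulated_allgather_via_allreduce(chunks))

Because every rank contributes zeros outside its own slot, the element-wise sum reproduces the concatenation of all chunks on every rank.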
mabuchilab/qnet
src/qnet/algebra/core/circuit_algebra.py
SLH.space
python
def space(self):
    args_spaces = (self.S.space, self.L.space, self.H.space)
    return ProductSpace.create(*args_spaces)
Total Hilbert space
https://github.com/mabuchilab/qnet/blob/cc20d26dad78691d34c67173e5cd67dcac94208a/src/qnet/algebra/core/circuit_algebra.py#L368-L371
import os import re from abc import ABCMeta, abstractmethod from collections import OrderedDict from functools import reduce import numpy as np import sympy from sympy import I from sympy import Matrix as SympyMatrix from sympy import symbols, sympify from .abstract_algebra import ( Expression, Operation, substitute, ) from .algebraic_properties import ( assoc, check_cdims, filter_neutral, filter_cid, match_replace, match_replace_binary) from .exceptions import ( AlgebraError, BasisNotSetError, CannotConvertToSLH, CannotEliminateAutomatically, CannotVisualize, IncompatibleBlockStructures, WrongCDimError) from .hilbert_space_algebra import LocalSpace, ProductSpace from .matrix_algebra import ( Matrix, block_matrix, identity_matrix, permutation_matrix, vstackm, zerosm) from .operator_algebra import ( IdentityOperator, LocalProjector, LocalSigma, Operator, OperatorPlus, OperatorSymbol, ScalarTimesOperator, ZeroOperator, adjoint, get_coeffs) from ...utils.permutations import ( BadPermutationError, block_perm_and_perms_within_blocks, check_permutation, full_block_perm, invert_permutation, permutation_to_block_permutations, ) from ...utils.singleton import Singleton, singleton_object __all__ = [ 'CPermutation', 'CircuitSymbol', 'Concatenation', 'Feedback', 'Circuit', 'SLH', 'SeriesInverse', 'SeriesProduct', 'FB', 'circuit_identity', 'eval_adiabatic_limit', 'extract_channel', 'getABCD', 'map_channels', 'move_drive_to_H', 'pad_with_identity', 'prepare_adiabatic_limit', 'try_adiabatic_elimination', 'CIdentity', 'CircuitZero', 'Component', ] __private__ = [] class Circuit(metaclass=ABCMeta): @property @abstractmethod def cdim(self) -> int: raise NotImplementedError(self.__class__.__name__) @property def block_structure(self) -> tuple: return self._block_structure @property def _block_structure(self) -> tuple: return tuple((self.cdim, )) def index_in_block(self, channel_index: int) -> int: if channel_index < 0 or channel_index >= self.cdim: raise ValueError() struct = self.block_structure if len(struct) == 1: return channel_index, 0 i = 1 while sum(struct[:i]) <= channel_index and i < self.cdim: i += 1 block_index = i - 1 index_in_block = channel_index - sum(struct[:block_index]) return index_in_block, block_index def get_blocks(self, block_structure=None): if block_structure is None: block_structure = self.block_structure try: return self._get_blocks(block_structure) except IncompatibleBlockStructures as e: raise e def _get_blocks(self, block_structure): if block_structure == self.block_structure: return (self, ) raise IncompatibleBlockStructures("Requested incompatible block " "structure %s" % (block_structure,)) def series_inverse(self) -> 'Circuit': return self._series_inverse() def _series_inverse(self) -> 'Circuit': return SeriesInverse.create(self) def feedback(self, *, out_port=None, in_port=None): if out_port is None: out_port = self.cdim - 1 if in_port is None: in_port = self.cdim - 1 return self._feedback(out_port=out_port, in_port=in_port) def _feedback(self, *, out_port: int, in_port: int) -> 'Circuit': return Feedback.create(self, out_port=out_port, in_port=in_port) def show(self): from IPython.display import Image, display fname = self.render() display(Image(filename=fname)) def render(self, fname=''): import qnet.visualization.circuit_pyx as circuit_visualization from tempfile import gettempdir from time import time, sleep if not fname: tmp_dir = gettempdir() fname = os.path.join(tmp_dir, "tmp_{}.png".format(hash(time))) if circuit_visualization.draw_circuit(self, fname): done = False 
for k in range(20): if os.path.exists(fname): done = True break else: sleep(.5) if done: return fname raise CannotVisualize() def creduce(self) -> 'Circuit': return self._creduce() @abstractmethod def _creduce(self) -> 'Circuit': return self def toSLH(self) -> 'SLH': return self._toSLH() @abstractmethod def _toSLH(self) -> 'SLH': raise NotImplementedError(self.__class__.__name__) def coherent_input(self, *input_amps) -> 'Circuit': return self._coherent_input(*input_amps) def _coherent_input(self, *input_amps) -> 'Circuit': n_inputs = len(input_amps) if n_inputs != self.cdim: raise WrongCDimError() from qnet.algebra.library.circuit_components import ( CoherentDriveCC as Displace_cc) if n_inputs == 1: concat_displacements = Displace_cc(displacement=input_amps[0]) else: displacements = [ Displace_cc(displacement=amp) if amp != 0 else circuit_identity(1) for amp in input_amps] concat_displacements = Concatenation(*displacements) return self << concat_displacements def __lshift__(self, other): if isinstance(other, Circuit): return SeriesProduct.create(self, other) return NotImplemented def __add__(self, other): if isinstance(other, Circuit): return Concatenation.create(self, other) return NotImplemented class SLH(Circuit, Expression): def __init__(self, S, L, H): if not isinstance(S, Matrix): S = Matrix(S) if not isinstance(L, Matrix): L = Matrix(L) if S.shape[0] != L.shape[0]: raise ValueError('S and L misaligned: S = {!r}, L = {!r}' .format(S, L)) if L.shape[1] != 1: raise ValueError(("L has wrong shape %s. L must be a column vector " "of operators (shape n × 1)") % str(L.shape)) if not all(isinstance(s, Operator) for s in S.matrix.ravel()): S = S * IdentityOperator if not all(isinstance(l, Operator) for l in L.matrix.ravel()): L = L * IdentityOperator if not isinstance(H, Operator): H = H * IdentityOperator self.S = S self.L = L self.H = H super().__init__(S, L, H) @property def args(self): return self.S, self.L, self.H @property def Ls(self): return list(self.L.matrix[:, 0]) @property def cdim(self): return self.S.shape[0] def _creduce(self): return self @property
MIT License
nrel/floris
floris/tools/optimization/scipy/power_density_1D.py
PowerDensityOptimization1D.optimize
python
def optimize(self):
    print("=====================================================")
    print("Optimizing turbine layout...")
    print("Number of parameters to optimize = ", len(self.x0))
    print("=====================================================")

    opt_vars_norm = self._optimize()

    print("Optimization complete!")

    opt_locs = [
        self._unnorm(valx, self.bndx_min, self.bndx_max)
        for valx in opt_vars_norm[0 : self.nturbs]
    ]

    opt_yaw = [
        self._unnorm(yaw, self.yaw_min, self.yaw_max)
        for yaw in opt_vars_norm[self.nturbs :]
    ]

    return [opt_locs, opt_yaw]
This method finds the optimized layout of wind turbines for power
production given the provided frequencies of occurrence of wind
conditions (wind speed, direction).

Returns:
    opt_locs (iterable): A list of the optimized x, y locations of each
        turbine (m).
https://github.com/nrel/floris/blob/ef4934ec7feb7afd2615772d364a1eaa28db93e9/floris/tools/optimization/scipy/power_density_1D.py#L218-L247
import numpy as np import matplotlib.pyplot as plt from scipy.optimize import minimize from .optimization import Optimization class PowerDensityOptimization1D(Optimization): def __init__( self, fi, wd, ws, freq, AEP_initial, x0=None, bnds=None, min_dist=None, opt_method="SLSQP", opt_options=None, ): super().__init__(fi) self.epsilon = np.finfo(float).eps self.counter = 0 if opt_options is None: self.opt_options = {"maxiter": 100, "disp": True, "iprint": 2, "ftol": 1e-9} self.reinitialize_opt( wd=wd, ws=ws, freq=freq, AEP_initial=AEP_initial, x0=x0, bnds=bnds, min_dist=min_dist, opt_method=opt_method, opt_options=opt_options, ) def _PowDens_opt(self, optVars): locs = optVars[0 : self.nturbs] locs_unnorm = [ self._unnorm(valx, self.bndx_min, self.bndx_max) for valx in locs ] turb_controls = [ optVars[self.nturbs + i * self.nturbs : 2 * self.nturbs + i * self.nturbs] for i in range(len(self.wd)) ] turb_controls_unnorm = [ self._unnorm(yaw, self.yaw_min, self.yaw_max) for yaw in turb_controls ] self._change_coordinates(locs_unnorm) for i, turbine in enumerate(self.fi.floris.farm.turbine_map.turbines): turbine.yaw_angle = turb_controls_unnorm[0][i] layout_dist = self._avg_dist(locs) return layout_dist / self.layout_dist_initial def _avg_dist(self, locs): dist = [] for i in range(len(locs) - 1): dist.append(locs[i + 1] - locs[i]) return np.mean(dist) def _change_coordinates(self, locs): layout_x = locs layout_y = [coord.x2 for coord in self.fi.floris.farm.turbine_map.coords] layout_array = [layout_x, layout_y] self.fi.reinitialize_flow_field(layout_array=layout_array) def _set_opt_bounds(self): self.bnds = [ (0.0, 0.0), (0.083333, 0.25), (0.166667, 0.5), (0.25, 0.75), (0.33333, 1.0), (0.0, 1.0), (0.0, 1.0), (0.0, 1.0), (0.0, 1.0), (0.0, 1.0), ] def _AEP_single_wd(self, wd, ws): self.fi.reinitialize_flow_field(wind_direction=wd, wind_speed=ws) self.fi.calculate_wake() turb_powers = [turbine.power for turbine in self.fi.floris.farm.turbines] return np.sum(turb_powers) * self.freq[0] * 8760 def _AEP_constraint(self, optVars): locs = optVars[0 : self.nturbs] locs_unnorm = [ self._unnorm(valx, self.bndx_min, self.bndx_max) for valx in locs ] turb_controls = [ optVars[self.nturbs + i * self.nturbs : 2 * self.nturbs + i * self.nturbs] for i in range(len(self.wd)) ] turb_controls_unnorm = [ self._unnorm(yaw, self.yaw_min, self.yaw_max) for yaw in turb_controls ] for i, turbine in enumerate(self.fi.floris.farm.turbine_map.turbines): turbine.yaw_angle = turb_controls_unnorm[0][i] self._change_coordinates(locs_unnorm) return ( self._AEP_single_wd(self.wd[0], self.ws[0]) / self.AEP_initial - 1 ) * 1000000.0 def _space_constraint(self, x_in, min_dist): x = np.nan_to_num(x_in[0 : self.nturbs]) y = np.nan_to_num(x_in[self.nturbs :]) dist = [ np.sqrt((x[i] - x[j]) ** 2 + (y[i] - y[j]) ** 2) for i in range(self.nturbs) for j in range(self.nturbs) if i != j ] return np.min(dist) - self._norm(min_dist, self.bndx_min, self.bndx_max) def _generate_constraints(self): tmp1 = { "type": "ineq", "fun": lambda x, *args: self._space_constraint(x, self.min_dist), "args": (self.min_dist,), } tmp2 = {"type": "ineq", "fun": lambda x, *args: self._AEP_constraint(x)} self.cons = [tmp1, tmp2] def _optimize(self): self.residual_plant = minimize( self._PowDens_opt, self.x0, method=self.opt_method, bounds=self.bnds, constraints=self.cons, options=self.opt_options, ) opt_results = self.residual_plant.x return opt_results
Apache License 2.0
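The optimizer above works on decision variables scaled to [0, 1] and maps them back with `_unnorm` before returning. A standalone sketch of that unpacking step, assuming the usual min-max scaling for the `_unnorm` helper (its exact definition is not shown in the record), with illustrative bounds:

import numpy as np

def _unnorm(val, x1, x2):
    # Assumed inverse of min-max normalization: [0, 1] -> [x1, x2].
    return np.asarray(val) * (x2 - x1) + x1

nturbs = 3
# First nturbs entries are normalized x locations, the rest normalized yaw angles.
opt_vars_norm = np.array([0.0, 0.5, 1.0, 0.2, 0.4, 0.6])
bndx_min, bndx_max = 0.0, 1000.0
yaw_min, yaw_max = 0.0, 25.0

opt_locs = _unnorm(opt_vars_norm[:nturbs], bndx_min, bndx_max)
opt_yaw = _unnorm(opt_vars_norm[nturbs:], yaw_min, yaw_max)
print(opt_locs)  # [   0.  500. 1000.]
print(opt_yaw)   # [ 5. 10. 15.]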
dtuwindenergy/pywake
py_wake/examples/data/iea37/iea37_aepcalc.py
calcAEP
python
def calcAEP(turb_coords, wind_freq, wind_speed, wind_dir,
            turb_diam, turb_ci, turb_co, rated_ws, rated_pwr):
    num_bins = len(wind_freq)
    pwr_produced = np.zeros(num_bins)
    for i in range(num_bins):
        pwr_produced[i] = DirPower(turb_coords, wind_dir[i], wind_speed,
                                   turb_diam, turb_ci, turb_co,
                                   rated_ws, rated_pwr)
    hrs_per_year = 365. * 24.
    AEP = hrs_per_year * (wind_freq * pwr_produced)
    AEP /= 1.E6
    return AEP
Calculate the wind farm AEP.
https://github.com/dtuwindenergy/pywake/blob/ab02a41b5b4ebe7d17877e265ae64d2902324298/py_wake/examples/data/iea37/iea37_aepcalc.py#L116-L135
from __future__ import print_function import numpy as np import yaml from math import radians as DegToRad coordinate = np.dtype([('x', 'f8'), ('y', 'f8')]) def WindFrame(turb_coords, wind_dir_deg): wind_dir_deg = 270. - wind_dir_deg wind_dir_rad = DegToRad(wind_dir_deg) cos_dir = np.cos(-wind_dir_rad) sin_dir = np.sin(-wind_dir_rad) frame_coords = np.recarray(turb_coords.shape, coordinate) frame_coords.x = (turb_coords.x * cos_dir) - (turb_coords.y * sin_dir) frame_coords.y = (turb_coords.x * sin_dir) + (turb_coords.y * cos_dir) return frame_coords def GaussianWake(frame_coords, turb_diam): num_turb = len(frame_coords) CT = 4.0 * 1. / 3. * (1.0 - 1. / 3.) k = 0.0324555 loss = np.zeros(num_turb) for i in range(num_turb): loss_array = np.zeros(num_turb) for j in range(num_turb): x = frame_coords.x[i] - frame_coords.x[j] y = frame_coords.y[i] - frame_coords.y[j] if x > 0.: sigma = k * x + turb_diam / np.sqrt(8.) exponent = -0.5 * (y / sigma)**2 radical = 1. - CT / (8. * sigma**2 / turb_diam**2) loss_array[j] = (1. - np.sqrt(radical)) * np.exp(exponent) loss[i] = np.sqrt(np.sum(loss_array**2)) return loss def DirPower(turb_coords, wind_dir_deg, wind_speed, turb_diam, turb_ci, turb_co, rated_ws, rated_pwr): num_turb = len(turb_coords) frame_coords = WindFrame(turb_coords, wind_dir_deg) loss = GaussianWake(frame_coords, turb_diam) wind_speed_eff = wind_speed * (1. - loss) turb_pwr = np.zeros(num_turb) for n in range(num_turb): if ((turb_ci <= wind_speed_eff[n]) and (wind_speed_eff[n] < rated_ws)): turb_pwr[n] = rated_pwr * ((wind_speed_eff[n] - turb_ci) / (rated_ws - turb_ci))**3 elif ((rated_ws <= wind_speed_eff[n]) and (wind_speed_eff[n] < turb_co)): turb_pwr[n] = rated_pwr pwrDir = np.sum(turb_pwr) return pwrDir
MIT License
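A hedged usage sketch for `calcAEP`, assuming `py_wake` is installed and the module is importable at the path recorded above; the turbine parameters below are illustrative values, not an endorsement of any reference turbine:

import numpy as np
from py_wake.examples.data.iea37.iea37_aepcalc import calcAEP, coordinate

# Two turbines 500 m apart along x; a single wind-direction bin from the west.
turb_coords = np.array([(0.0, 0.0), (500.0, 0.0)],
                       dtype=coordinate).view(np.recarray)
wind_freq = np.array([1.0])      # one bin, 100% frequency
wind_dir = np.array([270.0])     # degrees
wind_speed = 9.8                 # m/s

aep_per_bin = calcAEP(turb_coords, wind_freq, wind_speed, wind_dir,
                      turb_diam=130.0, turb_ci=4.0, turb_co=25.0,
                      rated_ws=9.8, rated_pwr=3350000.0)
print(aep_per_bin.sum(), "MWh")  # total AEP across wind-direction bins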
lobocv/crashreporter
crashreporter/tools.py
analyze_traceback
python
def analyze_traceback(tb, inspection_level=None, limit=None):
    info = []
    tb_level = tb
    extracted_tb = traceback.extract_tb(tb, limit=limit)
    for ii, (filepath, line, module, code) in enumerate(extracted_tb):
        func_source, func_lineno = inspect.getsourcelines(tb_level.tb_frame)
        d = {"File": filepath,
             "Error Line Number": line,
             "Module": module,
             "Error Line": code,
             "Module Line Number": func_lineno,
             "Custom Inspection": {},
             "Source Code": ''}
        if inspection_level is None or len(extracted_tb) - ii <= inspection_level:
            d['Source Code'] = ''.join(func_source)
            d['Local Variables'] = get_local_references(tb_level)
            d['Object Variables'] = get_object_references(tb_level, d['Source Code'])
        tb_level = getattr(tb_level, 'tb_next', None)
        info.append(d)
    return info
Extract traceback information into a list of dictionaries.

:param tb: traceback
:return: list of dicts containing filepath, line, module, code, traceback
    level and source code for tracebacks
https://github.com/lobocv/crashreporter/blob/a5bbb3f37977dc64bc865dfedafc365fd5469ef8/crashreporter/tools.py#L155-L183
__author__ = 'calvin' import inspect import logging import re import traceback from types import FunctionType, MethodType, ModuleType, BuiltinMethodType, BuiltinFunctionType try: import numpy as np _NUMPY_INSTALLED = True except ImportError: _NUMPY_INSTALLED = False obj_ref_regex = re.compile("[A-z]+[0-9]*\.(?:[A-z]+[0-9]*\.?)+(?!\')(?:\[(?:\'|\").*(?:\'|\")\])*(?:\.[A-z]+[0-9]*)*") dict_lookup_regex = re.compile("(?<=\[)(?:\'|\")([^\'\"]*)(?:\'|\")(?=\])") _repr = repr def repr(object): try: return _repr(object) except Exception as e: logging.error(e) return 'String Representation not found' def string_variable_lookup(tb, s): refs = [] dot_refs = s.split('.') DOT_LOOKUP = 0 DICT_LOOKUP = 1 for ii, ref in enumerate(dot_refs): dict_refs = dict_lookup_regex.findall(ref) if dict_refs: bracket = ref.index('[') refs.append((DOT_LOOKUP, ref[:bracket])) refs.extend([(DICT_LOOKUP, t) for t in dict_refs]) else: refs.append((DOT_LOOKUP, ref)) scope = tb.tb_frame.f_locals.get(refs[0][1], ValueError) if scope is ValueError: return scope for lookup, ref in refs[1:]: try: if lookup == DOT_LOOKUP: scope = getattr(scope, ref, ValueError) else: scope = scope.get(ref, ValueError) except Exception as e: logging.error(e) scope = ValueError if scope is ValueError: return scope elif isinstance(scope, (FunctionType, MethodType, ModuleType, BuiltinMethodType, BuiltinFunctionType)): return ValueError return scope def get_object_references(tb, source, max_string_length=1000): global obj_ref_regex referenced_attr = set() for line in source.split('\n'): referenced_attr.update(set(re.findall(obj_ref_regex, line))) referenced_attr = sorted(referenced_attr) info = [] for attr in referenced_attr: v = string_variable_lookup(tb, attr) if v is not ValueError: ref_string = format_reference(v, max_string_length=max_string_length) info.append((attr, ref_string)) return info def get_local_references(tb, max_string_length=1000): if 'self' in tb.tb_frame.f_locals: _locals = [('self', repr(tb.tb_frame.f_locals['self']))] else: _locals = [] for k, v in tb.tb_frame.f_locals.iteritems(): if k == 'self': continue try: vstr = format_reference(v, max_string_length=max_string_length) _locals.append((k, vstr)) except TypeError: pass return _locals def format_reference(ref, max_string_length=1000): _pass = lambda *args: None _numpy_info = ('dtype', 'shape', 'size', 'min', 'max') additionals = [] if _NUMPY_INSTALLED and isinstance(ref, np.ndarray): for np_attr in _numpy_info: np_value = getattr(ref, np_attr, None) if np_value is not None: if inspect.isbuiltin(np_value): try: np_value = np_value() except Exception as e: logging.error(e) continue additionals.append((np_attr, np_value)) elif isinstance(ref, (list, tuple, set, dict)): length = getattr(ref, '__len__', _pass)() if length is not None: additionals.append(('length', length)) if additionals: vstr = ', '.join(['%s: %s' % a for a in additionals] + [repr(ref)]) else: vstr = repr(ref) if len(vstr) > max_string_length: vstr = vstr[:max_string_length] + ' ...' return vstr
MIT License
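The package above targets Python 2 (it uses `iteritems`); a stdlib-only Python 3 sketch of the same traceback-walking idea, intended to be run as a saved script so `inspect.getsourcelines` can locate the source:

import inspect
import sys
import traceback

def analyze_tb_sketch(tb):
    # Walk the traceback levels in step with traceback.extract_tb, collecting
    # per-frame details much like analyze_traceback above (without the
    # object-reference inspection).
    info = []
    level = tb
    for filepath, lineno, func_name, code in traceback.extract_tb(tb):
        source, first_line = inspect.getsourcelines(level.tb_frame)
        info.append({
            "File": filepath,
            "Error Line Number": lineno,
            "Module": func_name,
            "Error Line": code,
            "Module Line Number": first_line,
            "Source Code": "".join(source),
            "Local Variables": {k: repr(v) for k, v in level.tb_frame.f_locals.items()},
        })
        level = level.tb_next
    return info

def boom(x):
    return 1 / x

try:
    boom(0)
except ZeroDivisionError:
    for frame in analyze_tb_sketch(sys.exc_info()[2]):
        print(frame["File"], frame["Error Line Number"], frame["Error Line"])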
online-ml/river
river/base/classifier.py
Classifier.predict_one
python
def predict_one(self, x: dict) -> base.typing.ClfTarget:
    y_pred = self.predict_proba_one(x)
    if y_pred:
        return max(y_pred, key=y_pred.get)
    return None
Predict the label of a set of features `x`.

Parameters
----------
x
    A dictionary of features.

Returns
-------
The predicted label.
https://github.com/online-ml/river/blob/842f7c5be5574e62a3aab0b46d996eb5f1d73beb/river/base/classifier.py#L53-L72
import abc import typing import pandas as pd from river import base from . import estimator class Classifier(estimator.Estimator): @abc.abstractmethod def learn_one(self, x: dict, y: base.typing.ClfTarget, **kwargs) -> "Classifier": def predict_proba_one(self, x: dict) -> typing.Dict[base.typing.ClfTarget, float]: raise NotImplementedError
BSD 3-Clause New or Revised License
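`predict_one` simply takes the arg-max of the class-probability dictionary returned by `predict_proba_one`. The core of that pattern in isolation, using a hypothetical probability output:

# Hypothetical output of predict_proba_one for one sample.
y_proba = {"spam": 0.7, "ham": 0.3}

# max() over a dict iterates its keys; key=y_proba.get ranks them by probability.
y_pred = max(y_proba, key=y_proba.get) if y_proba else None
print(y_pred)  # spam

# An empty dict (e.g. a model that has seen no data yet) yields None.
empty = {}
print(max(empty, key=empty.get) if empty else None)  # None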
catalyst-team/catalyst
catalyst/contrib/nn/criterion/wing.py
WingLoss.forward
python
def forward(self, outputs: torch.Tensor, targets: torch.Tensor) -> torch.Tensor:
    loss = self.loss_fn(outputs, targets)
    return loss
Args:
    outputs: predicted tensor.
    targets: ground-truth tensor of the same shape as ``outputs``.

Returns:
    computed wing loss, reduced according to the criterion's ``reduction`` setting.
https://github.com/catalyst-team/catalyst/blob/a6fc305eaddc499c17584824794fa8d006072842/catalyst/contrib/nn/criterion/wing.py#L71-L77
from functools import partial import math import torch from torch import nn def wing_loss( outputs: torch.Tensor, targets: torch.Tensor, width: int = 5, curvature: float = 0.5, reduction: str = "mean", ) -> torch.Tensor: diff_abs = (targets - outputs).abs() loss = diff_abs.clone() idx_smaller = diff_abs < width idx_bigger = diff_abs >= width loss[idx_smaller] = width * torch.log(1 + diff_abs[idx_smaller] / curvature) c = width - width * math.log(1 + width / curvature) loss[idx_bigger] = loss[idx_bigger] - c if reduction == "sum": loss = loss.sum() if reduction == "mean": loss = loss.mean() return loss class WingLoss(nn.Module): def __init__(self, width: int = 5, curvature: float = 0.5, reduction: str = "mean"): super().__init__() self.loss_fn = partial(wing_loss, width=width, curvature=curvature, reduction=reduction)
Apache License 2.0
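A hedged usage sketch, assuming `catalyst` and `torch` are installed and the criterion is importable from the module path recorded above:

import torch
from catalyst.contrib.nn.criterion.wing import WingLoss

criterion = WingLoss(width=5, curvature=0.5, reduction="mean")

# Small errors fall on the logarithmic branch, large ones on the linear branch.
outputs = torch.tensor([[0.0, 2.0, 10.0]])
targets = torch.zeros_like(outputs)
loss = criterion(outputs, targets)
print(loss.item())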
brain-research/data-linter
linters.py
DuplicateExampleDetector._lint
python
def _lint(self, examples):
    feature_names = sorted(f.name for f in self._stats.features)
    tuplize = utils.example_tuplizer(feature_names, denan=True)
    duplicates = (
        examples
        | 'Tuplize' >> beam.Map(lambda x: (tuplize(x), x))
        | 'CollectDuplicates' >> beam.GroupByKey()
        | 'ExamplesToList' >> beam.Map(
            lambda (example_tuple, examples): (example_tuple, list(examples)))
        | 'FilterDuplicates' >> beam.Filter(
            lambda (_, examples): len(examples) > 1))

    samples = (
        duplicates
        | 'TakeExamples' >> beam.Map(lambda (_, examples): examples[0])
        | 'Sample' >> beam.combiners.Sample.FixedSizeGlobally(
            self.N_LINT_SAMPLES)
        | 'ToSample' >> beam.Map(
            lambda x: lint_result_pb2.LintSample(examples=x)))

    n_duplicates = (
        duplicates
        | 'CountDuplicates' >> beam.Map(lambda (_, examples): len(examples))
        | 'ExcessCounts' >> beam.Map(lambda x: x - 1)
        | 'Total' >> beam.CombineGlobally(sum))

    return (
        examples.pipeline
        | 'SyncSideInputs' >> beam.Create([None])
        | 'ToResult' >> beam.Map(self._to_result,
                                 beam.pvalue.AsSingleton(n_duplicates),
                                 beam.pvalue.AsSingleton(samples)))
Returns the `PTransform` for the DuplicateExampleDetector linter.

Args:
  examples: A `PTransform` that yields a `PCollection` of `tf.Example`s.

Returns:
  A `PTransform` that yields a `LintResult` of the format
    warnings: [num_duplicates]
    lint_sample: [
      features: [sample duplicates...]
    ]
https://github.com/brain-research/data-linter/blob/ef62043ae1a2022d48b3c1e83171cfd500a11524/linters.py#L713-L756
from __future__ import absolute_import from __future__ import division from __future__ import print_function import collections import datetime import itertools import re import apache_beam as beam import dateutil.parser import numpy as np import scipy.stats import six import lint_result_pb2 import data_linter_utils as utils class LintDetector(beam.PTransform): N_LINT_SAMPLES = 2 def should_run(self): return True def __eq__(self, other): return isinstance(other, type(self)) @classmethod def _make_result(cls, **kwargs): return lint_result_pb2.LintResult(linter_name=cls.__name__, **kwargs) def _lint(self, examples): raise NotImplementedError() def expand(self, examples): result = self._lint(examples) if not isinstance(result, (beam.pvalue.PCollection, beam.transforms.PTransform)): result_pcoll = beam.Create([result] if result else []) result = examples.pipeline | 'Materialize' >> result_pcoll return result | 'PairWithName' >> beam.Map( lambda r: (type(self).__name__, r)) class DateTimeAsStringDetector(LintDetector): _NEAR_FUTURE_YEAR = datetime.datetime.today().year + 100 _EPOCH_YEAR = 1970 @classmethod def _string_is_datetime(cls, maybe_dt): try: dateutil.parser.parse(maybe_dt) try: float_dt = float(maybe_dt) if float_dt > 1e8: return True elif float_dt >= cls._EPOCH_YEAR and float_dt <= cls._NEAR_FUTURE_YEAR: return True return False except ValueError: return True except (ValueError, OverflowError): return False def __init__(self, stats): super(DateTimeAsStringDetector, self).__init__() self._stats = stats def should_run(self): return bool(utils.get_string_features(self._stats)) def _lint(self, examples): result = self._make_result() string_features = utils.get_string_features(self._stats) lint_samples = collections.defaultdict(set) for feature in self._stats.features: if feature.name not in string_features: continue str_stats = feature.string_stats n_samples = str_stats.common_stats.num_non_missing if n_samples == 0: continue num_date_parsable = 0 for bucket in str_stats.rank_histogram.buckets: if self._string_is_datetime(bucket.label): num_date_parsable += bucket.sample_count samples = lint_samples[feature.name] if len(samples) < self.N_LINT_SAMPLES: samples.add(bucket.label) if num_date_parsable / n_samples > 0.5: result.warnings.append(feature.name) result.lint_samples.add(strings=lint_samples[feature.name]) return result class TokenizableStringDetector(LintDetector): def __init__(self, stats, length_threshold=30, enum_threshold=20): super(TokenizableStringDetector, self).__init__() self._stats = stats self._length_threshold = length_threshold self._enum_threshold = enum_threshold def should_run(self): return bool(utils.get_string_features(self._stats)) def _lint(self, examples): result = self._make_result() string_features = utils.get_string_features(self._stats) for feature in self._stats.features: if feature.name not in string_features: continue str_stats = feature.string_stats if (str_stats.avg_length > self._length_threshold and str_stats.unique > self._enum_threshold): result.warnings.append(feature.name) samples = [bucket.label for bucket in str_stats.rank_histogram.buckets if len(bucket.label) > self._length_threshold] result.lint_samples.add(strings=samples[:self.N_LINT_SAMPLES]) return result class ZipCodeAsNumberDetector(LintDetector): _ZIP_RE = re.compile(r'([\W_]|\b)zip(code|[\W_]|\b)') def __init__(self, stats): super(ZipCodeAsNumberDetector, self).__init__() self._stats = stats def should_run(self): return bool(utils.get_numeric_features(self._stats)) def _lint(self, 
examples): result = self._make_result() numeric_features = utils.get_numeric_features(self._stats) for feature in self._stats.features: if (feature.name in numeric_features and self._ZIP_RE.search(feature.name.lower())): result.warnings.append(feature.name) return result class NumberAsStringDetector(LintDetector): def __init__(self, stats, non_num_tol=0.5): super(NumberAsStringDetector, self).__init__() self._stats = stats self._non_num_tol = non_num_tol def should_run(self): return bool(utils.get_string_features(self._stats)) def _lint(self, examples): result = self._make_result() string_features = utils.get_string_features(self._stats) lint_samples = collections.defaultdict(set) for feature in self._stats.features: if feature.name not in string_features: continue str_stats = feature.string_stats n_samples = str_stats.common_stats.num_non_missing if n_samples == 0: continue num_numeric = 0 for bucket in str_stats.rank_histogram.buckets: try: nums_only = re.sub(r'\D', '', bucket.label) if len(nums_only) / len(bucket.label) >= 1 - self._non_num_tol: num_numeric += bucket.sample_count samples = lint_samples[feature.name] if len(samples) < self.N_LINT_SAMPLES: samples.add(bucket.label) except (ValueError, ZeroDivisionError): pass if num_numeric / n_samples > 0.5: result.warnings.append(feature.name) result.lint_samples.add(strings=lint_samples[feature.name]) return result class NonNormalNumericFeatureDetector(LintDetector): IGNORE_FEATURE_NAMES = {'lat', 'lon', 'latitude', 'longitude', 'id'} _TYPICAL_STATS_ID = '_typical_' WARNING_FMT = '{}:{}' def __init__(self, stats, max_deviance=2, trim_proportion=0.1): super(NonNormalNumericFeatureDetector, self).__init__() self._stats = stats self._max_deviance = max_deviance self._trim_proportion = trim_proportion def should_run(self): return any(feature_name.lower() not in self.IGNORE_FEATURE_NAMES for feature_name in utils.get_numeric_features(self._stats)) def _get_trimmed_stats(self, values): values.sort() trimmed_values = scipy.stats.trimboth(values, self._trim_proportion) return trimmed_values.mean(), trimmed_values.std() def _lint(self, examples): result = self._make_result() numeric_features = utils.get_numeric_features(self._stats) numeric_feature_stats = [] feature_means = [] feature_std_devs = [] for feature_stats in self._stats.features: if (feature_stats.name not in numeric_features or feature_stats.name in self.IGNORE_FEATURE_NAMES): continue numeric_feature_stats.append(feature_stats) num_stats = feature_stats.num_stats feature_means.append(num_stats.mean) feature_std_devs.append(num_stats.std_dev) means_trimmed_mean, means_trimmed_std = self._get_trimmed_stats( feature_means) std_devs_trimmed_mean, std_devs_trimmed_std = self._get_trimmed_stats( feature_std_devs) typical_stats = lint_result_pb2.Statistics( id=self._TYPICAL_STATS_ID, mean=means_trimmed_mean, std_dev=std_devs_trimmed_mean) result.lint_samples.add(stats=[typical_stats]) for feature_stats in numeric_feature_stats: num_stats = feature_stats.num_stats mean_deviance = utils.get_zscore( num_stats.mean, means_trimmed_mean, means_trimmed_std) std_dev_deviance = utils.get_zscore( num_stats.std_dev, std_devs_trimmed_mean, std_devs_trimmed_std) warnings = [] if mean_deviance > self._max_deviance: warnings.append('mean') if std_dev_deviance > self._max_deviance: warnings.append('std_dev') if warnings: result.warnings.append( self.WARNING_FMT.format(feature_stats.name, ','.join(warnings))) result.lint_samples.add(stats=[lint_result_pb2.Statistics( id=feature_stats.name, 
mean=num_stats.mean, std_dev=num_stats.std_dev, min=num_stats.min, max=num_stats.max)]) return result class UniqueValueCountsDetector(LintDetector): @property def _counted_features(self): raise NotImplementedError() @property def _count_transformer(self): raise NotImplementedError() def _check_feature(self, feature_w_val_counts): raise NotImplementedError() def should_run(self): return bool(self._counted_features) def _flatten_feature_vals(self, feature_vals): feature, vals = feature_vals return [(feature, v) for v in vals] def _shift_key(self, feature_val_w_counts): (feature, val), counts = feature_val_w_counts return feature, (val, counts) def _val_counts_as_dict(self, feature_val_counts): feature, val_counts = feature_val_counts return feature, dict(val_counts) def _to_result(self, warning_samples): if warning_samples: warnings, samples = zip(*warning_samples) return self._make_result(warnings=warnings, lint_samples=samples) def _lint(self, examples): feature_val_w_counts = ( examples | 'Tuplize' >> beam.FlatMap( utils.example_tuplizer(self._counted_features)) | 'FlattenFeatureVals' >> beam.FlatMap(self._flatten_feature_vals) | 'CountFeatureVals' >> beam.combiners.Count.PerElement()) if hasattr(self, '_count_transformer'): feature_val_w_counts |= 'TransformCounts' >> self._count_transformer return ( feature_val_w_counts | 'PairValWithCount' >> beam.Map(self._shift_key) | 'GroupByFeature' >> beam.GroupByKey() | 'ValCountsToDict' >> beam.Map(self._val_counts_as_dict) | 'GenResults' >> beam.Map(self._check_feature) | 'DropUnwarned' >> beam.Filter(bool) | 'AsList' >> beam.combiners.ToList() | 'ToResult' >> beam.Map(self._to_result)) class EnumDetector(UniqueValueCountsDetector): N_LINT_SAMPLES = 4 def __init__(self, stats, enum_threshold=20, ignore_strings=True): super(EnumDetector, self).__init__() self._stats = stats self._enum_threshold = enum_threshold self._ignore_strings = ignore_strings self._numeric_features = utils.get_numeric_features(self._stats) @property def _counted_features(self): checked_features = self._numeric_features if not self._ignore_strings: checked_features.update(utils.get_string_features(self._stats)) return checked_features def _check_feature(self, feature_w_val_counts): feature, counts = feature_w_val_counts if len(counts) >= self._enum_threshold: return None samp_vals = itertools.islice(iter(counts), self.N_LINT_SAMPLES) if feature not in self._numeric_features: samp_strs = [six.text_type(s).encode('utf8') for s in samp_vals] samples = lint_result_pb2.LintSample(strings=samp_strs) else: samples = lint_result_pb2.LintSample(nums=samp_vals) return feature, samples class IntAsFloatDetector(UniqueValueCountsDetector): def __init__(self, stats, int_threshold=0.95): super(IntAsFloatDetector, self).__init__() self._stats = stats self._int_threshold = int_threshold @property def _counted_features(self): return utils.get_float_features(self._stats) @property def _count_transformer(self): return ( 'DropNaN' >> beam.Filter(lambda (f_v, _): not np.isnan(f_v[1])) | 'IsIntegral' >> beam.Map( lambda (f_v, c): ((f_v[0], f_v[1] % 1 == 0), c)) | 'Count' >> beam.CombinePerKey(sum)) def _check_feature(self, feature_w_intp_counts): feature, intp_counts = feature_w_intp_counts num_present = sum(six.itervalues(intp_counts)) int_count = intp_counts.get(True, 0) if int_count / num_present >= self._int_threshold: sample = lint_result_pb2.LintSample(nums=[num_present, int_count]) return feature, sample return None class UncommonSignDetector(UniqueValueCountsDetector): _SIGN_TO_STR = {1: 
'positive', -1: 'negative', 0: 'zero'} def __init__(self, stats, domain_freq_threshold=0.05): super(UncommonSignDetector, self).__init__() self._stats = stats self._domain_freq_threshold = domain_freq_threshold @property def _counted_features(self): return utils.get_numeric_features(self._stats) @property def _count_transformer(self): return ( 'ToSigns' >> beam.Map( lambda (f_v, _): (f_v[0], np.sign(f_v[1]))) | 'CountSigns' >> beam.combiners.Count.PerElement()) def _check_feature(self, feature_sign_counts): feature_name, sign_counts = feature_sign_counts num_stats = next(stats for stats in self._stats.features if stats.name == feature_name).num_stats n_unique = sum(six.itervalues(sign_counts)) uncommon_sign_counts = {} for sign, count in six.iteritems(sign_counts): if sign == 0: count = num_stats.num_zeros elif sign == float('nan'): common_stats = num_stats.common_stats count = common_stats.tot_num_values - common_stats.num_non_missing sign_freq = count / n_unique if sign_freq > 0 and sign_freq <= self._domain_freq_threshold: uncommon_sign_counts[sign] = count if uncommon_sign_counts: sample = lint_result_pb2.LintSample(nums=[n_unique]) for sign, count in six.iteritems(uncommon_sign_counts): sample.strings.append(self._SIGN_TO_STR.get(sign, str(sign))) sample.nums.append(count) return feature_name, sample return None class DuplicateExampleDetector(LintDetector): N_LINT_SAMPLES = 10 def __init__(self, stats): super(DuplicateExampleDetector, self).__init__() self._stats = stats def _to_result(self, _, n_duplicates, samples): warning = [str(n_duplicates)] if n_duplicates else [] return self._make_result(warnings=warning, lint_samples=[samples])
Apache License 2.0
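The Beam pipeline above groups tuplized examples, keeps groups with more than one member, counts the excess copies, and samples a few duplicates. A plain-Python sketch of that grouping logic (no Beam, hypothetical tuplize argument):

from collections import defaultdict

def find_duplicates(examples, tuplize, n_samples=10):
    # Group examples by their feature tuple ('Tuplize' + 'CollectDuplicates').
    groups = defaultdict(list)
    for ex in examples:
        groups[tuplize(ex)].append(ex)
    # Keep only groups with more than one member ('FilterDuplicates').
    dup_groups = [exs for exs in groups.values() if len(exs) > 1]
    # Count the redundant copies ('ExcessCounts' + 'Total').
    n_excess = sum(len(exs) - 1 for exs in dup_groups)
    # Take one representative per duplicated group ('TakeExamples' + 'Sample').
    samples = [exs[0] for exs in dup_groups][:n_samples]
    return n_excess, samples

examples = [("a", 1.0), ("b", 2.0), ("a", 1.0), ("a", 1.0)]
print(find_duplicates(examples, tuplize=lambda ex: ex))
# (2, [('a', 1.0)])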
diofant/diofant
diofant/core/mul.py
_unevaluated_Mul
python
def _unevaluated_Mul(*args):
    args = list(args)
    newargs = []
    ncargs = []
    co = S.One
    while args:
        a = args.pop()
        if a.is_Mul:
            c, nc = a.args_cnc()
            args.extend(c)
            if nc:
                ncargs.append(Mul._from_args(nc))
        elif a.is_Number:
            co *= a
        else:
            newargs.append(a)
    newargs.sort(key=default_sort_key)
    if co is not S.One:
        newargs.insert(0, co)
    if ncargs:
        newargs.append(Mul._from_args(ncargs))
    return Mul._from_args(newargs)
Return a well-formed unevaluated Mul: Numbers are collected and
put in slot 0, any arguments that are Muls will be flattened, and args
are sorted. Use this when args have changed but you still want to return
an unevaluated Mul.

Examples
========

>>> a = _unevaluated_Mul(*[Float(3.0), x, Integer(2)])
>>> a.args[0]
6.00000000000000
>>> a.args[1]
x

Two unevaluated Muls with the same arguments will
always compare as equal during testing:

>>> m = _unevaluated_Mul(sqrt(2), sqrt(3))
>>> m == _unevaluated_Mul(sqrt(3), sqrt(2))
True
>>> u = Mul(sqrt(3), sqrt(2), evaluate=False)
>>> m == _unevaluated_Mul(u)
True
>>> m == Mul(*m.args)
False
https://github.com/diofant/diofant/blob/05c50552b0e0533f1dbf2ec05e65b6c45b7e2c11/diofant/core/mul.py#L27-L75
from collections import defaultdict from ..utilities import default_sort_key from .basic import Basic from .cache import cacheit from .logic import _fuzzy_group, fuzzy_and from .operations import AssocOp from .singleton import S from .sympify import sympify class NC_Marker: is_Order = False is_Mul = False is_Number = False is_Poly = False is_commutative = False
BSD 3-Clause New or Revised License
google-research/federated
reconstruction/movielens/movielens_dataset.py
create_user_split_np_arrays
python
def create_user_split_np_arrays(
    ratings_df: pd.DataFrame,
    max_examples_per_user: Optional[int] = None,
    train_fraction: float = 0.8,
    val_fraction: float = 0.1,
) -> Tuple[ServerDataArray, ServerDataArray, ServerDataArray]:
    num_users = len(set(ratings_df.UserID))
    all_user_examples = []
    for user_id in range(num_users):
        all_user_examples.append(
            get_user_examples(ratings_df, user_id, max_examples_per_user))

    np.random.seed(NP_RANDOM_SEED)
    np.random.shuffle(all_user_examples)

    train_idx = int(len(all_user_examples) * train_fraction)
    val_idx = int(len(all_user_examples) * (train_fraction + val_fraction))
    train_user_examples = all_user_examples[:train_idx]
    val_user_examples = all_user_examples[train_idx:val_idx]
    test_user_examples = all_user_examples[val_idx:]

    def get_users_movies_ratings(user_examples):
        users = []
        movies = []
        ratings = []
        for user in user_examples:
            for example in user:
                users.append(example[0])
                movies.append(example[1])
                ratings.append(example[2])
        users = np.array(users, dtype=np.int64)
        movies = np.array(movies, dtype=np.int64)
        ratings = np.array(ratings, dtype=np.float32)
        users = np.reshape(users, [np.shape(users)[0], 1])
        movies = np.reshape(movies, [np.shape(movies)[0], 1])
        ratings = np.reshape(ratings, [np.shape(ratings)[0], 1])
        return (users, movies, ratings)

    train_users_movies_ratings = get_users_movies_ratings(train_user_examples)
    val_users_movies_ratings = get_users_movies_ratings(val_user_examples)
    test_users_movies_ratings = get_users_movies_ratings(test_user_examples)

    return (train_users_movies_ratings, val_users_movies_ratings,
            test_users_movies_ratings)
Creates arrays for train/val/test user data for server-side evaluation.

Loads a server-side version of the MovieLens dataset that contains ratings
from users partitioned into train/val/test populations. Note that unlike
`create_tf_datasets` and `create_tf_dataset_for_user`, the output data does
not generate batches (batching can be later applied during the call to
`model.fit`).

This produces datasets for use with server-side Keras where the split is by
users, so train/val/test sets contain disjoint users. For standard server
evaluation, this is expected to perform less well since the user embeddings
for users seen at val/test time will not be trained. If splitting within
users (so each user's data is split into train/val/test) is desired, see
`split_ratings_df` and `create_merged_np_arrays`.

For each of train/val/test, produces (merged_users, merged_movies,
merged_ratings), which can be used for training of a model produced by
`get_matrix_factorization_model()` from models.py by calling
`model.fit([merged_users, merged_movies], merged_ratings, ...)`.

Args:
  ratings_df: a pandas DataFrame with ratings data, as returned by
    `load_movielens_data` or `split_ratings_df` (if using train/test split
    data is desired).
  max_examples_per_user: if not None, limit the number of rating examples
    for each user to this many examples.
  train_fraction: the approximate fraction of users to allocate to training
    data.
  val_fraction: the approximate fraction of users to allocate to val data.
    Remaining users not allocated to train and val (if any) are allocated
    to test.

Returns:
  A 3-tuple of 3-tuples:
  ((train_users, train_movies, train_ratings),
   (val_users, val_movies, val_ratings),
   (test_users, test_movies, test_ratings)).
  For each of train/val/test, the tuple contains:
    users: a np.ndarray with type np.int64 and shape (num_examples, 1)
      containing UserIDs. The order of entries corresponds to the order of
      the examples: users[i] is the UserID for example i.
    movies: a np.ndarray with type np.int64 and shape (num_examples, 1)
      containing MovieIDs. The order of entries corresponds to the order of
      the examples: movies[i] is the MovieID for example i.
    ratings: a np.ndarray with type np.float32 and shape (num_examples, 1)
      containing Ratings. The order of entries corresponds to the order of
      the examples: ratings[i] is the Rating for example i.
https://github.com/google-research/federated/blob/909953fa8945cfac01328e0a6d878e1dc0376c3c/reconstruction/movielens/movielens_dataset.py#L467-L569
import collections import io import os from typing import Any, List, Optional, Tuple import zipfile import numpy as np import pandas as pd import requests import tensorflow as tf MOVIELENS_1M_URL = "http://files.grouplens.org/datasets/movielens/ml-1m.zip" DEFAULT_DATA_DIRECTORY = "/tmp" NP_RANDOM_SEED = 42 ServerDataArray = Tuple[np.ndarray, np.ndarray, np.ndarray] def download_and_extract_data(url: str = MOVIELENS_1M_URL, data_directory: str = DEFAULT_DATA_DIRECTORY): r = requests.get(url) z = zipfile.ZipFile(io.BytesIO(r.content)) z.extractall(path=data_directory) def load_movielens_data( data_directory: str = DEFAULT_DATA_DIRECTORY, normalize_ratings: bool = True ) -> Tuple[pd.DataFrame, pd.DataFrame, pd.DataFrame]: ratings_df = pd.read_csv( os.path.join(data_directory, "ml-1m", "ratings.dat"), sep="::", names=["UserID", "MovieID", "Rating", "Timestamp"]) movies_df = pd.read_csv( os.path.join(data_directory, "ml-1m", "movies.dat"), sep="::", names=["MovieID", "Title", "Genres"]) users_df = pd.read_csv( os.path.join(data_directory, "ml-1m", "users.dat"), sep="::", names=["UserID", "Gender", "Age", "Occupation", "Zip-code"]) movie_mapping = { old_movie: new_movie for new_movie, old_movie in enumerate( ratings_df.MovieID.astype("category").cat.categories) } user_mapping = { old_user: new_user for new_user, old_user in enumerate( ratings_df.UserID.astype("category").cat.categories) } ratings_df.MovieID = ratings_df.MovieID.map(movie_mapping) ratings_df.UserID = ratings_df.UserID.map(user_mapping) movies_df.MovieID = movies_df.MovieID.map(movie_mapping) users_df.UserID = users_df.UserID.map(user_mapping) movies_df = movies_df[pd.notnull(movies_df.MovieID)] users_df = users_df[pd.notnull(users_df.UserID)] if normalize_ratings: ratings_df.Rating = (ratings_df.Rating - 3) / 2 return ratings_df, movies_df, users_df def split_ratings_df( ratings_df: pd.DataFrame, train_fraction: float = 0.8, val_fraction: float = 0.1 ) -> Tuple[pd.DataFrame, pd.DataFrame, pd.DataFrame]: if train_fraction + val_fraction > 1: raise ValueError( "train_fraction and val_fraction can't sum to greater than 1, got {}" "and {}.".format(train_fraction, val_fraction)) ranks_per_user = ratings_df.groupby("UserID")["Timestamp"].rank( method="first") counts_per_user = ratings_df["UserID"].map( ratings_df.groupby("UserID")["Timestamp"].apply(len)) normalized_ranks_per_user = ranks_per_user / counts_per_user train_mask = normalized_ranks_per_user <= train_fraction val_mask = ((normalized_ranks_per_user <= (train_fraction + val_fraction)) & ~train_mask) test_mask = ~train_mask & ~val_mask train_ratings_df = ratings_df[train_mask] val_ratings_df = ratings_df[val_mask] test_ratings_df = ratings_df[test_mask] return train_ratings_df, val_ratings_df, test_ratings_df def get_user_examples(ratings_df: pd.DataFrame, user_id: int, max_examples_per_user: Optional[int] = None) -> List[Any]: user_subset = ratings_df[ratings_df.UserID == user_id] user_examples = [(user_subset.UserID.iloc[i], user_subset.MovieID.iloc[i], user_subset.Rating.iloc[i]) for i in range(user_subset.shape[0])] np.random.seed(NP_RANDOM_SEED) np.random.shuffle(user_examples) if max_examples_per_user is not None: user_examples = user_examples[:max_examples_per_user] return user_examples def create_tf_dataset_for_user(ratings_df: pd.DataFrame, user_id: int, personal_model: bool = False, batch_size: int = 1, max_examples_per_user: Optional[int] = None, num_local_epochs: int = 1) -> tf.data.Dataset: def rating_batch_map_fn(rating_batch): if personal_model: return 
collections.OrderedDict([ ("x", tf.cast(rating_batch[:, 1:2], tf.int64)), ("y", tf.cast(rating_batch[:, 2:3], tf.float32)) ]) return collections.OrderedDict([ ("x", (tf.cast(rating_batch[:, 0:1], tf.int64), tf.cast(rating_batch[:, 1:2], tf.int64))), ("y", tf.cast(rating_batch[:, 2:3], tf.float32)) ]) user_examples = get_user_examples(ratings_df, user_id, max_examples_per_user) tf_dataset = tf.data.Dataset.from_tensor_slices(user_examples) return tf_dataset.batch(batch_size).map( rating_batch_map_fn, num_parallel_calls=tf.data.experimental.AUTOTUNE).repeat(num_local_epochs) def create_tf_datasets(ratings_df: pd.DataFrame, personal_model: bool = False, batch_size: int = 1, max_examples_per_user: Optional[int] = None, num_local_epochs: int = 1) -> List[tf.data.Dataset]: num_users = len(set(ratings_df.UserID)) tf_datasets = [ create_tf_dataset_for_user(ratings_df, i, personal_model, batch_size, max_examples_per_user, num_local_epochs) for i in range(num_users) ] return tf_datasets def split_tf_datasets( tf_datasets: List[tf.data.Dataset], train_fraction: float = 0.8, val_fraction: float = 0.1, ) -> Tuple[List[tf.data.Dataset], List[tf.data.Dataset], List[tf.data.Dataset]]: if train_fraction + val_fraction > 1: raise ValueError( "train_fraction and val_fraction can't sum to greater than 1, got {}" "and {}.".format(train_fraction, val_fraction)) np.random.seed(NP_RANDOM_SEED) np.random.shuffle(tf_datasets) train_idx = int(len(tf_datasets) * train_fraction) val_idx = int(len(tf_datasets) * (train_fraction + val_fraction)) return (tf_datasets[:train_idx], tf_datasets[train_idx:val_idx], tf_datasets[val_idx:]) def create_merged_np_arrays( ratings_df: pd.DataFrame, max_examples_per_user: Optional[int] = None, shuffle_across_users: bool = True) -> ServerDataArray: num_users = len(set(ratings_df.UserID)) merged_examples = [] for user_id in range(num_users): user_examples = get_user_examples(ratings_df, user_id, max_examples_per_user) merged_examples += user_examples if shuffle_across_users: np.random.seed(NP_RANDOM_SEED) np.random.shuffle(merged_examples) merged_users = np.array([[x[0]] for x in merged_examples], dtype=np.int64) merged_movies = np.array([[x[1]] for x in merged_examples], dtype=np.int64) merged_ratings = np.array([[x[2]] for x in merged_examples], dtype=np.float32) return merged_users, merged_movies, merged_ratings
Apache License 2.0
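A small self-contained sketch of the user-level split performed above, using a synthetic ratings table instead of the downloaded MovieLens data; whole users (not individual ratings) land in exactly one of train/val/test:

import numpy as np
import pandas as pd

# Tiny synthetic ratings table with the same columns as the MovieLens frame.
ratings_df = pd.DataFrame({
    "UserID": [0, 0, 1, 1, 2, 3, 4],
    "MovieID": [10, 11, 10, 12, 11, 13, 10],
    "Rating": [1.0, 0.5, -0.5, 0.0, 1.0, -1.0, 0.5],
})

# User-level split: shuffle users, then cut the user list at 80% / 90%.
user_ids = sorted(set(ratings_df.UserID))
rng = np.random.RandomState(42)
rng.shuffle(user_ids)
train_idx = int(len(user_ids) * 0.8)
val_idx = int(len(user_ids) * 0.9)
splits = {
    "train": user_ids[:train_idx],
    "val": user_ids[train_idx:val_idx],
    "test": user_ids[val_idx:],
}
for name, users in splits.items():
    subset = ratings_df[ratings_df.UserID.isin(users)]
    print(name, sorted(set(subset.UserID)), len(subset), "ratings")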
adafruit/adafruit_circuitpython_esp32spi
adafruit_esp32spi/PWMOut.py
PWMOut._is_deinited
python
def _is_deinited(self):
    if self._pwm_pin is None:
        raise ValueError(
            "PWMOut Object has been deinitialized and can no longer "
            "be used. Create a new PWMOut object."
        )
Checks if the PWMOut object has been previously de-initialized.
https://github.com/adafruit/adafruit_circuitpython_esp32spi/blob/7b048134c49c6fb45b33fab4534114b8a89e4d29/adafruit_esp32spi/PWMOut.py#L53-L59
class PWMOut: ESP32_PWM_PINS = set( [0, 1, 2, 4, 5, 12, 13, 14, 15, 16, 17, 18, 19, 21, 22, 23, 25, 26, 27, 32, 33] ) def __init__( self, esp, pwm_pin, *, frequency=500, duty_cycle=0, variable_frequency=False ): if pwm_pin in self.ESP32_PWM_PINS: self._pwm_pin = pwm_pin else: raise AttributeError("Pin %d is not a valid ESP32 GPIO Pin." % pwm_pin) self._esp = esp self._duty_cycle = duty_cycle self._freq = frequency self._var_freq = variable_frequency def __enter__(self): return self def __exit__(self, exc_type, exc_value, exc_traceback): self.deinit() def deinit(self): self._duty_cycle = 0 self._freq = 0 self._pwm_pin = None
MIT License
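The same deinit-guard pattern in a self-contained toy class, showing how a deinitialized object refuses further use (the class below is illustrative only, not part of the CircuitPython driver):

class Resource:
    # Toy illustration of the guard used by PWMOut._is_deinited above.
    def __init__(self):
        self._pin = 13

    def _is_deinited(self):
        if self._pin is None:
            raise ValueError(
                "Object has been deinitialized and can no longer be used. "
                "Create a new object."
            )

    def deinit(self):
        self._pin = None

    def do_work(self):
        self._is_deinited()   # every public method checks first
        return "working on pin %d" % self._pin


r = Resource()
print(r.do_work())
r.deinit()
try:
    r.do_work()
except ValueError as exc:
    print("caught:", exc)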
wrr/wwwhisper
wwwhisper_auth/models.py
User.login_successful
python
def login_successful(self):
    return
Must be called after successful login.
https://github.com/wrr/wwwhisper/blob/38a55dd9c828fbb1b5a8234ea3ddf2242e684983/wwwhisper_auth/models.py#L239-L243
from django.contrib.auth.models import AbstractBaseUser from django.db import connection from django.db import models from django.db import IntegrityError from django.forms import ValidationError from django.utils import timezone from functools import wraps from wwwhisper_auth import url_utils from wwwhisper_auth import email_re import logging import random import re import threading import uuid as uuidgen logger = logging.getLogger(__name__) class LimitExceeded(Exception): pass class ValidatedModel(models.Model): class Meta: abstract = True app_label = 'wwwhisper_auth' def save(self, *args, **kwargs): self.full_clean() return super(ValidatedModel, self).save(*args, **kwargs) SINGLE_SITE_ID = 'theone' class Site(ValidatedModel): site_id = models.TextField(primary_key=True, db_index=True, editable=False) mod_id = models.IntegerField(default=0) _default_skin = { 'title': 'wwwhisper: Web Access Control', 'header': 'Protected site', 'message': 'Access to this site is restricted, please verify your email:' } title = models.CharField(max_length=80, blank=True) header = models.CharField(max_length=100, blank=True) message = models.CharField(max_length=500, blank=True) branding = models.BooleanField(default=True) aliases_limit = None users_limit = None locations_limit = None def __init__(self, *args, **kwargs): super(Site, self).__init__(*args, **kwargs) self.mod_id_lock = threading.Lock() def heavy_init(self): self.locations = LocationsCollection(self) self.users = UsersCollection(self) self.aliases = AliasesCollection(self) def site_modified(self): cursor = connection.cursor() cursor.execute( 'UPDATE wwwhisper_auth_site ' 'SET mod_id = mod_id + 1 WHERE site_id = %s', [self.site_id]) cursor.close() mod_id = self.mod_id_from_db() with self.mod_id_lock: self.mod_id = mod_id def skin(self): result = dict([(attr, getattr(self, attr) or self._default_skin[attr]) for attr in self._default_skin.iterkeys()]) result['branding'] = self.branding return result def update_skin(self, title, header, message, branding): for attr in self._default_skin.iterkeys(): arg = locals()[attr].strip() if arg == self._default_skin[attr]: arg = '' setattr(self, attr, arg) self.branding = branding self.save() self.site_modified() def get_mod_id_ts(self): with self.mod_id_lock: return self.mod_id def mod_id_from_db(self): cursor = connection.cursor() cursor.execute( 'SELECT mod_id FROM wwwhisper_auth_site WHERE site_id = %s', [self.site_id]) row = cursor.fetchone() cursor.close() if row is None: return None return row[0] def modify_site(decorated_method): @wraps(decorated_method) def wrapper(self, *args, **kwargs): result = decorated_method(self, *args, **kwargs) self.site.site_modified() return result return wrapper class SitesCollection(object): def create_item(self, site_id, **kwargs): site = Site.objects.create(site_id=site_id, **kwargs) site.heavy_init() return site def find_item(self, site_id): site = _find(Site, site_id=site_id) if site is not None: site.heavy_init() return site def delete_item(self, site_id): site = self.find_item(site_id) if site is None: return False site.delete() return True class User(AbstractBaseUser): class Meta: app_label = 'wwwhisper_auth' unique_together = ('site', 'email') site = models.ForeignKey(Site, related_name='+') uuid = models.CharField(max_length=36, db_index=True, editable=False, unique=True) email = models.EmailField(db_index=True) USERNAME_FIELD = 'uuid' REQUIRED_FIELDS = ['email', 'site'] def attributes_dict(self, site_url): return _add_common_attributes(self, site_url, {'email': 
self.email}) @models.permalink def get_absolute_url(self): return ('wwwhisper_user', (), {'uuid' : self.uuid}) @modify_site
MIT License
pmatigakis/huginn
huginn/rest.py
FlightControlsResource.get
python
def get(self):
    flight_controls_data = {
        "aileron": self.controls.aileron,
        "elevator": self.controls.elevator,
        "rudder": self.controls.rudder,
        "throttle": self.controls.throttle,
    }

    return flight_controls_data
Returns the flight controls values.
https://github.com/pmatigakis/huginn/blob/a35fec1df844eec05c7ab97a7c70c750e43a9f08/huginn/rest.py#L273-L282
from logging import getLogger from flask import request from flask_restful import Resource, reqparse, marshal_with, abort from tinydb import Query from huginn.schemas import (AccelerationsSchema, VelocitiesSchema, OrientationSchema, AtmosphereShema, ForcesSchema, InitialConditionSchema, PositionSchema, AirspeedIndicatorSchema, AltimeterSchema, AttitudeIndicatorSchema, HeadingIndicatorSchema, VerticalSpeedIndicatorSchema) from huginn.fdm import (Accelerations, Velocities, Orientation, Atmosphere, Forces, InitialCondition, Position) from huginn import request_models from huginn import request_parsers logger = getLogger(__name__) class FDMResource(Resource): def __init__(self, fdm, aircraft): self.fdm = fdm self.aircraft = aircraft def get(self): sensors = self.aircraft.sensors flight_data = { "time": self.fdm.fdmexec.GetSimTime(), "dt": self.fdm.fdmexec.GetDeltaT(), "latitude": self.fdm.position.latitude, "longitude": self.fdm.position.longitude, "altitude": self.fdm.position.altitude, "airspeed": self.fdm.velocities.true_airspeed, "heading": self.fdm.orientation.psi, "x_acceleration": self.fdm.accelerations.x, "y_acceleration": self.fdm.accelerations.y, "z_acceleration": self.fdm.accelerations.z, "roll_rate": self.fdm.velocities.p, "pitch_rate": self.fdm.velocities.q, "yaw_rate": self.fdm.velocities.r, "temperature": self.fdm.atmosphere.temperature, "static_pressure": self.fdm.atmosphere.pressure, "total_pressure": sensors.pitot_tube.true_pressure, "roll": self.fdm.orientation.phi, "pitch": self.fdm.orientation.theta, "climb_rate": self.fdm.velocities.climb_rate } return flight_data class AircraftResource(Resource): def __init__(self, aircraft): self.aircraft = aircraft def get(self): return {"type": self.aircraft.type} class GPSResource(Resource): def __init__(self, gps): self.gps = gps def get(self): gps_data = { "latitude": self.gps.latitude, "longitude": self.gps.longitude, "altitude": self.gps.altitude, "airspeed": self.gps.airspeed, "heading": self.gps.heading } return gps_data class AccelerometerResource(Resource): def __init__(self, accelerometer): self.accelerometer = accelerometer def get(self): accelerometer_data = { "x": self.accelerometer.x, "y": self.accelerometer.y, "z": self.accelerometer.z, } return accelerometer_data class GyroscopeResource(Resource): def __init__(self, gyroscope): self.gyroscope = gyroscope def get(self): gyroscope_data = { "roll_rate": self.gyroscope.roll_rate, "pitch_rate": self.gyroscope.pitch_rate, "yaw_rate": self.gyroscope.yaw_rate } return gyroscope_data class ThermometerResource(Resource): def __init__(self, thermometer): self.thermometer = thermometer def get(self): thermometer_data = { "temperature": self.thermometer.temperature, } return thermometer_data class PressureSensorResource(Resource): def __init__(self, pressure_sensor): self.pressure_sensor = pressure_sensor def get(self): pressure_sensor_data = { "static_pressure": self.pressure_sensor.pressure, } return pressure_sensor_data class PitotTubeResource(Resource): def __init__(self, pitot_tube): self.pitot_tube = pitot_tube def get(self): pitot_tube_data = { "total_pressure": self.pitot_tube.pressure, } return pitot_tube_data class InertialNavigationSystemResource(Resource): def __init__(self, inertial_navigation_system): self.inertial_navigation_system = inertial_navigation_system def get(self): inertial_navigation_system_data = { "latitude": self.inertial_navigation_system.latitude, "longitude": self.inertial_navigation_system.longitude, "altitude": 
self.inertial_navigation_system.altitude, "airspeed": self.inertial_navigation_system.airspeed, "heading": self.inertial_navigation_system.heading, "roll": self.inertial_navigation_system.roll, "pitch": self.inertial_navigation_system.pitch, } return inertial_navigation_system_data class EngineResource(Resource): def __init__(self, engine): self.engine = engine def get(self): engine_data = { "thrust": self.engine.thrust, "throttle": self.engine.throttle, } return engine_data class FlightControlsResource(Resource): def __init__(self, controls): self.controls = controls
BSD 3-Clause New or Revised License
antoineco/kovhernetes
kovh/userdata.py
UserData.gen_kubelet_unit
python
def gen_kubelet_unit(self, roles):
        labels = ("node-role.kubernetes.io/{}=''".format(r) for r in roles)

        self.add_sunits([
            {
                'name': 'kubelet.service',
                'enable': True,
                'contents': (
                    files['kubelet'].decode()
                    .replace('__IMAGE_TAG__', 'v{}'.format(self.k8s_ver))
                    .replace('__NODE_LABELS__', ','.join(labels)))
            }
        ])
Generate kubelet service unit
https://github.com/antoineco/kovhernetes/blob/bb8a7fefede33e24c9946633ce6e17a6bdcaff77/kovh/userdata.py#L141-L155
from gzip import compress from urllib.parse import quote from pkg_resources import resource_string from json import loads, dumps from collections import OrderedDict def res_plain(resource): return resource_string(__name__, resource) def res_gzip(resource): return compress(res_plain(resource)) files = { 'coremeta' : res_plain('data/systemd/coreos-metadata.service.d/10-provider.conf'), 'coremetassh' : res_plain('data/systemd/[email protected]/10-provider.conf'), 'kubelet' : res_plain('data/systemd/kubelet.service'), 'etcd' : res_plain('data/systemd/etcd-member.service.d/10-daemon.conf'), 'docker' : res_plain('data/systemd/docker.service.d/10-daemon.conf'), 'apiserver' : res_plain('data/k8s/manifests/kube-apiserver.json'), 'proxy' : res_plain('data/k8s/manifests/kube-proxy.json'), 'controller-manager' : res_plain('data/k8s/manifests/kube-controller-manager.json'), 'scheduler' : res_plain('data/k8s/manifests/kube-scheduler.json'), 'addon-manager' : res_gzip('data/k8s/manifests/kube-addon-manager.yml'), 'kubelet-config' : res_gzip('data/k8s/kubeletconfig.json'), 'proxy-config' : res_gzip('data/k8s/kubeproxyconfig.json'), 'controller-manager-config' : res_gzip('data/k8s/kubecontrollermanagerconfig.json'), 'scheduler-config' : res_gzip('data/k8s/kubeschedulerconfig.json'), 'kubedns' : res_gzip('data/k8s/addons/kubedns.yml'), 'flannel' : res_gzip('data/k8s/addons/flannel.yml'), 'kubeconfig' : res_plain('data/k8s/kubeconfig.json') } class UserData: def __init__(self, k8s_ver='1.12.2'): self.k8s_ver = k8s_ver self.data = { 'ignition': { 'version': '2.1.0' }, 'storage': {}, 'systemd': {} } def add_files(self, definition): if not 'files' in self.data['storage']: self.data['storage']['files'] = [] if isinstance(definition, list): self.data['storage']['files'].extend(definition) else: raise TypeError("'definition must be a list, not '{}'".format(type(definition))) def add_sunits(self, definition): if not 'units' in self.data['systemd']: self.data['systemd']['units'] = [] if isinstance(definition, list): self.data['systemd']['units'].extend(definition) else: raise TypeError("'definition must be a list, not '{}'".format(type(definition))) def configure_clinux_core(self): self.add_sunits([ { 'name': 'coreos-metadata.service', 'dropins': [{ 'name': '10-provider.conf', 'contents': files['coremeta'].decode() }] }, { 'name': '[email protected]', 'enable': True, 'dropins': [{ 'name': '10-provider.conf', 'contents': files['coremetassh'].decode() }] }, { 'name': 'locksmithd.service', 'mask': True } ]) def gen_kubeconfig(self, component, server='localhost'): kubeconfig = loads(files['kubeconfig'].decode(), object_pairs_hook=OrderedDict) kubeconfig['users'][0]['user']['client-certificate'] = 'tls/client/{}.crt'.format(component) kubeconfig['clusters'][0]['cluster']['server'] = 'https://' + server + ':6443' kubeconfig = compress((dumps(kubeconfig, indent=2) + '\n').encode()) self.add_files([ { 'filesystem': 'root', 'path': '/etc/kubernetes/kubeconfig-' + component + '.gz', 'mode': 416, 'contents': { 'source': 'data:,' + quote(kubeconfig) } } ]) def gen_kubemanifest(self, component, tag): manifest = loads(files[component].decode(), object_pairs_hook=OrderedDict) manifest['spec']['containers'][0]['image'] = 'k8s.gcr.io/hyperkube:v{}'.format(self.k8s_ver) manifest = compress((dumps(manifest, indent=2) + '\n').encode()) self.add_files([ { 'filesystem': 'root', 'path': '/etc/kubernetes/manifests/kube-{}.json'.format(component) + '.gz', 'mode': 416, 'contents': { 'source': 'data:,' + quote(manifest) } } ])
Apache License 2.0
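A minimal usage sketch for the gen_kubelet_unit record above, assuming the kovh package and its bundled systemd unit files are installed; the 'master' role label is an illustrative assumption, not something the record prescribes.

# Hedged usage sketch; UserData and gen_kubelet_unit come from the record, 'master' is assumed.
from kovh.userdata import UserData

ud = UserData(k8s_ver='1.12.2')
ud.gen_kubelet_unit(['master'])         # hypothetical role list
unit = ud.data['systemd']['units'][0]   # the unit dict appended via add_sunits
print(unit['name'])                     # -> 'kubelet.service'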
crm416/semantic
semantic/dates.py
DateService.extractDates
python
def extractDates(self, inp):
        def merge(param):
            day, time = param

            if not (day or time):
                return None
            if not day:
                return time
            if not time:
                return day

            return datetime.datetime(
                day.year, day.month, day.day, time.hour, time.minute
            )

        days = self.extractDays(inp)
        times = self.extractTimes(inp)

        return map(merge, zip_longest(days, times, fillvalue=None))
Extract semantic date information from an input string.

In effect, runs both parseDay and parseTime on the input string and
merges the results to produce a comprehensive datetime object.

Args:
    inp (str): Input string to be parsed.

Returns:
    A list of datetime objects containing the extracted dates from the
    input snippet, or an empty list if not found.
https://github.com/crm416/semantic/blob/46deb8fefb3ea58aad2fedc8d0d62f3ee254b8fe/semantic/dates.py#L349-L378
import re import datetime try: from itertools import zip_longest except: from itertools import izip_longest as zip_longest from .numbers import NumberService class DateService(object): def __init__(self, tz=None, now=None): self.tz = tz if now: self.now = now else: self.now = datetime.datetime.now(tz=self.tz) __months__ = ['january', 'february', 'march', 'april', 'may', 'june', 'july', 'august', 'september', 'october', 'november', 'december'] __shortMonths__ = ['jan', 'feb', 'mar', 'apr', 'may', 'jun', 'jul', 'aug', 'sept', 'oct', 'nov', 'dec'] __daysOfWeek__ = ['monday', 'tuesday', 'wednesday', 'thursday', 'friday', 'saturday', 'sunday'] __relativeDates__ = ['tomorrow', 'tonight', 'next'] __todayMatches__ = ['tonight', 'today', 'this morning', 'this evening', 'this afternoon'] __tomorrowMatches__ = ['tomorrow', 'next morning', 'next evening', 'next afternoon'] __dateDescriptors__ = { 'one': 1, 'first': 1, 'two': 2, 'second': 2, 'three': 3, 'third': 3, 'four': 4, 'fourth': 4, 'five': 5, 'fifth': 5, 'six': 6, 'sixth': 6, 'seven': 7, 'seventh': 7, 'eight': 8, 'eighth': 8, 'nine': 9, 'ninth': 9, 'ten': 10, 'tenth': 10, 'eleven': 11, 'eleventh': 11, 'twelve': 12, 'twelth': 12, 'thirteen': 13, 'thirteenth': 13, 'fourteen': 14, 'fourteenth': 14, 'fifteen': 15, 'fifteenth': 15, 'sixteen': 16, 'sixteenth': 16, 'seventeen': 17, 'seventeenth': 17, 'eighteen': 18, 'eighteenth': 18, 'nineteen': 19, 'nineteenth': 19, 'twenty': 20, 'twentieth': 20, 'twenty one': 21, 'twenty first': 21, 'twenty two': 22, 'twenty second': 22, 'twenty three': 23, 'twenty third': 23, 'twenty four': 24, 'twenty fourth': 24, 'twenty five': 25, 'twenty fifth': 25, 'twenty six': 26, 'twenty sixth': 26, 'twenty seven': 27, 'twenty seventh': 27, 'twenty eight': 28, 'twenty eighth': 28, 'twenty nine': 29, 'twenty ninth': 29, 'thirty': 30, 'thirtieth': 30, 'thirty one': 31, 'thirty first': 31 } _dayRegex = re.compile( r"""(?ix) ((week|day)s?\ from\ )? ( tomorrow |tonight |today |(next|this)[\ \b](morning|afternoon|evening|Monday|Tuesday|Wednesday|Thursday|Friday|Saturday|Sunday) |(Monday|Tuesday|Wednesday|Thursday|Friday|Saturday|Sunday) |(Jan(?:uary)?|Feb(?:ruary)?|Mar(?:ch)?|Apr(?:il)?|May|June?|July?|Aug(?:ust)?|Sept(?:ember)?|Oct(?:ober)?|Nov(?:ember)?|Dec(?:ember)?)\ (\w+)((\s|\-)?\w*) ) """) _timeRegex = re.compile( r"""(?ix) .*? ( morning |afternoon |evening |(\d{1,2}\:\d{2})\ ?(am|pm)? |in\ (.+?)\ (hours|minutes)(\ (?:and\ )?(.+?)\ (hours|minutes))? 
) .*?""") def _preprocess(self, inp): return inp.replace('-', ' ').lower() def extractDays(self, inp): inp = self._preprocess(inp) def extractDayOfWeek(dayMatch): if dayMatch.group(5) in self.__daysOfWeek__: return self.__daysOfWeek__.index(dayMatch.group(5)) elif dayMatch.group(6) in self.__daysOfWeek__: return self.__daysOfWeek__.index(dayMatch.group(6)) def extractMonth(dayMatch): if dayMatch.group(7) in self.__months__: return self.__months__.index(dayMatch.group(7)) + 1 elif dayMatch.group(7) in self.__shortMonths__: return self.__shortMonths__.index(dayMatch.group(7)) + 1 def extractDay(dayMatch): combined = dayMatch.group(8) + dayMatch.group(9) if combined in self.__dateDescriptors__: return self.__dateDescriptors__[combined] elif dayMatch.group(8) in self.__dateDescriptors__: return self.__dateDescriptors__[dayMatch.group(8)] elif int(dayMatch.group(8)) in self.__dateDescriptors__.values(): return int(dayMatch.group(8)) def extractDaysFrom(dayMatch): if not dayMatch.group(1): return 0 def numericalPrefix(dayMatch): prefix = inp.split(dayMatch.group(1))[0].strip().split(' ') prefix.reverse() prefix = list(filter(lambda s: s != 'and', prefix)) service = NumberService() num = prefix[0] if service.isValid(num): for n in prefix[1:]: inc = n + " " + num if service.isValid(inc): num = inc else: break return service.parse(num) return 1 factor = numericalPrefix(dayMatch) if dayMatch.group(2) == 'week': return factor * 7 elif dayMatch.group(2) == 'day': return factor * 1 def handleMatch(dayMatch): def safe(exp): try: return exp() except: return False days_from = safe(lambda: extractDaysFrom(dayMatch)) today = safe(lambda: dayMatch.group(3) in self.__todayMatches__) tomorrow = safe(lambda: dayMatch.group(3) in self.__tomorrowMatches__) next_week = safe(lambda: dayMatch.group(4) == 'next') day_of_week = safe(lambda: extractDayOfWeek(dayMatch)) month = safe(lambda: extractMonth(dayMatch)) day = safe(lambda: extractDay(dayMatch)) if not dayMatch: return None elif today: d = self.now elif tomorrow: d = self.now + datetime.timedelta(days=1) elif type(day_of_week) == int: current_day_of_week = self.now.weekday() num_days_away = (day_of_week - current_day_of_week) % 7 if next_week: num_days_away += 7 d = self.now + datetime.timedelta(days=num_days_away) elif month and day: d = datetime.datetime( self.now.year, month, day, self.now.hour, self.now.minute) if days_from: d += datetime.timedelta(days=days_from) return d matches = self._dayRegex.finditer(inp) return [handleMatch(dayMatch) for dayMatch in matches] def extractDay(self, inp): day = self.extractDay(inp) if day: return day[0] return None def extractTimes(self, inp): def handleMatch(time): relative = False if not time: return None elif time.group(1) == 'morning': h = 8 m = 0 elif time.group(1) == 'afternoon': h = 12 m = 0 elif time.group(1) == 'evening': h = 19 m = 0 elif time.group(4) and time.group(5): h, m = 0, 0 converter = NumberService() try: diff = converter.parse(time.group(4)) except: return None if time.group(5) == 'hours': h += diff else: m += diff if time.group(6): converter = NumberService() try: diff = converter.parse(time.group(7)) except: return None if time.group(8) == 'hours': h += diff else: m += diff relative = True else: t = time.group(2) h, m = int(t.split(':')[0]) % 12, int(t.split(':')[1]) try: if time.group(3) == 'pm': h += 12 except IndexError: pass if relative: return self.now + datetime.timedelta(hours=h, minutes=m) else: return datetime.datetime( self.now.year, self.now.month, self.now.day, h, m ) inp = 
self._preprocess(inp) return [handleMatch(time) for time in self._timeRegex.finditer(inp)] def extractTime(self, inp): times = self.extractTimes(inp) if times: return times[0] return None
MIT License
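A short usage sketch for extractDates; the input phrase is made up, and the result is relative to DateService.now (which defaults to the current time).

# Hedged usage sketch; the phrase below is an assumption chosen to match the day/time regexes.
from semantic.dates import DateService

service = DateService()
dates = list(service.extractDates("lunch tomorrow at 12:30 pm"))
# dates[0] is a datetime for tomorrow's date at 12:30, merged from extractDays and extractTimes
print(dates)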
rjt1990/pyflux
pyflux/ssm/ndynlin.py
NDynReg._ss_matrices
python
def _ss_matrices(self,beta):

        T = np.identity(self.state_no)
        Z = self.X
        R = np.identity(self.state_no)
        Q = np.identity(self.state_no)

        for i in range(0,self.state_no):
            Q[i][i] = self.latent_variables.z_list[i].prior.transform(beta[i])

        return T, Z, R, Q
Creates the state space matrices required

Parameters
----------
beta : np.array
    Contains untransformed starting values for latent variables

Returns
----------
T, Z, R, Q : np.array
    State space matrices used in KFS algorithm
https://github.com/rjt1990/pyflux/blob/297f2afc2095acd97c12e827dd500e8ea5da0c0f/pyflux/ssm/ndynlin.py#L345-L368
import sys if sys.version_info < (3,): range = xrange import numpy as np import pandas as pd import scipy.stats as ss from scipy import optimize from .. import inference as ifr from .. import families as fam from .. import output as op from .. import tsm as tsm from .. import data_check as dc from .. import covariances as cov from .. import results as res from .. import gas as gas from .kalman import * from .dynlin import * class NDynReg(tsm.TSM): def __init__(self, formula, data, family): super(NDynReg,self).__init__('NDynReg') self.max_lag = 0 self._z_hide = 0 self.supported_methods = ["MLE","PML","Laplace","M-H","BBVI"] self.default_method = "MLE" self.multivariate_model = False self.is_pandas = True self.data_original = data self.formula = formula self.y, self.X = dmatrices(formula, data) self.z_no = self.X.shape[1] self.y_name = self.y.design_info.describe() self.data_name = self.y_name self.X_names = self.X.design_info.describe().split(" + ") self.y = np.array([self.y]).ravel() self.data = self.y self.data_length = self.data.shape[0] self.X = np.array([self.X])[0] self.index = data.index self.state_no = self.X.shape[1] self._create_latent_variables() self.family = family self.model_name2, self.link, self.scale, self.shape, self.skewness, self.mean_transform, self.cythonized = self.family.setup() self.model_name = self.model_name2 + " Dynamic Regression Model" for no, i in enumerate(self.family.build_latent_variables()): self.latent_variables.add_z(i[0],i[1],i[2]) self.latent_variables.z_list[no+1].start = i[3] self.family_z_no = len(self.family.build_latent_variables()) self.z_no = len(self.latent_variables.z_list) def _get_scale_and_shape(self, parm): if self.scale is True: if self.shape is True: model_shape = parm[-1] model_scale = parm[-2] else: model_shape = 0 model_scale = parm[-1] else: model_scale = 0 model_shape = 0 if self.skewness is True: model_skewness = parm[-3] else: model_skewness = 0 return model_scale, model_shape, model_skewness def neg_loglik(self, beta): states = np.zeros([self.state_no, self.data_length]) for state_i in range(self.state_no): states[state_i,:] = beta[(self.z_no + (self.data_length*state_i)):(self.z_no + (self.data_length*(state_i+1)))] parm = np.array([self.latent_variables.z_list[k].prior.transform(beta[k]) for k in range(self.z_no)]) scale, shape, skewness = self._get_scale_and_shape(parm) return self.state_likelihood(beta, states) + self.family.neg_loglikelihood(self.data, self.link(np.sum(self.X*states.T,axis=1)), scale, shape, skewness) def likelihood_markov_blanket(self, beta): states = np.zeros([self.state_no, self.data_length]) for state_i in range(self.state_no): states[state_i,:] = beta[(self.z_no + (self.data_length*state_i)):(self.z_no + (self.data_length*(state_i+1)))] parm = np.array([self.latent_variables.z_list[k].prior.transform(beta[k]) for k in range(self.z_no)]) scale, shape, skewness = self._get_scale_and_shape(parm) return self.family.markov_blanket(self.data, self.link(np.sum(self.X*states.T,axis=1)), scale, shape, skewness) def state_likelihood(self, beta, alpha): _, _, _, Q = self._ss_matrices(beta) state_lik = 0 for i in range(alpha.shape[0]): state_lik += np.sum(ss.norm.logpdf(alpha[i][1:]-alpha[i][:-1],loc=0,scale=np.power(Q[i][i],0.5))) return state_lik def state_likelihood_markov_blanket(self, beta, alpha, col_no): _, _, _, Q = self._ss_matrices(beta) blanket = np.append(0,ss.norm.logpdf(alpha[col_no][1:]-alpha[col_no][:-1],loc=0,scale=np.sqrt(Q[col_no][col_no]))) blanket[:-1] = blanket[:-1] + blanket[1:] return 
blanket def neg_logposterior(self, beta): post = self.neg_loglik(beta) for k in range(0,self.z_no): post += -self.latent_variables.z_list[k].prior.logpdf(beta[k]) return post def markov_blanket(self, beta, alpha): likelihood_blanket = self.likelihood_markov_blanket(beta) state_blanket = self.state_likelihood_markov_blanket(beta, alpha, 0) for i in range(self.state_no-1): likelihood_blanket = np.append(likelihood_blanket,self.likelihood_markov_blanket(beta)) state_blanket = np.append(state_blanket,self.state_likelihood_markov_blanket(beta,alpha,i+1)) return likelihood_blanket + state_blanket def evo_blanket(self, beta, alpha): evo_blanket = np.zeros(self.state_no) for i in range(evo_blanket.shape[0]): evo_blanket[i] = self.state_likelihood_markov_blanket(beta, alpha, i).sum() if self.family_z_no > 0: evo_blanket = np.append([self.likelihood_markov_blanket(beta).sum()]*(self.family_z_no),evo_blanket) return evo_blanket def log_p_blanket(self, beta): states = np.zeros([self.state_no, self.data.shape[0]]) for state_i in range(self.state_no): states[state_i,:] = beta[(self.z_no + (self.data.shape[0]*state_i)):(self.z_no + (self.data.shape[0]*(state_i+1)))] return np.append(self.evo_blanket(beta,states),self.markov_blanket(beta,states)) def _create_latent_variables(self): for parm in range(self.z_no): self.latent_variables.add_z('Sigma^2 ' + self.X_names[parm], fam.Flat(transform='exp'), fam.Normal(0,3)) def _model(self, data, beta): T, Z, R, Q, H = self._ss_matrices(beta) return nld_univariate_kalman(data, Z, H, T, Q, R, 0.0) def _preoptimize_model(self): gaussian_model = DynReg(formula=self.formula, data=self.data_original) gaussian_model.fit() for i in range(self.z_no-self.family_z_no): self.latent_variables.z_list[i].start = gaussian_model.latent_variables.get_z_values()[i+1] if self.model_name2 == 't': def temp_function(params): return -np.sum(ss.t.logpdf(x=self.data, df=np.exp(params[0]), loc=np.ones(self.data.shape[0])*params[1], scale=np.exp(params[2]))) p = optimize.minimize(temp_function,np.array([2.0,0.0,-1.0]),method='L-BFGS-B') self.latent_variables.z_list[-2].start = p.x[2] self.latent_variables.z_list[-1].start = p.x[0] elif self.model_name2 == 'Skewt': def temp_function(params): return -np.sum(fam.Skewt.logpdf_internal(x=self.data,df=np.exp(params[0]), loc=np.ones(self.data.shape[0])*params[1], scale=np.exp(params[2]),gamma=np.exp(params[3]))) p = optimize.minimize(temp_function,np.array([2.0,0.0,-1.0,0.0]),method='L-BFGS-B') self.latent_variables.z_list[-3].start = p.x[3] self.latent_variables.z_list[-2].start = p.x[2] self.latent_variables.z_list[-1].start = p.x[0] return gaussian_model.latent_variables
BSD 3-Clause New or Revised License
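To make the state-space construction in _ss_matrices concrete, here is a standalone numpy sketch of the T, Z, R, Q layout it builds; the state count, design matrix, and transformed variances below are made-up stand-ins, not values from a fitted NDynReg model.

import numpy as np

state_no = 2                      # hypothetical number of regression states
X = np.ones((100, state_no))      # stand-in for the design matrix NDynReg stores as self.X
transformed = [0.5, 0.1]          # stand-ins for prior.transform(beta[i])

T = np.identity(state_no)         # random-walk transition matrix
Z = X                             # observation loadings are simply the regressors
R = np.identity(state_no)
Q = np.identity(state_no)
for i in range(state_no):
    Q[i][i] = transformed[i]      # state innovation variances on the diagonal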
iristyle/chocolateypackages
EthanBrown.SublimeText2.EditorPackages/tools/PackageCache/SmartMarkdown/headline_move.py
HeadlineMoveCommand.run
python
def run(self, edit, forward=True, same_level=True):
        new_sel = []
        if same_level:
            level_type = headline.MATCH_PARENT
        else:
            level_type = headline.MATCH_ANY

        for region in self.view.sel():
            if same_level:
                _, level = headline.headline_and_level_at_point(self.view,
                                                                region.a,
                                                                search_above_and_down=True)
                if level is None:
                    return
            else:
                level = headline.ANY_LEVEL

            match_region, _ = headline.find_headline(self.view,
                                                     region.a,
                                                     level,
                                                     forward,
                                                     level_type,
                                                     skip_headline_at_point=True,
                                                     skip_folded=True)
            if is_region_void(match_region):
                return
            new_sel.append(sublime.Region(match_region.a, match_region.a))

        self.adjust_view(new_sel)
Move between headlines, forward or backward. If same_level is true, only move to a headline with the same or a higher level.
https://github.com/iristyle/chocolateypackages/blob/8c9833710577de6db6e8b1db5d9196e19e19d117/EthanBrown.SublimeText2.EditorPackages/tools/PackageCache/SmartMarkdown/headline_move.py#L20-L55
import sublime import sublime_plugin try: from . import headline from .utilities import is_region_void except ValueError: import headline from utilities import is_region_void class HeadlineMoveCommand(sublime_plugin.TextCommand):
MIT License
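A hedged sketch of triggering the command above from another Sublime Text plugin; the wrapper class is hypothetical, while "headline_move" is the command name Sublime derives from HeadlineMoveCommand.

import sublime_plugin

class JumpToNextHeadlineAnyLevelCommand(sublime_plugin.TextCommand):  # hypothetical wrapper
    def run(self, edit):
        # forward through headlines of any level, per the run() signature in the record
        self.view.run_command("headline_move", {"forward": True, "same_level": False})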
scikit-hep/uproot4
src/uproot/behaviors/TAxis.py
AxisTraits.discrete
python
def discrete(self):
        fNbins = self._axis.member("fNbins")
        fLabels = self._axis.member("fLabels", none_if_missing=True)
        return fLabels is not None and len(fLabels) == fNbins
True if bins are discrete: if they have string-valued labels.
https://github.com/scikit-hep/uproot4/blob/e0db77a2a10d701cb48f72e9f0d7867e1589572d/src/uproot/behaviors/TAxis.py#L39-L45
from __future__ import absolute_import try: from collections.abc import Sequence except ImportError: from collections import Sequence import numpy class AxisTraits(object): def __init__(self, axis): self._axis = axis def __repr__(self): return "AxisTraits({0})".format(repr(self._axis)) @property def circular(self): return False @property
BSD 3-Clause New or Revised License
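A usage sketch for the discrete trait; the file and histogram names are assumptions, and it presumes the axis object exposes the AxisTraits shown above through its traits property (the UHI-style accessor).

import uproot

hist = uproot.open("example.root")["h_categories"]   # hypothetical file and histogram
axis = hist.axis(0)                                  # first axis of the histogram
print(axis.traits.discrete)   # True only when every bin carries a string label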
michaelhush/m-loop
mloop/utilities.py
_generate_legend_labels
python
def _generate_legend_labels(param_indices, all_param_names):
    labels = []
    for index in param_indices:
        label = str(index)
        name = all_param_names[index]
        if name:
            label = label + ': {name}'.format(name=name)
        labels.append(label)
    return labels
Generate a list of labels for the legend of a plot.

This is a helper function for visualization methods, used to generate the
labels in legends for plots that show the values for optimization
parameters. The label has the parameter's index and, if available, a colon
followed by the parameter's name, e.g. '3: some_name'. If no name is
available, then the label will simply be a string representation of the
parameter's index, e.g. '3'.

Args:
    param_indices (list-like of int): The indices of the parameters for
        which labels should be generated. Generally this should be the
        same as the list of indices of parameters included in the plot.
    all_param_names (list-like of str): The names of all parameters from
        the optimization. Note that this argument should be *all* of the
        names for all of the parameters, not just the ones to be included
        in the plot legend.

Returns:
    labels (list of str): The labels generated for use in a plot legend.
https://github.com/michaelhush/m-loop/blob/24e0e67d993b81dcc319d7cc6390c3345036fc67/mloop/utilities.py#L417-L449
from __future__ import absolute_import, division, print_function __metaclass__ = type import scipy.io as si import pickle import logging import datetime import sys import os import numpy as np import numpy.random as nr import base64 import mloop python_version = sys.version_info[0] if python_version < 3: import Queue empty_exception = Queue.Empty else: import queue empty_exception = queue.Empty default_interface_in_filename = 'exp_output' default_interface_out_filename = 'exp_input' default_interface_file_type = 'txt' archive_foldername = './M-LOOP_archives/' log_foldername = './M-LOOP_logs/' default_log_filename = 'M-LOOP' filewrite_wait = 0.1 mloop_path = os.path.dirname(mloop.__file__) np.set_printoptions(threshold=np.inf) def config_logger(**kwargs): _ = _config_logger(**kwargs) def _config_logger(log_filename = default_log_filename, file_log_level=logging.DEBUG, console_log_level=logging.INFO, **kwargs): log = logging.getLogger('mloop') if len(log.handlers) == 0: log.setLevel(min(file_log_level,console_log_level)) if log_filename is not None: filename_suffix = generate_filename_suffix('log') full_filename = log_filename + filename_suffix filename_with_path = os.path.join(log_foldername, full_filename) actual_log_foldername = os.path.dirname(filename_with_path) if not os.path.exists(actual_log_foldername): os.makedirs(actual_log_foldername) fh = logging.FileHandler(filename_with_path) fh.setLevel(file_log_level) fh.setFormatter(logging.Formatter('%(asctime)s %(name)-20s %(levelname)-8s %(message)s')) log.addHandler(fh) ch = logging.StreamHandler(stream = sys.stdout) ch.setLevel(console_log_level) ch.setFormatter(logging.Formatter('%(levelname)-8s %(message)s')) log.addHandler(ch) log.info('M-LOOP version ' + mloop.__version__) log.debug('M-LOOP Logger configured.') return kwargs def datetime_to_string(datetime): return datetime.strftime('%Y-%m-%d_%H-%M') def generate_filename_suffix(file_type, file_datetime=None, random_bytes=False): if file_datetime is None: file_datetime = datetime.datetime.now() date_string = datetime_to_string(file_datetime) filename_suffix = '_' + date_string if random_bytes: random_string = base64.urlsafe_b64encode(nr.bytes(6)).decode() filename_suffix = filename_suffix + '_' + random_string filename_suffix = filename_suffix + '.' 
+ file_type return filename_suffix def dict_to_txt_file(tdict,filename): with open(filename,'w') as out_file: for key in tdict: out_file.write(str(key) + '=' + repr(tdict[key]).replace('\n', '').replace('\r', '') + '\n') def txt_file_to_dict(filename): with open(filename,'r') as in_file: tdict_string = '' for line in in_file: temp = (line.partition('#')[0]).strip('\n').strip() if temp != '': tdict_string += temp+',' array = np.array inf = float('inf') nan = float('nan') tdict = eval('dict('+tdict_string+')') return tdict def save_dict_to_file(dictionary,filename,file_type=None): if file_type is None: file_type = get_file_type(filename) if file_type=='mat': si.savemat(filename,dictionary) elif file_type=='txt': dict_to_txt_file(dictionary,filename) elif file_type=='pkl': with open(filename,'wb') as out_file: pickle.dump(dictionary,out_file) else: raise ValueError def get_dict_from_file(filename,file_type=None): if file_type is None: file_type = get_file_type(filename) if file_type=='mat': dictionary = si.loadmat(filename) elif file_type=='txt': dictionary = txt_file_to_dict(filename) elif file_type=='pkl': with open(filename,'rb') as in_file: dictionary = pickle.load(in_file) else: raise ValueError return dictionary def get_file_type(filename): _, file_type = os.path.splitext(filename) file_type = file_type[1:] return file_type def get_controller_type_from_learner_archive(learner_filename): file_type = get_file_type(learner_filename) log = logging.getLogger(__name__) if not check_file_type_supported(file_type): message = 'File type not supported: ' + repr(file_type) log.error(message) raise ValueError(message) learner_dict = get_dict_from_file(learner_filename, file_type) archive_type = learner_dict['archive_type'] if archive_type == 'controller': message = ('{filename} is a controller archive, not a ' 'learner archive.').format(filename=learner_filename) log.error(message) raise ValueError(message) ARCHIVE_CONTROLLER_MAPPING = { 'gaussian_process_learner': 'gaussian_process', 'neural_net_learner': 'neural_net', 'differential_evolution': 'differential_evolution', } if archive_type in ARCHIVE_CONTROLLER_MAPPING: controller_type = ARCHIVE_CONTROLLER_MAPPING[archive_type] else: message = ('Learner archive has unsupported archive_type: ' '{archive_type}').format(archive_type=archive_type) log.error(message) raise NotImplementedError(message) return controller_type def check_file_type_supported(file_type): return file_type == 'mat' or 'txt' or 'pkl' def safe_cast_to_array(in_array): out_array = np.squeeze(np.array(in_array)) if out_array.shape == (): out_array = np.array([out_array[()]]) return out_array def safe_cast_to_list(in_array): if isinstance(in_array, np.ndarray): t_array = np.squeeze(in_array) if t_array.shape == (): out_list = [t_array[()]] else: out_list = list(t_array) else: out_list = list(in_array) return out_list def chunk_list(list_, chunk_size): if (chunk_size is None) or (chunk_size == float('inf')): return [list_] return [list_[i:(i+chunk_size)] for i in range(0, len(list_), chunk_size)] def _param_names_from_file_dict(file_dict): if 'param_names' in file_dict: param_names = [str(name) for name in file_dict['param_names']] else: num_params = int(file_dict['num_params']) param_names = [''] * num_params return param_names
MIT License
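A quick illustration of the labeling rule described in the docstring; the parameter names are made up.

from mloop.utilities import _generate_legend_labels

labels = _generate_legend_labels([0, 2], ['', 'detuning', 'power'])
print(labels)   # ['0', '2: power']  (index 0 has no name, so it gets a bare index label)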
nccgroup/libptmalloc
libptmalloc/frontend/commands/gdb/pthelp.py
pthelp.invoke
python
def invoke(self, arg, from_tty):
        pu.print_header("{:<20}".format("pthelp"), end="")
        print("List all libptmalloc commands")

        for cmd in self.cmds:
            if cmd.parser != None:
                description = cmd.parser.description.split("\n")[0]
            elif cmd.description != None:
                description = cmd.description
            else:
                description = "Unknown"
            pu.print_header("{:<20}".format(cmd.name), end="")
            print(description)

        print("Note: Use a command name with -h to get additional help")
Inherited from gdb.Command
See https://sourceware.org/gdb/current/onlinedocs/gdb/Commands-In-Python.html

Print the usage of all the commands
https://github.com/nccgroup/libptmalloc/blob/e9011393db1ea79b769dcf5f52bd1170a367b304/libptmalloc/frontend/commands/gdb/pthelp.py#L32-L51
from __future__ import print_function import sys import logging from libptmalloc.frontend import printutils as pu from libptmalloc.ptmalloc import malloc_state as ms from libptmalloc.ptmalloc import ptmalloc as pt from libptmalloc.frontend import helpers as h from libptmalloc.frontend.commands.gdb import ptcmd log = logging.getLogger("libptmalloc") log.trace("pthelp.py") try: import gdb except ImportError: print("Not running inside of GDB, exiting...") raise Exception("sys.exit()") class pthelp(ptcmd.ptcmd): def __init__(self, ptm, commands=[]): log.debug("pthelp.__init__()") super(pthelp, self).__init__(ptm, "pthelp") self.cmds = commands @h.catch_exceptions
MIT License
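A hedged sketch of exercising the command above from gdb's embedded Python once libptmalloc has been loaded into a session; gdb.execute is the standard gdb Python API.

import gdb

# Only meaningful inside a gdb session where libptmalloc has registered its commands.
gdb.execute("pthelp")   # prints one line per registered command, then the -h hint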
universitadellacalabria/uniticket
uni_ticket/views/management.py
manage_closed_ticket_url
python
def manage_closed_ticket_url(request, structure_slug):
    structure = get_object_or_404(OrganizationalStructure, slug=structure_slug)
    user_type = get_user_type(request.user, structure)
    return redirect('uni_ticket:{}_closed_ticket'.format(user_type),
                    structure_slug)
Makes URL redirect to closed ticket page depending on user role

:type structure_slug: String

:param structure_slug: slug of structure to manage

:return: redirect
https://github.com/universitadellacalabria/uniticket/blob/b7c6e9b793eda273038a6339f6dfdfc3e3b5a344/uni_ticket/views/management.py#L69-L83
import json import logging import os import zipfile from django.conf import settings from django.contrib import messages from django.contrib.admin.models import LogEntry, ADDITION, CHANGE from django.contrib.admin.views.decorators import staff_member_required from django.contrib.auth.decorators import login_required from django.contrib.contenttypes.models import ContentType from django.db.models import Q from django.http import HttpResponse, HttpResponseRedirect from django.shortcuts import get_object_or_404, render, redirect from django.urls import reverse from django.utils.html import strip_tags from django.utils.translation import gettext as _ from django_form_builder.utils import get_as_dict, get_labeled_errors from organizational_area.models import * from uni_ticket.decorators import (has_admin_privileges, ticket_assigned_to_structure, ticket_is_taken_and_not_closed, ticket_is_not_taken_and_not_closed, ticket_is_taken_for_employee) from uni_ticket.forms import * from uni_ticket.models import * from uni_ticket.utils import * logger = logging.getLogger(__name__) @login_required def manage_opened_ticket_url(request, structure_slug): structure = get_object_or_404(OrganizationalStructure, slug=structure_slug) user_type = get_user_type(request.user, structure) return redirect('uni_ticket:{}_opened_ticket'.format(user_type), structure_slug) @login_required def manage_unassigned_ticket_url(request, structure_slug): structure = get_object_or_404(OrganizationalStructure, slug=structure_slug) user_type = get_user_type(request.user, structure) return redirect('uni_ticket:{}_unassigned_ticket'.format(user_type), structure_slug) @login_required
Apache License 2.0
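A hedged routing sketch for the view above; the URL pattern and route name are assumptions, only the view function comes from the record.

from django.urls import path
from uni_ticket.views.management import manage_closed_ticket_url

urlpatterns = [
    # hypothetical pattern; the real project defines its own paths and names
    path('manage/<slug:structure_slug>/tickets/closed/', manage_closed_ticket_url,
         name='manage_closed_ticket_url'),
]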
bigmlcom/bigmler
bigmler/checkpoint.py
is_evaluation_created
python
def is_evaluation_created(path):
    evaluation_id = None
    try:
        with open("%s%sevaluation" % (path, os.sep)) as evaluation_file:
            evaluation_id = evaluation_file.readline().strip()
            try:
                evaluation_id = bigml.api.get_evaluation_id(evaluation_id)
                return True, evaluation_id
            except ValueError:
                return False, None
    except IOError:
        return False, None
Checks existence and reads the evaluation id from the evaluation file in the path directory
https://github.com/bigmlcom/bigmler/blob/91973ca1e752954302bf26bb22aa6874dc34ce69/bigmler/checkpoint.py#L119-L134
import os import bigml.api from bigml.util import console_log from bigmler.utils import log_message def is_source_created(path, suffix=""): source_id = None try: with open("%s%ssource%s" % (path, os.sep, suffix)) as source_file: source_id = source_file.readline().strip() try: source_id = bigml.api.get_source_id(source_id) return True, source_id except ValueError: return False, None except IOError: return False, None def is_dataset_created(path, suffix=""): dataset_id = None try: with open("%s%sdataset%s" % (path, os.sep, suffix)) as dataset_file: dataset_id = dataset_file.readline().strip() try: dataset_id = bigml.api.get_dataset_id(dataset_id) return True, dataset_id except ValueError: return False, None except IOError: return False, None def are_datasets_created(path, number_of_datasets, suffix='parts'): dataset_ids = [] try: with open("%s%sdataset_%s" % (path, os.sep, suffix)) as datasets_file: for line in datasets_file: dataset = line.strip() try: dataset_id = bigml.api.get_dataset_id(dataset) dataset_ids.append(dataset_id) except ValueError: return False, dataset_ids return len(dataset_ids) == number_of_datasets, dataset_ids except IOError: return False, dataset_ids def are_models_created(path, number_of_models): model_ids = [] try: with open("%s%smodels" % (path, os.sep)) as models_file: for line in models_file: model = line.strip() try: model_id = bigml.api.get_model_id(model) model_ids.append(model_id) except ValueError: return False, model_ids return len(model_ids) == number_of_models, model_ids except IOError: return False, model_ids def are_predictions_created(predictions_file, number_of_tests): predictions = file_number_of_lines(predictions_file) if predictions != number_of_tests: os.remove(predictions_file) return False, None return True, None
Apache License 2.0
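A short usage sketch for the checkpoint helper above; the output directory name is made up.

from bigmler.checkpoint import is_evaluation_created

done, evaluation_id = is_evaluation_created("./last_run")   # hypothetical bigmler output dir
if done:
    print("resuming with evaluation %s" % evaluation_id)
else:
    print("no reusable evaluation found")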