Dataset columns:
repository_name: string, lengths 7 to 107
function_path: string, lengths 4 to 190
function_identifier: string, lengths 1 to 236
language: string, 1 distinct value
function: string, lengths 9 to 647k
docstring: string, lengths 5 to 488k
function_url: string, lengths 71 to 285
context: string, lengths 0 to 2.51M
license: string, 5 distinct values
mcs07/chemdataextractor
chemdataextractor/reader/markup.py
LxmlReader._make_tree
python
def _make_tree(self, fstring): pass
Read a string into an lxml elementtree.
https://github.com/mcs07/chemdataextractor/blob/349a3bea965f2073141d62043b89319222e46af1/chemdataextractor/reader/markup.py#L200-L202
from __future__ import absolute_import from __future__ import division from __future__ import print_function from __future__ import unicode_literals import logging from abc import abstractmethod, ABCMeta from collections import defaultdict from lxml import etree from lxml.etree import XMLParser from lxml.html import HTMLParser import six from ..errors import ReaderError from ..doc.document import Document from ..doc.text import Title, Heading, Paragraph, Caption, Citation, Footnote, Text, Sentence from ..doc.table import Table, Cell from ..doc.figure import Figure from ..scrape import INLINE_ELEMENTS from ..scrape.clean import clean from ..scrape.csstranslator import CssHTMLTranslator from ..text import get_encoding from .base import BaseReader log = logging.getLogger(__name__) class LxmlReader(six.with_metaclass(ABCMeta, BaseReader)): cleaners = [clean] root_css = 'html' title_css = 'h1' heading_css = 'h2, h3, h4, h5, h6' table_css = 'table' table_caption_css = 'caption' table_head_row_css = 'thead tr' table_body_row_css = 'tbody tr' table_cell_css = 'th, td' table_footnote_css = 'tfoot tr th' reference_css = 'a.ref' figure_css = 'figure' figure_caption_css = 'figcaption' citation_css = 'cite' ignore_css = 'a.ref sup' inline_elements = INLINE_ELEMENTS def _parse_element_r(self, el, specials, refs, id=None, element_cls=Paragraph): elements = [] if el.tag in {etree.Comment, etree.ProcessingInstruction}: return [] if el in specials: return specials[el] id = el.get('id', id) references = refs.get(el, []) if el.text is not None: elements.append(element_cls(six.text_type(el.text), id=id, references=references)) elif references: elements.append(element_cls('', id=id, references=references)) for child in el: if child.tag not in {etree.Comment, etree.ProcessingInstruction} and child.tag.lower() == 'br': elements.append(element_cls('')) child_elements = self._parse_element_r(child, specials=specials, refs=refs, id=id, element_cls=element_cls) if (self._is_inline(child) and len(elements) > 0 and len(child_elements) > 0 and isinstance(elements[-1], (Text, Sentence)) and isinstance(child_elements[0], (Text, Sentence)) and type(elements[-1]) == type(child_elements[0])): elements[-1] += child_elements.pop(0) elements.extend(child_elements) if child.tail is not None: if self._is_inline(child) and len(elements) > 0 and isinstance(elements[-1], element_cls): elements[-1] += element_cls(six.text_type(child.tail), id=id) else: elements.append(element_cls(six.text_type(child.tail), id=id)) return elements def _parse_element(self, el, specials=None, refs=None, element_cls=Paragraph): if specials is None: specials = {} if refs is None: refs = {} elements = self._parse_element_r(el, specials=specials, refs=refs, element_cls=element_cls) final_elements = [] for element in elements: if isinstance(element, Text): if element.text.strip(): final_elements.append(element) else: final_elements.append(element) return final_elements def _parse_text(self, el, refs=None, specials=None, element_cls=Paragraph): if specials is None: specials = {} if refs is None: refs = {} elements = self._parse_element_r(el, specials=specials, refs=refs, element_cls=element_cls) if not elements: return [element_cls('')] element = elements[0] for next_element in elements[1:]: element += element_cls(' ') + next_element return [element] def _parse_figure(self, el, refs, specials): caps = self._css(self.figure_caption_css, el) caption = self._parse_text(caps[0], refs=refs, specials=specials, element_cls=Caption)[0] if caps else Caption('') fig = 
Figure(caption, id=el.get('id', None)) return [fig] def _parse_table_rows(self, els, refs, specials): hdict = {} for row, tr in enumerate(els): colnum = 0 for td in self._css(self.table_cell_css, tr): cell = self._parse_text(td, refs=refs, specials=specials, element_cls=Cell) colspan = int(td.get('colspan', '1')) rowspan = int(td.get('rowspan', '1')) for i in range(colspan): for j in range(rowspan): rownum = row + j if not rownum in hdict: hdict[rownum] = {} while colnum in hdict[rownum]: colnum += 1 hdict[rownum][colnum] = cell[0] if len(cell) > 0 else Cell('') colnum += 1 rows = [] for row in sorted(hdict): rows.append([]) for col in sorted(hdict[row]): rows[-1].append(hdict[row][col]) for r in rows: r.extend([Cell('')] * (len(max(rows, key=len)) - len(r))) rows = [r for r in rows if any(r)] return rows def _parse_table_footnotes(self, fns, refs, specials): return [self._parse_text(fn, refs=refs, specials=specials, element_cls=Footnote)[0] for fn in fns] def _parse_reference(self, el): if '#' in el.get('href', ''): return [el.get('href').split('#', 1)[1]] elif 'rid' in el.attrib: return [el.attrib['rid']] elif 'idref' in el.attrib: return [el.attrib['idref']] else: return [''.join(el.itertext()).strip()] def _parse_table(self, el, refs, specials): caps = self._css(self.table_caption_css, el) caption = self._parse_text(caps[0], refs=refs, specials=specials, element_cls=Caption)[0] if caps else Caption('') hrows = self._parse_table_rows(self._css(self.table_head_row_css, el), refs=refs, specials=specials) rows = self._parse_table_rows(self._css(self.table_body_row_css, el), refs=refs, specials=specials) footnotes = self._parse_table_footnotes(self._css(self.table_footnote_css, el), refs=refs, specials=specials) tab = Table(caption, headings=hrows, rows=rows, footnotes=footnotes, id=el.get('id', None)) return [tab] def _xpath(self, query, root): result = root.xpath(query, smart_strings=False) if type(result) is not list: result = [result] log.debug('Selecting XPath: {}: {}'.format(query, result)) return result def _css(self, query, root): return self._xpath(CssHTMLTranslator().css_to_xpath(query), root) def _is_inline(self, element): if element.tag not in {etree.Comment, etree.ProcessingInstruction} and element.tag.lower() in self.inline_elements: return True return False @abstractmethod
MIT License
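A hypothetical subclass sketch for the abstract _make_tree hook above; it is not one of the library's own readers, the class name MyHtmlReader is made up, and it only shows one way a concrete reader could build the tree with the lxml imports already present in markup.py.

from lxml import etree
from lxml.html import HTMLParser

from chemdataextractor.reader.markup import LxmlReader


class MyHtmlReader(LxmlReader):
    """Illustrative concrete reader: parse the raw string leniently as HTML."""

    def _make_tree(self, fstring):
        # recover=True lets lxml build a tree even from imperfect markup.
        return etree.fromstring(fstring, parser=HTMLParser(recover=True))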
thehappydinoa/ashssdk
lambda/awscli/customizations/s3/subcommands.py
CommandArchitecture.create_instructions
python
def create_instructions(self): if self.needs_filegenerator(): self.instructions.append('file_generator') if self.parameters.get('filters'): self.instructions.append('filters') if self.cmd == 'sync': self.instructions.append('comparator') self.instructions.append('file_info_builder') self.instructions.append('s3_handler')
This function creates the instructions based on the command name and extra parameters. Note that every command must include an s3_handler instruction, and it must be the last entry in the instruction list because it sends the request to S3 and does not yield anything.
https://github.com/thehappydinoa/ashssdk/blob/d251a08ba6c35d81cf41b3267db666b08e875515/lambda/awscli/customizations/s3/subcommands.py#L882-L897
import os import logging import sys from botocore.client import Config from dateutil.parser import parse from dateutil.tz import tzlocal from awscli.compat import six from awscli.compat import queue from awscli.customizations.commands import BasicCommand from awscli.customizations.s3.comparator import Comparator from awscli.customizations.s3.fileinfobuilder import FileInfoBuilder from awscli.customizations.s3.fileformat import FileFormat from awscli.customizations.s3.filegenerator import FileGenerator from awscli.customizations.s3.fileinfo import FileInfo from awscli.customizations.s3.filters import create_filter from awscli.customizations.s3.s3handler import S3TransferHandlerFactory from awscli.customizations.s3.utils import find_bucket_key, AppendFilter, find_dest_path_comp_key, human_readable_size, RequestParamsMapper, split_s3_bucket_key from awscli.customizations.utils import uni_print from awscli.customizations.s3.syncstrategy.base import MissingFileSync, SizeAndLastModifiedSync, NeverSync from awscli.customizations.s3 import transferconfig LOGGER = logging.getLogger(__name__) RECURSIVE = {'name': 'recursive', 'action': 'store_true', 'dest': 'dir_op', 'help_text': ( "Command is performed on all files or objects " "under the specified directory or prefix.")} HUMAN_READABLE = {'name': 'human-readable', 'action': 'store_true', 'help_text': "Displays file sizes in human readable format."} SUMMARIZE = {'name': 'summarize', 'action': 'store_true', 'help_text': ( "Displays summary information " "(number of objects, total size).")} DRYRUN = {'name': 'dryrun', 'action': 'store_true', 'help_text': ( "Displays the operations that would be performed using the " "specified command without actually running them.")} QUIET = {'name': 'quiet', 'action': 'store_true', 'help_text': ( "Does not display the operations performed from the specified " "command.")} FORCE = {'name': 'force', 'action': 'store_true', 'help_text': ( "Deletes all objects in the bucket including the bucket itself. " "Note that versioned objects will not be deleted in this " "process which would cause the bucket deletion to fail because " "the bucket would not be empty. To delete versioned " "objects use the ``s3api delete-object`` command with " "the ``--version-id`` parameter.")} FOLLOW_SYMLINKS = {'name': 'follow-symlinks', 'action': 'store_true', 'default': True, 'group_name': 'follow_symlinks', 'help_text': ( "Symbolic links are followed " "only when uploading to S3 from the local filesystem. " "Note that S3 does not support symbolic links, so the " "contents of the link target are uploaded under the " "name of the link. When neither ``--follow-symlinks`` " "nor ``--no-follow-symlinks`` is specifed, the default " "is to follow symlinks.")} NO_FOLLOW_SYMLINKS = {'name': 'no-follow-symlinks', 'action': 'store_false', 'dest': 'follow_symlinks', 'default': True, 'group_name': 'follow_symlinks'} NO_GUESS_MIME_TYPE = {'name': 'no-guess-mime-type', 'action': 'store_false', 'dest': 'guess_mime_type', 'default': True, 'help_text': ( "Do not try to guess the mime type for " "uploaded files. By default the mime type of a " "file is guessed when it is uploaded.")} CONTENT_TYPE = {'name': 'content-type', 'help_text': ( "Specify an explicit content type for this operation. 
" "This value overrides any guessed mime types.")} EXCLUDE = {'name': 'exclude', 'action': AppendFilter, 'nargs': 1, 'dest': 'filters', 'help_text': ( "Exclude all files or objects from the command that matches " "the specified pattern.")} INCLUDE = {'name': 'include', 'action': AppendFilter, 'nargs': 1, 'dest': 'filters', 'help_text': ( "Don't exclude files or objects " "in the command that match the specified pattern. " 'See <a href="http://docs.aws.amazon.com/cli/latest/reference' '/s3/index.html#use-of-exclude-and-include-filters">Use of ' 'Exclude and Include Filters</a> for details.')} ACL = {'name': 'acl', 'choices': ['private', 'public-read', 'public-read-write', 'authenticated-read', 'aws-exec-read', 'bucket-owner-read', 'bucket-owner-full-control', 'log-delivery-write'], 'help_text': ( "Sets the ACL for the object when the command is " "performed. If you use this parameter you must have the " '"s3:PutObjectAcl" permission included in the list of actions ' "for your IAM policy. " "Only accepts values of ``private``, ``public-read``, " "``public-read-write``, ``authenticated-read``, ``aws-exec-read``, " "``bucket-owner-read``, ``bucket-owner-full-control`` and " "``log-delivery-write``. " 'See <a href="http://docs.aws.amazon.com/AmazonS3/latest/dev/' 'acl-overview.html#canned-acl">Canned ACL</a> for details')} GRANTS = { 'name': 'grants', 'nargs': '+', 'help_text': ( '<p>Grant specific permissions to individual users or groups. You ' 'can supply a list of grants of the form</p><codeblock>--grants ' 'Permission=Grantee_Type=Grantee_ID [Permission=Grantee_Type=' 'Grantee_ID ...]</codeblock>To specify the same permission type ' 'for multiple ' 'grantees, specify the permission as such as <codeblock>--grants ' 'Permission=Grantee_Type=Grantee_ID,Grantee_Type=Grantee_ID,...' '</codeblock>Each value contains the following elements:' '<ul><li><code>Permission</code> - Specifies ' 'the granted permissions, and can be set to read, readacl, ' 'writeacl, or full.</li><li><code>Grantee_Type</code> - ' 'Specifies how the grantee is to be identified, and can be set ' 'to uri, emailaddress, or id.</li><li><code>Grantee_ID</code> - ' 'Specifies the grantee based on Grantee_Type. The ' '<code>Grantee_ID</code> value can be one of:<ul><li><b>uri</b> ' '- The group\'s URI. For more information, see ' '<a href="http://docs.aws.amazon.com/AmazonS3/latest/dev/' 'ACLOverview.html#SpecifyingGrantee">' 'Who Is a Grantee?</a></li>' '<li><b>emailaddress</b> - The account\'s email address.</li>' '<li><b>id</b> - The account\'s canonical ID</li></ul>' '</li></ul>' 'For more information on Amazon S3 access control, see ' '<a href="http://docs.aws.amazon.com/AmazonS3/latest/dev/' 'UsingAuthAccess.html">Access Control</a>')} SSE = { 'name': 'sse', 'nargs': '?', 'const': 'AES256', 'choices': ['AES256', 'aws:kms'], 'help_text': ( 'Specifies server-side encryption of the object in S3. ' 'Valid values are ``AES256`` and ``aws:kms``. If the parameter is ' 'specified but no value is provided, ``AES256`` is used.' ) } SSE_C = { 'name': 'sse-c', 'nargs': '?', 'const': 'AES256', 'choices': ['AES256'], 'help_text': ( 'Specifies server-side encryption using customer provided keys ' 'of the the object in S3. ``AES256`` is the only valid value. ' 'If the parameter is specified but no value is provided, ' '``AES256`` is used. If you provide this value, ``--sse-c-key`` ' 'must be specified as well.' 
) } SSE_C_KEY = { 'name': 'sse-c-key', 'help_text': ( 'The customer-provided encryption key to use to server-side ' 'encrypt the object in S3. If you provide this value, ' '``--sse-c`` must be specified as well. The key provided should ' '**not** be base64 encoded.' ) } SSE_KMS_KEY_ID = { 'name': 'sse-kms-key-id', 'help_text': ( 'The AWS KMS key ID that should be used to server-side ' 'encrypt the object in S3. Note that you should only ' 'provide this parameter if KMS key ID is different the ' 'default S3 master KMS key.' ) } SSE_C_COPY_SOURCE = { 'name': 'sse-c-copy-source', 'nargs': '?', 'const': 'AES256', 'choices': ['AES256'], 'help_text': ( 'This parameter should only be specified when copying an S3 object ' 'that was encrypted server-side with a customer-provided ' 'key. It specifies the algorithm to use when decrypting the source ' 'object. ``AES256`` is the only valid ' 'value. If the parameter is specified but no value is provided, ' '``AES256`` is used. If you provide this value, ' '``--sse-c-copy-source-key`` must be specfied as well. ' ) } SSE_C_COPY_SOURCE_KEY = { 'name': 'sse-c-copy-source-key', 'help_text': ( 'This parameter should only be specified when copying an S3 object ' 'that was encrypted server-side with a customer-provided ' 'key. Specifies the customer-provided encryption key for Amazon S3 ' 'to use to decrypt the source object. The encryption key provided ' 'must be one that was used when the source object was created. ' 'If you provide this value, ``--sse-c-copy-source`` be specfied as ' 'well. The key provided should **not** be base64 encoded.' ) } STORAGE_CLASS = {'name': 'storage-class', 'choices': ['STANDARD', 'REDUCED_REDUNDANCY', 'STANDARD_IA'], 'help_text': ( "The type of storage to use for the object. " "Valid choices are: STANDARD | REDUCED_REDUNDANCY " "| STANDARD_IA. " "Defaults to 'STANDARD'")} WEBSITE_REDIRECT = {'name': 'website-redirect', 'help_text': ( "If the bucket is configured as a website, " "redirects requests for this object to another object " "in the same bucket or to an external URL. Amazon S3 " "stores the value of this header in the object " "metadata.")} CACHE_CONTROL = {'name': 'cache-control', 'help_text': ( "Specifies caching behavior along the " "request/reply chain.")} CONTENT_DISPOSITION = {'name': 'content-disposition', 'help_text': ( "Specifies presentational information " "for the object.")} CONTENT_ENCODING = {'name': 'content-encoding', 'help_text': ( "Specifies what content encodings have been " "applied to the object and thus what decoding " "mechanisms must be applied to obtain the media-type " "referenced by the Content-Type header field.")} CONTENT_LANGUAGE = {'name': 'content-language', 'help_text': ("The language the content is in.")} SOURCE_REGION = {'name': 'source-region', 'help_text': ( "When transferring objects from an s3 bucket to an s3 " "bucket, this specifies the region of the source bucket." " Note the region specified by ``--region`` or through " "configuration of the CLI refers to the region of the " "destination bucket. If ``--source-region`` is not " "specified the region of the source will be the same " "as the region of the destination bucket.")} EXPIRES = { 'name': 'expires', 'help_text': ( "The date and time at which the object is no longer cacheable.") } METADATA = { 'name': 'metadata', 'cli_type_name': 'map', 'schema': { 'type': 'map', 'key': {'type': 'string'}, 'value': {'type': 'string'} }, 'help_text': ( "A map of metadata to store with the objects in S3. 
This will be " "applied to every object which is part of this request. In a sync, this " "means that files which haven't changed won't receive the new metadata. " "When copying between two s3 locations, the metadata-directive " "argument will default to 'REPLACE' unless otherwise specified." ) } METADATA_DIRECTIVE = { 'name': 'metadata-directive', 'choices': ['COPY', 'REPLACE'], 'help_text': ( 'Specifies whether the metadata is copied from the source object ' 'or replaced with metadata provided when copying S3 objects. ' 'Note that if the object is copied over in parts, the source ' 'object\'s metadata will not be copied over, no matter the value for ' '``--metadata-directive``, and instead the desired metadata values ' 'must be specified as parameters on the command line. ' 'Valid values are ``COPY`` and ``REPLACE``. If this parameter is not ' 'specified, ``COPY`` will be used by default. If ``REPLACE`` is used, ' 'the copied object will only have the metadata values that were' ' specified by the CLI command. Note that if you are ' 'using any of the following parameters: ``--content-type``, ' '``content-language``, ``--content-encoding``, ' '``--content-disposition``, ``--cache-control``, or ``--expires``, you ' 'will need to specify ``--metadata-directive REPLACE`` for ' 'non-multipart copies if you want the copied objects to have the ' 'specified metadata values.') } INDEX_DOCUMENT = {'name': 'index-document', 'help_text': ( 'A suffix that is appended to a request that is for ' 'a directory on the website endpoint (e.g. if the ' 'suffix is index.html and you make a request to ' 'samplebucket/images/ the data that is returned ' 'will be for the object with the key name ' 'images/index.html) The suffix must not be empty and ' 'must not include a slash character.')} ERROR_DOCUMENT = {'name': 'error-document', 'help_text': ( 'The object key name to use when ' 'a 4XX class error occurs.')} ONLY_SHOW_ERRORS = {'name': 'only-show-errors', 'action': 'store_true', 'help_text': ( 'Only errors and warnings are displayed. All other ' 'output is suppressed.')} EXPECTED_SIZE = {'name': 'expected-size', 'help_text': ( 'This argument specifies the expected size of a stream ' 'in terms of bytes. Note that this argument is needed ' 'only when a stream is being uploaded to s3 and the size ' 'is larger than 5GB. Failure to include this argument ' 'under these conditions may result in a failed upload ' 'due to too many parts in upload.')} PAGE_SIZE = {'name': 'page-size', 'cli_type_name': 'integer', 'help_text': ( 'The number of results to return in each response to a list ' 'operation. The default value is 1000 (the maximum allowed). ' 'Using a lower value may help if an operation times out.')} IGNORE_GLACIER_WARNINGS = { 'name': 'ignore-glacier-warnings', 'action': 'store_true', 'help_text': ( 'Turns off glacier warnings. Warnings about an operation that cannot ' 'be performed because it involves copying, downloading, or moving ' 'a glacier object will no longer be printed to standard error and ' 'will no longer cause the return code of the command to be ``2``.' ) } FORCE_GLACIER_TRANSFER = { 'name': 'force-glacier-transfer', 'action': 'store_true', 'help_text': ( 'Forces a transfer request on all Glacier objects in a sync or ' 'recursive copy.' ) } REQUEST_PAYER = { 'name': 'request-payer', 'choices': ['requester'], 'nargs': '?', 'const': 'requester', 'help_text': ( 'Confirms that the requester knows that she or he will be charged ' 'for the request. 
Bucket owners need not specify this parameter in ' 'their requests. Documentation on downloading objects from requester ' 'pays buckets can be found at ' 'http://docs.aws.amazon.com/AmazonS3/latest/dev/' 'ObjectsinRequesterPaysBuckets.html' ) } TRANSFER_ARGS = [DRYRUN, QUIET, INCLUDE, EXCLUDE, ACL, FOLLOW_SYMLINKS, NO_FOLLOW_SYMLINKS, NO_GUESS_MIME_TYPE, SSE, SSE_C, SSE_C_KEY, SSE_KMS_KEY_ID, SSE_C_COPY_SOURCE, SSE_C_COPY_SOURCE_KEY, STORAGE_CLASS, GRANTS, WEBSITE_REDIRECT, CONTENT_TYPE, CACHE_CONTROL, CONTENT_DISPOSITION, CONTENT_ENCODING, CONTENT_LANGUAGE, EXPIRES, SOURCE_REGION, ONLY_SHOW_ERRORS, PAGE_SIZE, IGNORE_GLACIER_WARNINGS, FORCE_GLACIER_TRANSFER] def get_client(session, region, endpoint_url, verify, config=None): return session.create_client('s3', region_name=region, endpoint_url=endpoint_url, verify=verify, config=config) class S3Command(BasicCommand): def _run_main(self, parsed_args, parsed_globals): self.client = get_client(self._session, parsed_globals.region, parsed_globals.endpoint_url, parsed_globals.verify_ssl) class ListCommand(S3Command): NAME = 'ls' DESCRIPTION = ("List S3 objects and common prefixes under a prefix or " "all S3 buckets. Note that the --output and --no-paginate " "arguments are ignored for this command.") USAGE = "<S3Uri> or NONE" ARG_TABLE = [{'name': 'paths', 'nargs': '?', 'default': 's3://', 'positional_arg': True, 'synopsis': USAGE}, RECURSIVE, PAGE_SIZE, HUMAN_READABLE, SUMMARIZE, REQUEST_PAYER] def _run_main(self, parsed_args, parsed_globals): super(ListCommand, self)._run_main(parsed_args, parsed_globals) self._empty_result = False self._at_first_page = True self._size_accumulator = 0 self._total_objects = 0 self._human_readable = parsed_args.human_readable path = parsed_args.paths if path.startswith('s3://'): path = path[5:] bucket, key = find_bucket_key(path) if not bucket: self._list_all_buckets() elif parsed_args.dir_op: self._list_all_objects_recursive( bucket, key, parsed_args.page_size, parsed_args.request_payer) else: self._list_all_objects( bucket, key, parsed_args.page_size, parsed_args.request_payer) if parsed_args.summarize: self._print_summary() if key: return self._check_no_objects() else: return 0 def _list_all_objects(self, bucket, key, page_size=None, request_payer=None): paginator = self.client.get_paginator('list_objects') paging_args = { 'Bucket': bucket, 'Prefix': key, 'Delimiter': '/', 'PaginationConfig': {'PageSize': page_size} } if request_payer is not None: paging_args['RequestPayer'] = request_payer iterator = paginator.paginate(**paging_args) for response_data in iterator: self._display_page(response_data) def _display_page(self, response_data, use_basename=True): common_prefixes = response_data.get('CommonPrefixes', []) contents = response_data.get('Contents', []) if not contents and not common_prefixes: self._empty_result = True return for common_prefix in common_prefixes: prefix_components = common_prefix['Prefix'].split('/') prefix = prefix_components[-2] pre_string = "PRE".rjust(30, " ") print_str = pre_string + ' ' + prefix + '/\n' uni_print(print_str) for content in contents: last_mod_str = self._make_last_mod_str(content['LastModified']) self._size_accumulator += int(content['Size']) self._total_objects += 1 size_str = self._make_size_str(content['Size']) if use_basename: filename_components = content['Key'].split('/') filename = filename_components[-1] else: filename = content['Key'] print_str = last_mod_str + ' ' + size_str + ' ' + filename + '\n' uni_print(print_str) self._at_first_page = False def 
_list_all_buckets(self): response_data = self.client.list_buckets() buckets = response_data['Buckets'] for bucket in buckets: last_mod_str = self._make_last_mod_str(bucket['CreationDate']) print_str = last_mod_str + ' ' + bucket['Name'] + '\n' uni_print(print_str) def _list_all_objects_recursive(self, bucket, key, page_size=None, request_payer=None): paginator = self.client.get_paginator('list_objects') paging_args = { 'Bucket': bucket, 'Prefix': key, 'PaginationConfig': {'PageSize': page_size} } if request_payer is not None: paging_args['RequestPayer'] = request_payer iterator = paginator.paginate(**paging_args) for response_data in iterator: self._display_page(response_data, use_basename=False) def _check_no_objects(self): if self._empty_result and self._at_first_page: return 1 return 0 def _make_last_mod_str(self, last_mod): last_mod = parse(last_mod) last_mod = last_mod.astimezone(tzlocal()) last_mod_tup = (str(last_mod.year), str(last_mod.month).zfill(2), str(last_mod.day).zfill(2), str(last_mod.hour).zfill(2), str(last_mod.minute).zfill(2), str(last_mod.second).zfill(2)) last_mod_str = "%s-%s-%s %s:%s:%s" % last_mod_tup return last_mod_str.ljust(19, ' ') def _make_size_str(self, size): if self._human_readable: size_str = human_readable_size(size) else: size_str = str(size) return size_str.rjust(10, ' ') def _print_summary(self): print_str = str(self._total_objects) uni_print("\nTotal Objects: ".rjust(15, ' ') + print_str + "\n") if self._human_readable: print_str = human_readable_size(self._size_accumulator) else: print_str = str(self._size_accumulator) uni_print("Total Size: ".rjust(15, ' ') + print_str + "\n") class WebsiteCommand(S3Command): NAME = 'website' DESCRIPTION = 'Set the website configuration for a bucket.' USAGE = '<S3Uri>' ARG_TABLE = [{'name': 'paths', 'nargs': 1, 'positional_arg': True, 'synopsis': USAGE}, INDEX_DOCUMENT, ERROR_DOCUMENT] def _run_main(self, parsed_args, parsed_globals): super(WebsiteCommand, self)._run_main(parsed_args, parsed_globals) bucket = self._get_bucket_name(parsed_args.paths[0]) website_configuration = self._build_website_configuration(parsed_args) self.client.put_bucket_website( Bucket=bucket, WebsiteConfiguration=website_configuration) return 0 def _build_website_configuration(self, parsed_args): website_config = {} if parsed_args.index_document is not None: website_config['IndexDocument'] = {'Suffix': parsed_args.index_document} if parsed_args.error_document is not None: website_config['ErrorDocument'] = {'Key': parsed_args.error_document} return website_config def _get_bucket_name(self, path): if path.startswith('s3://'): path = path[5:] if path.endswith('/'): path = path[:-1] return path class PresignCommand(S3Command): NAME = 'presign' DESCRIPTION = ( "Generate a pre-signed URL for an Amazon S3 object. This allows " "anyone who receives the pre-signed URL to retrieve the S3 object " "with an HTTP GET request. For sigv4 requests the region needs to be " "configured explicitly." ) USAGE = "<S3Uri>" ARG_TABLE = [{'name': 'path', 'positional_arg': True, 'synopsis': USAGE}, {'name': 'expires-in', 'default': 3600, 'cli_type_name': 'integer', 'help_text': ( 'Number of seconds until the pre-signed ' 'URL expires. 
Default is 3600 seconds.')}] def _run_main(self, parsed_args, parsed_globals): super(PresignCommand, self)._run_main(parsed_args, parsed_globals) path = parsed_args.path if path.startswith('s3://'): path = path[5:] bucket, key = find_bucket_key(path) url = self.client.generate_presigned_url( 'get_object', {'Bucket': bucket, 'Key': key}, ExpiresIn=parsed_args.expires_in ) uni_print(url) uni_print('\n') return 0 class S3TransferCommand(S3Command): def _run_main(self, parsed_args, parsed_globals): super(S3TransferCommand, self)._run_main(parsed_args, parsed_globals) self._convert_path_args(parsed_args) params = self._build_call_parameters(parsed_args, {}) cmd_params = CommandParameters(self.NAME, params, self.USAGE) cmd_params.add_region(parsed_globals) cmd_params.add_endpoint_url(parsed_globals) cmd_params.add_verify_ssl(parsed_globals) cmd_params.add_page_size(parsed_args) cmd_params.add_paths(parsed_args.paths) runtime_config = transferconfig.RuntimeConfig().build_config( **self._session.get_scoped_config().get('s3', {})) cmd = CommandArchitecture(self._session, self.NAME, cmd_params.parameters, runtime_config) cmd.set_clients() cmd.create_instructions() return cmd.run() def _build_call_parameters(self, args, command_params): for name, value in vars(args).items(): command_params[name] = value return command_params def _convert_path_args(self, parsed_args): if not isinstance(parsed_args.paths, list): parsed_args.paths = [parsed_args.paths] for i in range(len(parsed_args.paths)): path = parsed_args.paths[i] if isinstance(path, six.binary_type): dec_path = path.decode(sys.getfilesystemencoding()) enc_path = dec_path.encode('utf-8') new_path = enc_path.decode('utf-8') parsed_args.paths[i] = new_path class CpCommand(S3TransferCommand): NAME = 'cp' DESCRIPTION = "Copies a local file or S3 object to another location " "locally or in S3." USAGE = "<LocalPath> <S3Uri> or <S3Uri> <LocalPath> " "or <S3Uri> <S3Uri>" ARG_TABLE = [{'name': 'paths', 'nargs': 2, 'positional_arg': True, 'synopsis': USAGE}] + TRANSFER_ARGS + [METADATA, METADATA_DIRECTIVE, EXPECTED_SIZE, RECURSIVE] class MvCommand(S3TransferCommand): NAME = 'mv' DESCRIPTION = "Moves a local file or S3 object to " "another location locally or in S3." USAGE = "<LocalPath> <S3Uri> or <S3Uri> <LocalPath> " "or <S3Uri> <S3Uri>" ARG_TABLE = [{'name': 'paths', 'nargs': 2, 'positional_arg': True, 'synopsis': USAGE}] + TRANSFER_ARGS + [METADATA, METADATA_DIRECTIVE, RECURSIVE] class RmCommand(S3TransferCommand): NAME = 'rm' DESCRIPTION = "Deletes an S3 object." USAGE = "<S3Uri>" ARG_TABLE = [{'name': 'paths', 'nargs': 1, 'positional_arg': True, 'synopsis': USAGE}, DRYRUN, QUIET, RECURSIVE, INCLUDE, EXCLUDE, ONLY_SHOW_ERRORS, PAGE_SIZE] class SyncCommand(S3TransferCommand): NAME = 'sync' DESCRIPTION = "Syncs directories and S3 prefixes. Recursively copies " "new and updated files from the source directory to " "the destination. Only creates folders in the destination " "if they contain one or more files." USAGE = "<LocalPath> <S3Uri> or <S3Uri> " "<LocalPath> or <S3Uri> <S3Uri>" ARG_TABLE = [{'name': 'paths', 'nargs': 2, 'positional_arg': True, 'synopsis': USAGE}] + TRANSFER_ARGS + [METADATA, METADATA_DIRECTIVE] class MbCommand(S3Command): NAME = 'mb' DESCRIPTION = "Creates an S3 bucket." 
USAGE = "<S3Uri>" ARG_TABLE = [{'name': 'path', 'positional_arg': True, 'synopsis': USAGE}] def _run_main(self, parsed_args, parsed_globals): super(MbCommand, self)._run_main(parsed_args, parsed_globals) if not parsed_args.path.startswith('s3://'): raise TypeError("%s\nError: Invalid argument type" % self.USAGE) bucket, _ = split_s3_bucket_key(parsed_args.path) bucket_config = {'LocationConstraint': self.client.meta.region_name} params = {'Bucket': bucket} if self.client.meta.region_name != 'us-east-1': params['CreateBucketConfiguration'] = bucket_config try: self.client.create_bucket(**params) uni_print("make_bucket: %s\n" % bucket) return 0 except Exception as e: uni_print( "make_bucket failed: %s %s\n" % (parsed_args.path, e), sys.stderr ) return 1 class RbCommand(S3Command): NAME = 'rb' DESCRIPTION = ( "Deletes an empty S3 bucket. A bucket must be completely empty " "of objects and versioned objects before it can be deleted. " "However, the ``--force`` parameter can be used to delete " "the non-versioned objects in the bucket before the bucket is " "deleted." ) USAGE = "<S3Uri>" ARG_TABLE = [{'name': 'path', 'positional_arg': True, 'synopsis': USAGE}, FORCE] def _run_main(self, parsed_args, parsed_globals): super(RbCommand, self)._run_main(parsed_args, parsed_globals) if not parsed_args.path.startswith('s3://'): raise TypeError("%s\nError: Invalid argument type" % self.USAGE) bucket, key = split_s3_bucket_key(parsed_args.path) if key: raise ValueError('Please specify a valid bucket name only.' ' E.g. s3://%s' % bucket) if parsed_args.force: self._force(parsed_args.path, parsed_globals) try: self.client.delete_bucket(Bucket=bucket) uni_print("remove_bucket: %s\n" % bucket) return 0 except Exception as e: uni_print( "remove_bucket failed: %s %s\n" % (parsed_args.path, e), sys.stderr ) return 1 def _force(self, path, parsed_globals): rm = RmCommand(self._session) rc = rm([path, '--recursive'], parsed_globals) if rc != 0: raise RuntimeError( "remove_bucket failed: Unable to delete all objects in the " "bucket, bucket will not be deleted.") class CommandArchitecture(object): def __init__(self, session, cmd, parameters, runtime_config=None): self.session = session self.cmd = cmd self.parameters = parameters self.instructions = [] self._runtime_config = runtime_config self._endpoint = None self._source_endpoint = None self._client = None self._source_client = None def set_clients(self): client_config = None if self.parameters.get('sse') == 'aws:kms': client_config = Config(signature_version='s3v4') self._client = get_client( self.session, region=self.parameters['region'], endpoint_url=self.parameters['endpoint_url'], verify=self.parameters['verify_ssl'], config=client_config ) self._source_client = get_client( self.session, region=self.parameters['region'], endpoint_url=self.parameters['endpoint_url'], verify=self.parameters['verify_ssl'], config=client_config ) if self.parameters['source_region']: if self.parameters['paths_type'] == 's3s3': self._source_client = get_client( self.session, region=self.parameters['source_region'], endpoint_url=None, verify=self.parameters['verify_ssl'], config=client_config )
MIT License
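A standalone restatement of the branching in create_instructions above, not the awscli class itself, just to make the resulting pipelines visible; the helper name instructions_for and the needs_filegenerator flag are illustrative stand-ins for the method's internal checks.

def instructions_for(cmd, has_filters, needs_filegenerator=True):
    # Mirrors the ordering used by CommandArchitecture.create_instructions().
    steps = []
    if needs_filegenerator:
        steps.append('file_generator')
    if has_filters:
        steps.append('filters')
    if cmd == 'sync':
        steps.append('comparator')
    steps.append('file_info_builder')
    steps.append('s3_handler')  # always last: this step issues the actual S3 requests
    return steps


assert instructions_for('cp', has_filters=False) == [
    'file_generator', 'file_info_builder', 's3_handler']
assert instructions_for('sync', has_filters=True) == [
    'file_generator', 'filters', 'comparator', 'file_info_builder', 's3_handler']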
aspose-words-cloud/aspose-words-cloud-python
asposewordscloud/models/document_entry.py
DocumentEntry.import_format_mode
python
def import_format_mode(self, import_format_mode): self._import_format_mode = import_format_mode
Sets the import_format_mode of this DocumentEntry. Gets or sets the option that controls which formatting is used: that of the appended document or that of the destination document. Can be KeepSourceFormatting or UseDestinationStyles. # noqa: E501 :param import_format_mode: The import_format_mode of this DocumentEntry. # noqa: E501 :type: str
https://github.com/aspose-words-cloud/aspose-words-cloud-python/blob/abf8fccfed40aa2b09c6cdcaf3f2723e1f412d85/asposewordscloud/models/document_entry.py#L101-L109
import pprint import re import datetime import six import json class DocumentEntry(object): """ Attributes: swagger_types (dict): The key is attribute name and the value is attribute type. attribute_map (dict): The key is attribute name and the value is json key in definition. """ swagger_types = { 'href': 'str', 'import_format_mode': 'str' } attribute_map = { 'href': 'Href', 'import_format_mode': 'ImportFormatMode' } def __init__(self, href=None, import_format_mode=None): self._href = None self._import_format_mode = None self.discriminator = None if href is not None: self.href = href if import_format_mode is not None: self.import_format_mode = import_format_mode @property def href(self): return self._href @href.setter def href(self, href): self._href = href @property def import_format_mode(self): return self._import_format_mode @import_format_mode.setter
MIT License
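A minimal usage sketch for the setter above, assuming the model imports from the module path given in this entry; the href value is illustrative.

from asposewordscloud.models.document_entry import DocumentEntry

entry = DocumentEntry(href='source-document.docx')
entry.import_format_mode = 'KeepSourceFormatting'  # goes through the setter shown above
assert entry.import_format_mode == 'KeepSourceFormatting'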
pegasystems/building-bridges
bridges/database/mongo.py
create_survey
python
def create_survey(title: str, hide_votes: bool, is_anonymous: bool, question_author_name_field_visible: bool, limit_question_characters_enabled: bool, limit_question_characters: int, results_secret: str, admin_secret: str, description: str, author: User) -> str: encoded_uri_title = get_url_from_title(title) number = __get_new_survey_number(encoded_uri_title) survey = Survey( title=title, number=number, description=description, hide_votes=hide_votes, is_anonymous=is_anonymous, question_author_name_field_visible=question_author_name_field_visible, limit_question_characters_enabled=limit_question_characters_enabled, limit_question_characters=limit_question_characters, results_secret=results_secret, author=author, url=encoded_uri_title, admin_secret=admin_secret ) surveys_collection.insert_one(survey.as_dict()) return get_url(encoded_uri_title, number)
Create a new survey in the db and return its URL.
https://github.com/pegasystems/building-bridges/blob/1e972290e95d2dd3078401ee2193df47d90f3d6e/bridges/database/mongo.py#L77-L106
from urllib.parse import quote_plus import logging from typing import List from dacite import from_dict from dacite.exceptions import ( DaciteFieldError, ForwardReferenceError, MissingValueError, UnexpectedDataError, WrongTypeError, ) from pymongo import MongoClient from pymongo.errors import ConnectionFailure from bson.objectid import ObjectId from werkzeug.wrappers import Response from bridges.errors import NotFoundError from bridges.utils import get_url_from_title, get_url, get_url_and_number from bridges.database.objects.vote import Vote from bridges.database.objects.question_user_context import QuestionUserContext from bridges.database.objects.question import Question from bridges.database.objects.survey import Survey from bridges.database.objects.user import User from bridges.argument_parser import args surveys_collection = None MONGO_QUESTIONS_ID = 'questions._id' QUESTION_NOT_FOUND_ERROR_MESSAGE = "Question not found." SURVEY_NOT_FOUND_ERROR_MESSAGE = "Survey not found." MONGO_PUSH = '$push' MONGO_PULL = '$pull' MONGO_SET = '$set' def init() -> None: global surveys_collection global replies_collection logging.info("Connecting to database %s", args.database_uri) client = MongoClient(host=args.database_uri, username=quote_plus(args.database_user), password=quote_plus(args.database_password), retryWrites=False) try: client.admin.command('ismaster') except ConnectionFailure: logging.error("Could not connect to database %s", args.database_uri) raise db = client[args.database_name] surveys_collection = db.surveys replies_collection = db.replies logging.info("Connected to database %s", args.database_uri) def add_vote(user: User, question_id: str, vote_type: str) -> None: vote = Vote(user, vote_type) surveys_collection.update_one( {MONGO_QUESTIONS_ID: ObjectId(question_id)}, {MONGO_PUSH: {'questions.$.votes': vote.as_dict()}} )
MIT License
ivannz/cplxmodule
cplxmodule/cplx.py
convnd_quick
python
def convnd_quick(conv, input, weight, stride=1, padding=0, dilation=1): n_out = int(weight.shape[0]) ww = torch.cat([weight.real, weight.imag], dim=0) wr = conv(input.real, ww, None, stride, padding, dilation, 1) wi = conv(input.imag, ww, None, stride, padding, dilation, 1) rwr, iwr = wr[:, :n_out], wr[:, n_out:] rwi, iwi = wi[:, :n_out], wi[:, n_out:] return Cplx(rwr - iwi, iwr + rwi)
r"""Applies a complex convolution transformation to the complex data :math:`y = x \ast W + b` using two calls to `conv` at the cost of extra concatenation and slicing.
https://github.com/ivannz/cplxmodule/blob/d5fc89496ca4ea1f0a589a6d36c7ea2d4a8c9ef6/cplxmodule/cplx.py#L697-L711
import warnings from copy import deepcopy import torch import torch.nn.functional as F from math import sqrt from .utils import complex_view, fix_dim class Cplx(object): __slots__ = ("__real", "__imag") def __new__(cls, real, imag=None): if isinstance(real, cls): return real if isinstance(real, complex): real, imag = torch.tensor(real.real), torch.tensor(real.imag) elif isinstance(real, float): if imag is None: imag = 0.0 elif not isinstance(imag, float): raise TypeError("""Imaginary part must be float.""") real, imag = torch.tensor(real), torch.tensor(imag) elif not isinstance(real, torch.Tensor): raise TypeError("""Real part must be torch.Tensor.""") if imag is None: imag = torch.zeros_like(real) elif not isinstance(imag, torch.Tensor): raise TypeError("""Imaginary part must be torch.Tensor.""") if real.shape != imag.shape: raise ValueError("""Real and imaginary parts have """ """mistmatching shape.""") self = super().__new__(cls) self.__real, self.__imag = real, imag return self def __copy__(self): return type(self)(self.__real, self.__imag) def __deepcopy__(self, memo): real = deepcopy(self.__real, memo) imag = deepcopy(self.__imag, memo) return type(self)(real, imag) @property def real(self): return self.__real @property def imag(self): return self.__imag def __getitem__(self, key): return type(self)(self.__real[key], self.__imag[key]) def __setitem__(self, key, value): if not isinstance(value, (Cplx, complex)): self.__real[key], self.__imag[key] = value, value else: self.__real[key], self.__imag[key] = value.real, value.imag def __iter__(self): return map(type(self), self.__real, self.__imag) def __reversed__(self): return type(self)(reversed(self.__real), reversed(self.__imag)) def clone(self): return type(self)(self.__real.clone(), self.__imag.clone()) @property def conj(self): return type(self)(self.__real, -self.__imag) def conjugate(self): return self.conj def __pos__(self): return self def __neg__(self): return type(self)(-self.__real, -self.__imag) def __add__(u, v): if not isinstance(v, (Cplx, complex)): return type(u)(u.__real + v, u.__imag) return type(u)(u.__real + v.real, u.__imag + v.imag) __radd__ = __add__ __iadd__ = __add__ def __sub__(u, v): if not isinstance(v, (Cplx, complex)): return type(u)(u.__real - v, u.__imag) return type(u)(u.__real - v.real, u.__imag - v.imag) def __rsub__(u, v): return -u + v __isub__ = __sub__ def __mul__(u, v): if not isinstance(v, (Cplx, complex)): return type(u)(u.__real * v, u.__imag * v) return type(u)(u.__real * v.real - u.__imag * v.imag, u.__imag * v.real + u.__real * v.imag) __rmul__ = __mul__ __imul__ = __mul__ def __truediv__(u, v): if not isinstance(v, (Cplx, complex)): return type(u)(u.__real / v, u.__imag / v) denom = v.real * v.real + v.imag * v.imag return u * (v.conjugate() / denom) def __rtruediv__(u, v): denom = u.__real * u.__real + u.__imag * u.__imag return (u.conjugate() / denom) * v __itruediv__ = __truediv__ def __matmul__(u, v): if not isinstance(v, Cplx): return type(u)(torch.matmul(u.__real, v), torch.matmul(u.__imag, v)) re = torch.matmul(u.__real, v.__real) - torch.matmul(u.__imag, v.__imag) im = torch.matmul(u.__imag, v.__real) + torch.matmul(u.__real, v.__imag) return type(u)(re, im) def __rmatmul__(u, v): return type(u)(torch.matmul(v, u.__real), torch.matmul(v, u.__imag)) __imatmul__ = __matmul__ def __abs__(self): input = torch.stack([self.__real, self.__imag], dim=0) return torch.norm(input, p=2, dim=0, keepdim=False) @property def angle(self): return torch.atan2(self.__imag, self.__real) def 
apply(self, f, *a, **k): return type(self)(f(self.__real, *a, **k), f(self.__imag, *a, **k)) @property def shape(self): return self.__real.shape def __len__(self): return self.shape[0] def t(self): return type(self)(self.__real.t(), self.__imag.t()) def h(self): return self.conj.t() def flatten(self, start_dim=0, end_dim=-1): return type(self)(self.__real.flatten(start_dim, end_dim), self.__imag.flatten(start_dim, end_dim)) def view(self, *shape): shape = shape[0] if shape and isinstance(shape[0], tuple) else shape return type(self)(self.__real.view(*shape), self.__imag.view(*shape)) def view_as(self, other): shape = other.shape return self.view(*shape) def reshape(self, *shape): shape = shape[0] if shape and isinstance(shape[0], tuple) else shape return type(self)(self.__real.reshape(*shape), self.__imag.reshape(*shape)) def size(self, *dim): return self.__real.size(*dim) def squeeze(self, dim=None): if dim is None: return type(self)(self.__real.squeeze(), self.__imag.squeeze()) else: return type(self)( self.__real.squeeze(dim=dim), self.__imag.squeeze(dim=dim) ) def unsqueeze(self, dim=None): if dim is None: return type(self)(self.__real.unsqueeze(), self.__imag.unsqueeze()) else: return type(self)( self.__real.unsqueeze(dim=dim), self.__imag.unsqueeze(dim=dim) ) def item(self): return float(self.__real) + 1j * float(self.__imag) @classmethod def from_numpy(cls, numpy): re = torch.from_numpy(numpy.real) im = torch.from_numpy(numpy.imag) return cls(re, im) def numpy(self): return self.__real.numpy() + 1j * self.__imag.numpy() def __repr__(self): return f"{self.__class__.__name__}(\n" f" real={self.__real},\n imag={self.__imag}\n)" def detach(self): return type(self)(self.__real.detach(), self.__imag.detach()) def requires_grad_(self, requires_grad=True): return type(self)(self.__real.requires_grad_(requires_grad), self.__imag.requires_grad_(requires_grad)) @property def grad(self): re, im = self.__real.grad, self.__imag.grad return None if re is None or im is None else type(self)(re, im) def cuda(self, device=None, non_blocking=False): re = self.__real.cuda(device=device, non_blocking=non_blocking) im = self.__imag.cuda(device=device, non_blocking=non_blocking) return type(self)(re, im) def cpu(self): return type(self)(self.__real.cpu(), self.__imag.cpu()) def to(self, *args, **kwargs): return type(self)(self.__real.to(*args, **kwargs), self.__imag.to(*args, **kwargs)) @property def device(self): return self.__real.device @property def dtype(self): return self.__real.dtype def dim(self): return len(self.shape) def permute(self, *dims): return type(self)(self.__real.permute(*dims), self.__imag.permute(*dims)) def transpose(self, dim0, dim1): return type(self)(self.__real.transpose(dim0, dim1), self.__imag.transpose(dim0, dim1)) def is_complex(self): return True @classmethod def empty(cls, *sizes, dtype=None, device=None, requires_grad=False): re = torch.empty(*sizes, dtype=dtype, device=device, requires_grad=requires_grad) return cls(re, torch.empty_like(re, requires_grad=requires_grad)) @classmethod def zeros(cls, *sizes, dtype=None, device=None, requires_grad=False): re = torch.zeros(*sizes, dtype=dtype, device=device, requires_grad=requires_grad) return cls(re, torch.zeros_like(re, requires_grad=requires_grad)) @classmethod def ones(cls, *sizes, dtype=None, device=None, requires_grad=False): re = torch.ones(*sizes, dtype=dtype, device=device, requires_grad=requires_grad) return cls(re, torch.zeros_like(re, requires_grad=requires_grad)) def cat(tensors, dim): tensors = [*map(Cplx, 
tensors)] return Cplx(torch.cat([z.real for z in tensors], dim=dim), torch.cat([z.imag for z in tensors], dim=dim)) def split(input, split_size_or_sections, dim=0): return tuple(Cplx(re, im) for re, im in zip( torch.split(input.real, split_size_or_sections, dim), torch.split(input.imag, split_size_or_sections, dim), )) def chunk(input, chunks, dim=0): return tuple(Cplx(re, im) for re, im in zip( torch.chunk(input.real, chunks, dim), torch.chunk(input.imag, chunks, dim), )) def stack(tensors, dim): tensors = [*map(Cplx, tensors)] return Cplx(torch.stack([z.real for z in tensors], dim=dim), torch.stack([z.imag for z in tensors], dim=dim)) def unbind(input, dim=0): return tuple(Cplx(re, im) for re, im in zip( torch.unbind(input.real, dim), torch.unbind(input.imag, dim), )) def take(input, index): return Cplx(torch.take(input.real, index), torch.take(input.imag, index)) def narrow(input, dim, start, length): return Cplx(torch.narrow(input.real, dim, start, length), torch.narrow(input.imag, dim, start, length)) def squeeze(input, dim=None): return Cplx(torch.squeeze(input.real, dim), torch.squeeze(input.imag, dim)) def unsqueeze(input, dim): return Cplx(torch.unsqueeze(input.real, dim), torch.unsqueeze(input.imag, dim)) def from_interleaved_real(input, copy=True, dim=-1): output = Cplx(*complex_view(input, dim, squeeze=False)) return output.clone() if copy else output from_real = from_interleaved_real def from_concatenated_real(input, copy=True, dim=-1): output = Cplx(*torch.chunk(input, 2, dim=dim)) return output.clone() if copy else output def to_interleaved_real(input, flatten=True, dim=-1): dim = 1 + fix_dim(dim, input.dim()) input = torch.stack([input.real, input.imag], dim=dim) return input.flatten(dim-1, dim) if flatten else input to_real = to_interleaved_real def to_concatenated_real(input, flatten=None, dim=-1): assert flatten is None return torch.cat([input.real, input.imag], dim=dim) def exp(input): scale = torch.exp(input.real) return Cplx(scale * torch.cos(input.imag), scale * torch.sin(input.imag)) def log(input): return Cplx(torch.log(abs(input)), input.angle) def sin(input): return Cplx(torch.sin(input.real) * torch.cosh(input.imag), torch.cos(input.real) * torch.sinh(input.imag)) def cos(input): return Cplx(torch.cos(input.real) * torch.cosh(input.imag), - torch.sin(input.real) * torch.sinh(input.imag)) def tan(input): return sin(input) / cos(input) def sinh(input): return Cplx(torch.sinh(input.real) * torch.cos(input.imag), torch.cosh(input.real) * torch.sin(input.imag)) def cosh(input): return Cplx(torch.cosh(input.real) * torch.cos(input.imag), torch.sinh(input.real) * torch.sin(input.imag)) def tanh(input): return sinh(input) / cosh(input) def randn(*size, dtype=None, device=None, requires_grad=False): normal = torch.randn(2, *size, dtype=dtype, layout=torch.strided, device=device, requires_grad=False) / sqrt(2) z = Cplx(normal[0], normal[1]) return z.requires_grad_(True) if requires_grad else z def randn_like(input, dtype=None, device=None, requires_grad=False): return randn(*input.size(), dtype=input.dtype if dtype is None else dtype, device=input.device if device is None else device, requires_grad=requires_grad) def modrelu(input, threshold=0.5): modulus = torch.clamp(abs(input), min=1e-5) return input * torch.relu(1. 
- threshold / modulus) def phaseshift(input, phi=0.0): return input * Cplx(torch.cos(phi), torch.sin(phi)) def linear_naive(input, weight, bias=None): re = F.linear(input.real, weight.real) - F.linear(input.imag, weight.imag) im = F.linear(input.real, weight.imag) + F.linear(input.imag, weight.real) output = Cplx(re, im) if bias is not None: output += bias return output def linear_cat(input, weight, bias=None): ww = torch.cat([ torch.cat([ weight.real, weight.imag], dim=0), torch.cat([-weight.imag, weight.real], dim=0) ], dim=1) xx = to_concatenated_real(input, dim=-1) output = from_concatenated_real(F.linear(xx, ww, None)) if bias is not None: output += bias return output def linear_3m(input, weight, bias=None): K1 = F.linear(input.real + input.imag, weight.real) K2 = F.linear(input.real, weight.imag - weight.real) K3 = F.linear(input.imag, weight.real + weight.imag) output = Cplx(K1 - K3, K1 + K2) if bias is not None: output += bias return output linear = linear_naive def symmetric_circular_padding(input, padding): assert input.dim() > 2 if isinstance(padding, int): padding = (input.dim() - 2) * [padding] assert isinstance(padding, (tuple, list)) assert len(padding) + 2 == input.dim() expanded_padding = [] for pad in padding: expanded_padding.extend(((pad + 1) // 2, pad // 2)) return input.apply(F.pad, tuple(expanded_padding), mode="circular") def convnd_naive(conv, input, weight, stride=1, padding=0, dilation=1, groups=1): re = conv(input.real, weight.real, None, stride, padding, dilation, groups) - conv(input.imag, weight.imag, None, stride, padding, dilation, groups) im = conv(input.real, weight.imag, None, stride, padding, dilation, groups) + conv(input.imag, weight.real, None, stride, padding, dilation, groups) return Cplx(re, im)
MIT License
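A minimal sketch comparing convnd_quick with convnd_naive from the same module, assuming the package imports as cplxmodule.cplx; the tensor shapes are illustrative, and note that convnd_quick hard-codes groups=1 in both underlying real-valued convolutions.

import torch
import torch.nn.functional as F

from cplxmodule import cplx

x = cplx.randn(2, 3, 16)  # complex input: batch x in_channels x length
w = cplx.randn(5, 3, 3)   # complex kernel: out_channels x in_channels x kernel_size

y_quick = cplx.convnd_quick(F.conv1d, x, w, stride=1, padding=1)
y_naive = cplx.convnd_naive(F.conv1d, x, w, stride=1, padding=1)

# Both routes compute the same complex convolution, up to floating point error.
assert torch.allclose(y_quick.real, y_naive.real, atol=1e-5)
assert torch.allclose(y_quick.imag, y_naive.imag, atol=1e-5)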
y-chan/atomicswap-qt
atomicswap/address.py
base_decode
python
def base_decode(v: Union[bytes, str], length: Optional[int], base: int) -> Optional[bytes]: v = to_bytes(v, "ascii") if base not in (58, 43): raise ValueError("not supported base: {}".format(base)) chars = __b58chars if base == 43: chars = __b43chars long_value = 0 for (i, c) in enumerate(v[::-1]): digit = chars.find(bytes([c])) if digit == -1: raise ValueError("Forbidden character {} for base {}".format(c, base)) long_value += digit * (base ** i) result = bytearray() while long_value >= 256: div, mod = divmod(long_value, 256) result.append(mod) long_value = div result.append(long_value) n_pad = 0 for c in v: if c == chars[0]: n_pad += 1 else: break result.extend(b"\x00" * n_pad) if length is not None and len(result) != length: return None result.reverse() return bytes(result)
Decode v into a byte string of `length` bytes; returns None if the decoded result does not have the expected length.
https://github.com/y-chan/atomicswap-qt/blob/5bab6d301177aaf7487236597f75efb1172e6450/atomicswap/address.py#L85-L116
from typing import Union, Tuple, Optional import hashlib from .coind import Coind __b58chars = b"123456789ABCDEFGHJKLMNPQRSTUVWXYZabcdefghijkmnopqrstuvwxyz" assert len(__b58chars) == 58 __b43chars = b"0123456789ABCDEFGHIJKLMNOPQRSTUVWXYZ$*+-./:" assert len(__b43chars) == 43 class PrivkeyDecodeError(Exception): pass def assert_bytes(*args): try: for x in args: assert isinstance(x, (bytes, bytearray)) except Exception: print("assert bytes failed", list(map(type, args))) raise def base_encode(v: bytes, base: int) -> str: assert_bytes(v) if base not in (58, 43): raise ValueError("not supported base: {}".format(base)) chars = __b58chars if base == 43: chars = __b43chars long_value = 0 for (i, c) in enumerate(v[::-1]): long_value += (256 ** i) * c result = bytearray() while long_value >= base: div, mod = divmod(long_value, base) result.append(chars[mod]) long_value = div result.append(chars[long_value]) n_pad = 0 for c in v: if c == 0x00: n_pad += 1 else: break result.extend([chars[0]] * n_pad) result.reverse() return result.decode("ascii")
MIT License
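A round-trip sketch for base_decode together with base_encode from the same module (import path taken from this entry); the payload bytes are illustrative.

from atomicswap.address import base_decode, base_encode

payload = b"\x00\x01\x02atomic"          # the leading zero byte exercises the padding logic
encoded = base_encode(payload, base=58)   # a str; starts with '1' for the leading zero byte
decoded = base_decode(encoded, length=len(payload), base=58)
assert decoded == payload

# A wrong expected length makes base_decode return None rather than raise.
assert base_decode(encoded, length=len(payload) + 1, base=58) is None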
jessamynsmith/twitterbot
twitter_bot/twitter_bot.py
TwitterBot.tokenize
python
def tokenize(self, message, max_length, mentions=None): mention_text = '' mention_length = 0 if mentions: formatted_mentions = ['@{0}'.format(mention) for mention in mentions] mention_text = " ".join(formatted_mentions) message = '{0} {1}'.format(mention_text, message) mention_length = len(mention_text) + 1 if len(message) <= max_length: return [message] tokens = message.split(' ') indices = [] index = 1 length = len(tokens[0]) while index < len(tokens): if length + 1 + len(tokens[index]) + 4 > max_length: indices.append(index) length = 4 + mention_length + len(tokens[index]) else: length += 1 + len(tokens[index]) index += 1 indices.append(index) messages = [" ".join(tokens[0:indices[0]])] for i in range(1, len(indices)): messages[i - 1] += ' ...' parts = [] if mention_text: parts.append(mention_text) parts.append("...") parts.extend(tokens[indices[i - 1]:indices[i]]) messages.append(" ".join(parts)) return messages
Tokenize a message into a list of messages of no more than max_length, including mentions in each message :param message: Message to be sent :param max_length: Maximum allowed length for each resulting message :param mentions: List of usernames to mention in each message :return:
https://github.com/jessamynsmith/twitterbot/blob/124308a38d8ad31db0dae0e1ec7a367b5df0a6d6/twitter_bot/twitter_bot.py#L76-L121
from __future__ import absolute_import import logging from twitter import Twitter, TwitterHTTPError from twitter.oauth import OAuth from .settings import SettingsError logging.basicConfig(filename='logs/twitter_bot.log', filemode='a', format='%(asctime)s %(name)s %(levelname)s %(message)s', level=logging.DEBUG) def get_class(class_or_name): if isinstance(class_or_name, str): class_or_name = _get_class_by_name(class_or_name) return class_or_name() def _get_class_by_name(class_name): module_name, symbol_name = class_name.rsplit('.', 1) module = __import__(module_name, fromlist=symbol_name) return getattr(module, symbol_name) class TwitterBot(object): def _verify_settings(self, settings, required_list, message, count=2): for required in required_list: if not settings.__dict__.get(required): format_args = [required] * count if required == 'MESSAGE_PROVIDER': message += (" If TWITTER_MESSAGE_PROVIDER is not set, " "'messages.HelloWorldMessageProvider' will be used.") raise SettingsError(message.format(*format_args)) def __init__(self, settings): self.MESSAGE_LENGTH = 140 self.DUPLICATE_CODE = 187 required_twitter_settings = ('OAUTH_TOKEN', 'OAUTH_SECRET', 'CONSUMER_KEY', 'CONSUMER_SECRET', 'MESSAGE_PROVIDER') message = ("Must specify '{0}' in settings.py. When using default settings, " "this value is loaded from the TWITTER_{1} environment variable.") self._verify_settings(settings, required_twitter_settings, message) auth = OAuth( settings.OAUTH_TOKEN, settings.OAUTH_SECRET, settings.CONSUMER_KEY, settings.CONSUMER_SECRET ) self.twitter = Twitter(auth=auth) self._screen_name = None self.messages = get_class(settings.MESSAGE_PROVIDER) self.since_id = get_class(settings.SINCE_ID_PROVIDER) self.dry_run = settings.DRY_RUN @property def screen_name(self): if not self._screen_name: self._screen_name = self.twitter.account.verify_credentials()['screen_name'] return self._screen_name
MIT License
darkdarkfruit/python-weed
weed/util.py
WeedAssignKeyExtended.update_full_urls
python
def update_full_urls(self):
    self['full_url'] = 'http://' + self['url']
    self['full_publicUrl'] = 'http://' + self['publicUrl']
    self['fid_full_url'] = urllib.parse.urljoin(self['full_url'], self['fid'])
    self['fid_full_publicUrl'] = urllib.parse.urljoin(self['full_publicUrl'], self['fid'])
    for k, v in list(self.items()):
        setattr(self, k, v)
update "full_url" and "full_publicUrl"
https://github.com/darkdarkfruit/python-weed/blob/32722b9aa3143116970a993dad690835c9cd415b/weed/util.py#L82-L93
import json import urllib.parse from dataclasses import dataclass from enum import Enum import requests from weed.conf import g_logger class WeedAssignKey(dict): def __init__(self, json_of_weed_response=None): self['fid'] = '' self['count'] = 0 self['url'] = '' self['publicUrl'] = '' if json_of_weed_response: try: d = json.loads(json_of_weed_response) self.update(d) except Exception as e: g_logger.error('Error for json.loads "%s".\nException: %s' % (json_of_weed_response, e)) for k, v in list(self.items()): setattr(self, k, v) super(WeedAssignKey, self).__init__() class WeedAssignKeyExtended(WeedAssignKey): def __init__(self, json_of_weed_response=None): super(WeedAssignKeyExtended, self).__init__(json_of_weed_response) self.update_full_urls()
MIT License
gretelai/gretel-python-client
src/gretel_client/config.py
_get_config_path
python
def _get_config_path() -> Path:
    from_env = os.getenv(GRETEL_CONFIG_FILE)
    if from_env:
        return Path(from_env)
    return Path().home() / f".{GRETEL}" / "config.json"
Returns the path to the system's Gretel config
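A brief hedged illustration of the lookup order; only standard-library calls are used and the override path is invented:

import os
from pathlib import Path

# With the override set, the configured file wins:
os.environ["GRETEL_CONFIG_FILE"] = "/tmp/gretel-config.json"
assert _get_config_path() == Path("/tmp/gretel-config.json")

# Without it, the default is ~/.gretel/config.json:
del os.environ["GRETEL_CONFIG_FILE"]
assert _get_config_path() == Path.home() / ".gretel" / "config.json"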
https://github.com/gretelai/gretel-python-client/blob/ccae575bbd9014a6364382270a93fbf0911048d5/src/gretel_client/config.py#L164-L169
from __future__ import annotations import json import logging import os from enum import Enum from pathlib import Path from typing import Optional, Type, TypeVar, Union from urllib3.util import Retry from gretel_client.rest.api.projects_api import ProjectsApi from gretel_client.rest.api_client import ApiClient from gretel_client.rest.configuration import Configuration from gretel_client.rest.exceptions import NotFoundException, UnauthorizedException GRETEL = "gretel" GRETEL_API_KEY = "GRETEL_API_KEY" GRETEL_ENDPOINT = "GRETEL_ENDPOINT" GRETEL_CONFIG_FILE = "GRETEL_CONFIG_FILE" GRETEL_PROJECT = "GRETEL_PROJECT" DEFAULT_GRETEL_ENDPOINT = "https://api.gretel.cloud" class GretelClientConfigurationError(Exception): ... T = TypeVar("T") class RunnerMode(Enum): LOCAL = "local" CLOUD = "cloud" MANUAL = "manual" DEFAULT_RUNNER = RunnerMode.CLOUD class ClientConfig: endpoint: str api_key: Optional[str] = None default_project_name: Optional[str] = None default_runner: str = DEFAULT_RUNNER.value def __init__( self, endpoint: Optional[str] = None, api_key: Optional[str] = None, default_project_name: Optional[str] = None, default_runner: str = DEFAULT_RUNNER.value, ): self.endpoint = ( endpoint or os.getenv(GRETEL_ENDPOINT) or DEFAULT_GRETEL_ENDPOINT ) self.api_key = api_key or os.getenv(GRETEL_API_KEY) self.default_runner = default_runner self.default_project_name = ( default_project_name or os.getenv(GRETEL_PROJECT) or default_project_name ) @classmethod def from_file(cls, file_path: Path) -> ClientConfig: config = json.loads(file_path.read_bytes()) return cls.from_dict(config) @classmethod def from_env(cls) -> ClientConfig: return cls() @classmethod def from_dict(cls, source: dict) -> ClientConfig: return cls( **{k: v for k, v in source.items() if k in cls.__annotations__.keys()} ) def _get_api_client(self) -> ApiClient: logging.getLogger("urllib3.connectionpool").setLevel(logging.ERROR) configuration = Configuration( host=self.endpoint, api_key={"ApiKey": self.api_key} ) configuration.retries = Retry( connect=5, read=2, redirect=5, backoff_factor=0.2 ) return ApiClient(configuration) def get_api(self, api_interface: Type[T]) -> T: return api_interface(self._get_api_client()) def _check_project(self, project_name: str = None) -> Optional[str]: if not project_name: return None projects_api = self.get_api(ProjectsApi) try: projects_api.get_project(project_id=project_name) except (UnauthorizedException, NotFoundException) as ex: raise GretelClientConfigurationError( f"Project {project_name} is invalid" ) from ex return project_name def update_default_project(self, project_id: str): self.default_project_name = project_id @property def as_dict(self) -> dict: return { prop: getattr(self, prop) for prop in self.__annotations__ if not prop.startswith("_") } def __eq__(self, other: ClientConfig) -> bool: return self.as_dict == other.as_dict @property def masked(self) -> dict: c = self.as_dict c["api_key"] = "[redacted from output]" return c @property def masked_api_key(self) -> str: if not self.api_key: return "None" return self.api_key[:8] + "****"
Apache License 2.0
ninthdevilhaunster/arknightsautohelper
vendor/penguin_client/penguin_client/models/zone.py
Zone.zone_id
python
def zone_id(self, zone_id):
    self._zone_id = zone_id
Sets the zone_id of this Zone.

:param zone_id: The zone_id of this Zone.  # noqa: E501
:type: str
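A small hedged example (the zone id value is invented):

zone = Zone()
zone.zone_id = "main_0"      # goes through this setter
assert zone.zone_id == "main_0"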
https://github.com/ninthdevilhaunster/arknightsautohelper/blob/d24b4e22a73b333c1acc152556566efad4e94c04/vendor/penguin_client/penguin_client/models/zone.py#L206-L214
import pprint import re import six class Zone(object): """ Attributes: swagger_types (dict): The key is attribute name and the value is attribute type. attribute_map (dict): The key is attribute name and the value is json key in definition. """ swagger_types = { 'close_time': 'int', 'existence': 'dict(str, Existence)', 'open_time': 'int', 'stages': 'list[str]', 'type': 'str', 'zone_id': 'str', 'zone_index': 'int', 'zone_name': 'str', 'zone_name_i18n': 'dict(str, str)' } attribute_map = { 'close_time': 'closeTime', 'existence': 'existence', 'open_time': 'openTime', 'stages': 'stages', 'type': 'type', 'zone_id': 'zoneId', 'zone_index': 'zoneIndex', 'zone_name': 'zoneName', 'zone_name_i18n': 'zoneName_i18n' } def __init__(self, close_time=None, existence=None, open_time=None, stages=None, type=None, zone_id=None, zone_index=None, zone_name=None, zone_name_i18n=None): self._close_time = None self._existence = None self._open_time = None self._stages = None self._type = None self._zone_id = None self._zone_index = None self._zone_name = None self._zone_name_i18n = None self.discriminator = None if close_time is not None: self.close_time = close_time if existence is not None: self.existence = existence if open_time is not None: self.open_time = open_time if stages is not None: self.stages = stages if type is not None: self.type = type if zone_id is not None: self.zone_id = zone_id if zone_index is not None: self.zone_index = zone_index if zone_name is not None: self.zone_name = zone_name if zone_name_i18n is not None: self.zone_name_i18n = zone_name_i18n @property def close_time(self): return self._close_time @close_time.setter def close_time(self, close_time): self._close_time = close_time @property def existence(self): return self._existence @existence.setter def existence(self, existence): self._existence = existence @property def open_time(self): return self._open_time @open_time.setter def open_time(self, open_time): self._open_time = open_time @property def stages(self): return self._stages @stages.setter def stages(self, stages): self._stages = stages @property def type(self): return self._type @type.setter def type(self, type): self._type = type @property def zone_id(self): return self._zone_id @zone_id.setter
MIT License
pydata/sparse
sparse/_coo/common.py
nanprod
python
def nanprod(x, axis=None, keepdims=False, dtype=None, out=None):
    assert out is None
    x = asCOO(x)
    return nanreduce(x, np.multiply, axis=axis, keepdims=keepdims, dtype=dtype)
Performs a product operation along the given axes, skipping ``NaN`` values.
Uses all axes by default.

Parameters
----------
x : SparseArray
    The array to perform the reduction on.
axis : Union[int, Iterable[int]], optional
    The axes along which to multiply. Uses all axes by default.
keepdims : bool, optional
    Whether or not to keep the dimensions of the original array.
dtype : numpy.dtype
    The data type of the output array.

Returns
-------
COO
    The reduced output sparse array.

See Also
--------
:obj:`COO.prod` : Function without ``NaN`` skipping.
numpy.nanprod : Equivalent Numpy function.
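A hedged usage sketch (tiny made-up array; assumes the `sparse` package is importable and exposes nanprod at the top level):

import numpy as np
import sparse

x = sparse.COO.from_numpy(np.array([[1.0, np.nan], [2.0, 3.0]]))
print(sparse.nanprod(x))                    # expected 6.0: the NaN entry is skipped
print(sparse.nanprod(x, axis=0).todense())  # expected [2. 3.]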
https://github.com/pydata/sparse/blob/0b7dfeb35cc5894fe36ed1742704acbb37c0c54e/sparse/_coo/common.py#L498-L526
from functools import reduce import operator import warnings from collections.abc import Iterable import numpy as np import scipy.sparse import numba from .._sparse_array import SparseArray from .._utils import ( isscalar, is_unsigned_dtype, normalize_axis, check_zero_fill_value, check_consistent_fill_value, can_store, ) def asCOO(x, name="asCOO", check=True): from .core import COO if check and not isinstance(x, (SparseArray, scipy.sparse.spmatrix)): raise ValueError( "Performing this operation would produce a dense result: %s" % name ) if not isinstance(x, COO): x = COO(x) return x def linear_loc(coords, shape): if shape == () and len(coords) == 0: return np.zeros(coords.shape[1:], dtype=np.intp) else: return np.ravel_multi_index(coords, shape) def kron(a, b): from .core import COO from .._umath import _cartesian_product check_zero_fill_value(a, b) a_sparse = isinstance(a, (SparseArray, scipy.sparse.spmatrix)) b_sparse = isinstance(b, (SparseArray, scipy.sparse.spmatrix)) a_ndim = np.ndim(a) b_ndim = np.ndim(b) if not (a_sparse or b_sparse): raise ValueError( "Performing this operation would produce a dense " "result: kron" ) if a_ndim == 0 or b_ndim == 0: return a * b a = asCOO(a, check=False) b = asCOO(b, check=False) max_dim = max(a.ndim, b.ndim) a = a.reshape((1,) * (max_dim - a.ndim) + a.shape) b = b.reshape((1,) * (max_dim - b.ndim) + b.shape) a_idx, b_idx = _cartesian_product(np.arange(a.nnz), np.arange(b.nnz)) a_expanded_coords = a.coords[:, a_idx] b_expanded_coords = b.coords[:, b_idx] o_coords = a_expanded_coords * np.asarray(b.shape)[:, None] + b_expanded_coords o_data = a.data[a_idx] * b.data[b_idx] o_shape = tuple(i * j for i, j in zip(a.shape, b.shape)) return COO(o_coords, o_data, shape=o_shape, has_duplicates=False) def concatenate(arrays, axis=0): from .core import COO check_consistent_fill_value(arrays) arrays = [x if isinstance(x, COO) else COO(x) for x in arrays] axis = normalize_axis(axis, arrays[0].ndim) assert all( x.shape[ax] == arrays[0].shape[ax] for x in arrays for ax in set(range(arrays[0].ndim)) - {axis} ) nnz = 0 dim = sum(x.shape[axis] for x in arrays) shape = list(arrays[0].shape) shape[axis] = dim data = np.concatenate([x.data for x in arrays]) coords = np.concatenate([x.coords for x in arrays], axis=1) if not can_store(coords.dtype, max(shape)): coords = coords.astype(np.min_scalar_type(max(shape))) dim = 0 for x in arrays: if dim: coords[axis, nnz : x.nnz + nnz] += dim dim += x.shape[axis] nnz += x.nnz return COO( coords, data, shape=shape, has_duplicates=False, sorted=(axis == 0), fill_value=arrays[0].fill_value, ) def stack(arrays, axis=0): from .core import COO check_consistent_fill_value(arrays) assert len({x.shape for x in arrays}) == 1 arrays = [x if isinstance(x, COO) else COO(x) for x in arrays] axis = normalize_axis(axis, arrays[0].ndim + 1) data = np.concatenate([x.data for x in arrays]) coords = np.concatenate([x.coords for x in arrays], axis=1) shape = list(arrays[0].shape) shape.insert(axis, len(arrays)) nnz = 0 dim = 0 new = np.empty(shape=(coords.shape[1],), dtype=np.intp) for x in arrays: new[nnz : x.nnz + nnz] = dim dim += 1 nnz += x.nnz coords = [coords[i] for i in range(coords.shape[0])] coords.insert(axis, new) coords = np.stack(coords, axis=0) return COO( coords, data, shape=shape, has_duplicates=False, sorted=(axis == 0), fill_value=arrays[0].fill_value, ) def triu(x, k=0): from .core import COO check_zero_fill_value(x) if not x.ndim >= 2: raise NotImplementedError( "sparse.triu is not implemented for scalars or 1-D arrays." 
) mask = x.coords[-2] + k <= x.coords[-1] coords = x.coords[:, mask] data = x.data[mask] return COO(coords, data, shape=x.shape, has_duplicates=False, sorted=True) def tril(x, k=0): from .core import COO check_zero_fill_value(x) if not x.ndim >= 2: raise NotImplementedError( "sparse.tril is not implemented for scalars or 1-D arrays." ) mask = x.coords[-2] + k >= x.coords[-1] coords = x.coords[:, mask] data = x.data[mask] return COO(coords, data, shape=x.shape, has_duplicates=False, sorted=True) def nansum(x, axis=None, keepdims=False, dtype=None, out=None): assert out is None x = asCOO(x, name="nansum") return nanreduce(x, np.add, axis=axis, keepdims=keepdims, dtype=dtype) def nanmean(x, axis=None, keepdims=False, dtype=None, out=None): assert out is None x = asCOO(x, name="nanmean") if not np.issubdtype(x.dtype, np.floating): return x.mean(axis=axis, keepdims=keepdims, dtype=dtype) mask = np.isnan(x) x2 = where(mask, 0, x) nancount = mask.sum(axis=axis, dtype="i8", keepdims=keepdims) if axis is None: axis = tuple(range(x.ndim)) elif not isinstance(axis, tuple): axis = (axis,) den = reduce(operator.mul, (x.shape[i] for i in axis), 1) den -= nancount if (den == 0).any(): warnings.warn("Mean of empty slice", RuntimeWarning, stacklevel=2) num = np.sum(x2, axis=axis, dtype=dtype, keepdims=keepdims) with np.errstate(invalid="ignore", divide="ignore"): if num.ndim: return np.true_divide(num, den, casting="unsafe") return (num / den).astype(dtype) def nanmax(x, axis=None, keepdims=False, dtype=None, out=None): assert out is None x = asCOO(x, name="nanmax") ar = x.reduce(np.fmax, axis=axis, keepdims=keepdims, dtype=dtype) if (isscalar(ar) and np.isnan(ar)) or np.isnan(ar.data).any(): warnings.warn("All-NaN slice encountered", RuntimeWarning, stacklevel=2) return ar def nanmin(x, axis=None, keepdims=False, dtype=None, out=None): assert out is None x = asCOO(x, name="nanmin") ar = x.reduce(np.fmin, axis=axis, keepdims=keepdims, dtype=dtype) if (isscalar(ar) and np.isnan(ar)) or np.isnan(ar.data).any(): warnings.warn("All-NaN slice encountered", RuntimeWarning, stacklevel=2) return ar
BSD 3-Clause New or Revised License
peterdsharpe/aerosandbox
aerosandbox/library/aerodynamics/unsteady.py
pitching_through_transverse_gust
python
def pitching_through_transverse_gust(
        reduced_time: np.ndarray,
        gust_velocity_profile: Callable[[float], float],
        plate_velocity: float,
        angle_of_attack: Union[Callable[[float], float], float],
        chord: float = 1
):
    gust_lift = calculate_lift_due_to_transverse_gust(reduced_time, gust_velocity_profile, plate_velocity, angle_of_attack, chord)
    pitch_lift = calculate_lift_due_to_pitching_profile(reduced_time, angle_of_attack)
    added_mass_lift = added_mass_due_to_pitching(reduced_time, angle_of_attack)

    return gust_lift + pitch_lift + added_mass_lift
This function calculates the lift as a function of time of a flat plate pitching about its midchord through an arbitrary transverse gust. It combines Kussner's gust response with Wagner's pitch response as well as added mass.

The following physics are accounted for:
1) Vorticity shed from the trailing edge due to the gust profile
2) Vorticity shed from the trailing edge due to the pitching profile
3) Added mass (non-circulatory force) due to pitching about the midchord

The following physics are NOT accounted for:
1) Any type of flow separation
2) Leading-edge vorticity shedding
3) Deflected wake due to the gust (flat-wake assumption)

Args:
    reduced_time (float, np.ndarray): Reduced time, equal to the number of semichords travelled. See function calculate_reduced_time.
    gust_velocity_profile (Callable[[float], float]): The transverse velocity profile that the flat plate experiences. Must be a function that takes reduced time and returns a velocity.
    plate_velocity (float): The velocity with which the flat plate enters the gust.
    angle_of_attack (Union[float, Callable[[float], float]]): The angle of attack, in degrees. Either a float for a constant angle of attack or a Callable that takes reduced time and returns the angle of attack.
    chord (float): The chord of the plate in meters.

Returns:
    lift_coefficient (np.ndarray): The lift coefficient history of the flat plate.
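A hedged usage sketch mirroring the module's own main() example; the profile helpers top_hat_gust and gaussian_pitch are assumed to be defined elsewhere in the module:

import numpy as np

time = np.linspace(0, 10, 100)
wing_velocity = 2   # m/s
chord = 2           # m
reduced_time = calculate_reduced_time(time, wing_velocity, chord)

# Total C_l history while pitching (gaussian_pitch) through a top-hat gust.
cl = pitching_through_transverse_gust(reduced_time, top_hat_gust, wing_velocity, gaussian_pitch)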
https://github.com/peterdsharpe/aerosandbox/blob/8fbf9449cba2f02e14424690ba2e34b438f21c69/aerosandbox/library/aerodynamics/unsteady.py#L289-L326
import matplotlib.pyplot as plt import aerosandbox.numpy as np from typing import Union, Callable from scipy.integrate import quad def main(): time = np.linspace(0, 10, 100) wing_velocity = 2 chord = 2 reduced_time = calculate_reduced_time(time, wing_velocity, chord) fig, ax1 = plt.subplots(dpi=300) ln1 = ax1.plot(reduced_time, np.array([top_hat_gust(s) for s in reduced_time]), label="Top-Hat Gust", lw=3) ln2 = ax1.plot(reduced_time, np.array([sine_squared_gust(s) for s in reduced_time]), label="Sine-Squared Gust", lw=3) ax1.set_xlabel("Reduced time") ax1.set_ylabel("Velocity (m/s)") ax2 = ax1.twinx() ln3 = ax2.plot(reduced_time, np.array([gaussian_pitch(s) for s in reduced_time]), label="Guassian Pitch", c="red", ls="--", lw=3) ax2.set_ylabel("Angle of Attack, degrees") lns = ln1 + ln2 + ln3 labs = [l.get_label() for l in lns] ax2.legend(lns, labs, loc="lower right") plt.title("Gust and pitch example profiles") total_lift = pitching_through_transverse_gust(reduced_time, top_hat_gust, wing_velocity, gaussian_pitch) gust_lift = calculate_lift_due_to_transverse_gust(reduced_time, top_hat_gust, wing_velocity, gaussian_pitch) pitch_lift = calculate_lift_due_to_pitching_profile(reduced_time, gaussian_pitch) added_mass_lift = added_mass_due_to_pitching(reduced_time, gaussian_pitch) plt.figure(dpi=300) plt.plot(reduced_time, total_lift, label="Total Lift", lw=2) plt.plot(reduced_time, gust_lift, label="Gust Lift", lw=2) plt.plot(reduced_time, pitch_lift, label="Pitching Lift", lw=2) plt.plot(reduced_time, added_mass_lift, label="Added Mass Lift", lw=2) plt.legend() plt.xlabel("Reduced time") plt.ylabel("$C_\ell$") plt.title("Guassian Pitch Maneuver Through Top-Hat Gust") def calculate_reduced_time( time: Union[float, np.ndarray], velocity: Union[float, np.ndarray], chord: float ) -> Union[float, np.ndarray]: if type(velocity) == float or type(velocity) == int: return 2 * velocity * time / chord else: assert np.size(velocity) == np.size(time), "The velocity history and time must have the same length" reduced_time = np.zeros_like(time) for i in range(len(time) - 1): reduced_time[i + 1] = reduced_time[i] + (velocity[i + 1] + velocity[i]) / 2 * (time[i + 1] - time[i]) return 2 / chord * reduced_time def wagners_function(reduced_time: Union[np.ndarray, float]): wagner = (1 - 0.165 * np.exp(-0.0455 * reduced_time) - 0.335 * np.exp(-0.3 * reduced_time)) * np.where(reduced_time >= 0, 1, 0) return wagner def kussners_function(reduced_time: Union[np.ndarray, float]): kussner = (1 - 0.5 * np.exp(-0.13 * reduced_time) - 0.5 * np.exp(-reduced_time)) * np.where(reduced_time >= 0, 1, 0) return kussner def indicial_pitch_response( reduced_time: Union[float, np.ndarray], angle_of_attack: float ): return 2 * np.pi * np.deg2rad(angle_of_attack) * wagners_function(reduced_time) def indicial_gust_response( reduced_time: Union[float, np.ndarray], gust_velocity: float, plate_velocity: float, angle_of_attack: float = 0, chord: float = 1 ): angle_of_attack_radians = np.deg2rad(angle_of_attack) offset = chord / 2 * (1 - np.cos(angle_of_attack_radians)) return (2 * np.pi * np.arctan(gust_velocity / plate_velocity) * np.cos(angle_of_attack_radians) * kussners_function(reduced_time - offset)) def calculate_lift_due_to_transverse_gust( reduced_time: np.ndarray, gust_velocity_profile: Callable[[float], float], plate_velocity: float, angle_of_attack: Union[float, Callable[[float], float]] = 0, chord: float = 1 ): assert type(angle_of_attack) != np.ndarray, "Please provide either a Callable or a float for the angle of attack" if 
isinstance(angle_of_attack, float) or isinstance(angle_of_attack, int): def AoA_function(reduced_time): return np.deg2rad(angle_of_attack) else: def AoA_function(reduced_time): return np.deg2rad(angle_of_attack(reduced_time)) def dK_ds(reduced_time): return (0.065 * np.exp(-0.13 * reduced_time) + 0.5 * np.exp(-reduced_time)) def integrand(sigma, s, chord): offset = chord / 2 * (1 - np.cos(AoA_function(s - sigma))) return (dK_ds(sigma) * gust_velocity_profile(s - sigma - offset) * np.cos(AoA_function(s - sigma))) lift_coefficient = np.zeros_like(reduced_time) for i, s in enumerate(reduced_time): I = quad(integrand, 0, s, args=(s, chord))[0] lift_coefficient[i] = 2 * np.pi * I / plate_velocity return lift_coefficient def calculate_lift_due_to_pitching_profile( reduced_time: np.ndarray, angle_of_attack: Union[Callable[[float], float], float] ): assert (reduced_time >= 0).all(), "Please use positive time. Negative time not supported" if isinstance(angle_of_attack, float) or isinstance(angle_of_attack, int): def AoA_function(reduced_time): return np.deg2rad(angle_of_attack) else: def AoA_function(reduced_time): return np.deg2rad(angle_of_attack(reduced_time)) def dW_ds(reduced_time): return (0.1005 * np.exp(-0.3 * reduced_time) + 0.00750075 * np.exp(-0.0455 * reduced_time)) def integrand(sigma, s): if dW_ds(sigma) < 0: dW_ds(sigma) return dW_ds(sigma) * AoA_function(s - sigma) lift_coefficient = np.zeros_like(reduced_time) for i, s in enumerate(reduced_time): I = quad(integrand, 0, s, args=s)[0] lift_coefficient[i] = 2 * np.pi * (AoA_function(s) * wagners_function(0) + I) return lift_coefficient def added_mass_due_to_pitching( reduced_time: np.ndarray, angle_of_attack: Callable[[float], float] ): AoA = np.array([np.deg2rad(angle_of_attack(s)) for s in reduced_time]) da_ds = np.gradient(AoA, reduced_time) return np.pi / 2 * np.cos(AoA) ** 2 * da_ds
MIT License
chaffelson/whoville
whoville/cloudbreak/models/aws_encryption.py
AwsEncryption.type
python
def type(self, type):
    self._type = type
Sets the type of this AwsEncryption.
encryption type for vm (DEFAULT|CUSTOM|NONE)

:param type: The type of this AwsEncryption.
:type: str
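A tiny hedged example; the values follow the DEFAULT|CUSTOM|NONE convention above and the key id is invented:

enc = AwsEncryption(type="CUSTOM", key="my-kms-key-id")
enc.type = "DEFAULT"      # goes through this setter
assert enc.type == "DEFAULT"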
https://github.com/chaffelson/whoville/blob/f71fda629c9fd50d0a482120165ea5abcc754522/whoville/cloudbreak/models/aws_encryption.py#L68-L77
from pprint import pformat from six import iteritems import re class AwsEncryption(object): """ Attributes: swagger_types (dict): The key is attribute name and the value is attribute type. attribute_map (dict): The key is attribute name and the value is json key in definition. """ swagger_types = { 'type': 'str', 'key': 'str' } attribute_map = { 'type': 'type', 'key': 'key' } def __init__(self, type=None, key=None): self._type = None self._key = None if type is not None: self.type = type if key is not None: self.key = key @property def type(self): return self._type @type.setter
Apache License 2.0
ionelmc/python-hunter
src/hunter/event.py
Event.detach
python
def detach(self, value_filter=None):
    event = Event.__new__(Event)

    event.__dict__['code'] = self.code
    event.__dict__['filename'] = self.filename
    event.__dict__['fullsource'] = self.fullsource
    event.__dict__['function'] = self.function
    event.__dict__['lineno'] = self.lineno
    event.__dict__['module'] = self.module
    event.__dict__['source'] = self.source
    event.__dict__['stdlib'] = self.stdlib
    event.__dict__['threadid'] = self.threadid
    event.__dict__['threadname'] = self.threadname
    event.__dict__['instruction'] = self.instruction

    if value_filter:
        event.__dict__['arg'] = value_filter(self.arg)
        event.__dict__['globals'] = {key: value_filter(value) for key, value in self.globals.items()}
        event.__dict__['locals'] = {key: value_filter(value) for key, value in self.locals.items()}
    else:
        event.__dict__['globals'] = {}
        event.__dict__['locals'] = {}
        event.__dict__['arg'] = None

    event.threading_support = self.threading_support
    event.calls = self.calls
    event.depth = self.depth
    event.kind = self.kind
    event.builtin = self.builtin

    event.detached = True
    return event
Return a copy of the event with references to live objects (like the frame) removed. You should use this if you want to store or use the event outside the handler.

You should use this if you want to avoid memory leaks or side-effects when storing the events.

Args:
    value_filter: Optional callable that takes one argument: ``value``.

        If not specified then the ``arg``, ``globals`` and ``locals`` fields will be ``None``.

Example usage in a :class:`~hunter.actions.ColorStreamAction` subclass:

.. sourcecode:: python

    def __call__(self, event):
        self.events = [event.detach(lambda value: self.try_repr(value))]
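A hedged sketch of the pattern the docstring describes, using a plain callable as the action and ``repr`` as an illustrative value filter (the module name in the comment is hypothetical):

import hunter

collected = []

def collect(event):
    # Keep a frame-free copy so the event can safely outlive the handler.
    collected.append(event.detach(value_filter=repr))

# e.g. hunter.trace(module="mymodule", action=collect)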
https://github.com/ionelmc/python-hunter/blob/e14bbfe28a11bfe8e65a91fd65831c72b2269cef/src/hunter/event.py#L125-L177
from __future__ import absolute_import import linecache import tokenize from functools import partial from os.path import basename from os.path import exists from os.path import splitext from threading import current_thread from .const import SITE_PACKAGES_PATHS from .const import SYS_PREFIX_PATHS from .util import CYTHON_SUFFIX_RE from .util import LEADING_WHITESPACE_RE from .util import MISSING from .util import PY2 from .util import cached_property from .util import get_func_in_mro from .util import get_main_thread from .util import if_same_code __all__ = 'Event', class Event(object): frame = None kind = None arg = None depth = None calls = None builtin = None def __init__(self, frame, kind, arg, tracer=None, depth=None, calls=None, threading_support=MISSING): if tracer is None: if depth is None: raise TypeError('Missing argument: depth (required because tracer was not given).') if calls is None: raise TypeError('Missing argument: calls (required because tracer was not given).') if threading_support is MISSING: raise TypeError('Missing argument: threading_support (required because tracer was not given).') else: depth = tracer.depth calls = tracer.calls threading_support = tracer.threading_support self.frame = frame if kind.startswith('c_'): kind = kind[2:] builtin = True else: builtin = False self.builtin = builtin self.kind = kind self.arg = arg self.depth = depth self.calls = calls self.threading_support = threading_support self.detached = False def __repr__(self): return '<Event kind=%r function=%r module=%r filename=%r lineno=%s>' % ( self.kind, self.function, self.module, self.filename, self.lineno ) def __eq__(self, other): return ( type(self) == type(other) and self.kind == other.kind and self.depth == other.depth and self.function == other.function and self.module == other.module and self.filename == other.filename )
BSD 2-Clause Simplified License
frank-qlu/recruit
ζ‹›θ˜ηˆ¬θ™«/zlzpView/static/zlzpView/venv/Lib/site-packages/numpy/core/defchararray.py
encode
python
def encode(a, encoding=None, errors=None):
    return _to_string_or_unicode_array(
        _vec_string(a, object_, 'encode', _clean_args(encoding, errors)))
Calls `str.encode` element-wise.

The set of available codecs comes from the Python standard library,
and may be extended at runtime. For more information, see the codecs
module.

Parameters
----------
a : array_like of str or unicode
encoding : str, optional
    The name of an encoding
errors : str, optional
    Specifies how to handle encoding errors

Returns
-------
out : ndarray

See also
--------
str.encode

Notes
-----
The type of the result will depend on the encoding specified.
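A short hedged example through the public `numpy.char` namespace, with a tiny made-up array:

import numpy as np

a = np.array(['abc', 'Γ₯Àâ'])
encoded = np.char.encode(a, encoding='utf-8')
print(encoded)         # element-wise bytes, e.g. [b'abc' b'\xc3\xa5\xc3\xa4\xc3\xb6']
print(encoded.dtype)   # a bytes ('S') dtype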
https://github.com/frank-qlu/recruit/blob/0875fb1d2cfb581aaa8abc7a97880c0ce5bf6147/ζ‹›θ˜ηˆ¬θ™«/zlzpView/static/zlzpView/venv/Lib/site-packages/numpy/core/defchararray.py#L568-L600
from __future__ import division, absolute_import, print_function import functools import sys from .numerictypes import string_, unicode_, integer, object_, bool_, character from .numeric import ndarray, compare_chararrays from .numeric import array as narray from numpy.core.multiarray import _vec_string from numpy.core.overrides import set_module from numpy.core import overrides from numpy.compat import asbytes, long import numpy __all__ = [ 'chararray', 'equal', 'not_equal', 'greater_equal', 'less_equal', 'greater', 'less', 'str_len', 'add', 'multiply', 'mod', 'capitalize', 'center', 'count', 'decode', 'encode', 'endswith', 'expandtabs', 'find', 'index', 'isalnum', 'isalpha', 'isdigit', 'islower', 'isspace', 'istitle', 'isupper', 'join', 'ljust', 'lower', 'lstrip', 'partition', 'replace', 'rfind', 'rindex', 'rjust', 'rpartition', 'rsplit', 'rstrip', 'split', 'splitlines', 'startswith', 'strip', 'swapcase', 'title', 'translate', 'upper', 'zfill', 'isnumeric', 'isdecimal', 'array', 'asarray' ] _globalvar = 0 if sys.version_info[0] >= 3: _unicode = str _bytes = bytes else: _unicode = unicode _bytes = str _len = len array_function_dispatch = functools.partial( overrides.array_function_dispatch, module='numpy.char') def _use_unicode(*args): for x in args: if (isinstance(x, _unicode) or issubclass(numpy.asarray(x).dtype.type, unicode_)): return unicode_ return string_ def _to_string_or_unicode_array(result): return numpy.asarray(result.tolist()) def _clean_args(*args): newargs = [] for chk in args: if chk is None: break newargs.append(chk) return newargs def _get_num_chars(a): if issubclass(a.dtype.type, unicode_): return a.itemsize // 4 return a.itemsize def _binary_op_dispatcher(x1, x2): return (x1, x2) @array_function_dispatch(_binary_op_dispatcher) def equal(x1, x2): return compare_chararrays(x1, x2, '==', True) @array_function_dispatch(_binary_op_dispatcher) def not_equal(x1, x2): return compare_chararrays(x1, x2, '!=', True) @array_function_dispatch(_binary_op_dispatcher) def greater_equal(x1, x2): return compare_chararrays(x1, x2, '>=', True) @array_function_dispatch(_binary_op_dispatcher) def less_equal(x1, x2): return compare_chararrays(x1, x2, '<=', True) @array_function_dispatch(_binary_op_dispatcher) def greater(x1, x2): return compare_chararrays(x1, x2, '>', True) @array_function_dispatch(_binary_op_dispatcher) def less(x1, x2): return compare_chararrays(x1, x2, '<', True) def _unary_op_dispatcher(a): return (a,) @array_function_dispatch(_unary_op_dispatcher) def str_len(a): return _vec_string(a, integer, '__len__') @array_function_dispatch(_binary_op_dispatcher) def add(x1, x2): arr1 = numpy.asarray(x1) arr2 = numpy.asarray(x2) out_size = _get_num_chars(arr1) + _get_num_chars(arr2) dtype = _use_unicode(arr1, arr2) return _vec_string(arr1, (dtype, out_size), '__add__', (arr2,)) def _multiply_dispatcher(a, i): return (a,) @array_function_dispatch(_multiply_dispatcher) def multiply(a, i): a_arr = numpy.asarray(a) i_arr = numpy.asarray(i) if not issubclass(i_arr.dtype.type, integer): raise ValueError("Can only multiply by integers") out_size = _get_num_chars(a_arr) * max(long(i_arr.max()), 0) return _vec_string( a_arr, (a_arr.dtype.type, out_size), '__mul__', (i_arr,)) def _mod_dispatcher(a, values): return (a, values) @array_function_dispatch(_mod_dispatcher) def mod(a, values): return _to_string_or_unicode_array( _vec_string(a, object_, '__mod__', (values,))) @array_function_dispatch(_unary_op_dispatcher) def capitalize(a): a_arr = numpy.asarray(a) return _vec_string(a_arr, 
a_arr.dtype, 'capitalize') def _center_dispatcher(a, width, fillchar=None): return (a,) @array_function_dispatch(_center_dispatcher) def center(a, width, fillchar=' '): a_arr = numpy.asarray(a) width_arr = numpy.asarray(width) size = long(numpy.max(width_arr.flat)) if numpy.issubdtype(a_arr.dtype, numpy.string_): fillchar = asbytes(fillchar) return _vec_string( a_arr, (a_arr.dtype.type, size), 'center', (width_arr, fillchar)) def _count_dispatcher(a, sub, start=None, end=None): return (a,) @array_function_dispatch(_count_dispatcher) def count(a, sub, start=0, end=None): return _vec_string(a, integer, 'count', [sub, start] + _clean_args(end)) def _code_dispatcher(a, encoding=None, errors=None): return (a,) @array_function_dispatch(_code_dispatcher) def decode(a, encoding=None, errors=None): return _to_string_or_unicode_array( _vec_string(a, object_, 'decode', _clean_args(encoding, errors))) @array_function_dispatch(_code_dispatcher)
Apache License 2.0
didix21/mdutils
mdutils/fileutils/fileutils.py
MarkDownFile.rewrite_all_file
python
def rewrite_all_file(self, data):
    with open(self.file_name, 'w', encoding='utf-8') as self.file:
        self.file.write(data)
Rewrite all the data of a Markdown file with ``data``.

:param str data: string containing all the data to be written to the markdown file.
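A hedged usage sketch (the file name is arbitrary):

md_file = MarkDownFile('notes')          # creates notes.md
md_file.rewrite_all_file('# Notes\n\nHello, world.\n')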
https://github.com/didix21/mdutils/blob/09e531b486563e01f4890a0c68633bb246d44b4c/mdutils/fileutils/fileutils.py#L26-L31
class MarkDownFile(object): def __init__(self, name=''): if name: self.file_name = name if name.endswith('.md') else name + '.md' self.file = open(self.file_name, 'w+', encoding='UTF-8') self.file.close()
MIT License
gandalf15/hx711
HX711_Python3/hx711.py
HX711.set_data_filter
python
def set_data_filter(self, data_filter):
    if callable(data_filter):
        self._data_filter = data_filter
    else:
        raise TypeError('Parameter "data_filter" must be a function. '
                        'Received: {}'.format(data_filter))
set_data_filter method sets the data filter that is passed as an argument.

Args:
    data_filter(data_filter): Data filter that takes a list of int numbers and
        returns a list of filtered int numbers.

Raises:
    TypeError: if filter is not a function.
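A hedged sketch of installing a custom filter; the GPIO pin numbers are placeholders and the code needs a Raspberry Pi with RPi.GPIO to actually run:

def drop_extremes(readings):
    # Example filter: discard the smallest and largest samples.
    if len(readings) > 2:
        return sorted(readings)[1:-1]
    return readings

hx = HX711(dout_pin=5, pd_sck_pin=6)
hx.set_data_filter(drop_extremes)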
https://github.com/gandalf15/hx711/blob/4faae5525ced1d08e51c95728f47ac0b8864c56f/HX711_Python3/hx711.py#L243-L258
import statistics as stat import time import RPi.GPIO as GPIO class HX711: def __init__(self, dout_pin, pd_sck_pin, gain_channel_A=128, select_channel='A'): if (isinstance(dout_pin, int)): if (isinstance(pd_sck_pin, int)): self._pd_sck = pd_sck_pin self._dout = dout_pin else: raise TypeError('pd_sck_pin must be type int. ' 'Received pd_sck_pin: {}'.format(pd_sck_pin)) else: raise TypeError('dout_pin must be type int. ' 'Received dout_pin: {}'.format(dout_pin)) self._gain_channel_A = 0 self._offset_A_128 = 0 self._offset_A_64 = 0 self._offset_B = 0 self._last_raw_data_A_128 = 0 self._last_raw_data_A_64 = 0 self._last_raw_data_B = 0 self._wanted_channel = '' self._current_channel = '' self._scale_ratio_A_128 = 1 self._scale_ratio_A_64 = 1 self._scale_ratio_B = 1 self._debug_mode = False self._data_filter = self.outliers_filter GPIO.setup(self._pd_sck, GPIO.OUT) GPIO.setup(self._dout, GPIO.IN) self.select_channel(select_channel) self.set_gain_A(gain_channel_A) def select_channel(self, channel): channel = channel.capitalize() if (channel == 'A'): self._wanted_channel = 'A' elif (channel == 'B'): self._wanted_channel = 'B' else: raise ValueError('Parameter "channel" has to be "A" or "B". ' 'Received: {}'.format(channel)) self._read() time.sleep(0.5) def set_gain_A(self, gain): if gain == 128: self._gain_channel_A = gain elif gain == 64: self._gain_channel_A = gain else: raise ValueError('gain has to be 128 or 64. ' 'Received: {}'.format(gain)) self._read() time.sleep(0.5) def zero(self, readings=30): if readings > 0 and readings < 100: result = self.get_raw_data_mean(readings) if result != False: if (self._current_channel == 'A' and self._gain_channel_A == 128): self._offset_A_128 = result return False elif (self._current_channel == 'A' and self._gain_channel_A == 64): self._offset_A_64 = result return False elif (self._current_channel == 'B'): self._offset_B = result return False else: if self._debug_mode: print('Cannot zero() channel and gain mismatch.\n' 'current channel: {}\n' 'gain A: {}\n'.format(self._current_channel, self._gain_channel_A)) return True else: if self._debug_mode: print('From method "zero()".\n' 'get_raw_data_mean(readings) returned False.\n') return True else: raise ValueError('Parameter "readings" ' 'can be in range 1 up to 99. ' 'Received: {}'.format(readings)) def set_offset(self, offset, channel='', gain_A=0): channel = channel.capitalize() if isinstance(offset, int): if channel == 'A' and gain_A == 128: self._offset_A_128 = offset return elif channel == 'A' and gain_A == 64: self._offset_A_64 = offset return elif channel == 'B': self._offset_B = offset return elif channel == '': if self._current_channel == 'A' and self._gain_channel_A == 128: self._offset_A_128 = offset return elif self._current_channel == 'A' and self._gain_channel_A == 64: self._offset_A_64 = offset return else: self._offset_B = offset return else: raise ValueError('Parameter "channel" has to be "A" or "B". ' 'Received: {}'.format(channel)) else: raise TypeError('Parameter "offset" has to be integer. 
' 'Received: ' + str(offset) + '\n') def set_scale_ratio(self, scale_ratio, channel='', gain_A=0): channel = channel.capitalize() if isinstance(gain_A, int): if channel == 'A' and gain_A == 128: self._scale_ratio_A_128 = scale_ratio return elif channel == 'A' and gain_A == 64: self._scale_ratio_A_64 = scale_ratio return elif channel == 'B': self._scale_ratio_B = scale_ratio return elif channel == '': if self._current_channel == 'A' and self._gain_channel_A == 128: self._scale_ratio_A_128 = scale_ratio return elif self._current_channel == 'A' and self._gain_channel_A == 64: self._scale_ratio_A_64 = scale_ratio return else: self._scale_ratio_B = scale_ratio return else: raise ValueError('Parameter "channel" has to be "A" or "B". ' 'received: {}'.format(channel)) else: raise TypeError('Parameter "gain_A" has to be integer. ' 'Received: ' + str(gain_A) + '\n')
BSD 3-Clause New or Revised License
jamescurtin/demo-cookiecutter-flask
my_flask_app/public/views.py
register
python
def register():
    form = RegisterForm(request.form)
    if form.validate_on_submit():
        User.create(
            username=form.username.data,
            email=form.email.data,
            password=form.password.data,
            active=True,
        )
        flash("Thank you for registering. You can now log in.", "success")
        return redirect(url_for("public.home"))
    else:
        flash_errors(form)
    return render_template("public/register.html", form=form)
Register new user.
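A hedged sketch of exercising the route with Flask's test client; `app` is a configured application object (not shown in this record), and the confirmation field name is an assumption about the RegisterForm:

client = app.test_client()
resp = client.post("/register/", data={
    "username": "demo",
    "email": "demo@example.com",
    "password": "secret123",
    "confirm": "secret123",   # confirmation field name is assumed
}, follow_redirects=True)
assert resp.status_code == 200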
https://github.com/jamescurtin/demo-cookiecutter-flask/blob/11decb79ea62c8d10d3141ba7333db85390d4ebf/my_flask_app/public/views.py#L56-L70
from flask import ( Blueprint, current_app, flash, redirect, render_template, request, url_for, ) from flask_login import login_required, login_user, logout_user from my_flask_app.extensions import login_manager from my_flask_app.public.forms import LoginForm from my_flask_app.user.forms import RegisterForm from my_flask_app.user.models import User from my_flask_app.utils import flash_errors blueprint = Blueprint("public", __name__, static_folder="../static") @login_manager.user_loader def load_user(user_id): return User.get_by_id(int(user_id)) @blueprint.route("/", methods=["GET", "POST"]) def home(): form = LoginForm(request.form) current_app.logger.info("Hello from the home page!") if request.method == "POST": if form.validate_on_submit(): login_user(form.user) flash("You are logged in.", "success") redirect_url = request.args.get("next") or url_for("user.members") return redirect(redirect_url) else: flash_errors(form) return render_template("public/home.html", form=form) @blueprint.route("/logout/") @login_required def logout(): logout_user() flash("You are logged out.", "info") return redirect(url_for("public.home")) @blueprint.route("/register/", methods=["GET", "POST"])
MIT License
andrewtavis/causeinfer
src/causeinfer/data/download_utils.py
get_download_paths
python
def get_download_paths(file_path, file_directory="files", file_name="file"):
    if file_path is None:
        directory_path = os.path.join(os.getcwd() + "/" + file_directory)
        file_path = os.path.join(directory_path + "/" + file_name)
    else:
        directory_path = file_path.split("/")[0]
        file_path = file_path

    return directory_path, file_path
Derives paths for a file folder and a file.

Parameters
----------
file_path : str
    A user specified path that the data should go to.

file_directory : str (default=files)
    A user specified directory.

file_name : str (default=file)
    The name to call the file.

Returns
-------
directory_path, file_path : str, str
    The path of the directory and the path of the file within it.
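A hedged illustration of the two branches; the paths are invented:

# No path given: defaults under the current working directory.
get_download_paths(None, file_directory="files", file_name="data.zip")
# -> ("<cwd>/files", "<cwd>/files/data.zip")

# Explicit path: the directory is taken as the first path component.
get_download_paths("datasets/data.csv")
# -> ("datasets", "datasets/data.csv")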
https://github.com/andrewtavis/causeinfer/blob/19cb098e162f4b711f2681bad21f303e8dc65db7/src/causeinfer/data/download_utils.py#L67-L89
import os import urllib import zipfile import requests def download_file(url: str, output_path: str, zip_file=False): print("Attempting to download file to '{}'...".format(output_path)) headers = { "User-Agent": "Mozilla/5.0 (Macintosh; Intel Mac OS X 10_11_5) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/50.0.2661.102 Safari/537.36" } res = requests.get(url, headers=headers) status_code = int(res.status_code) if status_code == 200: if zip_file == True: file = urllib.request.urlretrieve(url, output_path) with zipfile.ZipFile(output_path, "r") as zip_ref: print("Unzipping '{}'...".format(output_path)) zip_ref.extractall(output_path.split(".zip")[0]) os.remove(output_path) print("File unzipped - deleting .zip file") print("Download complete") else: with open(output_path, "wb") as file: for chunk in res: file.write(chunk) print("Download complete") elif status_code == 404: raise Exception("Wrong URL: " + url) elif status_code == 403: raise Exception("Forbidden URL: " + url)
BSD 3-Clause New or Revised License
vector-ai/vectorhub
vectorhub/encoders/text/sentence_transformers/sentence_auto_transformers.py
SentenceTransformer2Vec.get_list_of_urls
python
def get_list_of_urls(self):
    return self.urls
Return the list of URLs.
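A hedged usage sketch; the model name comes from the LIST_OF_URLS table in the context, and instantiating the encoder downloads the weights on first use:

enc = SentenceTransformer2Vec("distilroberta-base-paraphrase-v1")
models = enc.get_list_of_urls()
print(len(models), enc.vector_length)   # number of known models, 768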
https://github.com/vector-ai/vectorhub/blob/17c2f342cef2ff7bcc02c8f3914e79ad92071a9e/vectorhub/encoders/text/sentence_transformers/sentence_auto_transformers.py#L73-L77
import warnings from typing import List from datetime import date from ....base import catch_vector_errors from ....doc_utils import ModelDefinition from ....import_utils import * from ....models_dict import MODEL_REQUIREMENTS from ..base import BaseText2Vec is_all_dependency_installed(MODEL_REQUIREMENTS['encoders-text-sentence-transformers']) try: from sentence_transformers import SentenceTransformer from sentence_transformers import SentenceTransformer from sentence_transformers import models, datasets, losses import gzip from torch.utils.data import DataLoader import numpy as np import pandas as pd from tqdm.auto import tqdm import nltk except: import traceback traceback.print_exc() SentenceTransformerModelDefinition = ModelDefinition(markdown_filepath='encoders/text/sentence_transformers/sentence_auto_transformers.md') LIST_OF_URLS = { 'distilroberta-base-paraphrase-v1' : {"vector_length": 768}, 'xlm-r-distilroberta-base-paraphrase-v1' : {"vector_length": 768}, "paraphrase-xlm-r-multilingual-v1": {"vector_length": 768}, 'distilbert-base-nli-stsb-mean-tokens' : {"vector_length": 768}, 'bert-large-nli-stsb-mean-tokens' : {"vector_length": 1024}, 'roberta-base-nli-stsb-mean-tokens' : {"vector_length": 768}, 'roberta-large-nli-stsb-mean-tokens' : {"vector_length": 1024}, 'distilbert-base-nli-stsb-quora-ranking' : {"vector_length": 768}, 'distilbert-multilingual-nli-stsb-quora-ranking' : {"vector_length": 768}, 'distilroberta-base-msmarco-v1' : {"vector_length": 768}, 'distiluse-base-multilingual-cased-v2' : {"vector_length": 512}, 'xlm-r-bert-base-nli-stsb-mean-tokens' : {"vector_length": 768}, 'bert-base-wikipedia-sections-mean-tokens' : {"vector_length": 768}, 'LaBSE' : {"vector_length": 768}, 'average_word_embeddings_glove.6B.300d' : {"vector_length": 300}, 'average_word_embeddings_komninos' : {"vector_length": 300}, 'average_word_embeddings_levy_dependency' : {"vector_length": 768}, 'average_word_embeddings_glove.840B.300d' : {"vector_length": 300}, 'paraphrase-xlm-r-multilingual-v1': {"vector_length": 768}, } __doc__ = SentenceTransformerModelDefinition.create_docs() class SentenceTransformer2Vec(BaseText2Vec): definition = SentenceTransformerModelDefinition urls = LIST_OF_URLS def __init__(self, model_name: str): self.model_name = model_name self.urls = LIST_OF_URLS self.validate_model_url(model_name, LIST_OF_URLS) if model_name in LIST_OF_URLS: self.vector_length = LIST_OF_URLS[model_name]["vector_length"] else: self.vector_length = None warnings.warn("Not included in the official model repository. Please specify set the vector length attribute.") self.model = SentenceTransformer(model_name)
Apache License 2.0
benvanwerkhoven/kernel_tuner
kernel_tuner/opencl.py
OpenCLFunctions.run_kernel
python
def run_kernel(self, func, gpu_args, threads, grid):
    global_size = (grid[0]*threads[0], grid[1]*threads[1], grid[2]*threads[2])
    local_size = threads
    event = func(self.queue, global_size, local_size, *gpu_args)
    event.wait()
runs the OpenCL kernel passed as 'func'

:param func: An OpenCL Kernel
:type func: pyopencl.Kernel

:param gpu_args: A list of arguments to the kernel, order should match the
    order in the code. Allowed values are either variables in global memory
    or single values passed by value.
:type gpu_args: list( pyopencl.Buffer, numpy.int32, ...)

:param threads: A tuple listing the number of work items in each dimension of
    the work group.
:type threads: tuple(int, int, int)

:param grid: A tuple listing the number of work groups in each dimension of
    the NDRange.
:type grid: tuple(int, int)
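A hedged sketch of driving run_kernel directly; the sizes are made up, an OpenCL device and pyopencl are required, and `func` would normally come from dev.compile(kernel_instance):

import numpy as np

dev = OpenCLFunctions(device=0, platform=0)
a = np.arange(1024, dtype=np.float32)
gpu_args = dev.ready_argument_list([a, np.int32(len(a))])

# Launch geometry: 4 work-groups of 256 work-items in x.
threads = (256, 1, 1)
grid = (4, 1, 1)
# dev.run_kernel(func, gpu_args, threads, grid)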
https://github.com/benvanwerkhoven/kernel_tuner/blob/1fb183b7719ddb4e211428231e7936c3194f1433/kernel_tuner/opencl.py#L171-L193
from __future__ import print_function import time import numpy as np from kernel_tuner.observers import BenchmarkObserver try: import pyopencl as cl except ImportError: cl = None class OpenCLObserver(BenchmarkObserver): def __init__(self, dev): self.dev = dev self.times = [] def after_finish(self): event = self.dev.event self.times.append((event.profile.end - event.profile.start)*1e-6) def get_results(self): results = {"time": np.average(self.times), "times": self.times.copy()} self.times = [] return results class OpenCLFunctions(): def __init__(self, device=0, platform=0, iterations=7, compiler_options=None, observers=None): if not cl: raise ImportError("Error: pyopencl not installed, please install e.g. using 'pip install pyopencl'.") self.iterations = iterations platforms = cl.get_platforms() self.ctx = cl.Context(devices=[platforms[platform].get_devices()[device]]) self.queue = cl.CommandQueue(self.ctx, properties=cl.command_queue_properties.PROFILING_ENABLE) self.mf = cl.mem_flags self.max_threads = self.ctx.devices[0].get_info(cl.device_info.MAX_WORK_GROUP_SIZE) self.compiler_options = compiler_options or [] self.observers = observers or [] self.observers.append(OpenCLObserver(self)) self.event = None for obs in self.observers: obs.register_device(self) dev = self.ctx.devices[0] env = dict() env["platform_name"] = dev.platform.name env["platform_version"] = dev.platform.version env["device_name"] = dev.name env["device_version"] = dev.version env["opencl_c_version"] = dev.opencl_c_version env["driver_version"] = dev.driver_version env["iterations"] = self.iterations env["compiler_options"] = compiler_options self.env = env self.name = dev.name def __enter__(self): return self def __exit__(self, *exc): pass def ready_argument_list(self, arguments): gpu_args = [] for arg in arguments: if isinstance(arg, np.ndarray): gpu_args.append(cl.Buffer(self.ctx, self.mf.READ_WRITE | self.mf.COPY_HOST_PTR, hostbuf=arg)) else: gpu_args.append(arg) return gpu_args def compile(self, kernel_instance): prg = cl.Program(self.ctx, kernel_instance.kernel_string).build(options=self.compiler_options) func = getattr(prg, kernel_instance.name) return func def benchmark(self, func, gpu_args, threads, grid): result = dict() global_size = (grid[0]*threads[0], grid[1]*threads[1], grid[2]*threads[2]) local_size = threads for _ in range(self.iterations): for obs in self.observers: obs.before_start() self.queue.finish() self.event = func(self.queue, global_size, local_size, *gpu_args) for obs in self.observers: obs.after_start() while self.event.get_info(cl.event_info.COMMAND_EXECUTION_STATUS) != 0: for obs in self.observers: obs.during() time.sleep(1e-6) self.event.wait() for obs in self.observers: obs.after_finish() for obs in self.observers: result.update(obs.get_results()) return result
Apache License 2.0
opendilab/di-star
ctools/pysc2/lib/actions.py
raw_cmd_pt
python
def raw_cmd_pt(action, ability_id, queued, unit_tags, world):
    action_cmd = action.action_raw.unit_command
    action_cmd.ability_id = ability_id
    action_cmd.queue_command = queued
    if not isinstance(unit_tags, (tuple, list)):
        unit_tags = [unit_tags]
    action_cmd.unit_tags.extend(unit_tags)
    world.assign_to(action_cmd.target_world_space_pos)
Issue a raw command for the given units, targeted at a point in world space.
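A hedged sketch of how this helper might be filled in; the ability id, unit tag and coordinates are invented, and `sc_pb` is the s2clientprotocol action proto:

from s2clientprotocol import sc2api_pb2 as sc_pb
from ctools.pysc2.lib import point

action = sc_pb.Action()
target = point.Point(32.0, 28.5)            # world-space coordinates (invented)
raw_cmd_pt(action, ability_id=3674,         # ability id (invented for illustration)
           queued=False,
           unit_tags=[4350279681],          # unit tag (invented)
           world=target)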
https://github.com/opendilab/di-star/blob/f12d79403488e7df0498d7b116fc23a67506112b/ctools/pysc2/lib/actions.py#L171-L179
import collections import numbers import enum import numpy import six from ctools.pysc2.lib import point from s2clientprotocol import spatial_pb2 as sc_spatial from s2clientprotocol import ui_pb2 as sc_ui class ActionSpace(enum.Enum): FEATURES = 1 RGB = 2 RAW = 3 def spatial(action, action_space): if action_space == ActionSpace.FEATURES: return action.action_feature_layer elif action_space == ActionSpace.RGB: return action.action_render else: raise ValueError("Unexpected value for action_space: %s" % action_space) def no_op(action, action_space): del action, action_space def move_camera(action, action_space, minimap): minimap.assign_to(spatial(action, action_space).camera_move.center_minimap) def select_point(action, action_space, select_point_act, screen): select = spatial(action, action_space).unit_selection_point screen.assign_to(select.selection_screen_coord) select.type = select_point_act def select_rect(action, action_space, select_add, screen, screen2): select = spatial(action, action_space).unit_selection_rect out_rect = select.selection_screen_coord.add() screen_rect = point.Rect(screen, screen2) screen_rect.tl.assign_to(out_rect.p0) screen_rect.br.assign_to(out_rect.p1) select.selection_add = bool(select_add) def select_idle_worker(action, action_space, select_worker): del action_space action.action_ui.select_idle_worker.type = select_worker def select_army(action, action_space, select_add): del action_space action.action_ui.select_army.selection_add = select_add def select_warp_gates(action, action_space, select_add): del action_space action.action_ui.select_warp_gates.selection_add = select_add def select_larva(action, action_space): del action_space action.action_ui.select_larva.SetInParent() def select_unit(action, action_space, select_unit_act, select_unit_id): del action_space select = action.action_ui.multi_panel select.type = select_unit_act select.unit_index = select_unit_id def control_group(action, action_space, control_group_act, control_group_id): del action_space select = action.action_ui.control_group select.action = control_group_act select.control_group_index = control_group_id def unload(action, action_space, unload_id): del action_space action.action_ui.cargo_panel.unit_index = unload_id def build_queue(action, action_space, build_queue_id): del action_space action.action_ui.production_panel.unit_index = build_queue_id def cmd_quick(action, action_space, ability_id, queued): action_cmd = spatial(action, action_space).unit_command action_cmd.ability_id = ability_id action_cmd.queue_command = queued def cmd_screen(action, action_space, ability_id, queued, screen): action_cmd = spatial(action, action_space).unit_command action_cmd.ability_id = ability_id action_cmd.queue_command = queued screen.assign_to(action_cmd.target_screen_coord) def cmd_minimap(action, action_space, ability_id, queued, minimap): action_cmd = spatial(action, action_space).unit_command action_cmd.ability_id = ability_id action_cmd.queue_command = queued minimap.assign_to(action_cmd.target_minimap_coord) def autocast(action, action_space, ability_id): del action_space action.action_ui.toggle_autocast.ability_id = ability_id def raw_no_op(action): del action def raw_move_camera(action, world): action_cmd = action.action_raw.camera_move world.assign_to(action_cmd.center_world_space) def raw_cmd(action, ability_id, queued, unit_tags): action_cmd = action.action_raw.unit_command action_cmd.ability_id = ability_id action_cmd.queue_command = queued if not isinstance(unit_tags, (tuple, list)): 
unit_tags = [unit_tags] action_cmd.unit_tags.extend(unit_tags)
Apache License 2.0
google-research/pyreach
pyreach/internal.py
Timer.calls
python
def calls(self) -> int:
    with self._lock:
        return self._calls
Return the number of timer calls.
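A small hedged example (the timer name is arbitrary):

timer = Timer("grab_image")
print(timer.calls)   # 0 until the timer has been used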
https://github.com/google-research/pyreach/blob/83cac8e235ba1392dcdc6b8d19202c3eff3ad9a6/pyreach/internal.py#L91-L94
import os import threading import time from typing import Any, Callable, Dict, FrozenSet, List, Optional, Set, TextIO, Tuple import numpy as np from pyreach import core from pyreach.common.python import types_gen class Timer(object): def __init__(self, name: str, get_time: Callable[[], float] = time.time) -> None: self.name: str = name self._calls: int = 0 self._duration: float = 0.0 self._lock: threading.Lock = threading.Lock() self._start_time: float = 0.0 self._get_time: Callable[[], float] = get_time self.parent: Optional[Timer] = None @property
Apache License 2.0
packtpublishing/hands-on-deep-learning-architectures-with-python
Chapter03/rbm.py
RBM._gibbs_sampling
python
def _gibbs_sampling(self, v):
    v0 = v
    prob_h_v0 = self._prob_h_given_v(v0)

    vk = v
    prob_h_vk = prob_h_v0

    for _ in range(self.k):
        hk = self._bernoulli_sampling(prob_h_vk)
        prob_v_hk = self._prob_v_given_h(hk)
        vk = self._bernoulli_sampling(prob_v_hk)
        prob_h_vk = self._prob_h_given_v(vk)

    return v0, prob_h_v0, vk, prob_h_vk
Gibbs sampling
@param v: visible layer
@return: visible vector before Gibbs sampling,
         conditional probability P(h|v) before Gibbs sampling,
         visible vector after Gibbs sampling,
         conditional probability P(h|v) after Gibbs sampling
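A hedged sketch of wiring the sampler into the CD-k gradient step shown in the context; this is TF1-style graph code matching the module, and the sizes are illustrative:

import tensorflow as tf

rbm = RBM(num_v=784, num_h=256, batch_size=64, learning_rate=0.01, num_epoch=10, k=2)
v = tf.placeholder(tf.float32, shape=[None, 784])

v0, prob_h_v0, vk, prob_h_vk = rbm._gibbs_sampling(v)
W_grad, a_grad, b_grad = rbm._compute_gradients(v0, prob_h_v0, vk, prob_h_vk)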
https://github.com/packtpublishing/hands-on-deep-learning-architectures-with-python/blob/61ae6aea8618093c7abf44c2fe00b3d1e6e2d3c8/Chapter03/rbm.py#L67-L85
import numpy as np import tensorflow as tf class RBM(object): def __init__(self, num_v, num_h, batch_size, learning_rate, num_epoch, k=2): self.num_v = num_v self.num_h = num_h self.batch_size = batch_size self.learning_rate = learning_rate self.num_epoch = num_epoch self.k = k self.W, self.a, self.b = self._init_parameter() def _init_parameter(self): abs_val = np.sqrt(2.0 / (self.num_h + self.num_v)) W = tf.get_variable('weights', shape=(self.num_v, self.num_h), initializer=tf.random_uniform_initializer(minval=-abs_val, maxval=abs_val)) a = tf.get_variable('visible_bias', shape=(self.num_v), initializer=tf.zeros_initializer()) b = tf.get_variable('hidden_bias', shape=(self.num_h), initializer=tf.zeros_initializer()) return W, a, b def _prob_v_given_h(self, h): return tf.sigmoid(tf.add(self.a, tf.matmul(h, tf.transpose(self.W)))) def _prob_h_given_v(self, v): return tf.sigmoid(tf.add(self.b, tf.matmul(v, self.W))) def _bernoulli_sampling(self, prob): distribution = tf.distributions.Bernoulli(probs=prob, dtype=tf.float32) return tf.cast(distribution.sample(), tf.float32) def _compute_gradients(self, v0, prob_h_v0, vk, prob_h_vk): outer_product0 = tf.matmul(tf.transpose(v0), prob_h_v0) outer_productk = tf.matmul(tf.transpose(vk), prob_h_vk) W_grad = tf.reduce_mean(outer_product0 - outer_productk, axis=0) a_grad = tf.reduce_mean(v0 - vk, axis=0) b_grad = tf.reduce_mean(prob_h_v0 - prob_h_vk, axis=0) return W_grad, a_grad, b_grad
MIT License
dingmyu/hr-nas
utils/optim.py
get_lr_scheduler
python
def get_lr_scheduler(optimizer, FLAGS, last_epoch=-1):
    stepwise = FLAGS.get('lr_stepwise', True)
    steps_per_epoch = FLAGS._steps_per_epoch
    warmup_iterations = FLAGS.get('epoch_warmup', 5) * steps_per_epoch
    use_warmup = FLAGS.lr > FLAGS.base_lr

    def warmup_wrap(lr_lambda, i):
        if use_warmup and i <= warmup_iterations:
            warmup_lr_ratio = FLAGS.base_lr / FLAGS.lr
            return warmup_lr_ratio + i / warmup_iterations * (1 - warmup_lr_ratio)
        else:
            return lr_lambda(i)

    if FLAGS.lr_scheduler == 'multistep':
        if use_warmup:
            raise NotImplementedError('Warmup not implemented for multistep')
        lr_scheduler = torch.optim.lr_scheduler.MultiStepLR(
            optimizer,
            milestones=[
                steps_per_epoch * val for val in FLAGS.multistep_lr_milestones
            ],
            gamma=FLAGS.multistep_lr_gamma)
    elif FLAGS.lr_scheduler == 'exp_decaying' or FLAGS.lr_scheduler == 'exp_decaying_trunc':
        def aux(i, trunc_to_constant=0.0):
            decay_interval = steps_per_epoch * FLAGS.exp_decay_epoch_interval
            if not stepwise:
                i = (i // decay_interval) * decay_interval
            res = FLAGS.exp_decaying_lr_gamma ** (i / decay_interval)
            return res if res > trunc_to_constant else trunc_to_constant

        if 'trunc' in FLAGS.lr_scheduler:
            trunc_to_constant = 0.05
        else:
            trunc_to_constant = 0.0
        lr_lambda = functools.partial(
            warmup_wrap, functools.partial(aux, trunc_to_constant=trunc_to_constant))
        lr_scheduler = torch.optim.lr_scheduler.LambdaLR(optimizer,
                                                         lr_lambda=lr_lambda,
                                                         last_epoch=last_epoch)
    elif FLAGS.lr_scheduler == 'linear_decaying':
        assert stepwise
        lr_lambda = functools.partial(
            warmup_wrap, lambda i: 1 - i / (FLAGS.num_epochs * steps_per_epoch))
        lr_scheduler = torch.optim.lr_scheduler.LambdaLR(optimizer,
                                                         lr_lambda=lr_lambda)
    elif FLAGS.lr_scheduler == 'onecycle':
        div_factor = FLAGS.get('div_factor', 25)
        lr_scheduler = torch.optim.lr_scheduler.OneCycleLR(optimizer,
                                                           max_lr=FLAGS.lr,
                                                           total_steps=FLAGS.num_epochs * steps_per_epoch,
                                                           last_epoch=last_epoch,
                                                           div_factor=div_factor)
    elif FLAGS.lr_scheduler == 'poly':
        return None
    else:
        raise NotImplementedError(
            'Learning rate scheduler {} is not yet implemented.'.format(
                FLAGS.lr_scheduler))
    return lr_scheduler
Get learning rate scheduler.
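A minimal, self-contained sketch of what the 'exp_decaying' branch with warmup builds, written in plain PyTorch; the flag values below are illustrative only, and the FLAGS plumbing is replaced by local variables:

import torch

steps_per_epoch = 100
warmup_iterations = 5 * steps_per_epoch      # FLAGS.get('epoch_warmup', 5) * steps_per_epoch
base_lr, peak_lr = 0.1, 0.5                  # warmup active because peak_lr > base_lr
gamma, decay_interval = 0.97, steps_per_epoch

def lr_lambda(i):
    # linear warmup from base_lr/peak_lr up to 1, then per-interval exponential decay
    if i <= warmup_iterations:
        ratio = base_lr / peak_lr
        return ratio + i / warmup_iterations * (1 - ratio)
    return gamma ** (i / decay_interval)

model = torch.nn.Linear(4, 2)
optimizer = torch.optim.SGD(model.parameters(), lr=peak_lr)
scheduler = torch.optim.lr_scheduler.LambdaLR(optimizer, lr_lambda=lr_lambda)

for step in range(2 * steps_per_epoch):      # the scheduler is stepped once per iteration
    optimizer.step()
    scheduler.step()
print(scheduler.get_last_lr())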
https://github.com/dingmyu/hr-nas/blob/003c3b6bd0168751c884b6999ffc8c13b36a39e2/utils/optim.py#L264-L327
from __future__ import division import copy from collections import OrderedDict import logging import functools import importlib import warnings import torch from torch import nn from utils.rmsprop import RMSprop from utils.adamw import AdamW from utils.adam import Adam def poly_learning_rate(optimizer, base_lr, curr_iter, max_iter, power=0.9, min_lr=1e-4, index_split=4, scale_lr=10.0): lr = (base_lr - min_lr) * (1 - float(curr_iter) / max_iter) ** power + min_lr for param_group in optimizer.param_groups: param_group['lr'] = lr class ExponentialMovingAverage(nn.Module): def __init__(self, momentum, zero_debias=False): if zero_debias: raise NotImplementedError('zero_debias') if momentum < 0.0 or momentum > 1.0: raise ValueError("Invalid momentum value: {}".format(momentum)) super(ExponentialMovingAverage, self).__init__() self._momentum = momentum self._zero_debias = zero_debias self.clear() def _check_exist(self, name): if name not in self._shadow: raise RuntimeError('{} has not been registered'.format(name)) def register(self, name, val, zero_init=False): if name in self._shadow: raise ValueError('Should not register twice for {}'.format(name)) if val.dtype not in [torch.float16, torch.float32, torch.float64]: raise TypeError( 'The variables must be half, float, or double: {}'.format(name)) if zero_init: self._shadow[name] = torch.zeros_like(val) else: self._shadow[name] = val.detach().clone() self._info[name] = { 'num_updates': 0, 'last_momemtum': None, 'zero_init': zero_init, 'compress_masked': False, } def forward(self, name, x, num_updates=None): self._check_exist(name) if num_updates is None: momentum = self._momentum else: momentum = min(self._momentum, (1.0 + num_updates) / (10.0 + num_updates)) self._info[name]['num_updates'] += 1 self._info[name]['last_momemtum'] = momentum return self._shadow[name].mul_(momentum).add_(1.0 - momentum, x.detach()) def clear(self): self._shadow = OrderedDict() self._info = OrderedDict() def pop(self, name): self._check_exist(name) val = self._shadow.pop(name) info = self._info.pop(name) return val, info def average_names(self): return list(self._shadow.keys()) def average(self, name): self._check_exist(name) return self._shadow[name] def state_dict(self): return { 'info': self._info, 'shadow': self._shadow, 'param': { 'momentum': self._momentum, 'zero_debias': self._zero_debias } } def load_state_dict(self, state_dict): params = state_dict['param'] for key, val in params.items(): cur_val = getattr(self, '_{}'.format(key)) if val != cur_val: warning_str = 'EMA {} mismatch: current {} vs previous {}'.format( key, cur_val, val) warnings.warn(warning_str, RuntimeWarning) logging.warning(warning_str) self._shadow = copy.deepcopy(state_dict['shadow']) self._info = copy.deepcopy(state_dict['info']) def to(self, *args, **kwargs): device, dtype, non_blocking = torch._C._nn._parse_to(*args, **kwargs) for k in list(self._shadow.keys()): v = self._shadow[k] self._shadow[k] = v.to(device, dtype if v.is_floating_point() else None, non_blocking) return self def compress_start(self): for val in self._info.values(): val['compress_masked'] = False def compress_mask(self, info, verbose=False): var_old_name = info['var_old_name'] var_new_name = info['var_new_name'] var_new = info['var_new'] mask_hook = info['mask_hook'] mask = info['mask'] if verbose: logging.info('EMA compress: {} -> {}'.format(var_old_name, var_new_name)) if self._info[var_old_name]['compress_masked']: raise RuntimeError('May have dependencies in compress') if var_new_name in self._info and 
self._info[var_new_name]['compress_masked']: raise RuntimeError('Compress {} twice'.format(var_new_name)) ema_old = self._shadow.pop(var_old_name) ema_new = torch.zeros_like(var_new, device=ema_old.device) mask_hook(ema_new, ema_old, mask) self._info[var_new_name] = self._info.pop(var_old_name) self._info[var_new_name]['compress_masked'] = True self._shadow[var_new_name] = ema_new def compress_drop(self, info, verbose=False): name = info['var_old_name'] if verbose: logging.info('EMA drop: {}'.format(name)) self._check_exist(name) if self._info[name]['compress_masked']: if verbose: logging.info('EMA drop: {} skipped'.format(name)) else: return self.pop(name) @staticmethod def adjust_momentum(momentum, steps_multi): return momentum ** (1.0 / steps_multi) class CrossEntropyLabelSmooth(nn.Module): def __init__(self, num_classes, label_smoothing, reduction='none'): super(CrossEntropyLabelSmooth, self).__init__() self.num_classes = num_classes self.label_smoothing = label_smoothing self.logsoftmax = nn.LogSoftmax(dim=1) if reduction == 'none': fun = lambda x: x elif reduction == 'mean': fun = torch.mean elif reduction == 'sum': fun = torch.sum else: raise ValueError('Unknown reduction: {}'.format(reduction)) self.reduce_fun = fun def forward(self, inputs, targets): assert inputs.size(1) == self.num_classes log_probs = self.logsoftmax(inputs) targets = torch.zeros_like(log_probs).scatter_(1, targets.unsqueeze(1), 1) targets = (1 - self.label_smoothing ) * targets + self.label_smoothing / self.num_classes loss = torch.sum(-targets * log_probs, 1) return self.reduce_fun(loss) def cal_l2_loss(model, weight_decay, method): loss = 0.0 if method == 'slimmable': for params in model.parameters(): ps = list(params.size()) if len(ps) == 4 and ps[1] != 1: _weight_decay = weight_decay elif len(ps) == 2: _weight_decay = weight_decay else: _weight_decay = 0 loss += _weight_decay * (params ** 2).sum() elif method == 'mnas': classifier_bias_count = 0 weight_decay_map = dict() for name, params in model.named_parameters(): ps = list(params.size()) if len(ps) == 4 or len(ps) == 2: weight_decay_map[name] = weight_decay, params else: assert len(ps) == 1 if 'classifier' in name: weight_decay_map[name] = weight_decay, params classifier_bias_count += 1 else: weight_decay_map[name] = 0.0, params assert classifier_bias_count == 1 for _weight_decay, params in weight_decay_map.values(): loss += _weight_decay * (params ** 2).sum() else: raise ValueError('Unknown weight_decay method: {}'.format(method)) return loss * 0.5
MIT License
vertexproject/synapse
synapse/lib/cell.py
CellApi.iterBackupArchive
python
async def iterBackupArchive(self, name):
    await self.cell.iterBackupArchive(name, user=self.user)
    if False:
        yield
Retrieve a backup by name as a compressed stream of bytes.

Note: Compression and streaming will occur from a separate process.

Args:
    name (str): The name of the backup to retrieve.
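A hedged sketch of consuming this stream from a client over telepath; the service URL and backup name are placeholders, and a reachable Synapse service exposing this CellApi is assumed:

import asyncio
import synapse.telepath as s_telepath

async def save_backup(url='tcp://user:passwd@127.0.0.1:27492/', name='backup000'):
    # each item yielded by the proxy is a chunk of the gzipped tar stream
    async with await s_telepath.openurl(url) as proxy:
        with open(f'{name}.tar.gz', 'wb') as fd:
            async for byts in proxy.iterBackupArchive(name):
                fd.write(byts)

# asyncio.run(save_backup())  # uncomment with a real URL and backup name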
https://github.com/vertexproject/synapse/blob/a9d62ffacd9cc236ac52f92a734deef55c66ecf3/synapse/lib/cell.py#L675-L688
import os import ssl import time import shutil import socket import asyncio import logging import tarfile import argparse import datetime import platform import functools import contextlib import multiprocessing import tornado.web as t_web import synapse.exc as s_exc import synapse.common as s_common import synapse.daemon as s_daemon import synapse.telepath as s_telepath import synapse.lib.base as s_base import synapse.lib.boss as s_boss import synapse.lib.coro as s_coro import synapse.lib.hive as s_hive import synapse.lib.link as s_link import synapse.lib.const as s_const import synapse.lib.nexus as s_nexus import synapse.lib.scope as s_scope import synapse.lib.config as s_config import synapse.lib.health as s_health import synapse.lib.output as s_output import synapse.lib.certdir as s_certdir import synapse.lib.dyndeps as s_dyndeps import synapse.lib.httpapi as s_httpapi import synapse.lib.urlhelp as s_urlhelp import synapse.lib.version as s_version import synapse.lib.hiveauth as s_hiveauth import synapse.lib.lmdbslab as s_lmdbslab import synapse.lib.thisplat as s_thisplat import synapse.tools.backup as s_t_backup logger = logging.getLogger(__name__) SLAB_MAP_SIZE = 128 * s_const.mebibyte def adminapi(log=False): def decrfunc(func): @functools.wraps(func) def wrapped(*args, **kwargs): if args[0].user is not None and not args[0].user.isAdmin(): raise s_exc.AuthDeny(mesg='User is not an admin.', user=args[0].user.name) if log: logger.info('Executing [%s] as [%s] with args [%s][%s]', func.__qualname__, args[0].user.name, args[1:], kwargs) return func(*args, **kwargs) wrapped.__syn_wrapped__ = 'adminapi' return wrapped return decrfunc async def _doIterBackup(path, chunksize=1024): output_filename = path + '.tar.gz' link0, file1 = await s_link.linkfile() def dowrite(fd): with tarfile.open(output_filename, 'w|gz', fileobj=fd) as tar: tar.add(path, arcname=os.path.basename(path)) fd.close() coro = s_coro.executor(dowrite, file1) while True: byts = await link0.recv(chunksize) if not byts: break yield byts await coro await link0.fini() async def _iterBackupWork(path, linkinfo): logger.info(f'Getting backup streaming link for [{path}].') link = await s_link.fromspawn(linkinfo) await s_daemon.t2call(link, _doIterBackup, (path,), {}) await link.fini() logger.info(f'Backup streaming for [{path}] completed.') def _iterBackupProc(path, linkinfo): s_common.setlogging(logger, **linkinfo.get('logconf')) logger.info(f'Backup streaming process for [{path}] starting.') asyncio.run(_iterBackupWork(path, linkinfo)) class CellApi(s_base.Base): async def __anit__(self, cell, link, user): await s_base.Base.__anit__(self) self.cell = cell self.link = link assert user self.user = user self.sess = self.link.get('sess') self.sess.user = user await self.initCellApi() async def initCellApi(self): pass async def allowed(self, perm, default=None): return self.user.allowed(perm, default=default) async def _reqUserAllowed(self, perm): if not await self.allowed(perm): perm = '.'.join(perm) mesg = f'User must have permission {perm}' raise s_exc.AuthDeny(mesg=mesg, perm=perm, user=self.user.name) def getCellType(self): return self.cell.getCellType() def getCellIden(self): return self.cell.getCellIden() async def isCellActive(self): return await self.cell.isCellActive() @adminapi() def getNexsIndx(self): return self.cell.getNexsIndx() @adminapi(log=True) async def cullNexsLog(self, offs): return await self.cell.cullNexsLog(offs) @adminapi(log=True) async def rotateNexsLog(self): return await self.cell.rotateNexsLog() 
@adminapi(log=True) async def trimNexsLog(self, consumers=None, timeout=60): return await self.cell.trimNexsLog(consumers=consumers, timeout=timeout) @adminapi() async def waitNexsOffs(self, offs, timeout=None): return await self.cell.waitNexsOffs(offs, timeout=timeout) @adminapi() async def promote(self): return await self.cell.promote() def getCellUser(self): return self.user.pack() async def getCellInfo(self): return await self.cell.getCellInfo() @adminapi() async def getSystemInfo(self): return await self.cell.getSystemInfo() def setCellUser(self, iden): if not self.user.isAdmin(): mesg = 'setCellUser() caller must be admin.' raise s_exc.AuthDeny(mesg=mesg) user = self.cell.auth.user(iden) if user is None: raise s_exc.NoSuchUser(iden=iden) self.user = user self.link.get('sess').user = user return True async def ps(self): return await self.cell.ps(self.user) async def kill(self, iden): return await self.cell.kill(self.user, iden) @adminapi(log=True) async def addUser(self, name, passwd=None, email=None, iden=None): return await self.cell.addUser(name, passwd=passwd, email=email, iden=iden) @adminapi(log=True) async def delUser(self, iden): return await self.cell.delUser(iden) @adminapi(log=True) async def addRole(self, name): return await self.cell.addRole(name) @adminapi(log=True) async def delRole(self, iden): return await self.cell.delRole(iden) @adminapi() async def dyncall(self, iden, todo, gatekeys=()): return await self.cell.dyncall(iden, todo, gatekeys=gatekeys) @adminapi() async def dyniter(self, iden, todo, gatekeys=()): async for item in self.cell.dyniter(iden, todo, gatekeys=gatekeys): yield item @adminapi() async def issue(self, nexsiden: str, event: str, args, kwargs, meta=None): try: await self.cell.nexsroot.issue(nexsiden, event, args, kwargs, meta) except asyncio.CancelledError: raise except Exception: pass @adminapi(log=True) async def delAuthUser(self, name): await self.cell.auth.delUser(name) await self.cell.fire('user:mod', act='deluser', name=name) @adminapi(log=True) async def addAuthRole(self, name): role = await self.cell.auth.addRole(name) await self.cell.fire('user:mod', act='addrole', name=name) return role.pack() @adminapi(log=True) async def delAuthRole(self, name): await self.cell.auth.delRole(name) await self.cell.fire('user:mod', act='delrole', name=name) @adminapi() async def getAuthUsers(self, archived=False): return await self.cell.getAuthUsers(archived=archived) @adminapi() async def getAuthRoles(self): return await self.cell.getAuthRoles() @adminapi(log=True) async def addUserRule(self, iden, rule, indx=None, gateiden=None): return await self.cell.addUserRule(iden, rule, indx=indx, gateiden=gateiden) @adminapi(log=True) async def setUserRules(self, iden, rules, gateiden=None): return await self.cell.setUserRules(iden, rules, gateiden=gateiden) @adminapi(log=True) async def setRoleRules(self, iden, rules, gateiden=None): return await self.cell.setRoleRules(iden, rules, gateiden=gateiden) @adminapi(log=True) async def addRoleRule(self, iden, rule, indx=None, gateiden=None): return await self.cell.addRoleRule(iden, rule, indx=indx, gateiden=gateiden) @adminapi(log=True) async def delUserRule(self, iden, rule, gateiden=None): return await self.cell.delUserRule(iden, rule, gateiden=gateiden) @adminapi(log=True) async def delRoleRule(self, iden, rule, gateiden=None): return await self.cell.delRoleRule(iden, rule, gateiden=gateiden) @adminapi(log=True) async def setUserAdmin(self, iden, admin, gateiden=None): return await self.cell.setUserAdmin(iden, 
admin, gateiden=gateiden) @adminapi() async def getAuthInfo(self, name): s_common.deprecated('getAuthInfo') user = await self.cell.auth.getUserByName(name) if user is not None: info = user.pack() info['roles'] = [self.cell.auth.role(r).name for r in info['roles']] return info role = await self.cell.auth.getRoleByName(name) if role is not None: return role.pack() raise s_exc.NoSuchName(name=name) @adminapi(log=True) async def addAuthRule(self, name, rule, indx=None, gateiden=None): s_common.deprecated('addAuthRule') item = await self.cell.auth.getUserByName(name) if item is None: item = await self.cell.auth.getRoleByName(name) await item.addRule(rule, indx=indx, gateiden=gateiden) @adminapi(log=True) async def delAuthRule(self, name, rule, gateiden=None): s_common.deprecated('delAuthRule') item = await self.cell.auth.getUserByName(name) if item is None: item = await self.cell.auth.getRoleByName(name) await item.delRule(rule, gateiden=gateiden) @adminapi(log=True) async def setAuthAdmin(self, name, isadmin): s_common.deprecated('setAuthAdmin') item = await self.cell.auth.getUserByName(name) if item is None: item = await self.cell.auth.getRoleByName(name) await item.setAdmin(isadmin) async def setUserPasswd(self, iden, passwd): await self.cell.auth.reqUser(iden) if self.user.iden == iden: self.user.confirm(('auth', 'self', 'set', 'passwd'), default=True) return await self.cell.setUserPasswd(iden, passwd) self.user.confirm(('auth', 'user', 'set', 'passwd')) return await self.cell.setUserPasswd(iden, passwd) @adminapi(log=True) async def setUserLocked(self, useriden, locked): return await self.cell.setUserLocked(useriden, locked) @adminapi(log=True) async def setUserArchived(self, useriden, archived): return await self.cell.setUserArchived(useriden, archived) @adminapi(log=True) async def setUserEmail(self, useriden, email): return await self.cell.setUserEmail(useriden, email) @adminapi(log=True) async def addUserRole(self, useriden, roleiden): return await self.cell.addUserRole(useriden, roleiden) @adminapi(log=True) async def setUserRoles(self, useriden, roleidens): return await self.cell.setUserRoles(useriden, roleidens) @adminapi(log=True) async def delUserRole(self, useriden, roleiden): return await self.cell.delUserRole(useriden, roleiden) async def getUserInfo(self, name): user = await self.cell.auth.reqUserByName(name) if self.user.isAdmin() or self.user.iden == user.iden: info = user.pack() info['roles'] = [self.cell.auth.role(r).name for r in info['roles']] return info mesg = 'getUserInfo denied for non-admin and non-self' raise s_exc.AuthDeny(mesg=mesg) async def getRoleInfo(self, name): role = await self.cell.auth.reqRoleByName(name) if self.user.isAdmin() or role.iden in self.user.info.get('roles', ()): return role.pack() mesg = 'getRoleInfo denied for non-admin and non-member' raise s_exc.AuthDeny(mesg=mesg) @adminapi() async def getUserDef(self, iden): return await self.cell.getUserDef(iden) @adminapi() async def getAuthGate(self, iden): return await self.cell.getAuthGate(iden) @adminapi() async def getAuthGates(self): return await self.cell.getAuthGates() @adminapi() async def getRoleDef(self, iden): return await self.cell.getRoleDef(iden) @adminapi() async def getUserDefByName(self, name): return await self.cell.getUserDefByName(name) @adminapi() async def getRoleDefByName(self, name): return await self.cell.getRoleDefByName(name) @adminapi() async def getUserDefs(self): return await self.cell.getUserDefs() @adminapi() async def getRoleDefs(self): return await 
self.cell.getRoleDefs() @adminapi() async def isUserAllowed(self, iden, perm, gateiden=None): return await self.cell.isUserAllowed(iden, perm, gateiden=gateiden) @adminapi() async def tryUserPasswd(self, name, passwd): return await self.cell.tryUserPasswd(name, passwd) @adminapi() async def getUserProfile(self, iden): return await self.cell.getUserProfile(iden) @adminapi() async def getUserProfInfo(self, iden, name): return await self.cell.getUserProfInfo(iden, name) @adminapi() async def setUserProfInfo(self, iden, name, valu): return await self.cell.setUserProfInfo(iden, name, valu) async def getHealthCheck(self): await self._reqUserAllowed(('health',)) return await self.cell.getHealthCheck() @adminapi() async def getDmonSessions(self): return await self.cell.getDmonSessions() @adminapi() async def listHiveKey(self, path=None): return await self.cell.listHiveKey(path=path) @adminapi() async def getHiveKeys(self, path): return await self.cell.getHiveKeys(path) @adminapi() async def getHiveKey(self, path): return await self.cell.getHiveKey(path) @adminapi(log=True) async def setHiveKey(self, path, valu): return await self.cell.setHiveKey(path, valu) @adminapi(log=True) async def popHiveKey(self, path): return await self.cell.popHiveKey(path) @adminapi(log=True) async def saveHiveTree(self, path=()): return await self.cell.saveHiveTree(path=path) @adminapi() async def getNexusChanges(self, offs): async for item in self.cell.getNexusChanges(offs): yield item @adminapi() async def runBackup(self, name=None, wait=True): return await self.cell.runBackup(name=name, wait=wait) @adminapi() async def getBackupInfo(self): return await self.cell.getBackupInfo() @adminapi() async def getBackups(self): return await self.cell.getBackups() @adminapi() async def delBackup(self, name): return await self.cell.delBackup(name) @adminapi()
Apache License 2.0
ourownstory/neural_prophet
neuralprophet/df_utils.py
split_df
python
def split_df(df, n_lags, n_forecasts, valid_p=0.2, inputs_overbleed=True, local_modeling=False):
    if isinstance(df, list):
        df_list = df.copy()
        df_train_list = list()
        df_val_list = list()
        if local_modeling:
            for df in df_list:
                df_train, df_val = _split_df(df, n_lags, n_forecasts, valid_p, inputs_overbleed)
                df_train_list.append(df_train)
                df_val_list.append(df_val)
            df_train, df_val = df_train_list, df_val_list
        else:
            threshold_time_stamp = find_time_threshold(df_list, n_lags, valid_p, inputs_overbleed)
            df_train, df_val = split_considering_timestamp(df_list, threshold_time_stamp)
    else:
        df_train, df_val = _split_df(df, n_lags, n_forecasts, valid_p, inputs_overbleed)
    return df_train, df_val
Splits a timeseries df into train and validation sets. Prevents overbleed of targets; overbleed of inputs can be configured. In the case of global modeling the split can be either local or global.

Args:
    df (pd.DataFrame or list of pd.DataFrame): data
    n_lags (int): identical to NeuralProphet
    n_forecasts (int): identical to NeuralProphet
    valid_p (float, int): fraction (0, 1) of data to use for the holdout validation set, or number of validation samples > 1
    inputs_overbleed (bool): whether to allow the last training targets to be the first validation inputs (never targets)
    local_modeling (bool): when set to True, each episode from the list of dataframes is considered locally (i.e. seasonality, data_params, normalization) - not fully implemented yet.

Returns:
    df_train (pd.DataFrame or list of pd.DataFrame): training data
    df_val (pd.DataFrame or list of pd.DataFrame): validation data
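A short usage sketch on synthetic data (the frame below is made up); split_df is assumed importable from neuralprophet.df_utils as in this file:

import numpy as np
import pandas as pd
from neuralprophet.df_utils import split_df

df = pd.DataFrame({
    "ds": pd.date_range("2021-01-01", periods=100, freq="D"),
    "y": np.random.rand(100),
})
df_train, df_val = split_df(df, n_lags=3, n_forecasts=1, valid_p=0.2)
print(len(df_train), len(df_val))  # e.g. 81 and 22; the splits overlap by n_lags rows (input overbleed)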
https://github.com/ourownstory/neural_prophet/blob/8535b8ce7e1e1c9827f20dfb9c47d3550c24f73f/neuralprophet/df_utils.py#L506-L539
from dataclasses import dataclass from collections import OrderedDict import pandas as pd import numpy as np import logging import math log = logging.getLogger("NP.df_utils") @dataclass class ShiftScale: shift: float = 0.0 scale: float = 1.0 def create_df_list(df): if isinstance(df, list): df_list = df.copy() else: df_list = [df] return df_list def join_dataframes(df_list): cont = 0 episodes = [] for i in df_list: s = ["Ep" + str(cont)] episodes = episodes + s * len(i) cont += 1 df_joined = pd.concat(df_list) return df_joined, episodes def recover_dataframes(df_joined, episodes): df_joined.insert(0, "eps", episodes) df_list = [x for _, x in df_joined.groupby("eps")] df_list = [x.drop(["eps"], axis=1) for x in df_list] return df_list def data_params_definition(df, normalize, covariates_config=None, regressor_config=None, events_config=None): data_params = OrderedDict({}) if df["ds"].dtype == np.int64: df.loc[:, "ds"] = df.loc[:, "ds"].astype(str) df.loc[:, "ds"] = pd.to_datetime(df.loc[:, "ds"]) data_params["ds"] = ShiftScale( shift=df["ds"].min(), scale=df["ds"].max() - df["ds"].min(), ) if "y" in df: data_params["y"] = get_normalization_params( array=df["y"].values, norm_type=normalize, ) if covariates_config is not None: for covar in covariates_config.keys(): if covar not in df.columns: raise ValueError("Covariate {} not found in DataFrame.".format(covar)) data_params[covar] = get_normalization_params( array=df[covar].values, norm_type=covariates_config[covar].normalize, ) if regressor_config is not None: for reg in regressor_config.keys(): if reg not in df.columns: raise ValueError("Regressor {} not found in DataFrame.".format(reg)) data_params[reg] = get_normalization_params( array=df[reg].values, norm_type=regressor_config[reg].normalize, ) if events_config is not None: for event in events_config.keys(): if event not in df.columns: raise ValueError("Event {} not found in DataFrame.".format(event)) data_params[event] = ShiftScale() return data_params def init_data_params( df, normalize, covariates_config=None, regressor_config=None, events_config=None, local_modeling=False ): if isinstance(df, list): df_list = df.copy() if local_modeling: data_params = list() for df in df_list: data_params.append( data_params_definition(df, normalize, covariates_config, regressor_config, events_config) ) log.debug( "Global Modeling - Local Normalization - Data Parameters (shift, scale): {}".format( [(k, (v.shift, v.scale)) for k, v in data_params[-1].items()] ) ) log.warning( "Local normalization will be implemented in the future - list of data_params may break the code" ) else: df, _ = join_dataframes(df_list) data_params = data_params_definition(df, normalize, covariates_config, regressor_config, events_config) log.debug( "Global Modeling - Global Normalization - Data Parameters (shift, scale): {}".format( [(k, (v.shift, v.scale)) for k, v in data_params.items()] ) ) else: data_params = data_params_definition(df, normalize, covariates_config, regressor_config, events_config) log.debug( "Data Parameters (shift, scale): {}".format([(k, (v.shift, v.scale)) for k, v in data_params.items()]) ) return data_params def auto_normalization_setting(array): if len(np.unique(array)) < 2: log.error("encountered variable with one unique value") raise ValueError elif len(np.unique(array)) == 2: return "minmax" else: return "soft" def get_normalization_params(array, norm_type): if norm_type == "auto": norm_type = auto_normalization_setting(array) shift = 0.0 scale = 1.0 if norm_type == "soft": lowest = np.min(array) 
q95 = np.quantile(array, 0.95, interpolation="higher") width = q95 - lowest if math.isclose(width, 0): width = np.max(array) - lowest shift = lowest scale = width elif norm_type == "soft1": lowest = np.min(array) q90 = np.quantile(array, 0.9, interpolation="higher") width = q90 - lowest if math.isclose(width, 0): width = (np.max(array) - lowest) / 1.25 shift = lowest - 0.125 * width scale = 1.25 * width elif norm_type == "minmax": shift = np.min(array) scale = np.max(array) - shift elif norm_type == "standardize": shift = np.mean(array) scale = np.std(array) elif norm_type != "off": log.error("Normalization {} not defined.".format(norm_type)) return ShiftScale(shift, scale) def _normalization(df, data_params): for name in df.columns: if name not in data_params.keys(): raise ValueError("Unexpected column {} in data".format(name)) new_name = name if name == "ds": new_name = "t" if name == "y": new_name = "y_scaled" df[new_name] = df[name].sub(data_params[name].shift).div(data_params[name].scale) return df def normalize(df, data_params, local_modeling=False): if isinstance(df, list): df_list = df.copy() if local_modeling: log.warning( "Local normalization will be implemented in the future - list of data_params may break the code" ) df_list_norm = list() for df, df_data_params in zip(df_list, data_params): df_list_norm.append(_normalization(df, df_data_params)) df = df_list_norm else: df_joined, episodes = join_dataframes(df_list) df = _normalization(df_joined, data_params) df = recover_dataframes(df, episodes) else: df = _normalization(df, data_params) return df def _check_dataframe(df, check_y, covariates, regressors, events): if df.shape[0] == 0: raise ValueError("Dataframe has no rows.") if "ds" not in df: raise ValueError('Dataframe must have columns "ds" with the dates.') if df.loc[:, "ds"].isnull().any(): raise ValueError("Found NaN in column ds.") if df["ds"].dtype == np.int64: df.loc[:, "ds"] = df.loc[:, "ds"].astype(str) if not np.issubdtype(df["ds"].dtype, np.datetime64): df.loc[:, "ds"] = pd.to_datetime(df.loc[:, "ds"]) if df["ds"].dt.tz is not None: raise ValueError("Column ds has timezone specified, which is not supported. Remove timezone.") if len(df.ds.unique()) != len(df.ds): raise ValueError("Column ds has duplicate values. 
Please remove duplicates.") columns = [] if check_y: columns.append("y") if covariates is not None: if type(covariates) is list: columns.extend(covariates) else: columns.extend(covariates.keys()) if regressors is not None: if type(regressors) is list: columns.extend(regressors) else: columns.extend(regressors.keys()) if events is not None: if type(events) is list: columns.extend(events) else: columns.extend(events.keys()) for name in columns: if name not in df: raise ValueError("Column {name!r} missing from dataframe".format(name=name)) if df.loc[df.loc[:, name].notnull()].shape[0] < 1: raise ValueError("Dataframe column {name!r} only has NaN rows.".format(name=name)) if not np.issubdtype(df[name].dtype, np.number): df.loc[:, name] = pd.to_numeric(df.loc[:, name]) if np.isinf(df.loc[:, name].values).any(): df.loc[:, name] = df[name].replace([np.inf, -np.inf], np.nan) if df.loc[df.loc[:, name].notnull()].shape[0] < 1: raise ValueError("Dataframe column {name!r} only has NaN rows.".format(name=name)) if df.index.name == "ds": df.index.name = None df = df.sort_values("ds") df = df.reset_index(drop=True) return df def check_dataframe(df, check_y=True, covariates=None, regressors=None, events=None): df_list = create_df_list(df) checked_df = list() for df in df_list: checked_df.append(_check_dataframe(df, check_y, covariates, regressors, events)) df = checked_df return df[0] if len(df) == 1 else df def crossvalidation_split_df(df, n_lags, n_forecasts, k, fold_pct, fold_overlap_pct=0.0): if n_lags == 0: assert n_forecasts == 1 total_samples = len(df) - n_lags + 2 - (2 * n_forecasts) samples_fold = max(1, int(fold_pct * total_samples)) samples_overlap = int(fold_overlap_pct * samples_fold) assert samples_overlap < samples_fold min_train = total_samples - samples_fold - (k - 1) * (samples_fold - samples_overlap) assert min_train >= samples_fold folds = [] df_fold = df.copy(deep=True) for i in range(k, 0, -1): df_train, df_val = split_df(df_fold, n_lags, n_forecasts, valid_p=samples_fold, inputs_overbleed=True) folds.append((df_train, df_val)) split_idx = len(df_fold) - samples_fold + samples_overlap df_fold = df_fold.iloc[:split_idx].reset_index(drop=True) folds = folds[::-1] return folds def double_crossvalidation_split_df(df, n_lags, n_forecasts, k, valid_pct, test_pct): fold_pct_test = float(test_pct) / k folds_test = crossvalidation_split_df(df, n_lags, n_forecasts, k, fold_pct=fold_pct_test, fold_overlap_pct=0.0) df_train = folds_test[0][0] fold_pct_val = float(valid_pct) / k / (1.0 - test_pct) folds_val = crossvalidation_split_df(df_train, n_lags, n_forecasts, k, fold_pct=fold_pct_val, fold_overlap_pct=0.0) return folds_val, folds_test def _split_df(df, n_lags, n_forecasts, valid_p, inputs_overbleed): n_samples = len(df) - n_lags + 2 - (2 * n_forecasts) n_samples = n_samples if inputs_overbleed else n_samples - n_lags if 0.0 < valid_p < 1.0: n_valid = max(1, int(n_samples * valid_p)) else: assert valid_p >= 1 assert type(valid_p) == int n_valid = valid_p n_train = n_samples - n_valid assert n_train >= 1 split_idx_train = n_train + n_lags + n_forecasts - 1 split_idx_val = split_idx_train - n_lags if inputs_overbleed else split_idx_train df_train = df.copy(deep=True).iloc[:split_idx_train].reset_index(drop=True) df_val = df.copy(deep=True).iloc[split_idx_val:].reset_index(drop=True) log.debug("{} n_train, {} n_eval".format(n_train, n_samples - n_train)) return df_train, df_val def find_time_threshold(df_list, n_lags, valid_p, inputs_overbleed): if not 0 < valid_p < 1: log.error("Please type a 
valid value for valid_p (for global modeling it should be between 0 and 1.0)") df_joint, _ = join_dataframes(df_list) df_joint = df_joint.sort_values("ds") df_joint = df_joint.reset_index(drop=True) n_samples = len(df_joint) n_samples = n_samples if inputs_overbleed else n_samples - n_lags n_valid = max(1, int(n_samples * valid_p)) n_train = n_samples - n_valid threshold_time_stamp = df_joint.loc[n_train, "ds"] log.debug("Time threshold: ", threshold_time_stamp) return threshold_time_stamp def split_considering_timestamp(df_list, threshold_time_stamp): df_train = list() df_val = list() for df in df_list: if df["ds"].max() < threshold_time_stamp: df_train.append(df) elif df["ds"].min() > threshold_time_stamp: df_val.append(df) else: df_train.append(df[df["ds"] < threshold_time_stamp]) df_val.append(df[df["ds"] >= threshold_time_stamp]) return df_train, df_val
MIT License
szymonmaszke/torchfunc
torchfunc/performance/layers.py
Inplace.modules
python
def modules(self, module: torch.nn.Module):
    yield from self._analyse(module, "modules")
**Look for inplace operations using the** `modules()` **method (recursive scanning).**

Yields
------
int
    Indices of modules that are probably `inplace`.
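Usage sketch: flag modules that expose a truthy `inplace` attribute (e.g. nn.ReLU(inplace=True)); torchfunc is assumed to be installed:

import torch
from torchfunc.performance.layers import Inplace

model = torch.nn.Sequential(
    torch.nn.Linear(8, 8),
    torch.nn.ReLU(inplace=True),   # reported: modules() index 2 (index 0 is the container)
    torch.nn.Linear(8, 2),
    torch.nn.ReLU(),               # inplace=False, so not reported
)
print(list(Inplace().modules(model)))  # -> [2]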
https://github.com/szymonmaszke/torchfunc/blob/92511c9beb2b62bb4e195deb0fa87b450daee61c/torchfunc/performance/layers.py#L193-L201
import abc import collections import sys import typing import torch from .._base import Base class Depthwise(Base): def __init__( self, checkers: typing.Tuple[typing.Callable[[torch.nn.Module], bool]] = None ): self.checkers: typing.Tuple[typing.Callable] = ( Depthwise.default_checker, ) if checkers is None else checkers @classmethod def default_checker(cls, module): if hasattr(module, "groups") and hasattr(module, "in_channels"): return module.groups == module.in_channels and module.in_channels != 1 return False def _analyse(self, module, function): for index, submodule in enumerate(getattr(module, function)()): for checker in self.checkers: if checker(submodule): yield index def modules(self, module: torch.nn.Module): yield from self._analyse(module, "modules") def children(self, module: torch.nn.Module): yield from self._analyse(module, "children") def tips(self, module: torch.nn.Module) -> str: depthwise = self.modules(module) if depthwise: return ( "Depthwise convolutions are not currently using specialized kernel and might be slower.\n" + "See this issue: https://github.com/pytorch/pytorch/issues/18631 for more information.\n" + "Indices of those modules:\n" + str(list(depthwise)) + "\nYou may want to decrease number of groups (like it's done for ResNeXt) for possible speed & accuracy improvements." ) return "" class Inplace(Base): def __init__(self, inplace: typing.Tuple[str] = ("inplace",)): self.inplace = inplace def _analyse(self, module: torch.nn.Module, method: str): for index, submodule in enumerate(getattr(module, method)()): for attribute in self.inplace: if hasattr(submodule, attribute): if getattr(submodule, attribute): yield index
MIT License
adn-devtech/3dsmax-python-howtos
src/packages/reloadmod/reloadmod/reload.py
non_builtin
python
def non_builtin():
    skip = set(
        list(sys.builtin_module_names) +
        list(filter(lambda k: k.find("importlib") >= 0, sys.modules.keys())) +
        FORCE_SKIP)
    return set(filter(lambda k: not (k in skip) and not is_builtin(k),
                      sys.modules.keys()))
Return a set of all module names that are neither builtins nor importlib-related.
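A hedged usage sketch of what a caller might do with the returned names; the reloadmod package is assumed to be installed (e.g. inside 3ds Max's Python), and the reload loop is shown commented out because ordering and error handling are left to the package's own entry point:

import sys
import importlib
from reloadmod.reload import non_builtin

names = non_builtin()
print(len(names), "modules eligible for reload")
# for name in sorted(names):
#     importlib.reload(sys.modules[name])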
https://github.com/adn-devtech/3dsmax-python-howtos/blob/b86ef45ef4d8dff373bd1cbfe5c4d5b805687339/src/packages/reloadmod/reloadmod/reload.py#L11-L20
import sys import importlib import inspect import pymxs FORCE_SKIP = []
MIT License
jjdabr/forecastnet
Pytorch/denseForecastNet.py
ForecastNetDenseModel2.forward
python
def forward(self, input, target, is_training=False):
    outputs = torch.zeros((self.out_seq_length, input.shape[0], self.output_dim)).to(self.device)
    next_cell_input = input
    for i in range(self.out_seq_length):
        hidden = F.relu(self.hidden_layer1[i](next_cell_input))
        hidden = F.relu(self.hidden_layer2[i](hidden))
        output = self.output_layer[i](hidden)
        outputs[i, :, :] = output
        if is_training:
            next_cell_input = torch.cat((input, hidden, target[i, :, :]), dim=1)
        else:
            next_cell_input = torch.cat((input, hidden, outputs[i, :, :]), dim=1)
    return outputs
Forward propagation of the dense ForecastNet model.
:param input: Input data in the form [batch_size, input_dim * in_seq_length] (the input sequence flattened, as expected by the first Linear layer)
:param target: Target data in the form [output_seq_length, batch_size, output_dim]
:param is_training: If True, use the target data for training (teacher forcing); else feed back the previous output.
:return: outputs: Forecast outputs in the form [out_seq_length, batch_size, output_dim]
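A shape-check sketch with random tensors (all sizes are arbitrary); the class is assumed importable from Pytorch/denseForecastNet.py, i.e. with that directory on the import path:

import torch
from denseForecastNet import ForecastNetDenseModel2

device = torch.device("cpu")
in_seq, out_seq, batch = 12, 4, 8
input_dim, hidden_dim, output_dim = 1, 24, 1

model = ForecastNetDenseModel2(input_dim, hidden_dim, output_dim, in_seq, out_seq, device)
inputs = torch.randn(batch, input_dim * in_seq)     # flattened input window
targets = torch.randn(out_seq, batch, output_dim)   # only consumed when is_training=True
outputs = model(inputs, targets, is_training=True)
print(outputs.shape)                                # torch.Size([4, 8, 1])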
https://github.com/jjdabr/forecastnet/blob/dc76b95f5136dae95fe868dca76d8d8cd9d43cf4/Pytorch/denseForecastNet.py#L115-L139
import torch import torch.nn as nn import torch.nn.functional as F class ForecastNetDenseModel(nn.Module): def __init__(self, input_dim, hidden_dim, output_dim, in_seq_length, out_seq_length, device): super(ForecastNetDenseModel, self).__init__() self.input_dim = input_dim self.hidden_dim = hidden_dim self.output_dim = output_dim self.in_seq_length = in_seq_length self.out_seq_length = out_seq_length self.device = device input_dim_comb = input_dim * in_seq_length hidden_layer1 = [nn.Linear(input_dim_comb, hidden_dim)] for i in range(out_seq_length - 1): hidden_layer1.append(nn.Linear(input_dim_comb + hidden_dim + output_dim, hidden_dim)) self.hidden_layer1 = nn.ModuleList(hidden_layer1) self.hidden_layer2 = nn.ModuleList([nn.Linear(hidden_dim, hidden_dim) for i in range(out_seq_length)]) self.mu_layer = nn.ModuleList([nn.Linear(hidden_dim, output_dim) for i in range(out_seq_length)]) self.sigma_layer = nn.ModuleList([nn.Linear(hidden_dim, output_dim) for i in range(out_seq_length)]) def forward(self, input, target, is_training=False): outputs = torch.zeros((self.out_seq_length, input.shape[0], self.output_dim)).to(self.device) mu = torch.zeros((self.out_seq_length, input.shape[0], self.output_dim)).to(self.device) sigma = torch.zeros((self.out_seq_length, input.shape[0], self.output_dim)).to(self.device) next_cell_input = input for i in range(self.out_seq_length): out = F.relu(self.hidden_layer1[i](next_cell_input)) out = F.relu(self.hidden_layer2[i](out)) mu_ = self.mu_layer[i](out) sigma_ = F.softplus(self.sigma_layer[i](out)) mu[i,:,:] = mu_ sigma[i,:,:] = sigma_ outputs[i,:,:] = torch.normal(mu_, sigma_).to(self.device) if is_training: next_cell_input = torch.cat((input, out, target[i, :, :]), dim=1) else: next_cell_input = torch.cat((input, out, outputs[i, :, :]), dim=1) return outputs, mu, sigma class ForecastNetDenseModel2(nn.Module): def __init__(self, input_dim, hidden_dim, output_dim, in_seq_length, out_seq_length, device): super(ForecastNetDenseModel2, self).__init__() self.input_dim = input_dim self.hidden_dim = hidden_dim self.output_dim = output_dim self.in_seq_length = in_seq_length self.out_seq_length = out_seq_length self.device = device input_dim_comb = input_dim * in_seq_length hidden_layer1 = [nn.Linear(input_dim_comb, hidden_dim)] for i in range(out_seq_length - 1): hidden_layer1.append(nn.Linear(input_dim_comb + hidden_dim + output_dim, hidden_dim)) self.hidden_layer1 = nn.ModuleList(hidden_layer1) self.hidden_layer2 = nn.ModuleList([nn.Linear(hidden_dim, hidden_dim) for i in range(out_seq_length)]) self.output_layer = nn.ModuleList([nn.Linear(hidden_dim, output_dim) for i in range(out_seq_length)])
MIT License
bendangnuksung/mrcnn_serving_ready
inferencing/saved_model_utils.py
resize_image
python
def resize_image(image, min_dim=None, max_dim=None, min_scale=None, mode="square"):
    image_dtype = image.dtype
    h, w = image.shape[:2]
    window = (0, 0, h, w)
    scale = 1
    padding = [(0, 0), (0, 0), (0, 0)]
    crop = None

    if mode == "none":
        return image, window, scale, padding, crop

    if min_dim:
        scale = max(1, min_dim / min(h, w))
    if min_scale and scale < min_scale:
        scale = min_scale

    if max_dim and mode == "square":
        image_max = max(h, w)
        if round(image_max * scale) > max_dim:
            scale = max_dim / image_max

    if scale != 1:
        image = cv2.resize(image, (round(w * scale), round(h * scale)))

    if mode == "square":
        h, w = image.shape[:2]
        top_pad = (max_dim - h) // 2
        bottom_pad = max_dim - h - top_pad
        left_pad = (max_dim - w) // 2
        right_pad = max_dim - w - left_pad
        padding = [(top_pad, bottom_pad), (left_pad, right_pad), (0, 0)]
        image = np.pad(image, padding, mode='constant', constant_values=0)
        window = (top_pad, left_pad, h + top_pad, w + left_pad)
    elif mode == "pad64":
        h, w = image.shape[:2]
        assert min_dim % 64 == 0, "Minimum dimension must be a multiple of 64"
        if h % 64 > 0:
            max_h = h - (h % 64) + 64
            top_pad = (max_h - h) // 2
            bottom_pad = max_h - h - top_pad
        else:
            top_pad = bottom_pad = 0
        if w % 64 > 0:
            max_w = w - (w % 64) + 64
            left_pad = (max_w - w) // 2
            right_pad = max_w - w - left_pad
        else:
            left_pad = right_pad = 0
        padding = [(top_pad, bottom_pad), (left_pad, right_pad), (0, 0)]
        image = np.pad(image, padding, mode='constant', constant_values=0)
        window = (top_pad, left_pad, h + top_pad, w + left_pad)
    elif mode == "crop":
        h, w = image.shape[:2]
        y = random.randint(0, (h - min_dim))
        x = random.randint(0, (w - min_dim))
        crop = (y, x, min_dim, min_dim)
        image = image[y:y + min_dim, x:x + min_dim]
        window = (0, 0, min_dim, min_dim)
    else:
        raise Exception("Mode {} not supported".format(mode))

    return image.astype(image_dtype), window, scale, padding, crop
Resizes an image keeping the aspect ratio unchanged.

min_dim: if provided, resizes the image such that its smaller dimension == min_dim
max_dim: if provided, ensures that the image's longest side doesn't exceed this value.
min_scale: if provided, ensures that the image is scaled up by at least this percent even if min_dim doesn't require it.
mode: Resizing mode.
    none: No resizing. Return the image unchanged.
    square: Resize and pad with zeros to get a square image of size [max_dim, max_dim].
    pad64: Pads width and height with zeros to make them multiples of 64. If min_dim or min_scale are provided, it scales the image up before padding. max_dim is ignored in this mode. The multiple of 64 is needed to ensure smooth scaling of feature maps up and down the 6 levels of the FPN pyramid (2**6=64).
    crop: Picks random crops from the image. First, scales the image based on min_dim and min_scale, then picks a random crop of size min_dim x min_dim. Can be used in training only. max_dim is not used in this mode.

Returns:
    image: the resized image
    window: (y1, x1, y2, x2). If max_dim is provided, padding might be inserted in the returned image. If so, this window is the coordinates of the image part of the full image (excluding the padding). The x2, y2 pixels are not included.
    scale: The scale factor used to resize the image
    padding: Padding added to the image [(top, bottom), (left, right), (0, 0)]
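A quick usage sketch with a synthetic image (sizes are arbitrary); the module is assumed importable as inferencing.saved_model_utils from the repo root, with its cv2/tensorflow dependencies installed:

import numpy as np
from inferencing.saved_model_utils import resize_image

image = np.random.randint(0, 255, size=(600, 800, 3), dtype=np.uint8)
resized, window, scale, padding, crop = resize_image(
    image, min_dim=800, max_dim=1024, mode="square")
print(resized.shape)   # (1024, 1024, 3): rescaled, then zero-padded to a square
print(window, scale)   # (128, 0, 896, 1024) and 1.28: image region inside the padding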
https://github.com/bendangnuksung/mrcnn_serving_ready/blob/de9cd824e6e3a108dcd6af50a4a377afc3f24d08/inferencing/saved_model_utils.py#L385-L491
import random import cv2 import numpy as np import tensorflow as tf import scipy import skimage.color import skimage.transform import urllib.request import shutil import warnings COCO_MODEL_URL = "https://github.com/matterport/Mask_RCNN/releases/download/v2.0/mask_rcnn_coco.h5" def extract_bboxes(mask): boxes = np.zeros([mask.shape[-1], 4], dtype=np.int32) for i in range(mask.shape[-1]): m = mask[:, :, i] horizontal_indicies = np.where(np.any(m, axis=0))[0] vertical_indicies = np.where(np.any(m, axis=1))[0] if horizontal_indicies.shape[0]: x1, x2 = horizontal_indicies[[0, -1]] y1, y2 = vertical_indicies[[0, -1]] x2 += 1 y2 += 1 else: x1, x2, y1, y2 = 0, 0, 0, 0 boxes[i] = np.array([y1, x1, y2, x2]) return boxes.astype(np.int32) def compute_iou(box, boxes, box_area, boxes_area): y1 = np.maximum(box[0], boxes[:, 0]) y2 = np.minimum(box[2], boxes[:, 2]) x1 = np.maximum(box[1], boxes[:, 1]) x2 = np.minimum(box[3], boxes[:, 3]) intersection = np.maximum(x2 - x1, 0) * np.maximum(y2 - y1, 0) union = box_area + boxes_area[:] - intersection[:] iou = intersection / union return iou def compute_overlaps(boxes1, boxes2): area1 = (boxes1[:, 2] - boxes1[:, 0]) * (boxes1[:, 3] - boxes1[:, 1]) area2 = (boxes2[:, 2] - boxes2[:, 0]) * (boxes2[:, 3] - boxes2[:, 1]) overlaps = np.zeros((boxes1.shape[0], boxes2.shape[0])) for i in range(overlaps.shape[1]): box2 = boxes2[i] overlaps[:, i] = compute_iou(box2, boxes1, area2[i], area1) return overlaps def compute_overlaps_masks(masks1, masks2): if masks1.shape[0] == 0 or masks2.shape[0] == 0: return np.zeros((masks1.shape[0], masks2.shape[-1])) masks1 = np.reshape(masks1 > .5, (-1, masks1.shape[-1])).astype(np.float32) masks2 = np.reshape(masks2 > .5, (-1, masks2.shape[-1])).astype(np.float32) area1 = np.sum(masks1, axis=0) area2 = np.sum(masks2, axis=0) intersections = np.dot(masks1.T, masks2) union = area1[:, None] + area2[None, :] - intersections overlaps = intersections / union return overlaps def non_max_suppression(boxes, scores, threshold): assert boxes.shape[0] > 0 if boxes.dtype.kind != "f": boxes = boxes.astype(np.float32) y1 = boxes[:, 0] x1 = boxes[:, 1] y2 = boxes[:, 2] x2 = boxes[:, 3] area = (y2 - y1) * (x2 - x1) ixs = scores.argsort()[::-1] pick = [] while len(ixs) > 0: i = ixs[0] pick.append(i) iou = compute_iou(boxes[i], boxes[ixs[1:]], area[i], area[ixs[1:]]) remove_ixs = np.where(iou > threshold)[0] + 1 ixs = np.delete(ixs, remove_ixs) ixs = np.delete(ixs, 0) return np.array(pick, dtype=np.int32) def apply_box_deltas(boxes, deltas): boxes = boxes.astype(np.float32) height = boxes[:, 2] - boxes[:, 0] width = boxes[:, 3] - boxes[:, 1] center_y = boxes[:, 0] + 0.5 * height center_x = boxes[:, 1] + 0.5 * width center_y += deltas[:, 0] * height center_x += deltas[:, 1] * width height *= np.exp(deltas[:, 2]) width *= np.exp(deltas[:, 3]) y1 = center_y - 0.5 * height x1 = center_x - 0.5 * width y2 = y1 + height x2 = x1 + width return np.stack([y1, x1, y2, x2], axis=1) def box_refinement_graph(box, gt_box): box = tf.cast(box, tf.float32) gt_box = tf.cast(gt_box, tf.float32) height = box[:, 2] - box[:, 0] width = box[:, 3] - box[:, 1] center_y = box[:, 0] + 0.5 * height center_x = box[:, 1] + 0.5 * width gt_height = gt_box[:, 2] - gt_box[:, 0] gt_width = gt_box[:, 3] - gt_box[:, 1] gt_center_y = gt_box[:, 0] + 0.5 * gt_height gt_center_x = gt_box[:, 1] + 0.5 * gt_width dy = (gt_center_y - center_y) / height dx = (gt_center_x - center_x) / width dh = tf.log(gt_height / height) dw = tf.log(gt_width / width) result = tf.stack([dy, dx, dh, dw], axis=1) 
return result def box_refinement(box, gt_box): box = box.astype(np.float32) gt_box = gt_box.astype(np.float32) height = box[:, 2] - box[:, 0] width = box[:, 3] - box[:, 1] center_y = box[:, 0] + 0.5 * height center_x = box[:, 1] + 0.5 * width gt_height = gt_box[:, 2] - gt_box[:, 0] gt_width = gt_box[:, 3] - gt_box[:, 1] gt_center_y = gt_box[:, 0] + 0.5 * gt_height gt_center_x = gt_box[:, 1] + 0.5 * gt_width dy = (gt_center_y - center_y) / height dx = (gt_center_x - center_x) / width dh = np.log(gt_height / height) dw = np.log(gt_width / width) return np.stack([dy, dx, dh, dw], axis=1) class Dataset(object): def __init__(self, class_map=None): self._image_ids = [] self.image_info = [] self.class_info = [{"source": "", "id": 0, "name": "BG"}] self.source_class_ids = {} def add_class(self, source, class_id, class_name): assert "." not in source, "Source name cannot contain a dot" for info in self.class_info: if info['source'] == source and info["id"] == class_id: return self.class_info.append({ "source": source, "id": class_id, "name": class_name, }) def add_image(self, source, image_id, path, **kwargs): image_info = { "id": image_id, "source": source, "path": path, } image_info.update(kwargs) self.image_info.append(image_info) def image_reference(self, image_id): return "" def prepare(self, class_map=None): def clean_name(name): return ",".join(name.split(",")[:1]) self.num_classes = len(self.class_info) self.class_ids = np.arange(self.num_classes) self.class_names = [clean_name(c["name"]) for c in self.class_info] self.num_images = len(self.image_info) self._image_ids = np.arange(self.num_images) self.class_from_source_map = {"{}.{}".format(info['source'], info['id']): id for info, id in zip(self.class_info, self.class_ids)} self.image_from_source_map = {"{}.{}".format(info['source'], info['id']): id for info, id in zip(self.image_info, self.image_ids)} self.sources = list(set([i['source'] for i in self.class_info])) self.source_class_ids = {} for source in self.sources: self.source_class_ids[source] = [] for i, info in enumerate(self.class_info): if i == 0 or source == info['source']: self.source_class_ids[source].append(i) def map_source_class_id(self, source_class_id): return self.class_from_source_map[source_class_id] def get_source_class_id(self, class_id, source): info = self.class_info[class_id] assert info['source'] == source return info['id'] def append_data(self, class_info, image_info): self.external_to_class_id = {} for i, c in enumerate(self.class_info): for ds, id in c["map"]: self.external_to_class_id[ds + str(id)] = i self.external_to_image_id = {} for i, info in enumerate(self.image_info): self.external_to_image_id[info["ds"] + str(info["id"])] = i @property def image_ids(self): return self._image_ids def source_image_link(self, image_id): return self.image_info[image_id]["path"] def load_image(self, image_id): image = cv2.imread(self.image_info[image_id]['path']) if image.ndim != 3: image = skimage.color.gray2rgb(image) if image.shape[-1] == 4: image = image[..., :3] return image def load_mask(self, image_id): mask = np.empty([0, 0, 0]) class_ids = np.empty([0], np.int32) return mask, class_ids
MIT License
chriso/gauged
gauged/bridge.py
SharedLibrary.prototype
python
def prototype(self, name, argtypes, restype=None):
    function = self.function(name)
    function.argtypes = argtypes
    if restype:
        function.restype = restype
Define argument / return types for the specified C function
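The same ctypes prototyping pattern shown standalone against libc's abs(), so it runs without building the Gauged C extension (a Unix-like system with a findable libc is assumed):

import ctypes
import ctypes.util

libc = ctypes.CDLL(ctypes.util.find_library("c"))
libc.abs.argtypes = [ctypes.c_int]   # what prototype() assigns via function.argtypes
libc.abs.restype = ctypes.c_int      # what prototype() assigns via function.restype
print(libc.abs(-42))                 # 42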
https://github.com/chriso/gauged/blob/cda3bba2f3e92ce2fb4aa92132dcc0e689bf7976/gauged/bridge.py#L31-L36
import glob import os import sys from ctypes import (POINTER, Structure, cdll, c_int, c_size_t, c_uint32, c_char_p, c_bool, c_float) class SharedLibrary(object): def __init__(self, name, prefix): self.prefix = prefix path = os.path.dirname(os.path.realpath(os.path.join(__file__, '..'))) version = sys.version.split(' ')[0][0:3] shared_lib = os.path.join(path, 'build', 'lib*-' + version, name + '*.*') lib = glob.glob(shared_lib) if not lib: lib = glob.glob(os.path.join(path, name + '*.*')) try: self.library = cdll.LoadLibrary(lib[0]) except OSError as err: raise OSError('Failed to load the C extension: ' + str(err))
MIT License
okpy/ok-client
client/api/assignment.py
Assignment._encrypt_file
python
def _encrypt_file(self, path, key, padding):
    def encrypt(data):
        if encryption.is_encrypted(data):
            try:
                data = encryption.decrypt(data, key)
            except encryption.InvalidKeyException:
                raise ValueError("Attempt to re-encrypt file with an invalid key")
        return encryption.encrypt(data, key, padding)
    self._in_place_edit(path, encrypt)
Encrypt the given file in place with the given key. This is idempotent, but attempting to encrypt the same file with a different key raises an error.
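A hedged sketch of the round-trip property this method relies on, using only the encryption helpers referenced in this file; the padding value passed here is an assumption made purely for illustration (check client/utils/encryption.py for the accepted values):

from client.utils import encryption

key = encryption.generate_key()
plaintext = "def square(x):\n    return x * x\n"
ciphertext = encryption.encrypt(plaintext, key, 512)   # padding=512 is illustrative only
assert encryption.is_encrypted(ciphertext)
assert encryption.decrypt(ciphertext, key) == plaintext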
https://github.com/okpy/ok-client/blob/3c5eca17100eed808023a815654cfe1c95179080/client/api/assignment.py#L194-L207
import uuid from datetime import timedelta import requests from client import exceptions as ex from client.sources.common import core from client.utils import auth, format, encryption from client.protocols.grading import grade from client.cli.common import messages import client import collections import glob import importlib import json import logging import os import textwrap from client.utils.printer import print_success, print_error, print_warning log = logging.getLogger(__name__) CONFIG_EXTENSION = '*.ok' def load_assignment(filepath=None, cmd_args=None): config = _get_config(filepath) if not isinstance(config, dict): raise ex.LoadingException('Config should be a dictionary') if cmd_args is None: cmd_args = Settings() return Assignment(cmd_args, **config) def _get_config(config): if config is None: configs = glob.glob(CONFIG_EXTENSION) if len(configs) > 1: raise ex.LoadingException('\n'.join([ 'Multiple .ok files found:', ' ' + ' '.join(configs), "Please specify a particular assignment's config file with", ' python3 ok --config <config file>' ])) elif not configs: raise ex.LoadingException('No .ok configuration file found') config = configs[0] elif not os.path.isfile(config): raise ex.LoadingException( 'Could not find config file: {}'.format(config)) try: with open(config, 'r') as f: result = json.load(f, object_pairs_hook=collections.OrderedDict) except IOError: raise ex.LoadingException('Error loading config: {}'.format(config)) except ValueError: raise ex.LoadingException( '{0} is a malformed .ok configuration file. ' 'Please re-download {0}.'.format(config)) else: log.info('Loaded config from {}'.format(config)) return result class Assignment(core.Serializable): name = core.String() endpoint = core.String(optional=True, default='') decryption_keypage = core.String(optional=True, default='') src = core.List(type=str, optional=True) tests = core.Dict(keys=str, values=str, ordered=True) default_tests = core.List(type=str, optional=True) protocols = core.List(type=str, optional=True) def grade(self, question, env=None, skip_locked_cases=False): if env is None: import __main__ env = __main__.__dict__ messages = {} tests = self._resolve_specified_tests([question], all_tests=False) for test in tests: try: for suite in test.suites: suite.skip_locked_cases = skip_locked_cases suite.console.skip_locked_cases = skip_locked_cases suite.console.hash_key = self.name except AttributeError: pass test_name = tests[0].name grade(tests, messages, env) return messages['grading'][test_name] def generate_encryption_key(self, keys_file): data = [(filename, encryption.generate_key()) for filename in self._get_files()] with open(keys_file, "w") as f: json.dump(data, f) def encrypt(self, keys_file, padding): with open(keys_file) as f: keys = dict(json.load(f)) for file in self._get_files(): if file in keys: self._encrypt_file(file, keys[file], padding) def decrypt(self, keys): decrypted_files, undecrypted_files = self.attempt_decryption(keys) if not undecrypted_files + decrypted_files: print_success("All files are decrypted") elif undecrypted_files: if keys: print_error("Unable to decrypt some files with the keys", ", ".join(keys)) else: print_error("No keys found, could not decrypt any files") print_error(" Non-decrypted files:", *undecrypted_files) def attempt_decryption(self, keys): if self.decryption_keypage: try: response = requests.get(self.decryption_keypage) response.raise_for_status() keys_data = response.content.decode('utf-8') keys = keys + encryption.get_keys(keys_data) except Exception as e: 
print_error( "Could not load decryption page {}: {}.".format(self.decryption_keypage, e)) print_error("You can pass in a key directly by running python3 ok --decrypt [KEY]") decrypted_files = [] undecrypted_files = [] for file in self._get_files(): with open(file) as f: if not encryption.is_encrypted(f.read()): continue for key in keys: success = self._decrypt_file(file, key) if success: decrypted_files.append(file) break else: undecrypted_files.append(file) return decrypted_files, undecrypted_files def _decrypt_file(self, path, key): success = False def decrypt(ciphertext): if not encryption.is_encrypted(ciphertext): return ciphertext try: plaintext = encryption.decrypt(ciphertext, key) nonlocal success success = True print_success("decrypted", path, "with", key) return plaintext except encryption.InvalidKeyException: return ciphertext self._in_place_edit(path, decrypt) return success
Apache License 2.0
hexrd/hexrd
hexrd/crystallography.py
PlaneData.getLatticeOperators
python
def getLatticeOperators(self):
    return copy.deepcopy(self.__latVecOps)
Gets the lattice vector operators as a new deep copy.
https://github.com/hexrd/hexrd/blob/90e9b26e5e5091dd5ecf460b3227072e6d90bcd5/hexrd/crystallography.py#L929-L933
import re import copy from math import pi import numpy as np import csv import os from hexrd import constants from hexrd.matrixutil import unitVector from hexrd.rotations import rotMatOfExpMap, mapAngle, applySym, ltypeOfLaueGroup, quatOfLaueGroup from hexrd.transforms import xfcapi from hexrd import valunits from hexrd.valunits import toFloat from hexrd.constants import d2r, r2d, sqrt3by2, sqrt_epsf dUnit = 'angstrom' outputDegrees = False outputDegrees_bak = outputDegrees def hklToStr(x): return re.sub('\[|\]|\(|\)', '', str(x)) def tempSetOutputDegrees(val): global outputDegrees, outputDegrees_bak outputDegrees_bak = outputDegrees outputDegrees = val return def revertOutputDegrees(): global outputDegrees, outputDegrees_bak outputDegrees = outputDegrees_bak return def cosineXform(a, b, c): cosar = (np.cos(b)*np.cos(c) - np.cos(a)) / (np.sin(b)*np.sin(c)) sinar = np.sqrt(1 - cosar**2) return cosar, sinar def processWavelength(arg): if hasattr(arg, 'getVal'): if arg.isLength(): retval = arg.getVal(dUnit) elif arg.isEnergy(): e = arg.getVal('keV') retval = valunits.valWUnit( 'wavelength', 'length', constants.keVToAngstrom(e), 'angstrom' ).getVal(dUnit) else: raise RuntimeError('do not know what to do with '+str(arg)) else: retval = valunits.valWUnit( 'wavelength', 'length', constants.keVToAngstrom(arg), 'angstrom' ).getVal(dUnit) return retval def latticeParameters(lvec): lnorm = np.sqrt(np.sum(lvec**2, 0)) a = lnorm[0] b = lnorm[1] c = lnorm[2] ahat = lvec[:, 0]/a bhat = lvec[:, 1]/b chat = lvec[:, 2]/c gama = np.arccos(np.dot(ahat, bhat)) beta = np.arccos(np.dot(ahat, chat)) alfa = np.arccos(np.dot(bhat, chat)) if outputDegrees: gama = r2d*gama beta = r2d*beta alfa = r2d*alfa return [a, b, c, alfa, beta, gama] def latticePlanes(hkls, lparms, ltype='cubic', wavelength=1.54059292, strainMag=None): location = 'latticePlanes' assert hkls.shape[0] == 3, "hkls aren't column vectors in call to '%s'!" % location tag = ltype wlen = wavelength L = latticeVectors(lparms, tag) G = np.dot(L['B'], hkls) d = 1 / np.sqrt(np.sum(G**2, 0)) aconv = 1. if outputDegrees: aconv = r2d sth = wlen / 2. / d mask = (np.abs(sth) < 1.) tth = np.zeros(sth.shape) tth[~mask] = np.nan tth[mask] = aconv * 2. * np.arcsin(sth[mask]) p = dict(normals=unitVector(G), dspacings=d, tThetas=tth) if strainMag is not None: p['tThetasLo'] = np.zeros(sth.shape) p['tThetasHi'] = np.zeros(sth.shape) mask = ( (np.abs(wlen / 2. / (d * (1. + strainMag))) < 1.) & (np.abs(wlen / 2. / (d * (1. - strainMag))) < 1.) ) p['tThetasLo'][~mask] = np.nan p['tThetasHi'][~mask] = np.nan p['tThetasLo'][mask] = aconv * 2 * np.arcsin(wlen/2./(d[mask]*(1. + strainMag))) p['tThetasHi'][mask] = aconv * 2 * np.arcsin(wlen/2./(d[mask]*(1. - strainMag))) return p def latticeVectors(lparms, tag='cubic', radians=False, debug=False): lattStrings = [ 'cubic', 'hexagonal', 'trigonal', 'rhombohedral', 'tetragonal', 'orthorhombic', 'monoclinic', 'triclinic' ] if radians: aconv = 1. else: aconv = pi/180. deg90 = pi/2. deg120 = 2.*pi/3. 
if tag == lattStrings[0]: cellparms = np.r_[np.tile(lparms[0], (3,)), deg90*np.ones((3,))] elif tag == lattStrings[1] or tag == lattStrings[2]: cellparms = np.r_[lparms[0], lparms[0], lparms[1], deg90, deg90, deg120] elif tag == lattStrings[3]: cellparms = np.r_[np.tile(lparms[0], (3,)), np.tile(aconv*lparms[1], (3,))] elif tag == lattStrings[4]: cellparms = np.r_[lparms[0], lparms[0], lparms[1], deg90, deg90, deg90] elif tag == lattStrings[5]: cellparms = np.r_[lparms[0], lparms[1], lparms[2], deg90, deg90, deg90] elif tag == lattStrings[6]: cellparms = np.r_[lparms[0], lparms[1], lparms[2], deg90, aconv*lparms[3], deg90] elif tag == lattStrings[7]: cellparms = np.r_[lparms[0], lparms[1], lparms[2], aconv*lparms[3], aconv*lparms[4], aconv*lparms[5]] else: raise RuntimeError('lattice tag \'%s\' is not recognized' % (tag)) if debug: print((str(cellparms[0:3]) + ' ' + str(r2d*cellparms[3:6]))) alfa = cellparms[3] beta = cellparms[4] gama = cellparms[5] cosalfar, sinalfar = cosineXform(alfa, beta, gama) a = cellparms[0]*np.r_[1, 0, 0] b = cellparms[1]*np.r_[np.cos(gama), np.sin(gama), 0] c = cellparms[2]*np.r_[np.cos(beta), -cosalfar*np.sin(beta), sinalfar*np.sin(beta)] ad = np.sqrt(np.sum(a**2)) bd = np.sqrt(np.sum(b**2)) cd = np.sqrt(np.sum(c**2)) V = np.dot(a, np.cross(b, c)) F = np.c_[a, b, c] astar = np.cross(b, c)/V bstar = np.cross(c, a)/V cstar = np.cross(a, b)/V ar = np.sqrt(np.sum(astar**2)) br = np.sqrt(np.sum(bstar**2)) cr = np.sqrt(np.sum(cstar**2)) alfar = np.arccos(np.dot(bstar, cstar)/br/cr) betar = np.arccos(np.dot(cstar, astar)/cr/ar) gamar = np.arccos(np.dot(astar, bstar)/ar/br) B = np.c_[astar, bstar, cstar] cosalfar2, sinalfar2 = cosineXform(alfar, betar, gamar) afable = ar*np.r_[1, 0, 0] bfable = br*np.r_[np.cos(gamar), np.sin(gamar), 0] cfable = cr*np.r_[np.cos(betar), -cosalfar2*np.sin(betar), sinalfar2*np.sin(betar)] BR = np.c_[afable, bfable, cfable] U0 = np.dot(B, np.linalg.inv(BR)) if outputDegrees: dparms = np.r_[ad, bd, cd, r2d*np.r_[alfa, beta, gama]] rparms = np.r_[ar, br, cr, r2d*np.r_[alfar, betar, gamar]] else: dparms = np.r_[ad, bd, cd, np.r_[alfa, beta, gama]] rparms = np.r_[ar, br, cr, np.r_[alfar, betar, gamar]] L = {'F': F, 'B': B, 'BR': BR, 'U0': U0, 'vol': V, 'dparms': dparms, 'rparms': rparms} return L def hexagonalIndicesFromRhombohedral(hkl): HKL = np.zeros((3, hkl.shape[1]), dtype='int') HKL[0, :] = hkl[0, :] - hkl[1, :] HKL[1, :] = hkl[1, :] - hkl[2, :] HKL[2, :] = hkl[0, :] + hkl[1, :] + hkl[2, :] return HKL def rhombohedralIndicesFromHexagonal(HKL): hkl = np.zeros((3, HKL.shape[1]), dtype='int') hkl[0, :] = 2 * HKL[0, :] + HKL[1, :] + HKL[2, :] hkl[1, :] = -HKL[0, :] + HKL[1, :] + HKL[2, :] hkl[2, :] = -HKL[0, :] - 2 * HKL[1, :] + HKL[2, :] hkl = hkl / 3. return hkl def rhombohedralParametersFromHexagonal(a_h, c_h): a_r = np.sqrt(3 * a_h**2 + c_h**2) / 3. alfa_r = 2 * np.arcsin(3. 
/ (2 * np.sqrt(3 + (c_h / a_h)**2))) if outputDegrees: alfa_r = r2d * alfa_r return a_r, alfa_r def millerBravaisDirectionToVector(dir_ind, a=1., c=1.): dir_ind = np.atleast_2d(dir_ind) num_in = len(dir_ind) u = dir_ind[:, 0] v = dir_ind[:, 1] w = dir_ind[:, 2] return unitVector( np.vstack([1.5*u*a, sqrt3by2*(2.*v + u)*a, w*c]).reshape(3, num_in) ) class PlaneData(object): def __init__(self, hkls, *args, **kwargs): self.phaseID = None self.__doTThSort = True self.__exclusions = None self.__tThMax = None if len(args) == 4: lparms, laueGroup, wavelength, strainMag = args tThWidth = None self.__wavelength = processWavelength(wavelength) self.__lparms = self.__parseLParms(lparms) elif len(args) == 1 and hasattr(args[0], 'getParams'): other = args[0] lparms, laueGroup, wavelength, strainMag, tThWidth = other.getParams() self.__wavelength = wavelength self.__lparms = lparms self.phaseID = other.phaseID self.__doTThSort = other.__doTThSort self.__exclusions = other.__exclusions self.__tThMax = other.__tThMax if hkls is None: hkls = other.__hkls else: raise NotImplementedError('args : '+str(args)) self.__laueGroup = laueGroup self.__qsym = quatOfLaueGroup(self.__laueGroup) self.__hkls = copy.deepcopy(hkls) self.__strainMag = strainMag self.__structFact = np.ones(self.__hkls.shape[1]) self.tThWidth = tThWidth if 'phaseID' in kwargs: self.phaseID = kwargs.pop('phaseID') if 'doTThSort' in kwargs: self.__doTThSort = kwargs.pop('doTThSort') if 'exclusions' in kwargs: self.__exclusions = kwargs.pop('exclusions') if 'tThMax' in kwargs: self.__tThMax = toFloat(kwargs.pop('tThMax'), 'radians') if 'tThWidth' in kwargs: self.tThWidth = kwargs.pop('tThWidth') if len(kwargs) > 0: raise RuntimeError('have unparsed keyword arguments with keys: ' + str(list(kwargs.keys()))) self.__calc() return def __calc(self): symmGroup = ltypeOfLaueGroup(self.__laueGroup) latPlaneData, latVecOps, hklDataList = PlaneData.makePlaneData( self.__hkls, self.__lparms, self.__qsym, symmGroup, self.__strainMag, self.wavelength) tThs = np.array( [hklDataList[iHKL]['tTheta'] for iHKL in range(len(hklDataList))] ) if self.__doTThSort: self.tThSort = np.argsort(tThs) self.tThSortInv = np.empty(len(hklDataList), dtype=int) self.tThSortInv[self.tThSort] = np.arange(len(hklDataList)) self.hklDataList = [hklDataList[iHKL] for iHKL in self.tThSort] else: self.tThSort = np.arange(len(hklDataList)) self.tThSortInv = np.arange(len(hklDataList)) self.hklDataList = hklDataList self.__latVecOps = latVecOps self.nHKLs = len(self.getHKLs()) return def __str__(self): s = '========== plane data ==========\n' s += 'lattice parameters:\n ' + str(self.lparms) + '\n' s += 'two theta width: (%s)\n' % str(self.tThWidth) s += 'strain magnitude: (%s)\n' % str(self.strainMag) s += 'beam energy (%s)\n' % str(self.wavelength) s += 'hkls: (%d)\n' % self.nHKLs s += str(self.getHKLs()) return s def getNHKLs(self): return self.nHKLs def getPhaseID(self): return self.phaseID def getParams(self): return (self.__lparms, self.__laueGroup, self.__wavelength, self.__strainMag, self.tThWidth) def getNhklRef(self): retval = len(self.hklDataList) return retval def get_hkls(self): return self.getHKLs().T def set_hkls(self, hkls): raise RuntimeError('for now, not allowing hkls to be reset') return hkls = property(get_hkls, set_hkls, None) def get_tThMax(self): return self.__tThMax def set_tThMax(self, tThMax): self.__tThMax = toFloat(tThMax, 'radians') return tThMax = property(get_tThMax, set_tThMax, None) def get_exclusions(self): retval = np.zeros(self.getNhklRef(), 
dtype=bool) if self.__exclusions is not None: retval[:] = self.__exclusions[self.tThSortInv] if self.__tThMax is not None: for iHKLr, hklData in enumerate(self.hklDataList): if hklData['tTheta'] > self.__tThMax: retval[iHKLr] = True return retval def set_exclusions(self, exclusions): excl = np.zeros(len(self.hklDataList), dtype=bool) if exclusions is not None: exclusions = np.atleast_1d(exclusions) if len(exclusions) == len(self.hklDataList): assert exclusions.dtype == 'bool', 'exclusions should be bool if full length' excl[:] = exclusions[self.tThSort] else: if len(exclusions.shape) == 1: excl[self.tThSort[exclusions]] = True elif len(exclusions.shape) == 2: raise NotImplementedError( 'have not yet coded treating exclusions as ranges' ) else: raise RuntimeError( 'do not now what to do with exclusions with shape ' + str(exclusions.shape) ) self.__exclusions = excl self.nHKLs = np.sum(np.logical_not(self.__exclusions)) return exclusions = property(get_exclusions, set_exclusions, None) def get_lparms(self): return self.__lparms def __parseLParms(self, lparms): lparmsDUnit = [] for lparmThis in lparms: if hasattr(lparmThis, 'getVal'): if lparmThis.isLength(): lparmsDUnit.append(lparmThis.getVal(dUnit)) elif lparmThis.isAngle(): lparmsDUnit.append(lparmThis.getVal('degrees')) else: raise RuntimeError( 'do not know what to do with ' + str(lparmThis) ) else: lparmsDUnit.append(lparmThis) return lparmsDUnit def set_lparms(self, lparms): self.__lparms = self.__parseLParms(lparms) self.__calc() return lparms = property(get_lparms, set_lparms, None) def get_strainMag(self): return self.__strainMag def set_strainMag(self, strainMag): self.__strainMag = strainMag self.tThWidth = None self.__calc() return strainMag = property(get_strainMag, set_strainMag, None) def get_wavelength(self): return self.__wavelength def set_wavelength(self, wavelength): wavelength = processWavelength(wavelength) if np.isclose(self.__wavelength, wavelength): return self.__wavelength = wavelength self.__calc() wavelength = property(get_wavelength, set_wavelength, None) def get_structFact(self): return self.__structFact[~self.exclusions] def set_structFact(self, structFact): self.__structFact = structFact multiplicity = self.getMultiplicity(allHKLs=True) tth = self.getTTh(allHKLs=True) lp = (1 + np.cos(tth)**2)/np.cos(0.5*tth)/np.sin(0.5*tth)**2/2.0 powderI = structFact*multiplicity*lp powderI = 100.0*powderI/np.nanmax(powderI) self._powder_intensity = powderI structFact = property(get_structFact, set_structFact, None) @property def powder_intensity(self): return self._powder_intensity[~self.exclusions] @staticmethod def makePlaneData(hkls, lparms, qsym, symmGroup, strainMag, wavelength): tempSetOutputDegrees(False) latPlaneData = latticePlanes(hkls, lparms, ltype=symmGroup, strainMag=strainMag, wavelength=wavelength) latVecOps = latticeVectors(lparms, symmGroup) hklDataList = [] for iHKL in range(len(hkls.T)): latPlnNrmls = applySym( np.dot(latVecOps['B'], hkls[:, iHKL].reshape(3, 1)), qsym, csFlag=True, cullPM=False) latPlnNrmlsM = applySym( np.dot(latVecOps['B'], hkls[:, iHKL].reshape(3, 1)), qsym, csFlag=False, cullPM=False) csRefl = latPlnNrmls.shape[1] == latPlnNrmlsM.shape[1] symHKLs = np.array( np.round( np.dot(latVecOps['F'].T, latPlnNrmls) ), dtype='int' ) hklDataList.append( dict(hklID=iHKL, hkl=hkls[:, iHKL], tTheta=latPlaneData['tThetas'][iHKL], dSpacings=latPlaneData['dspacings'][iHKL], tThetaLo=latPlaneData['tThetasLo'][iHKL], tThetaHi=latPlaneData['tThetasHi'][iHKL], latPlnNrmls=unitVector(latPlnNrmls), 
symHKLs=symHKLs, centrosym=csRefl ) ) revertOutputDegrees() return latPlaneData, latVecOps, hklDataList def getLatticeType(self): return ltypeOfLaueGroup(self.__laueGroup) def getLaueGroup(self): return self.__laueGroup def setLaueGroup(self, laueGroup): self.__laueGroup = laueGroup self.__calc() laueGroup = property(getLaueGroup, setLaueGroup, None) def set_laue_and_lparms(self, laueGroup, lparms): self.__laueGroup = laueGroup self.__lparms = self.__parseLParms(lparms) self.__calc() def getQSym(self): return self.__qsym def getPlaneSpacings(self): dspacings = [] for iHKLr, hklData in enumerate(self.hklDataList): if not self.__thisHKL(iHKLr): continue dspacings.append(hklData['dSpacings']) return dspacings def getPlaneNormals(self): plnNrmls = [] for iHKLr, hklData in enumerate(self.hklDataList): if not self.__thisHKL(iHKLr): continue plnNrmls.append(hklData['latPlnNrmls']) return plnNrmls
BSD 3-Clause New or Revised License
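A short standalone sketch of the defensive-copy accessor pattern behind PlaneData.getLatticeOperators above: return a deep copy so callers cannot mutate the object's cached state. The LatticeCache class is hypothetical, not part of hexrd.

import copy

class LatticeCache:
    def __init__(self, operators):
        self.__operators = operators

    def get_operators(self):
        # Hand back a deep copy; mutations by the caller never reach the cache.
        return copy.deepcopy(self.__operators)

cache = LatticeCache({"F": [[1.0, 0.0], [0.0, 1.0]]})
ops = cache.get_operators()
ops["F"][0][0] = 99.0                      # mutate the copy only
print(cache.get_operators()["F"][0][0])    # still 1.0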
waliens/sldc
sldc/logging.py
Logger.info
python
def info(self, msg): self._log(Logger.INFO, msg)
Logs an information message if the verbosity level is greater than or equal to INFO. Parameters ---------- msg: string The message to log
https://github.com/waliens/sldc/blob/b16d28ca223ac686b711ca988f5e76f7cdedbaca/sldc/logging.py#L70-L77
import os import threading from abc import abstractmethod, ABCMeta __author__ = "Romain Mormont <[email protected]>" __version__ = "0.1" class Logger(object): SILENT = 0 ERROR = 1 WARNING = 2 INFO = 3 DEBUG = 4 def __init__(self, level, prefix=True, pid=True): self._level = level self._prefix = prefix self._pid = pid @property def level(self): return self._level @level.setter def level(self, level): self._level = level def d(self, msg): self.debug(msg) def debug(self, msg): self._log(Logger.DEBUG, msg) def i(self, msg): self.info(msg)
MIT License
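A standalone sketch of the level-gated logging pattern that Logger.info relies on: the message is emitted only when the configured verbosity is at least INFO. PrintLogger is a hypothetical concrete logger, not part of sldc.

class PrintLogger:
    SILENT, ERROR, WARNING, INFO, DEBUG = range(5)

    def __init__(self, level):
        self._level = level

    def _log(self, level, msg):
        if self._level >= level:
            print(msg)

    def info(self, msg):
        self._log(PrintLogger.INFO, msg)

PrintLogger(PrintLogger.WARNING).info("suppressed: verbosity below INFO")
PrintLogger(PrintLogger.DEBUG).info("printed: verbosity is at least INFO")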
clericpy/torequests
torequests/main.py
NewExecutorPoolMixin._get_cpu_count
python
def _get_cpu_count(self): try: from multiprocessing import cpu_count return cpu_count() except Exception as e: logger.error("_get_cpu_count failed for %s" % e)
Get the cpu count.
https://github.com/clericpy/torequests/blob/e57ce331aa850db45c198dc90b9d01e437384b61/torequests/main.py#L74-L81
import atexit from concurrent.futures import (ProcessPoolExecutor, ThreadPoolExecutor, as_completed) from concurrent.futures._base import (CANCELLED, CANCELLED_AND_NOTIFIED, FINISHED, PENDING, RUNNING, CancelledError, Error, Executor, Future, TimeoutError) from concurrent.futures.thread import _threads_queues, _WorkItem from functools import wraps from logging import getLogger from threading import Thread, Timer from time import sleep from time import time as time_time from weakref import WeakSet from requests import PreparedRequest, RequestException, Session from requests.adapters import HTTPAdapter from urllib3 import disable_warnings from .configs import Config from .exceptions import FailureException, ValidationError from .frequency_controller.sync_tools import Frequency from .versions import PY2, PY3 try: from queue import Empty, Queue except ImportError: from Queue import Empty, Queue if PY3: from concurrent.futures.process import BrokenProcessPool __all__ = [ "Pool", "ProcessPool", "NewFuture", "Async", "threads", "get_results_generator", "run_after_async", "tPool", "get", "post", "options", "delete", "put", "head", "patch", "request", "disable_warnings", "Workshop" ] logger = getLogger("torequests") def _abandon_all_tasks(): _threads_queues.clear() def ensure_waiting_for_threads(): if Config.wait_futures_before_exiting: _abandon_all_tasks() atexit.register(ensure_waiting_for_threads) class NewExecutorPoolMixin(Executor): def async_func(self, function): @wraps(function) def wrapped(*args, **kwargs): return self.submit(function, *args, **kwargs) return wrapped def close(self, wait=True): return self.shutdown(wait=wait)
MIT License
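A small sketch of the same "query the CPU count but never crash" idea as _get_cpu_count above; this variant additionally falls back to a default worker count instead of returning None when the lookup fails.

import logging

def safe_cpu_count(default=1):
    try:
        from multiprocessing import cpu_count
        return cpu_count()
    except Exception as e:
        logging.getLogger(__name__).error("cpu_count failed: %s", e)
        return default

print(safe_cpu_count())  # e.g. 8 on an 8-core machine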
sfanous/pyecobee
pyecobee/objects/thermostat.py
Thermostat.program
python
def program(self, program): self._program = program
Sets the program attribute of this Thermostat instance. :param program: The program value to set for the program attribute of this Thermostat instance. :type: Program
https://github.com/sfanous/pyecobee/blob/3d6b4aec3c6bc9b796aa3d3fd6626909ffdbac13/pyecobee/objects/thermostat.py#L603-L612
from pyecobee.ecobee_object import EcobeeObject class Thermostat(EcobeeObject): __slots__ = [ '_identifier', '_name', '_thermostat_rev', '_is_registered', '_model_number', '_brand', '_features', '_last_modified', '_thermostat_time', '_utc_time', '_audio', '_alerts', '_reminders', '_settings', '_runtime', '_extended_runtime', '_electricity', '_devices', '_location', '_energy', '_technician', '_utility', '_management', '_weather', '_events', '_program', '_house_details', '_oem_cfg', '_equipment_status', '_notification_settings', '_privacy', '_version', '_security_settings', '_filter_subscription', '_remote_sensors', ] attribute_name_map = { 'identifier': 'identifier', 'name': 'name', 'thermostat_rev': 'thermostatRev', 'thermostatRev': 'thermostat_rev', 'is_registered': 'isRegistered', 'isRegistered': 'is_registered', 'model_number': 'modelNumber', 'modelNumber': 'model_number', 'brand': 'brand', 'features': 'features', 'last_modified': 'lastModified', 'lastModified': 'last_modified', 'thermostat_time': 'thermostatTime', 'thermostatTime': 'thermostat_time', 'utc_time': 'utcTime', 'utcTime': 'utc_time', 'audio': 'audio', 'alerts': 'alerts', 'reminders': 'reminders', 'settings': 'settings', 'runtime': 'runtime', 'extended_runtime': 'extendedRuntime', 'extendedRuntime': 'extended_runtime', 'electricity': 'electricity', 'devices': 'devices', 'location': 'location', 'energy': 'energy', 'technician': 'technician', 'utility': 'utility', 'management': 'management', 'weather': 'weather', 'events': 'events', 'program': 'program', 'house_details': 'houseDetails', 'houseDetails': 'house_details', 'oem_cfg': 'oemCfg', 'oemCfg': 'oem_cfg', 'equipment_status': 'equipmentStatus', 'equipmentStatus': 'equipment_status', 'notification_settings': 'notificationSettings', 'notificationSettings': 'notification_settings', 'privacy': 'privacy', 'version': 'version', 'security_settings': 'securitySettings', 'securitySettings': 'security_settings', 'filter_subscription': 'filterSubscription', 'filterSubscription': 'filter_subscription', 'remote_sensors': 'remoteSensors', 'remoteSensors': 'remote_sensors', } attribute_type_map = { 'identifier': 'six.text_type', 'name': 'six.text_type', 'thermostat_rev': 'six.text_type', 'is_registered': 'bool', 'model_number': 'six.text_type', 'brand': 'six.text_type', 'features': 'six.text_type', 'last_modified': 'six.text_type', 'thermostat_time': 'six.text_type', 'utc_time': 'six.text_type', 'audio': 'Audio', 'alerts': 'List[Alert]', 'reminders': 'List[ThermostatReminder2]', 'settings': 'Settings', 'runtime': 'Runtime', 'extended_runtime': 'ExtendedRuntime', 'electricity': 'Electricity', 'devices': 'List[Device]', 'location': 'Location', 'energy': 'Energy', 'technician': 'Technician', 'utility': 'Utility', 'management': 'Management', 'weather': 'Weather', 'events': 'List[Event]', 'program': 'Program', 'house_details': 'HouseDetails', 'oem_cfg': 'ThermostatOemCfg', 'equipment_status': 'six.text_type', 'notification_settings': 'NotificationSettings', 'privacy': 'ThermostatPrivacy', 'version': 'Version', 'security_settings': 'SecuritySettings', 'filter_subscription': 'ApiFilterSubscription', 'remote_sensors': 'List[RemoteSensor]', } def __init__( self, identifier, name=None, thermostat_rev=None, is_registered=None, model_number=None, brand=None, features=None, last_modified=None, thermostat_time=None, utc_time=None, audio=None, alerts=None, reminders=None, settings=None, runtime=None, extended_runtime=None, electricity=None, devices=None, location=None, energy=None, technician=None, 
utility=None, management=None, weather=None, events=None, program=None, house_details=None, oem_cfg=None, equipment_status=None, notification_settings=None, privacy=None, version=None, security_settings=None, filter_subscription=None, remote_sensors=None, ): self._identifier = identifier self._name = name self._thermostat_rev = thermostat_rev self._is_registered = is_registered self._model_number = model_number self._brand = brand self._features = features self._last_modified = last_modified self._thermostat_time = thermostat_time self._utc_time = utc_time self._audio = audio self._alerts = alerts self._reminders = reminders self._settings = settings self._runtime = runtime self._extended_runtime = extended_runtime self._electricity = electricity self._devices = devices self._location = location self._energy = energy self._technician = technician self._utility = utility self._management = management self._weather = weather self._events = events self._program = program self._house_details = house_details self._oem_cfg = oem_cfg self._equipment_status = equipment_status self._notification_settings = notification_settings self._privacy = privacy self._version = version self._security_settings = security_settings self._filter_subscription = filter_subscription self._remote_sensors = remote_sensors @property def identifier(self): return self._identifier @property def name(self): return self._name @name.setter def name(self, name): self._name = name @property def thermostat_rev(self): return self._thermostat_rev @property def is_registered(self): return self._is_registered @property def model_number(self): return self._model_number @property def brand(self): return self._brand @property def features(self): return self._features @property def last_modified(self): return self._last_modified @property def thermostat_time(self): return self._thermostat_time @property def utc_time(self): return self._utc_time @property def audio(self): return self._audio @audio.setter def audio(self, audio): self._audio = audio @property def alerts(self): return self._alerts @property def reminders(self): return self._reminders @property def settings(self): return self._settings @settings.setter def settings(self, settings): self._settings = settings @property def runtime(self): return self._runtime @property def extended_runtime(self): return self._extended_runtime @property def electricity(self): return self._electricity @property def devices(self): return self._devices @property def location(self): return self._location @location.setter def location(self, location): self._location = location @property def energy(self): return self._energy @energy.setter def energy(self, energy): self._energy = energy @property def technician(self): return self._technician @property def utility(self): return self._utility @property def management(self): return self._management @property def weather(self): return self._weather @property def events(self): return self._events @property def program(self): return self._program @program.setter
MIT License
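A minimal sketch of the slotted read/write property pattern used for Thermostat.program: a private slot exposed through a property whose setter assigns it. The Box class and its field are hypothetical stand-ins for the generated Thermostat attributes.

class Box:
    __slots__ = ["_program"]

    def __init__(self, program=None):
        self._program = program

    @property
    def program(self):
        return self._program

    @program.setter
    def program(self, program):
        self._program = program

b = Box()
b.program = "home-schedule"
print(b.program)  # home-schedule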
trusted-ai/adversarial-robustness-toolbox
art/estimators/classification/mxnet.py
MXClassifier.save
python
def save(self, filename: str, path: Optional[str] = None) -> None: if path is None: full_path = os.path.join(config.ART_DATA_PATH, filename) else: full_path = os.path.join(path, filename) folder = os.path.split(full_path)[0] if not os.path.exists(folder): os.makedirs(folder) self._model.save_parameters(full_path + ".params") logger.info("Model parameters saved in path: %s.params.", full_path)
Save a model to file in the format specific to the backend framework. For Gluon, only parameters are saved in file with name `<filename>.params` at the specified path. To load the saved model, the original model code needs to be run before calling `load_parameters` on the generated Gluon model. :param filename: Name of the file where to store the model. :param path: Path of the folder where to store the model. If no path is specified, the model will be stored in the default data location of the library `ART_DATA_PATH`.
https://github.com/trusted-ai/adversarial-robustness-toolbox/blob/564f46f99b3cb0406fe3570919b8e71a4c5bba9d/art/estimators/classification/mxnet.py#L499-L518
from __future__ import absolute_import, division, print_function, unicode_literals import logging import os from typing import List, Optional, Tuple, Union, TYPE_CHECKING import numpy as np import six from art import config from art.estimators.mxnet import MXEstimator from art.estimators.classification.classifier import ClassGradientsMixin, ClassifierMixin from art.utils import check_and_transform_label_format if TYPE_CHECKING: import mxnet as mx from art.utils import CLIP_VALUES_TYPE, PREPROCESSING_TYPE from art.data_generators import DataGenerator from art.defences.preprocessor import Preprocessor from art.defences.postprocessor import Postprocessor logger = logging.getLogger(__name__) class MXClassifier(ClassGradientsMixin, ClassifierMixin, MXEstimator): estimator_params = ( MXEstimator.estimator_params + ClassifierMixin.estimator_params + [ "loss", "input_shape", "nb_classes", "optimizer", "ctx", "channels_first", ] ) def __init__( self, model: "mx.gluon.Block", loss: Union["mx.nd.loss", "mx.gluon.loss"], input_shape: Tuple[int, ...], nb_classes: int, optimizer: Optional["mx.gluon.Trainer"] = None, ctx: Optional["mx.context.Context"] = None, channels_first: bool = True, clip_values: Optional["CLIP_VALUES_TYPE"] = None, preprocessing_defences: Union["Preprocessor", List["Preprocessor"], None] = None, postprocessing_defences: Union["Postprocessor", List["Postprocessor"], None] = None, preprocessing: "PREPROCESSING_TYPE" = (0.0, 1.0), ) -> None: import mxnet as mx super().__init__( model=model, clip_values=clip_values, channels_first=channels_first, preprocessing_defences=preprocessing_defences, postprocessing_defences=postprocessing_defences, preprocessing=preprocessing, ) self._loss = loss self._nb_classes = nb_classes self._input_shape = input_shape self._device = ctx self._optimizer = optimizer if ctx is None: self._ctx = mx.cpu() else: self._ctx = ctx self._layer_names = self._get_layers() @property def input_shape(self) -> Tuple[int, ...]: return self._input_shape @property def loss(self) -> Union["mx.nd.loss", "mx.gluon.loss"]: return self._loss @property def optimizer(self) -> "mx.gluon.Trainer": return self._optimizer @property def ctx(self) -> "mx.context.Context": return self._ctx def fit( self, x: np.ndarray, y: np.ndarray, batch_size: int = 128, nb_epochs: int = 20, **kwargs ) -> None: import mxnet as mx if self.optimizer is None: raise ValueError("An MXNet optimizer is required for fitting the model.") training_mode = True y = check_and_transform_label_format(y, self.nb_classes) x_preprocessed, y_preprocessed = self._apply_preprocessing(x, y, fit=True) y_preprocessed = np.argmax(y_preprocessed, axis=1) nb_batch = int(np.ceil(len(x_preprocessed) / batch_size)) ind = np.arange(len(x_preprocessed)) for _ in range(nb_epochs): np.random.shuffle(ind) for m in range(nb_batch): x_batch = mx.nd.array( x_preprocessed[ind[m * batch_size : (m + 1) * batch_size]].astype(config.ART_NUMPY_DTYPE) ).as_in_context(self.ctx) y_batch = mx.nd.array(y_preprocessed[ind[m * batch_size : (m + 1) * batch_size]]).as_in_context( self.ctx ) with mx.autograd.record(train_mode=training_mode): preds = self._model(x_batch) preds = self._apply_postprocessing(preds=preds, fit=True) loss = self.loss(preds, y_batch) loss.backward() self.optimizer.step(batch_size) def fit_generator(self, generator: "DataGenerator", nb_epochs: int = 20, **kwargs) -> None: import mxnet as mx from art.data_generators import MXDataGenerator if self.optimizer is None: raise ValueError("An MXNet optimizer is required for fitting the 
model.") training_mode = True if ( isinstance(generator, MXDataGenerator) and (self.preprocessing is None or self.preprocessing == []) and self.preprocessing == (0, 1) ): for _ in range(nb_epochs): for x_batch, y_batch in generator.iterator: x_batch = mx.nd.array(x_batch.astype(config.ART_NUMPY_DTYPE)).as_in_context(self.ctx) y_batch = mx.nd.argmax(y_batch, axis=1) y_batch = mx.nd.array(y_batch).as_in_context(self.ctx) with mx.autograd.record(train_mode=training_mode): preds = self._model(x_batch) loss = self.loss(preds, y_batch) loss.backward() self.optimizer.step(x_batch.shape[0]) else: super().fit_generator(generator, nb_epochs=nb_epochs) def predict( self, x: np.ndarray, batch_size: int = 128, training_mode: bool = False, **kwargs ) -> np.ndarray: import mxnet as mx x_preprocessed, _ = self._apply_preprocessing(x, y=None, fit=False) results = np.zeros((x_preprocessed.shape[0], self.nb_classes), dtype=np.float32) num_batch = int(np.ceil(len(x_preprocessed) / float(batch_size))) for m in range(num_batch): begin, end = ( m * batch_size, min((m + 1) * batch_size, x_preprocessed.shape[0]), ) x_batch = mx.nd.array(x_preprocessed[begin:end].astype(config.ART_NUMPY_DTYPE), ctx=self.ctx) x_batch.attach_grad() with mx.autograd.record(train_mode=training_mode): preds = self._model(x_batch) results[begin:end] = preds.asnumpy() predictions = self._apply_postprocessing(preds=results, fit=False) return predictions def class_gradient( self, x: np.ndarray, label: Union[int, List[int], None] = None, training_mode: bool = False, **kwargs ) -> np.ndarray: import mxnet as mx if not ( label is None or (isinstance(label, (int, np.integer)) and label in range(self.nb_classes)) or ( isinstance(label, np.ndarray) and len(label.shape) == 1 and (label < self.nb_classes).all() and label.shape[0] == x.shape[0] ) ): raise ValueError("Label %s is out of range." 
% str(label)) x_preprocessed, _ = self._apply_preprocessing(x, y=None, fit=False) x_preprocessed = mx.nd.array(x_preprocessed.astype(config.ART_NUMPY_DTYPE), ctx=self.ctx) x_preprocessed.attach_grad() if label is None: with mx.autograd.record(train_mode=False): preds = self._model(x_preprocessed) class_slices = [preds[:, i] for i in range(self.nb_classes)] grads = [] for slice_ in class_slices: slice_.backward(retain_graph=True) grad = x_preprocessed.grad.asnumpy() grads.append(grad) grads = np.swapaxes(np.array(grads), 0, 1) elif isinstance(label, (int, np.integer)): with mx.autograd.record(train_mode=training_mode): preds = self._model(x_preprocessed) class_slice = preds[:, label] class_slice.backward() grads = np.expand_dims(x_preprocessed.grad.asnumpy(), axis=1) else: unique_labels = list(np.unique(label)) with mx.autograd.record(train_mode=training_mode): preds = self._model(x_preprocessed) class_slices = [preds[:, i] for i in unique_labels] grads = [] for slice_ in class_slices: slice_.backward(retain_graph=True) grad = x_preprocessed.grad.asnumpy() grads.append(grad) grads = np.swapaxes(np.array(grads), 0, 1) lst = [unique_labels.index(i) for i in label] grads = grads[np.arange(len(grads)), lst] grads = np.expand_dims(grads, axis=1) grads = self._apply_preprocessing_gradient(x, grads) return grads def loss_gradient( self, x: np.ndarray, y: np.ndarray, training_mode: bool = False, **kwargs ) -> np.ndarray: import mxnet as mx x_preprocessed, y_preprocessed = self._apply_preprocessing(x, y, fit=False) y_preprocessed = mx.nd.array([np.argmax(y_preprocessed, axis=1)], ctx=self.ctx).T x_preprocessed = mx.nd.array(x_preprocessed.astype(config.ART_NUMPY_DTYPE), ctx=self.ctx) x_preprocessed.attach_grad() with mx.autograd.record(train_mode=training_mode): preds = self._model(x_preprocessed) loss = self.loss(preds, y_preprocessed) loss.backward() grads = x_preprocessed.grad.asnumpy() grads = self._apply_preprocessing_gradient(x, grads) assert grads.shape == x.shape return grads def compute_loss(self, x: np.ndarray, y: np.ndarray, **kwargs) -> np.ndarray: raise NotImplementedError @property def layer_names(self) -> List[str]: return self._layer_names def get_activations( self, x: np.ndarray, layer: Union[int, str], batch_size: int = 128, framework: bool = False ) -> np.ndarray: import mxnet as mx if isinstance(layer, six.string_types): if layer not in self._layer_names: raise ValueError("Layer name %s is not part of the model." % layer) layer_ind = self._layer_names.index(layer) elif isinstance(layer, int): if layer < 0 or layer >= len(self._layer_names): raise ValueError( "Layer index %d is outside of range (0 to %d included)." % (layer, len(self._layer_names) - 1) ) layer_ind = layer else: raise TypeError("Layer must be of type `str` or `int`.") if x.shape == self.input_shape: x_expanded = np.expand_dims(x, 0) else: x_expanded = x x_preprocessed, _ = self._apply_preprocessing(x=x_expanded, y=None, fit=False) if framework: return self._model[layer_ind] activations = [] nb_batches = int(np.ceil(len(x_preprocessed) / float(batch_size))) for batch_index in range(nb_batches): begin, end = ( batch_index * batch_size, min((batch_index + 1) * batch_size, x_preprocessed.shape[0]), ) x_batch = mx.nd.array(x_preprocessed[begin:end].astype(config.ART_NUMPY_DTYPE), ctx=self.ctx) x_batch.attach_grad() with mx.autograd.record(train_mode=False): preds = self._model[layer_ind](x_batch) activations.append(preds.asnumpy()) activations = np.vstack(activations) return activations
MIT License
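A standalone sketch of the save() logic above: resolve a default directory when no path is given, create any missing folders, then write the artifact with a ".params" suffix. DEFAULT_DATA_PATH and save_blob are hypothetical stand-ins for config.ART_DATA_PATH and the Gluon save_parameters call.

import os

DEFAULT_DATA_PATH = os.path.expanduser("~/.demo_models")

def save_blob(blob, filename, path=None):
    full_path = os.path.join(path or DEFAULT_DATA_PATH, filename)
    folder = os.path.split(full_path)[0]
    if not os.path.exists(folder):
        os.makedirs(folder)
    with open(full_path + ".params", "wb") as fp:
        fp.write(blob)
    return full_path + ".params"

print(save_blob(b"\x00\x01", "toy_model"))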
rlouf/mcx
mcx/distributions/distribution.py
Distribution.sample
python
def sample( self, rng_key: jnp.ndarray, sample_shape: Union[Tuple[()], Tuple[int]] ) -> jax.numpy.DeviceArray: pass
Obtain samples from the distribution. Parameters ---------- rng_key: jnp.ndarray The pseudo-random number generator key to use to draw samples. sample_shape: Tuple[int] The number of independent, identically distributed samples to draw from the distribution. Returns ------- jax.numpy.DeviceArray An array of shape sample_shape + batch_shape + event_shape with independent samples.
https://github.com/rlouf/mcx/blob/26c316f2911dac86fbc585b66a8652872187f64e/mcx/distributions/distribution.py#L65-L83
from abc import ABC, abstractmethod from typing import Dict, Tuple, Union import jax from jax import numpy as jnp from .constraints import Constraint class Distribution(ABC): parameters: Dict[str, Constraint] support: Constraint @abstractmethod def __init__(self, *args) -> None: pass @abstractmethod
Apache License 2.0
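A standalone sketch of the sample() contract described in the docstring above: draw sample_shape-many iid values from an explicit PRNG key. It uses plain jax.random rather than subclassing mcx's Distribution, and sample_normal is a made-up helper.

import jax

def sample_normal(rng_key, sample_shape, loc=0.0, scale=1.0):
    # Result shape is sample_shape (+ batch/event shape, scalar here).
    return loc + scale * jax.random.normal(rng_key, shape=sample_shape)

key = jax.random.PRNGKey(0)
draws = sample_normal(key, sample_shape=(5,))
print(draws.shape)  # (5,)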
google/fedjax
fedjax/core/models.py
ModelEvaluator.evaluate_per_client_params
python
def evaluate_per_client_params( self, clients: Iterable[Tuple[federated_data.ClientId, Iterable[BatchExample], Params]] ) -> Iterator[Tuple[federated_data.ClientId, Dict[str, jnp.ndarray]]]: yield from self._evaluate_each_client(shared_input=None, clients=clients)
Evaluates batches from each client using per client params. Args: clients: Client batches and the per client params. Yields: Pairs of the client id and a dictionary of evaluation `Metric` results for each client.
https://github.com/google/fedjax/blob/24f768c9aa2959f76d91c3e5aa0e513721c903d7/fedjax/core/models.py#L337-L350
import functools from typing import Any, Callable, Dict, Iterable, Iterator, Optional, Mapping, Tuple from fedjax.core import client_datasets from fedjax.core import dataclasses from fedjax.core import federated_data from fedjax.core import for_each_client from fedjax.core import metrics from fedjax.core import util from fedjax.core.typing import BatchExample from fedjax.core.typing import BatchPrediction from fedjax.core.typing import Params from fedjax.core.typing import PRNGKey import haiku as hk import jax import jax.numpy as jnp BatchTrainOutput = jnp.ndarray BatchEvalPrediction = BatchPrediction @dataclasses.dataclass class Model: init: Callable[[PRNGKey], Params] apply_for_train: Callable[[Params, BatchExample, PRNGKey], BatchTrainOutput] apply_for_eval: Callable[[Params, BatchExample], BatchEvalPrediction] train_loss: Callable[[BatchExample, BatchTrainOutput], jnp.ndarray] eval_metrics: Mapping[str, metrics.Metric] def __hash__(self) -> int: return id(self) def __eq__(self, other: Any) -> bool: return self is other def create_model_from_haiku( transformed_forward_pass: hk.Transformed, sample_batch: BatchExample, train_loss: Callable[[BatchExample, BatchTrainOutput], jnp.ndarray], eval_metrics: Optional[Mapping[str, metrics.Metric]] = None, train_kwargs: Optional[Mapping[str, Any]] = None, eval_kwargs: Optional[Mapping[str, Any]] = None) -> Model: eval_metrics = eval_metrics or {} train_kwargs = train_kwargs or {} eval_kwargs = eval_kwargs or {} @jax.jit def init(rng): return transformed_forward_pass.init(rng, sample_batch) @jax.jit def apply_for_train(params, batch, rng=None): return transformed_forward_pass.apply(params, rng, batch, **train_kwargs) @jax.jit def apply_for_eval(params, batch): return transformed_forward_pass.apply(params, None, batch, **eval_kwargs) return Model(init, apply_for_train, apply_for_eval, train_loss, eval_metrics) def create_model_from_stax( stax_init: Callable[..., Params], stax_apply: Callable[..., jnp.ndarray], sample_shape: Tuple[int, ...], train_loss: Callable[[BatchExample, BatchTrainOutput], jnp.ndarray], eval_metrics: Optional[Mapping[str, metrics.Metric]] = None, train_kwargs: Optional[Mapping[str, Any]] = None, eval_kwargs: Optional[Mapping[str, Any]] = None, input_key: str = 'x') -> Model: eval_metrics = eval_metrics or {} train_kwargs = train_kwargs or {} eval_kwargs = eval_kwargs or {} @jax.jit def init(rng): _, params = stax_init(rng, sample_shape) return params @jax.jit def apply_for_train(params, batch, rng=None): return stax_apply(params, batch[input_key], rng=rng, **train_kwargs) @jax.jit def apply_for_eval(params, batch): return stax_apply(params, batch[input_key], **eval_kwargs) return Model(init, apply_for_train, apply_for_eval, train_loss, eval_metrics) @functools.partial(jax.jit, static_argnums=0) def _evaluate_model_step(model: Model, params: Params, batch: BatchExample, stat: metrics.Stat) -> Dict[str, metrics.Stat]: try: mask = batch[client_datasets.EXAMPLE_MASK_KEY].astype(jnp.bool_) except KeyError: mask = jnp.ones([len(next(iter(batch.values())))], dtype=jnp.bool_) pred = model.apply_for_eval(params, batch) new_stat = { k: metrics.evaluate_batch(metric, batch, pred, mask) for k, metric in model.eval_metrics.items() } return jax.tree_util.tree_multimap( lambda a, b: a.merge(b), stat, new_stat, is_leaf=lambda v: isinstance(v, metrics.Stat)) def evaluate_model(model: Model, params: Params, batches: Iterable[BatchExample]) -> Dict[str, jnp.ndarray]: stat = {k: metric.zero() for k, metric in model.eval_metrics.items()} for batch 
in batches: stat = _evaluate_model_step(model, params, batch, stat) return jax.tree_util.tree_map( lambda x: x.result(), stat, is_leaf=lambda v: isinstance(v, metrics.Stat)) class ModelEvaluator: def __init__(self, model: Model): def client_init(shared_input, client_input): if shared_input is not None: params = shared_input else: params = client_input stat = {k: metric.zero() for k, metric in model.eval_metrics.items()} return params, stat def client_step(state, batch): params, stat = state next_stat = _evaluate_model_step(model, params, batch, stat) return params, next_stat def client_final(shared_input, state): del shared_input _, stat = state return {k: v.result() for k, v in stat.items()} self._evaluate_each_client = for_each_client.for_each_client( client_init, client_step, client_final) def evaluate_global_params( self, params: Params, clients: Iterable[Tuple[federated_data.ClientId, Iterable[BatchExample]]] ) -> Iterator[Tuple[federated_data.ClientId, Dict[str, jnp.ndarray]]]: yield from self._evaluate_each_client( shared_input=params, clients=[(client_id, batches, None) for client_id, batches in clients])
Apache License 2.0
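A toy, framework-free sketch of the per-client-params evaluation flow above: each client supplies its own batches and its own params, and the generator yields (client_id, metrics) pairs. The thresholded-accuracy "model" is invented for illustration and is not fedjax's API.

def evaluate_per_client_params(clients):
    for client_id, batches, params in clients:
        correct = total = 0
        for batch in batches:
            for score, label in zip(batch["scores"], batch["labels"]):
                correct += int((score > params["threshold"]) == bool(label))
                total += 1
        yield client_id, {"accuracy": correct / max(total, 1)}

clients = [
    ("client_a", [{"scores": [0.9, 0.2], "labels": [1, 0]}], {"threshold": 0.5}),
    ("client_b", [{"scores": [0.4, 0.8], "labels": [1, 1]}], {"threshold": 0.3}),
]
for client_id, metrics in evaluate_per_client_params(clients):
    print(client_id, metrics)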
ialbert/bio
biorun/utils.py
gz_write
python
def gz_write(fname, flag='wt'): stream = gzip.open(fname, flag, compresslevel=3) if fname.endswith(".gz") else open(fname, flag) return stream
Shortcut to opening gzipped or regular files
https://github.com/ialbert/bio/blob/564a77c8ee92a1791beea56eedf16b722a0c3932/biorun/utils.py#L387-L392
import gzip import json import logging import os import shutil import sys import tempfile from io import StringIO from itertools import * from os.path import expanduser import requests from biorun.libs.sqlitedict import SqliteDict from tqdm import tqdm __CURR_DIR = os.path.dirname(__file__) __TMPL_DIR = os.path.join(__CURR_DIR, "templates") DATADIR = os.path.join(expanduser("~"), ".bio") os.makedirs(DATADIR, exist_ok=True) def read_lines(stream, index=0, sep=None): lines = filter(lambda x: not x.strip().startswith("#"), stream) lines = filter(lambda x: x.strip(), lines) try: lines = map(lambda x: x.strip().split(sep=sep)[index], lines) except IndexError as exc: error(f"column index out of bounds: {index}") lines = filter(None, lines) lines = list(lines) return lines def get_streams(fnames): streams = [] for name in fnames: if not os.path.isfile(name): error(f"File not found: {name}") streams.append(open(name)) if not sys.stdin.isatty(): streams.append(sys.stdin) return streams def parse_alias(fname): if not fname or not os.path.isfile(fname): return dict() stream = open(fname) lines = map(lambda x: x.strip(), stream) lines = filter(lambda x: not x.startswith("#"), lines) lines = filter(None, lines) lines = map(lambda x: x.split(), lines) lines = filter(lambda x: len(x) > 1, lines) pairs = map(lambda x: (x[0].strip(), x[1].strip()), lines) remap = dict(pairs) return remap def is_int(text): try: int(text) return True except ValueError as exc: return False def trim(text, size=3): div, mod = divmod(len(text), size) subs = text[:div * size] return subs def lower_case_keys(adict): return dict((k.lower(), v) for (k, v) in adict.items()) def int_or_zero(text): try: return int(text) except ValueError as exc: return 0 def open_db(table, fname, flag='c', strict=True): if strict and not os.path.isfile(fname): error(f"Database not found: {fname}", stop=False) error(f"Run: bio --download") conn = SqliteDict(fname, tablename=table, flag=flag, encode=json.dumps, decode=json.loads) return conn def open_streams(fnames=[]): if not sys.stdin.isatty(): stream = StringIO(sys.stdin.read()) yield stream for fname in fnames: if not os.path.isfile(fname): error(f" file not found: {fname}") stream = gzip.open(fname) if fname.endswith("gz") else open(fname) yield stream def concat_stream(streams): return chain.from_iterable(streams) def save_table(name, obj, fname, flg='w', chunk=20000, cache=False): path = cache_path(fname) if cache else fname size = len(obj) table = open_db(table=name, fname=path, flag=flg, strict=False) stream = enumerate(obj.items()) stream = tqdm(stream, total=size, desc=f"# {name}") for index, (key, value) in stream: table[key] = value if index % chunk == 0: table.commit() print(f"# saved {name} with {size:,} elements", end="\r") print("") table.commit() table.close() class Fasta: def __init__(self, name, lines=[], seq=''): try: self.name = name.rstrip().split()[0] if lines: self.seq = "".join(lines).replace(" ", "").replace("\r", "").upper() else: self.seq = seq.upper() except Exception as exc: error(f"Invalid FASTA format: {exc}") def fasta_parser(stream): for line in stream: if line[0] == ">": title = line[1:] break else: return lines = [] for line in stream: if line[0] == ">": yield Fasta(name=title, lines=lines) lines = [] title = line[1:] continue lines.append(line.rstrip()) fasta = Fasta(name=title, lines=lines) yield fasta def plural(target, val=0, end='ies'): output = target if val == 1 else f"{target[:-1]}{end}" return output def urlopen(url, params={}): r = requests.get(url, 
stream=True, params=params) try: r.raise_for_status() except Exception as exc: error(f"{exc}") return r CHUNK_SIZE = 2500 def cache_path(fname): return os.path.join(DATADIR, fname) def download(url, fname, cache=False, params={}, overwrite=False): logger.info(f"downloading: {url}") path = cache_path(fname) if cache else fname url = url.replace('ftp:', 'http:') if url.startswith('ftp:') else url r = urlopen(url=url, params=params) headers = lower_case_keys(r.headers) size = headers.get("content-length", 0) total = int_or_zero(size) chunk_size = 1 * 1024 * 1024 pbar = tqdm(desc=f"# {fname}", unit="B", unit_scale=True, unit_divisor=1024, total=total) try: tempname = os.path.join(tempfile.gettempdir(), os.urandom(24).hex()) logger.info(f"tempfile: {tempname}") fp = open(tempname, 'wb') for chunk in r.iter_content(chunk_size=chunk_size): total += len(chunk) fp.write(chunk) pbar.update(len(chunk)) fp.close() shutil.copy(tempname, path) logger.info(f"saved to: {path}") finally: os.unlink(tempname) pbar.close() ROOT_URL = "http://www.bioinfo.help/data/" def download_prebuilt(fname='biodata.tar.gz'): import tarfile url = f"{ROOT_URL}{fname}" path = cache_path(fname) download(url=url, fname=path) fp = tarfile.open(path) dirpath = os.path.dirname(path) print("# extracting files") fp.extractall(dirpath) fp.close() print("# download completed.") def no_dash(alist): elems = list(filter(lambda x: x.startswith('-'), alist)) if elems: msg = f"Invalid accessions: {elems}" error(msg) def zero_based(start, end): if start == 0: error(f"start={start} may not be zero") start = int(start) if start else 1 end = None if not end else int(end) start = start - 1 if start > 0 else start return start, end def human_size(num): for unit in ['B', 'KB', 'MB', 'GB']: if abs(num) < 1024.0: return "%.0f %s" % (num, unit) num /= 1024.0 return "%.1f%s" % (num, '??') def safe_int(text): try: return int(text) except ValueError as exc: logger.error(f"not a valid integer value: {text}") sys.exit() def parse_number(text): if text in ('', None): return None text = str(text) text = text.lower() text = text.replace(",", '') if text.endswith("k") or text.endswith("kb"): value = safe_int(text.split("k")[0]) text = f"{value * 1000}" if text.endswith("m") or text.endswith("mb"): value = safe_int(text.split("m")[0]) text = f"{value * 1000 * 1000}" return safe_int(text)
MIT License
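A brief usage sketch for gz_write above, assuming the bio package (which provides biorun.utils) is installed; the ".gz" suffix decides whether the returned stream is gzip-compressed or a plain text file, and the file names are arbitrary.

from biorun.utils import gz_write

stream = gz_write("records.txt.gz")   # gzip.open(..., "wt", compresslevel=3)
stream.write("ACGT\n")
stream.close()

plain = gz_write("records.txt")       # regular open(..., "wt")
plain.write("ACGT\n")
plain.close()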
aws-solutions/aws-mlops-framework
source/lib/blueprints/byom/lambdas/create_baseline_job/baselines_helper.py
exception_handler
python
def exception_handler(func: Callable[..., Any]) -> Any: def wrapper_function(*args, **kwargs): try: return func(*args, **kwargs) except Exception as e: logger.error(f"Error in {func.__name__}: {str(e)}") raise e return wrapper_function
Decorator function to handle exceptions Args: func (object): function to be decorated Returns: func's return value Raises: Exception thrown by the decorated function
https://github.com/aws-solutions/aws-mlops-framework/blob/c2315eed90496371ebc6f2c8d259b95bdcfb41f7/source/lib/blueprints/byom/lambdas/create_baseline_job/baselines_helper.py#L23-L45
from typing import Callable, Any, Dict, List, Optional import logging import sagemaker from sagemaker.model_monitor import DefaultModelMonitor from sagemaker.model_monitor import ModelQualityMonitor from sagemaker.model_monitor.dataset_format import DatasetFormat logger = logging.getLogger(__name__)
Apache License 2.0
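A small usage sketch of the exception_handler decorator above: the wrapper logs the failing function's name and message, then re-raises. The decorator body is copied from the record; divide() is a made-up example function.

import logging

logging.basicConfig(level=logging.INFO)
logger = logging.getLogger(__name__)

def exception_handler(func):
    def wrapper_function(*args, **kwargs):
        try:
            return func(*args, **kwargs)
        except Exception as e:
            logger.error(f"Error in {func.__name__}: {str(e)}")
            raise e
    return wrapper_function

@exception_handler
def divide(a, b):
    return a / b

print(divide(6, 3))  # 2.0
# divide(1, 0)       # logs "Error in divide: division by zero" and re-raises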
mitjafelicijan/redis-marshal
redis/connection.py
Connection.pack_command
python
def pack_command(self, *args): output = [] command = args[0] if ' ' in command: args = tuple([Token.get_token(s) for s in command.split()]) + args[1:] else: args = (Token.get_token(command),) + args[1:] buff = SYM_EMPTY.join( (SYM_STAR, b(str(len(args))), SYM_CRLF)) for arg in imap(self.encoder.encode, args): if len(buff) > 6000 or len(arg) > 6000: buff = SYM_EMPTY.join( (buff, SYM_DOLLAR, b(str(len(arg))), SYM_CRLF)) output.append(buff) output.append(arg) buff = SYM_CRLF else: buff = SYM_EMPTY.join((buff, SYM_DOLLAR, b(str(len(arg))), SYM_CRLF, arg, SYM_CRLF)) output.append(buff) return output
Pack a series of arguments into the Redis protocol
https://github.com/mitjafelicijan/redis-marshal/blob/57c730529e86f803fc489e4d52973fd37fa12d53/redis/connection.py#L633-L664
from __future__ import with_statement from distutils.version import StrictVersion from itertools import chain import os import socket import sys import threading import warnings try: import ssl ssl_available = True except ImportError: ssl_available = False from redis._compat import (b, xrange, imap, byte_to_chr, unicode, bytes, long, BytesIO, nativestr, basestring, iteritems, LifoQueue, Empty, Full, urlparse, parse_qs, recv, recv_into, select, unquote) from redis.exceptions import ( RedisError, ConnectionError, TimeoutError, BusyLoadingError, ResponseError, InvalidResponse, AuthenticationError, NoScriptError, ExecAbortError, ReadOnlyError ) from redis.utils import HIREDIS_AVAILABLE if HIREDIS_AVAILABLE: import hiredis hiredis_version = StrictVersion(hiredis.__version__) HIREDIS_SUPPORTS_CALLABLE_ERRORS = hiredis_version >= StrictVersion('0.1.3') HIREDIS_SUPPORTS_BYTE_BUFFER = hiredis_version >= StrictVersion('0.1.4') if not HIREDIS_SUPPORTS_BYTE_BUFFER: msg = ("redis-py works best with hiredis >= 0.1.4. You're running " "hiredis %s. Please consider upgrading." % hiredis.__version__) warnings.warn(msg) HIREDIS_USE_BYTE_BUFFER = True if not HIREDIS_SUPPORTS_BYTE_BUFFER or ( sys.version_info[0] == 2 and sys.version_info[1] < 7): HIREDIS_USE_BYTE_BUFFER = False SYM_STAR = b('*') SYM_DOLLAR = b('$') SYM_CRLF = b('\r\n') SYM_EMPTY = b('') SERVER_CLOSED_CONNECTION_ERROR = "Connection closed by server." class Token(object): _cache = {} @classmethod def get_token(cls, value): try: return cls._cache[value] except KeyError: token = Token(value) cls._cache[value] = token return token def __init__(self, value): if isinstance(value, Token): value = value.value self.value = value self.encoded_value = b(value) def __repr__(self): return self.value def __str__(self): return self.value class Encoder(object): def __init__(self, encoding, encoding_errors, decode_responses): self.encoding = encoding self.encoding_errors = encoding_errors self.decode_responses = decode_responses def encode(self, value): if isinstance(value, Token): return value.encoded_value elif isinstance(value, bytes): return value elif isinstance(value, (int, long)): value = b(str(value)) elif isinstance(value, float): value = b(repr(value)) elif not isinstance(value, basestring): value = unicode(value) if isinstance(value, unicode): value = value.encode(self.encoding, self.encoding_errors) return value def decode(self, value, force=False): if (self.decode_responses or force) and isinstance(value, bytes): value = value.decode(self.encoding, self.encoding_errors) return value class BaseParser(object): EXCEPTION_CLASSES = { 'ERR': { 'max number of clients reached': ConnectionError }, 'EXECABORT': ExecAbortError, 'LOADING': BusyLoadingError, 'NOSCRIPT': NoScriptError, 'READONLY': ReadOnlyError, } def parse_error(self, response): error_code = response.split(' ')[0] if error_code in self.EXCEPTION_CLASSES: response = response[len(error_code) + 1:] exception_class = self.EXCEPTION_CLASSES[error_code] if isinstance(exception_class, dict): exception_class = exception_class.get(response, ResponseError) return exception_class(response) return ResponseError(response) class SocketBuffer(object): def __init__(self, socket, socket_read_size): self._sock = socket self.socket_read_size = socket_read_size self._buffer = BytesIO() self.bytes_written = 0 self.bytes_read = 0 @property def length(self): return self.bytes_written - self.bytes_read def _read_from_socket(self, length=None): socket_read_size = self.socket_read_size buf = self._buffer 
buf.seek(self.bytes_written) marker = 0 try: while True: data = recv(self._sock, socket_read_size) if isinstance(data, bytes) and len(data) == 0: raise socket.error(SERVER_CLOSED_CONNECTION_ERROR) buf.write(data) data_length = len(data) self.bytes_written += data_length marker += data_length if length is not None and length > marker: continue break except socket.timeout: raise TimeoutError("Timeout reading from socket") except socket.error: e = sys.exc_info()[1] raise ConnectionError("Error while reading from socket: %s" % (e.args,)) def read(self, length): length = length + 2 if length > self.length: self._read_from_socket(length - self.length) self._buffer.seek(self.bytes_read) data = self._buffer.read(length) self.bytes_read += len(data) if self.bytes_read == self.bytes_written: self.purge() return data[:-2] def readline(self): buf = self._buffer buf.seek(self.bytes_read) data = buf.readline() while not data.endswith(SYM_CRLF): self._read_from_socket() buf.seek(self.bytes_read) data = buf.readline() self.bytes_read += len(data) if self.bytes_read == self.bytes_written: self.purge() return data[:-2] def purge(self): self._buffer.seek(0) self._buffer.truncate() self.bytes_written = 0 self.bytes_read = 0 def close(self): try: self.purge() self._buffer.close() except: pass self._buffer = None self._sock = None class PythonParser(BaseParser): def __init__(self, socket_read_size): self.socket_read_size = socket_read_size self.encoder = None self._sock = None self._buffer = None def __del__(self): try: self.on_disconnect() except Exception: pass def on_connect(self, connection): self._sock = connection._sock self._buffer = SocketBuffer(self._sock, self.socket_read_size) self.encoder = connection.encoder def on_disconnect(self): if self._sock is not None: self._sock.close() self._sock = None if self._buffer is not None: self._buffer.close() self._buffer = None self.encoder = None def can_read(self): return self._buffer and bool(self._buffer.length) def read_response(self): response = self._buffer.readline() if not response: raise ConnectionError(SERVER_CLOSED_CONNECTION_ERROR) byte, response = byte_to_chr(response[0]), response[1:] if byte not in ('-', '+', ':', '$', '*'): raise InvalidResponse("Protocol Error: %s, %s" % (str(byte), str(response))) if byte == '-': response = nativestr(response) error = self.parse_error(response) if isinstance(error, ConnectionError): raise error return error elif byte == '+': pass elif byte == ':': response = long(response) elif byte == '$': length = int(response) if length == -1: return None response = self._buffer.read(length) elif byte == '*': length = int(response) if length == -1: return None response = [self.read_response() for i in xrange(length)] if isinstance(response, bytes): response = self.encoder.decode(response) return response class HiredisParser(BaseParser): def __init__(self, socket_read_size): if not HIREDIS_AVAILABLE: raise RedisError("Hiredis is not installed") self.socket_read_size = socket_read_size if HIREDIS_USE_BYTE_BUFFER: self._buffer = bytearray(socket_read_size) def __del__(self): try: self.on_disconnect() except Exception: pass def on_connect(self, connection): self._sock = connection._sock kwargs = { 'protocolError': InvalidResponse, 'replyError': self.parse_error, } if not HIREDIS_SUPPORTS_CALLABLE_ERRORS: kwargs['replyError'] = ResponseError if connection.encoder.decode_responses: kwargs['encoding'] = connection.encoder.encoding self._reader = hiredis.Reader(**kwargs) self._next_response = False def on_disconnect(self): 
self._sock = None self._reader = None self._next_response = False def can_read(self): if not self._reader: raise ConnectionError(SERVER_CLOSED_CONNECTION_ERROR) if self._next_response is False: self._next_response = self._reader.gets() return self._next_response is not False def read_response(self): if not self._reader: raise ConnectionError(SERVER_CLOSED_CONNECTION_ERROR) if self._next_response is not False: response = self._next_response self._next_response = False return response response = self._reader.gets() socket_read_size = self.socket_read_size while response is False: try: if HIREDIS_USE_BYTE_BUFFER: bufflen = recv_into(self._sock, self._buffer) if bufflen == 0: raise socket.error(SERVER_CLOSED_CONNECTION_ERROR) else: buffer = recv(self._sock, socket_read_size) if not isinstance(buffer, bytes) or len(buffer) == 0: raise socket.error(SERVER_CLOSED_CONNECTION_ERROR) except socket.timeout: raise TimeoutError("Timeout reading from socket") except socket.error: e = sys.exc_info()[1] raise ConnectionError("Error while reading from socket: %s" % (e.args,)) if HIREDIS_USE_BYTE_BUFFER: self._reader.feed(self._buffer, 0, bufflen) else: self._reader.feed(buffer) response = self._reader.gets() if not HIREDIS_SUPPORTS_CALLABLE_ERRORS: if isinstance(response, ResponseError): response = self.parse_error(response.args[0]) elif isinstance(response, list) and response and isinstance(response[0], ResponseError): response[0] = self.parse_error(response[0].args[0]) if isinstance(response, ConnectionError): raise response elif isinstance(response, list) and response and isinstance(response[0], ConnectionError): raise response[0] return response if HIREDIS_AVAILABLE: DefaultParser = HiredisParser else: DefaultParser = PythonParser class Connection(object): description_format = "Connection<host=%(host)s,port=%(port)s,db=%(db)s>" def __init__(self, host='localhost', port=6379, db=0, password=None, socket_timeout=None, socket_connect_timeout=None, socket_keepalive=False, socket_keepalive_options=None, socket_type=0, retry_on_timeout=False, encoding='utf-8', encoding_errors='strict', decode_responses=False, parser_class=DefaultParser, socket_read_size=65536): self.pid = os.getpid() self.host = host self.port = int(port) self.db = db self.password = password self.socket_timeout = socket_timeout self.socket_connect_timeout = socket_connect_timeout or socket_timeout self.socket_keepalive = socket_keepalive self.socket_keepalive_options = socket_keepalive_options or {} self.socket_type = socket_type self.retry_on_timeout = retry_on_timeout self.encoder = Encoder(encoding, encoding_errors, decode_responses) self._sock = None self._parser = parser_class(socket_read_size=socket_read_size) self._description_args = { 'host': self.host, 'port': self.port, 'db': self.db, } self._connect_callbacks = [] def __repr__(self): return self.description_format % self._description_args def __del__(self): try: self.disconnect() except Exception: pass def register_connect_callback(self, callback): self._connect_callbacks.append(callback) def clear_connect_callbacks(self): self._connect_callbacks = [] def connect(self): if self._sock: return try: sock = self._connect() except socket.timeout: raise TimeoutError("Timeout connecting to server") except socket.error: e = sys.exc_info()[1] raise ConnectionError(self._error_message(e)) self._sock = sock try: self.on_connect() except RedisError: self.disconnect() raise for callback in self._connect_callbacks: callback(self) def _connect(self): err = None for res in 
socket.getaddrinfo(self.host, self.port, self.socket_type, socket.SOCK_STREAM): family, socktype, proto, canonname, socket_address = res sock = None try: sock = socket.socket(family, socktype, proto) sock.setsockopt(socket.IPPROTO_TCP, socket.TCP_NODELAY, 1) if self.socket_keepalive: sock.setsockopt(socket.SOL_SOCKET, socket.SO_KEEPALIVE, 1) for k, v in iteritems(self.socket_keepalive_options): sock.setsockopt(socket.SOL_TCP, k, v) sock.settimeout(self.socket_connect_timeout) sock.connect(socket_address) sock.settimeout(self.socket_timeout) return sock except socket.error as _: err = _ if sock is not None: sock.close() if err is not None: raise err raise socket.error("socket.getaddrinfo returned an empty list") def _error_message(self, exception): if len(exception.args) == 1: return "Error connecting to %s:%s. %s." % (self.host, self.port, exception.args[0]) else: return "Error %s connecting to %s:%s. %s." % (exception.args[0], self.host, self.port, exception.args[1]) def on_connect(self): self._parser.on_connect(self) if self.password: self.send_command('AUTH', self.password) if nativestr(self.read_response()) != 'OK': raise AuthenticationError('Invalid Password') if self.db: self.send_command('SELECT', self.db) if nativestr(self.read_response()) != 'OK': raise ConnectionError('Invalid Database') def disconnect(self): self._parser.on_disconnect() if self._sock is None: return try: self._sock.shutdown(socket.SHUT_RDWR) self._sock.close() except socket.error: pass self._sock = None def send_packed_command(self, command): if not self._sock: self.connect() try: if isinstance(command, str): command = [command] for item in command: self._sock.sendall(item) except socket.timeout: self.disconnect() raise TimeoutError("Timeout writing to socket") except socket.error: e = sys.exc_info()[1] self.disconnect() if len(e.args) == 1: errno, errmsg = 'UNKNOWN', e.args[0] else: errno = e.args[0] errmsg = e.args[1] raise ConnectionError("Error %s while writing to socket. %s." % (errno, errmsg)) except: self.disconnect() raise def send_command(self, *args): self.send_packed_command(self.pack_command(*args)) def can_read(self, timeout=0): sock = self._sock if not sock: self.connect() sock = self._sock return self._parser.can_read() or bool(select([sock], [], [], timeout)[0]) def read_response(self): try: response = self._parser.read_response() except: self.disconnect() raise if isinstance(response, ResponseError): raise response return response
MIT License
aotuai/brainframe-qt
brainframe_qt/ui/resources/ui_elements/applications/messaging_application.py
MessagingServer._handle_connection
python
def _handle_connection(self) -> None:
    if not self.hasPendingConnections():
        return

    if self.current_connection is not None:
        logging.debug(
            f"Received new connection, but we're already handling another: "
            f"0x{id(self.current_connection):x}"
        )

    self.current_connection = socket = self.nextPendingConnection()
    logging.debug(f"Client connected to server: 0x{id(socket):x}")

    if socket.canReadLine():
        logging.debug("immediate _handle_ready_read")
        self._handle_ready_read()
    else:
        logging.debug("connect _handle_ready_read")
        socket.readyRead.connect(self._handle_ready_read)

    if socket.error() not in [socket.UnknownSocketError, socket.PeerClosedError]:
        logging.debug("immediate _handle_socket_error")
        self._handle_socket_error(socket.error())
    else:
        logging.debug("connect _handle_socket_error")
        socket.error.connect(self._handle_socket_error)

    if socket.state() == QLocalSocket.UnconnectedState:
        logging.debug("immediate _handle_disconnect")
        self._handle_disconnect()
    else:
        logging.debug("connect _handle_disconnect")
        socket.disconnected.connect(self._handle_disconnect)
Called when a new connection is made to the server. Connects handler functions to the different events that can occur with a socket
https://github.com/aotuai/brainframe-qt/blob/23b47af6b6da448439288624f6b15515e79ee8d0/brainframe_qt/ui/resources/ui_elements/applications/messaging_application.py#L177-L219
import logging import typing from typing import NewType, Dict, Optional try: from PyQt5 import sip except ImportError: import sip from PyQt5.QtCore import pyqtSignal, QObject from PyQt5.QtNetwork import QLocalServer, QLocalSocket, QAbstractSocket from PyQt5.QtWidgets import QApplication IntraInstanceMessage = NewType("IntraInstanceMessage", str) class MessagingApplication(QApplication): class BaseMessagingError(Exception): pass class UnknownMessageError(BaseMessagingError): pass def __init__(self, *, socket_name: str, force_client=False): super().__init__([]) self._known_messages: Dict[IntraInstanceMessage, pyqtSignal] = {} if force_client: self.message_server = None else: self.message_server = self._init_message_server(socket_name) self.message_socket = ( self._init_message_socket(socket_name) if self.message_server is None else None ) self.__init_signals() def _init_message_server(self, socket_name: str, retries: int = 0) -> Optional["MessagingServer"]: if MessagingSocket.is_server_alive(socket_name): return None message_server = MessagingServer(socket_name=socket_name, parent=self) if message_server.serverError() == QAbstractSocket.AddressInUseError: if retries > 3: logging.error(f"Max retries reached. Giving up on socket takeover") return None logging.warning(f"Socket for IPC is open, but connection was refused. " f"Attempting takeover.") MessagingServer.removeServer(socket_name) message_server = self._init_message_server(socket_name, retries=retries + 1) if message_server: logging.info("IPC server socket takeover successful") else: logging.error("Unable to perform takeover on socket") return message_server def _init_message_socket(self, server_name: str) -> "MessagingSocket": message_socket = MessagingSocket(server_name=server_name, parent=self) return message_socket def __init_signals(self) -> None: if self.is_server: self.message_server.new_message.connect(self._handle_message) @property def is_server(self) -> bool: return self.message_server is not None @property def is_client(self) -> bool: return self.message_socket is not None def register_new_message( self, message_str: str, signal: pyqtSignal ) -> IntraInstanceMessage: message = IntraInstanceMessage(message_str) self._known_messages[message] = signal return message def _get_message_signal(self, message: IntraInstanceMessage) -> pyqtSignal: message_signal = self._known_messages.get(message) if message_signal is not None: return message_signal else: raise self.UnknownMessageError(f"Unknown message {message}") def _handle_message(self, message: IntraInstanceMessage) -> None: try: message_signal = self._get_message_signal(message) except self.UnknownMessageError: logging.warning(f"Received unknown IPC message: {message}") return message_signal.emit() class MessagingServer(QLocalServer): class MessageReadTimeoutError(MessagingApplication.BaseMessagingError): pass class UnknownSocketError(MessagingApplication.BaseMessagingError): pass new_message = pyqtSignal(str) def __init__(self, *, socket_name: str, parent: QObject): super().__init__(parent) self.current_connection: Optional[QLocalSocket] = None self.previous_connection: Optional[QLocalSocket] = None self.newConnection.connect(self._handle_connection) self.listen(socket_name) def _get_socket(self) -> Optional[QLocalSocket]: if self.current_connection is not None: socket = self.current_connection elif self.previous_connection is not None: logging.debug( "Attempted to get current socket, but current socket was None. 
Using " "previous socket" ) socket = self.previous_connection else: logging.warning( "Attempted to retrieve current socket, but there is no current socket" ) return None if sip.isdeleted(socket): logging.error("Attempted to retrieve deleted socket") return None return socket
BSD 3-Clause New or Revised License
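A minimal client-side sketch for the MessagingServer record above, assuming a Python/PyQt5 environment. The socket name and message string are placeholders (the real values come from MessagingApplication's socket_name and its registered messages), and the exact wire format is not shown in the record; this only demonstrates opening a QLocalSocket and writing a newline-terminated payload so the server's canReadLine()/readyRead path fires.
from PyQt5.QtCore import QCoreApplication
from PyQt5.QtNetwork import QLocalSocket

app = QCoreApplication([])           # Qt networking expects an application object

SOCKET_NAME = "example-socket-name"  # placeholder, not the real socket name

client = QLocalSocket()
client.connectToServer(SOCKET_NAME)
if client.waitForConnected(1000):
    client.write(b"example-message\n")   # newline so the server's canReadLine() succeeds
    client.flush()
    client.waitForBytesWritten(1000)
    client.disconnectFromServer()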
diefenbach/django-lfs
lfs/manage/manufacturers/views.py
manufacturer_view
python
def manufacturer_view(request, manufacturer_id, template_name="manage/manufacturers/view.html"):
    manufacturer = lfs_get_object_or_404(Manufacturer, pk=manufacturer_id)

    message = None  # initialized so the AJAX branch cannot hit an undefined name on GET requests
    if request.method == "POST":
        form = ViewForm(instance=manufacturer, data=request.POST)
        if form.is_valid():
            form.save()
            message = _(u"View data has been saved.")
        else:
            message = _(u"Please correct the indicated errors.")
    else:
        form = ViewForm(instance=manufacturer)

    view_html = render_to_string(template_name, request=request, context={
        "manufacturer": manufacturer,
        "form": form,
    })

    if request.is_ajax():
        html = [["#view", view_html]]
        return HttpResponse(json.dumps({
            "html": html,
            "message": message,
        }, cls=LazyEncoder), content_type='application/json')
    else:
        return view_html
Displays the view data for the manufacturer with the passed manufacturer id. This is used as a part of the whole category form.
https://github.com/diefenbach/django-lfs/blob/3bbcb3453d324c181ec68d11d5d35115a60a2fd5/lfs/manage/manufacturers/views.py#L82-L111
import json from django.contrib.auth.decorators import permission_required from django.urls import reverse from django.http import HttpResponse from django.http import HttpResponseRedirect from django.shortcuts import render from django.template.loader import render_to_string from django.utils.translation import ugettext_lazy as _ from django.views.decorators.http import require_POST from django.views.decorators.cache import never_cache from lfs.caching.utils import lfs_get_object_or_404 from lfs.catalog.settings import STANDARD_PRODUCT from lfs.catalog.settings import PRODUCT_WITH_VARIANTS from lfs.catalog.models import Category from lfs.catalog.models import Product from lfs.core.utils import LazyEncoder from lfs.manage.manufacturers.forms import ManufacturerDataForm, ManufacturerAddForm from lfs.manufacturer.models import Manufacturer from lfs.manage.manufacturers.forms import ViewForm from lfs.manage.seo.views import SEOView import logging logger = logging.getLogger(__name__) @permission_required("core.manage_shop") def manage_manufacturer(request, manufacturer_id, template_name="manage/manufacturers/manufacturer.html"): manufacturer = Manufacturer.objects.get(pk=manufacturer_id) categories = [] for category in Category.objects.filter(parent=None): checked, klass = _get_category_state(manufacturer, category) categories.append({ "id": category.id, "name": category.name, "checked": checked, "klass": klass, }) return render(request, template_name, { "categories": categories, "manufacturer": manufacturer, "manufacturer_id": manufacturer_id, "selectable_manufacturers_inline": selectable_manufacturers_inline(request, manufacturer_id), "manufacturer_data_inline": manufacturer_data_inline(request, manufacturer_id), "seo": SEOView(Manufacturer).render(request, manufacturer), "view": manufacturer_view(request, manufacturer_id), }) @permission_required("core.manage_shop") def no_manufacturers(request, template_name="manage/manufacturers/no_manufacturers.html"): return render(request, template_name, {}) def manufacturer_data_inline(request, manufacturer_id, template_name="manage/manufacturers/manufacturer_data_inline.html"): manufacturer = Manufacturer.objects.get(pk=manufacturer_id) if request.method == "POST": form = ManufacturerDataForm(instance=manufacturer, data=request.POST) else: form = ManufacturerDataForm(instance=manufacturer) return render_to_string(template_name, request=request, context={ "manufacturer": manufacturer, "form": form, }) @permission_required("core.manage_shop")
BSD 3-Clause New or Revised License
keon/algorithms
algorithms/maths/rsa.py
generate_key
python
def generate_key(k, seed=None):
    def modinv(a, m):
        # brute-force modular multiplicative inverse of a modulo m
        b = 1
        while not (a * b) % m == 1:
            b += 1
        return b

    def gen_prime(k, seed=None):
        # generate a k-bit prime by trial division
        def is_prime(num):
            if num == 2:
                return True
            for i in range(2, int(num ** 0.5) + 1):
                if num % i == 0:
                    return False
            return True

        random.seed(seed)
        while True:
            key = random.randrange(int(2 ** (k - 1)), int(2 ** k))
            if is_prime(key):
                return key

    # the bit sizes of p and q add up to the bit size of n
    p_size = k / 2
    q_size = k - p_size

    e = gen_prime(k, seed)

    # require p % e != 1 and q % e != 1 so that e is invertible modulo (p-1)(q-1)
    while True:
        p = gen_prime(p_size, seed)
        if p % e != 1:
            break

    while True:
        q = gen_prime(q_size, seed)
        if q % e != 1:
            break

    n = p * q
    l = (p - 1) * (q - 1)
    d = modinv(e, l)

    return int(n), int(e), int(d)
The RSA key generating algorithm. k is the number of bits in n.
https://github.com/keon/algorithms/blob/a9e57d459557f0bcd2bad1e8fac302ab72d34fe8/algorithms/maths/rsa.py#L27-L78
import random
MIT License
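A toy round-trip with the generate_key function above, as a hedged sketch: the key size and message are illustrative only. Note that with a fixed seed, gen_prime is deterministic, so p and q can come out equal; leaving seed unset avoids that in practice, and keys this small are for demonstration, never for real use.
n, e, d = generate_key(16)

message = 42                        # plaintext must be smaller than n
ciphertext = pow(message, e, n)     # encrypt with the public key (n, e)
recovered = pow(ciphertext, d, n)   # decrypt with the private exponent d
print(message, ciphertext, recovered)  # with distinct p and q, recovered == message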
facebookresearch/mtrl
mtrl/agent/components/critic.py
QFunction._make_trunk
python
def _make_trunk(
    self,
    obs_dim: int,
    action_dim: int,
    hidden_dim: int,
    output_dim: int,
    num_layers: int,
    multitask_cfg: ConfigType,
) -> ModelType:
    if (
        "critic_cfg" in multitask_cfg
        and multitask_cfg.critic_cfg
        and "moe_cfg" in multitask_cfg.critic_cfg
        and multitask_cfg.critic_cfg.moe_cfg.should_use
    ):
        moe_cfg = multitask_cfg.critic_cfg.moe_cfg
        if moe_cfg.mode == "soft_modularization":
            trunk = SoftModularizedMLP(
                num_experts=moe_cfg.num_experts,
                in_features=obs_dim,
                out_features=output_dim,
                num_layers=2,
                hidden_features=hidden_dim,
                bias=True,
            )
        else:
            raise NotImplementedError(
                f"""`moe_cfg.mode` = {moe_cfg.mode} is not implemented."""
            )
    else:
        trunk = agent_utils.build_mlp(
            input_dim=obs_dim + action_dim,
            hidden_dim=hidden_dim,
            output_dim=output_dim,
            num_layers=num_layers,
        )
    return trunk
Make the trunk for the Q-function.

Args:
    obs_dim (int): size of the observation.
    action_dim (int): size of the action vector.
    hidden_dim (int): size of the hidden layer of the trunk.
    output_dim (int): size of the output.
    num_layers (int): number of layers in the model.
    multitask_cfg (ConfigType): config for encoding the multitask knowledge.

Returns:
    ModelType: the constructed trunk.
https://github.com/facebookresearch/mtrl/blob/184c7d39db21acc505cf7094ed87cd28a1735105/mtrl/agent/components/critic.py#L98-L149
from typing import List, Tuple import torch from torch import nn from mtrl.agent import utils as agent_utils from mtrl.agent.components import base as base_component from mtrl.agent.components import encoder, moe_layer from mtrl.agent.components.actor import ( check_if_should_use_multi_head_policy, check_if_should_use_task_encoder, ) from mtrl.agent.components.soft_modularization import SoftModularizedMLP from mtrl.agent.ds.mt_obs import MTObs from mtrl.agent.ds.task_info import TaskInfo from mtrl.utils.types import ConfigType, ModelType, TensorType class QFunction(base_component.Component): def __init__( self, obs_dim: int, action_dim: int, hidden_dim: int, num_layers: int, multitask_cfg: ConfigType, ): super().__init__() self.should_condition_model_on_task_info = False self.should_condition_encoder_on_task_info = True if "critic_cfg" in multitask_cfg and multitask_cfg.critic_cfg: self.should_condition_model_on_task_info = ( multitask_cfg.critic_cfg.should_condition_model_on_task_info ) self.should_condition_encoder_on_task_info = ( multitask_cfg.critic_cfg.should_condition_encoder_on_task_info ) self.should_use_multi_head_policy = check_if_should_use_multi_head_policy( multitask_cfg=multitask_cfg ) self.model = self.build_model( obs_dim=obs_dim, action_dim=action_dim, hidden_dim=hidden_dim, num_layers=num_layers, multitask_cfg=multitask_cfg, ) if self.should_condition_model_on_task_info: self.obs_action_projection_layer = nn.Linear( in_features=obs_dim + action_dim, out_features=obs_dim ) def _make_head( self, input_dim: int, hidden_dim: int, num_layers: int, multitask_cfg: ConfigType, ) -> ModelType: return moe_layer.FeedForward( num_experts=multitask_cfg.num_envs, in_features=input_dim, out_features=1, hidden_features=hidden_dim, num_layers=num_layers, bias=True, )
MIT License
nestauk/nesta
nesta/core/batchables/nih/nih_dedupe/run.py
extract_yearly_funds
python
def extract_yearly_funds(src):
    year = get_value(src, 'year_fiscal_funding')
    cost_ref = get_value(src, 'cost_total_project')
    start_date = get_value(src, 'date_start_project')
    end_date = get_value(src, 'date_end_project')
    yearly_funds = []
    if year is not None:
        yearly_funds = [{'year': year, 'cost_ref': cost_ref,
                         'start_date': start_date,
                         'end_date': end_date}]
    return yearly_funds
Extract yearly funds
https://github.com/nestauk/nesta/blob/f0abf0b19a4b0c6c9799b3afe0bd67310122b705/nesta/core/batchables/nih/nih_dedupe/run.py#L31-L42
import logging
from nesta.core.luigihacks.elasticsearchplus import ElasticsearchPlus
from nesta.core.orms.orm_utils import load_json_from_pathstub
from collections import Counter
import json
import boto3
import os
import numpy as np
import time


def get_value(obj, key):
    try:
        return obj[key]
    except KeyError:
        return
MIT License
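A quick illustration of extract_yearly_funds with a hand-made document; the field names are taken from the function above and the values are made up.
src = {
    'year_fiscal_funding': 2018,
    'cost_total_project': 250000,
    'date_start_project': '2018-01-01',
    'date_end_project': '2018-12-31',
}
print(extract_yearly_funds(src))
# -> [{'year': 2018, 'cost_ref': 250000,
#      'start_date': '2018-01-01', 'end_date': '2018-12-31'}]

print(extract_yearly_funds({}))  # documents without a fiscal year give []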
jason-ash/pyesg
pyesg/stochastic_process.py
StochasticProcess.standard_deviation
python
def standard_deviation(self, x0: Array, dt: float) -> np.ndarray:
    return self.diffusion(x0=x0) * dt ** 0.5
Returns the standard deviation of the stochastic process using the Euler Discretization method
https://github.com/jason-ash/pyesg/blob/95183b3f8e6d37797653bb672a69a1af8a01c1f4/pyesg/stochastic_process.py#L92-L97
from abc import ABC, abstractmethod from typing import Dict, Tuple import numpy as np from scipy import stats from scipy.stats._distn_infrastructure import rv_continuous from pyesg.utils import check_random_state, to_array, Array, RandomState class StochasticProcess(ABC): def __init__(self, dim: int = 1, dW: rv_continuous = stats.norm) -> None: self.dW = dW self.dim = dim def __repr__(self) -> str: params = (f"{k}={repr(v)}" for k, v in self.coefs().items()) return f"<pyesg.{self.__class__.__qualname__}({', '.join(params)})>" @abstractmethod def _apply(self, x0: np.ndarray, dx: np.ndarray) -> np.ndarray: @abstractmethod def _drift(self, x0: np.ndarray) -> np.ndarray: @abstractmethod def _diffusion(self, x0: np.ndarray) -> np.ndarray: @abstractmethod def coefs(self) -> Dict[str, np.ndarray]: @classmethod @abstractmethod def example(cls) -> "StochasticProcess": def apply(self, x0: Array, dx: Array) -> np.ndarray: return self._apply(x0=to_array(x0), dx=to_array(dx)) def drift(self, x0: Array) -> np.ndarray: return self._drift(x0=to_array(x0)) def diffusion(self, x0: Array) -> np.ndarray: return self._diffusion(x0=to_array(x0)) def expectation(self, x0: Array, dt: float) -> np.ndarray: return self.apply(to_array(x0), self.drift(x0=x0) * dt)
MIT License
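The method above encodes the usual Euler discretization relationship: over a step dt the conditional standard deviation is diffusion(x0) * sqrt(dt). The snippet below is a self-contained, hypothetical illustration of that one-step scheme (mu, sigma and the numbers are made up; it does not use the pyesg API).
import numpy as np

def euler_step(x0, mu, sigma, dt, rng):
    """One Euler step for dX = mu(X) dt + sigma(X) dW."""
    drift = mu(x0) * dt
    std = sigma(x0) * dt ** 0.5      # the same quantity standard_deviation() returns
    return x0 + drift + std * rng.standard_normal(np.shape(x0))

rng = np.random.default_rng(0)
x1 = euler_step(x0=100.0, mu=lambda x: 0.05 * x, sigma=lambda x: 0.2 * x,
                dt=1 / 252, rng=rng)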
googleads/google-ads-python
google/ads/googleads/v7/services/services/customer_client_service/client.py
CustomerClientServiceClient.get_customer_client
python
def get_customer_client(
    self,
    request: customer_client_service.GetCustomerClientRequest = None,
    *,
    resource_name: str = None,
    retry: retries.Retry = gapic_v1.method.DEFAULT,
    timeout: float = None,
    metadata: Sequence[Tuple[str, str]] = (),
) -> customer_client.CustomerClient:
    if request is not None and any([resource_name]):
        raise ValueError(
            "If the `request` argument is set, then none of "
            "the individual field arguments should be set."
        )

    if not isinstance(
        request, customer_client_service.GetCustomerClientRequest
    ):
        request = customer_client_service.GetCustomerClientRequest(request)
        if resource_name is not None:
            request.resource_name = resource_name

    rpc = self._transport._wrapped_methods[
        self._transport.get_customer_client
    ]

    metadata = tuple(metadata) + (
        gapic_v1.routing_header.to_grpc_metadata(
            (("resource_name", request.resource_name),)
        ),
    )

    response = rpc(
        request, retry=retry, timeout=timeout, metadata=metadata,
    )

    return response
r"""Returns the requested client in full detail. List of thrown errors: `AuthenticationError <>`__ `AuthorizationError <>`__ `HeaderError <>`__ `InternalError <>`__ `QuotaError <>`__ `RequestError <>`__ Args: request (:class:`google.ads.googleads.v7.services.types.GetCustomerClientRequest`): The request object. Request message for [CustomerClientService.GetCustomerClient][google.ads.googleads.v7.services.CustomerClientService.GetCustomerClient]. resource_name (:class:`str`): Required. The resource name of the client to fetch. This corresponds to the ``resource_name`` field on the ``request`` instance; if ``request`` is provided, this should not be set. retry (google.api_core.retry.Retry): Designation of what errors, if any, should be retried. timeout (float): The timeout for this request. metadata (Sequence[Tuple[str, str]]): Strings which should be sent along with the request as metadata. Returns: google.ads.googleads.v7.resources.types.CustomerClient: A link between the given customer and a client customer. CustomerClients only exist for manager customers. All direct and indirect client customers are included, as well as the manager itself.
https://github.com/googleads/google-ads-python/blob/6794993e146abcfe21292677144c66cb546446bc/google/ads/googleads/v7/services/services/customer_client_service/client.py#L363-L447
from collections import OrderedDict from distutils import util import os import re from typing import Dict, Optional, Sequence, Tuple, Type, Union from google.api_core import client_options as client_options_lib from google.api_core import exceptions from google.api_core import gapic_v1 from google.api_core import retry as retries from google.auth import credentials from google.auth.transport import mtls from google.auth.transport.grpc import SslCredentials from google.auth.exceptions import MutualTLSChannelError from google.oauth2 import service_account from google.ads.googleads.v7.resources.types import customer_client from google.ads.googleads.v7.services.types import customer_client_service from .transports.base import CustomerClientServiceTransport, DEFAULT_CLIENT_INFO from .transports.grpc import CustomerClientServiceGrpcTransport class CustomerClientServiceClientMeta(type): _transport_registry = ( OrderedDict() ) _transport_registry["grpc"] = CustomerClientServiceGrpcTransport def get_transport_class( cls, label: str = None, ) -> Type[CustomerClientServiceTransport]: if label: return cls._transport_registry[label] return next(iter(cls._transport_registry.values())) class CustomerClientServiceClient(metaclass=CustomerClientServiceClientMeta): @staticmethod def _get_default_mtls_endpoint(api_endpoint): if not api_endpoint: return api_endpoint mtls_endpoint_re = re.compile( r"(?P<name>[^.]+)(?P<mtls>\.mtls)?(?P<sandbox>\.sandbox)?(?P<googledomain>\.googleapis\.com)?" ) m = mtls_endpoint_re.match(api_endpoint) name, mtls, sandbox, googledomain = m.groups() if mtls or not googledomain: return api_endpoint if sandbox: return api_endpoint.replace( "sandbox.googleapis.com", "mtls.sandbox.googleapis.com" ) return api_endpoint.replace(".googleapis.com", ".mtls.googleapis.com") DEFAULT_ENDPOINT = "googleads.googleapis.com" DEFAULT_MTLS_ENDPOINT = _get_default_mtls_endpoint.__func__( DEFAULT_ENDPOINT ) @classmethod def from_service_account_info(cls, info: dict, *args, **kwargs): credentials = service_account.Credentials.from_service_account_info( info ) kwargs["credentials"] = credentials return cls(*args, **kwargs) @classmethod def from_service_account_file(cls, filename: str, *args, **kwargs): credentials = service_account.Credentials.from_service_account_file( filename ) kwargs["credentials"] = credentials return cls(*args, **kwargs) from_service_account_json = from_service_account_file @property def transport(self) -> CustomerClientServiceTransport: return self._transport @staticmethod def customer_path(customer_id: str,) -> str: return "customers/{customer_id}".format(customer_id=customer_id,) @staticmethod def parse_customer_path(path: str) -> Dict[str, str]: m = re.match(r"^customers/(?P<customer_id>.+?)$", path) return m.groupdict() if m else {} @staticmethod def customer_client_path(customer_id: str, client_customer_id: str,) -> str: return "customers/{customer_id}/customerClients/{client_customer_id}".format( customer_id=customer_id, client_customer_id=client_customer_id, ) @staticmethod def parse_customer_client_path(path: str) -> Dict[str, str]: m = re.match( r"^customers/(?P<customer_id>.+?)/customerClients/(?P<client_customer_id>.+?)$", path, ) return m.groupdict() if m else {} @staticmethod def common_billing_account_path(billing_account: str,) -> str: return "billingAccounts/{billing_account}".format( billing_account=billing_account, ) @staticmethod def parse_common_billing_account_path(path: str) -> Dict[str, str]: m = re.match(r"^billingAccounts/(?P<billing_account>.+?)$", 
path) return m.groupdict() if m else {} @staticmethod def common_folder_path(folder: str,) -> str: return "folders/{folder}".format(folder=folder,) @staticmethod def parse_common_folder_path(path: str) -> Dict[str, str]: m = re.match(r"^folders/(?P<folder>.+?)$", path) return m.groupdict() if m else {} @staticmethod def common_organization_path(organization: str,) -> str: return "organizations/{organization}".format(organization=organization,) @staticmethod def parse_common_organization_path(path: str) -> Dict[str, str]: m = re.match(r"^organizations/(?P<organization>.+?)$", path) return m.groupdict() if m else {} @staticmethod def common_project_path(project: str,) -> str: return "projects/{project}".format(project=project,) @staticmethod def parse_common_project_path(path: str) -> Dict[str, str]: m = re.match(r"^projects/(?P<project>.+?)$", path) return m.groupdict() if m else {} @staticmethod def common_location_path(project: str, location: str,) -> str: return "projects/{project}/locations/{location}".format( project=project, location=location, ) @staticmethod def parse_common_location_path(path: str) -> Dict[str, str]: m = re.match( r"^projects/(?P<project>.+?)/locations/(?P<location>.+?)$", path ) return m.groupdict() if m else {} def __init__( self, *, credentials: Optional[credentials.Credentials] = None, transport: Union[str, CustomerClientServiceTransport, None] = None, client_options: Optional[client_options_lib.ClientOptions] = None, client_info: gapic_v1.client_info.ClientInfo = DEFAULT_CLIENT_INFO, ) -> None: if isinstance(client_options, dict): client_options = client_options_lib.from_dict(client_options) if client_options is None: client_options = client_options_lib.ClientOptions() use_client_cert = bool( util.strtobool( os.getenv("GOOGLE_API_USE_CLIENT_CERTIFICATE", "false") ) ) ssl_credentials = None is_mtls = False if use_client_cert: if client_options.client_cert_source: import grpc cert, key = client_options.client_cert_source() ssl_credentials = grpc.ssl_channel_credentials( certificate_chain=cert, private_key=key ) is_mtls = True else: creds = SslCredentials() is_mtls = creds.is_mtls ssl_credentials = creds.ssl_credentials if is_mtls else None if client_options.api_endpoint is not None: api_endpoint = client_options.api_endpoint else: use_mtls_env = os.getenv("GOOGLE_API_USE_MTLS_ENDPOINT", "auto") if use_mtls_env == "never": api_endpoint = self.DEFAULT_ENDPOINT elif use_mtls_env == "always": api_endpoint = self.DEFAULT_MTLS_ENDPOINT elif use_mtls_env == "auto": api_endpoint = ( self.DEFAULT_MTLS_ENDPOINT if is_mtls else self.DEFAULT_ENDPOINT ) else: raise MutualTLSChannelError( "Unsupported GOOGLE_API_USE_MTLS_ENDPOINT value. Accepted values: never, auto, always" ) if isinstance(transport, CustomerClientServiceTransport): if credentials: raise ValueError( "When providing a transport instance, " "provide its credentials directly." ) self._transport = transport elif isinstance(transport, str): Transport = type(self).get_transport_class(transport) self._transport = Transport( credentials=credentials, host=self.DEFAULT_ENDPOINT ) else: self._transport = CustomerClientServiceGrpcTransport( credentials=credentials, host=api_endpoint, ssl_channel_credentials=ssl_credentials, client_info=client_info, )
Apache License 2.0
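A hedged usage sketch for get_customer_client. Obtaining the service through GoogleAdsClient is the usual entry point for this library, but the credentials file path and the customer IDs below are placeholders, not values from the record.
from google.ads.googleads.client import GoogleAdsClient

client = GoogleAdsClient.load_from_storage("google-ads.yaml")   # placeholder path
service = client.get_service("CustomerClientService", version="v7")

# customer_client_path() is the helper shown in the context above
resource_name = service.customer_client_path("1234567890", "9876543210")
customer_client = service.get_customer_client(resource_name=resource_name)
print(customer_client)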
geophysics-ubonn/reda
lib/reda/utils/filter_config_types.py
_sort_dd_skips
python
def _sort_dd_skips(configs, dd_indices_all):
    config_current_skips = np.abs(configs[:, 1] - configs[:, 0])
    if np.all(np.isnan(config_current_skips)):
        return {0: []}

    available_skips_raw = np.unique(config_current_skips)
    available_skips = available_skips_raw[
        ~np.isnan(available_skips_raw)
    ].astype(int)

    dd_configs_sorted = {}
    for skip in available_skips:
        indices = np.where(config_current_skips == skip)[0]
        dd_configs_sorted[skip - 1] = dd_indices_all[indices]
    return dd_configs_sorted
Given a set of dipole-dipole configurations, sort them according to their
current skip.

Parameters
----------
configs: Nx4 numpy.ndarray
    Dipole-Dipole configurations

Returns
-------
dd_configs_sorted: dict
    dictionary with the skip as keys, and arrays/lists with indices to
    these skips.
https://github.com/geophysics-ubonn/reda/blob/5be52ecb184f45f0eabb23451f039fec3d9537c5/lib/reda/utils/filter_config_types.py#L158-L189
import numpy as np import pandas as pd def _filter_schlumberger(configs): configs_sorted = np.hstack(( np.sort(configs[:, 0:2], axis=1), np.sort(configs[:, 2:4], axis=1), )).astype(int) MN = configs_sorted[:, 2:4].copy() MN_unique = np.unique( MN.view( MN.dtype.descr * 2 ) ) MN_unique_reshape = MN_unique.view( MN.dtype ).reshape(-1, 2) schl_indices_list = [] for mn in MN_unique_reshape: nr_current_binary = ( (configs_sorted[:, 2] == mn[0]) & (configs_sorted[:, 3] == mn[1]) ) if len(np.where(nr_current_binary)[0]) < 2: continue nr_left_right = ( (configs_sorted[:, 0] < mn[0]) & (configs_sorted[:, 1] > mn[0]) & nr_current_binary ) distance_left = np.abs( configs_sorted[nr_left_right, 0] - mn[0] ).squeeze() distance_right = np.abs( configs_sorted[nr_left_right, 1] - mn[1] ).squeeze() nr_equal_distances = np.where(distance_left == distance_right)[0] indices = np.where(nr_left_right)[0][nr_equal_distances] if indices.size > 2: schl_indices_list.append(indices) if len(schl_indices_list) == 0: return configs, {0: np.array([])} else: schl_indices = np.hstack(schl_indices_list).squeeze() configs[schl_indices, :] = np.nan return configs, {0: schl_indices} def _filter_dipole_dipole(configs): dist_ab = np.abs(configs[:, 0] - configs[:, 1]) dist_mn = np.abs(configs[:, 2] - configs[:, 3]) distances_equal = (dist_ab == dist_mn) not_overlapping = ( ( (configs[:, 0] < configs[:, 2]) & (configs[:, 1] < configs[:, 2]) & (configs[:, 0] < configs[:, 3]) & (configs[:, 1] < configs[:, 3]) ) | ( (configs[:, 2] < configs[:, 0]) & (configs[:, 3] < configs[:, 0]) & (configs[:, 2] < configs[:, 1]) & (configs[:, 3] < configs[:, 1]) ) ) is_dipole_dipole = (distances_equal & not_overlapping) dd_indices = np.where(is_dipole_dipole)[0] dd_indices_sorted = _sort_dd_skips(configs[dd_indices, :], dd_indices) configs[dd_indices, :] = np.nan return configs, dd_indices_sorted
MIT License
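A tiny illustration of _sort_dd_skips with made-up configurations (columns A, B, M, N); in the module it is called internally by _filter_dipole_dipole, but calling it directly shows the skip-to-indices grouping.
import numpy as np

configs = np.array([
    [1, 2, 4, 5],   # current skip |B - A| = 1 -> key 0
    [2, 3, 5, 6],   # skip 1                   -> key 0
    [1, 3, 6, 8],   # skip 2                   -> key 1
], dtype=float)
dd_indices_all = np.arange(len(configs))

print(_sort_dd_skips(configs, dd_indices_all))
# -> {0: array([0, 1]), 1: array([2])}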
dropbox/dropboxbusinessscripts
Sharing/ListSharedFolderMembers.py
SharedFolderLoader.__init__
python
def __init__(self, context):
    self.context = context
:type context: AsMemberContext
https://github.com/dropbox/dropboxbusinessscripts/blob/4f4c32ddd488b29e7fd16a40966761e70a758239/Sharing/ListSharedFolderMembers.py#L343-L347
import sys import argparse from dropbox import Dropbox from dropbox import DropboxTeam from dropbox.sharing import GroupMembershipInfo from dropbox.sharing import InviteeMembershipInfo from dropbox.sharing import SharedFolderMetadata from dropbox.sharing import SharedFolderMembers from dropbox.sharing import UserMembershipInfo from dropbox.team import TeamMemberInfo from dropbox.team import MembersGetInfoItem from dropbox.users import BasicAccount reload(sys) sys.setdefaultencoding('UTF8') class TraceEntity(object): def __init__(self, event): import time import traceback self.event = event self.time = time.time() self.trace = traceback.format_stack() class TraceRecorder(object): def __init__(self): self.traces = [] def log(self, event): self.traces.append(TraceEntity(event)) trace_recorder = TraceRecorder() class Entity(object): def __init__(self, identity): self.__identity = identity def identity(self): return self.__identity def __hash__(self): return hash(self.__identity) def __eq__(self, other): return self.identity() == other.identity() class Context(object): class EmptyContext(Context): class AsMemberContext(Context): def __init__(self, dropbox_team, member): self.team = dropbox_team self.member = member self.as_user = dropbox_team.as_user(self.member.identity()) def as_user(self): return self.as_user class EntityNotFoundException(Exception): def __init__(self, identity): self.identity = identity class Loader(object): def all_entities(self): raise NotImplementedError class CachedLoader(Loader): def __init__(self, loader): self.loader = loader self.cache = None def all_entities(self): if self.cache: return self.cache self.cache = self.loader.all_entities() return self.cache class Resolver(object): def resolve(self, identity, context): pass class CachedResolver(Resolver): def __init__(self, resolver): self.__resolver = resolver self.__cache = {} def resolve(self, identity, context): if identity in self.__cache: e = self.__cache[identity] if e is None: raise EntityNotFoundException(identity) else: return e try: e = self.__resolver.resolve(identity, context) self.__cache[identity] = e return e except EntityNotFoundException as ex: self.__cache[identity] = None raise ex class Member(Entity): def __init__(self, member_info): super(Member, self).__init__(member_info.profile.team_member_id) self.member_info = member_info def member_info(self): return self.member_info def email(self): return self.member_info.profile.email def status(self): if self.member_info.profile.status.is_active(): return 'active' elif self.member_info.profile.status.is_invited(): return 'invited' elif self.member_info.profile.status.is_suspended(): return 'suspended' elif self.email().endswith('#'): return 'deleted' else: return 'unknown' class MemberResolver(Resolver): def __init__(self, dropbox_team): self.team = dropbox_team def resolve(self, identity, context): from dropbox.team import UserSelectorArg q = UserSelectorArg.email(identity) m = self.team.team_members_get_info([q]) if len(m) < 1: raise EntityNotFoundException(identity) elif m[0].is_member_info(): return Member(m[0].get_member_info()) else: raise EntityNotFoundException(identity) class MemberLoader(Loader): def __init__(self, team): self.team = team def __load_team_members(self): chunk = self.team.team_members_list() if chunk.has_more: more = self.__load_more_team_members(chunk.cursor) return chunk.members + more else: return chunk.members def __load_more_team_members(self, cursor): chunk = self.team.team_members_list_continue(cursor) if chunk.has_more: 
more = self.__load_more_team_members(chunk.cursor) return chunk.members + more else: return chunk.members def all_entities(self): return [Member(m) for m in self.__load_team_members()] class MemberResolverLoader(Resolver, Loader): def __init__(self, member_loader): self.member_loader = member_loader def resolve(self, identity, context): members = self.member_loader.all_entities() account_id_to_member = {m.identity(): m for m in members} if identity in account_id_to_member: return account_id_to_member[identity] else: raise EntityNotFoundException(identity) def resolve_by_email(self, email): members = self.member_loader.all_entities() email_to_member = {m.email(): m for m in members} if email in email_to_member: return email_to_member[email] else: raise EntityNotFoundException(email) def all_entities(self): return self.member_loader.all_entities() class SharedFolder(Entity): def __init__(self, shared_folder, context): super(SharedFolder, self).__init__(shared_folder.shared_folder_id) self.shared_folder = shared_folder self.context = context def members(self): return self.__load_shared_folder_members(self.shared_folder.shared_folder_id) def __load_shared_folder_members(self, shared_folder_id): chunk = self.context.as_user.sharing_list_folder_members(shared_folder_id) if chunk.cursor: more = self.__load_more_shared_folder_members(chunk.cursor) return self.__merge_shared_folder_members(chunk, more) else: return chunk def __load_more_shared_folder_members(self, cursor): chunk = self.context.as_user.sharing_list_folder_members_continue(cursor) if chunk.cursor: more = self.__load_more_shared_folder_members(chunk.cursor) return self.__merge_shared_folder_members(chunk, more) else: return chunk def __merge_shared_folder_members(self, a, b): def f(x): return [] if x is None else x def g(x, y): return f(x) + f(y) return SharedFolderMembers( users=g(a.users, b.users), groups=g(a.groups, b.groups), invitees=g(a.invitees, b.invitees), cursor=None ) class SharedFolderLoader(Loader):
Apache License 2.0
ebellocchia/py_crypto_hd_wallet
py_crypto_hd_wallet/monero/hd_wallet_monero_keys.py
HdWalletMoneroKeys.__FromMoneroObj
python
def __FromMoneroObj(self, monero_obj: Monero) -> None:
    self.__SetKeyData(HdWalletMoneroKeyTypes.PUB_SPEND,
                      monero_obj.PublicSpendKey().RawCompressed().ToHex())
    self.__SetKeyData(HdWalletMoneroKeyTypes.PUB_VIEW,
                      monero_obj.PublicViewKey().RawCompressed().ToHex())
    self.__SetKeyData(HdWalletMoneroKeyTypes.PRIV_VIEW,
                      monero_obj.PrivateViewKey().Raw().ToHex())

    if not monero_obj.IsWatchOnly():
        self.__SetKeyData(HdWalletMoneroKeyTypes.PRIV_SPEND,
                          monero_obj.PrivateSpendKey().Raw().ToHex())

    self.__SetKeyData(HdWalletMoneroKeyTypes.PRIMARY_ADDRESS,
                      monero_obj.PrimaryAddress())
Create keys from the specified Monero object.

Args:
    monero_obj (Monero object): Monero object
https://github.com/ebellocchia/py_crypto_hd_wallet/blob/a48aeb1e7fae9c6cdad781079c39aaae12668a6e/py_crypto_hd_wallet/monero/hd_wallet_monero_keys.py#L124-L143
from __future__ import annotations import json from typing import Dict, Optional from bip_utils import Monero from py_crypto_hd_wallet.monero.hd_wallet_monero_enum import HdWalletMoneroKeyTypes class HdWalletMoneroKeysConst: KEY_TYPE_TO_DICT_KEY: Dict[HdWalletMoneroKeyTypes, str] = { HdWalletMoneroKeyTypes.PRIV_SPEND: "priv_spend", HdWalletMoneroKeyTypes.PRIV_VIEW: "priv_view", HdWalletMoneroKeyTypes.PUB_SPEND: "pub_spend", HdWalletMoneroKeyTypes.PUB_VIEW: "pub_view", HdWalletMoneroKeyTypes.PRIMARY_ADDRESS: "primary_address", } class HdWalletMoneroKeys: m_key_data: Dict[str, str] def __init__(self, monero_obj: Monero) -> None: self.m_key_data = {} self.__FromMoneroObj(monero_obj) def ToDict(self) -> Dict[str, str]: return self.m_key_data def ToJson(self, json_indent: int = 4) -> str: return json.dumps(self.ToDict(), indent=json_indent) def HasKey(self, key_type: HdWalletMoneroKeyTypes) -> bool: if not isinstance(key_type, HdWalletMoneroKeyTypes): raise TypeError("Key type is not an enumerative of HdWalletMoneroKeyTypes") dict_key = HdWalletMoneroKeysConst.KEY_TYPE_TO_DICT_KEY[key_type] return dict_key in self.m_key_data def GetKey(self, key_type: HdWalletMoneroKeyTypes) -> Optional[str]: if self.HasKey(key_type): return self.m_key_data[HdWalletMoneroKeysConst.KEY_TYPE_TO_DICT_KEY[key_type]] return None
MIT License
man-group/mdf
mdf/builders/basic.py
_get_labels
python
def _get_labels(node, label=None, value=None):
    if value is not None:
        if label is None:
            label = _get_labels(node)[0]

        if isinstance(value, (tuple, list, np.ndarray, pa.core.generic.NDFrame, pa.Index)):
            # a list-like label is padded or trimmed to match the length of value
            if isinstance(label, (tuple, list, np.ndarray, pa.core.generic.NDFrame, pa.Index)):
                label = list(label)
                if len(label) < len(value):
                    label += ["%s.%d" % (label, i) for i in xrange(len(label), len(value))]
                return label[:len(value)]

            # a scalar label is expanded to one label per element of value
            if isinstance(value, pa.Series):
                return ["%s.%s" % (label, c) for c in value.index]
            return ["%s.%d" % (label, i) for i in xrange(len(value))]

        if isinstance(label, (tuple, list, np.ndarray, pa.core.generic.NDFrame)):
            return list(label[:1])
        return [label]

    if label is not None:
        if isinstance(label, (tuple, list, np.ndarray, pa.core.generic.NDFrame)):
            return list(label[:1])
        return [label]

    # no label given: fall back to the node name (or string representation)
    if isinstance(node, MDFNode):
        return [node.name.split(".").pop()]
    return [str(node)]
Returns a list of labels the same length as value, if value is a list (or of length 1 if value is not a list). If label is supplied it will be used as the base (eg x.0...x.N), or if it's a list it will be padded to the correct length and returned.
https://github.com/man-group/mdf/blob/4b2c78084467791ad883c0b4c53832ad70fc96ef/mdf/builders/basic.py#L18-L59
import numpy as np import pandas as pa from ..nodes import MDFNode, MDFEvalNode from collections import deque, defaultdict import datetime import operator import csv import matplotlib.pyplot as pp import sys import types if sys.version_info[0] > 2: basestring = str
MIT License
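A behaviour sketch for _get_labels with made-up inputs, assuming a Python 2 style runtime (the function uses xrange): a scalar label is expanded to one label per element of a list-like value, while a list label of the right length is passed through.
_get_labels(None, label="x", value=[10, 20, 30])   # -> ['x.0', 'x.1', 'x.2']
_get_labels(None, label=["a", "b"], value=[1, 2])  # -> ['a', 'b']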
erenbalatkan/depthvisualizer
DepthVisualizer/DepthVisualizer.py
Utils.read_kitti_3d_object
python
def read_kitti_3d_object(path, convert_format=True):
    objects = []
    with open(path, "r") as f:
        for line in f:
            object_label = line.split(" ")[0]
            if not (object_label == "DontCare"):
                object_data = [x.rstrip() for x in line.split(" ")]
                object_data[1:] = [float(x) for x in object_data[1:]]
                objects.append(object_data)
    if convert_format:
        objects = Utils.convert_objects_from_kitti_format(objects)
    return objects
Reads KITTI 3D object labels.

:param path: Path of the label.txt file
:param convert_format: If True, the object format will be converted to the
    DepthVisualizer format, which has the following form
    [Type, Truncation, Occlusion, Observing Angle, 2D Left, 2D Top, 2D Right,
    2D Bottom, 3D X, 3D Y, 3D Z, 3D Width, 3D Height, 3D Length, 3D Rotation]
:return: A list of objects in either KITTI or DepthVisualizer format, based on
    the convert_format argument
https://github.com/erenbalatkan/depthvisualizer/blob/f34ea92bf5ee037520ef16371bca5d055b778238/DepthVisualizer/DepthVisualizer.py#L289-L310
import glfw import OpenGL.GL as GL from OpenGL.GL import shaders from OpenGL.arrays import vbo import math import glm import ctypes import time import numpy as np from PIL import Image vertex_shader_source = "#version 330 core\n" + "uniform mat4 view;\n" + "uniform mat4 projection;\n" + "layout (location = 0) in vec3 aPos;\n" + "layout (location = 1) in vec3 aColor;\n" + "out vec3 vertexColor;\n" + "void main()\n" + "{\n" + " gl_Position = projection * view * vec4(aPos, 1.0);\n" + " vertexColor = aColor;" + "}" fragment_shader_source = "#version 330 core\n" + "out vec4 FragColor;\n" + "in vec3 vertexColor;\n" + "void main()\n" + "{\n" + " FragColor = vec4(vertexColor, 1);\n" + "}" class Utils: def __init__(self): pass @staticmethod def convert_depth_pixel_to_point(x, y, depth, focal_length_in_pixels, principal_point , rgb=[255, 255, 255], is_depth_along_z=True): rgb = list(rgb) x_dist = x - principal_point[1] y_dist = y - principal_point[0] z = depth if not is_depth_along_z: pixel_dist = (x_dist ** 2 + y_dist ** 2) ** 0.5 focal_target = (focal_length_in_pixels ** 2 + pixel_dist ** 2) ** 0.5 z = depth * focal_length_in_pixels / focal_target x = x_dist / focal_length_in_pixels * z y = y_dist / focal_length_in_pixels * z return [x, -y, z] + rgb @staticmethod def convert_point_to_pixel(point, focal_length_in_pixels, principal_point): x_dist = point[0] / point[2] * focal_length_in_pixels y_dist = point[1] / point[2] * focal_length_in_pixels y_coord = principal_point[0] - y_dist x_coord = principal_point[1] + x_dist return [y_coord, x_coord] + point[2:] @staticmethod def read_kitti_calibration(path): calib_data = {} with open(path, 'r') as f: for line in f.readlines(): if ':' in line: key, value = line.split(":", 1) calib_data[key] = np.array([float(x) for x in value.split()]) R0 = calib_data['P0'].reshape(3, 4) focal_length = R0[1, 1] principal_point = R0[1, 2], R0[0, 2] calib_data["focal_length"] = focal_length calib_data["principal_point"] = principal_point return calib_data @staticmethod def convert_depthmap_to_points(depth_map, focal_length_in_pixels=None, principal_point=None, rgb_image=None, is_depth_along_z=True): if focal_length_in_pixels is None: focal_length_in_pixels = 715 if principal_point is None: principal_point = [depth_map.shape[0] / 2, depth_map.shape[1] / 2] if depth_map.shape[0] == 1: depth_map = np.swapaxes(depth_map, 0, 1).swapaxes(1, 2) points = np.ones(shape=(depth_map.shape[0] * depth_map.shape[1], 6)) if rgb_image is None: points[:, 3:6] = [0.5, 0.7, 1] else: if rgb_image.shape[0] == 3: rgb_image = np.swapaxes(rgb_image, 0, 1).swapaxes(1, 2) rgb_image = rgb_image.reshape(-1, 3) points[:, 3:6] = rgb_image / 256.0 y, x = np.meshgrid(np.arange(0, depth_map.shape[1]), np.arange(0, depth_map.shape[0])) yx_coordinates = np.array([x.flatten(), y.flatten()], np.float32).T yx_coordinates += -1 * np.array(principal_point) yx_coordinates = np.flip(yx_coordinates, 1) points[:, 0:2] = yx_coordinates points[:, 2] = depth_map.flatten() pixel_dist = (points[:, 0] ** 2 + points[:, 1] ** 2) ** 0.5 focal_target_dist = (focal_length_in_pixels ** 2 + pixel_dist ** 2) ** 0.5 if not is_depth_along_z: points[:, 2] = points[:, 2] * focal_length_in_pixels / focal_target_dist points[:, 0] = points[:, 0] * points[:, 2] / focal_length_in_pixels points[:, 1] = points[:, 1] * points[:, 2] / focal_length_in_pixels points[:, 1] *= -1 return points @staticmethod def read_kitti_point_cloud(path, calib, color=[255, 255, 255]): color = np.array(color) / 256.0 points = np.fromfile(path, 
dtype=np.float32).reshape(-1, 4) points = points[:, :3] new_points = np.ones(shape=(len(points), 4)) new_points[:, :3] = points Tr_velo_to_cam = np.zeros((4, 4)) Tr_velo_to_cam[3, 3] = 1 Tr_velo_to_cam[:3, :4] = calib['Tr_velo_to_cam'].reshape(3, 4) converted_points = Tr_velo_to_cam.dot(new_points.T).T point_cloud = np.zeros((points.shape[0], 6)) point_cloud[:, :3] = converted_points[:, :3] point_cloud[:, 1] *= -1 point_cloud[:, 3:] = color return point_cloud @staticmethod def read_depth_map(path): depth_map = np.asarray(Image.open(path), np.float32) depth_map = np.expand_dims(depth_map, axis=2) / 256.0 return depth_map @staticmethod def convert_objects_from_kitti_format(objects): converted_objects = [] for object in objects: object = object.copy() object_data_array = np.array([float(x) for x in object[8:]], np.float32) object[8 + 0:8 + 3] = object_data_array[0 + 3:3 + 3] object[8 + 1] *= -1 object[11:14] = object_data_array[0:3] object[11], object[12] = object[12], object[11] object[8 + 1] += object[12] / 2 object[14] += math.radians(90) converted_objects.append(object) return converted_objects @staticmethod def convert_objects_to_kitti_format(objects): converted_objects = [] for object in objects.copy(): object_clone = object.copy() object_clone[8 + 1] -= object[12] / 2 object_clone[8 + 1] *= -1 object[11], object[12] = object[12], object[11] object_clone[11:14] = object_clone[8:11] object_clone[8:11] = object[11:14] object_clone[14] -= math.radians(90) converted_objects.append(object_clone) return converted_objects @staticmethod def convert_points_to_voxel_map(points, voxel_map_center, voxel_map_size, voxel_size): voxel_map_size = np.asarray(np.ceil(np.array(voxel_map_size) / voxel_size), np.int32) center_x, center_y, center_z = voxel_map_center x_begin, x_end = [center_x + sign * 0.5 * voxel_map_size[0] * voxel_size for sign in [-1, 1]] y_begin, y_end = [center_y + sign * 0.5 * voxel_map_size[1] * voxel_size for sign in [-1, 1]] z_begin, z_end = [center_z + sign * 0.5 * voxel_map_size[2] * voxel_size for sign in [-1, 1]] voxel_map = np.zeros(shape=(*voxel_map_size, 7)) for point in points: x, y, z, r, g, b = point if x_begin < x < x_end and y_begin < y < y_end and z_begin < z < z_end: voxel_map[math.floor((x - x_begin) / voxel_size), math.floor((y - y_begin) / voxel_size), math.floor((z - z_begin) / voxel_size)] += [r, g, b, x, y, z, 1] voxel_map[:, :, :, :-1] = voxel_map[:, :, :, :-1] / np.expand_dims(np.clip(voxel_map[:, :, :, -1], 1, None), axis=3) return voxel_map @staticmethod
MIT License
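A short usage sketch for read_kitti_3d_object: it writes one hypothetical KITTI-format label line to a placeholder file and reads it back with convert_format=False so the raw KITTI ordering is kept.
label_line = ("Car 0.00 0 -1.58 587.01 173.33 614.12 200.12 "
              "1.65 1.67 3.64 -0.65 1.71 46.70 -1.59\n")
with open("000123_label.txt", "w") as f:      # placeholder file name
    f.write(label_line)

objects = Utils.read_kitti_3d_object("000123_label.txt", convert_format=False)
print(objects[0][0], objects[0][1:])          # -> 'Car' and the 14 float fields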
edgedb/edgedb
edb/ir/scopetree.py
ScopeTreeNode.mark_as_optional
python
def mark_as_optional(self) -> None:
    self.optional_count = 0
Indicate that this scope is used as an OPTIONAL argument.
https://github.com/edgedb/edgedb/blob/ab26440d9ff775a55f22e3b93e6f345eefc10f61/edb/ir/scopetree.py#L708-L710
from __future__ import annotations from typing import * if TYPE_CHECKING: from typing_extensions import TypeGuard import textwrap import weakref from edb import errors from edb.common import context as pctx from . import pathid class FenceInfo(NamedTuple): unnest_fence: bool factoring_fence: bool def __or__(self, other: FenceInfo) -> FenceInfo: return FenceInfo( unnest_fence=self.unnest_fence or other.unnest_fence, factoring_fence=self.factoring_fence or other.factoring_fence, ) def has_path_id(nobe: ScopeTreeNode) -> TypeGuard[ScopeTreeNodeWithPathId]: return nobe.path_id is not None class ScopeTreeNode: unique_id: Optional[int] path_id: Optional[pathid.PathId] fenced: bool computable_branch: bool unnest_fence: bool factoring_fence: bool factoring_allowlist: Set[pathid.PathId] optional_count: Optional[int] children: List[ScopeTreeNode] namespaces: Set[pathid.Namespace] _gravestone: Optional[ScopeTreeNode] def __init__( self, *, path_id: Optional[pathid.PathId]=None, fenced: bool=False, computable_branch: bool=False, unique_id: Optional[int]=None, optional: bool=False, ) -> None: self.unique_id = unique_id self.path_id = path_id self.fenced = fenced self.computable_branch = computable_branch self.unnest_fence = False self.factoring_fence = False self.factoring_allowlist = set() self.optional_count = 0 if optional else None self.children = [] self.namespaces = set() self._parent: Optional[weakref.ReferenceType[ScopeTreeNode]] = None self._gravestone = None def __repr__(self) -> str: name = 'ScopeFenceNode' if self.fenced else 'ScopeTreeNode' return (f'<{name} {self.path_id!r} at {id(self):0x}>') @property def forwarded(self) -> ScopeTreeNode: node = self while node._gravestone: node = node._gravestone return node def find_dupe_unique_ids(self) -> Set[int]: seen = set() dupes = set() for node in self.root.descendants: if node.unique_id is not None: if node.unique_id in seen: dupes.add(node.unique_id) seen.add(node.unique_id) return dupes def validate_unique_ids(self) -> None: dupes = self.find_dupe_unique_ids() assert not dupes, f'Duplicate "unique" ids seen {dupes}' @property def name(self) -> str: return self._name(debug=False) def _name(self, debug: bool) -> str: if self.path_id is None: name = ( ('C' if self.computable_branch else '') + ('FENCE' if self.fenced else 'BRANCH') ) else: name = self.path_id.pformat_internal(debug=debug) ocount = self.optional_count return ( f'{name}{" [OPT]" if self.optional else ""}' f'{" ["+str(ocount)+"]" if ocount else ""}' ) def debugname(self, fuller: bool=False) -> str: parts = [f'{self._name(debug=fuller)}'] if self.unique_id: parts.append(f'uid:{self.unique_id}') if self.namespaces: parts.append(','.join(self.namespaces)) if self.unnest_fence: parts.append('no-unnest') if self.factoring_fence: parts.append('no-factor') parts.append(f'0x{id(self):0x}') return ' '.join(parts) @property def optional(self) -> bool: return self.optional_count == 0 @property def fence_info(self) -> FenceInfo: return FenceInfo( unnest_fence=self.unnest_fence, factoring_fence=self.factoring_fence, ) @property def ancestors(self) -> Iterator[ScopeTreeNode]: node: Optional[ScopeTreeNode] = self while node is not None: yield node node = node.parent @property def strict_ancestors(self) -> Iterator[ScopeTreeNode]: node: Optional[ScopeTreeNode] = self.parent while node is not None: yield node node = node.parent @property def ancestors_and_namespaces(self) -> Iterator[Tuple[ScopeTreeNode, FrozenSet[pathid.Namespace]]]: namespaces: FrozenSet[str] = frozenset() node: 
Optional[ScopeTreeNode] = self while node is not None: namespaces |= node.namespaces yield node, namespaces node = node.parent @property def path_children(self) -> Iterator[ScopeTreeNodeWithPathId]: return ( p for p in self.children if has_path_id(p) ) @property def path_descendants(self) -> Iterator[ScopeTreeNodeWithPathId]: return ( p for p in self.descendants if has_path_id(p) ) def get_all_paths(self) -> Set[pathid.PathId]: return {pd.path_id for pd in self.path_descendants} @property def descendants(self) -> Iterator[ScopeTreeNode]: yield self yield from self.strict_descendants @property def strict_descendants(self) -> Iterator[ScopeTreeNode]: for child in tuple(self.children): yield child if child.parent is self: yield from child.strict_descendants def descendants_and_namespaces_ex( self, *, unfenced_only: bool=False, strict: bool=False, skip: Optional[ScopeTreeNode]=None, ) -> Iterator[ Tuple[ ScopeTreeNode, AbstractSet[pathid.Namespace], FenceInfo ] ]: if not strict: yield self, frozenset(), FenceInfo( unnest_fence=False, factoring_fence=False) for child in tuple(self.children): if unfenced_only and child.fenced: continue if child is skip: continue finfo = child.fence_info yield child, child.namespaces, finfo if child.parent is not self: continue desc_ns = child.descendants_and_namespaces_ex( unfenced_only=unfenced_only, strict=True) for desc, desc_namespaces, desc_finfo in desc_ns: yield ( desc, child.namespaces | desc_namespaces, finfo | desc_finfo, ) @property def strict_descendants_and_namespaces( self, ) -> Iterator[ Tuple[ ScopeTreeNode, AbstractSet[pathid.Namespace], FenceInfo ] ]: return self.descendants_and_namespaces_ex(strict=True) @property def descendant_namespaces(self) -> Set[pathid.Namespace]: namespaces = set() for child in self.descendants: namespaces.update(child.namespaces) return namespaces @property def fence(self) -> ScopeTreeNode: if self.fenced: return self else: return cast(ScopeTreeNode, self.parent_fence) @property def parent(self) -> Optional[ScopeTreeNode]: if self._parent is None: return None else: return self._parent() @property def path_ancestor(self) -> Optional[ScopeTreeNodeWithPathId]: for ancestor in self.strict_ancestors: if has_path_id(ancestor): return ancestor return None @property def parent_fence(self) -> Optional[ScopeTreeNode]: for ancestor in self.strict_ancestors: if ancestor.fenced: return ancestor return None @property def parent_branch(self) -> Optional[ScopeTreeNode]: for ancestor in self.strict_ancestors: if ancestor.path_id is None and not ancestor.computable_branch: return ancestor return None @property def root(self) -> ScopeTreeNode: node = self while node.parent is not None: node = node.parent return node def strip_path_namespace(self, ns: AbstractSet[str]) -> None: if not ns: return for pd in self.path_descendants: pd.path_id = pd.path_id.strip_namespace(ns) def attach_child(self, node: ScopeTreeNode, context: Optional[pctx.ParserContext]=None) -> None: if node.path_id is not None: for child in self.children: if child.path_id == node.path_id: raise errors.InvalidReferenceError( f'{node.path_id} is already present in {self!r}', context=context, ) if node.unique_id is not None: for child in self.children: if child.unique_id == node.unique_id: return node._set_parent(self) def attach_fence(self) -> ScopeTreeNode: fence = ScopeTreeNode(fenced=True) self.attach_child(fence) return fence def attach_branch(self) -> ScopeTreeNode: fence = ScopeTreeNode() self.attach_child(fence) return fence def attach_path( self, path_id: 
pathid.PathId, *, optional: bool=False, context: Optional[pctx.ParserContext], ) -> List[ScopeTreeNode]: subtree = parent = ScopeTreeNode(fenced=True) is_lprop = False fences = [] for prefix in reversed(list(path_id.iter_prefixes(include_ptr=True))): if prefix.is_ptr_path(): is_lprop = True continue new_child = ScopeTreeNode(path_id=prefix, optional=optional and parent is subtree) parent.attach_child(new_child) if ( not (is_lprop or prefix.is_linkprop_path()) and not prefix.is_tuple_indirection_path() ): parent = new_child if not prefix.is_type_intersection_path(): is_lprop = False if (rptr := prefix.rptr()) and rptr.is_computable: fence = ScopeTreeNode(computable_branch=True) fences.append(fence) parent.attach_child(fence) parent = fence self.attach_subtree(subtree, context=context) return [fence.forwarded for fence in fences] def attach_subtree(self, node: ScopeTreeNode, was_fenced: bool=False, context: Optional[pctx.ParserContext]=None) -> None: if node.path_id is not None: wrapper_node = ScopeTreeNode(fenced=True) wrapper_node.attach_child(node) node = wrapper_node for descendant, dns, _ in node.descendants_and_namespaces_ex(): if not has_path_id(descendant): continue path_id = descendant.path_id.strip_namespace(dns) visible, visible_finfo, vns = self.find_visible_ex(path_id) desc_optional = descendant.is_optional_upto(node.parent) if ( visible is None and (p := descendant.parent) and p.computable_branch and p.parent is node ): visible = self.find_child(path_id, in_branches=True) visible_finfo = None if visible is not None: p._gravestone = visible.parent if visible is not None: if visible_finfo is not None and visible_finfo.factoring_fence: raise errors.InvalidReferenceError( f'cannot reference correlated set ' f'{path_id.pformat()!r} here', context=context, ) desc_fenced = ( descendant.fence is not node.fence or was_fenced or visible.fence not in {self, self.parent_fence} ) descendant.remove() descendant._gravestone = visible keep_optional = desc_optional or desc_fenced if keep_optional: descendant.mark_as_optional() descendant.strip_path_namespace(dns | vns) visible.fuse_subtree(descendant, self_fenced=False) elif descendant.parent_fence is node: factorable_nodes = self.find_factorable_nodes(path_id) current = descendant if factorable_nodes: descendant.strip_path_namespace(dns) if desc_optional: descendant.mark_as_optional() for factorable in factorable_nodes: ( existing, factor_point, existing_ns, existing_finfo, unnest_fence, ) = factorable self._check_factoring_errors( path_id, descendant, factor_point, existing, unnest_fence, existing_finfo, context, ) existing_fenced = existing.parent_fence is not self if existing.is_optional_upto(factor_point): existing.mark_as_optional() existing.remove() current.remove() existing.strip_path_namespace(existing_ns) current.strip_path_namespace(existing_ns) factor_point.attach_child(existing) current._gravestone = existing existing.fuse_subtree( current, self_fenced=existing_fenced) current = existing for child in tuple(node.children): for pd in child.path_descendants: if pd.path_id.namespace: to_strip = set(pd.path_id.namespace) & node.namespaces pd.path_id = pd.path_id.strip_namespace(to_strip) self.attach_child(child) def _check_factoring_errors( self, path_id: pathid.PathId, descendant: ScopeTreeNodeWithPathId, factor_point: ScopeTreeNode, existing: ScopeTreeNodeWithPathId, unnest_fence: bool, existing_finfo: FenceInfo, context: Optional[pctx.ParserContext], ) -> None: if existing_finfo.factoring_fence: raise errors.InvalidReferenceError( 
f'cannot reference correlated set ' f'{path_id.pformat()!r} here', context=context, ) if ( unnest_fence and ( factor_point.find_child( path_id, in_branches=True, pfx_with_invariant_card=True, ) is None ) and ( not path_id.is_type_intersection_path() or ( (src_path := path_id.src_path()) and src_path is not None and not self.is_visible(src_path) ) ) ): path_ancestor = descendant.path_ancestor if path_ancestor is not None: offending_node = path_ancestor else: offending_node = descendant assert offending_node.path_id is not None raise errors.InvalidReferenceError( f'reference to ' f'{offending_node.path_id.pformat()!r} ' f'changes the interpretation of ' f'{existing.path_id.pformat()!r} ' f'elsewhere in the query', context=context, ) def fuse_subtree( self, node: ScopeTreeNode, self_fenced: bool=False, ) -> None: node.remove() if ( self.optional_count is not None and not node.optional ): self.optional_count += 1 if node.optional and self_fenced: self.mark_as_optional() if node.path_id is not None: subtree = ScopeTreeNode(fenced=True) subtree.optional_count = node.optional_count for child in tuple(node.children): subtree.attach_child(child) else: subtree = node self.attach_subtree(subtree, was_fenced=self_fenced) def remove_subtree(self, node: ScopeTreeNode) -> None: if node not in self.children: raise KeyError(f'{node} is not a child of {self}') node._set_parent(None) def remove_descendants( self, path_id: pathid.PathId, new: ScopeTreeNode) -> None: matching = set() for node in self.descendants: if (node.path_id is not None and _paths_equal(node.path_id, path_id, set())): matching.add(node) for node in matching: node.remove() node._gravestone = new
Apache License 2.0
jnez71/lqrrt
lqrrt/planner.py
Planner.kill_update
python
def kill_update(self):
    self.killed = True
Raises a flag that will cause an abrupt termination of the update_plan routine.
https://github.com/jnez71/lqrrt/blob/4796ee3fa8d1e658dc23c143f576b38d22642e45/lqrrt/planner.py#L596-L601
from __future__ import division import time import numpy as np import numpy.linalg as npl from tree import Tree from constraints import Constraints import scipy.interpolate if int(scipy.__version__.split('.')[1]) < 16: def interp1d(*args, **kwargs): kwargs.pop('assume_sorted', None) return scipy.interpolate.interp1d(*args, **kwargs) else: interp1d = scipy.interpolate.interp1d class Planner: def __init__(self, dynamics, lqr, constraints, horizon, dt=0.05, FPR=0, error_tol=0.05, erf=np.subtract, min_time=0.5, max_time=1, max_nodes=1E5, goal0=None, sys_time=time.time, printing=True): self.set_system(dynamics, lqr, constraints, erf) self.set_resolution(horizon, dt, FPR, error_tol) self.set_runtime(min_time, max_time, max_nodes, sys_time) self.set_goal(goal0) self.printing = printing self.killed = False def update_plan(self, x0, sample_space, goal_bias=0, guide=None, xrand_gen=None, pruning=True, finish_on_goal=False, specific_time=None): x0 = np.array(x0, dtype=np.float64) if self.goal is None: print("No goal has been set yet!") self.get_state = lambda t: x0 self.get_effort = lambda t: np.zeros(self.ncontrols) return False if specific_time is None: min_time = self.min_time max_time = self.max_time else: min_time = specific_time max_time = specific_time self.tree = Tree(x0, self.lqr(x0, np.zeros(self.ncontrols))) ignores = np.array([]) if xrand_gen is None or type(xrand_gen) is int: if goal_bias is None: goal_bias = [0] * self.nstates elif hasattr(goal_bias, '__contains__'): if len(goal_bias) != self.nstates: raise ValueError("Expected goal_bias to be scalar or have same length as state.") else: goal_bias = [goal_bias] * self.nstates if xrand_gen > 0: tries_limit = xrand_gen else: tries_limit = 10 sample_space = np.array(sample_space, dtype=np.float64) if sample_space.shape != (self.nstates, 2): raise ValueError("Expected sample_space to be list of nstates tuples.") sampling_centers = np.mean(sample_space, axis=1) sampling_spans = np.diff(sample_space).flatten() def xrand_gen(planner): tries = 0 while tries < tries_limit: xrand = sampling_centers + sampling_spans*(np.random.sample(self.nstates)-0.5) for i, choice in enumerate(np.greater(goal_bias, np.random.sample())): if choice: xrand[i] = self.goal[i] if self.constraints.is_feasible(xrand, np.zeros(self.ncontrols)): return xrand tries += 1 return xrand else: if not hasattr(xrand_gen, '__call__'): raise ValueError("Expected xrand_gen to be None, an integer >= 1, or a function.") if guide is None: self.xguide = np.copy(self.goal) else: self.xguide = np.array(guide, dtype=np.float64) if self.printing: print("\n...planning...") self.plan_reached_goal = False self.T = np.inf time_elapsed = 0 time_start = self.sys_time() while True: xrand = xrand_gen(self) if pruning: nearestIDs = np.argsort(self._costs_to_go(xrand)) nearestID = nearestIDs[0] for ID in nearestIDs: if ID not in ignores: nearestID = ID break else: nearestID = np.argmin(self._costs_to_go(xrand)) xnew_seq, unew_seq = self._steer(nearestID, xrand, force_arrive=False) if len(xnew_seq) > 0: xnew = np.copy(xnew_seq[-1]) self.tree.add_node(nearestID, xnew, self.lqr(xnew, np.copy(unew_seq[-1])), xnew_seq, unew_seq) if self._in_goal(xnew): self.plan_reached_goal = True node_seq = self.tree.climb(self.tree.size-1) x_seq, u_seq = self.tree.trajectory(node_seq) ignores = np.unique(np.concatenate((ignores, node_seq))) T = len(x_seq) * self.dt if T < self.T: self.T = T self.node_seq = node_seq self.x_seq = x_seq self.u_seq = u_seq self.t_seq = np.arange(len(self.x_seq)) * self.dt if self.printing: 
print("Found plan at elapsed time: {} s".format(np.round(time_elapsed, 6))) time_elapsed = self.sys_time() - time_start if self.killed: break elif self.plan_reached_goal and time_elapsed >= min_time: if finish_on_goal: xgoal_seq, ugoal_seq = self._steer(self.node_seq[-1], self.goal, force_arrive=True) if len(xgoal_seq) > 0: self.tree.add_node(self.node_seq[-1], self.goal, None, xgoal_seq, ugoal_seq) self.node_seq.append(self.tree.size-1) self.x_seq.extend(xgoal_seq) self.u_seq.extend(ugoal_seq) self.t_seq = np.arange(len(self.x_seq)) * self.dt if self.printing: print("Tree size: {0}\nETA: {1} s".format(self.tree.size, np.round(self.T, 2))) self._prepare_interpolators() break elif time_elapsed >= max_time or self.tree.size > self.max_nodes: Sguide = self.lqr(self.xguide, np.zeros(self.ncontrols))[0] for i, g in enumerate(self.constraints.goal_buffer): if np.isinf(g): Sguide[:, i] = 0 guide_diffs = self.erf_v(self.xguide, self.tree.state) closestID = np.argmin(np.sum(np.tensordot(guide_diffs, Sguide, axes=1) * guide_diffs, axis=1)) self.node_seq = self.tree.climb(closestID) self.x_seq, self.u_seq = self.tree.trajectory(self.node_seq) self.T = len(self.x_seq) * self.dt self.t_seq = np.arange(len(self.x_seq)) * self.dt if self.printing: print("Didn't reach goal.\nTree size: {0}\nETA: {1} s".format(self.tree.size, np.round(self.T, 2))) self._prepare_interpolators() break if self.killed or self.tree.size > self.max_nodes: if self.printing: print("Plan update terminated abruptly!") self.killed = False return False else: return True def _costs_to_go(self, x): S = self.lqr(x, np.zeros(self.ncontrols))[0] diffs = self.erf_v(x, self.tree.state) return np.sum(np.tensordot(diffs, S, axes=1) * diffs, axis=1) def _steer(self, ID, xtar, force_arrive=False): K = np.copy(self.tree.lqr[ID][1]) x = np.copy(self.tree.state[ID]) x_seq = []; u_seq = [] last_emag = np.inf i = 0; elapsed_time = 0 start_time = self.sys_time() while True: e = self.erf(np.copy(xtar), np.copy(x)) u = K.dot(e) x = self.dynamics(np.copy(x), np.copy(u), self.dt) if not self.constraints.is_feasible(x, u): x_seq = x_seq[:int(self.FPR * len(x_seq))] u_seq = u_seq[:int(self.FPR * len(u_seq))] break if force_arrive: elapsed_time = self.sys_time() - start_time if elapsed_time > np.clip(self.min_time/2, 0.1, np.inf): if self.printing: print("(exact goal-convergence timed-out)") break if np.allclose(x, xtar, rtol=1E-4, atol=1E-4): break else: i += 1 emag = np.abs(e) if self.hfactor: if np.all(emag >= last_emag): x_seq = []; u_seq = [] self.horizon_iters = int(np.clip(self.horizon_iters/self.hfactor, self.hspan[0], self.hspan[1])) break if i == self.horizon_iters: self.horizon_iters = int(np.clip(self.hfactor*self.horizon_iters, self.hspan[0], self.hspan[1])) last_emag = emag if i > self.horizon_iters or np.all(emag <= self.error_tol): break x_seq.append(x) u_seq.append(u) K = self.lqr(x, u)[1] return (x_seq, u_seq) def _in_goal(self, x): return all(goal_span[0] < v < goal_span[1] for goal_span, v in zip(self.goal_region, x)) def _prepare_interpolators(self): if len(self.x_seq) == 1: self.get_state = lambda t: self.x_seq[0] self.get_effort = lambda t: np.zeros(self.ncontrols) else: self.get_state = interp1d(self.t_seq, np.array(self.x_seq), axis=0, assume_sorted=True, bounds_error=False, fill_value=self.x_seq[-1][:]) self.get_effort = interp1d(self.t_seq, np.array(self.u_seq), axis=0, assume_sorted=True, bounds_error=False, fill_value=self.u_seq[-1][:]) def set_goal(self, goal): if goal is None: self.goal = None else: if len(goal) == 
self.nstates: self.goal = np.array(goal, dtype=np.float64) else: raise ValueError("The goal state must have same dimensionality as state space.") goal_region = [] for i, buff in enumerate(self.constraints.goal_buffer): goal_region.append((self.goal[i]-buff, self.goal[i]+buff)) self.goal_region = goal_region self.plan_reached_goal = False def set_runtime(self, min_time=None, max_time=None, max_nodes=None, sys_time=None): if min_time is not None: self.min_time = min_time if max_time is not None: self.max_time = max_time if self.min_time > self.max_time: raise ValueError("The min_time must be less than or equal to the max_time.") if max_nodes is not None: self.max_nodes = max_nodes if sys_time is not None: if hasattr(sys_time, '__call__'): self.sys_time = sys_time else: raise ValueError("Expected sys_time to be a function.") def set_resolution(self, horizon=None, dt=None, FPR=None, error_tol=None): if horizon is not None: self.horizon = horizon if dt is not None: self.dt = dt if FPR is not None: self.FPR = FPR if error_tol is not None: if np.shape(error_tol) in [(), (self.nstates,)]: self.error_tol = np.abs(error_tol).astype(np.float64) else: raise ValueError("Shape of error_tol must be scalar or length of state.") if hasattr(self.horizon, '__contains__'): if len(self.horizon) != 2: raise ValueError("Expected horizon to be tuple (min, max) or a single scalar.") if self.horizon[0] < self.dt: raise ValueError("The minimum horizon must be at least as big as dt.") if self.horizon[0] >= self.horizon[1]: raise ValueError("A horizon range tuple must be given as (min, max) where min < max.") self.horizon_iters = 1 self.hspan = np.divide(self.horizon, self.dt).astype(np.int64) self.hfactor = int(2) elif self.horizon >= self.dt: self.horizon_iters = int(self.horizon / self.dt) self.hspan = (self.horizon_iters, self.horizon_iters) self.hfactor = 0 else: raise ValueError("The horizon must be at least as big as dt.") def set_system(self, dynamics=None, lqr=None, constraints=None, erf=None): if dynamics is not None or lqr is not None: if hasattr(dynamics, '__call__'): self.dynamics = dynamics else: raise ValueError("Expected dynamics to be a function.") if hasattr(lqr, '__call__'): self.lqr = lqr else: raise ValueError("Expected lqr to be a function.") if constraints is not None: if isinstance(constraints, Constraints): self.constraints = constraints self.nstates = self.constraints.nstates self.ncontrols = self.constraints.ncontrols else: raise ValueError("Expected constraints to be an instance of the Constraints class.") if erf is not None: if hasattr(erf, '__call__'): self.erf = erf if erf is np.subtract: self.erf_v = erf else: self.erf_v = lambda g, x: -np.apply_along_axis(erf, 1, x, g) else: raise ValueError("Expected erf to be a function.") self.plan_reached_goal = False
MIT License
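A minimal usage sketch for Planner.kill_update above; `planner`, `x0`, and `sample_space` are placeholders for an already-configured lqrrt.Planner and its inputs, not objects defined in the record itself.

# Hedged sketch: abort a long-running update_plan from another thread.
# `planner`, `x0`, and `sample_space` are assumed to exist already.
import threading
import time

worker = threading.Thread(target=planner.update_plan, args=(x0, sample_space))
worker.start()
time.sleep(2.0)          # let the planner grow its tree for a while
planner.kill_update()    # raise the kill flag
worker.join()            # update_plan notices the flag and returns False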
chirpradio/chirpradio-machine
chirp/stream/looper.py
Looper._trap_exceptions
python
def _trap_exceptions(self, callable_to_wrap):
    try:
        callable_to_wrap()
    except Exception, err:
        logging.exception("Swallowed Exception in %s" % self)
        self.trapped_exceptions.append((time.time(), err))
        if len(self.trapped_exceptions) > self.MAX_TRAPPED_EXCEPTIONS:
            self.trapped_exceptions.pop(0)
Execute a callable, trapping any raised exceptions. A list of the last MAX_TRAPPED_EXCEPTIONS exceptions is maintained.
https://github.com/chirpradio/chirpradio-machine/blob/5977203ca3f561cabb05b5070d0d1227d82b10dc/chirp/stream/looper.py#L38-L52
import logging import threading import time class Looper(object): MAX_TRAPPED_EXCEPTIONS = 100 def __init__(self): self._finished = threading.Event() self._looped_once = threading.Event() self._looping = False self.trapped_exceptions = [] def _begin_looping(self): pass def _loop_once(self): raise NotImplementedError def _done_looping(self): pass
Apache License 2.0
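The trapping pattern above (swallow, log, keep a bounded history) can be illustrated standalone; this sketch is written in Python 3 and does not reuse the chirp.stream classes.

# Illustrative sketch of the same pattern: run a callable, swallow and log any
# exception, and keep only the most recent MAX_TRAPPED exceptions.
import logging
import time

MAX_TRAPPED = 100
trapped = []

def trap(callable_to_wrap):
    try:
        callable_to_wrap()
    except Exception as err:
        logging.exception("Swallowed exception")
        trapped.append((time.time(), err))
        if len(trapped) > MAX_TRAPPED:
            trapped.pop(0)

trap(lambda: 1 / 0)    # ZeroDivisionError is logged and recorded
print(len(trapped))    # -> 1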
uwbmrb/pynmrstar
pynmrstar/entry.py
Entry.from_template
python
def from_template(cls, entry_id, all_tags=False, default_values=False, schema=None) -> 'Entry':
    schema = utils.get_schema(schema)
    entry = cls(entry_id=entry_id, all_tags=all_tags, default_values=default_values,
                schema=schema)
    entry.source = f"from_template({schema.version})"
    return entry
Create an entry that has all of the saveframes and loops from the schema present. No values will be assigned. Specify the entry ID when calling this method. The optional argument 'all_tags' forces all tags to be included rather than just the mandatory tags. The optional argument 'default_values' will insert the default values from the schema. The optional argument 'schema' allows providing a custom schema.
https://github.com/uwbmrb/pynmrstar/blob/c6e3cdccb4aa44dfbc3b4e984837a6bcde3cf171/pynmrstar/entry.py#L371-L387
import hashlib import json import logging import warnings from io import StringIO from typing import TextIO, BinaryIO, Union, List, Optional, Dict, Any, Tuple from pynmrstar import definitions, utils, loop as loop_mod, parser as parser_mod, saveframe as saveframe_mod from pynmrstar._internal import _json_serialize, _interpret_file, _get_entry_from_database, write_to_file from pynmrstar.exceptions import InvalidStateError from pynmrstar.schema import Schema class Entry(object): def __contains__(self, item: Any): if isinstance(item, (list, tuple)): to_process: List[Union[str, saveframe_mod.Saveframe, loop_mod.Loop]] = list(item) elif isinstance(item, (loop_mod.Loop, saveframe_mod.Saveframe, str)): to_process = [item] else: return False for item in to_process: if isinstance(item, saveframe_mod.Saveframe): if item not in self._frame_list: return False elif isinstance(item, (loop_mod.Loop, str)): found = False for saveframe in self._frame_list: if item in saveframe: found = True break if not found: return False else: return False return True def __delitem__(self, item: Union['saveframe_mod.Saveframe', int, str]) -> None: if isinstance(item, int): try: del self._frame_list[item] except IndexError: raise IndexError(f'Index out of range: no saveframe at index: {item}') else: self.remove_saveframe(item) def __eq__(self, other) -> bool: if not isinstance(other, Entry): return False return (self.entry_id, self._frame_list) == (other.entry_id, other._frame_list) def __getitem__(self, item: Union[int, str]) -> 'saveframe_mod.Saveframe': try: return self._frame_list[item] except TypeError: return self.get_saveframe_by_name(item) def __init__(self, **kwargs) -> None: self._entry_id: Union[str, int] = 0 self._frame_list: List[saveframe_mod.Saveframe] = [] self.source: Optional[str] = None if len(kwargs) == 0: raise ValueError("You should not directly instantiate an Entry using this method. 
Instead use the " "class methods: Entry.from_database(), Entry.from_file(), Entry.from_string(), " "Entry.from_scratch(), and Entry.from_json().") if 'the_string' in kwargs: star_buffer: StringIO = StringIO(kwargs['the_string']) self.source = "from_string()" elif 'file_name' in kwargs: star_buffer = _interpret_file(kwargs['file_name']) self.source = f"from_file('{kwargs['file_name']}')" elif 'all_tags' in kwargs: self._entry_id = kwargs['entry_id'] saveframe_categories: dict = {} schema = utils.get_schema(kwargs['schema']) schema_obj = schema.schema for tag in [schema_obj[x.lower()] for x in schema.schema_order]: category = tag['SFCategory'] if category not in saveframe_categories: saveframe_categories[category] = True templated_saveframe = saveframe_mod.Saveframe.from_template(category, category + "_1", entry_id=self._entry_id, all_tags=kwargs['all_tags'], default_values=kwargs['default_values'], schema=schema) self._frame_list.append(templated_saveframe) entry_saveframe = self.get_saveframes_by_category('entry_information')[0] entry_saveframe['NMR_STAR_version'] = schema.version entry_saveframe['Original_NMR_STAR_version'] = schema.version return else: self._entry_id = kwargs['entry_id'] self.source = "from_scratch()" return parser: parser_mod.Parser = parser_mod.Parser(entry_to_parse_into=self) parser.parse(star_buffer.read(), source=self.source, convert_data_types=kwargs.get('convert_data_types', False)) def __iter__(self) -> saveframe_mod.Saveframe: for saveframe in self._frame_list: yield saveframe def __len__(self) -> int: return len(self._frame_list) def __repr__(self) -> str: return f"<pynmrstar.Entry '{self._entry_id}' {self.source}>" def __setitem__(self, key: Union[int, str], item: 'saveframe_mod.Saveframe') -> None: if isinstance(item, saveframe_mod.Saveframe): if isinstance(key, int): self._frame_list[key] = item else: contains_frame: bool = False for pos, frame in enumerate(self._frame_list): if frame.name == key: if contains_frame: raise ValueError(f"Cannot replace the saveframe with the name '{frame.name} " f"because multiple saveframes in the entry have the same name. " f'This library does not allow that normally, as it is ' f'invalid NMR-STAR. Did you manually edit the Entry.frame_list ' f'object? Please use the Entry.add_saveframe() method instead to ' f'add new saveframes.') self._frame_list[pos] = item contains_frame = True if not contains_frame: raise ValueError(f"Saveframe with name '{key}' does not exist and therefore cannot be " f"written to. Use the add_saveframe() method to add new saveframes.") else: raise ValueError("You can only assign a saveframe to an entry splice. 
You attempted to assign: " f"'{repr(item)}'") def __str__(self, skip_empty_loops: bool = False, skip_empty_tags: bool = False, show_comments: bool = True) -> str: sf_strings = [] seen_saveframes = {} for saveframe_obj in self: if saveframe_obj.category in seen_saveframes: sf_strings.append(saveframe_obj.format(skip_empty_loops=skip_empty_loops, skip_empty_tags=skip_empty_tags, show_comments=False)) else: sf_strings.append(saveframe_obj.format(skip_empty_loops=skip_empty_loops, skip_empty_tags=skip_empty_tags, show_comments=show_comments)) seen_saveframes[saveframe_obj.category] = True return f"data_{self.entry_id}\n\n" + "\n".join(sf_strings) @property def category_list(self) -> List[str]: category_list = [] for saveframe in self._frame_list: category = saveframe.category if category and category not in category_list: category_list.append(category) return list(category_list) @property def empty(self) -> bool: for saveframe in self._frame_list: if not saveframe.empty: return False return True @property def entry_id(self) -> Union[str, int]: return self._entry_id @entry_id.setter def entry_id(self, value: Union[str, int]) -> None: self._entry_id = value schema = utils.get_schema() for saveframe in self._frame_list: for tag in saveframe.tags: fqtn = (saveframe.tag_prefix + "." + tag[0]).lower() try: if schema.schema[fqtn]['entryIdFlg'] == 'Y': tag[1] = self._entry_id except KeyError: pass for loop in saveframe.loops: for tag in loop.tags: fqtn = (loop.category + "." + tag).lower() try: if schema.schema[fqtn]['entryIdFlg'] == 'Y': loop[tag] = [self._entry_id] * len(loop[tag]) except KeyError: pass @property def frame_dict(self) -> Dict[str, 'saveframe_mod.Saveframe']: fast_dict = dict((frame.name, frame) for frame in self._frame_list) if len(fast_dict) == len(self._frame_list): return fast_dict frame_dict = {} for frame in self._frame_list: if frame.name in frame_dict: raise InvalidStateError("The entry has multiple saveframes with the same name. That is not allowed in " "the NMR-STAR format. Please remove or rename one. Duplicate name: " f"'{frame.name}'. 
Furthermore, please use Entry.add_saveframe() and " f"Entry.remove_saveframe() rather than manually editing the Entry.frame_list " f"list, which will prevent this state from existing in the future.") frame_dict[frame.name] = frame return frame_dict @property def frame_list(self) -> List['saveframe_mod.Saveframe']: return self._frame_list @classmethod def from_database(cls, entry_num: Union[str, int], convert_data_types: bool = False): return _get_entry_from_database(entry_num, convert_data_types=convert_data_types) @classmethod def from_file(cls, the_file: Union[str, TextIO, BinaryIO], convert_data_types: bool = False): return cls(file_name=the_file, convert_data_types=convert_data_types) @classmethod def from_json(cls, json_dict: Union[dict, str]): if not isinstance(json_dict, dict): try: json_dict = json.loads(json_dict) except (TypeError, ValueError): raise ValueError("The JSON you provided was neither a Python dictionary nor a JSON string.") if "saveframes" not in json_dict: raise ValueError("The JSON you provide must be a hash and must contain the key 'saveframes' - even if the " "key points to 'None'.") if "entry_id" not in json_dict and "bmrb_id" not in json_dict: raise ValueError("The JSON you provide must be a hash and must contain the key 'entry_id' - even if the" " key points to 'None'.") if 'entry_id' not in json_dict: json_dict['entry_id'] = json_dict['bmrb_id'] ret = Entry.from_scratch(json_dict['entry_id']) ret._frame_list = [saveframe_mod.Saveframe.from_json(x) for x in json_dict['saveframes']] ret.source = "from_json()" return ret @classmethod def from_string(cls, the_string: str, convert_data_types: bool = False): return cls(the_string=the_string, convert_data_types=convert_data_types) @classmethod def from_scratch(cls, entry_id: Union[str, int]): return cls(entry_id=entry_id) @classmethod
MIT License
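A hedged usage sketch for Entry.from_template; it assumes the pynmrstar package is installed and that the default schema can be loaded in the environment. The entry ID is a placeholder.

# Sketch: build a skeleton NMR-STAR entry from the schema (no values assigned).
import pynmrstar

entry = pynmrstar.Entry.from_template(entry_id=15000, all_tags=False,
                                      default_values=True)
print(entry.source)    # e.g. "from_template(<schema version>)"
print(len(entry))      # number of saveframes created from the schema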
hazyresearch/fonduer
src/fonduer/candidates/mentions.py
MentionExtractor.apply
python
def apply(
    self,
    docs: Collection[Document],
    clear: bool = True,
    parallelism: Optional[int] = None,
    progress_bar: bool = True,
) -> None:
    super().apply(
        docs, clear=clear, parallelism=parallelism, progress_bar=progress_bar
    )
Run the MentionExtractor.

:Example: To extract mentions from a set of training documents using 4 cores::

    mention_extractor.apply(train_docs, parallelism=4)

:param docs: Set of documents to extract from.
:param clear: Whether or not to clear the existing Mentions beforehand.
:param parallelism: How many threads to use for extraction. This will override the parallelism value used to initialize the MentionExtractor if it is provided.
:param progress_bar: Whether or not to display a progress bar. The progress bar is measured per document.
https://github.com/hazyresearch/fonduer/blob/c9fd6b91998cd708ab95aeee3dfaf47b9e549ffd/src/fonduer/candidates/mentions.py#L426-L451
import logging import re from builtins import map, range from typing import Any, Collection, Dict, Iterable, Iterator, List, Optional, Set, Union from sqlalchemy.orm import Session from fonduer.candidates.matchers import _Matcher from fonduer.candidates.models import Candidate, Mention from fonduer.candidates.models.candidate import candidate_subclasses from fonduer.candidates.models.caption_mention import TemporaryCaptionMention from fonduer.candidates.models.cell_mention import TemporaryCellMention from fonduer.candidates.models.document_mention import TemporaryDocumentMention from fonduer.candidates.models.figure_mention import TemporaryFigureMention from fonduer.candidates.models.paragraph_mention import TemporaryParagraphMention from fonduer.candidates.models.section_mention import TemporarySectionMention from fonduer.candidates.models.span_mention import TemporarySpanMention from fonduer.candidates.models.table_mention import TemporaryTableMention from fonduer.candidates.models.temporary_context import TemporaryContext from fonduer.parser.models import Context, Document, Sentence from fonduer.utils.udf import UDF, UDFRunner from fonduer.utils.utils import get_dict_of_stable_id logger = logging.getLogger(__name__) class MentionSpace(object): def __init__(self) -> None: pass def apply(self, x: Context) -> Iterator[TemporaryContext]: raise NotImplementedError() class Ngrams(MentionSpace): def __init__( self, n_min: int = 1, n_max: int = 5, split_tokens: Collection[str] = [] ) -> None: MentionSpace.__init__(self) self.n_min = n_min self.n_max = n_max self.split_rgx = ( r"(" + r"|".join(map(re.escape, sorted(split_tokens, reverse=True))) + r")" if split_tokens and len(split_tokens) > 0 else None ) def apply(self, context: Sentence) -> Iterator[TemporarySpanMention]: offsets = context.char_offsets L = len(offsets) seen: Set[TemporarySpanMention] = set() for j in range(self.n_min, self.n_max + 1)[::-1]: for i in range(L - j + 1): w = context.words[i + j - 1] start = offsets[i] end = offsets[i + j - 1] + len(w) - 1 ts = TemporarySpanMention( char_start=start, char_end=end, sentence=context ) if ts not in seen: seen.add(ts) yield ts if ( j == 1 and self.n_max >= 1 and self.n_min <= 1 and self.split_rgx is not None and end - start > 0 ): text = context.text[start - offsets[0] : end - offsets[0] + 1] start_idxs = [0] end_idxs = [] for m in re.finditer(self.split_rgx, text): start_idxs.append(m.end()) end_idxs.append(m.start()) end_idxs.append(len(text)) for start_idx in start_idxs: for end_idx in end_idxs: if start_idx < end_idx: ts = TemporarySpanMention( char_start=start_idx, char_end=end_idx - 1, sentence=context, ) if ts not in seen and ts.get_span(): seen.add(ts) yield ts class MentionNgrams(Ngrams): def __init__( self, n_min: int = 1, n_max: int = 5, split_tokens: Collection[str] = [] ) -> None: Ngrams.__init__(self, n_min=n_min, n_max=n_max, split_tokens=split_tokens) def apply(self, doc: Document) -> Iterator[TemporarySpanMention]: if not isinstance(doc, Document): raise TypeError( "Input Contexts to MentionNgrams.apply() must be of type Document" ) for sentence in doc.sentences: for ts in Ngrams.apply(self, sentence): yield ts class MentionFigures(MentionSpace): def __init__(self, types: Optional[str] = None) -> None: MentionSpace.__init__(self) if types is not None: self.types = [t.strip().lower() for t in types] else: self.types = None def apply(self, doc: Document) -> Iterator[TemporaryFigureMention]: if not isinstance(doc, Document): raise TypeError( "Input Contexts to 
MentionFigures.apply() must be of type Document" ) for figure in doc.figures: if self.types is None or any( figure.url.lower().endswith(type) for type in self.types ): yield TemporaryFigureMention(figure) class MentionSentences(MentionSpace): def __init__(self) -> None: MentionSpace.__init__(self) def apply(self, doc: Document) -> Iterator[TemporarySpanMention]: if not isinstance(doc, Document): raise TypeError( "Input Contexts to MentionSentences.apply() must be of type Document" ) for sentence in doc.sentences: yield TemporarySpanMention( char_start=0, char_end=len(sentence.text) - 1, sentence=sentence ) class MentionParagraphs(MentionSpace): def __init__(self) -> None: MentionSpace.__init__(self) def apply(self, doc: Document) -> Iterator[TemporaryParagraphMention]: if not isinstance(doc, Document): raise TypeError( "Input Contexts to MentionParagraphs.apply() must be of type Document" ) for paragraph in doc.paragraphs: yield TemporaryParagraphMention(paragraph) class MentionCaptions(MentionSpace): def __init__(self) -> None: MentionSpace.__init__(self) def apply(self, doc: Document) -> Iterator[TemporaryCaptionMention]: if not isinstance(doc, Document): raise TypeError( "Input Contexts to MentionCaptions.apply() must be of type Document" ) for caption in doc.captions: yield TemporaryCaptionMention(caption) class MentionCells(MentionSpace): def __init__(self) -> None: MentionSpace.__init__(self) def apply(self, doc: Document) -> Iterator[TemporaryCellMention]: if not isinstance(doc, Document): raise TypeError( "Input Contexts to MentionCells.apply() must be of type Document" ) for cell in doc.cells: yield TemporaryCellMention(cell) class MentionTables(MentionSpace): def __init__(self) -> None: MentionSpace.__init__(self) def apply(self, doc: Document) -> Iterator[TemporaryTableMention]: if not isinstance(doc, Document): raise TypeError( "Input Contexts to MentionTables.apply() must be of type Document" ) for table in doc.tables: yield TemporaryTableMention(table) class MentionSections(MentionSpace): def __init__(self) -> None: MentionSpace.__init__(self) def apply(self, doc: Document) -> Iterator[TemporarySectionMention]: if not isinstance(doc, Document): raise TypeError( "Input Contexts to MentionSections.apply() must be of type Document" ) for section in doc.sections: yield TemporarySectionMention(section) class MentionDocuments(MentionSpace): def __init__(self) -> None: MentionSpace.__init__(self) def apply(self, doc: Document) -> Iterator[TemporaryDocumentMention]: if not isinstance(doc, Document): raise TypeError( "Input Contexts to MentionDocuments.apply() must be of type Document" ) yield TemporaryDocumentMention(doc) class MentionExtractor(UDFRunner): def __init__( self, session: Session, mention_classes: List[Mention], mention_spaces: List[MentionSpace], matchers: List[_Matcher], parallelism: int = 1, ): super().__init__( session, MentionExtractorUDF, parallelism=parallelism, mention_classes=mention_classes, mention_spaces=mention_spaces, matchers=matchers, ) arity = len(mention_classes) if not all( len(x) == arity for x in [mention_classes, mention_spaces, matchers] ): raise ValueError( "Mismatched arity of mention classes, spaces, and matchers." ) self.mention_classes = mention_classes
MIT License
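A hedged sketch of how MentionExtractor.apply might be wired up, following the docstring's example; `session`, `train_docs`, the "Part" mention class, and the regex matcher are placeholders and not part of the record above, and the import paths follow fonduer's documented tutorial layout.

# Sketch (assumes a configured fonduer session and parsed documents `train_docs`).
from fonduer.candidates import MentionExtractor, MentionNgrams
from fonduer.candidates.matchers import RegexMatchSpan
from fonduer.candidates.models import mention_subclass

Part = mention_subclass("Part")                      # hypothetical mention class
part_matcher = RegexMatchSpan(rgx=r"[A-Z]{2}\d{4}")  # hypothetical matcher
part_ngrams = MentionNgrams(n_max=3)

mention_extractor = MentionExtractor(
    session, [Part], [part_ngrams], [part_matcher], parallelism=4
)
mention_extractor.apply(train_docs, parallelism=4)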
mrknow/filmkodi
plugin.video.fanfilm/resources/lib/libraries/f4mproxy/F4mProxy.py
Server.get_request
python
def get_request(self):
    self.socket.settimeout(5.0)
    result = None
    while result is None:
        try:
            result = self.socket.accept()
        except socket.timeout:
            pass
    result[0].settimeout(1000)
    return result
Get the request and client address from the socket.
https://github.com/mrknow/filmkodi/blob/0162cde9ae25ddbf4a69330948714833ff2f78c9/plugin.video.fanfilm/resources/lib/libraries/f4mproxy/F4mProxy.py#L298-L308
import base64 import re import time import urllib import urllib2 import sys import traceback import socket from SocketServer import ThreadingMixIn from BaseHTTPServer import HTTPServer, BaseHTTPRequestHandler from urllib import * import urlparse from f4mDownloader import F4MDownloader from interalSimpleDownloader import interalSimpleDownloader from hlsDownloader import HLSDownloader import xbmc import thread import zlib from StringIO import StringIO import hmac import hashlib import base64 import threading import xbmcgui,xbmcplugin import xbmc import hashlib g_stopEvent=None g_downloader=None class MyHandler(BaseHTTPRequestHandler): def do_HEAD(self): print "XBMCLocalProxy: Serving HEAD request..." self.send_response(200) rtype="flv-application/octet-stream" self.end_headers() """ Serves a GET request. """ def do_GET(s): print "XBMCLocalProxy: Serving GET request..." s.answer_request(True) def answer_request(self, sendData): global g_stopEvent global g_downloader try: request_path=self.path[1:] request_path=re.sub(r"\?.*","",request_path) if request_path.lower()=="stop": sys.exit() return if request_path.lower()=="favicon.ico": print 'dont have no icone here, may be in future' self.wfile.close() return (url,proxy,use_proxy_for_chunks,maxbitrate,simpledownloader, auth,streamtype)=self.decode_url(request_path) print 'simpledownloaderxxxxxxxxxxxxxxx',simpledownloader if streamtype=='' or streamtype==None or streamtype=='none': streamtype='HDS' if streamtype=='HDS': print 'Url received at proxy',url,proxy,use_proxy_for_chunks,maxbitrate downloader=None if not downloader or downloader.live==True or not (downloader.init_done and downloader.init_url ==url): downloader=F4MDownloader() if not downloader.init(self.wfile,url,proxy,use_proxy_for_chunks,g_stopEvent,maxbitrate,auth): print 'cannot init' return g_downloader=downloader print 'init...' 
enableSeek=False requested_range=self.headers.getheader("Range") if requested_range==None: requested_range="" srange, erange=(None,None) if downloader.live==False and len(requested_range)>0 and not requested_range=="bytes=0-0": enableSeek=True (srange, erange) = self.get_range_request(requested_range, downloader.total_frags) print 'PROXY DATA',downloader.live,enableSeek,requested_range,downloader.total_frags,srange, erange enableSeek=False framgementToSend=0 inflate=1815002 if enableSeek: self.send_response(206) rtype="flv-application/octet-stream" self.send_header("Content-Type", rtype) self.send_header("Accept-Ranges","bytes") print 'not LIVE,enable seek',downloader.total_frags totalsize=downloader.total_frags*inflate framgementToSend=1 erange=srange+framgementToSend*inflate if erange>=totalsize: erange=totalsize-1 crange="bytes "+str(srange)+"-" +str(int(erange))+"/*" print srange/inflate,erange/inflate,totalsize/inflate self.send_header("Content-Length", str(totalsize)) self.send_header("Content-Range",crange) etag=self.generate_ETag(url) self.send_header("ETag",etag) print crange self.send_header("Last-Modified","Wed, 21 Feb 2000 08:43:39 GMT") self.send_header("Cache-Control","public, must-revalidate") self.send_header("Cache-Control","no-cache") self.send_header("Pragma","no-cache") self.send_header("features","seekable,stridable") self.send_header("client-id","12345") self.send_header("Connection", 'close') else: self.send_response(200) rtype="flv-application/octet-stream" self.send_header("Content-Type", rtype) srange=None elif streamtype=='SIMPLE' or simpledownloader : downloader=interalSimpleDownloader(); if not downloader.init(self.wfile,url,proxy,g_stopEvent,maxbitrate): print 'cannot init' return srange,framgementToSend=(None,None) self.send_response(200) rtype="flv-application/octet-stream" self.send_header("Content-Type", rtype) srange=None elif streamtype=='HLS' or simpledownloader : downloader=HLSDownloader() if not downloader.init(self.wfile,url,proxy,use_proxy_for_chunks,g_stopEvent,maxbitrate,auth): print 'cannot init' return srange,framgementToSend=(None,None) self.send_response(200) rtype="flv-application/octet-stream" self.send_header("Content-Type", rtype) srange=None self.end_headers() if not srange==None: srange=srange/inflate if sendData: downloader.keep_sending_video(self.wfile,srange,framgementToSend) print 'srange,framgementToSend',srange,framgementToSend xbmc.sleep(500) while not downloader.status=="finished": xbmc.sleep(200); except: traceback.print_exc() g_stopEvent.set() self.send_response(404) self.wfile.close() return self.wfile.close() return def generate_ETag(self, url): md=hashlib.md5() md.update(url) return md.hexdigest() def get_range_request(self, hrange, file_size): if hrange==None: srange=0 erange=None else: try: hrange=str(hrange) splitRange=hrange.split("=")[1].split("-") srange=int(splitRange[0]) erange = splitRange[1] if erange=="": erange=int(file_size)-1 except: srange=0 erange=int(file_size-1); return (srange, erange) def decode_url(self, url): print 'in params' params=urlparse.parse_qs(url) print 'params',params received_url = params['url'][0] use_proxy_for_chunks =False proxy=None try: proxy = params['proxy'][0] use_proxy_for_chunks = params['use_proxy_for_chunks'][0] except: pass maxbitrate=0 try: maxbitrate = int(params['maxbitrate'][0]) except: pass auth=None try: auth = params['auth'][0] except: pass if auth=='None' and auth=='': auth=None if proxy=='None' or proxy=='': proxy=None if use_proxy_for_chunks=='False': 
use_proxy_for_chunks=False simpledownloader=False try: simpledownloader = params['simpledownloader'][0] if simpledownloader.lower()=='true': print 'params[simpledownloader][0]',params['simpledownloader'][0] simpledownloader=True else: simpledownloader=False except: pass streamtype='HDS' try: streamtype = params['streamtype'][0] except: pass if streamtype=='None' and streamtype=='': streamtype='HDS' return (received_url,proxy,use_proxy_for_chunks,maxbitrate,simpledownloader,auth,streamtype) """ Sends the requested file and add additional headers. """ class Server(HTTPServer):
Apache License 2.0
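The get_request override above is a common trick for keeping an HTTPServer accept loop responsive; a standalone Python 3 sketch of the same pattern (not the plugin's actual proxy setup), with a placeholder handler and port.

# Sketch: an HTTPServer whose get_request() polls accept() with a timeout,
# so the accept loop wakes up periodically instead of blocking forever.
import socket
from http.server import BaseHTTPRequestHandler, HTTPServer

class PollingServer(HTTPServer):
    def get_request(self):
        self.socket.settimeout(5.0)
        result = None
        while result is None:
            try:
                result = self.socket.accept()
            except socket.timeout:
                pass
        result[0].settimeout(1000)
        return result

class Handler(BaseHTTPRequestHandler):
    def do_GET(self):
        self.send_response(200)
        self.end_headers()
        self.wfile.write(b"ok")

# PollingServer(("127.0.0.1", 8080), Handler).handle_request()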
ryanc414/pytest_commander
pytest_commander/result_tree.py
_ensure_branch
python
def _ensure_branch(
    root_branch: BranchNode,
    nodeid_fragments: List[nodeid.NodeidFragment],
    nodeid_prefix: nodeid.Nodeid,
    root_dir: str,
) -> BranchNode:
    next_fragment, rest_fragments = nodeid_fragments[0], nodeid_fragments[1:]
    if not rest_fragments:
        return root_branch

    child_nodeid = nodeid_prefix.append(next_fragment)
    try:
        child = root_branch.child_branches[next_fragment.val]
        assert child.nodeid == child_nodeid
    except KeyError:
        child = BranchNode(branch_nodeid=child_nodeid, root_dir=root_dir)
        root_branch.child_branches[next_fragment.val] = child

    return _ensure_branch(child, rest_fragments, child_nodeid, root_dir)
Retrieve the branch node under the given root node that corresponds to the given chain of collectors. If any branch nodes do not yet exist, they will be automatically created.
https://github.com/ryanc414/pytest_commander/blob/11681fea458de1761e808684f578e183bddc40ef/pytest_commander/result_tree.py#L285-L310
import abc import enum import logging import os import textwrap from typing import List, Tuple, Dict, Generator, Iterator, Optional, Any, cast, Union import marshmallow from marshmallow import fields import marshmallow_enum from _pytest import nodes from pytest_commander import environment from pytest_commander import nodeid LOGGER = logging.getLogger(__name__) class TestState(enum.Enum): INIT = "init" SKIPPED = "skipped" PASSED = "passed" FAILED = "failed" RUNNING = "running" _TEST_STATE_PRECEDENT = { TestState.INIT: 1, TestState.SKIPPED: 2, TestState.PASSED: 3, TestState.FAILED: 4, TestState.RUNNING: 5, } def _status_precedent(statuses: Iterator[TestState]) -> TestState: return max( statuses, key=lambda status: _TEST_STATE_PRECEDENT[status], default=TestState.INIT, ) class Node(abc.ABC): @property @abc.abstractmethod def status(self) -> TestState: raise NotImplementedError @status.setter def status(self, new_status: TestState): raise NotImplementedError @property @abc.abstractmethod def nodeid(self) -> nodeid.Nodeid: raise NotImplementedError @property @abc.abstractmethod def short_id(self) -> str: raise NotImplementedError @abc.abstractmethod def pretty_format(self) -> str: raise NotImplementedError class BranchNode(Node): def __init__( self, branch_nodeid: nodeid.Nodeid, root_dir: str, short_id: Optional[str] = None, ): self._nodeid = branch_nodeid self._short_id = short_id self._fspath = os.path.join(root_dir, branch_nodeid.fspath) self.child_branches: Dict[str, BranchNode] = {} self.child_leaves: Dict[str, LeafNode] = {} self.environment: Optional[environment.EnvironmentManager] if os.path.isdir(self._fspath): self.environment = environment.EnvironmentManager(self._fspath) else: self.environment = None def __eq__(self, other: object) -> bool: if not isinstance(other, BranchNode): return False return self.nodeid == other.nodeid def __repr__(self) -> str: return f"BranchNode <{self.nodeid} {self.status}>" def pretty_format(self) -> str: formatted_children = textwrap.indent( "\n".join(child.pretty_format() for child in self.iter_children()), prefix=" ", ) return f"{self}\n{formatted_children}" def iter_children(self) -> Generator[Node, None, None]: for branch in self.child_branches.values(): yield branch for leaf in self.child_leaves.values(): yield leaf def merge(self, other: "BranchNode", merge_base: nodeid.Nodeid): for child_branch in other.child_branches.values(): if child_branch.short_id in self.child_leaves: del self.child_leaves[child_branch.short_id] if ( child_branch.short_id in self.child_branches and child_branch.nodeid != merge_base ): self.child_branches[child_branch.short_id].merge( child_branch, merge_base ) else: self.child_branches[child_branch.short_id] = child_branch for leaf_id in other.child_leaves: if leaf_id in self.child_branches: del self.child_branches[leaf_id] self.child_leaves.update(other.child_leaves) @property def environment_state(self): if self.environment is None: return environment.EnvironmentState.INACTIVE return self.environment.state @property def nodeid(self) -> nodeid.Nodeid: return self._nodeid @property def short_id(self) -> str: if self._short_id: return self._short_id return self.nodeid.short_id @property def fspath(self) -> str: return self._fspath @property def status(self) -> TestState: return _status_precedent(child.status for child in self.iter_children()) @status.setter def status(self, new_status: TestState): for child in self.iter_children(): child.status = new_status class LeafNode(Node): def __init__(self, leaf_nodeid: nodeid.Nodeid, 
root_dir: str): self._nodeid = leaf_nodeid self._status = TestState.INIT self._fspath = os.path.join(root_dir, leaf_nodeid.fspath) self.longrepr: Optional[str] = None def __eq__(self, other: object) -> bool: if not isinstance(other, LeafNode): return False return self.nodeid == other.nodeid def __repr__(self) -> str: return f"LeafNode <{self.nodeid} {self.status}>" @property def nodeid(self) -> nodeid.Nodeid: return self._nodeid @property def short_id(self) -> str: return self.nodeid.short_id @property def fspath(self) -> str: return self._fspath def pretty_format(self) -> str: return str(self) @property def status(self) -> TestState: return self._status @status.setter def status(self, new_status): self._status = new_status def build_from_items(items: List, root_dir: str) -> BranchNode: short_id = os.path.basename(root_dir.rstrip(os.sep)) root_branch = BranchNode( branch_nodeid=nodeid.EMPTY_NODEID, root_dir=root_dir, short_id=short_id ) for item in items: item_nodeid = nodeid.Nodeid.from_string(item.nodeid) nodeid_fragments = item_nodeid.fragments leaf = LeafNode(nodeid.Nodeid.from_string(item.nodeid), root_dir) child = _ensure_branch( root_branch, nodeid_fragments, nodeid.EMPTY_NODEID, root_dir ) child.child_leaves[leaf.short_id] = leaf return root_branch
MIT License
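The recursion above walks nodeid fragments and creates any missing intermediate branches. The same idea on a plain nested dict, as an illustrative sketch that does not use the pytest_commander classes:

# Illustrative sketch: ensure every intermediate fragment of a nodeid has a
# branch, creating branches as needed; the final fragment is treated as the leaf.
def ensure_branch(root, fragments):
    head, rest = fragments[0], fragments[1:]
    if not rest:
        return root
    child = root.setdefault(head, {})
    return ensure_branch(child, rest)

tree = {}
leaf_parent = ensure_branch(tree, ["tests", "test_mod.py", "TestClass", "test_case"])
print(tree)   # {'tests': {'test_mod.py': {'TestClass': {}}}}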
zaproxy/zap-api-python
src/zapv2/graphql.py
graphql.import_file
python
def import_file(self, endurl, file, apikey=''):
    return six.next(six.itervalues(self.zap._request(
        self.zap.base + 'graphql/action/importFile/',
        {'endurl': endurl, 'file': file, 'apikey': apikey})))
Imports a GraphQL Schema from a File. This component is optional and therefore the API will only work if it is installed
https://github.com/zaproxy/zap-api-python/blob/5166b67ebd5e2d89b285aa7b9d9d7cfd83b88d31/src/zapv2/graphql.py#L94-L99
import six class graphql(object): def __init__(self, zap): self.zap = zap @property def option_args_type(self): return six.next(six.itervalues(self.zap._request(self.zap.base + 'graphql/view/optionArgsType/'))) @property def option_lenient_max_query_depth_enabled(self): return six.next(six.itervalues(self.zap._request(self.zap.base + 'graphql/view/optionLenientMaxQueryDepthEnabled/'))) @property def option_max_additional_query_depth(self): return six.next(six.itervalues(self.zap._request(self.zap.base + 'graphql/view/optionMaxAdditionalQueryDepth/'))) @property def option_max_args_depth(self): return six.next(six.itervalues(self.zap._request(self.zap.base + 'graphql/view/optionMaxArgsDepth/'))) @property def option_max_query_depth(self): return six.next(six.itervalues(self.zap._request(self.zap.base + 'graphql/view/optionMaxQueryDepth/'))) @property def option_optional_args_enabled(self): return six.next(six.itervalues(self.zap._request(self.zap.base + 'graphql/view/optionOptionalArgsEnabled/'))) @property def option_query_split_type(self): return six.next(six.itervalues(self.zap._request(self.zap.base + 'graphql/view/optionQuerySplitType/'))) @property def option_request_method(self): return six.next(six.itervalues(self.zap._request(self.zap.base + 'graphql/view/optionRequestMethod/')))
Apache License 2.0
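A hedged sketch of calling import_file through the ZAP Python client; the API key, endpoint URL, and schema path are placeholders, and a running ZAP instance with the GraphQL add-on installed is assumed.

# Sketch: import a local GraphQL schema file into a running ZAP instance.
from zapv2 import ZAPv2

zap = ZAPv2(apikey='changeme')                      # placeholder API key
result = zap.graphql.import_file(
    endurl='https://example.com/graphql',           # placeholder endpoint
    file='/tmp/schema.graphql',                     # placeholder schema path
    apikey='changeme',
)
print(result)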
mschulth/rhc
env/Environment.py
Environment.var_plt
python
def var_plt(self, x, y=None, label=None, var=None):
    if y is not None:
        line = plt.plot(x, y, label=label)
    else:
        line = plt.plot(x, label=label)
    if var is not None:
        var = np.log(var + 1e-5)[:, 0]/2
        col = line[0]._color
        plt.fill_between(np.arange(x.shape[0]), x - var, x + var, color=col, alpha=0.2)
    return line
Creates a line plot with variances

:param x: the x values for plotting
:param y: the y values for plotting
:param label: the label for the lines
:param var: the variances to plot
:return: the line of the plot
https://github.com/mschulth/rhc/blob/0e9adf74267ad6c2aa8712f46c54791f30d532ab/env/Environment.py#L265-L286
import numpy as np import matplotlib.pyplot as plt from abc import ABC, abstractmethod import casadi as cas from gym.spaces import Box class Environment(ABC): def __init__(self): self.a_dim = None self.s_dim = None self.o_dim = None self.a_lim = None self.s_lim = None self.o_lim = None self.o_labels = None self.x0 = None self.task_cost_weights = None self.task_goal = None self.task_num_rff_feats = None self.task_t = None self.task_state_cost_start = 0 self._x0_obs = None self._x_bounds = None self._action_space = None self._observation_space = None self._state = None def reset(self): self.state = self.x0 return self.obs_state @abstractmethod def step(self, u): pass def obs(self, s): return s def render(self): pass def close(self): pass def run_actions_as_feats(self, actions, reset=True): s, u = self.run_actions(actions, reset=reset) x, y = self.features(s, actions) return u, s, x, y def features(self, s, us): us = np.atleast_2d(us) s = np.atleast_2d(s) x = np.hstack((s[:-1, :], us)) y = s[1:, :] - s[:-1, :] return x, y def obs_x(self, x): x = np.atleast_2d(x) xobs = np.hstack((self.obs(x[:, :self.s_dim]), x[:, self.s_dim:])) return xobs @property def x_bounds(self): if self._x_bounds is None: if self.o_lim is None: return None else: self._x_bounds = np.vstack((self.o_lim, self.a_lim)) return self._x_bounds @property def action_space(self): if self._action_space is None: self._action_space = Box(self.a_lim[:, 0], self.a_lim[:, 1], dtype=np.float64) return self._action_space @property def observation_space(self): if self._observation_space is None: self._observation_space = Box(-np.inf, np.inf, (self.o_dim,), dtype=np.float64) return self._observation_space @property def state(self): return self._state @state.setter def state(self, s): self._state = s @property def obs_state(self): return self.obs(self.state) @property def obs_x0(self): if self._x0_obs is None: self._x0_obs = self.obs(self.x0) return self._x0_obs @property def x_dim(self): return self.o_dim + self.a_dim @property def x_lim(self): return np.vstack((self.o_lim, self.a_lim)) def execute_rand(self, num_actions, reset=False): rand = np.random.random_sample((num_actions, self.a_dim)) upper = self.a_lim[:, 1] lower = self.a_lim[:, 0] dist_uplow = upper - lower a = rand * dist_uplow + lower _, s, x, y = self.run_actions_as_feats(a, reset=reset) return a, s, x, y def clip_actions(self, u): return np.clip(u, self.a_lim[:, 0], self.a_lim[:, 1]) def run_actions(self, u, repeat_infinite=False, plot=False, render=False, vr=None, reset=True): if repeat_infinite and not render: repeat_infinite = False if u.ndim == 1 or (np.shape(u)[-1] != 1 and self.a_dim == 1): u = np.expand_dims(u, axis=-1) if u.ndim == 2: u = np.expand_dims(u, axis=0) for ex in range(u.shape[0]): u[ex] = self.clip_actions(u[ex]) if reset: self.reset() if render: for _ in range(5): self.render() if vr is not None: vr.capture_frame() s = self.obs_state for i in range(u[ex].shape[0]): a = u[ex][i, :] self.step(a) if render: self.render() if vr is not None: vr.capture_frame() s = np.vstack((s, self.obs_state)) if plot: uflat = np.reshape(u, (-1, u.shape[-1])) self.plot(s, uflat, "Simulated") if repeat_infinite: return self.run_actions(u, repeat_infinite=repeat_infinite, plot=False) else: if render: self.close() return s, u def plot(self, o, a, title=None, varx=None, ax=None, postfix=""): if ax is None: axr = plt.figure() if title is not None: plt.title(title) else: axr = ax if a is not None: a = np.squeeze(a) if a.ndim == 1: a = a[:, None] for i in range(self.a_dim): ai = 
a[:, i] plt.plot(ai, label="a{}{}".format(i, postfix)) if o.ndim == 1: o = o[:, None] for i in range(self.o_dim): if self.o_labels is not None: lbl = self.o_labels[i] else: lbl = "x{}".format(i) self.var_plt(o[:, i], label=lbl + postfix, var=varx) if ax is None: plt.legend() plt.show() return axr
MIT License
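Environment is abstract, so the variance-band idea is shown standalone with synthetic data; this mirrors the fill_between call in var_plt above rather than instantiating any rhc class.

# Sketch: plot a trajectory and shade a band derived from per-step variances,
# mirroring the log-variance band used by var_plt. Synthetic data only.
import numpy as np
import matplotlib.pyplot as plt

x = np.cumsum(np.random.randn(50))          # synthetic 1-D trajectory
var = np.abs(np.random.randn(50, 1)) + 0.1  # synthetic variances, shape (T, 1)

line = plt.plot(x, label="x0")
band = np.log(var + 1e-5)[:, 0] / 2
plt.fill_between(np.arange(x.shape[0]), x - band, x + band,
                 color=line[0].get_color(), alpha=0.2)
plt.legend()
plt.show()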
maniacallabs/bibliopixelanimations
BiblioPixelAnimations/matrix/TicTacToe.py
Tic.available_combos
python
def available_combos(self, player):
    return self.available_moves() + self.get_squares(player)
what combos are available?
https://github.com/maniacallabs/bibliopixelanimations/blob/fba81f6b94f5265272a53f462ef013df1ccdb426/BiblioPixelAnimations/matrix/TicTacToe.py#L28-L30
import random from bibliopixel.animation.matrix import Matrix from bibliopixel.colors import COLORS class Tic: winning_combos = ( [0, 1, 2], [3, 4, 5], [6, 7, 8], [0, 3, 6], [1, 4, 7], [2, 5, 8], [0, 4, 8], [2, 4, 6]) winners = ('X-win', 'Draw', 'O-win') def __init__(self, squares=[]): if len(squares) == 0: self.clearBoard() else: self.squares = squares def clearBoard(self): self.squares = [None for i in range(9)] def available_moves(self): return [k for k, v in enumerate(self.squares) if v is None]
MIT License
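A hedged sketch of calling available_combos; get_squares is defined elsewhere in the Tic class (outside the excerpt above), and the import assumes BiblioPixelAnimations and its bibliopixel dependency are installed.

# Sketch: on an empty board every cell is an available move and the player
# holds no squares yet, so available_combos returns just the open cells.
from BiblioPixelAnimations.matrix.TicTacToe import Tic

board = Tic()
print(board.available_moves())        # [0, 1, 2, ..., 8]
print(board.available_combos('X'))    # available moves plus X's squares (none yet)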
okta/okta-sdk-python
okta/cache/cache.py
Cache.create_key
python
def create_key(self, request):
    url_object = urlparse(request)
    return url_object.geturl()
A method used to create a unique key for an entry in the cache. Used with URLs that requests fire at.

Arguments:
    request {str} -- The key to use to produce a unique key

Returns:
    str -- Unique key based on the input
https://github.com/okta/okta-sdk-python/blob/c86b8fdc4525e84199143c27213c0aebc6b2af8f/okta/cache/cache.py#L75-L88
from urllib.parse import urlparse class Cache(): def __init__(self): pass def get(self, key): raise NotImplementedError def contains(self, key): raise NotImplementedError def add(self, key, value): raise NotImplementedError def delete(self, key): raise NotImplementedError def clear(self): raise NotImplementedError
Apache License 2.0
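A hedged sketch of create_key; the base Cache class has an empty constructor, so it can be exercised directly if the okta package is installed. The URL shown is a placeholder.

# Sketch: create_key normalises the request URL via urlparse().geturl(),
# so the cache key for a URL is the URL itself in canonical form.
from okta.cache.cache import Cache

cache = Cache()
key = cache.create_key("https://example.okta.com/api/v1/users?limit=5")
print(key)   # https://example.okta.com/api/v1/users?limit=5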
hyperledger/avalon
sdk/avalon_sdk/work_order/work_order_params.py
WorkOrderParams.set_requester_id
python
def set_requester_id(self, requester_id):
    self.params_obj["requesterId"] = requester_id
Set requesterId work order parameter.
https://github.com/hyperledger/avalon/blob/cf762fd08b34cc3ba01aaec0143168c60c35d929/sdk/avalon_sdk/work_order/work_order_params.py#L130-L132
import json import logging import schema_validation.validate as WOcheck import avalon_crypto_utils.crypto_utility as crypto_utility import avalon_crypto_utils.worker_encryption as worker_encryption import avalon_crypto_utils.worker_signing as worker_signing import avalon_crypto_utils.worker_hash as worker_hash from error_code.error_status import WorkOrderStatus import utility.jrpc_utility as util logger = logging.getLogger(__name__) class WorkOrderParams(): def __init__(self): self.params_obj = {} self.signer = worker_signing.WorkerSign() self.encrypt = worker_encryption.WorkerEncrypt() self.hasher = worker_hash.WorkerHash() def create_request( self, work_order_id, worker_id, workload_id, requester_id, session_key, session_iv, requester_nonce, verifying_key=None, payload_format="JSON-RPC", response_timeout_msecs=6000, result_uri=None, notify_uri=None, worker_encryption_key=None, data_encryption_algorithm=None, encrypted_session_key=None): if work_order_id: self.set_work_order_id(work_order_id) self.set_response_timeout_msecs(response_timeout_msecs) self.set_payload_format(payload_format) self.set_requester_nonce(requester_nonce) self.session_key = session_key self.set_workload_id(workload_id) self.set_worker_id(worker_id) if requester_id is not None: self.set_requester_id(requester_id) if session_iv: self.set_session_key_iv( crypto_utility.byte_array_to_hex(session_iv)) if result_uri: self.set_result_uri(result_uri) if notify_uri: self.set_notify_uri(notify_uri) if worker_encryption_key: self.set_worker_encryption_key( worker_encryption_key) if data_encryption_algorithm: self.set_data_encryption_algorithm(data_encryption_algorithm) self.set_encrypted_session_key(encrypted_session_key) code, err_msg = WOcheck.schema_validation( "sdk_WorkOrderSubmit", self.params_obj) if not code: return util.create_error_response( WorkOrderStatus.INVALID_PARAMETER_FORMAT_OR_VALUE, 0, err_msg) self.set_worker_encryption_key( worker_encryption_key.encode("UTF-8").hex()) self.session_iv = session_iv self.params_obj["encryptedRequestHash"] = "" self.params_obj["requesterSignature"] = "" self.params_obj["inData"] = list() if encrypted_session_key is None: try: encrypted_session_key = self.encrypt.encrypt_session_key( session_key, worker_encryption_key) self.set_encrypted_session_key( crypto_utility.byte_array_to_hex(encrypted_session_key)) except Exception as err: return util.create_error_response( WorkOrderStatus.INVALID_PARAMETER_FORMAT_OR_VALUE, 0, err) return None def set_response_timeout_msecs(self, response_timeout_msecs): self.params_obj["responseTimeoutMSecs"] = response_timeout_msecs def set_payload_format(self, payload_format): self.params_obj["payloadFormat"] = payload_format def set_result_uri(self, result_uri): self.params_obj["resultUri"] = result_uri def set_notify_uri(self, notify_uri): self.params_obj["notifyUri"] = notify_uri def set_worker_id(self, worker_id): self.params_obj["workerId"] = worker_id def set_work_order_id(self, work_order_id): self.params_obj["workOrderId"] = work_order_id def set_workload_id(self, workload_id): self.params_obj["workloadId"] = workload_id
Apache License 2.0
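A small hedged sketch; constructing WorkOrderParams pulls in the Avalon crypto utilities, so this assumes the avalon_sdk package and its dependencies are importable. The requester id is a placeholder.

# Sketch: setters on WorkOrderParams fill in fields of the JSON-RPC params object.
from avalon_sdk.work_order.work_order_params import WorkOrderParams

params = WorkOrderParams()
params.set_requester_id("0x3456")          # placeholder requester id
print(params.params_obj["requesterId"])    # 0x3456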
demisto/demisto-py
demisto_client/demisto_api/models/task_loop.py
TaskLoop.script_id
python
def script_id(self, script_id):
    self._script_id = script_id
Sets the script_id of this TaskLoop.

:param script_id: The script_id of this TaskLoop.  # noqa: E501
:type: str
https://github.com/demisto/demisto-py/blob/95d29e07693d27c133f7fe6ef9da13e4b6dbf542/demisto_client/demisto_api/models/task_loop.py#L225-L233
import pprint import re import six from demisto_client.demisto_api.models.advance_arg import AdvanceArg from demisto_client.demisto_api.models.arg_filter import ArgFilter class TaskLoop(object): """ Attributes: swagger_types (dict): The key is attribute name and the value is attribute type. attribute_map (dict): The key is attribute name and the value is json key in definition. """ swagger_types = { 'brand': 'str', 'builtin_condition': 'list[ArgFilter]', 'exit_condition': 'str', 'for_each': 'bool', 'is_command': 'bool', 'script_arguments': 'dict(str, AdvanceArg)', 'script_id': 'str', 'wait': 'int' } attribute_map = { 'brand': 'brand', 'builtin_condition': 'builtinCondition', 'exit_condition': 'exitCondition', 'for_each': 'forEach', 'is_command': 'isCommand', 'script_arguments': 'scriptArguments', 'script_id': 'scriptId', 'wait': 'wait' } def __init__(self, brand=None, builtin_condition=None, exit_condition=None, for_each=None, is_command=None, script_arguments=None, script_id=None, wait=None): self._brand = None self._builtin_condition = None self._exit_condition = None self._for_each = None self._is_command = None self._script_arguments = None self._script_id = None self._wait = None self.discriminator = None if brand is not None: self.brand = brand if builtin_condition is not None: self.builtin_condition = builtin_condition if exit_condition is not None: self.exit_condition = exit_condition if for_each is not None: self.for_each = for_each if is_command is not None: self.is_command = is_command if script_arguments is not None: self.script_arguments = script_arguments if script_id is not None: self.script_id = script_id if wait is not None: self.wait = wait @property def brand(self): return self._brand @brand.setter def brand(self, brand): self._brand = brand @property def builtin_condition(self): return self._builtin_condition @builtin_condition.setter def builtin_condition(self, builtin_condition): self._builtin_condition = builtin_condition @property def exit_condition(self): return self._exit_condition @exit_condition.setter def exit_condition(self, exit_condition): self._exit_condition = exit_condition @property def for_each(self): return self._for_each @for_each.setter def for_each(self, for_each): self._for_each = for_each @property def is_command(self): return self._is_command @is_command.setter def is_command(self, is_command): self._is_command = is_command @property def script_arguments(self): return self._script_arguments @script_arguments.setter def script_arguments(self, script_arguments): self._script_arguments = script_arguments @property def script_id(self): return self._script_id @script_id.setter
Apache License 2.0
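A small hedged sketch of the generated-model setter, assuming the demisto-py client is installed; the script id is a placeholder.

# Sketch: TaskLoop is a generated swagger model; script_id is a plain property.
from demisto_client.demisto_api.models.task_loop import TaskLoop

task_loop = TaskLoop()
task_loop.script_id = "a1b2c3"     # placeholder script id
print(task_loop.script_id)         # a1b2c3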
ericssonresearch/calvin-base
calvin/utilities/confsort.py
Options.dict
python
def dict(self):
    optionsdict = {}
    for option in self.options:
        optionsdict[option.key] = option.value
    return optionsdict
Return unstructured dictionary with key, value of options.
https://github.com/ericssonresearch/calvin-base/blob/bc4645c2061c30ca305a660e48dc86e3317f5b6f/calvin/utilities/confsort.py#L40-L47
from operator import itemgetter, attrgetter, methodcaller class Options: def __init__(self): self.options = [] def insert(self, option): self.options.append(option) def __repr__(self): return repr(self.options)
Apache License 2.0
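The Option objects stored by Options are not shown in the excerpt, only that they expose .key and .value; a hedged sketch using a stand-in namedtuple for them.

# Sketch: Options.dict() flattens the stored option objects into {key: value}.
# Option here is a stand-in with the .key/.value attributes the method relies on;
# the real Option class lives elsewhere in calvin.utilities.confsort.
from collections import namedtuple
from calvin.utilities.confsort import Options

Option = namedtuple("Option", ["key", "value"])

options = Options()
options.insert(Option("loglevel", "DEBUG"))
options.insert(Option("port", 5000))
print(options.dict())   # {'loglevel': 'DEBUG', 'port': 5000}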
woudt/bunq2ifttt
app/bunq.py
session_request
python
def session_request(method, endpoint, config, data=None, extra_headers=None):
    result = request(method, endpoint, config, data, extra_headers)
    if isinstance(result, dict) and "Error" in result \
            and result["Error"][0]["error_description"] in \
            ["Insufficient authorisation.", "Insufficient authentication."]:
        refresh_session_token(config)
        result = request(method, endpoint, config, data, extra_headers)
    return result
Send a request, refreshing session keys if needed
https://github.com/woudt/bunq2ifttt/blob/de53ca03743b705c4f5149c756e0fd90d55231ee/app/bunq.py#L312-L320
import base64 import json import re import secrets import traceback import requests from cryptography.exceptions import InvalidSignature from cryptography.hazmat.backends import default_backend from cryptography.hazmat.primitives import hashes, hmac, serialization from cryptography.hazmat.primitives.asymmetric import padding, rsa from cryptography.hazmat.primitives.ciphers import Cipher, algorithms, modes import storage NAME = "bunq2IFTTT" def get(endpoint, config={}): return session_request('GET', endpoint, config) def post(endpoint, data, config={}): return session_request('POST', endpoint, config, data) def put(endpoint, data, config={}): return session_request('PUT', endpoint, config, data) def delete(endpoint, config={}): return session_request('DELETE', endpoint, config) def install(token, name=NAME, allips=False, urlroot=None, mode=None): try: oldconfig = {} retrieve_config(oldconfig) config = {"access_token": token, "mode": mode} if "permissions" in oldconfig: config["permissions"] = oldconfig["permissions"] if "private_key" not in config: generate_key(config) install_key(config) register_token(config, name, allips) retrieve_userid(config) retrieve_accounts(config) save_config(config) if urlroot is not None: register_callback(config, urlroot) if urlroot is not None and "user_id" in oldconfig and oldconfig["user_id"] != config["user_id"]: unregister_callback(oldconfig) return config except: traceback.print_exc() raise def generate_key(config): print("[bunq] Generating new private key...") my_private_key = rsa.generate_private_key( public_exponent=65537, key_size=2048, backend=default_backend() ) my_private_key_enc = str(my_private_key.private_bytes( encoding=serialization.Encoding.PEM, format=serialization.PrivateFormat.PKCS8, encryption_algorithm=serialization.NoEncryption() ), encoding='ascii') my_public_key = my_private_key.public_key() my_public_key_enc = str(my_public_key.public_bytes( encoding=serialization.Encoding.PEM, format=serialization.PublicFormat.SubjectPublicKeyInfo ), encoding='ascii') config["private_key"] = my_private_key config["private_key_enc"] = my_private_key_enc config["public_key"] = my_public_key config["public_key_enc"] = my_public_key_enc def install_key(config): print("[bunq] Installing key...") data = {"client_public_key": config["public_key_enc"]} result = post("v1/installation", data, config) install_token = result["Response"][1]["Token"]["token"] srv_key = result["Response"][2]["ServerPublicKey"]["server_public_key"] config["install_token"] = install_token config["server_key_enc"] = srv_key config["server_key"] = serialization.load_pem_public_key( srv_key.encode("ascii"), backend=default_backend()) def register_token(config, name, allips): print("[bunq] Registering token...") if allips: ips = ["*"] else: ips = [requests.get("https://api.ipify.org").text] data = {"description": name, "secret": config["access_token"], "permitted_ips": ips} post("v1/device-server", data, config) def retrieve_userid(config): print("[bunq] Retrieving userid...") result = get("v1/user", config) for user in result["Response"]: for typ in user: userid = user[typ]["id"] config["user_id"] = userid _TYPE_TRANSLATION = { "MonetaryAccountBank": "monetary-account-bank", "MonetaryAccountJoint": "monetary-account-joint", "MonetaryAccountSavings": "monetary-account-savings", } def retrieve_accounts(config): print("[bunq] Retrieving accounts...") config["accounts"] = [] result = get("v1/user/{}/monetary-account".format(config["user_id"]), config) for res in result["Response"]: for typ 
in res: acc = res[typ] type_url = _TYPE_TRANSLATION[typ] if acc["status"] == "ACTIVE": iban = None for alias in acc["alias"]: if alias["type"] == "IBAN": iban = alias["value"] name = alias["name"] accinfo = {"iban": iban, "name": name, "type": type_url, "id": acc["id"], "description": acc["description"]} config["accounts"].append(accinfo) def retrieve_account_balances(config): print("[bunq] Retrieving account balances...") result = get("v1/user/{}/monetary-account".format(config["user_id"]), config) response = {} for res in result["Response"]: for typ in res: acc = res[typ] type_url = _TYPE_TRANSLATION[typ] if acc["status"] == "ACTIVE": iban = None for alias in acc["alias"]: if alias["type"] == "IBAN": iban = alias["value"] response[iban] = float(acc["balance"]["value"]) return response def register_callback(config, urlroot): print("[bunq] Set notification filters...") post("v1/user/{}/notification-filter-url".format(config["user_id"]), { "notification_filters": [{ "category": "MUTATION", "notification_target": urlroot + "/bunq2ifttt_mutation" }, { "category": "REQUEST", "notification_target": urlroot + "/bunq2ifttt_request" }] }, config) def unregister_callback(config): print("[bunq] Removing old notification filters...") old = get("v1/user/{}/notification-filter-url".format(config["user_id"]), config) new = {"notification_filters": []} if "notification_filters" in old: for noti in old["notification_filters"]: if not noti["notification_target"].endswith("bunq2ifttt_mutation") and not noti["notification_target"].endswith("bunq2ifttt_request"): new["notification_filters"].append(noti) print("old: "+json.dumps(old)) print("new: "+json.dumps(new)) post("v1/user/{}/notification-filter-url".format(config["user_id"]), new, config) def save_config(config): tosave = {} for key in config: if isinstance(config[key], (str, int, float, dict, list)) or config[key] is None: tosave[key] = config[key] storage.store_large("bunq2IFTTT", "bunq_config", tosave) def retrieve_config(config={}): for key in list(config.keys()): del config[key] toload = storage.get_value("bunq2IFTTT", "bunq_config") if toload is not None: for key in toload: config[key] = toload[key] if "server_key_enc" in config: config["server_key"] = serialization.load_pem_public_key( config["server_key_enc"].encode("ascii"), backend=default_backend()) if "public_key_enc" in config: config["public_key"] = serialization.load_pem_public_key( config["public_key_enc"].encode("ascii"), backend=default_backend()) if "private_key_enc" in config: config["private_key"] = serialization.load_pem_private_key( config["private_key_enc"].encode("ascii"), password=None, backend=default_backend()) return config def get_session_token(config): if "private_key" not in config: retrieve_config(config) if "session_token" not in config: refresh_session_token(config) return config["session_token"] def get_access_token(config): if "access_token" not in config: retrieve_config(config) return config["access_token"] def get_install_token(config): if "install_token" not in config: retrieve_config(config) return config["install_token"] def get_server_key(config): if "server_key" not in config: retrieve_config(config) return config["server_key"] def get_private_key(config): if "private_key" not in config: retrieve_config(config) return config["private_key"] def get_public_key(config): if "public_key" not in config: retrieve_config(config) return config["public_key"]
MIT License
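As a side note on the bunq2IFTTT context above: generate_key() and retrieve_config() round-trip the RSA key pair through PEM strings so it can live in plain storage. Below is a minimal standalone sketch of that round-trip, using only the cryptography package; the config dict is illustrative and no bunq API call is made.

# Hedged sketch: generate an RSA key pair, serialize it the way generate_key() does,
# then reload the PEM the way retrieve_config() does. No bunq API traffic involved.
from cryptography.hazmat.backends import default_backend
from cryptography.hazmat.primitives import serialization
from cryptography.hazmat.primitives.asymmetric import rsa

config = {}

# Mirrors generate_key(config): keep the PEM encodings alongside the key objects.
private_key = rsa.generate_private_key(
    public_exponent=65537, key_size=2048, backend=default_backend()
)
config["private_key_enc"] = private_key.private_bytes(
    encoding=serialization.Encoding.PEM,
    format=serialization.PrivateFormat.PKCS8,
    encryption_algorithm=serialization.NoEncryption(),
).decode("ascii")
config["public_key_enc"] = private_key.public_key().public_bytes(
    encoding=serialization.Encoding.PEM,
    format=serialization.PublicFormat.SubjectPublicKeyInfo,
).decode("ascii")

# Mirrors the reload branch of retrieve_config(config): rebuild the key object from the PEM text.
reloaded = serialization.load_pem_private_key(
    config["private_key_enc"].encode("ascii"), password=None, backend=default_backend()
)
assert reloaded.key_size == 2048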
psiq/gdsfactory
pp/components/rectangle.py
rectangle
python
def rectangle(
    size: Tuple[float, float] = (4.0, 2.0),
    layer: Tuple[int, int] = pp.LAYER.WG,
    centered: bool = False,
    ports_parameters: Dict[str, List[Tuple[float, float]]] = {},
    **port_settings
) -> Component:
    c = pp.Component()
    w, h = size
    if centered:
        points = [
            [-w / 2.0, -h / 2.0],
            [-w / 2.0, h / 2],
            [w / 2, h / 2],
            [w / 2, -h / 2.0],
        ]
    else:
        points = [[w, h], [w, 0], [0, 0], [0, h]]
    c.add_polygon(points, layer=layer)
    i = 0
    for direction, list_port_params in ports_parameters.items():
        angle = DIRECTION_TO_ANGLE[direction]
        for x_or_y, width in list_port_params:
            if direction == "W":
                position = (0, x_or_y)
            elif direction == "E":
                position = (w, x_or_y)
            elif direction == "S":
                position = (x_or_y, 0)
            elif direction == "N":
                position = (x_or_y, h)
            c.add_port(
                name="{}".format(i),
                orientation=angle,
                midpoint=position,
                width=width,
                layer=layer,
                **port_settings
            )
            i += 1
    pp.port.auto_rename_ports(c)
    return c
rectangle

Args:
    size: (tuple) Width and height of rectangle.
    layer: (int, array-like[2], or set) Specific layer(s) to put polygon geometry on.
    ports: {direction: [(x_or_y, width), ...]}
        direction: 'W', 'E', 'N' or 'S'

.. plot::
    :include-source:

    import pp

    c = pp.c.rectangle(size=(4, 2), layer=0)
    pp.plotgds(c)
https://github.com/psiq/gdsfactory/blob/34c8ecbed465e8eda0d5116687fd02e95e530f35/pp/components/rectangle.py#L9-L73
import pp
from pp.component import Component
from typing import Dict, List, Tuple

DIRECTION_TO_ANGLE = {"W": 180, "E": 0, "N": 90, "S": 270}


@pp.autoname
MIT License
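A hedged usage sketch for the rectangle factory above, adding ports via ports_parameters; it assumes the legacy pp package (gdsfactory at this commit) is importable, and the (1, 0) layer tuple is just an example value.

# Hedged sketch: a 4 x 2 rectangle with one 0.5-wide port on the east edge and one on the
# north edge, then inspect the names assigned by pp.port.auto_rename_ports.
import pp

c = pp.c.rectangle(
    size=(4.0, 2.0),
    layer=(1, 0),                 # example layer; the default is pp.LAYER.WG
    ports_parameters={
        "E": [(1.0, 0.5)],        # (x_or_y, width): port at y=1.0 on the east edge
        "N": [(2.0, 0.5)],        # port at x=2.0 on the north edge
    },
)
print(c.ports)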
pypa/twine
twine/utils.py
get_userpass_value
python
def get_userpass_value(
    cli_value: Optional[str],
    config: RepositoryConfig,
    key: str,
    prompt_strategy: Optional[Callable[[], str]] = None,
) -> Optional[str]:
    if cli_value is not None:
        logger.info(f"{key} set by command options")
        return cli_value
    elif config.get(key) is not None:
        logger.info(f"{key} set from config file")
        return config[key]
    elif prompt_strategy:
        warning = ""
        value = prompt_strategy()
        if not value:
            warning = f"Your {key} is empty"
        elif any(unicodedata.category(c).startswith("C") for c in value):
            warning = f"Your {key} contains control characters"
        if warning:
            logger.warning(f" {warning}. Did you enter it correctly?")
            logger.warning(
                " See https://twine.readthedocs.io/#entering-credentials "
                "for more information."
            )
        return value
    else:
        return None
Get a credential (e.g. a username or password) from the configuration.

Uses the following rules:

1. If ``cli_value`` is specified, use that.
2. If ``config[key]`` is specified, use that.
3. If ``prompt_strategy`` is specified, use its return value.
4. Otherwise return ``None``

:param cli_value:
    The value supplied from the command line.
:param config:
    A dictionary of repository configuration values.
:param key:
    The credential to look up in ``config``, e.g. ``"username"`` or ``"password"``.
:param prompt_strategy:
    An argumentless function to get the value, e.g. from keyring or by prompting the user.

:return:
    The credential value, i.e. the username or password.
https://github.com/pypa/twine/blob/658037f05898b4dc96113628153aa2691b0dbfa3/twine/utils.py#L213-L270
import argparse import collections import configparser import functools import logging import os import os.path import unicodedata from typing import Any, Callable, DefaultDict, Dict, Optional, Sequence, Union from urllib.parse import urlparse from urllib.parse import urlunparse import requests import rfc3986 from twine import exceptions input_func = input DEFAULT_REPOSITORY = "https://upload.pypi.org/legacy/" TEST_REPOSITORY = "https://test.pypi.org/legacy/" DEFAULT_CONFIG_FILE = "~/.pypirc" RepositoryConfig = Dict[str, Optional[str]] logger = logging.getLogger(__name__) def get_config(path: str) -> Dict[str, RepositoryConfig]: realpath = os.path.realpath(os.path.expanduser(path)) parser = configparser.RawConfigParser() try: with open(realpath) as f: parser.read_file(f) logger.info(f"Using configuration from {realpath}") except FileNotFoundError: if path != DEFAULT_CONFIG_FILE: raise defaults: RepositoryConfig = { "username": parser.get("server-login", "username", fallback=None), "password": parser.get("server-login", "password", fallback=None), } config: DefaultDict[str, RepositoryConfig] config = collections.defaultdict(lambda: defaults.copy()) index_servers = parser.get( "distutils", "index-servers", fallback="pypi testpypi" ).split() config["pypi"]["repository"] = DEFAULT_REPOSITORY if "testpypi" in index_servers: config["testpypi"]["repository"] = TEST_REPOSITORY for repository in index_servers: for key in [ "username", "repository", "password", "ca_cert", "client_cert", ]: if parser.has_option(repository, key): config[repository][key] = parser.get(repository, key) return dict(config) def _validate_repository_url(repository_url: str) -> None: validator = ( rfc3986.validators.Validator() .allow_schemes("http", "https") .require_presence_of("scheme", "host") ) try: validator.validate(rfc3986.uri_reference(repository_url)) except rfc3986.exceptions.RFC3986Exception as exc: raise exceptions.UnreachableRepositoryURLDetected( f"Invalid repository URL: {exc.args[0]}." ) def get_repository_from_config( config_file: str, repository: str, repository_url: Optional[str] = None, ) -> RepositoryConfig: if repository_url: _validate_repository_url(repository_url) return { "repository": repository_url, "username": None, "password": None, } try: return get_config(config_file)[repository] except OSError as exc: raise exceptions.InvalidConfiguration(str(exc)) except KeyError: raise exceptions.InvalidConfiguration( f"Missing '{repository}' section from {config_file}.\n" f"More info: https://packaging.python.org/specifications/pypirc/ " ) _HOSTNAMES = { "pypi.python.org", "testpypi.python.org", "upload.pypi.org", "test.pypi.org", } def normalize_repository_url(url: str) -> str: parsed = urlparse(url) if parsed.netloc in _HOSTNAMES: return urlunparse(("https",) + parsed[1:]) return urlunparse(parsed) def get_file_size(filename: str) -> str: file_size = os.path.getsize(filename) / 1024 size_unit = "KB" if file_size > 1024: file_size = file_size / 1024 size_unit = "MB" return f"{file_size:.1f} {size_unit}" def check_status_code(response: requests.Response, verbose: bool) -> None: if response.status_code == 410 and "pypi.python.org" in response.url: raise exceptions.UploadToDeprecatedPyPIDetected( f"It appears you're uploading to pypi.python.org (or " f"testpypi.python.org). You've received a 410 error response. " f"Uploading to those sites is deprecated. The new sites are " f"pypi.org and test.pypi.org. Try using {DEFAULT_REPOSITORY} (or " f"{TEST_REPOSITORY}) to upload your packages instead. 
These are " f"the default URLs for Twine now. More at " f"https://packaging.python.org/guides/migrating-to-pypi-org/." ) elif response.status_code == 405 and "pypi.org" in response.url: raise exceptions.InvalidPyPIUploadURL( f"It appears you're trying to upload to pypi.org but have an " f"invalid URL. You probably want one of these two URLs: " f"{DEFAULT_REPOSITORY} or {TEST_REPOSITORY}. Check your " f"--repository-url value." ) try: response.raise_for_status() except requests.HTTPError as err: if not verbose: logger.warning( "Error during upload. " "Retry with the --verbose option for more details." ) if response.text: logger.info("Content received from server:\n{}".format(response.text)) raise err
Apache License 2.0
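A small hedged sketch of get_userpass_value's resolution order; the config dict and the getpass-based prompt below are stand-ins, not twine's own wiring.

# Hedged sketch: the three lookup rules of get_userpass_value, exercised directly.
import functools
import getpass

from twine.utils import get_userpass_value

config = {"username": "alice", "password": None}

# 1. A CLI value wins when supplied.
assert get_userpass_value("bob", config, "username") == "bob"

# 2. Otherwise fall back to the config file entry.
assert get_userpass_value(None, config, "username") == "alice"

# 3. Otherwise ask the prompt strategy (here: an interactive getpass prompt).
password = get_userpass_value(
    None, config, "password",
    prompt_strategy=functools.partial(getpass.getpass, "password: "),
)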
googleapis/python-bigtable
google/cloud/bigtable/backup.py
Backup.update_expire_time
python
def update_expire_time(self, new_expire_time):
    backup_update = table.Backup(
        name=self.name,
        expire_time=_datetime_to_pb_timestamp(new_expire_time),
    )
    update_mask = field_mask_pb2.FieldMask(paths=["expire_time"])
    api = self._instance._client.table_admin_client
    api.update_backup(request={"backup": backup_update, "update_mask": update_mask})
    self._expire_time = new_expire_time
Update the expire time of this Backup.

:type new_expire_time: :class:`datetime.datetime`
:param new_expire_time: the new expiration time timestamp
https://github.com/googleapis/python-bigtable/blob/a99bf88417d6aec03923447c70c2752f6bb5c459/google/cloud/bigtable/backup.py#L378-L390
import re from google.cloud._helpers import _datetime_to_pb_timestamp from google.cloud.bigtable_admin_v2 import BigtableTableAdminClient from google.cloud.bigtable_admin_v2.types import table from google.cloud.bigtable.encryption_info import EncryptionInfo from google.cloud.bigtable.policy import Policy from google.cloud.exceptions import NotFound from google.protobuf import field_mask_pb2 _BACKUP_NAME_RE = re.compile( r"^projects/(?P<project>[^/]+)/" r"instances/(?P<instance_id>[a-z][-a-z0-9]*)/" r"clusters/(?P<cluster_id>[a-z][-a-z0-9]*)/" r"backups/(?P<backup_id>[a-z][a-z0-9_\-]*[a-z0-9])$" ) _TABLE_NAME_RE = re.compile( r"^projects/(?P<project>[^/]+)/" r"instances/(?P<instance_id>[a-z][-a-z0-9]*)/" r"tables/(?P<table_id>[_a-zA-Z0-9][-_.a-zA-Z0-9]*)$" ) class Backup(object): def __init__( self, backup_id, instance, cluster_id=None, table_id=None, expire_time=None, encryption_info=None, ): self.backup_id = backup_id self._instance = instance self._cluster = cluster_id self.table_id = table_id self._expire_time = expire_time self._encryption_info = encryption_info self._parent = None self._source_table = None self._start_time = None self._end_time = None self._size_bytes = None self._state = None @property def name(self): if not self._cluster: raise ValueError('"cluster" parameter must be set') return BigtableTableAdminClient.backup_path( project=self._instance._client.project, instance=self._instance.instance_id, cluster=self._cluster, backup=self.backup_id, ) @property def cluster(self): return self._cluster @cluster.setter def cluster(self, cluster_id): self._cluster = cluster_id @property def parent(self): if not self._parent and self._cluster: self._parent = BigtableTableAdminClient.cluster_path( project=self._instance._client.project, instance=self._instance.instance_id, cluster=self._cluster, ) return self._parent @property def source_table(self): if not self._source_table and self.table_id: self._source_table = BigtableTableAdminClient.table_path( project=self._instance._client.project, instance=self._instance.instance_id, table=self.table_id, ) return self._source_table @property def expire_time(self): return self._expire_time @expire_time.setter def expire_time(self, new_expire_time): self._expire_time = new_expire_time @property def encryption_info(self): return self._encryption_info @property def start_time(self): return self._start_time @property def end_time(self): return self._end_time @property def size_bytes(self): return self._size_bytes @property def state(self): return self._state @classmethod def from_pb(cls, backup_pb, instance): match = _BACKUP_NAME_RE.match(backup_pb.name) if match is None: raise ValueError( "Backup protobuf name was not in the expected format.", backup_pb.name ) if match.group("project") != instance._client.project: raise ValueError( "Project ID of the Backup does not match the Project ID " "of the instance's client" ) instance_id = match.group("instance_id") if instance_id != instance.instance_id: raise ValueError( "Instance ID of the Backup does not match the Instance ID " "of the instance" ) backup_id = match.group("backup_id") cluster_id = match.group("cluster_id") match = _TABLE_NAME_RE.match(backup_pb.source_table) table_id = match.group("table_id") if match else None expire_time = backup_pb._pb.expire_time encryption_info = EncryptionInfo._from_pb(backup_pb.encryption_info) backup = cls( backup_id, instance, cluster_id=cluster_id, table_id=table_id, expire_time=expire_time, encryption_info=encryption_info, ) backup._start_time = 
backup_pb._pb.start_time backup._end_time = backup_pb._pb.end_time backup._size_bytes = backup_pb._pb.size_bytes backup._state = backup_pb._pb.state return backup def __eq__(self, other): if not isinstance(other, self.__class__): return NotImplemented return other.backup_id == self.backup_id and other._instance == self._instance def __ne__(self, other): return not self == other def create(self, cluster_id=None): if not self._expire_time: raise ValueError('"expire_time" parameter must be set') if not self.table_id: raise ValueError('"table" parameter must be set') if cluster_id: self._cluster = cluster_id if not self._cluster: raise ValueError('"cluster" parameter must be set') backup = table.Backup( source_table=self.source_table, expire_time=_datetime_to_pb_timestamp(self.expire_time), ) api = self._instance._client.table_admin_client return api.create_backup( request={ "parent": self.parent, "backup_id": self.backup_id, "backup": backup, } ) def get(self): api = self._instance._client.table_admin_client try: return api.get_backup(request={"name": self.name}) except NotFound: return None def reload(self): backup = self.get() self._source_table = backup.source_table self._expire_time = backup._pb.expire_time self._start_time = backup._pb.start_time self._end_time = backup._pb.end_time self._size_bytes = backup._pb.size_bytes self._state = backup._pb.state def exists(self): return self.get() is not None
Apache License 2.0
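A hedged usage sketch for Backup.update_expire_time; the project, instance, cluster and backup IDs are placeholders, and an authenticated google-cloud-bigtable environment with an existing backup is assumed.

# Hedged sketch: push an existing backup's expiry out by 7 days.
import datetime

from google.cloud import bigtable
from google.cloud.bigtable.backup import Backup

client = bigtable.Client(project="my-project", admin=True)       # placeholder project
instance = client.instance("my-instance")                        # placeholder instance ID

backup = Backup("my-backup", instance, cluster_id="my-cluster")  # existing backup assumed
new_expire_time = datetime.datetime.now(datetime.timezone.utc) + datetime.timedelta(days=7)
backup.update_expire_time(new_expire_time)                       # sends a FieldMask for expire_time only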
derkarnold/pylifx
pylifx/networking.py
LifxSocket.recv
python
def recv(self):
    while True:
        raw_data, addr = self._socket.recvfrom(_RECV_BUFFER_SIZE)
        if raw_data == None or len(raw_data) == 0:
            raise IOError('disconnected')
        try:
            return decode(raw_data), addr
        except Exception as e:
            print 'Invalid packet from', self._net_addr, '-', e
Returns a tuple of ((method, args), addr)
https://github.com/derkarnold/pylifx/blob/ae69511728f5316872d0441608b84d4484c73244/pylifx/networking.py#L101-L112
from __future__ import absolute_import from socket import socket, AF_INET, SOCK_DGRAM, SOCK_STREAM, SOL_SOCKET, SO_REUSEADDR, SO_BROADCAST, error from .packet import encode, decode from re import match from thread import start_new_thread from netifaces import ifaddresses, interfaces _RECV_BUFFER_SIZE = 1024 _LIFX_PROTO_TOBULB = 13312 _LIFX_PROTO_ASBULB = 21504 _BLANK_MAC = '00:00:00:00:00:00' _MAC_ADDR_FORMAT = '([A-Fa-f0-9]{2})[:\-]?([A-Fa-f0-9]{2})[:\-]?([A-Fa-f0-9]{2})[:\-]?([A-Fa-f0-9]{2})[:\-]?([A-Fa-f0-9]{2})[:\-]?([A-Fa-f0-9]{2})' _AVAILABLE_INTERFACES = {} for intf_name in interfaces(): addrs = ifaddresses(intf_name) if addrs.has_key(AF_INET): for addr in addrs[AF_INET]: if addr.has_key('broadcast'): _AVAILABLE_INTERFACES[intf_name] = addr break def get_interfaces(): return _AVAILABLE_INTERFACES def get_interface(intf_name): if intf_name is None: return _AVAILABLE_INTERFACES.itervalues().next() else: return _AVAILABLE_INTERFACES[intf_name] def processMAC(mac): if mac is None: mac = _BLANK_MAC m = match(_MAC_ADDR_FORMAT, mac) if m is None: raise ValueError('invalid MAC address:', mac, '. Address may be colon or hyphen delimited.') else: return ''.join(m.groups()) class LifxSocket(object): def __init__(self, site_addr, bulb_addr, sock, net_addr): self._site_addr = processMAC(site_addr) self._bulb_addr = processMAC(bulb_addr) self._socket = sock self._net_addr = net_addr self._socket.settimeout(1.0) def __del__(self): self.close() def __str__(self): return str(self._net_addr) def __repr__(self): return self.__str__() def close(self): if self._socket is not None: self._socket.close() self._socket = None def send_to_bulb(self, packet_name, **kwargs): self._send(_LIFX_PROTO_TOBULB, packet_name, kwargs) def send_as_bulb(self, packet_name, **kwargs): self._send(_LIFX_PROTO_ASBULB, packet_name, kwargs)
BSD 2-Clause Simplified License
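A hedged sketch of consuming LifxSocket.recv(); pylifx is a Python 2 library, so the example follows suit. The socket setup and the 56700 port number are assumptions, not taken from the excerpt.

# Hedged sketch (Python 2, like the library): wait for one LIFX packet and unpack recv()'s
# ((method, args), addr) return value. LifxSocket sets a 1 second timeout in __init__, so
# recvfrom can raise socket.timeout if nothing arrives.
from socket import socket, AF_INET, SOCK_DGRAM, SOL_SOCKET, SO_REUSEADDR, timeout
from pylifx.networking import LifxSocket

udp = socket(AF_INET, SOCK_DGRAM)
udp.setsockopt(SOL_SOCKET, SO_REUSEADDR, 1)
udp.bind(('', 56700))                         # 56700 as the LIFX UDP port is an assumption

bulbs = LifxSocket(None, None, udp, ('', 56700))
try:
    (method, args), addr = bulbs.recv()
    print method, 'from', addr
except timeout:
    print 'no LIFX traffic seen within the timeout'
finally:
    bulbs.close()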
autodesk/aomi
aomi/model/context.py
ensure_backend
python
def ensure_backend(resource, backend, backends, opt, managed=True):
    existing_mount = find_backend(resource.mount, backends)
    if not existing_mount:
        new_mount = backend(resource, opt, managed=managed)
        backends.append(new_mount)
        return new_mount
    return existing_mount
Ensure the backend for a resource is properly in context
https://github.com/autodesk/aomi/blob/84da2dfb0424837adf9c4ddc1aa352e942bb7a4a/aomi/model/context.py#L59-L67
import sys
import inspect
import logging

from future.utils import iteritems

from aomi.helpers import normalize_vault_path
import aomi.exceptions as aomi_excep
from aomi.model.resource import Resource, Mount, Secret, Auth, AuditLog
from aomi.model.aws import AWS
from aomi.model.auth import Policy, UserPass, LDAP
from aomi.model.backend import LogBackend, AuthBackend, SecretBackend

LOG = logging.getLogger(__name__)


def filtered_context(context):
    ctx = Context(context.opt)
    for resource in context.resources():
        if resource.child:
            continue
        if resource.filtered():
            ctx.add(resource)
    return ctx


def childless_first(resource):
    return resource.child


def absent_sort(resource):
    return resource.present


def find_backend(path, backends):
    for backend in backends:
        if backend.path == path:
            return backend
    return None
MIT License
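A self-contained hedged sketch of the ensure_backend contract above; the two stub classes are hypothetical stand-ins and only the path-matching behaviour mirrors find_backend/ensure_backend.

# Hedged sketch: ensure_backend() reuses the backend already registered for a mount path
# and only constructs (and appends) a new one when that path is not in `backends` yet.
from aomi.model.context import ensure_backend

class StubResource(object):
    # hypothetical stand-in for an aomi resource
    def __init__(self, mount):
        self.mount = mount

class StubBackend(object):
    # hypothetical stand-in for a backend class; find_backend compares .path
    def __init__(self, resource, opt, managed=True):
        self.path = resource.mount
        self.managed = managed

backends = []
first = ensure_backend(StubResource("secret/app"), StubBackend, backends, opt=None)
second = ensure_backend(StubResource("secret/app"), StubBackend, backends, opt=None)

assert first is second      # the second call found the existing mount via find_backend()
assert len(backends) == 1   # nothing extra was appended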
ayoolaolafenwa/pixellib
pixellib/torchbackend/instance/modeling/backbone/regnet.py
gap2d
python
def gap2d(): return nn.AdaptiveAvgPool2d((1, 1))
Helper for building a global average pooling layer.
https://github.com/ayoolaolafenwa/pixellib/blob/ae56003c416a98780141a1170c9d888fe9a31317/pixellib/torchbackend/instance/modeling/backbone/regnet.py#L38-L40
import numpy as np
from torch import nn

from pixellib.torchbackend.instance.layers.blocks import CNNBlockBase
from pixellib.torchbackend.instance.layers.batch_norm import get_norm
from pixellib.torchbackend.instance.layers.shape_spec import ShapeSpec

from .backbone import Backbone

__all__ = [
    "AnyNet",
    "RegNet",
    "ResStem",
    "SimpleStem",
    "VanillaBlock",
    "ResBasicBlock",
    "ResBottleneckBlock",
]


def conv2d(w_in, w_out, k, *, stride=1, groups=1, bias=False):
    assert k % 2 == 1, "Only odd size kernels supported to avoid padding issues."
    s, p, g, b = stride, (k - 1) // 2, groups, bias
    return nn.Conv2d(w_in, w_out, k, stride=s, padding=p, groups=g, bias=b)
MIT License
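A quick hedged illustration of what gap2d() returns, assuming only that PyTorch is installed; the local redefinition simply mirrors the two-line helper above.

# Hedged sketch: global average pooling collapses every spatial map to a single value,
# so an (N, C, H, W) activation becomes (N, C, 1, 1).
import torch
from torch import nn

def gap2d():
    return nn.AdaptiveAvgPool2d((1, 1))

x = torch.randn(2, 64, 7, 7)          # batch of 2, 64 channels, 7x7 feature maps
pooled = gap2d()(x)
print(pooled.shape)                   # torch.Size([2, 64, 1, 1])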
bitmovin/bitmovin-api-sdk-python
bitmovin_api_sdk/models/analytics_error_detail.py
AnalyticsErrorDetail.message
python
def message(self): return self._message
Gets the message of this AnalyticsErrorDetail.

Error message

:return: The message of this AnalyticsErrorDetail.
:rtype: string_types
https://github.com/bitmovin/bitmovin-api-sdk-python/blob/79dd938804197151af7cbe5501c7ec1d97872c15/bitmovin_api_sdk/models/analytics_error_detail.py#L192-L201
from enum import Enum from datetime import datetime from six import string_types, iteritems from bitmovin_api_sdk.common.poscheck import poscheck_model from bitmovin_api_sdk.models.analytics_error_data import AnalyticsErrorData import pprint import six class AnalyticsErrorDetail(object): @poscheck_model def __init__(self, error_id=None, time=None, client_time=None, code=None, message=None, error_data=None, http_requests=None): self._error_id = None self._time = None self._client_time = None self._code = None self._message = None self._error_data = None self._http_requests = list() self.discriminator = None if error_id is not None: self.error_id = error_id if time is not None: self.time = time if client_time is not None: self.client_time = client_time if code is not None: self.code = code if message is not None: self.message = message if error_data is not None: self.error_data = error_data if http_requests is not None: self.http_requests = http_requests @property def openapi_types(self): types = { 'error_id': 'int', 'time': 'datetime', 'client_time': 'datetime', 'code': 'int', 'message': 'string_types', 'error_data': 'AnalyticsErrorData', 'http_requests': 'list[AnalyticsHttpRequest]' } return types @property def attribute_map(self): attributes = { 'error_id': 'errorId', 'time': 'time', 'client_time': 'clientTime', 'code': 'code', 'message': 'message', 'error_data': 'errorData', 'http_requests': 'httpRequests' } return attributes @property def error_id(self): return self._error_id @error_id.setter def error_id(self, error_id): if error_id is not None: if not isinstance(error_id, int): raise TypeError("Invalid type for `error_id`, type has to be `int`") self._error_id = error_id @property def time(self): return self._time @time.setter def time(self, time): if time is not None: if not isinstance(time, datetime): raise TypeError("Invalid type for `time`, type has to be `datetime`") self._time = time @property def client_time(self): return self._client_time @client_time.setter def client_time(self, client_time): if client_time is not None: if not isinstance(client_time, datetime): raise TypeError("Invalid type for `client_time`, type has to be `datetime`") self._client_time = client_time @property def code(self): return self._code @code.setter def code(self, code): if code is not None: if not isinstance(code, int): raise TypeError("Invalid type for `code`, type has to be `int`") self._code = code @property
MIT License
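A short hedged sketch of reading the message property; the constructor keywords come from the __init__ shown in the context, the values are made up, and the SDK must be installed.

# Hedged sketch: message is a plain read accessor over the value validated at construction time.
from bitmovin_api_sdk.models.analytics_error_detail import AnalyticsErrorDetail

detail = AnalyticsErrorDetail(code=3011, message="example error message")
print(detail.message)   # -> "example error message"
print(detail.code)      # -> 3011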
skelsec/pydesfire
pyDESFire/pydesfire.py
Desfire.ChangeKeySettings
python
def ChangeKeySettings(self, newKeySettings):
    self.logger.debug('Changing key settings to %s' %('|'.join(a.name for a in newKeySettings),))
    params = int2hex(calc_key_settings(newKeySettings))
    cmd = DESFireCommand.DF_INS_CHANGE_KEY_SETTINGS.value
    raw_data = self.communicate(cmd)
Changes key settings for the key that was used to authenticate with in the current session.
Authentication is ALWAYS needed to call this function.

Args:
    newKeySettings (list): A list of DESFireKeySettings enum values

Returns:
    None
https://github.com/skelsec/pydesfire/blob/59b67eef5d3170a295eccc20cd01de5ecf3bcf52/pyDESFire/pydesfire.py#L853-L866
from enum import Enum import logging import struct from .readers import PCSCReader, DummyReader from .cards import SmartCardTypes, SmartCard from .utils import * from Crypto.Cipher import DES, DES3, AES from Crypto import Random _logger = logging.getLogger(__name__) class DESFireCommand(Enum): DF_INS_AUTHENTICATE_LEGACY = '\x0A' DF_INS_CHANGE_KEY_SETTINGS = '\x54' DF_INS_GET_KEY_SETTINGS = '\x45' DF_INS_CHANGE_KEY = '\xC4' DF_INS_GET_KEY_VERSION = '\x64' DF_INS_CREATE_APPLICATION = '\xCA' DF_INS_DELETE_APPLICATION = '\xDA' DF_INS_GET_APPLICATION_IDS = '\x6A' DF_INS_SELECT_APPLICATION = '\x5A' DF_INS_FORMAT_PICC = '\xFC' DF_INS_GET_VERSION = '\x60' DF_INS_GET_FILE_IDS = '\x6F' DF_INS_GET_FILE_SETTINGS = '\xF5' DF_INS_CHANGE_FILE_SETTINGS = '\x5F' DF_INS_CREATE_STD_DATA_FILE = '\xCD' DF_INS_CREATE_BACKUP_DATA_FILE = '\xCB' DF_INS_CREATE_VALUE_FILE = '\xCC' DF_INS_CREATE_LINEAR_RECORD_FILE = '\xC1' DF_INS_CREATE_CYCLIC_RECORD_FILE = '\xC0' DF_INS_DELETE_FILE = '\xDF' DF_INS_READ_DATA = '\xBD' DF_INS_WRITE_DATA = '\x3D' DF_INS_GET_VALUE = '\x6C' DF_INS_CREDIT = '\x0C' DF_INS_DEBIT = '\xDC' DF_INS_LIMITED_CREDIT = '\x1C' DF_INS_WRITE_RECORD = '\x3B' DF_INS_READ_RECORDS = '\xBB' DF_INS_CLEAR_RECORD_FILE = '\xEB' DF_COMMIT_TRANSACTION = '\xC7' DF_INS_ABORT_TRANSACTION = '\xA7' DF_INS_ADDITIONAL_FRAME = '\xAF' DFEV1_INS_AUTHENTICATE_ISO ='\x1A' DFEV1_INS_AUTHENTICATE_AES ='\xAA' DFEV1_INS_FREE_MEM ='\x6E' DFEV1_INS_GET_DF_NAMES ='\x6D' DFEV1_INS_GET_CARD_UID ='\x51' DFEV1_INS_GET_ISO_FILE_IDS ='\x61' DFEV1_INS_SET_CONFIGURATION ='\x5C' ISO7816_INS_EXTERNAL_AUTHENTICATE ='\x82' ISO7816_INS_INTERNAL_AUTHENTICATE ='\x88' ISO7816_INS_APPEND_RECORD ='\xE2' ISO7816_INS_GET_CHALLENGE ='\x84' ISO7816_INS_READ_RECORDS ='\xB2' ISO7816_INS_SELECT_FILE ='\xA4' ISO7816_INS_READ_BINARY ='\xB0' ISO7816_INS_UPDATE_BINARY ='\xD6' class DESFireStatus(Enum): ST_Success = '\x00' ST_NoChanges = '\x0C' ST_OutOfMemory = '\x0E' ST_IllegalCommand = '\x1C' ST_IntegrityError = '\x1E' ST_KeyDoesNotExist = '\x40' ST_WrongCommandLen = '\x7E' ST_PermissionDenied = '\x9D' ST_IncorrectParam = '\x9E' ST_AppNotFound = '\xA0' ST_AppIntegrityError = '\xA1' ST_AuthentError = '\xAE' ST_MoreFrames = '\xAF' ST_LimitExceeded = '\xBE' ST_CardIntegrityError = '\xC1' ST_CommandAborted = '\xCA' ST_CardDisabled = '\xCD' ST_InvalidApp = '\xCE' ST_DuplicateAidFiles = '\xDE' ST_EepromError = '\xEE' ST_FileNotFound = '\xF0' ST_FileIntegrityError = '\xF1' class DESFireCardVersion(): def __init__(self): self.hardwareVendorId = None self.hardwareType = None self.hardwareSubType = None self.hardwareMajVersion = None self.hardwareMinVersion = None self.hardwareStorageSize= None self.hardwareProtocol = None self.softwareVendorId = None self.softwareType = None self.softwareSubType = None self.softwareMajVersion = None self.softwareMinVersion = None self.softwareStorageSize= None self.softwareProtocol = None self.UID = None self.batchNo = None self.cwProd = None self.yearProd = None self.rawBytes = None def parse(self, data): self.rawBytes = data self.hardwareVendorId = data[0] self.hardwareType = data[1] self.hardwareSubType = data[2] self.hardwareMajVersion = data[3] self.hardwareMinVersion = data[4] self.hardwareStorageSize = data[5] self.hardwareProtocol = data[6] self.softwareVendorId = data[7] self.softwareType = data[8] self.softwareSubType = data[9] self.softwareMajVersion = data[10] self.softwareMinVersion = data[11] self.softwareStorageSize = data[12] self.softwareProtocol = data[13] self.UID = data[14:21] self.batchNo = data[21:25] self.cwProd 
= data[26] self.yearProd = data[27] def __repr__(self): temp = "--- Desfire Card Details ---\r\n" temp += "Hardware Version: %d.%d\r\n"% (hex2int(self.hardwareMajVersion), hex2int(self.hardwareMinVersion)) temp += "Software Version: %d.%d\r\n"% (hex2int(self.softwareMajVersion), hex2int(self.softwareMinVersion)) temp += "EEPROM size: %d bytes\r\n"% (1 << ((hex2int(self.hardwareStorageSize) / 2))) temp += "Production : week %X, year 20%02X\r\n" % (hex2int(self.cwProd), hex2int(self.yearProd)) temp += "UID no : %s\r\n" % (hex2hexstr(self.UID),) temp += "Batch no: %s\r\n" % (hex2hexstr(self.batchNo),) return temp def toDict(self): temp = {} temp['rawBytes'] = self.rawBytes.encode('hex') temp['hardwareVendorId'] = self.hardwareVendorId.encode('hex') temp['hardwareType'] = self.hardwareType.encode('hex') temp['hardwareSubType'] = self.hardwareSubType.encode('hex') temp['hardwareMajVersion'] = self.hardwareMajVersion.encode('hex') temp['hardwareMinVersion'] = self.hardwareMinVersion.encode('hex') temp['hardwareStorageSize'] = self.hardwareStorageSize.encode('hex') temp['hardwareProtocol'] = self.hardwareProtocol.encode('hex') temp['softwareVendorId'] = self.softwareVendorId.encode('hex') temp['softwareType'] = self.softwareType.encode('hex') temp['softwareSubType'] = self.softwareSubType.encode('hex') temp['softwareMajVersion'] = self.softwareMajVersion.encode('hex') temp['softwareMinVersion'] = self.softwareMinVersion.encode('hex') temp['softwareStorageSize'] = self.softwareStorageSize.encode('hex') temp['softwareProtocol'] = self.softwareProtocol.encode('hex') temp['UID'] = self.UID.encode('hex') temp['batchNo'] = self.batchNo.encode('hex') temp['cwProd'] = self.cwProd.encode('hex') temp['yearProd'] = self.yearProd.encode('hex') return temp class DESFireKeySettings(Enum): KS_ALLOW_CHANGE_MK = 0x01 KS_LISTING_WITHOUT_MK = 0x02 KS_CREATE_DELETE_WITHOUT_MK = 0x04 KS_CONFIGURATION_CHANGEABLE = 0x08 KS_CHANGE_KEY_WITH_MK = 0x00 KS_CHANGE_KEY_WITH_KEY_1 = 0x10 KS_CHANGE_KEY_WITH_KEY_2 = 0x20 KS_CHANGE_KEY_WITH_KEY_3 = 0x30 KS_CHANGE_KEY_WITH_KEY_4 = 0x40 KS_CHANGE_KEY_WITH_KEY_5 = 0x50 KS_CHANGE_KEY_WITH_KEY_6 = 0x60 KS_CHANGE_KEY_WITH_KEY_7 = 0x70 KS_CHANGE_KEY_WITH_KEY_8 = 0x80 KS_CHANGE_KEY_WITH_KEY_9 = 0x90 KS_CHANGE_KEY_WITH_KEY_A = 0xA0 KS_CHANGE_KEY_WITH_KEY_B = 0xB0 KS_CHANGE_KEY_WITH_KEY_C = 0xC0 KS_CHANGE_KEY_WITH_KEY_D = 0xD0 KS_CHANGE_KEY_WITH_TARGETED_KEY = 0xE0 KS_CHANGE_KEY_FROZEN = 0xF0 KS_FACTORY_DEFAULT = 0x0F def calc_key_settings(mask): if type(mask) is list: res = 0 for keysetting in mask: res += keysetting.value return res & 0xFF a=2147483648L result = [] while a>>1: a = a>>1 masked = mask&a if masked: if DESFireKeySettings(masked): result.append(DESFireKeySettings(masked)) return result class DESFireAccessRights(Enum): AR_KEY0 = 0x00 AR_KEY1 = 0x01 AR_KEY2 = 0x02 AR_KEY3 = 0x03 AR_KEY4 = 0x04 AR_KEY5 = 0x05 AR_KEY6 = 0x06 AR_KEY7 = 0x07 AR_KEY8 = 0x08 AR_KEY9 = 0x09 AR_KEY10 = 0x0A AR_KEY11 = 0x0B AR_KEY12 = 0x0C AR_KEY13 = 0x0D AR_FREE = 0x0E AR_NEVER = 0x0F class DESFireFilePermissions(): def __init__(self): self.ReadAccess = None self.WriteAccess = None self.ReadAndWriteAccess = None self.ChangeAccess = None def pack(self): return (self.ReadAccess << 12) | (self.WriteAccess << 8) | (self.ReadAndWriteAccess << 4) | self.ChangeAccess; def unpack(self, data): self.ReadAccess = bool((data >> 12) & 0x0F) self.WriteAccess = bool((data >> 8) & 0x0F) self.ReadAndWriteAccess = bool((data >> 4) & 0x0F) self.ChangeAccess = bool((data ) & 0x0F) def __repr__(self): temp = '----- 
DESFireFilePermissions ---\r\n' if self.ReadAccess: temp += 'READ|' if self.WriteAccess: temp += 'WRITE|' if self.ReadAndWriteAccess: temp += 'READWRITE|' if self.ReadAndWriteAccess: temp += 'CHANGE|' return temp def toDict(self): temp = {} temp['ReadAccess'] = self.ReadAccess temp['WriteAccess'] = self.WriteAccess temp['ReadAndWriteAccess'] = self.ReadAndWriteAccess temp['ChangeAccess'] = self.ChangeAccess return temp class DESFireFileEncryption(Enum): CM_PLAIN = 0x00 CM_MAC = 0x01 CM_ENCRYPT = 0x03 class DESFireFileType(Enum): MDFT_STANDARD_DATA_FILE = 0x00 MDFT_BACKUP_DATA_FILE = 0x01 MDFT_VALUE_FILE_WITH_BACKUP = 0x02 MDFT_LINEAR_RECORD_FILE_WITH_BACKUP = 0x03 MDFT_CYCLIC_RECORD_FILE_WITH_BACKUP = 0x04 class DESFireFileSettings: def __init__(self): self.FileType = None self.Encryption = None self.Permissions = DESFireFilePermissions() self.FileSize = None self.LowerLimit = None self.UpperLimit = None self.LimitedCreditValue = None self.LimitedCreditEnabled = None self.RecordSize = None self.MaxNumberRecords = None self.CurrentNumberRecords = None def parse(self, data): self.FileType = DESFireFileType(hex2int(data[0])) self.Encryption = DESFireFileEncryption(hex2int(data[1])) self.Permissions.unpack(struct.unpack('>H',data[2:4])[0]) if self.FileType == DESFireFileType.MDFT_LINEAR_RECORD_FILE_WITH_BACKUP: self.RecordSize = struct.unpack('<I', data[4:6] + '\x00\x00')[0] self.MaxNumberRecords = struct.unpack('<I', data[6:8] + '\x00\x00')[0] self.CurrentNumberRecords = struct.unpack('<I', data[8:10] + '\x00\x00')[0] elif self.FileType == DESFireFileType.MDFT_STANDARD_DATA_FILE: self.FileSize = struct.unpack('<I', data[4:6] + '\x00\x00')[0] else: pass def __repr__(self): temp = ' ----- DESFireFileSettings ----\r\n' temp += 'File type: %s\r\n' % (self.FileType.name) temp += 'Encryption: %s\r\n' % (self.Encryption.name) temp += 'Permissions: %s\r\n' % (repr(self.Permissions)) if self.FileType == DESFireFileType.MDFT_LINEAR_RECORD_FILE_WITH_BACKUP: temp += 'RecordSize: %d\r\n' % (self.RecordSize) temp += 'MaxNumberRecords: %d\r\n' % (self.MaxNumberRecords) temp += 'CurrentNumberRecords: %d\r\n' % (self.CurrentNumberRecords) elif self.FileType == DESFireFileType.MDFT_STANDARD_DATA_FILE: temp += 'File size: %d\r\n' % (self.FileSize) return temp def toDict(self): temp = {} temp['FileType'] = self.FileType.name temp['Encryption'] = self.Encryption.name temp['Permissions'] = self.Permissions.toDict() temp['LowerLimit'] = self.LowerLimit temp['UpperLimit'] = self.UpperLimit temp['LimitedCreditValue'] = self.LimitedCreditValue temp['LimitedCreditEnabled'] = self.LimitedCreditEnabled if self.FileType == DESFireFileType.MDFT_LINEAR_RECORD_FILE_WITH_BACKUP: temp['RecordSize'] = self.RecordSize temp['MaxNumberRecords'] = self.MaxNumberRecords temp['CurrentNumberRecords'] = self.CurrentNumberRecords elif self.FileType == DESFireFileType.MDFT_STANDARD_DATA_FILE: temp['FileSize'] = self.FileSize return temp class DESFireCmac(Enum): MAC_None = 0 MAC_Tmac = 1 MAC_Tcrypt = 2 MAC_Rmac = 4 MAC_Rcrypt = 8 MAC_TmacRmac = MAC_Tmac | MAC_Rmac MAC_TmacRcrypt = MAC_Tmac | MAC_Rcrypt MAC_TcryptRmac = MAC_Tcrypt | MAC_Rmac class DESFireKeyType(Enum): DF_KEY_2K3DES = 0x00 DF_KEY_3K3DES = 0x40 DF_KEY_AES = 0x80 DF_KEY_INVALID = 0xFF class DESFireCipher(Enum): KEY_ENCIPHER = 0 KEY_DECIPHER = 1 class DESFireCBC(Enum): CBC_SEND = 0 CBC_RECEIVE = 1 class DESFireException(Exception): def __init__(self, msg): super(Exception,self).__init__() self.msg = msg class DESFireAuthException(DESFireException): def __init__(self, msg): 
super(DESFireException,self).__init__(msg) class DESFireCommsException(DESFireException): def __init__(self, status_code): super(DESFireException,self).__init__(DESFireStatus(status_code).name) self.status_code = DESFireStatus(status_code) self.msg = DESFireStatus(status_code).name class Desfire(SmartCard): def __init__(self, reader, logger=None): SmartCard.__init__(self,SmartCardTypes.DESFIRE, reader.getATR()) self.reader = reader if logger: self.logger = logger else: self.logger = _logger self.isAuthenticated = False self.lastAuthKeyNo = None self.sessionKey = None self.lastSelectedApplication = 0x00 self.versioninfo = None self.applications = [] def wrap_command(self, cmd, parameters = None): res = '\x90' if parameters: return res + cmd + '\x00\x00' + int2hex(len(parameters)) + parameters + '\x00' else: return res + cmd + '\x00\x00\x00' def _communicate(self, rawdata, autorecieve = True): result = '' while True: rawdata = hex2bytelist(rawdata) self.logger.debug("[+] Sending APDU : %s" % (bytelist2hex(rawdata),)) response, sw1, status = self.reader.sendAPDU(rawdata) self.logger.debug("[+] Card response: %s SW1: %x SW2: %x" % (bytelist2hex(response), sw1, status)) if len(response) >0: response = intlist2hex(response) else: response = '' sw1 = int2hex(sw1) status = int2hex(status) if sw1 != '\x91': if status == DESFireStatus.ST_AuthentError.value: raise DESFireAuthException('Card returned status ST_AuthentError') raise DESFireCommsException(status) if status not in [DESFireStatus.ST_Success.value, DESFireStatus.ST_NoChanges.value, DESFireStatus.ST_MoreFrames.value]: if status == DESFireStatus.ST_AuthentError.value: raise DESFireAuthException('Card returned status ST_AuthentError') raise DESFireCommsException(status) result += response if status != DESFireStatus.ST_MoreFrames.value or not autorecieve: break else: rawdata = self.wrap_command(DESFireCommand.DF_INS_ADDITIONAL_FRAME.value) return result, status def communicate(self, cmd, data, isEncryptedComm = False, withTXCMAC = False, autorecieve = True ): result = [] if withTXCMAC or isEncryptedComm: if not self.isAuthenticated: raise Exception('Cant perform CMAC calc without authantication!') if isEncryptedComm: raise Exception('Not implemented') if withTXCMAC: return else: return else: if withTXCMAC: cmacdata = cmd + data TXCMAC = self.sessionKey.CalculateCmac(cmacdata) self.logger.debug("TXCMAC : " + hex2hexstr(TXCMAC)) response, status = self._communicate(self.wrap_command(cmd, data), autorecieve) else: response, status = self._communicate(self.wrap_command(cmd, data), autorecieve) if self.isAuthenticated and len(response) >= 8 and status == DESFireStatus.ST_Success.value: if len(response) == 8: if self.sessionKey.keyType == DESFireKeyType.DF_KEY_2K3DES or self.sessionKey.keyType == DESFireKeyType.DF_KEY_3K3DES: RXCMAC = response response = '' else: return response else: RXCMAC = response[-8:] response = response[:-8] cmacdata = response + status RXCAMAC_CALC = self.sessionKey.CalculateCmac(cmacdata) self.logger.debug("RXCMAC : " + hex2hexstr(RXCMAC)) self.logger.debug("RXCAMAC_CALC: " + hex2hexstr(RXCAMAC_CALC)) return response def security_check(self): ver_n = self.GetCardVersion() ver_n_1 = self.GetCardVersion() if ver_n.UID == ver_n_1.UID: print '[!] Random UID not enabled!' MF = DESFireApplication(0x000000) MF.enumerate(self) if MF.keytype == DESFireKeyType.DF_KEY_INVALID: print '[!]Master KEY type unknown. 
This is strange' elif MF.keytype == DESFireKeyType.DF_KEY_2K3DES: print '[!]Master KEY encryption type FAIL' elif MF.keytype == DESFireKeyType.DF_KEY_3K3DES or MF.keytype == DESFireKeyType.DF_KEY_AES: print '[+]Master KEY type OK' if MF.keycount != 1: print 'Strange' if DESFireKeySettings.KS_ALLOW_CHANGE_MK in MF.keysettings: print 'Warning, key can be changed later (but only by supplying the original key)' if DESFireKeySettings.KS_LISTING_WITHOUT_MK in MF.keysettings: print 'Warning, enumeration of the card is possible without authentication' if DESFireKeySettings.KS_CREATE_DELETE_WITHOUT_MK in MF.keysettings: print 'Warning, apps can be created without authentication' if DESFireKeySettings.KS_CONFIGURATION_CHANGEABLE in MF.keysettings: print 'Warning, key config can be changed (but only by supplying the original key)' return None def enumerate(self): self.versioninfo = self.GetCardVersion() appids = [0x000000] appids += self.GetApplicationIDs() for appid in appids: app = DESFireApplication(appid) app.enumerate(self) self.applications.append(app) def toDict(self): temp = SmartCard.toDict(self) temp ['versioninfo'] = self.versioninfo.toDict() temp ['applications'] = [] for app in self.applications: temp ['applications'].append(app.toDict()) return temp def GetCardVersion(self): self.logger.debug('Getting card version info') cmd = DESFireCommand.DF_INS_GET_VERSION.value raw_data = self.communicate(cmd, '', withTXCMAC=self.isAuthenticated) cv = DESFireCardVersion() cv.parse(raw_data) self.logger.debug(repr(cv)) return cv def FormatCard(self): self.logger.debug('Formatting card') cmd = DESFireCommand.DF_INS_FORMAT_PICC.value self.communicate(cmd, '', withTXCMAC=self.isAuthenticated) def GetApplicationIDs(self): self.logger.debug("GetApplicationIDs") appids = [] cmd = DESFireCommand.DF_INS_GET_APPLICATION_IDS.value raw_data = self.communicate(cmd, '', withTXCMAC=self.isAuthenticated) pointer = 0 apps = [] while pointer < len(raw_data): appid = (hex2int(raw_data[pointer]) << 16) + (hex2int(raw_data[pointer+1]) << 8) + hex2int(raw_data[pointer+2]) self.logger.debug("Reading %d %08x", pointer, appid) apps.append(appid) pointer += 3 return apps def SelectApplication(self, appid): self.logger.debug('Selecting application with AppID %s' % (appid,)) parameters = int2hex((appid >> 16) & 0xff)+int2hex((appid >> 8) & 0xff)+int2hex((appid >> 0) & 0xff) cmd = DESFireCommand.DF_INS_SELECT_APPLICATION.value self.communicate(cmd, parameters) self.isAuthenticated = False self.lastSelectedApplication = appid def CreateApplication(self, appid, keysettings, keycount, type): self.logger.debug('Creating application with appid: 0x%x, ' %(appid)) appid = int2hex((appid >> 16) & 0xff)+int2hex((appid >> 8) & 0xff)+int2hex((appid >> 0) & 0xff) params = appid + int2hex(calc_key_settings(keysettings)) + int2hex(keycount|type.value) cmd = DESFireCommand.DF_INS_CREATE_APPLICATION.value self.communicate(cmd, params, withTXCMAC=self.isAuthenticated) def DeleteApplication(self, appid): self.logger.debug('Deleting application for AppID 0x%x', (appid)) appid = int2hex((appid >> 16) & 0xff)+int2hex((appid >> 8) & 0xff)+int2hex((appid >> 0) & 0xff) params = appid cmd = DESFireCommand.DF_INS_DELETE_APPLICATION.value self.communicate(cmd, params, withTXCMAC=self.isAuthenticated) def GetFileIDs(self): self.logger.debug('Enumerating all files for the selected application') fileIDs = [] cmd = DESFireCommand.DF_INS_GET_FILE_IDS.value raw_data = self.communicate(cmd, '', withTXCMAC=self.isAuthenticated) if len(raw_data) == 0: 
self.logger.debug("No files found") else: for byte in raw_data: fileIDs.append(hex2int(byte)) self.logger.debug("File ids: %s" % (''.join([str(id) for id in fileIDs]),)) return fileIDs def GetFileSettings(self, fileid): self.logger.debug('Getting file settings for file %s' % (fileid,)) cmd = DESFireCommand.DF_INS_GET_FILE_SETTINGS.value raw_data = raw_data = self.communicate(cmd, int2hex(fileid), withTXCMAC=self.isAuthenticated) file_settings = DESFireFileSettings() file_settings.parse(raw_data) return file_settings def ReadFileData(self,fileid): self.logger.debug('Reading file data for file %s' % (fileid,)) parameters = int2hex(fileid) + '\x00'*6 cmd = DESFireCommand.DF_INS_READ_DATA.value buffer = self.communicate(cmd, parameters, withTXCMAC=self.isAuthenticated) self.logger.debug('File %s Data: ' % (fileid,bytelist2hex(buffer))) return buffer def GetKeySettings(self): self.logger.debug('Getting key settings') cmd = DESFireCommand.DF_INS_GET_KEY_SETTINGS.value raw_data = self.communicate(cmd, '', withTXCMAC=self.isAuthenticated) keysettings = calc_key_settings(hex2int(raw_data[0])) keycount = hex2int(raw_data[1]) & 0x0F keytype = DESFireKeyType(hex2int(raw_data[1]) & 0xF0) self.logger.debug("Settings: %s, KeyCount: %d, KeyType: %s\r\n" % ('|'.join(a.name for a in keysettings), keycount, keytype)) return keysettings, keycount, keytype def GetKeyVersion(self, keyNo): self.logger.debug('Getting key version for keyid %x' %(keyNo,)) params = int2hex(keyNo) cmd = DESFireCommand.DF_INS_GET_KEY_VERSION.value raw_data = self.communicate(cmd, params) self.logger.debug('Got key version 0x%s for keyid %x' %(raw_data.encode('hex'),keyNo)) return raw_data
MIT License
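A hedged sketch of the key-settings byte that calc_key_settings() builds for ChangeKeySettings; it relies only on the enum and helper shown in the context (Python 2, like the library), and the chosen flags are an example.

# Hedged sketch: calc_key_settings() sums the DESFireKeySettings flag values into the
# single byte that DF_INS_CHANGE_KEY_SETTINGS expects.
from pyDESFire.pydesfire import DESFireKeySettings, calc_key_settings

settings = [
    DESFireKeySettings.KS_ALLOW_CHANGE_MK,           # 0x01
    DESFireKeySettings.KS_CONFIGURATION_CHANGEABLE,  # 0x08
]
print(hex(calc_key_settings(settings)))              # -> 0x9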
epswartz/block_distortion
block_distortion/effects.py
animate_image
python
def animate_image(
    image: np.ndarray,
    frames: int=100,
    splits: int=2000,
    progress: bool=False
):
    X_SIZE, Y_SIZE, CHANNELS = image.shape
    init_box = Box(0, 0, X_SIZE, Y_SIZE)
    images = []
    r = range(frames)
    if progress:
        r = track(r, "Rendering Frames")
    for i in r:
        grid = np.zeros((X_SIZE, Y_SIZE, CHANNELS))
        boxes = always_largest_split(grid, init_box, n=splits, orient=ORIENTATION)
        grid = color_grid_from_image(boxes, grid, image)
        images.append(grid)
    return images
Produce a gif with distortion effects.
This function returns a list of frames, which you can write with write_frames_to_gif().

Args:
    image: (W,H,3) or (W,H,4) np.ndarray
    frames: Number of frames in output gif
    splits: Number of times to split the image (higher makes a "smoother" looking image)
    progress: If True, prints a progress bar on stdout.

Returns:
    list: List of (W,H,3) or (W,H,4) np.ndarrays representing frames of the gif.
https://github.com/epswartz/block_distortion/blob/22cefc3bb9e1be32278d74957e7798b13db2258b/block_distortion/effects.py#L14-L46
import numpy as np

from .splitting import *
from .utils import *
from .Box import Box
from .coloring import *

from rich.progress import track
from rich.console import Console

ORIENTATION = "alternating"
MIT License
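A hedged end-to-end sketch for animate_image; the docstring names write_frames_to_gif() but its import location and argument order are assumptions here, and the random input image stands in for a real one.

# Hedged sketch: render 20 distorted frames of a small random RGB image and save a gif.
import numpy as np
from block_distortion import animate_image, write_frames_to_gif   # import path assumed

image = np.random.rand(128, 128, 3)                  # any (W, H, 3) float array
frames = animate_image(image, frames=20, splits=500, progress=True)
write_frames_to_gif("distorted.gif", frames)         # argument order assumed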
tlc-pack/tenset
python/tvm/topi/generic/search.py
schedule_argwhere
python
def schedule_argwhere(outs): return _default_schedule(outs, False)
Schedule for argwhere operator.

Parameters
----------
outs: Array of Tensor
    The computation graph description of argwhere.

Returns
-------
s: Schedule
    The computation schedule for the op.
https://github.com/tlc-pack/tenset/blob/3f7ed0291df47331d43f43a064fffacdc2914b47/python/tvm/topi/generic/search.py#L23-L36
from __future__ import absolute_import as _abs

from .default import default_schedule as _default_schedule
Apache License 2.0
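A hedged sketch of where this generic fallback fits; topi.argwhere's (output_shape, condition) signature is assumed from mainline TVM and the shapes are arbitrary.

# Hedged sketch: build an argwhere computation with TOPI and ask the generic backend for
# its default (unoptimized, te.create_schedule-based) schedule.
import tvm
from tvm import te, topi

condition = te.placeholder((8, 8), name="condition", dtype="int32")
output_shape = te.placeholder((64, 2), name="output_shape", dtype="int32")

out = topi.argwhere(output_shape, condition)    # signature assumed: (output_shape, condition)
s = topi.generic.schedule_argwhere([out])       # the function documented above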
dell/ansible-isilon
dellemc_ansible/isilon/library/dellemc_isilon_nfs.py
IsilonNfsExport._check_remove_clients
python
def _check_remove_clients(self, nfs_export):
    playbook_client_dict = self._create_current_client_dict_from_playbook()
    current_client_dict = self._create_current_client_dict()
    mod_flag = False

    mod_flag1 = False
    if playbook_client_dict['clients']:
        for client in playbook_client_dict['clients']:
            if client in current_client_dict['clients']:
                current_client_dict['clients'].remove(client)
                mod_flag1 = True
        if mod_flag1:
            nfs_export.clients = current_client_dict['clients']

    mod_flag2 = False
    if playbook_client_dict['read_write_clients']:
        for client in playbook_client_dict['read_write_clients']:
            if client in current_client_dict['read_write_clients']:
                current_client_dict['read_write_clients'].remove(client)
                mod_flag2 = True
        if mod_flag2:
            nfs_export.read_write_clients = current_client_dict['read_write_clients']

    mod_flag3 = False
    if playbook_client_dict['read_only_clients']:
        for client in playbook_client_dict['read_only_clients']:
            if client in current_client_dict['read_only_clients']:
                current_client_dict['read_only_clients'].remove(client)
                mod_flag3 = True
        if mod_flag3:
            nfs_export.read_only_clients = current_client_dict['read_only_clients']

    mod_flag4 = False
    if playbook_client_dict['root_clients']:
        for client in playbook_client_dict['root_clients']:
            if client in current_client_dict['root_clients']:
                current_client_dict['root_clients'].remove(client)
                mod_flag4 = True
        if mod_flag4:
            nfs_export.root_clients = current_client_dict['root_clients']

    mod_flag = mod_flag1 or mod_flag2 or mod_flag3 or mod_flag4
    return mod_flag, nfs_export
Check if clients are to be removed from NFS export
https://github.com/dell/ansible-isilon/blob/9e98faf2344083e0c74467cb2de39f9b8a3145f9/dellemc_ansible/isilon/library/dellemc_isilon_nfs.py#L517-L566
from __future__ import absolute_import, division, print_function __metaclass__ = type ANSIBLE_METADATA = {'metadata_version': '1.1', 'status': ['preview'], 'supported_by': 'community' } DOCUMENTATION = r''' --- module: dellemc_isilon_nfs version_added: '2.7' short_description: Manage NFS exports on a DellEMC Isilon system description: - Managing NFS exports on an Isilon system includes creating NFS export for a directory in an access zone, adding or removing clients, modifying different parameters of the export and deleting export. extends_documentation_fragment: - dellemc_isilon.dellemc_isilon author: - Manisha Agrawal(@agrawm3) [email protected] options: path: description: - Specifies the filesystem path. It is absolute path for System access zone and relative if using non-System access zone. For example, if your access zone is 'Ansible' and it has a base path '/ifs/ansible' and the path specified is '/user1', then the effective path would be '/ifs/ansible/user1'. If your access zone is System, and you have 'directory1' in the access zone, the path provided should be '/ifs/directory1'. - The directory on the path must exist - the NFS module will not create the directory. - Ansible module will only support exports with a unique path. - If there multiple exports present with the same path, fetching details, creation, modification or deletion of such exports will fail. required: True type: str access_zone: description: - Specifies the zone in which the export is valid. - Access zone once set cannot be changed. type: str default: System clients: description: - Specifies the clients to the export. The type of access to clients in this list is determined by the 'read_only' parameter. - This list can be changed anytime during the lifetime of the NFS export. type: list root_clients: description: - Specifies the clients with root access to the export. - This list can be changed anytime during the lifetime of the NFS export. type: list read_only_clients: description: - Specifies the clients with read-only access to the export, even when the export is read/write. - This list can be changed anytime during the lifetime of the NFS export. type: list read_write_clients: description: - Specifies the clients with both read and write access to the export, even when the export is set to read-only. - This list can be changed anytime during the lifetime of the NFS export. type: list read_only: description: - Specifies whether the export is read-only or read-write. This parameter only has effect on the 'clients' list and not the other three types of clients. - This setting can be modified any time. If it is not set at the time of creation, the export will be of type read/write. type: bool sub_directories_mountable: description: - True if all directories under the specified paths are mountable. If not set, sub-directories will not be mountable. - This setting can be modified any time. type: bool description: description: - Optional description field for the NFS export. - Can be modified by passing new value. type: str state: description: - Define whether the NFS export should exist or not. - present indicates that the NFS export should exist in system. - absent indicates that the NFS export should not exist in system. required: True type: str choices: [absent, present] client_state: description: - Define whether the clients can access the NFS export. - present-in-export indicates that the clients can access the NFS export. - absent-in-export indicates that the client cannot access the NFS export. 
- Required when adding or removing access of clients from the export. - While removing clients, only the specified clients will be removed from the export, others will remain as is. required: False type: str choices: [present-in-export, absent-in-export] ''' EXAMPLES = r''' - name: Create NFS Export dellemc_isilon_nfs: onefs_host: "{{onefs_host}}" api_user: "{{api_user}}" api_password: "{{api_password}}" verify_ssl: "{{verify_ssl}}" path: "{{path}}" access_zone: "{{access_zone}}" read_only_clients: - "{{client1}}" - "{{client2}}" read_only: True clients: ["{{client3}}"] client_state: 'present-in-export' state: 'present' - name: Get NFS Export dellemc_isilon_nfs: onefs_host: "{{onefs_host}}" api_user: "{{api_user}}" api_password: "{{api_password}}" verify_ssl: "{{verify_ssl}}" path: "{{path}}" access_zone: "{{access_zone}}" state: 'present' - name: Add a root client dellemc_isilon_nfs: onefs_host: "{{onefs_host}}" api_user: "{{api_user}}" api_password: "{{api_password}}" verify_ssl: "{{verify_ssl}}" path: "{{path}}" access_zone: "{{access_zone}}" root_clients: - "{{client4}}" client_state: 'present-in-export' state: 'present' - name: Set sub_directories_mountable flag to True dellemc_isilon_nfs: onefs_host: "{{onefs_host}}" api_user: "{{api_user}}" api_password: "{{api_password}}" verify_ssl: "{{verify_ssl}}" path: "{{path}}" access_zone: "{{access_zone}}" sub_directories_mountable: True state: 'present' - name: Remove a root client dellemc_isilon_nfs: onefs_host: "{{onefs_host}}" api_user: "{{api_user}}" api_password: "{{api_password}}" verify_ssl: "{{verify_ssl}}" path: "{{path}}" access_zone: "{{access_zone}}" root_clients: - "{{client4}}" client_state: 'absent-in-export' state: 'present' - name: Modify description dellemc_isilon_nfs: onefs_host: "{{onefs_host}}" api_user: "{{api_user}}" api_password: "{{api_password}}" verify_ssl: "{{verify_ssl}}" path: "{{path}}" access_zone: "{{access_zone}}" description: "new description" state: 'present' - name: Set read_only flag to False dellemc_isilon_nfs: onefs_host: "{{onefs_host}}" api_user: "{{api_user}}" api_password: "{{api_password}}" verify_ssl: "{{verify_ssl}}" path: "{{path}}" access_zone: "{{access_zone}}" read_only: False state: 'present' - name: Delete NFS Export dellemc_isilon_nfs: onefs_host: "{{onefs_host}}" api_user: "{{api_user}}" api_password: "{{api_password}}" verify_ssl: "{{verify_ssl}}" path: "{{path}}" access_zone: "{{access_zone}}" state: 'absent' ''' RETURN = r''' changed: description: A boolean indicating if the task had to make changes. returned: always type: bool NFS_export_details: description: The updated NFS Export details. type: complex returned: always contains: all_dirs: description: - sub_directories_mountable flag value. type: bool id: description: - The ID of the NFS Export, generated by the array. type: int sample: 12 paths: description: - The filesystem path. type: list sample: ['/ifs/dir/filepath'] zone: description: - Specifies the zone in which the export is valid. type: string sample: 'System' read_only: description: - Specifies whether the export is read-only or read-write. type: bool read_only_clients: description: - The list of read only clients for the NFS Export. type: list sample: ['client_ip', 'client_ip'] read_write_clients: description: - The list of read write clients for the NFS Export. type: list sample: ['client_ip', 'client_ip'] root_clients: description: - The list of root clients for the NFS Export. 
type: list sample: ['client_ip', 'client_ip'] clients: description: - The list of clients for the NFS Export. type: list sample: ['client_ip', 'client_ip'] description: description: - Description for the export. type: string ''' from ansible.module_utils.basic import AnsibleModule from ansible.module_utils.storage.dell import dellemc_ansible_isilon_utils as utils import logging import re LOG = utils.get_logger( module_name='dellemc_isilon_nfs', log_devel=logging.INFO) HAS_ISILON_SDK = utils.has_isilon_sdk() ISILON_SDK_VERSION_CHECK = utils.isilon_sdk_version_check() class IsilonNfsExport(object): def __init__(self): self.module_params = utils.get_isilon_management_host_parameters() self.module_params.update(self.get_isilon_nfs_parameters()) self.module = AnsibleModule( argument_spec=self.module_params, supports_check_mode=False ) self.result = { "changed": False, "NFS_export_details": {} } if HAS_ISILON_SDK is False: self.module.fail_json(msg="Ansible modules for Isilon require " "the Isilon SDK to be installed. Please " "install the library before using these " "modules.") if ISILON_SDK_VERSION_CHECK and not ISILON_SDK_VERSION_CHECK['supported_version']: err_msg = ISILON_SDK_VERSION_CHECK['unsupported_version_message'] LOG.error(err_msg) self.module.fail_json(msg=err_msg) self.api_client = utils.get_isilon_connection(self.module.params) self.isi_sdk = utils.get_isilon_sdk() LOG.info('Got python SDK instance for provisioning on Isilon ') self.protocol_api = self.isi_sdk.ProtocolsApi(self.api_client) self.zone_summary_api = self.isi_sdk.ZonesSummaryApi(self.api_client) def get_zone_base_path(self, access_zone): try: zone_path = (self.zone_summary_api. get_zones_summary_zone(access_zone)).to_dict() return zone_path['summary']['path'] except Exception as e: error_msg = self.determine_error(error_obj=e) error_message = 'Unable to fetch base path of Access Zone {0} ' 'failed with error: {1}'.format(access_zone, str(error_msg)) LOG.error(error_message) self.module.fail_json(msg=error_message) def get_nfs_export(self, path, access_zone): LOG.info( "Getting NFS export details for path: {0} and access zone: " "{1}".format( path, access_zone)) try: NfsExportsExtendedObj = self.protocol_api.list_nfs_exports( path=path, zone=access_zone) if NfsExportsExtendedObj.total > 1: error_msg = 'Multiple NFS Exports found' LOG.error(error_msg) self.module.fail_json(msg=error_msg) elif NfsExportsExtendedObj.total == 0: LOG.info( 'NFS Export for given path: {0} and access zone: {1} not found'.format( path, access_zone)) return {} else: nfs_export = NfsExportsExtendedObj.exports[0] return nfs_export.to_dict() except Exception as e: error_msg = ( "Got error {0} while getting NFS export details for path: " "{1} and access zone: {2}" .format( self.determine_error(e), path, access_zone)) LOG.error(error_msg) self.module.fail_json(msg=error_msg) def _create_client_lists_from_playbook(self): all_client_list = [ self.module.params['clients'], self.module.params['read_only_clients'], self.module.params['read_write_clients'], self.module.params['root_clients']] return all_client_list def _get_nfs_export_from_id(self, nfs_export_id, access_zone): LOG.info( "Getting NFS export details for id: {0} and access zone: {1}".format( nfs_export_id, access_zone)) try: NfsExportsObj = self.protocol_api.get_nfs_export( nfs_export_id, zone=access_zone) nfs_export = NfsExportsObj.exports[0] return nfs_export.to_dict() except Exception as e: error_msg = ( "Got error {0} while getting NFS export details for ID: " "{1} and access zone: {2}" 
.format( self.determine_error(e), nfs_export_id, access_zone)) LOG.error(error_msg) self.module.fail_json(msg=error_msg) def _create_nfs_export_create_params_object(self, path): try: nfs_export = self.isi_sdk.NfsExportCreateParams( paths=[path], clients=self.module.params['clients'], read_only_clients=self.module.params['read_only_clients'], read_write_clients=self.module.params['read_write_clients'], root_clients=self.module.params['root_clients'], read_only=self.module.params['read_only'], all_dirs=self.module.params['sub_directories_mountable'], description=self.module.params['description'], zone=self.module.params['access_zone']) return nfs_export except Exception as e: errorMsg = 'Create NfsExportCreateParams object for path {0}' 'failed with error {1}'.format( path, self.determine_error(e)) LOG.error(errorMsg) self.module.fail_json(msg=errorMsg) def create_nfs_export(self, path, access_zone): nfs_export = self._create_nfs_export_create_params_object(path) try: msg = ( "Creating NFS export with parameters:nfs_export={0}") LOG.info(msg.format(nfs_export)) response = self.protocol_api.create_nfs_export(nfs_export, zone=access_zone) self.result['NFS_export_details'] = self._get_nfs_export_from_id(response.id, access_zone=access_zone) return True except Exception as e: errorMsg = 'Create NFS export for path: {0} and access zone: {1}' ' failed with error: {2}'.format( path, access_zone, self.determine_error(e)) LOG.error(errorMsg) self.module.fail_json(msg=errorMsg) def _create_current_client_dict_from_playbook(self): client_dict_playbook_input = { 'read_only_clients': self.module.params['read_only_clients'], 'clients': self.module.params['clients'], 'root_clients': self.module.params['root_clients'], 'read_write_clients': self.module.params['read_write_clients']} return client_dict_playbook_input def _create_current_client_dict(self): current_client_dict = { 'read_only_clients': self.result['NFS_export_details']['read_only_clients'], 'clients': self.result['NFS_export_details']['clients'], 'root_clients': self.result['NFS_export_details']['root_clients'], 'read_write_clients': self.result['NFS_export_details']['read_write_clients']} return current_client_dict def _check_add_clients(self, nfs_export): playbook_client_dict = self._create_current_client_dict_from_playbook() current_client_dict = self._create_current_client_dict() mod_flag = False mod_flag1 = False if playbook_client_dict['clients']: for client in playbook_client_dict['clients']: if client not in current_client_dict['clients']: current_client_dict['clients'].append(client) mod_flag1 = True if mod_flag1: nfs_export.clients = current_client_dict['clients'] mod_flag2 = False if playbook_client_dict['read_write_clients']: for client in playbook_client_dict['read_write_clients']: if client not in current_client_dict['read_write_clients']: current_client_dict['read_write_clients'].append(client) mod_flag2 = True if mod_flag2: nfs_export.read_write_clients = current_client_dict['read_write_clients'] mod_flag3 = False if playbook_client_dict['read_only_clients']: for client in playbook_client_dict['read_only_clients']: if client not in current_client_dict['read_only_clients']: current_client_dict['read_only_clients'].append(client) mod_flag3 = True if mod_flag3: nfs_export.read_only_clients = current_client_dict['read_only_clients'] mod_flag4 = False if playbook_client_dict['root_clients']: for client in playbook_client_dict['root_clients']: if client not in current_client_dict['root_clients']: 
current_client_dict['root_clients'].append(client) mod_flag4 = True if mod_flag4: nfs_export.root_clients = current_client_dict['root_clients'] mod_flag = mod_flag1 or mod_flag2 or mod_flag3 or mod_flag4 return mod_flag, nfs_export
Apache License 2.0
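As an aside to the record above: the _check_add_clients method merges playbook client lists into the existing export's lists and flags whether anything changed. A minimal standalone sketch of that merge logic, using plain dictionaries instead of the Isilon SDK export object (the helper name and sample addresses are illustrative, not part of the module):

def merge_client_lists(requested, current):
    # Append any requested client that is not already present; report whether anything changed.
    changed = False
    for key, clients in requested.items():
        if not clients:
            continue
        for client in clients:
            if client not in current[key]:
                current[key].append(client)
                changed = True
    return changed, current

current = {"clients": ["10.0.0.1"], "read_only_clients": [], "read_write_clients": [], "root_clients": []}
requested = {"clients": ["10.0.0.1", "10.0.0.2"], "read_only_clients": ["10.0.0.3"], "read_write_clients": None, "root_clients": []}
print(merge_client_lists(requested, current))
# (True, {'clients': ['10.0.0.1', '10.0.0.2'], 'read_only_clients': ['10.0.0.3'], ...})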
appliedgeometry/poissongeometry
poisson/utils.py
validate_dimension
python
def validate_dimension(dim): if not isinstance(dim, int): raise DimensionError(F"{dim} is not int") if dim < 2: raise DimensionError(F"{dim} < 2") else: return dim
This method checks whether the dimension variable is valid for this class
https://github.com/appliedgeometry/poissongeometry/blob/98bea08d9127f1bda45bc04b88d73b05dd480c65/poisson/utils.py#L30-L38
from __future__ import unicode_literals from poisson.errors import DimensionError
MIT License
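A minimal usage sketch for validate_dimension above, assuming the poissongeometry package is importable from the working environment:

from poisson.utils import validate_dimension
from poisson.errors import DimensionError

print(validate_dimension(3))    # valid: returns 3 unchanged
try:
    validate_dimension(1)       # invalid: dimension must be an int >= 2
except DimensionError as err:
    print(err)                  # "1 < 2"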
mwaskom/seaborn
seaborn/rcmod.py
set_theme
python
def set_theme(context="notebook", style="darkgrid", palette="deep", font="sans-serif", font_scale=1, color_codes=True, rc=None): set_context(context, font_scale) set_style(style, rc={"font.family": font}) set_palette(palette, color_codes=color_codes) if rc is not None: mpl.rcParams.update(rc)
Set aspects of the visual theme for all matplotlib and seaborn plots. This function changes the global defaults for all plots using the :ref:`matplotlib rcParams system <matplotlib:matplotlib-rcparams>`. The theming is decomposed into several distinct sets of parameter values. The options are illustrated in the :doc:`aesthetics <../tutorial/aesthetics>` and :doc:`color palette <../tutorial/color_palettes>` tutorials. Parameters ---------- context : string or dict Scaling parameters, see :func:`plotting_context`. style : string or dict Axes style parameters, see :func:`axes_style`. palette : string or sequence Color palette, see :func:`color_palette`. font : string Font family, see matplotlib font manager. font_scale : float, optional Separate scaling factor to independently scale the size of the font elements. color_codes : bool If ``True`` and ``palette`` is a seaborn palette, remap the shorthand color codes (e.g. "b", "g", "r", etc.) to the colors from this palette. rc : dict or None Dictionary of rc parameter mappings to override the above. Examples -------- .. include:: ../docstrings/set_theme.rst
https://github.com/mwaskom/seaborn/blob/59e61256a704e709007685c9840595b53221e367/seaborn/rcmod.py#L83-L124
import warnings import functools import matplotlib as mpl from cycler import cycler from . import palettes __all__ = ["set_theme", "set", "reset_defaults", "reset_orig", "axes_style", "set_style", "plotting_context", "set_context", "set_palette"] _style_keys = [ "axes.facecolor", "axes.edgecolor", "axes.grid", "axes.axisbelow", "axes.labelcolor", "figure.facecolor", "grid.color", "grid.linestyle", "text.color", "xtick.color", "ytick.color", "xtick.direction", "ytick.direction", "lines.solid_capstyle", "patch.edgecolor", "patch.force_edgecolor", "image.cmap", "font.family", "font.sans-serif", "xtick.bottom", "xtick.top", "ytick.left", "ytick.right", "axes.spines.left", "axes.spines.bottom", "axes.spines.right", "axes.spines.top", ] _context_keys = [ "font.size", "axes.labelsize", "axes.titlesize", "xtick.labelsize", "ytick.labelsize", "legend.fontsize", "legend.title_fontsize", "axes.linewidth", "grid.linewidth", "lines.linewidth", "lines.markersize", "patch.linewidth", "xtick.major.width", "ytick.major.width", "xtick.minor.width", "ytick.minor.width", "xtick.major.size", "ytick.major.size", "xtick.minor.size", "ytick.minor.size", ]
BSD 3-Clause New or Revised License
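A short usage sketch for set_theme, assuming seaborn >= 0.11 (where set_theme was introduced) and matplotlib are installed:

import matplotlib.pyplot as plt
import seaborn as sns

# Darkgrid style, muted palette, slightly larger fonts, plus one direct rc override.
sns.set_theme(context="notebook", style="darkgrid", palette="muted",
              font_scale=1.2, rc={"axes.spines.right": False})
plt.plot([0, 1, 2], [0, 1, 4])
plt.show()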
sanderslab/magellanmapper
magmap/cv/stack_detect.py
StackDetector.detect_sub_roi_from_data
python
def detect_sub_roi_from_data(cls, coord, sub_roi_slices, offset): return cls.detect_sub_roi( coord, offset, cls.last_coord, cls.denoise_max_shape, cls.exclude_border, cls.img[sub_roi_slices], cls.channel, coloc=cls.coloc)
Perform 3D blob detection within a sub-ROI using data stored as class attributes for forked multiprocessing. Args: coord (Tuple[int]): Coordinate of the sub-ROI in the order z,y,x. sub_roi_slices (Tuple[slice]): Sequence of slices within :attr:``img`` defining the sub-ROI. offset (Tuple[int]): Offset of the sub-ROI within the full ROI, in z,y,x. Returns: Tuple[int], :obj:`np.ndarray`: The coordinate given back again to identify the sub-ROI position and an array of detected blobs.
https://github.com/sanderslab/magellanmapper/blob/35e910035217edab799d4fbaa61e39931527a354/magmap/cv/stack_detect.py#L55-L74
from enum import Enum import os from time import time import numpy as np import pandas as pd from magmap.cv import chunking, colocalizer, detector, verifier from magmap.io import cli, df_io, importer, libmag, naming from magmap.plot import plot_3d from magmap.settings import config, roi_prof _logger = config.logger.getChild(__name__) class StackTimes(Enum): DETECTION = "Detection" PRUNING = "Pruning" TOTAL = "Total_stack" class StackDetector(object): img = None last_coord = None denoise_max_shape = None exclude_border = None coloc = False channel = None @classmethod
BSD 3-Clause New or Revised License
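The class-attribute pattern in StackDetector exists so that forked worker processes inherit the large image array without pickling it. Below is a small standalone sketch of that pattern (not the actual blob detector), assuming a fork-based multiprocessing start method such as the Linux default:

import multiprocessing as mp
import numpy as np

class SubRoiData:
    # Large array stored as a class attribute so forked workers inherit it, as StackDetector does.
    img = None

def mean_sub_roi(job):
    coord, slices = job
    return coord, float(SubRoiData.img[slices].mean())

if __name__ == "__main__":
    SubRoiData.img = np.random.rand(4, 64, 64)
    jobs = [((z,), (slice(z, z + 1), slice(None), slice(None))) for z in range(4)]
    with mp.Pool(2) as pool:
        for coord, val in pool.map(mean_sub_roi, jobs):
            print(coord, round(val, 3))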
osmr/imgclsmob
tensorflow_/tensorflowcv/models/mnasnet.py
mnas_init_block
python
def mnas_init_block(x, in_channels, out_channels, mid_channels, use_skip, training, data_format, name="mnas_init_block"): x = conv3x3_block( x=x, in_channels=in_channels, out_channels=mid_channels, strides=2, training=training, data_format=data_format, name=name + "/conv1") x = dws_exp_se_res_unit( x=x, in_channels=mid_channels, out_channels=out_channels, use_skip=use_skip, training=training, data_format=data_format, name=name + "/conv2") return x
MnasNet specific initial block. Parameters: ---------- x : Tensor Input tensor. in_channels : int Number of input channels. out_channels : int Number of output channels. mid_channels : int Number of middle channels. use_skip : bool Whether to use skip connection in the second block. training : bool, or a TensorFlow boolean scalar tensor Whether to return the output in training mode or in inference mode. data_format : str The ordering of the dimensions in tensors. name : str, default 'mnas_init_block' Block name. Returns: ------- Tensor Resulting tensor.
https://github.com/osmr/imgclsmob/blob/ea5f784eea865ce830f3f97c5c1d1f6491d9cbb2/tensorflow_/tensorflowcv/models/mnasnet.py#L114-L165
__all__ = ['MnasNet', 'mnasnet_b1', 'mnasnet_a1', 'mnasnet_small'] import os import tensorflow as tf from .common import is_channels_first, flatten, conv1x1_block, conv3x3_block, dwconv3x3_block, dwconv5x5_block, se_block, round_channels def dws_exp_se_res_unit(x, in_channels, out_channels, strides=1, use_kernel3=True, exp_factor=1, se_factor=0, use_skip=True, activation="relu", training=False, data_format="channels_last", name="dws_exp_se_res_unit"): assert (exp_factor >= 1) residual = (in_channels == out_channels) and (strides == 1) and use_skip use_exp_conv = exp_factor > 1 use_se = se_factor > 0 mid_channels = exp_factor * in_channels dwconv_block_fn = dwconv3x3_block if use_kernel3 else dwconv5x5_block if residual: identity = x if use_exp_conv: x = conv1x1_block( x=x, in_channels=in_channels, out_channels=mid_channels, activation=activation, training=training, data_format=data_format, name=name + "/exp_conv") x = dwconv_block_fn( x=x, in_channels=mid_channels, out_channels=mid_channels, strides=strides, activation=activation, training=training, data_format=data_format, name=name + "/dw_conv") if use_se: x = se_block( x=x, channels=mid_channels, reduction=(exp_factor * se_factor), approx_sigmoid=False, round_mid=False, activation=activation, data_format=data_format, name=name + "/se") x = conv1x1_block( x=x, in_channels=mid_channels, out_channels=out_channels, activation=None, training=training, data_format=data_format, name=name + "/pw_conv") if residual: x = x + identity return x
MIT License
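A hedged sketch of calling mnas_init_block directly, assuming a TensorFlow 1.x environment and that the module is importable as tensorflowcv.models.mnasnet; the import path and channel counts below are illustrative assumptions, not taken from the record:

import tensorflow as tf  # TF 1.x graph mode, as used by tensorflowcv
from tensorflowcv.models.mnasnet import mnas_init_block  # assumed import path

x = tf.placeholder(tf.float32, shape=(None, 224, 224, 3), name="input")
# Illustrative channel counts; the stride-2 conv halves the spatial dimensions.
y = mnas_init_block(x, in_channels=3, out_channels=16, mid_channels=32,
                    use_skip=False, training=False, data_format="channels_last",
                    name="init_block")
print(y)  # Tensor with shape (?, 112, 112, 16)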
tomplus/kubernetes_asyncio
kubernetes_asyncio/client/models/v1_deployment_condition.py
V1DeploymentCondition.last_transition_time
python
def last_transition_time(self, last_transition_time): self._last_transition_time = last_transition_time
Sets the last_transition_time of this V1DeploymentCondition. Last time the condition transitioned from one status to another. # noqa: E501 :param last_transition_time: The last_transition_time of this V1DeploymentCondition. # noqa: E501 :type: datetime
https://github.com/tomplus/kubernetes_asyncio/blob/22bf0f4ec775b920abc9cee86bb38abcfc57506d/kubernetes_asyncio/client/models/v1_deployment_condition.py#L90-L99
import pprint import re import six from kubernetes_asyncio.client.configuration import Configuration class V1DeploymentCondition(object): """ Attributes: openapi_types (dict): The key is attribute name and the value is attribute type. attribute_map (dict): The key is attribute name and the value is json key in definition. """ openapi_types = { 'last_transition_time': 'datetime', 'last_update_time': 'datetime', 'message': 'str', 'reason': 'str', 'status': 'str', 'type': 'str' } attribute_map = { 'last_transition_time': 'lastTransitionTime', 'last_update_time': 'lastUpdateTime', 'message': 'message', 'reason': 'reason', 'status': 'status', 'type': 'type' } def __init__(self, last_transition_time=None, last_update_time=None, message=None, reason=None, status=None, type=None, local_vars_configuration=None): if local_vars_configuration is None: local_vars_configuration = Configuration() self.local_vars_configuration = local_vars_configuration self._last_transition_time = None self._last_update_time = None self._message = None self._reason = None self._status = None self._type = None self.discriminator = None if last_transition_time is not None: self.last_transition_time = last_transition_time if last_update_time is not None: self.last_update_time = last_update_time if message is not None: self.message = message if reason is not None: self.reason = reason self.status = status self.type = type @property def last_transition_time(self): return self._last_transition_time @last_transition_time.setter
Apache License 2.0
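A short usage sketch for the generated model, importing it from the module path given in the record (status and type are the required constructor arguments):

from datetime import datetime, timezone
from kubernetes_asyncio.client.models.v1_deployment_condition import V1DeploymentCondition

cond = V1DeploymentCondition(status="True", type="Available")
cond.last_transition_time = datetime(2021, 1, 1, tzinfo=timezone.utc)   # setter shown above
print(cond.last_transition_time)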
laserkelvin/pyspectools
pyspectools/mmw/mmw_analysis.py
sec_deriv_peak_detection
python
def sec_deriv_peak_detection(df_group, threshold=5, window_size=25, magnet_thres=0.5, **kwargs): signal = df_group["Field OFF"].to_numpy() frequency = df_group["Frequency"].to_numpy() corr_signal = cross_correlate_lorentzian(signal, window_size) indices = peakutils.indexes(corr_signal, thres=threshold, **kwargs, thres_abs=True) peak_subset = df_group.iloc[indices] if peak_subset["Field ON"].sum() != 0.: peak_subset.loc[:, "Ratio"] = peak_subset.loc[:,"Field ON"] / peak_subset.loc[:,"Field OFF"] peak_subset.loc[:, "Magnetic"] = peak_subset.loc[:,"Ratio"] < magnet_thres return peak_subset
Function designed to take advantage of split-combine-apply techniques to analyze concatenated spectra, or a single spectrum. The spectrum is cross-correlated with a window corresponding to a second-derivative Lorentzian line shape, with the parameters corresponding to the actual downward facing peak such that the cross-correlation is upwards facing for a match. This X-correlation analysis is only done for the Field OFF data; every peak should appear in the Field OFF, and should disappear under the presence of a magnetic field. If the Field ON spectrum is non-zero, the peak finding is followed up by calculating the ratio of the intensity at the same position for ON/OFF; if the ratio is below a threshold, we consider it a magnetic line. Parameters ---------- df_group : pandas DataFrame Dataframe containing the millimeter-wave spectra. threshold : int, optional Absolute intensity units threshold for peak detection. This value corresponds to the value used in the X-correlation spectrum, by default 5 window_size : int, optional Size of the second derivative Lorentzian window function, by default 25 magnet_thres : float, optional Threshold for determining if a peak is magnetic, given as the ratio of ON/OFF. A value of 1 means the line is nominally unaffected by a magnetic field, and less than 1 corresponds to larger responses to magnetic fields. By default 0.5 Returns ------- pandas DataFrame DataFrame holding the detected peaks and their associated magnet tests, if applicable.
https://github.com/laserkelvin/pyspectools/blob/f8f38136d362c061cefc71fede56848829467666/pyspectools/mmw/mmw_analysis.py#L172-L223
from typing import List from pathlib import Path import numpy as np import lmfit import pandas as pd import os import peakutils from matplotlib import pyplot as plt from . import fft_routines from . import interpolation from ..lineshapes import sec_deriv_lorentzian def parse_data(filepath): settings = dict() intensity = list() read_params = False read_int = False read_zeeman = False finished = False fieldoff_intensities = list() fieldon_intensities = list() with open(filepath) as read_file: for line in read_file: if "*****" in line: read_int = False if finished is True: break if "Scan" in line: if "[Field ON]" in line: read_zeeman = True scan_details = line.split() settings["ID"] = int(scan_details[1]) read_params = True read_int = False continue if read_int is True: if read_zeeman is False: fieldoff_intensities += [float(value) for value in line.split()] else: fieldon_intensities += [float(value) for value in line.split()] finished = True if read_params is True and len(line.split()) > 1: scan_params = line.split() shift = 1 settings["Frequency"] = float(scan_params[0]) settings["Frequency step"] = float(scan_params[1]) if len(scan_params) == 4: settings["Multiplier"] = 1. shift = 0 else: settings["Multiplier"] = float(scan_params[2]) settings["Center"] = float(scan_params[2 + shift]) settings["Points"] = int(scan_params[3 + shift]) read_params = False read_int = True continue fieldoff_intensities = np.array(fieldoff_intensities) fieldon_intensities = np.array(fieldon_intensities) settings["Frequency step"] = settings["Frequency step"] * settings["Multiplier"] side_length = settings["Frequency step"] * (settings["Points"] // 2) start_freq = settings["Frequency"] - side_length end_freq = settings["Frequency"] + side_length frequency = np.linspace(start_freq, end_freq, settings["Points"]) return frequency, fieldoff_intensities, fieldon_intensities, settings def open_mmw(filepath, **kwargs): frequency, fieldoff_intensities, fieldon_intensities, settings = parse_data(filepath) npoints = settings.get("Points") fieldoff_intensities = fieldoff_intensities[:npoints] if fieldon_intensities.size > 1: fieldon_intensities = fieldon_intensities[:npoints] sample_rate = 1. / settings["Frequency step"] * 1e6 param_dict = { "window_function": None, "cutoff": [50, 690], "sample_rate": sample_rate } param_dict.update(**kwargs) fieldoff_intensities = fft_routines.fft_filter(fieldoff_intensities, **param_dict) if fieldon_intensities.size > 1: fieldon_intensities = fft_routines.fft_filter(fieldon_intensities, **param_dict) intensity = fieldoff_intensities - fieldon_intensities else: fieldon_intensities = np.zeros(fieldoff_intensities.size) intensity = fieldoff_intensities mmw_df = pd.DataFrame( data={"Frequency": frequency, "Field OFF": fieldoff_intensities, "Field ON": fieldon_intensities, "OFF - ON": intensity }, ) return mmw_df def test_filtering(path: str, **kwargs): frequency, off, on, settings = parse_data(path) filtered = fft_routines.fft_filter(off, **kwargs) fig, axarray = plt.subplots(2, 1, figsize=(10, 5)) ax = axarray[0] ax.set_title("Frequency domain") ax.plot(frequency, filtered) ax.set_xlabel("Frequency (MHz)") ax = axarray[1] ax.set_title("Time domain") cutoff = kwargs.get("cutoff", np.arange(30, 500)) ax.plot(np.fft.fft(filtered)[np.arange(*cutoff)]) return fig, ax
MIT License
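A usage sketch for the record above: load a legacy millimeter-wave scan with open_mmw (defined in the same module) and run the peak detection on it. The file name is a hypothetical placeholder, and the sketch assumes pyspectools and its dependencies (peakutils, pandas) are installed:

from pyspectools.mmw.mmw_analysis import open_mmw, sec_deriv_peak_detection

mmw_df = open_mmw("scan_001.txt")   # hypothetical scan file in the legacy format parsed above
peaks = sec_deriv_peak_detection(mmw_df, threshold=5, window_size=25, magnet_thres=0.5)
print(peaks[["Frequency", "Field OFF"]].head())

For concatenated spectra, the same function can be applied per group via DataFrame.groupby(...).apply(...), which is the split-combine-apply use the docstring describes.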
rspivak/slimit
src/slimit/parser.py
Parser.p_array_literal_2
python
def p_array_literal_2(self, p): items = p[2] if len(p) == 6: items.extend(p[4]) p[0] = ast.Array(items=items)
array_literal : LBRACKET element_list RBRACKET | LBRACKET element_list COMMA elision_opt RBRACKET
https://github.com/rspivak/slimit/blob/3533eba9ad5b39f3a015ae6269670022ab310847/src/slimit/parser.py#L250-L257
__author__ = 'Ruslan Spivak <[email protected]>' import ply.yacc from slimit import ast from slimit.lexer import Lexer try: from slimit import lextab, yacctab except ImportError: lextab, yacctab = 'lextab', 'yacctab' class Parser(object): def __init__(self, lex_optimize=True, lextab=lextab, yacc_optimize=True, yacctab=yacctab, yacc_debug=False): self.lex_optimize = lex_optimize self.lextab = lextab self.yacc_optimize = yacc_optimize self.yacctab = yacctab self.yacc_debug = yacc_debug self.lexer = Lexer() self.lexer.build(optimize=lex_optimize, lextab=lextab) self.tokens = self.lexer.tokens self.parser = ply.yacc.yacc( module=self, optimize=yacc_optimize, debug=yacc_debug, tabmodule=yacctab, start='program') self._error_tokens = {} def _has_been_seen_before(self, token): if token is None: return False key = token.type, token.value, token.lineno, token.lexpos return key in self._error_tokens def _mark_as_seen(self, token): if token is None: return key = token.type, token.value, token.lineno, token.lexpos self._error_tokens[key] = True def _raise_syntax_error(self, token): raise SyntaxError( 'Unexpected token (%s, %r) at %s:%s between %s and %s' % ( token.type, token.value, token.lineno, token.lexpos, self.lexer.prev_token, self.lexer.token()) ) def parse(self, text, debug=False): return self.parser.parse(text, lexer=self.lexer, debug=debug) def p_empty(self, p): pass def p_auto_semi(self, p): pass def p_error(self, token): if self._has_been_seen_before(token): self._raise_syntax_error(token) if token is None or token.type != 'SEMI': next_token = self.lexer.auto_semi(token) if next_token is not None: self._mark_as_seen(token) self.parser.errok() return next_token self._raise_syntax_error(token) def p_program(self, p): p[0] = ast.Program(p[1]) def p_source_elements(self, p): p[0] = p[1] def p_source_element_list(self, p): if len(p) == 2: p[0] = [p[1]] else: p[1].append(p[2]) p[0] = p[1] def p_source_element(self, p): p[0] = p[1] def p_statement(self, p): p[0] = p[1] def p_block(self, p): p[0] = ast.Block(p[2]) def p_literal(self, p): p[0] = p[1] def p_boolean_literal(self, p): p[0] = ast.Boolean(p[1]) def p_null_literal(self, p): p[0] = ast.Null(p[1]) def p_numeric_literal(self, p): p[0] = ast.Number(p[1]) def p_string_literal(self, p): p[0] = ast.String(p[1]) def p_regex_literal(self, p): p[0] = ast.Regex(p[1]) def p_identifier(self, p): p[0] = ast.Identifier(p[1]) def p_primary_expr(self, p): p[0] = p[1] def p_primary_expr_no_brace_1(self, p): p[1]._mangle_candidate = True p[1]._in_expression = True p[0] = p[1] def p_primary_expr_no_brace_2(self, p): p[0] = ast.This() def p_primary_expr_no_brace_3(self, p): p[0] = p[1] def p_primary_expr_no_brace_4(self, p): p[2]._parens = True p[0] = p[2] def p_array_literal_1(self, p): p[0] = ast.Array(items=p[2])
MIT License
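A brief usage sketch for the parser, exercising the array-literal production above through slimit's documented parse/round-trip API (assuming slimit is installed):

from slimit.parser import Parser

parser = Parser()
tree = parser.parse('var a = [1, 2, 3];')   # the array literal is parsed into an ast.Array node
print(tree.to_ecma())                       # prints the regenerated JavaScript source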
luna-klatzer/openhiven.py
openhivenpy/gateway/messagebroker.py
EventConsumer._cleanup
python
def _cleanup(self) -> None: del self.workers self.workers = {} del self._tasks self._tasks = {}
Removes all workers and clears any data that still exists
https://github.com/luna-klatzer/openhiven.py/blob/9184d6a77bde0ee3847dcb9ea7d399217a36c95d/openhivenpy/gateway/messagebroker.py#L442-L448
from __future__ import annotations import asyncio import logging from typing import Optional, List, Coroutine, Tuple, Dict from typing import TYPE_CHECKING from .. import utils from ..base_types import HivenObject if TYPE_CHECKING: from .. import HivenClient from ..exceptions import EventConsumerLoopError, WorkerTaskError from ..events import DispatchEventListener __all__ = ['DynamicEventBuffer', 'MessageBroker'] logger = logging.getLogger(__name__) async def _wait_until_done(task: asyncio.Task) -> None: while not task.done(): await asyncio.sleep(.05) class DynamicEventBuffer(list, HivenObject): def __init__(self, event: str, *args, **kwargs): self.event = event super().__init__(*args, **kwargs) def __repr__(self): info = [ ('event', self.event) ] return '<{} {}>'.format(self.__class__.__name__, ' '.join('%s=%s' % t for t in info)) def add_new_event( self, data: dict, args: Optional[tuple] = None, kwargs: Optional[dict] = None ): if kwargs is None: kwargs: Dict = {} if args is None: args: Tuple = () self.append( { 'data': data, 'args': args, 'kwargs': kwargs } ) def get_next_event(self) -> dict: return self.pop(0) class MessageBroker(HivenObject): def __init__(self, client: HivenClient): self.event_buffers = {} self.client = client self.event_consumer = EventConsumer(self) self.worker_loop: Optional[asyncio.Task] = None @property def running(self) -> bool: if self.worker_loop: return not getattr(self.worker_loop, 'done')() else: return False @property def _force_closing(self) -> bool: return getattr(self.client.connection, '_force_closing', False) def create_buffer(self, event: str, args, kwargs) -> DynamicEventBuffer: new_buffer = DynamicEventBuffer(event, *args, **kwargs) self.event_buffers[event] = new_buffer return new_buffer def get_buffer(self, event: str, args: Optional[tuple] = None, kwargs: Optional[dict] = None) -> DynamicEventBuffer: if kwargs is None: kwargs: Dict = {} if args is None: args: Tuple = () buffer = self.event_buffers.get(event) if buffer is not None: return buffer else: return self.create_buffer(event, args, kwargs) def _cleanup_buffers(self) -> None: del self.event_buffers self.event_buffers = {} async def close_loop(self) -> None: if self._force_closing: await self.event_consumer.close() if self.worker_loop.cancelled(): return self.worker_loop.cancel() await _wait_until_done(self.worker_loop) else: await _wait_until_done(self.worker_loop) await self.event_consumer.close() self._cleanup_buffers() async def run(self) -> None: self.worker_loop = asyncio.create_task( self.event_consumer.run_all_workers() ) try: await self.worker_loop except asyncio.CancelledError: logger.debug("Event Consumer stopped! 
All workers are cancelled!") except Exception as e: raise EventConsumerLoopError( "The event_consumer process loop failed to be kept alive" ) from e class Worker(HivenObject): def __init__(self, event: str, message_broker): self.assigned_event = event self.message_broker: MessageBroker = message_broker self.client: HivenClient = message_broker.client self._sequence_loop: Optional[asyncio.Task] = None self._listener_tasks: List[asyncio.Task] = [] self._cancel_called = False def __repr__(self): info = [ ('event', self.assigned_event), ('done', self.done()) ] return '<{} {}>'.format( self.__class__.__name__, ' '.join('%s=%s' % t for t in info) ) @property def assigned_event_buffer(self) -> DynamicEventBuffer: return self.message_broker.event_buffers.get(self.assigned_event) @property def closing(self) -> bool: return getattr(self.client.connection, '_closing', False) @property def force_closing(self) -> bool: return getattr(self.message_broker, '_force_closing', False) @staticmethod async def _gather_tasks(tasks: List[Coroutine]) -> None: await asyncio.gather(*tasks) def done(self) -> bool: return all([ self._tasks_done, self._sequence_loop.done() if self._sequence_loop else False, self._cancel_called ]) async def cancel(self) -> None: for task in self._listener_tasks: if task.done(): continue task.cancel() await _wait_until_done(task) if not self._sequence_loop.cancelled(): self._sequence_loop.cancel() await _wait_until_done(self._sequence_loop) self._cancel_called = True logger.debug(f"{repr(self)} cancelled") def _tasks_done(self) -> bool: return all(t.done() for t in self._listener_tasks) async def _wait_until_finished(self) -> None: while True: if self._tasks_done(): return await asyncio.sleep(.05) async def _loop_sequence(self) -> None: while not self.closing: if self.assigned_event_buffer: await self.run_one_sequence() await asyncio.sleep(.50) if self.force_closing: await self.cancel() else: await self._wait_until_finished() @utils.wrap_with_logging async def run_forever(self) -> Tuple: self._sequence_loop = asyncio.create_task(self._loop_sequence()) try: return await self._sequence_loop except asyncio.CancelledError: pass except Exception as e: raise WorkerTaskError(f"{repr(self)} failed to run") from e @utils.wrap_with_logging async def run_one_sequence(self): if self.assigned_event_buffer: event: dict = self.assigned_event_buffer.get_next_event() listeners: List[ DispatchEventListener] = self.client.active_listeners.get( self.assigned_event) if not listeners: return args: Tuple = event['args'] kwargs = event['kwargs'] try: tasks: List[Coroutine] = [ listener(*args, **kwargs) for listener in listeners ] if self.client.queue_events: task = asyncio.create_task(self._gather_tasks(tasks)) self._listener_tasks.append(task) await task else: task = asyncio.create_task(self._gather_tasks(tasks)) self._listener_tasks.append(task) except asyncio.CancelledError: logger.debug( f"Worker {repr(self)} was cancelled and " f"did not finish its tasks!" 
) except Exception as e: self.assigned_event_buffer.add_new_event(**event) raise RuntimeError( f"Failed to run listener tasks assigned to {repr(self)}" ) from e class EventConsumer(HivenObject): def __init__(self, message_broker: MessageBroker): self.workers: Dict[Worker] = {} self.message_broker = message_broker self.client = message_broker.client self._tasks: Optional[Dict[Worker, asyncio.Task]] = {} def get_worker(self, event) -> Worker: worker = self.workers.get(event) if not worker or getattr(worker, '_cancel_called', False) is True: worker = Worker(event, self.message_broker) self.workers[event] = worker return worker def tasks_done(self) -> bool: return all([ *(t.done() for t in self._tasks.values()), *(w.done() for w in self.workers.values()) ]) async def close(self) -> None: for w in self.workers.values(): w: Worker if w.done(): continue await w.cancel() for t in self._tasks.values(): t: asyncio.Task if t.done(): continue t.cancel() while not self.tasks_done(): await asyncio.sleep(.05) logger.debug(f"All workers and tasks of {repr(self)} were cancelled") self._cleanup()
MIT License
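The workers that _cleanup tears down are fed from DynamicEventBuffer instances; a small sketch of that buffer's queueing API, assuming openhiven.py is installed and the module path matches the record (the event name and payload are illustrative):

from openhivenpy.gateway.messagebroker import DynamicEventBuffer

buffer = DynamicEventBuffer("message_create")
buffer.add_new_event({"id": "123"}, args=("hello",))
event = buffer.get_next_event()
print(event["data"], event["args"], event["kwargs"])   # {'id': '123'} ('hello',) {}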
longcw/faster_rcnn_pytorch
faster_rcnn/datasets/nthu.py
nthu._get_default_path
python
def _get_default_path(self): return os.path.join(ROOT_DIR, 'data', 'NTHU')
Return the default path where nthu is expected to be installed.
https://github.com/longcw/faster_rcnn_pytorch/blob/d8f842dfa51e067105e6949999277a08daa3d743/faster_rcnn/datasets/nthu.py#L92-L96
import os import PIL import numpy as np import scipy.sparse import subprocess import cPickle import math import glob from .imdb import imdb from .imdb import ROOT_DIR from ..fast_rcnn.config import cfg class nthu(imdb): def __init__(self, image_set, nthu_path=None): imdb.__init__(self, 'nthu_' + image_set) self._image_set = image_set self._nthu_path = self._get_default_path() if nthu_path is None else nthu_path self._data_path = os.path.join(self._nthu_path, 'data') self._classes = ('__background__', 'Car', 'Pedestrian', 'Cyclist') self._class_to_ind = dict(zip(self.classes, xrange(self.num_classes))) self._image_ext = '.jpg' self._image_index = self._load_image_set_index() if cfg.IS_RPN: self._roidb_handler = self.gt_roidb else: self._roidb_handler = self.region_proposal_roidb self._num_subclasses = 227 + 36 + 36 + 1 filename = os.path.join(self._nthu_path, 'mapping.txt') assert os.path.exists(filename), 'Path does not exist: {}'.format(filename) mapping = np.zeros(self._num_subclasses, dtype=np.int) with open(filename) as f: for line in f: words = line.split() subcls = int(words[0]) mapping[subcls] = self._class_to_ind[words[1]] self._subclass_mapping = mapping self.config = {'top_k': 100000} self._num_boxes_all = np.zeros(self.num_classes, dtype=np.int) self._num_boxes_covered = np.zeros(self.num_classes, dtype=np.int) self._num_boxes_proposal = 0 assert os.path.exists(self._nthu_path), 'NTHU path does not exist: {}'.format(self._nthu_path) assert os.path.exists(self._data_path), 'Path does not exist: {}'.format(self._data_path) def image_path_at(self, i): return self.image_path_from_index(self.image_index[i]) def image_path_from_index(self, index): prefix = self._image_set image_path = os.path.join(self._data_path, prefix, index + self._image_ext) assert os.path.exists(image_path), 'Path does not exist: {}'.format(image_path) return image_path def _load_image_set_index(self): image_set_file = os.path.join(self._data_path, self._image_set + '.txt') assert os.path.exists(image_set_file), 'Path does not exist: {}'.format(image_set_file) with open(image_set_file) as f: image_index = [x.rstrip('\n') for x in f.readlines()] return image_index
MIT License
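A standalone sketch of what _get_default_path composes, useful for checking where the loader expects the NTHU data before constructing the imdb (ROOT_DIR below is a stand-in for the package constant):

import os

ROOT_DIR = os.path.abspath(".")                       # stand-in for faster_rcnn's ROOT_DIR
default_path = os.path.join(ROOT_DIR, 'data', 'NTHU')
print(default_path, os.path.isdir(default_path))      # the nthu constructor asserts this directory exists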
alexa/alexa-apis-for-python
ask-smapi-model/ask_smapi_model/v1/skill/simulations/input.py
Input.__init__
python
def __init__(self, content=None): self.__discriminator_value = None self.content = content
:param content: A string corresponding to the utterance text of what a customer would say to Alexa. :type content: (optional) str
https://github.com/alexa/alexa-apis-for-python/blob/bfe5e694daaca71bfb1a4199ca8d2514f1cac6c9/ask-smapi-model/ask_smapi_model/v1/skill/simulations/input.py#L44-L53
import pprint import re import six import typing from enum import Enum if typing.TYPE_CHECKING: from typing import Dict, List, Optional, Union, Any from datetime import datetime class Input(object): deserialized_types = { 'content': 'str' } attribute_map = { 'content': 'content' } supports_multiple_types = False
Apache License 2.0
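A minimal usage sketch for the simulations Input model, imported from the module path in the record; the utterance text is illustrative:

from ask_smapi_model.v1.skill.simulations.input import Input

simulation_input = Input(content="open my example skill")
print(simulation_input.content)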
boxed/mutmut
mutmut/__init__.py
guess_paths_to_mutate
python
def guess_paths_to_mutate(): this_dir = os.getcwd().split(os.sep)[-1] if isdir('lib'): return 'lib' elif isdir('src'): return 'src' elif isdir(this_dir): return this_dir elif isdir(this_dir.replace('-', '_')): return this_dir.replace('-', '_') elif isdir(this_dir.replace(' ', '_')): return this_dir.replace(' ', '_') elif isdir(this_dir.replace('-', '')): return this_dir.replace('-', '') elif isdir(this_dir.replace(' ', '')): return this_dir.replace(' ', '') raise FileNotFoundError( 'Could not figure out where the code to mutate is. ' 'Please specify it on the command line using --paths-to-mutate, ' 'or by adding "paths_to_mutate=code_dir" in setup.cfg to the [mutmut] section.')
Guess the path to source code to mutate :rtype: str
https://github.com/boxed/mutmut/blob/cce78241aa116d14c4ae38ecd1b2c84e659126d7/mutmut/__init__.py#L896-L919
import fnmatch import itertools import multiprocessing import os import re import shlex import subprocess import sys from configparser import ( ConfigParser, NoOptionError, NoSectionError, ) from copy import copy as copy_obj from functools import wraps from io import ( open, TextIOBase, ) from os.path import isdir from shutil import ( move, copy, ) from threading import ( Timer, Thread, ) from time import time from parso import parse from parso.python.tree import Name, Number, Keyword __version__ = '2.3.0' if os.getcwd() not in sys.path: sys.path.insert(0, os.getcwd()) try: import mutmut_config except ImportError: mutmut_config = None class RelativeMutationID(object): def __init__(self, line, index, line_number, filename=None): self.line = line self.index = index self.line_number = line_number self.filename = filename def __repr__(self): return 'MutationID(line="{}", index={}, line_number={}, filename={})'.format(self.line, self.index, self.line_number, self.filename) def __eq__(self, other): return (self.line, self.index, self.line_number) == (other.line, other.index, other.line_number) def __hash__(self): return hash((self.line, self.index, self.line_number)) ALL = RelativeMutationID(filename='%all%', line='%all%', index=-1, line_number=-1) class InvalidASTPatternException(Exception): pass class ASTPattern(object): def __init__(self, source, **definitions): if definitions is None: definitions = {} source = source.strip() self.definitions = definitions self.module = parse(source) self.markers = [] def get_leaf(line, column, of_type=None): r = self.module.children[0].get_leaf_for_position((line, column)) while of_type is not None and r.type != of_type: r = r.parent return r def parse_markers(node): if hasattr(node, '_split_prefix'): for x in node._split_prefix(): parse_markers(x) if hasattr(node, 'children'): for x in node.children: parse_markers(x) if node.type == 'comment': line, column = node.start_pos for match in re.finditer(r'\^(?P<value>[^\^]*)', node.value): name = match.groupdict()['value'].strip() d = definitions.get(name, {}) assert set(d.keys()) | {'of_type', 'marker_type'} == {'of_type', 'marker_type'} self.markers.append(dict( node=get_leaf(line - 1, column + match.start(), of_type=d.get('of_type')), marker_type=d.get('marker_type'), name=name, )) parse_markers(self.module) pattern_nodes = [x['node'] for x in self.markers if x['name'] == 'match' or x['name'] == ''] if len(pattern_nodes) != 1: raise InvalidASTPatternException("Found more than one match node. 
Match nodes are nodes with an empty name or with the explicit name 'match'") self.pattern = pattern_nodes[0] self.marker_type_by_id = {id(x['node']): x['marker_type'] for x in self.markers} def matches(self, node, pattern=None, skip_child=None): if pattern is None: pattern = self.pattern check_value = True check_children = True if pattern.type == 'name' and pattern.value.startswith('_') and pattern.value[1:] in ('any', node.type): check_value = False elif id(pattern) in self.marker_type_by_id: if self.marker_type_by_id[id(pattern)] in (pattern.type, 'any'): check_value = False check_children = False elif pattern.type != node.type: return False if check_children and hasattr(pattern, 'children'): if len(pattern.children) != len(node.children): return False for pattern_child, node_child in zip(pattern.children, node.children): if node_child is skip_child: continue if not self.matches(node=node_child, pattern=pattern_child, skip_child=node_child): return False if check_value and hasattr(pattern, 'value'): if pattern.value != node.value: return False if pattern.parent.type != 'file_input': if skip_child != node: return self.matches(node=node.parent, pattern=pattern.parent, skip_child=node) return True dunder_whitelist = [ 'all', 'version', 'title', 'package_name', 'author', 'description', 'email', 'version', 'license', 'copyright', ] class SkipException(Exception): pass UNTESTED = 'untested' OK_KILLED = 'ok_killed' OK_SUSPICIOUS = 'ok_suspicious' BAD_TIMEOUT = 'bad_timeout' BAD_SURVIVED = 'bad_survived' SKIPPED = 'skipped' MUTANT_STATUSES = { "killed": OK_KILLED, "timeout": BAD_TIMEOUT, "suspicious": OK_SUSPICIOUS, "survived": BAD_SURVIVED, "skipped": SKIPPED, "untested": UNTESTED, } def number_mutation(value, **_): suffix = '' if value.upper().endswith('L'): suffix = value[-1] value = value[:-1] if value.upper().endswith('J'): suffix = value[-1] value = value[:-1] if value.startswith('0o'): base = 8 value = value[2:] elif value.startswith('0x'): base = 16 value = value[2:] elif value.startswith('0b'): base = 2 value = value[2:] elif value.startswith('0') and len(value) > 1 and value[1] != '.': base = 8 value = value[1:] else: base = 10 try: parsed = int(value, base=base) except ValueError: parsed = float(value) result = repr(parsed + 1) if not result.endswith(suffix): result += suffix return result def string_mutation(value, **_): prefix = value[:min(x for x in [value.find('"'), value.find("'")] if x != -1)] value = value[len(prefix):] if value.startswith('"""') or value.startswith("'''"): return prefix + value return prefix + value[0] + 'XX' + value[1:-1] + 'XX' + value[-1] def partition_node_list(nodes, value): for i, n in enumerate(nodes): if hasattr(n, 'value') and n.value == value: return nodes[:i], n, nodes[i + 1:] assert False, "didn't find node to split on" def lambda_mutation(children, **_): pre, op, post = partition_node_list(children, value=':') if len(post) == 1 and getattr(post[0], 'value', None) == 'None': return pre + [op] + [Number(value=' 0', start_pos=post[0].start_pos)] else: return pre + [op] + [Keyword(value=' None', start_pos=post[0].start_pos)] NEWLINE = {'formatting': [], 'indent': '', 'type': 'endl', 'value': ''} def argument_mutation(children, context, **_): if len(context.stack) >= 3 and context.stack[-3].type in ('power', 'atom_expr'): stack_pos_of_power_node = -3 elif len(context.stack) >= 4 and context.stack[-4].type in ('power', 'atom_expr'): stack_pos_of_power_node = -4 else: return power_node = context.stack[stack_pos_of_power_node] if 
power_node.children[0].type == 'name' and power_node.children[0].value in context.dict_synonyms: c = children[0] if c.type == 'name': children = children[:] children[0] = Name(c.value + 'XX', start_pos=c.start_pos, prefix=c.prefix) return children def keyword_mutation(value, context, **_): if len(context.stack) > 2 and context.stack[-2].type in ('comp_op', 'sync_comp_for') and value in ('in', 'is'): return if len(context.stack) > 1 and context.stack[-2].type == 'for_stmt': return return { 'not': '', 'is': 'is not', 'in': 'not in', 'break': 'continue', 'continue': 'break', 'True': 'False', 'False': 'True', }.get(value) import_from_star_pattern = ASTPattern(""" from _name import * # ^ """) def operator_mutation(value, node, **_): if import_from_star_pattern.matches(node=node): return if value in ('*', '**') and node.parent.type == 'param': return if value == '*' and node.parent.type == 'parameters': return if value in ('*', '**') and node.parent.type in ('argument', 'arglist'): return return { '+': '-', '-': '+', '*': '/', '/': '*', '//': '/', '%': '/', '<<': '>>', '>>': '<<', '&': '|', '|': '&', '^': '&', '**': '*', '~': '', '+=': ['-=', '='], '-=': ['+=', '='], '*=': ['/=', '='], '/=': ['*=', '='], '//=': ['/=', '='], '%=': ['/=', '='], '<<=': ['>>=', '='], '>>=': ['<<=', '='], '&=': ['|=', '='], '|=': ['&=', '='], '^=': ['&=', '='], '**=': ['*=', '='], '~=': '=', '<': '<=', '<=': '<', '>': '>=', '>=': '>', '==': '!=', '!=': '==', '<>': '==', }.get(value) def and_or_test_mutation(children, node, **_): children = children[:] children[1] = Keyword( value={'and': ' or', 'or': ' and'}[children[1].value], start_pos=node.start_pos, ) return children def expression_mutation(children, **_): def handle_assignment(children): mutation_index = -1 if getattr(children[mutation_index], 'value', '---') != 'None': x = ' None' else: x = ' ""' children = children[:] children[mutation_index] = Name(value=x, start_pos=children[mutation_index].start_pos) return children if children[0].type == 'operator' and children[0].value == ':': if len(children) > 2 and children[2].value == '=': children = children[:] children[1:] = handle_assignment(children[1:]) elif children[1].type == 'operator' and children[1].value == '=': children = handle_assignment(children) return children def decorator_mutation(children, **_): assert children[-1].type == 'newline' return children[-1:] array_subscript_pattern = ASTPattern(""" _name[_any] # ^ """) function_call_pattern = ASTPattern(""" _name(_any) # ^ """) def name_mutation(node, value, **_): simple_mutants = { 'True': 'False', 'False': 'True', 'deepcopy': 'copy', 'None': '""', } if value in simple_mutants: return simple_mutants[value] if array_subscript_pattern.matches(node=node): return 'None' if function_call_pattern.matches(node=node): return 'None' mutations_by_type = { 'operator': dict(value=operator_mutation), 'keyword': dict(value=keyword_mutation), 'number': dict(value=number_mutation), 'name': dict(value=name_mutation), 'string': dict(value=string_mutation), 'argument': dict(children=argument_mutation), 'or_test': dict(children=and_or_test_mutation), 'and_test': dict(children=and_or_test_mutation), 'lambdef': dict(children=lambda_mutation), 'expr_stmt': dict(children=expression_mutation), 'decorator': dict(children=decorator_mutation), 'annassign': dict(children=expression_mutation), } def should_exclude(context, config): if config is None or config.covered_lines_by_filename is None: return False try: covered_lines = config.covered_lines_by_filename[context.filename] 
except KeyError: if config.coverage_data is not None: covered_lines = config.coverage_data.get(os.path.abspath(context.filename)) config.covered_lines_by_filename[context.filename] = covered_lines else: covered_lines = None if covered_lines is None: return True current_line = context.current_line_index + 1 if current_line not in covered_lines: return True return False class Context(object): def __init__(self, source=None, mutation_id=ALL, dict_synonyms=None, filename=None, config=None, index=0): self.index = index self.remove_newline_at_end = False self._source = None self._set_source(source) self.mutation_id = mutation_id self.performed_mutation_ids = [] assert isinstance(mutation_id, RelativeMutationID) self.current_line_index = 0 self.filename = filename self.stack = [] self.dict_synonyms = (dict_synonyms or []) + ['dict'] self._source_by_line_number = None self._pragma_no_mutate_lines = None self._path_by_line = None self.config = config self.skip = False def exclude_line(self): return self.current_line_index in self.pragma_no_mutate_lines or should_exclude(context=self, config=self.config) @property def source(self): if self._source is None: with open(self.filename) as f: self._set_source(f.read()) return self._source def _set_source(self, source): if source and source[-1] != '\n': source += '\n' self.remove_newline_at_end = True self._source = source @property def source_by_line_number(self): if self._source_by_line_number is None: self._source_by_line_number = self.source.split('\n') return self._source_by_line_number @property def current_source_line(self): return self.source_by_line_number[self.current_line_index] @property def mutation_id_of_current_index(self): return RelativeMutationID(filename=self.filename, line=self.current_source_line, index=self.index, line_number=self.current_line_index) @property def pragma_no_mutate_lines(self): if self._pragma_no_mutate_lines is None: self._pragma_no_mutate_lines = { i for i, line in enumerate(self.source_by_line_number) if '# pragma:' in line and 'no mutate' in line.partition('# pragma:')[-1] } return self._pragma_no_mutate_lines def should_mutate(self, node): if self.config and node.type not in self.config.mutation_types_to_apply: return False if self.mutation_id == ALL: return True return self.mutation_id in (ALL, self.mutation_id_of_current_index) def mutate(context): try: result = parse(context.source, error_recovery=False) except Exception: print('Failed to parse {}. 
Internal error from parso follows.'.format(context.filename)) print('----------------------------------') raise mutate_list_of_nodes(result, context=context) mutated_source = result.get_code().replace(' not not ', ' ') if context.remove_newline_at_end: assert mutated_source[-1] == '\n' mutated_source = mutated_source[:-1] if context.performed_mutation_ids: if context.source == mutated_source: raise RuntimeError( "Mutation context states that a mutation occurred but the " "mutated source remains the same as original") context.mutated_source = mutated_source return mutated_source, len(context.performed_mutation_ids) def mutate_node(node, context): context.stack.append(node) try: if node.type in ('tfpdef', 'import_from', 'import_name'): return if node.type == 'atom_expr' and node.children and node.children[0].type == 'name' and node.children[0].value == '__import__': return if node.start_pos[0] - 1 != context.current_line_index: context.current_line_index = node.start_pos[0] - 1 context.index = 0 if node.type == 'expr_stmt': if node.children[0].type == 'name' and node.children[0].value.startswith('__') and node.children[0].value.endswith('__'): if node.children[0].value[2:-2] in dunder_whitelist: return if node.type == 'annassign' and len(node.children) == 2: return if hasattr(node, 'children'): mutate_list_of_nodes(node, context=context) if context.performed_mutation_ids and context.mutation_id != ALL: return mutation = mutations_by_type.get(node.type) if mutation is None: return for key, value in sorted(mutation.items()): old = getattr(node, key) if context.exclude_line(): continue new = value( context=context, node=node, value=getattr(node, 'value', None), children=getattr(node, 'children', None), ) if isinstance(new, list) and not isinstance(old, list): new_list = new else: new_list = [new] for new in reversed(new_list): assert not callable(new) if new is not None and new != old: if hasattr(mutmut_config, 'pre_mutation_ast'): mutmut_config.pre_mutation_ast(context=context) if context.should_mutate(node): context.performed_mutation_ids.append(context.mutation_id_of_current_index) setattr(node, key, new) context.index += 1 if context.performed_mutation_ids and context.mutation_id != ALL: return finally: context.stack.pop() def mutate_list_of_nodes(node, context): return_annotation_started = False for child_node in node.children: if child_node.type == 'operator' and child_node.value == '->': return_annotation_started = True if return_annotation_started and child_node.type == 'operator' and child_node.value == ':': return_annotation_started = False if return_annotation_started: continue mutate_node(child_node, context=context) if context.performed_mutation_ids and context.mutation_id != ALL: return def list_mutations(context): assert context.mutation_id == ALL mutate(context) return context.performed_mutation_ids def mutate_file(backup, context): with open(context.filename) as f: original = f.read() if backup: with open(context.filename + '.bak', 'w') as f: f.write(original) mutated, _ = mutate(context) with open(context.filename, 'w') as f: f.write(mutated) return original, mutated def queue_mutants(*, progress, config, mutants_queue, mutations_by_file): from mutmut.cache import get_cached_mutation_statuses try: index = 0 for filename, mutations in mutations_by_file.items(): cached_mutation_statuses = get_cached_mutation_statuses(filename, mutations, config.hash_of_tests) with open(filename) as f: source = f.read() for mutation_id in mutations: cached_status = 
cached_mutation_statuses.get(mutation_id) if cached_status != UNTESTED: progress.register(cached_status) continue context = Context( mutation_id=mutation_id, filename=filename, dict_synonyms=config.dict_synonyms, config=copy_obj(config), source=source, index=index, ) mutants_queue.put(('mutant', context)) index += 1 finally: mutants_queue.put(('end', None)) def check_mutants(mutants_queue, results_queue, cycle_process_after): def feedback(line): results_queue.put(('progress', line, None, None)) did_cycle = False try: count = 0 while True: command, context = mutants_queue.get() if command == 'end': break status = run_mutation(context, feedback) results_queue.put(('status', status, context.filename, context.mutation_id)) count += 1 if count == cycle_process_after: results_queue.put(('cycle', None, None, None)) did_cycle = True break finally: if not did_cycle: results_queue.put(('end', None, None, None)) def run_mutation(context: Context, callback) -> str: from mutmut.cache import cached_mutation_status cached_status = cached_mutation_status(context.filename, context.mutation_id, context.config.hash_of_tests) if cached_status != UNTESTED and context.config.total != 1: return cached_status config = context.config if hasattr(mutmut_config, 'pre_mutation'): context.current_line_index = context.mutation_id.line_number try: mutmut_config.pre_mutation(context=context) except SkipException: return SKIPPED if context.skip: return SKIPPED if config.pre_mutation: result = subprocess.check_output(config.pre_mutation, shell=True).decode().strip() if result and not config.swallow_output: callback(result) try: mutate_file( backup=True, context=context ) start = time() try: survived = tests_pass(config=config, callback=callback) if survived and config.test_command != config._default_test_command and config.rerun_all: config.test_command = config._default_test_command survived = tests_pass(config=config, callback=callback) except TimeoutError: return BAD_TIMEOUT time_elapsed = time() - start if not survived and time_elapsed > config.test_time_base + (config.baseline_time_elapsed * config.test_time_multipler): return OK_SUSPICIOUS if survived: return BAD_SURVIVED else: return OK_KILLED except SkipException: return SKIPPED finally: move(context.filename + '.bak', context.filename) config.test_command = config._default_test_command if config.post_mutation: result = subprocess.check_output(config.post_mutation, shell=True).decode().strip() if result and not config.swallow_output: callback(result) class Config(object): def __init__(self, swallow_output, test_command, covered_lines_by_filename, baseline_time_elapsed, test_time_multiplier, test_time_base, dict_synonyms, total, using_testmon, tests_dirs, hash_of_tests, pre_mutation, post_mutation, coverage_data, paths_to_mutate, mutation_types_to_apply, no_progress, rerun_all): self.swallow_output = swallow_output self.test_command = self._default_test_command = test_command self.covered_lines_by_filename = covered_lines_by_filename self.baseline_time_elapsed = baseline_time_elapsed self.test_time_multipler = test_time_multiplier self.test_time_base = test_time_base self.dict_synonyms = dict_synonyms self.total = total self.using_testmon = using_testmon self.tests_dirs = tests_dirs self.hash_of_tests = hash_of_tests self.post_mutation = post_mutation self.pre_mutation = pre_mutation self.coverage_data = coverage_data self.paths_to_mutate = paths_to_mutate self.mutation_types_to_apply = mutation_types_to_apply self.no_progress = no_progress self.rerun_all = rerun_all 
def tests_pass(config: Config, callback) -> bool: if config.using_testmon: copy('.testmondata-initial', '.testmondata') use_special_case = True if use_special_case and config.test_command.startswith(hammett_prefix): return hammett_tests_pass(config, callback) returncode = popen_streaming_output(config.test_command, callback, timeout=config.baseline_time_elapsed * 10) return returncode != 1 def config_from_setup_cfg(**defaults): def decorator(f): @wraps(f) def wrapper(*args, **kwargs): config_parser = ConfigParser() config_parser.read('setup.cfg') def s(key, default): try: return config_parser.get('mutmut', key) except (NoOptionError, NoSectionError): return default for k in list(kwargs.keys()): if not kwargs[k]: kwargs[k] = s(k, defaults.get(k)) f(*args, **kwargs) return wrapper return decorator def status_printer(): last_len = [0] def p(s): s = next(spinner) + ' ' + s len_s = len(s) output = '\r' + s + (' ' * max(last_len[0] - len_s, 0)) sys.stdout.write(output) sys.stdout.flush() last_len[0] = len_s return p
BSD 3-Clause New or Revised License
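A short usage sketch for guess_paths_to_mutate; it must be run from a project root that contains a lib/ or src/ directory (or a directory matching the project name), otherwise it raises FileNotFoundError with a hint to set paths_to_mutate in setup.cfg:

from mutmut import guess_paths_to_mutate

print(guess_paths_to_mutate())   # e.g. 'src' when run from a project that keeps its code in src/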
deepmind/bsuite
bsuite/experiments/summary_analysis.py
bsuite_radar_plot
python
def bsuite_radar_plot(summary_data: pd.DataFrame, sweep_vars: Optional[Sequence[str]] = None): fig = plt.figure(figsize=(8, 8), facecolor='white') ax = fig.add_subplot(111, polar=True) try: ax.set_axis_bgcolor('white') except AttributeError: ax.set_facecolor('white') all_tags = sorted(summary_data['tag'].unique()) if sweep_vars is None: summary_data['agent'] = 'agent' elif len(sweep_vars) == 1: summary_data['agent'] = summary_data[sweep_vars[0]].astype(str) else: summary_data['agent'] = (summary_data[sweep_vars].astype(str) .apply(lambda x: x.name + '=' + x, axis=0) .apply(lambda x: '\n'.join(x), axis=1) ) if len(summary_data.agent.unique()) > 5: print('WARNING: We do not recommend radar plot for more than 5 agents.') thetas = np.linspace(0, 2*np.pi, 100) ax.fill(thetas, [0.25,] * 100, color='k', alpha=0.05) ax.fill(thetas, [0.5,] * 100, color='k', alpha=0.05) ax.fill(thetas, [0.75,] * 100, color='k', alpha=0.03) ax.fill(thetas, [1.,] * 100, color='k', alpha=0.01) palette = lambda x: plotting.CATEGORICAL_COLOURS[x] if sweep_vars: sweep_data_ = summary_data.groupby('agent') for aid, (agent, sweep_df) in enumerate(sweep_data_): _radar(sweep_df, ax, agent, all_tags, color=palette(aid)) if len(sweep_vars) == 1: label = sweep_vars[0] if label == 'experiment': label = 'agent' legend = ax.legend(loc=(1.1, 0.), ncol=1, title=label) ax.get_legend().get_title().set_fontsize('20') ax.get_legend().get_title().set_fontname('serif') ax.get_legend().get_title().set_color('k') ax.get_legend().get_title().set_alpha(0.75) legend._legend_box.align = 'left' else: legend = ax.legend(loc=(1.1, 0.), ncol=1,) plt.setp(legend.texts, fontname='serif') frame = legend.get_frame() frame.set_color('white') for text in legend.get_texts(): text.set_color('grey') else: _radar(summary_data, ax, '', all_tags, color=palette(0)) for line in ax.xaxis.get_gridlines(): line.set_color('grey') line.set_alpha(0.95) line.set_linestyle(':') line.set_linewidth(2) for line in ax.yaxis.get_gridlines(): line.set_color('grey') line.set_alpha(0.95) line.set_linestyle(':') line.set_linewidth(2) plt.xticks(color='grey', fontname='serif') ax.set_rlabel_position(0) plt.yticks( [0, 0.25, 0.5, 0.75, 1], ['', '.25', '.5', '.75', '1'], color='k', alpha=0.75, fontsize=16, fontname='serif') ax.set_axisbelow(False) return fig
Output a radar plot of bsuite data from bsuite_summary by tag.
https://github.com/deepmind/bsuite/blob/afdeae850b08108d2247a1802567bb7f404d9833/bsuite/experiments/summary_analysis.py#L342-L421
from typing import Callable, Mapping, NamedTuple, Optional, Sequence, Union from bsuite.experiments.bandit import analysis as bandit_analysis from bsuite.experiments.bandit_noise import analysis as bandit_noise_analysis from bsuite.experiments.bandit_scale import analysis as bandit_scale_analysis from bsuite.experiments.cartpole import analysis as cartpole_analysis from bsuite.experiments.cartpole_noise import analysis as cartpole_noise_analysis from bsuite.experiments.cartpole_scale import analysis as cartpole_scale_analysis from bsuite.experiments.cartpole_swingup import analysis as cartpole_swingup_analysis from bsuite.experiments.catch import analysis as catch_analysis from bsuite.experiments.catch_noise import analysis as catch_noise_analysis from bsuite.experiments.catch_scale import analysis as catch_scale_analysis from bsuite.experiments.deep_sea import analysis as deep_sea_analysis from bsuite.experiments.deep_sea_stochastic import analysis as deep_sea_stochastic_analysis from bsuite.experiments.discounting_chain import analysis as discounting_chain_analysis from bsuite.experiments.memory_len import analysis as memory_len_analysis from bsuite.experiments.memory_size import analysis as memory_size_analysis from bsuite.experiments.mnist import analysis as mnist_analysis from bsuite.experiments.mnist_noise import analysis as mnist_noise_analysis from bsuite.experiments.mnist_scale import analysis as mnist_scale_analysis from bsuite.experiments.mountain_car import analysis as mountain_car_analysis from bsuite.experiments.mountain_car_noise import analysis as mountain_car_noise_analysis from bsuite.experiments.mountain_car_scale import analysis as mountain_car_scale_analysis from bsuite.experiments.umbrella_distract import analysis as umbrella_distract_analysis from bsuite.experiments.umbrella_length import analysis as umbrella_length_analysis from bsuite.utils import plotting import matplotlib.pyplot as plt import numpy as np import pandas as pd import plotnine as gg class BSuiteSummary(NamedTuple): score: Callable[[pd.DataFrame], float] type: str tags: Sequence[str] episode: int def _parse_bsuite(package) -> BSuiteSummary: return BSuiteSummary( score=package.score, type=package.TAGS[0], tags=package.TAGS, episode=package.NUM_EPISODES, ) BSUITE_INFO = dict( bandit=_parse_bsuite(bandit_analysis), bandit_noise=_parse_bsuite(bandit_noise_analysis), bandit_scale=_parse_bsuite(bandit_scale_analysis), cartpole=_parse_bsuite(cartpole_analysis), cartpole_noise=_parse_bsuite(cartpole_noise_analysis), cartpole_scale=_parse_bsuite(cartpole_scale_analysis), cartpole_swingup=_parse_bsuite(cartpole_swingup_analysis), catch=_parse_bsuite(catch_analysis), catch_noise=_parse_bsuite(catch_noise_analysis), catch_scale=_parse_bsuite(catch_scale_analysis), deep_sea=_parse_bsuite(deep_sea_analysis), deep_sea_stochastic=_parse_bsuite(deep_sea_stochastic_analysis), discounting_chain=_parse_bsuite(discounting_chain_analysis), memory_len=_parse_bsuite(memory_len_analysis), memory_size=_parse_bsuite(memory_size_analysis), mnist=_parse_bsuite(mnist_analysis), mnist_noise=_parse_bsuite(mnist_noise_analysis), mnist_scale=_parse_bsuite(mnist_scale_analysis), mountain_car=_parse_bsuite(mountain_car_analysis), mountain_car_noise=_parse_bsuite(mountain_car_noise_analysis), mountain_car_scale=_parse_bsuite(mountain_car_scale_analysis), umbrella_distract=_parse_bsuite(umbrella_distract_analysis), umbrella_length=_parse_bsuite(umbrella_length_analysis), ) ALL_TAGS = set() for bsuite_summary in BSUITE_INFO.values(): 
ALL_TAGS = ALL_TAGS.union(set(bsuite_summary.tags)) def _is_finished(df: pd.DataFrame, n_min: int) -> bool: max_time = df.groupby('bsuite_id')['episode'].max().reset_index() return max_time['episode'].min() >= n_min def _bsuite_score_single(df: pd.DataFrame, experiment_info: Mapping[str, BSuiteSummary], verbose: bool = False) -> pd.DataFrame: data = [] for env_name, env_data in df.groupby('bsuite_env'): if env_name not in experiment_info: if verbose: print('WARNING: {}_score not found in load.py and so is excluded.' .format(env_name)) else: b_summary = experiment_info[env_name] data.append({ 'bsuite_env': env_name, 'score': b_summary.score(env_data), 'type': b_summary.type, 'tags': str(b_summary.tags), 'finished': _is_finished(env_data, b_summary.episode), }) return pd.DataFrame(data) def bsuite_score(df: pd.DataFrame, sweep_vars: Optional[Sequence[str]] = None) -> pd.DataFrame: score_fun = lambda x: _bsuite_score_single(x, BSUITE_INFO) if sweep_vars: score_df = df.groupby(sweep_vars).apply(score_fun).reset_index() else: score_df = score_fun(df) for col in df.columns: if col in ['level_0', 'level_1', 'level_2']: score_df.drop(col, axis=1, inplace=True) return score_df def _summarize_single_by_tag(score_df: pd.DataFrame, unique_tags: Sequence[str], tags_column: str) -> pd.DataFrame: df = score_df.copy() for tag in unique_tags: df[tag] = df[tags_column].str.contains(tag) data = [] for tag in unique_tags: ave_score = df.loc[df[tag], 'score'].mean() data.append({'tag': tag, 'score': ave_score}) return pd.DataFrame(data) def ave_score_by_tag(score_df: pd.DataFrame, sweep_vars: Sequence[str]) -> pd.DataFrame: summary_fun = lambda x: _summarize_single_by_tag(x, list(ALL_TAGS), 'tags') if sweep_vars: summary_df = score_df.groupby(sweep_vars).apply(summary_fun).reset_index() else: summary_df = summary_fun(score_df) return summary_df def _gen_ordered_experiments() -> Sequence[str]: basics = ['bandit', 'mnist', 'catch', 'mountain_car', 'cartpole'] noise = [env + '_noise' for env in basics] scale = [env + '_scale' for env in basics] explore = ['deep_sea', 'deep_sea_stochastic', 'cartpole_swingup'] credit = ['umbrella_length', 'umbrella_distract', 'discounting_chain'] memory = ['memory_len', 'memory_size'] return basics + noise + scale + explore + credit + memory _ORDERED_EXPERIMENTS = _gen_ordered_experiments() _ORDERED_TYPES = [ 'basic', 'noise', 'scale', 'exploration', 'credit_assignment', 'memory'] def _clean_bar_plot_data( df_in: pd.DataFrame, sweep_vars: Optional[Sequence[str]] = None) -> pd.DataFrame: df = df_in.copy() df['env'] = pd.Categorical( df.bsuite_env, categories=_ORDERED_EXPERIMENTS, ordered=True) df['type'] = pd.Categorical( df['type'], categories=_ORDERED_TYPES, ordered=True) if sweep_vars is None: df['agent'] = 'agent' elif len(sweep_vars) == 1: df['agent'] = df[sweep_vars[0]].astype(str) else: df['agent'] = (df[sweep_vars].astype(str) .apply(lambda x: x.name + '=' + x, axis=0) .apply(lambda x: '\n'.join(x), axis=1) ) return df def bsuite_bar_plot(df_in: pd.DataFrame, sweep_vars: Optional[Sequence[str]] = None) -> gg.ggplot: df = _clean_bar_plot_data(df_in, sweep_vars) p = (gg.ggplot(df) + gg.aes(x='env', y='score', colour='type', fill='type') + gg.geom_bar(position='dodge', stat='identity') + gg.geom_hline(yintercept=1., linetype='dashed', alpha=0.5) + gg.scale_colour_manual(plotting.CATEGORICAL_COLOURS) + gg.scale_fill_manual(plotting.CATEGORICAL_COLOURS) + gg.xlab('experiment') + gg.theme(axis_text_x=gg.element_text(angle=25, hjust=1)) ) if not all(df.finished): p += 
gg.aes(alpha='finished') p += gg.scale_alpha_discrete(range=[0.3, 1.0]) if sweep_vars: p += gg.facet_wrap(sweep_vars, labeller='label_both', ncol=1) n_hypers = df[sweep_vars].drop_duplicates().shape[0] else: n_hypers = 1 return p + gg.theme(figure_size=(14, 3 * n_hypers + 1)) def _bar_plot_compare(df: pd.DataFrame) -> gg.ggplot: p = (gg.ggplot(df) + gg.aes(x='agent', y='score', colour='agent', fill='agent') + gg.geom_bar(position='dodge', stat='identity') + gg.geom_hline(yintercept=1., linetype='dashed', alpha=0.5) + gg.theme(axis_text_x=gg.element_text(angle=25, hjust=1)) + gg.scale_colour_manual(plotting.CATEGORICAL_COLOURS) + gg.scale_fill_manual(plotting.CATEGORICAL_COLOURS) ) if not all(df.finished): p += gg.aes(alpha='finished') p += gg.scale_alpha_discrete(range=[0.3, 1.0]) return p def bsuite_bar_plot_compare( df_in: pd.DataFrame, sweep_vars: Optional[Sequence[str]] = None) -> gg.ggplot: df = _clean_bar_plot_data(df_in, sweep_vars) p = _bar_plot_compare(df) p += gg.facet_wrap('env', labeller='label_both') p += gg.theme(figure_size=(18, 16)) return p def plot_single_experiment( summary_df: pd.DataFrame, bsuite_env: str, sweep_vars: Optional[Sequence[str]] = None) -> Union[gg.ggplot, None]: if len(summary_df) == 0: print('WARNING: you have no bsuite summary data, please reload.') return env_df = summary_df[summary_df.bsuite_env == bsuite_env] if len(env_df) == 0: print('Warning, you have no data for bsuite_env={}'.format(bsuite_env)) print('Your dataframe only includes bsuite_env={}' .format(summary_df.bsuite_env.unique())) return df = _clean_bar_plot_data(env_df, sweep_vars) n_agent = len(df.agent.unique()) p = _bar_plot_compare(df) plot_width = min(2 + n_agent, 12) p += gg.theme(figure_size=(plot_width, 6)) p += gg.ggtitle('bsuite score for {} experiment'.format(bsuite_env)) print('tags={}'.format(df.tags.iloc[0])) return p def _tag_pretify(tag): return tag.replace('_', ' ').title() def _radar( df: pd.DataFrame, ax: plt.Axes, label: str, all_tags: Sequence[str], color: str, alpha: float = 0.2, edge_alpha: float = 0.85, zorder: int = 2, edge_style: str = '-'): tmp = df.groupby('tag').mean().reset_index() values = [] for curr_tag in all_tags: score = 0. selected = tmp[tmp['tag'] == curr_tag] if len(selected) == 1: score = float(selected['score']) else: print('{} bsuite scores found for tag {!r} with setting {!r}. ' 'Replacing with zero.'.format(len(selected), curr_tag, label)) values.append(score) values = np.maximum(values, 0.05) values = np.concatenate((values, [values[0]])) angles = np.linspace(0, 2*np.pi, len(all_tags), endpoint=False) angles = np.concatenate((angles, [angles[0]])) ax.plot(angles, values, '-', linewidth=5, label=label, c=color, alpha=edge_alpha, zorder=zorder, linestyle=edge_style) ax.fill(angles, values, alpha=alpha, color=color, zorder=zorder) axis_angles = angles[:-1] * 180/np.pi ax.set_thetagrids( axis_angles, map(_tag_pretify, all_tags), fontsize=18) text_angles = np.rad2deg(angles) for label, angle in zip(ax.get_xticklabels()[:-1], text_angles[:-1]): if 90 <= angle <= 270: label.set_horizontalalignment('right') else: label.set_horizontalalignment('left')
Apache License 2.0
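A minimal usage sketch for the bsuite summary_analysis helpers shown above (bsuite_score, ave_score_by_tag, bsuite_bar_plot). The CSV path and the 'agent_name' sweep variable are illustrative assumptions; a real results DataFrame would come from the bsuite logging utilities and must carry the columns the per-experiment score functions expect ('bsuite_env', 'bsuite_id', 'episode', ...).

import pandas as pd

# Hypothetical path: df is assumed to be a bsuite results frame loaded elsewhere.
df = pd.read_csv('bsuite_results.csv')

# bsuite_score, ave_score_by_tag and bsuite_bar_plot refer to the functions defined above.
score_df = bsuite_score(df, sweep_vars=['agent_name'])
tag_df = ave_score_by_tag(score_df, sweep_vars=['agent_name'])
plot = bsuite_bar_plot(score_df, sweep_vars=['agent_name'])
print(plot)  # plotnine figures render when their repr is printed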
cfedermann/appraise
appraise/wmt15/models.py
RankingResult.export_to_csv
python
def export_to_csv(self, expand_multi_systems=True): item = self.item hit = self.item.hit values = [] iso639_3_to_name_mapping = {'ces': 'Czech', 'cze': 'Czech', 'deu': 'German', 'ger': 'German', 'eng': 'English', 'spa': 'Spanish', 'fra': 'French', 'fre': 'French', 'rus': 'Russian', 'fin': 'Finnish'} _src_lang = hit.hit_attributes['source-language'] _trg_lang = hit.hit_attributes['target-language'] _systems = [] for translation in item.translations: _systems.append(translation[1]['system']) values.append(iso639_3_to_name_mapping[_src_lang]) values.append(iso639_3_to_name_mapping[_trg_lang]) values.append(item.source[1]['id']) values.append('-1') values.append(item.source[1]['id']) values.append(self.user.username) base_values = values _system_names = [] _system_ranks = [] for _result_index, _system in enumerate(_systems): if expand_multi_systems: _local_systems = _system.split(',') _local_results = [str(self.results[_result_index])] * len(_local_systems) _system_names.extend(_local_systems) _system_ranks.extend(_local_results) else: _system_names.append(_system.replace(',', '+')) _system_ranks.append(str(self.results[_result_index])) if len(_system_names) % 5 > 0: _missing_systems = 5 - len(_system_names) % 5 for x in range(_missing_systems): _system_names.append('PLACEHOLDER') _system_ranks.append('-1') all_values = [] for _base_index in range(len(_system_names))[::5]: current_values = list(base_values) current_ranks = [] for _current_index in range(len(_system_names))[_base_index:_base_index+5]: current_values.append('-1') current_values.append(str(_system_names[_current_index])) current_ranks.append(_system_ranks[_current_index]) current_values.extend(current_ranks) all_values.append(u",".join(current_values)) return u"\n".join(all_values)
Exports this RankingResult in CSV format.
https://github.com/cfedermann/appraise/blob/2cce477efd5594699d6e0fa58f6312df60e05394/appraise/wmt15/models.py#L604-L709
ο»Ώ import logging import uuid from datetime import datetime from xml.etree.ElementTree import fromstring, ParseError, tostring from django.dispatch import receiver from django.contrib.auth.models import User, Group from django.core.urlresolvers import reverse from django.db import models from django.template import Context from django.template.loader import get_template from appraise.wmt15.validators import validate_hit_xml, validate_segment_xml from appraise.settings import LOG_LEVEL, LOG_HANDLER from appraise.utils import datetime_to_seconds, AnnotationTask logging.basicConfig(level=LOG_LEVEL) LOGGER = logging.getLogger('appraise.wmt15.models') LOGGER.addHandler(LOG_HANDLER) LANGUAGE_PAIR_CHOICES = ( ('eng2ces', 'English β†’ Czech'), ('eng2deu', 'English β†’ German'), ('eng2fin', 'English β†’ Finnish'), ('eng2fra', 'English β†’ French'), ('eng2rus', 'English β†’ Russian'), ('ces2eng', 'Czech β†’ English'), ('deu2eng', 'German β†’ English'), ('fin2eng', 'Finnish β†’ English'), ('fra2eng', 'French β†’ English'), ('rus2eng', 'Russian β†’ English'), ) class HIT(models.Model): hit_id = models.CharField( max_length=8, db_index=True, unique=True, editable=False, help_text="Unique identifier for this HIT instance.", verbose_name="HIT identifier" ) block_id = models.IntegerField( db_index=True, help_text="Block ID for this HIT instance.", verbose_name="HIT block identifier" ) hit_xml = models.TextField( help_text="XML source for this HIT instance.", validators=[validate_hit_xml], verbose_name="HIT source XML" ) language_pair = models.CharField( max_length=7, choices=LANGUAGE_PAIR_CHOICES, db_index=True, help_text="Language pair choice for this HIT instance.", verbose_name="Language pair" ) hit_attributes = {} users = models.ManyToManyField( User, blank=True, db_index=True, null=True, help_text="Users who work on this HIT instance." ) active = models.BooleanField( db_index=True, default=True, help_text="Indicates that this HIT instance is still in use.", verbose_name="Active?" ) mturk_only = models.BooleanField( db_index=True, default=False, help_text="Indicates that this HIT instance is ONLY usable via MTurk.", verbose_name="MTurk only?" ) completed = models.BooleanField( db_index=True, default=False, help_text="Indicates that this HIT instance is completed.", verbose_name="Completed?" 
) assigned = models.DateTimeField(blank=True, null=True, editable=False) finished = models.DateTimeField(blank=True, null=True, editable=False) class Meta: ordering = ('id', 'hit_id', 'language_pair', 'block_id') verbose_name = "HIT instance" verbose_name_plural = "HIT instances" def __init__(self, *args, **kwargs): super(HIT, self).__init__(*args, **kwargs) if not self.hit_id: self.hit_id = self.__class__._create_hit_id() self.reload_dynamic_fields() def __unicode__(self): return u'<HIT id="{0}" hit="{1}" block="{2}" language-pair="{3}">' .format(self.id, self.hit_id, self.block_id, self.language_pair) @classmethod def _create_hit_id(cls): new_id = uuid.uuid4().hex[:8] while cls.objects.filter(hit_id=new_id): new_id = uuid.uuid4().hex[:8] return new_id @classmethod def compute_remaining_hits(cls, language_pair=None): hits_qs = cls.objects.filter(active=True, mturk_only=False, completed=False) if language_pair: hits_qs = hits_qs.filter(language_pair=language_pair) available = 0 for hit in hits_qs: if hit.users.count() < 1: available = available + 1 else: hit.completed = True hit.save() return available @classmethod def compute_status_for_user(cls, user, language_pair=None): hits_qs = cls.objects.filter(users=user) if language_pair: hits_qs = hits_qs.filter(language_pair=language_pair) _completed_hits = hits_qs.count() _durations = [] for hit in hits_qs: _results = RankingResult.objects.filter(user=user, item__hit=hit) _durations.extend(_results.values_list('duration', flat=True)) _durations = [datetime_to_seconds(d) for d in _durations if d] _total_duration = sum(_durations) _average_duration = _total_duration / float(_completed_hits or 1) current_status = [] current_status.append(_completed_hits) current_status.append(_average_duration) current_status.append(_total_duration) return current_status @classmethod def compute_status_for_group(cls, group, language_pair=None): combined = [0, 0, 0] for user in group.user_set.all(): _user_status = cls.compute_status_for_user(user, language_pair) combined[0] = combined[0] + _user_status[0] combined[1] = combined[1] + _user_status[1] combined[2] = combined[2] + _user_status[2] combined[1] = combined[2] / float(combined[0] or 1) return combined def save(self, *args, **kwargs): if not self.id: self.full_clean() super(HIT, self).save(*args, **kwargs) _tree = fromstring(self.hit_xml.encode("utf-8")) for _child in _tree: new_item = RankingTask(hit=self, item_xml=tostring(_child)) new_item.save() try: related_result = RankingResult.objects.filter(item__hit=self).latest('completion') self.finished = related_result.completion except RankingResult.DoesNotExist: pass super(HIT, self).save(*args, **kwargs) def get_absolute_url(self): hit_handler_view = 'appraise.wmt15.views.hit_handler' kwargs = {'hit_id': self.hit_id} return reverse(hit_handler_view, kwargs=kwargs) def get_status_url(self): status_handler_view = 'appraise.wmt15.views.status_view' kwargs = {'hit_id': self.hit_id} return reverse(status_handler_view, kwargs=kwargs) def reload_dynamic_fields(self): if self.hit_xml: try: _hit_xml = fromstring(self.hit_xml.encode("utf-8")) self.hit_attributes = {} for key, value in _hit_xml.attrib.items(): self.hit_attributes[key] = value except (ParseError), msg: self.hit_attributes = {'note': msg} def export_to_xml(self): template = get_template('wmt15/task_result.xml') self.reload_dynamic_fields() _attr = self.hit_attributes.items() attributes = ' '.join(['{}="{}"'.format(k, v) for k, v in _attr]) results = [] for item in RankingTask.objects.filter(hit=self): 
item.reload_dynamic_fields() try: source_id = item.source[1]["id"] except: source_id = -1 _results = [] source_id = getattr(item, 'source[1]["id"]', -1) for _result in item.rankingresult_set.all(): _results.append(_result.export_to_xml()) results.append((source_id, _results)) context = {'hit_id': self.hit_id, 'attributes': attributes, 'results': results} return template.render(Context(context)) def export_to_apf(self): results = [] for item in RankingTask.objects.filter(hit=self): for _result in item.rankingresult_set.all(): _apf_output = _result.export_to_apf() if _apf_output: results.append(_apf_output) return u"\n".join(results) def compute_agreement_scores(self): _raw = self.export_to_apf().split('\n') if not len(_raw): return None _data = [_line.split(',') for _line in _raw] try: _data = [(x[0], x[1], x[2]) for x in _data] except IndexError: return None _task = AnnotationTask(data=_data) try: _alpha = _task.alpha() _kappa = _task.kappa() _pi = _task.pi() _S = _task.S() except ZeroDivisionError, msg: LOGGER.debug(msg) return None return (_alpha, _kappa, _pi, _S) class RankingTask(models.Model): hit = models.ForeignKey( HIT, db_index=True ) item_xml = models.TextField( help_text="XML source for this RankingTask instance.", validators=[validate_segment_xml], verbose_name="RankingTask source XML" ) attributes = None source = None reference = None translations = None class Meta: ordering = ('id',) verbose_name = "RankingTask instance" verbose_name_plural = "RankingTask instances" def __init__(self, *args, **kwargs): super(RankingTask, self).__init__(*args, **kwargs) self.reload_dynamic_fields() def __unicode__(self): return u'<ranking-task id="{0}">'.format(self.id) def save(self, *args, **kwargs): self.full_clean() super(RankingTask, self).save(*args, **kwargs) def reload_dynamic_fields(self): if self.item_xml: try: _item_xml = fromstring(self.item_xml) self.attributes = _item_xml.attrib _source = _item_xml.find('source') if _source is not None: self.source = (_source.text, _source.attrib) _reference = _item_xml.find('reference') if _reference is not None: self.reference = (_reference.text, _reference.attrib) self.translations = [] for _translation in _item_xml.iterfind('translation'): self.translations.append((_translation.text, _translation.attrib)) except ParseError: self.source = None self.reference = None self.translations = None class RankingResult(models.Model): item = models.ForeignKey( RankingTask, db_index=True ) user = models.ForeignKey( User, db_index=True ) duration = models.TimeField(blank=True, null=True, editable=False) completion = models.DateTimeField(auto_now_add=True, blank=True, null=True, editable=False) def readable_duration(self): return '{}'.format(self.duration) raw_result = models.TextField(editable=False, blank=False) results = None systems = 0 class Meta: ordering = ('id',) verbose_name = "RankingResult object" verbose_name_plural = "RankingResult objects" def __init__(self, *args, **kwargs): super(RankingResult, self).__init__(*args, **kwargs) self.reload_dynamic_fields() def __unicode__(self): return u'<ranking-result id="{0}">'.format(self.id) def reload_dynamic_fields(self): if self.raw_result and self.raw_result != 'SKIPPED': try: self.results = self.raw_result.split(',') self.results = [int(x) for x in self.results] self.systems = sum([len(x[1]['system'].split(',')) for x in self.item.translations]) except Exception, msg: self.results = msg def export_to_xml(self): return self.export_to_ranking_xml() def export_to_ranking_xml(self): template = 
get_template('wmt15/ranking_result.xml') _attr = self.item.attributes.items() attributes = ' '.join(['{}="{}"'.format(k, v) for k, v in _attr]) skipped = self.results is None translations = [] if not skipped: for index, translation in enumerate(self.item.translations): _items = translation[1].items() _attr = ' '.join(['{}="{}"'.format(k, v) for k, v in _items]) _rank = self.results[index] translations.append((_attr, _rank)) context = { 'attributes': attributes, 'duration': '{}'.format(self.duration), 'skipped': skipped, 'translations': translations, 'user': self.user, } return template.render(Context(context)) def export_to_ranking_csv(self): ranking_csv_data = [] try: ranking_csv_data.append(self.item.source[1]["id"]) except: ranking_csv_data.append(-1) iso639_3_to_name_mapping = {'ces': 'Czech', 'cze': 'Czech', 'deu': 'German', 'ger': 'German', 'eng': 'English', 'spa': 'Spanish', 'fra': 'French', 'fre': 'French', 'rus': 'Russian', 'fin': 'Finnish'} _src_lang = self.item.hit.hit_attributes['source-language'] _trg_lang = self.item.hit.hit_attributes['target-language'] ranking_csv_data.append(iso639_3_to_name_mapping[_src_lang]) ranking_csv_data.append(iso639_3_to_name_mapping[_trg_lang]) ranking_csv_data.append(self.user.username) ranking_csv_data.append(datetime_to_seconds(self.duration)) skipped = self.results is None translations = [] if not skipped: for index, translation in enumerate(self.item.translations): _word_count = len(translation[0].split()) _rank = self.results[index] translations.append((_rank, _word_count)) for rank, word_count in translations: ranking_csv_data.append(rank) ranking_csv_data.append(word_count) return ranking_csv_data
BSD 3-Clause New or Revised License
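A standalone sketch of the padding-and-chunking step inside RankingResult.export_to_csv above: system names are padded with 'PLACEHOLDER' entries (rank -1) up to a multiple of five and then emitted five per CSV row. The helper name and the sample data are hypothetical, for illustration only.

def pad_and_chunk(system_names, system_ranks, chunk_size=5):
    # Copy inputs and normalise ranks to strings, as export_to_csv does.
    names, ranks = list(system_names), [str(r) for r in system_ranks]
    if len(names) % chunk_size:
        missing = chunk_size - len(names) % chunk_size
        names.extend(['PLACEHOLDER'] * missing)
        ranks.extend(['-1'] * missing)
    # Group the padded names/ranks into rows of chunk_size pairs.
    rows = []
    for start in range(0, len(names), chunk_size):
        rows.append(list(zip(names[start:start + chunk_size],
                             ranks[start:start + chunk_size])))
    return rows

print(pad_and_chunk(['sysA', 'sysB', 'sysC'], [1, 2, 2]))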
oarriaga/paz
paz/models/segmentation/unet.py
build_UNET
python
def build_UNET(num_classes, backbone, branch_tensors, decoder, decoder_filters, activation, name): inputs, x = backbone.input, backbone.output if isinstance(backbone.layers[-1], MaxPooling2D): x = convolution_block(x, 512) x = convolution_block(x, 512) for branch, filters in zip(branch_tensors, decoder_filters): x = decoder(x, filters, branch) kwargs = {'use_bias': True, 'kernel_initializer': 'glorot_uniform'} x = Conv2D(num_classes, 3, (1, 1), 'same', **kwargs)(x) outputs = Activation(activation, name='masks')(x) model = Model(inputs, outputs, name=name) return model
Build UNET with a given ``backbone`` model. # Arguments num_classes: Integer used for output number of channels. backbone: Instantiated backbone model. branch_tensors: List of tensors from the ``backbone`` model. decoder: Function used for upsampling and decoding the output. decoder_filters: List of integers used in each application of the decoder. activation: Output activation of the model. name: String indicating the name of the model. # Returns A UNET Keras/tensorflow model.
https://github.com/oarriaga/paz/blob/5fcfa78768c3e5b2ee3f58aaf928709f05d750f4/paz/models/segmentation/unet.py#L127-L155
from tensorflow.keras.layers import Conv2DTranspose, Concatenate, UpSampling2D from tensorflow.keras.layers import Conv2D, BatchNormalization, Activation from tensorflow.keras.layers import MaxPooling2D, Input from tensorflow.keras import Model from tensorflow.keras.applications import VGG16, VGG19 from tensorflow.keras.applications import ResNet50V2 def convolution_block(inputs, filters, kernel_size=3, activation='relu'): kwargs = {'use_bias': False, 'kernel_initializer': 'he_uniform'} x = Conv2D(filters, kernel_size, (1, 1), 'same', **kwargs)(inputs) x = BatchNormalization()(x) x = Activation(activation)(x) return x def upsample_block(x, filters, branch): x = UpSampling2D(size=2)(x) x = Concatenate(axis=3)([x, branch]) x = convolution_block(x, filters) x = convolution_block(x, filters) return x def transpose_block(x, filters, branch): x = Conv2DTranspose(filters, 4, (2, 2), 'same', use_bias=False)(x) x = BatchNormalization()(x) x = Activation('relu')(x) x = Concatenate(axis=3)([x, branch]) x = convolution_block(x, filters) return x def freeze_model(model): for layer in model.layers: layer.trainable = False return model def get_tensors(model, layer_names): tensors = [] for layer_name in layer_names: tensors.append(model.get_layer(layer_name).output) return model, tensors def build_backbone(BACKBONE, shape, branch_names, weights, frozen=False, input_tensor=None): kwargs = {'include_top': False, 'input_shape': shape, 'weights': weights} if input_tensor is not None: kwargs.pop('input_shape') kwargs['input_tensor'] = input_tensor backbone = BACKBONE(**kwargs) if frozen: backbone = freeze_model(backbone) backbone, branch_tensors = get_tensors(backbone, branch_names) return backbone, branch_tensors
MIT License
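A usage sketch that wires build_backbone and build_UNET from the snippet above into a VGG16-based UNET. The branch layer names, input shape, decoder choice and filter sizes are assumptions for illustration and need not match the wrappers shipped in paz.

from tensorflow.keras.applications import VGG16

# Skip connections ordered deepest-first so they line up with the decoder stages.
branch_names = ['block5_conv3', 'block4_conv3', 'block3_conv3',
                'block2_conv2', 'block1_conv2']
backbone, branch_tensors = build_backbone(
    VGG16, shape=(128, 128, 3), branch_names=branch_names, weights=None)
model = build_UNET(num_classes=3, backbone=backbone,
                   branch_tensors=branch_tensors, decoder=transpose_block,
                   decoder_filters=[256, 128, 64, 32, 16],
                   activation='softmax', name='UNET_VGG16')
model.summary()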
openstack/swift
swift/common/daemon.py
DaemonStrategy.run
python
def run(self, once=False, **kwargs): self.setup(**kwargs) try: self._run(once=once, **kwargs) except KeyboardInterrupt: self.logger.notice('User quit') finally: self.cleanup() self.running = False
Daemonize and execute our strategy.
https://github.com/openstack/swift/blob/dbd0960aeebedc0487699d3ca2a4d6f21e7ed524/swift/common/daemon.py#L154-L163
import errno import os import sys import time import signal from re import sub import eventlet.debug from eventlet.hubs import use_hub from swift.common import utils class Daemon(object): WORKERS_HEALTHCHECK_INTERVAL = 5.0 def __init__(self, conf): self.conf = conf self.logger = utils.get_logger(conf, log_route='daemon') def run_once(self, *args, **kwargs): raise NotImplementedError('run_once not implemented') def run_forever(self, *args, **kwargs): raise NotImplementedError('run_forever not implemented') def run(self, once=False, **kwargs): if once: self.run_once(**kwargs) else: self.run_forever(**kwargs) def post_multiprocess_run(self): pass def get_worker_args(self, once=False, **kwargs): return [] def is_healthy(self): return True class DaemonStrategy(object): def __init__(self, daemon, logger): self.daemon = daemon self.logger = logger self.running = False self.options_by_pid = {} self.unspawned_worker_options = [] def setup(self, **kwargs): utils.validate_configuration() utils.drop_privileges(self.daemon.conf.get('user', 'swift')) utils.clean_up_daemon_hygiene() utils.capture_stdio(self.logger, **kwargs) def kill_children(*args): self.running = False self.logger.info('SIGTERM received') signal.signal(signal.SIGTERM, signal.SIG_IGN) os.killpg(0, signal.SIGTERM) os._exit(0) signal.signal(signal.SIGTERM, kill_children) self.running = True utils.systemd_notify(self.logger) def _run_inline(self, once=False, **kwargs): self.daemon.run(once=once, **kwargs)
Apache License 2.0
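A minimal sketch of driving a custom daemon through DaemonStrategy, using only the classes defined above. The config dict and log messages are illustrative; note that setup() drops privileges and captures stdio, so running this literally requires a suitably configured environment (Swift normally starts daemons through its own runner helpers).

import time

class HelloDaemon(Daemon):
    def run_forever(self, *args, **kwargs):
        while True:
            self.logger.info('hello')
            time.sleep(30)

    def run_once(self, *args, **kwargs):
        self.logger.info('hello once')

conf = {'user': 'swift'}  # hypothetical minimal config
daemon = HelloDaemon(conf)
strategy = DaemonStrategy(daemon, daemon.logger)
strategy.run(once=True)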
coin3d/pivy
scons/scons-local-1.2.0.d20090919/SCons/Util.py
splitext
python
def splitext(path): sep = rightmost_separator(path, os.sep) dot = string.rfind(path, '.') if dot > sep and not containsOnly(path[dot:], "0123456789."): return path[:dot],path[dot:] else: return path,""
Same as os.path.splitext() but faster.
https://github.com/coin3d/pivy/blob/a88a54e594977d573747f762823d9a24b10e3b23/scons/scons-local-1.2.0.d20090919/SCons/Util.py#L89-L97
from __future__ import print_function __revision__ = "src/engine/SCons/Util.py 4369 2009/09/19 15:58:29 scons" import copy import os import os.path import re import string import sys import types from UserDict import UserDict from UserList import UserList from UserString import UserString DictType = dict InstanceType = types.InstanceType ListType = list StringType = bytes TupleType = tuple def dictify(keys, values, result={}): for k, v in zip(keys, values): result[k] = v return result _altsep = os.altsep if _altsep is None and sys.platform == 'win32': _altsep = '/' if _altsep: def rightmost_separator(path, sep, _altsep=_altsep): rfind = string.rfind return max(rfind(path, sep), rfind(path, _altsep)) else: rightmost_separator = string.rfind def containsAny(str, set): for c in set: if c in str: return 1 return 0 def containsAll(str, set): for c in set: if c not in str: return 0 return 1 def containsOnly(str, set): for c in str: if c not in set: return 0 return 1
ISC License
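A short illustration of how this splitext differs from os.path.splitext: a trailing suffix made only of digits and dots (for example a shared-library version number) is kept as part of the root rather than treated as an extension. The calls below assume the module's own namespace, where print_function and os.path are already imported.

print(splitext('build/libfoo.so'))            # ('build/libfoo', '.so')
print(splitext('build/libfoo.so.1'))          # ('build/libfoo.so.1', '')
print(os.path.splitext('build/libfoo.so.1'))  # ('build/libfoo.so', '.1')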
eric3911/mini_ssd
object_detection/utils/vrd_evaluation.py
_VRDDetectionEvaluation.add_single_ground_truth_image_info
python
def add_single_ground_truth_image_info( self, image_key, groundtruth_box_tuples, groundtruth_class_tuples): if image_key in self._groundtruth_box_tuples: logging.warn( 'image %s has already been added to the ground truth database.', image_key) return self._groundtruth_box_tuples[image_key] = groundtruth_box_tuples self._groundtruth_class_tuples[image_key] = groundtruth_class_tuples self._update_groundtruth_statistics(groundtruth_class_tuples)
Adds groundtruth for a single image to be used for evaluation. Args: image_key: A unique string/integer identifier for the image. groundtruth_box_tuples: A numpy array of structures with the shape [M, 1], representing M tuples, each tuple containing the same number of named bounding boxes. Each box is of the format [y_min, x_min, y_max, x_max]. groundtruth_class_tuples: A numpy array of structures with the shape [M, 1], representing the class labels of the corresponding bounding boxes and possibly additional classes.
https://github.com/eric3911/mini_ssd/blob/6fb6e1bce3ab6e4adb832b37e78325803c7424b6/object_detection/utils/vrd_evaluation.py#L447-L470
from abc import abstractmethod import collections import logging import numpy as np from object_detection.core import standard_fields from object_detection.utils import metrics from object_detection.utils import object_detection_evaluation from object_detection.utils import per_image_vrd_evaluation vrd_box_data_type = np.dtype([('subject', 'f4', (4,)), ('object', 'f4', (4,))]) single_box_data_type = np.dtype([('box', 'f4', (4,))]) label_data_type = np.dtype([('subject', 'i4'), ('object', 'i4'), ('relation', 'i4')]) class VRDDetectionEvaluator(object_detection_evaluation.DetectionEvaluator): def __init__(self, matching_iou_threshold=0.5, metric_prefix=None): super(VRDDetectionEvaluator, self).__init__([]) self._matching_iou_threshold = matching_iou_threshold self._evaluation = _VRDDetectionEvaluation( matching_iou_threshold=self._matching_iou_threshold) self._image_ids = set([]) self._metric_prefix = (metric_prefix + '_') if metric_prefix else '' self._evaluatable_labels = {} self._negative_labels = {} @abstractmethod def _process_groundtruth_boxes(self, groundtruth_box_tuples): raise NotImplementedError( '_process_groundtruth_boxes method should be implemented in subclasses' 'of VRDDetectionEvaluator.') @abstractmethod def _process_detection_boxes(self, detections_box_tuples): raise NotImplementedError( '_process_detection_boxes method should be implemented in subclasses' 'of VRDDetectionEvaluator.') def add_single_ground_truth_image_info(self, image_id, groundtruth_dict): if image_id in self._image_ids: raise ValueError('Image with id {} already added.'.format(image_id)) groundtruth_class_tuples = ( groundtruth_dict[standard_fields.InputDataFields.groundtruth_classes]) groundtruth_box_tuples = ( groundtruth_dict[standard_fields.InputDataFields.groundtruth_boxes]) self._evaluation.add_single_ground_truth_image_info( image_key=image_id, groundtruth_box_tuples=self._process_groundtruth_boxes( groundtruth_box_tuples), groundtruth_class_tuples=groundtruth_class_tuples) self._image_ids.update([image_id]) all_classes = [] for field in groundtruth_box_tuples.dtype.fields: all_classes.append(groundtruth_class_tuples[field]) groudtruth_positive_classes = np.unique(np.concatenate(all_classes)) verified_labels = groundtruth_dict.get( standard_fields.InputDataFields.groundtruth_image_classes, np.array([], dtype=int)) self._evaluatable_labels[image_id] = np.unique( np.concatenate((verified_labels, groudtruth_positive_classes))) self._negative_labels[image_id] = np.setdiff1d(verified_labels, groudtruth_positive_classes) def add_single_detected_image_info(self, image_id, detections_dict): if image_id not in self._image_ids: logging.warn('No groundtruth for the image with id %s.', image_id) self._image_ids.update([image_id]) self._negative_labels[image_id] = np.array([]) self._evaluatable_labels[image_id] = np.array([]) num_detections = detections_dict[ standard_fields.DetectionResultFields.detection_boxes].shape[0] detection_class_tuples = detections_dict[ standard_fields.DetectionResultFields.detection_classes] detection_box_tuples = detections_dict[ standard_fields.DetectionResultFields.detection_boxes] negative_selector = np.zeros(num_detections, dtype=bool) selector = np.ones(num_detections, dtype=bool) for field in detection_box_tuples.dtype.fields: negative_selector |= np.isin(detection_class_tuples[field], self._negative_labels[image_id]) selector &= np.isin(detection_class_tuples[field], self._evaluatable_labels[image_id]) selector |= negative_selector 
self._evaluation.add_single_detected_image_info( image_key=image_id, detected_box_tuples=self._process_detection_boxes( detection_box_tuples[selector]), detected_scores=detections_dict[ standard_fields.DetectionResultFields.detection_scores][selector], detected_class_tuples=detection_class_tuples[selector]) def evaluate(self, relationships=None): (weighted_average_precision, mean_average_precision, average_precisions, _, _, recall_50, recall_100, _, _) = ( self._evaluation.evaluate()) vrd_metrics = { (self._metric_prefix + 'weightedAP@{}IOU'.format( self._matching_iou_threshold)): weighted_average_precision, self._metric_prefix + 'mAP@{}IOU'.format(self._matching_iou_threshold): mean_average_precision, self._metric_prefix + 'Recall@50@{}IOU'.format( self._matching_iou_threshold): recall_50, self._metric_prefix + 'Recall@100@{}IOU'.format( self._matching_iou_threshold): recall_100, } if relationships: for key, average_precision in average_precisions.iteritems(): vrd_metrics[self._metric_prefix + 'AP@{}IOU/{}'.format( self._matching_iou_threshold, relationships[key])] = average_precision else: for key, average_precision in average_precisions.iteritems(): vrd_metrics[self._metric_prefix + 'AP@{}IOU/{}'.format( self._matching_iou_threshold, key)] = average_precision return vrd_metrics def clear(self): self._evaluation = _VRDDetectionEvaluation( matching_iou_threshold=self._matching_iou_threshold) self._image_ids.clear() self._negative_labels.clear() self._evaluatable_labels.clear() class VRDRelationDetectionEvaluator(VRDDetectionEvaluator): def __init__(self, matching_iou_threshold=0.5): super(VRDRelationDetectionEvaluator, self).__init__( matching_iou_threshold=matching_iou_threshold, metric_prefix='VRDMetric_Relationships') def _process_groundtruth_boxes(self, groundtruth_box_tuples): return groundtruth_box_tuples def _process_detection_boxes(self, detections_box_tuples): return detections_box_tuples class VRDPhraseDetectionEvaluator(VRDDetectionEvaluator): def __init__(self, matching_iou_threshold=0.5): super(VRDPhraseDetectionEvaluator, self).__init__( matching_iou_threshold=matching_iou_threshold, metric_prefix='VRDMetric_Phrases') def _process_groundtruth_boxes(self, groundtruth_box_tuples): first_box_key = groundtruth_box_tuples.dtype.fields.keys()[0] miny = groundtruth_box_tuples[first_box_key][:, 0] minx = groundtruth_box_tuples[first_box_key][:, 1] maxy = groundtruth_box_tuples[first_box_key][:, 2] maxx = groundtruth_box_tuples[first_box_key][:, 3] for fields in groundtruth_box_tuples.dtype.fields: miny = np.minimum(groundtruth_box_tuples[fields][:, 0], miny) minx = np.minimum(groundtruth_box_tuples[fields][:, 1], minx) maxy = np.maximum(groundtruth_box_tuples[fields][:, 2], maxy) maxx = np.maximum(groundtruth_box_tuples[fields][:, 3], maxx) data_result = [] for i in range(groundtruth_box_tuples.shape[0]): data_result.append(([miny[i], minx[i], maxy[i], maxx[i]],)) result = np.array(data_result, dtype=[('box', 'f4', (4,))]) return result def _process_detection_boxes(self, detections_box_tuples): first_box_key = detections_box_tuples.dtype.fields.keys()[0] miny = detections_box_tuples[first_box_key][:, 0] minx = detections_box_tuples[first_box_key][:, 1] maxy = detections_box_tuples[first_box_key][:, 2] maxx = detections_box_tuples[first_box_key][:, 3] for fields in detections_box_tuples.dtype.fields: miny = np.minimum(detections_box_tuples[fields][:, 0], miny) minx = np.minimum(detections_box_tuples[fields][:, 1], minx) maxy = np.maximum(detections_box_tuples[fields][:, 2], maxy) 
maxx = np.maximum(detections_box_tuples[fields][:, 3], maxx) data_result = [] for i in range(detections_box_tuples.shape[0]): data_result.append(([miny[i], minx[i], maxy[i], maxx[i]],)) result = np.array(data_result, dtype=[('box', 'f4', (4,))]) return result VRDDetectionEvalMetrics = collections.namedtuple('VRDDetectionEvalMetrics', [ 'weighted_average_precision', 'mean_average_precision', 'average_precisions', 'precisions', 'recalls', 'recall_50', 'recall_100', 'median_rank_50', 'median_rank_100' ]) class _VRDDetectionEvaluation(object): def __init__(self, matching_iou_threshold=0.5): self._per_image_eval = per_image_vrd_evaluation.PerImageVRDEvaluation( matching_iou_threshold=matching_iou_threshold) self._groundtruth_box_tuples = {} self._groundtruth_class_tuples = {} self._num_gt_instances = 0 self._num_gt_imgs = 0 self._num_gt_instances_per_relationship = {} self.clear_detections() def clear_detections(self): self._detection_keys = set() self._scores = [] self._relation_field_values = [] self._tp_fp_labels = [] self._average_precisions = {} self._precisions = [] self._recalls = []
MIT License
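A toy sketch of registering groundtruth with the relation evaluator defined above, using the structured dtypes from this module. The image id, boxes and class ids are made-up values for illustration.

import numpy as np
from object_detection.core import standard_fields
from object_detection.utils import vrd_evaluation

# One (subject box, object box) pair and its (subject, object, relation) labels.
gt_boxes = np.array(
    [([0.0, 0.0, 0.5, 0.5], [0.3, 0.3, 1.0, 1.0])],
    dtype=vrd_evaluation.vrd_box_data_type)
gt_classes = np.array([(1, 2, 3)], dtype=vrd_evaluation.label_data_type)

evaluator = vrd_evaluation.VRDRelationDetectionEvaluator()
evaluator.add_single_ground_truth_image_info(
    image_id='img_0',
    groundtruth_dict={
        standard_fields.InputDataFields.groundtruth_classes: gt_classes,
        standard_fields.InputDataFields.groundtruth_boxes: gt_boxes,
    })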
dingmyu/hr-nas
common.py
reduce_and_flush_meters
python
def reduce_and_flush_meters(meters, method='avg'): if not FLAGS.use_distributed: results = flush_scalar_meters(meters) else: results = {} assert isinstance(meters, dict), "meters should be a dict." for name in sorted(meters.keys()): meter = meters[name] if not isinstance(meter, ScalarMeter): continue if method == 'avg': method_fun = torch.mean elif method == 'sum': method_fun = torch.sum elif method == 'max': method_fun = torch.max elif method == 'min': method_fun = torch.min else: raise NotImplementedError( 'flush method: {} is not yet implemented.'.format(method)) tensor = torch.tensor(meter.values).cuda() gather_tensors = [ torch.ones_like(tensor) for _ in range(udist.get_world_size()) ] dist.all_gather(gather_tensors, tensor) value = method_fun(torch.cat(gather_tensors)) meter.flush(value) results[name] = value return results
Sync and flush meters.
https://github.com/dingmyu/hr-nas/blob/003c3b6bd0168751c884b6999ffc8c13b36a39e2/common.py#L124-L155
import copy import importlib import logging import math import os import torch import torch.distributed as dist from torch.utils.tensorboard import SummaryWriter from utils import distributed as udist from utils.model_profiling import model_profiling from utils.config import FLAGS from utils.meters import ScalarMeter from utils.meters import flush_scalar_meters from utils.common import get_params_by_name import models.mobilenet_base as mb import torch.nn.functional as F summary_writer = None class SummaryWriterManager(object): @udist.master_only def __enter__(self): global summary_writer if summary_writer is not None: raise RuntimeError('Should only init `summary_writer` once') summary_writer = SummaryWriter(os.path.join(FLAGS.log_dir, 'log')) @udist.master_only def __exit__(self, exc_type, exc_value, exc_traceback): global summary_writer if summary_writer is None: raise RuntimeError('`summary_writer` lost') summary_writer.close() summary_writer = None def setup_ema(model): from utils import optim ema = None if FLAGS.moving_average_decay > 0.0: if FLAGS.moving_average_decay_adjust: moving_average_decay = optim.ExponentialMovingAverage.adjust_momentum( FLAGS.moving_average_decay, FLAGS.moving_average_decay_base_batch / FLAGS.batch_size) else: moving_average_decay = FLAGS.moving_average_decay if udist.is_master(): logging.info('Moving average for model parameters: {}'.format( moving_average_decay)) ema = optim.ExponentialMovingAverage(moving_average_decay) for name, param in model.named_parameters(): ema.register(name, param) for name, buffer in model.named_buffers(): if 'running_var' in name or 'running_mean' in name: ema.register(name, buffer) return ema def forward_loss(model, criterion, input, target, meter, task='classification', distill=False, kl=True): output = model(input) if distill: for block_name, block in model.module.get_named_block_list().items(): block.index = 1 output_whole = model(input) for block_name, block in model.module.get_named_block_list().items(): block.index = 0 if task == 'segmentation': if distill and kl: input_log_softmax = F.log_softmax(output, dim=1) target_softmax = F.softmax(output_whole.detach(), dim=1) loss, acc = criterion(output, target) loss = loss + F.kl_div(input_log_softmax, target_softmax, reduction='mean') else: loss, acc = criterion(output, target) meter['loss'].cache(loss) meter['acc'].cache(acc) if distill: loss_whole, acc_whole = criterion(output_whole, target) meter['loss_whole'].cache(loss_whole) meter['acc_whole'].cache(acc_whole) loss = loss + loss_whole else: if distill and kl: input_log_softmax = F.log_softmax(output, dim=1) target_softmax = F.softmax(output_whole.detach(), dim=1) loss = criterion(output, target) + F.kl_div(input_log_softmax, target_softmax, reduction='mean') else: loss = criterion(output, target) meter['loss'].cache(loss) _, pred = output.topk(max(FLAGS.topk)) pred = pred.t() correct = pred.eq(target.view(1, -1).expand_as(pred)) for k in FLAGS.topk: correct_k = correct[:k].float().sum(0) error_list = list(1. - correct_k.cpu().detach().numpy()) meter['top{}_error'.format(k)].cache_list(error_list) if distill: loss_whole = criterion(output_whole, target) meter['loss_whole'].cache(loss_whole) _, pred = output_whole.topk(max(FLAGS.topk)) pred = pred.t() correct = pred.eq(target.view(1, -1).expand_as(pred)) for k in FLAGS.topk: correct_k = correct[:k].float().sum(0) error_list = list(1. - correct_k.cpu().detach().numpy()) meter['top{}_error_whole'.format(k)].cache_list(error_list) loss = loss + loss_whole return loss
MIT License
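A sketch of the meter pattern reduce_and_flush_meters expects: a dict of ScalarMeter objects that cache per-batch values and get flushed (and, when FLAGS.use_distributed is set, all-gathered across ranks) at reporting time. The ScalarMeter constructor argument and the cached numbers are assumptions for illustration, and a loaded config with use_distributed disabled is assumed.

from utils.meters import ScalarMeter

# Meter names follow the ones used by forward_loss above; the values are made up.
meters = {'loss': ScalarMeter('loss'), 'top1_error': ScalarMeter('top1_error')}
for batch_loss, batch_error in [(0.9, 0.32), (0.7, 0.28)]:
    meters['loss'].cache(batch_loss)
    meters['top1_error'].cache(batch_error)

# With use_distributed disabled this reduces to flush_scalar_meters(meters).
results = reduce_and_flush_meters(meters, method='avg')
print(results)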
srstevenson/nb-clean
noxfile.py
list_source_files
python
def list_source_files() -> List[str]: paths = [path for path in SOURCES if pathlib.Path(path).is_file()] paths.extend( [ os.fspath(path) for source in SOURCES for path in pathlib.Path(source).rglob("*.py") if pathlib.Path(source).is_dir() ] ) return paths
Expand directories in SOURCES to constituent files.
https://github.com/srstevenson/nb-clean/blob/58dfdd2d0ed175b3ff80dec73bc1a37c84a9e070/noxfile.py#L12-L23
import os import pathlib from typing import List import nox SOURCES = ["noxfile.py", "src", "tests"]
ISC License
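A usage sketch showing how the expanded file list is typically consumed inside a nox session; the "black" session below is illustrative and not necessarily one of the sessions defined in this noxfile.

@nox.session()
def black(session: nox.Session) -> None:
    """Check code formatting with black (illustrative session)."""
    session.install("black")
    session.run("black", "--check", *list_source_files())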