{"code": "def FromMany(cls, samples):\n \n if not samples:\n raise ValueError(\"Empty `samples` argument\")\n\n \n \n cpu_percent = sum(sample.cpu_percent for sample in samples) / len(samples)\n\n return CpuSample(\n timestamp=max(sample.timestamp for sample in samples),\n cpu_percent=cpu_percent,\n user_cpu_time=max(sample.user_cpu_time for sample in samples),\n system_cpu_time=max(sample.system_cpu_time for sample in samples))", "docstring": "Constructs a single sample that best represents a list of samples.\n\nArgs:\nsamples: An iterable collection of `CpuSample` instances.\n\nReturns:\nA `CpuSample` instance representing `samples`.\n\nRaises:\nValueError: If `samples` is empty.", "source": "juraj-google-style"} {"code": "def timestamp(stamp, tolerance=150):\n \n try:\n tolerance = datetime.timedelta(0, tolerance)\n timestamp_low = dateutil.parser.parse(stamp)\n timestamp_high = timestamp_low + tolerance\n now = datetime.datetime.now(timestamp_low.tzinfo)\n except ValueError:\n return False\n\n return now >= timestamp_low and now <= timestamp_high", "docstring": "Validate timestamp specified by request.\n\nSee `validate.request` for additional info.\n\nArgs:\nstamp: str. Time request was made as ISO 8601 timestamp.\ntolerance: int. Number of seconds request remains valid from timestamp.\n\nReturns\nbool: True if valid, False otherwise.", "source": "juraj-google-style"} {"code": "def _set_initial_contents(self, contents):\n contents = self._encode_contents(contents)\n changed = (self._byte_contents != contents)\n st_size = len(contents)\n if self._byte_contents:\n self.size = 0\n current_size = (self.st_size or 0)\n self.filesystem.change_disk_usage((st_size - current_size), self.name, self.st_dev)\n self._byte_contents = contents\n self.st_size = st_size\n self.epoch += 1\n return changed", "docstring": "Sets the file contents and size.\nCalled internally after initial file creation.\n\nArgs:\ncontents: string, new content of file.\n\nReturns:\nTrue if the contents have been changed.\n\nRaises:\nIOError: if the st_size is not a non-negative integer,\nor if st_size exceeds the available file system space", "source": "codesearchnet"} {"code": "def get_all_users(configuration=None, **kwargs):\n \n \n user = User(configuration=configuration)\n user['id'] = 'all users' \n result = user._write_to_hdx('list', kwargs, 'id')\n users = list()\n if result:\n for userdict in result:\n user = User(userdict, configuration=configuration)\n users.append(user)\n else:\n logger.debug(result)\n return users", "docstring": "Get all users in HDX\n\nArgs:\nconfiguration (Optional[Configuration]): HDX configuration. Defaults to global configuration.\n**kwargs: See below\nq (str): Restrict to names containing a string. Defaults to all users.\norder_by (str): Field by which to sort - any user field or edits (number_of_edits). 
Defaults to 'name'.\n\nReturns:\nList[User]: List of all users in HDX", "source": "juraj-google-style"} {"code": "def deprecated_graph_mode_only(func: Union[_TC, _F]) -> Union[_TC, _F]:\n if tf_inspect.isclass(func):\n setup = func.__dict__.get('setUp')\n if setup is not None:\n setattr(func, 'setUp', deprecated_graph_mode_only(setup))\n for name, value in func.__dict__.copy().items():\n if callable(value) and name.startswith(unittest.TestLoader.testMethodPrefix):\n setattr(func, name, deprecated_graph_mode_only(value))\n return func\n\n def decorated(*args, **kwargs):\n if context.executing_eagerly():\n with context.graph_mode():\n return func(*args, **kwargs)\n else:\n return func(*args, **kwargs)\n return tf_decorator.make_decorator(func, decorated)", "docstring": "Execute the decorated test in graph mode.\n\nThis is a decorator intended to be applied to tests that are not compatible\nwith eager mode. When this decorator is applied, the test body will be run in\nan environment where API calls construct graphs instead of executing eagerly.\n\n`deprecated_graph_mode_only`, `run_v1_only`, `run_v2_only`, and\n`run_in_graph_and_eager_modes` are available decorators for different\nv1/v2/eager/graph combinations.\n\nArgs:\nfunc: function or class to be annotated.\nIf `func` is a function this returns the decorator applied to `func`.\nIf `func` is a unit test class this returns that class with the decorator\napplied to all test functions within that class.\n\nReturns:\nReturns a function or class that will run the decorated test(s)\nin graph mode.", "source": "github-repos"} {"code": "def isOpeningTag(self):\n if (self.isTag() and (not self.isComment()) and (not self.isEndTag()) and (not self.isNonPairTag())):\n return True\n return False", "docstring": "Detect whether this tag is opening or not.\n\nReturns:\nbool: True if it is opening.", "source": "codesearchnet"} {"code": "def create_initial_tree(channel):\n \n \n config.LOGGER.info(\" Setting up initial channel structure... \")\n tree = ChannelManager(channel)\n\n \n config.LOGGER.info(\" Validating channel structure...\")\n channel.print_tree()\n tree.validate()\n config.LOGGER.info(\" Tree is valid\\n\")\n return tree", "docstring": "create_initial_tree: Create initial tree structure\nArgs:\nchannel (Channel): channel to construct\nReturns: tree manager to run rest of steps", "source": "juraj-google-style"} {"code": "def delete_issue(self, issue_id, params=None):\n \n return self._delete(self.API_URL + 'issue/{}'.format(issue_id), params=params)", "docstring": "Deletes an individual issue.\n\nIf the issue has sub-tasks you must set the deleteSubtasks=true parameter to delete the issue. You cannot delete\nan issue without deleting its sub-tasks.\n\nArgs:\nissue_id:\nparams:\n\nReturns:", "source": "juraj-google-style"} {"code": "def add_string(self, data):\n \n lines = []\n while data:\n match = self._line_end_re.search(data)\n if match is None:\n chunk = data\n else:\n chunk = data[:match.end()]\n\n data = data[len(chunk):]\n\n if self._buf and self._buf[-1].endswith(b('\\r')) and not chunk.startswith(b('\\n')):\n \n \n \n \n \n \n \n \n \n \n lines.append(self._finish_line())\n\n self._buf.append(chunk)\n if chunk.endswith(b('\\n')):\n lines.append(self._finish_line())\n\n return lines", "docstring": "Process some data splitting it into complete lines and buffering the rest\n\nArgs:\ndata: A `str` in Python 2 or `bytes` in Python 3\nReturns:\nlist of complete lines ending with a carriage return (eg. 
a progress\nbar) or a newline.", "source": "juraj-google-style"} {"code": "def ParseOptions(cls, options, configuration_object):\n \n if not isinstance(configuration_object, tools.CLITool):\n raise errors.BadConfigObject(\n 'Configuration object is not an instance of CLITool')\n\n process_memory_limit = cls._ParseNumericOption(\n options, 'process_memory_limit')\n\n if process_memory_limit and process_memory_limit < 0:\n raise errors.BadConfigOption(\n 'Invalid process memory limit value cannot be negative.')\n\n setattr(configuration_object, '_process_memory_limit', process_memory_limit)", "docstring": "Parses and validates options.\n\nArgs:\noptions (argparse.Namespace): parser options.\nconfiguration_object (CLITool): object to be configured by the argument\nhelper.\n\nRaises:\nBadConfigObject: when the configuration object is of the wrong type.\nBadConfigOption: when a configuration parameter fails validation.", "source": "juraj-google-style"} {"code": "def _send_impression_event(self, experiment, variation, user_id, attributes):\n impression_event = self.event_builder.create_impression_event(experiment, variation.id, user_id, attributes)\n self.logger.debug(('Dispatching impression event to URL %s with params %s.' % (impression_event.url, impression_event.params)))\n try:\n self.event_dispatcher.dispatch_event(impression_event)\n except:\n self.logger.exception('Unable to dispatch impression event!')\n self.notification_center.send_notifications(enums.NotificationTypes.ACTIVATE, experiment, user_id, attributes, variation, impression_event)", "docstring": "Helper method to send impression event.\n\nArgs:\nexperiment: Experiment for which impression event is being sent.\nvariation: Variation picked for user for the given experiment.\nuser_id: ID for user.\nattributes: Dict representing user attributes and values which need to be recorded.", "source": "codesearchnet"} {"code": "def tag(self, image, repository, tag=None, force=False):\n params = {'tag': tag, 'repo': repository, 'force': (1 if force else 0)}\n url = self._url('/images/{0}/tag', image)\n res = self._post(url, params=params)\n self._raise_for_status(res)\n return (res.status_code == 201)", "docstring": "Tag an image into a repository. 
Similar to the ``docker tag`` command.\n\nArgs:\nimage (str): The image to tag\nrepository (str): The repository to set for the tag\ntag (str): The tag name\nforce (bool): Force\n\nReturns:\n(bool): ``True`` if successful\n\nRaises:\n:py:class:`docker.errors.APIError`\nIf the server returns an error.\n\nExample:\n\n>>> client.tag('ubuntu', 'localhost:5000/ubuntu', 'latest',\nforce=True)", "source": "codesearchnet"} {"code": "def print_layer_summary_with_connections(layer):\n try:\n output_shape = layer.output_shape\n except AttributeError:\n output_shape = 'multiple'\n connections = []\n for node in layer._inbound_nodes:\n if relevant_nodes and node not in relevant_nodes:\n continue\n for inbound_layer, node_index, tensor_index, _ in node.iterate_inbound():\n connections.append('{}[{}][{}]'.format(inbound_layer.name, node_index, tensor_index))\n name = layer.name\n cls_name = layer.__class__.__name__\n if not connections:\n first_connection = ''\n else:\n first_connection = connections[0]\n fields = [name + ' (' + cls_name + ')', output_shape, layer.count_params(), first_connection]\n print_row(fields, positions)\n if len(connections) > 1:\n for i in range(1, len(connections)):\n fields = ['', '', '', connections[i]]\n print_row(fields, positions)", "docstring": "Prints a summary for a single layer (including topological connections).\n\nArgs:\nlayer: target layer.", "source": "github-repos"} {"code": "def destroy_s3_event(app, env, region):\n \n\n \n \n \n \n generated = get_details(app=app, env=env)\n\n bucket = generated.s3_app_bucket()\n\n session = boto3.Session(profile_name=env, region_name=region)\n s3_client = session.client('s3')\n\n config = {}\n\n s3_client.put_bucket_notification_configuration(Bucket=bucket, NotificationConfiguration=config)\n LOG.debug(\"Deleted Lambda S3 notification\")\n\n return True", "docstring": "Destroy S3 event.\n\nArgs:\napp (str): Spinnaker Application name.\nenv (str): Deployment environment.\nregion (str): AWS region.\nReturns:\nbool: True upon successful completion.", "source": "juraj-google-style"} {"code": "def timezone(self, value=0.0):\n if (value is not None):\n try:\n value = float(value)\n except ValueError:\n raise ValueError('value {} need to be of type float for field `timezone`'.format(value))\n if (value < (- 12.0)):\n raise ValueError('value need to be greater or equal -12.0 for field `timezone`')\n if (value > 12.0):\n raise ValueError('value need to be smaller 12.0 for field `timezone`')\n self._timezone = value", "docstring": "Corresponds to IDD Field `timezone` Time relative to GMT.\n\nArgs:\nvalue (float): value for IDD Field `timezone`\nUnit: hr - not on standard units list???\nDefault value: 0.0\nvalue >= -12.0\nvalue <= 12.0\nif `value` is None it will not be checked against the\nspecification and is assumed to be a missing value\n\nRaises:\nValueError: if `value` is not a valid value", "source": "codesearchnet"} {"code": "def adjust_column_width(worksheet):\n dims = {}\n padding = 1\n for row in worksheet.rows:\n for cell in row:\n if (not cell.value):\n continue\n dims[cell.column] = max(dims.get(cell.column, 0), len(str(cell.value)))\n for (col, value) in list(dims.items()):\n worksheet.column_dimensions[col].width = (value + padding)", "docstring": "Adjust column width in worksheet.\n\nArgs:\nworksheet: worksheet to be adjusted", "source": "codesearchnet"} {"code": "def reliability_curve(self):\n total = self.frequencies['Total_Freq'].sum()\n curve = pd.DataFrame(columns=['Bin_Start', 'Bin_End', 'Bin_Center', 
'Positive_Relative_Freq', 'Total_Relative_Freq'])\n curve['Bin_Start'] = self.thresholds[:(- 1)]\n curve['Bin_End'] = self.thresholds[1:]\n curve['Bin_Center'] = (0.5 * (self.thresholds[:(- 1)] + self.thresholds[1:]))\n curve['Positive_Relative_Freq'] = (self.frequencies['Positive_Freq'] / self.frequencies['Total_Freq'])\n curve['Total_Relative_Freq'] = (self.frequencies['Total_Freq'] / total)\n return curve", "docstring": "Calculates the reliability diagram statistics. The key columns are Bin_Start and Positive_Relative_Freq\n\nReturns:\npandas.DataFrame", "source": "codesearchnet"} {"code": "def get_el_amount(self, element):\n return (sum([(self._all_comp[i][element] * abs(self._coeffs[i])) for i in range(len(self._all_comp))]) / 2)", "docstring": "Returns the amount of the element in the reaction.\n\nArgs:\nelement (Element/Specie): Element in the reaction\n\nReturns:\nAmount of that element in the reaction.", "source": "codesearchnet"} {"code": "def __init__(self, where: Optional[Callable[[base.HyperPrimitive], bool]]=None, require_hyper_name: bool=False, per_thread: bool=True, dna_spec: Optional[geno.DNASpec]=None) -> None:\n self._where = where\n self._require_hyper_name: bool = require_hyper_name\n self._name_to_hyper: Dict[str, base.HyperPrimitive] = dict()\n self._annoymous_hyper_name_accumulator = DynamicEvaluationContext._AnnoymousHyperNameAccumulator()\n self._hyper_dict = symbolic.Dict() if dna_spec is None else None\n self._dna_spec: Optional[geno.DNASpec] = dna_spec\n self._per_thread = per_thread\n self._decision_getter = None", "docstring": "Create a dynamic evaluation context.\n\nArgs:\nwhere: A callable object that decide whether a hyper primitive should be\nincluded when being instantiated under `collect`.\nIf None, all hyper primitives under `collect` will be\nincluded.\nrequire_hyper_name: If True, all hyper primitives (e.g. pg.oneof) must\ncome with a `name`. This option helps to eliminate errors when a\nfunction that contains hyper primitive definition may be called multiple\ntimes. Since hyper primitives sharing the same name will be registered\nto the same decision point, repeated call to the hyper primitive\ndefinition will not matter.\nper_thread: If True, the context manager will be applied to current thread\nonly. Otherwise, it will be applied on current process.\ndna_spec: External provided search space. If None, the dynamic evaluation\ncontext can be used to create new search space via `colelct` context\nmanager. Otherwise, current context will use the provided DNASpec to\napply decisions.", "source": "github-repos"} {"code": "def build_kw_dict(kw_list):\n kw_dict = OrderedDict()\n sorted_list = sorted(kw_list, key=(lambda x: x.get('zahlavi').encode('utf-8')))\n for keyword_data in sorted_list:\n if ('zahlavi' not in keyword_data):\n continue\n zahlavi = keyword_data['zahlavi'].encode('utf-8')\n old_record = kw_dict.get(zahlavi)\n if (not old_record):\n kw_dict[zahlavi] = keyword_data\n continue\n key = 'angl_ekvivalent'\n if ((not old_record.get(key)) and keyword_data.get(key)):\n kw_dict[zahlavi] = keyword_data\n continue\n key = 'zdroj_angl_ekvivalentu'\n if ((not old_record.get(key)) and keyword_data.get(key)):\n kw_dict[zahlavi] = keyword_data\n continue\n if (len(str(keyword_data)) > len(str(old_record))):\n kw_dict[zahlavi] = keyword_data\n continue\n return kw_dict", "docstring": "Build keyword dictionary from raw keyword data. 
Ignore invalid or\ninvalidated records.\n\nArgs:\nkw_list (list): List of dicts from :func:`read_kw_file`.\n\nReturns:\nOrderedDict: dictionary with keyword data.", "source": "codesearchnet"} {"code": "def _on_connection_error(self, connection, error_message):\n \n self._channel = None\n if isinstance(error_message, pika_errs.AMQPConnectionError):\n error_message = repr(error_message.args[0])\n _log.error(error_message)\n self.call_later(1, self.reconnect)", "docstring": "Callback invoked when the connection failed to be established.\n\nArgs:\nconnection (pika.connection.SelectConnection): The connection that\nfailed to open.\nerror_message (str): The reason the connection couldn't be opened.", "source": "juraj-google-style"} {"code": "def normalize_to_element(self, element, factor=1):\n all_comp = self._all_comp\n coeffs = self._coeffs\n current_el_amount = (sum([(all_comp[i][element] * abs(coeffs[i])) for i in range(len(all_comp))]) / 2)\n scale_factor = (factor / current_el_amount)\n self._coeffs = [(c * scale_factor) for c in coeffs]", "docstring": "Normalizes the reaction to one of the elements.\nBy default, normalizes such that the amount of the element is 1.\nAnother factor can be specified.\n\nArgs:\nelement (Element/Specie): Element to normalize to.\nfactor (float): Factor to normalize to. Defaults to 1.", "source": "codesearchnet"} {"code": "def __init__(self, logger=logging):\n \n self.logger = logger\n self.interfaces = self._CreateInterfaceMap()", "docstring": "Constructor.\n\nArgs:\nlogger: logger object, used to write to SysLog and serial port.", "source": "juraj-google-style"} {"code": "def _reset_non_empty(self, indices):\n \n reset_video_op = tf.cond(\n self._video_condition,\n lambda: tf.py_func(self._video_reset_writer, [], []),\n tf.no_op)\n with tf.control_dependencies([reset_video_op]):\n inc_op = tf.assign_add(self._episode_counter, 1)\n with tf.control_dependencies([self.history_buffer.reset(indices),\n inc_op]):\n initial_frame_dump_op = tf.cond(\n self._video_condition,\n lambda: tf.py_func(self._video_dump_frames, \n [self.history_buffer.get_all_elements()], []),\n tf.no_op)\n observ_assign_op = self._observ.assign(\n self.history_buffer.get_all_elements()[:, -1, ...])\n with tf.control_dependencies([observ_assign_op, initial_frame_dump_op]):\n reset_model_op = tf.assign(self._reset_model, tf.constant(1.0))\n with tf.control_dependencies([reset_model_op]):\n return tf.gather(self._observ.read_value(), indices)", "docstring": "Reset the batch of environments.\n\nArgs:\nindices: The batch indices of the environments to reset; defaults to all.\n\nReturns:\nBatch tensor of the new observations.", "source": "juraj-google-style"} {"code": "def get_country_by_id(self, country_id) -> 'Country':\n VALID_POSITIVE_INT.validate(country_id, 'get_country_by_id', exc=ValueError)\n if (country_id not in self._countries_by_id.keys()):\n for country in self.countries:\n if (country.country_id == country_id):\n return country\n raise ValueError(country_id)\n else:\n return self._countries_by_id[country_id]", "docstring": "Gets a country in this coalition by its ID\n\nArgs:\ncountry_id: country Id\n\nReturns: Country", "source": "codesearchnet"} {"code": "def forward(self, hidden_state, output_hidden_states: bool=False):\n all_hidden_states = []\n embedding = hidden_state\n for mod in self.mixers:\n embedding = mod(embedding)\n if output_hidden_states:\n all_hidden_states.append(embedding)\n if output_hidden_states:\n return (embedding, all_hidden_states)\n else:\n return 
(embedding, None)", "docstring": "Args:\nhidden_state (`torch.Tensor`): The input tensor.\noutput_hidden_states (`bool`, *optional*, defaults to False.):\nWhether to output the hidden states as well.\n\nReturns:\n`torch.Tensor`: The embedding. `list`: List of all hidden states if `output_hidden_states` is set to\n`True`.", "source": "github-repos"} {"code": "def transform(self, args):\n \n if self.parse_error():\n \n AliasManager.write_alias_config_hash(empty_hash=True)\n return args\n\n \n if self.detect_alias_config_change():\n self.load_full_command_table()\n self.collided_alias = AliasManager.build_collision_table(self.alias_table.sections())\n build_tab_completion_table(self.alias_table)\n else:\n self.load_collided_alias()\n\n transformed_commands = []\n alias_iter = enumerate(args, 1)\n for alias_index, alias in alias_iter:\n is_collided_alias = alias in self.collided_alias and alias_index in self.collided_alias[alias]\n \n \n is_named_arg = alias_index > 1 and args[alias_index - 2].startswith('-')\n is_named_arg_flag = alias.startswith('-')\n excluded_commands = is_alias_command(['remove', 'export'], transformed_commands)\n if not alias or is_collided_alias or is_named_arg or is_named_arg_flag or excluded_commands:\n transformed_commands.append(alias)\n continue\n\n full_alias = self.get_full_alias(alias)\n\n if self.alias_table.has_option(full_alias, 'command'):\n cmd_derived_from_alias = self.alias_table.get(full_alias, 'command')\n telemetry.set_alias_hit(full_alias)\n else:\n transformed_commands.append(alias)\n continue\n\n pos_args_table = build_pos_args_table(full_alias, args, alias_index)\n if pos_args_table:\n logger.debug(POS_ARG_DEBUG_MSG, full_alias, cmd_derived_from_alias, pos_args_table)\n transformed_commands += render_template(cmd_derived_from_alias, pos_args_table)\n\n \n for pos_arg in pos_args_table: \n next(alias_iter)\n else:\n logger.debug(DEBUG_MSG, full_alias, cmd_derived_from_alias)\n transformed_commands += shlex.split(cmd_derived_from_alias)\n\n return self.post_transform(transformed_commands)", "docstring": "Transform any aliases in args to their respective commands.\n\nArgs:\nargs: A list of space-delimited command input extracted directly from the console.\n\nReturns:\nA list of transformed commands according to the alias configuration file.", "source": "juraj-google-style"} {"code": "def _wrap_definition_section(source, width):\n \n \n index = source.index('\\n') + 1\n definitions, max_len = _get_definitions(source[index:])\n sep = '\\n' + ' ' * (max_len + 4)\n lines = [source[:index].strip()]\n for arg, desc in six.iteritems(definitions):\n wrapped_desc = sep.join(textwrap.wrap(desc, width - max_len - 4))\n lines.append(' {arg:{size}} {desc}'.format(\n arg=arg,\n size=str(max_len),\n desc=wrapped_desc\n ))\n return '\\n'.join(lines)", "docstring": "Wrap the given definition section string to the current terminal size.\n\nNote:\nAuto-adjusts the spacing between terms and definitions.\n\nArgs:\nsource: The section string to wrap.\n\nReturns:\nThe wrapped section string.", "source": "juraj-google-style"} {"code": "def actnorm_center(name, x, reverse=False, init=False):\n \n shape = common_layers.shape_list(x)\n with tf.variable_scope(name, reuse=tf.AUTO_REUSE):\n assert len(shape) == 2 or len(shape) == 4\n if len(shape) == 2:\n x_mean = tf.reduce_mean(x, [0], keepdims=True)\n b = get_variable_ddi(\"b\", (1, shape[1]), initial_value=-x_mean,\n init=init)\n elif len(shape) == 4:\n x_mean = tf.reduce_mean(x, [0, 1, 2], keepdims=True)\n b = 
get_variable_ddi(\n \"b\", (1, 1, 1, shape[3]), initial_value=-x_mean, init=init)\n\n if not reverse:\n x += b\n else:\n x -= b\n return x", "docstring": "Add a bias to x.\n\nInitialize such that the output of the first minibatch is zero centered\nper channel.\n\nArgs:\nname: scope\nx: 2-D or 4-D Tensor.\nreverse: Forward or backward operation.\ninit: data-dependent initialization.\n\nReturns:\nx_center: (x + b), if reverse is True and (x - b) otherwise.", "source": "juraj-google-style"} {"code": "def with_forward_compatibility_horizons(*horizons: Optional[tuple[int, int, int]]) -> Callable[[Callable[..., Any]], Callable[..., None]]:\n if not horizons:\n raise ValueError('Expected at least one horizon.')\n for horizon in horizons:\n if not (horizon is None or (len(horizon) == 3 and all((isinstance(x, int) for x in horizon)))):\n raise ValueError('Bad horizon value: %r' % horizon)\n\n def decorator(f: Callable[..., Any]) -> Callable[..., None]:\n if tf_inspect.isclass(f):\n raise ValueError('`with_forward_compatibility_horizons` only supports test methods.')\n\n def decorated(*args, **kwargs):\n for horizon in horizons:\n if horizon is None:\n f(*args, **kwargs)\n else:\n year, month, day = horizon\n with forward_compatibility_horizon(year, month, day):\n f(*args, **kwargs)\n return tf_decorator.make_decorator(f, decorated)\n return decorator", "docstring": "Executes the decorated test with the specified forward-compat horizons.\n\nArgs:\n*horizons: A list of (year, month, day) tuples. If the list includes\n`None`, then the test will also be run with no forward-compatibility\nhorizon set.\n\nReturns:\nA decorator that will execute the test with the specified horizons.", "source": "github-repos"} {"code": "def from_config(cls, config_dict: dict, schema_path: str=None):\n if (schema_path is None):\n schema_path = join(dirname(__file__), 'schema', 'configure_sbi.json')\n with open(schema_path, 'r') as file:\n schema = json.loads(file.read())\n validate(config_dict, schema)\n config_dict['status'] = 'created'\n if ('subarray_id' not in config_dict):\n config_dict['subarray_id'] = 'None'\n timestamp = datetime.datetime.utcnow().isoformat()\n config_dict['created'] = timestamp\n config_dict['updated'] = timestamp\n pb_list = copy.deepcopy(config_dict['processing_blocks'])\n config_dict.pop('processing_blocks', None)\n config_dict['processing_block_ids'] = []\n for pb in pb_list:\n config_dict['processing_block_ids'].append(pb['id'])\n key = SchedulingObject.get_key(SBI_KEY, config_dict['id'])\n DB.save_dict(key, config_dict, hierarchical=False)\n key = '{}:active'.format(SBI_KEY)\n DB.append_to_list(key, config_dict['id'])\n sbi = SchedulingObject(SBI_KEY, config_dict['id'])\n sbi.set_status('created')\n for pb in pb_list:\n pb['sbi_id'] = config_dict['id']\n cls._add_pb(pb)\n return cls(config_dict['id'])", "docstring": "Create an SBI object from the specified configuration dict.\n\nNOTE(BM) This should really be done as a single atomic db transaction.\n\nArgs:\nconfig_dict(dict): SBI configuration dictionary\nschema_path(str, optional): Path to the SBI config schema.", "source": "codesearchnet"} {"code": "def _readvalue(sock, buf, size):\n chunks = []\n rlen = (size + 2)\n while ((rlen - len(buf)) > 0):\n if buf:\n rlen -= len(buf)\n chunks.append(buf)\n buf = _recv(sock, RECV_SIZE)\n if (not buf):\n raise MemcacheUnexpectedCloseError()\n if (rlen == 1):\n chunks[(- 1)] = chunks[(- 1)][:(- 1)]\n else:\n chunks.append(buf[:(rlen - 2)])\n return (buf[rlen:], b''.join(chunks))", "docstring": "Read 
specified amount of bytes from the socket.\n\nRead size bytes, followed by the \"\\r\\n\" characters, from the socket,\nand return those bytes and any trailing bytes read after the \"\\r\\n\".\n\nArgs:\nsock: Socket object, should be connected.\nbuf: String, zero or more characters, returned from an earlier\ncall to _readline or _readvalue (pass an empty string on the\nfirst call).\nsize: Integer, number of bytes to read from the socket.\n\nReturns:\nA tuple of (buf, value) where value is the bytes read from the\nsocket (there will be exactly size bytes) and buf is trailing\ncharacters read after the \"\\r\\n\" following the bytes (but not\nincluding the \\r\\n).", "source": "codesearchnet"} {"code": "def List(self, request, global_params=None):\n config = self.GetMethodConfig('List')\n return self._RunMethod(config, request, global_params=global_params)", "docstring": "Lists all models in the specified dataset. Requires the READER dataset role.\n\nArgs:\nrequest: (BigqueryModelsListRequest) input message\nglobal_params: (StandardQueryParameters, default: None) global arguments\nReturns:\n(ListModelsResponse) The response message.", "source": "github-repos"} {"code": "def sort_dict(d, desc=True):\n sort = sorted(d.items(), key=(lambda x: x[1]), reverse=desc)\n return OrderedDict(sort)", "docstring": "Sort an ordered dictionary by value, descending.\n\nArgs:\nd (OrderedDict): An ordered dictionary.\ndesc (bool): If true, sort desc.\n\nReturns:\nOrderedDict: The sorted dictionary.", "source": "codesearchnet"} {"code": "def size_filter(labeled_grid, min_size):\n \n out_grid = np.zeros(labeled_grid.shape, dtype=int)\n slices = find_objects(labeled_grid)\n j = 1\n for i, s in enumerate(slices):\n box = labeled_grid[s]\n size = np.count_nonzero(box.ravel() == (i + 1))\n if size >= min_size and box.shape[0] > 1 and box.shape[1] > 1:\n out_grid[np.where(labeled_grid == i + 1)] = j\n j += 1\n return out_grid", "docstring": "Remove labeled objects that do not meet size threshold criteria.\n\nArgs:\nlabeled_grid: 2D output from label method.\nmin_size: minimum size of object in pixels.\n\nReturns:\nlabeled grid with smaller objects removed.", "source": "juraj-google-style"} {"code": "def get_filename(self, tag):\n if (tag.find('filename', recursive=False) is not None):\n return tag.filename.contents[0]\n elif (tag.find('anchorfile', recursive=False) is not None):\n return ((tag.anchorfile.contents[0] + '", "docstring": "Extract and return a documentation filename from a tag.\n\nOverride as necessary, though this default implementation probably\ncovers all the cases of interest.\n\nArgs:\ntag: A BeautifulSoup Tag that satisfies match_criterion.\n\nReturns:\nA string that would be appropriate to use as the documentation\nfilename for an entry in a Zeal database.", "source": "codesearchnet"} {"code": "def validate_is_primary(self, is_primary):\n if (is_primary and (not (self.instance and self.instance.is_verified))):\n raise serializers.ValidationError(_('Unverified email addresses may not be used as the primary address.'))\n return is_primary", "docstring": "Validate the provided 'is_primary' parameter.\n\nReturns:\nThe validated 'is_primary' value.\n\nRaises:\nserializers.ValidationError:\nIf the user attempted to mark an unverified email as\ntheir primary email address.", "source": "codesearchnet"} {"code": "def create_token_type_ids_from_sequences(self, token_ids_0: List[int], token_ids_1: Optional[List[int]]=None) -> List[int]:\n eos = [self.eos_token_id]\n if token_ids_1 is None:\n return 
len(token_ids_0 + eos) * [0]\n return len(token_ids_0 + eos + token_ids_1 + eos) * [0]", "docstring": "Create a mask from the two sequences passed to be used in a sequence-pair classification task. MyT5 does not\nmake use of token type ids, therefore a list of zeros is returned.\n\nArgs:\ntoken_ids_0 (`List[int]`):\nList of IDs.\ntoken_ids_1 (`List[int]`, *optional*):\nOptional second list of IDs for sequence pairs.\n\nReturns:\n`List[int]`: List of zeros.", "source": "github-repos"} {"code": "def render(self,\n trajectories: Tuple[NonFluents, Fluents, Fluents, Fluents, np.array],\n batch: Optional[int] = None) -> None:\n \n raise NotImplementedError", "docstring": "Renders the simulated `trajectories` for the given `batch`.\n\nArgs:\ntrajectories: NonFluents, states, actions, interms and rewards.\nbatch: Number of batches to render.", "source": "juraj-google-style"} {"code": "def visit_comparison(self, comparison: _evaluation.ComparisonNode) -> _sql_data_types.Select:\n lhs_result = self.visit(comparison.left)\n rhs_result = self.visit(comparison.right)\n lhs_subquery = lhs_result.as_operand()\n rhs_subquery = rhs_result.as_operand()\n sql_value = f'({lhs_subquery} {comparison.op} {rhs_subquery})'\n sql_alias = 'comparison_'\n return _sql_data_types.Select(select_part=_sql_data_types.RawExpression(sql_value, _sql_data_type=_sql_data_types.Boolean, _sql_alias=sql_alias), from_part=None)", "docstring": "Translates a FHIRPath comparison to Standard SQL.\n\nEach operand is expected to be a collection of a single element. Operands\ncan be strings, integers, decimals, dates, datetimes, and times. Comparison\nwill perform implicit conversion between applicable types.\n\nArgs:\ncomparison: The `Comparison` Expression node.\n\nReturns:\nA compiled Standard SQL expression.", "source": "github-repos"} {"code": "def add_delta_deltas(filterbanks, name=None):\n \n delta_filter = np.array([2, 1, 0, -1, -2])\n delta_delta_filter = scipy.signal.convolve(delta_filter, delta_filter, \"full\")\n\n delta_filter_stack = np.array(\n [[0] * 4 + [1] + [0] * 4, [0] * 2 + list(delta_filter) + [0] * 2,\n list(delta_delta_filter)],\n dtype=np.float32).T[:, None, None, :]\n\n delta_filter_stack /= np.sqrt(\n np.sum(delta_filter_stack**2, axis=0, keepdims=True))\n\n filterbanks = tf.nn.conv2d(\n filterbanks, delta_filter_stack, [1, 1, 1, 1], \"SAME\", data_format=\"NHWC\",\n name=name)\n return filterbanks", "docstring": "Compute time first and second-order derivative channels.\n\nArgs:\nfilterbanks: float32 tensor with shape [batch_size, len, num_bins, 1]\nname: scope name\n\nReturns:\nfloat32 tensor with shape [batch_size, len, num_bins, 3]", "source": "juraj-google-style"} {"code": "def NormalizePath(path):\n \n path = os.path.normpath(path)\n\n for sys_path in sys.path:\n if not sys_path:\n continue\n\n \n sys_path = os.path.join(sys_path, '')\n\n if path.startswith(sys_path):\n return path[len(sys_path):]\n\n return path", "docstring": "Removes any Python system path prefix from the given path.\n\nPython keeps almost all paths absolute. This is not what we actually\nwant to return. This loops through system paths (directories in which\nPython will load modules). 
If \"path\" is relative to one of them, the\ndirectory prefix is removed.\n\nArgs:\npath: absolute path to normalize (relative paths will not be altered)\n\nReturns:\nRelative path if \"path\" is within one of the sys.path directories or\nthe input otherwise.", "source": "juraj-google-style"} {"code": "def to_json(self, is_admin=False):\n if is_admin:\n return {'accountId': self.account_id, 'accountName': self.account_name, 'accountType': self.account_type, 'contacts': self.contacts, 'enabled': (True if (self.enabled == 1) else False), 'requiredRoles': self.required_roles, 'properties': {to_camelcase(prop.name): prop.value for prop in self.account.properties}}\n else:\n return {'accountId': self.account_id, 'accountName': self.account_name, 'contacts': self.contacts}", "docstring": "Returns a dict representation of the object\n\nArgs:\nis_admin (`bool`): If true, include information about the account that should be avaiable only to admins\n\nReturns:\n`dict`", "source": "codesearchnet"} {"code": "def gpio_properties(self):\n res = self._dll.JLINK_EMU_GPIO_GetProps(0, 0)\n if (res < 0):\n raise errors.JLinkException(res)\n num_props = res\n buf = (structs.JLinkGPIODescriptor * num_props)()\n res = self._dll.JLINK_EMU_GPIO_GetProps(ctypes.byref(buf), num_props)\n if (res < 0):\n raise errors.JLinkException(res)\n return list(buf)", "docstring": "Returns the properties of the user-controllable GPIOs.\n\nProvided the device supports user-controllable GPIOs, they will be\nreturned by this method.\n\nArgs:\nself (JLink): the ``JLink`` instance\n\nReturns:\nA list of ``JLinkGPIODescriptor`` instances totalling the number of\nrequested properties.\n\nRaises:\nJLinkException: on error.", "source": "codesearchnet"} {"code": "def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None):\n output = [self.cls_token_id] + token_ids_0 + [self.sep_token_id]\n if token_ids_1 is not None:\n output += token_ids_1 + [self.sep_token_id]\n return output", "docstring": "Build model inputs from a sequence or a pair of sequence for sequence classification tasks by concatenating and\nadding special tokens. 
A SqueezeBERT sequence has the following format:\n\n- single sequence: `[CLS] X [SEP]`\n- pair of sequences: `[CLS] A [SEP] B [SEP]`\n\nArgs:\ntoken_ids_0 (`List[int]`):\nList of IDs to which the special tokens will be added.\ntoken_ids_1 (`List[int]`, *optional*):\nOptional second list of IDs for sequence pairs.\n\nReturns:\n`List[int]`: List of [input IDs](../glossary#input-ids) with the appropriate special tokens.", "source": "github-repos"} {"code": "def size():\n try:\n assert ((os != 'nt') and sys.stdout.isatty())\n (rows, columns) = os.popen('stty size', 'r').read().split()\n except (AssertionError, AttributeError, ValueError):\n (rows, columns) = (DEFAULT_HEIGHT, DEFAULT_WIDTH)\n return (int(rows), int(columns))", "docstring": "Determines the height and width of the console window\n\nReturns:\ntuple of int: The height in lines, then width in characters", "source": "codesearchnet"} {"code": "def project_texture_on_surface(texture, surface, angle=DEFAULT_ANGLE):\n projected_surface = project_surface(surface, angle)\n (texture_x, _) = texture\n texture_y = map_texture_to_surface(texture, projected_surface)\n return (texture_x, texture_y)", "docstring": "Maps a texture onto a surface, then projects to 2D and returns a layer.\n\nArgs:\ntexture (texture): the texture to project\nsurface (surface): the surface to project onto\nangle (float): the projection angle in degrees (0 = top-down, 90 = side view)\n\nReturns:\nlayer: A layer.", "source": "codesearchnet"} {"code": "def states():\n states = {}\n fname = pkg_resources.resource_filename(__name__, 'resources/City_State_Pairs.csv')\n with open(fname, 'rU') as csvfile:\n reader = csv.reader(csvfile, delimiter=',')\n for row in reader:\n states[row[0]] = row[1]\n return states", "docstring": "Get a dictionary of Backpage city names mapped to their respective states.\n\nReturns:\ndictionary of Backpage city names mapped to their states", "source": "codesearchnet"} {"code": "def list_metadata(self, resource):\n \n self.metadata_service.set_auth(self._token_metadata)\n return self.metadata_service.list(resource)", "docstring": "List all keys associated with the given resource.\n\nArgs:\nresource (intern.resource.boss.BossResource)\n\nReturns:\n(list)\n\nRaises:\nrequests.HTTPError on a failure.", "source": "juraj-google-style"} {"code": "def __init__(self, distributed_variables=None, name=None, **unused_kwargs):\n if not ops.executing_eagerly_outside_functions():\n raise ValueError('PackedDistributedVariable should be created in eager mode.')\n if not distributed_variables:\n raise ValueError('Expect a non-empty list of variables to pack.')\n for i, var in enumerate(distributed_variables):\n if not resource_variable_ops.is_resource_variable(var):\n raise ValueError('Expect a list of ResourceVariables to pack, but the %d-th variable is %s' % (i, type(var)))\n self._distributed_variables = distributed_variables\n self._devices = [v.device for v in distributed_variables]\n with ops.init_scope():\n with ops.name_scope(name, 'Variable', skip_on_eager=False) as name:\n handle = ops.pack_eager_tensors([var.handle for var in distributed_variables])\n handle_name = ops.name_from_scope_name(name)\n unique_id = '%s_%d' % (handle_name, ops.uid())\n super(PackedDistributedVariable, self).__init__(trainable=distributed_variables[0].trainable, shape=distributed_variables[0].shape, dtype=distributed_variables[0].dtype, handle=handle, synchronization=distributed_variables[0].synchronization, constraint=distributed_variables[0].constraint, 
aggregation=distributed_variables[0].aggregation, distribute_strategy=distributed_variables[0]._distribute_strategy, name=name, unique_id=unique_id, handle_name=handle_name, graph_element=None, initial_value=None, initializer_op=None, is_initialized_op=None, cached_value=None, caching_device=None, is_distributed_variables=True)", "docstring": "Packs a list of variables which are distributed across devices.\n\nArgs:\ndistributed_variables: A list of distributed Variables to pack.\nname: Optional name for the variable. Defaults to `'Variable'` and gets\nuniquified automatically.", "source": "github-repos"} {"code": "def Validate(self, value):\n if (value is None):\n return None\n if (not isinstance(value, self.rdfclass)):\n try:\n r = self.rdfclass()\n r.FromDict(value)\n return r\n except (AttributeError, TypeError, rdfvalue.InitializeError):\n raise TypeValueError(('Value for arg %s should be an %s' % (self.name, self.rdfclass.__name__)))\n return value", "docstring": "Validate the value.\n\nArgs:\nvalue: Value is expected to be a dict-like object that a given RDFStruct\ncan be initialized from.\n\nRaises:\nTypeValueError: If the value is not a valid dict-like object that a given\nRDFStruct can be initialized from.\n\nReturns:\nA valid instance of self.rdfclass or None.", "source": "codesearchnet"} {"code": "def load_project_tests(test_path, dot_env_path=None):\n \n \n debugtalk_path = locate_debugtalk_py(test_path)\n\n if debugtalk_path:\n \n project_working_directory = os.path.dirname(debugtalk_path)\n else:\n \n project_working_directory = os.getcwd()\n\n \n sys.path.insert(0, project_working_directory)\n\n \n \n \n \n dot_env_path = dot_env_path or os.path.join(project_working_directory, \".env\")\n project_mapping[\"env\"] = load_dot_env_file(dot_env_path)\n\n if debugtalk_path:\n \n debugtalk_functions = load_debugtalk_functions()\n else:\n debugtalk_functions = {}\n\n \n\n project_mapping[\"PWD\"] = project_working_directory\n built_in.PWD = project_working_directory\n project_mapping[\"functions\"] = debugtalk_functions\n\n \n tests_def_mapping[\"api\"] = load_api_folder(os.path.join(project_working_directory, \"api\"))\n tests_def_mapping[\"PWD\"] = project_working_directory", "docstring": "load api, testcases, .env, debugtalk.py functions.\napi/testcases folder is relative to project_working_directory\n\nArgs:\ntest_path (str): test file/folder path, locate pwd from this path.\ndot_env_path (str): specified .env file path\n\nReturns:\ndict: project loaded api/testcases definitions, environments and debugtalk.py functions.", "source": "juraj-google-style"} {"code": "def predict(fqdn, result, *argl, **argd):\n out = None\n if (len(argl) > 0):\n machine = argl[0]\n if isclassifier(machine):\n out = classify_predict(fqdn, result, None, *argl, **argd)\n elif isregressor(machine):\n out = regress_predict(fqdn, result, None, *argl, **argd)\n return out", "docstring": "Analyzes the result of a generic predict operation performed by\n`sklearn`.\n\nArgs:\nfqdn (str): full-qualified name of the method that was called.\nresult: result of calling the method with `fqdn`.\nargl (tuple): positional arguments passed to the method call.\nargd (dict): keyword arguments passed to the method call.", "source": "codesearchnet"} {"code": "def verify_path(path, is_collection):\n num_elements = len(path)\n if (num_elements == 0):\n raise ValueError('Document or collection path cannot be empty')\n if is_collection:\n if ((num_elements % 2) == 0):\n raise ValueError('A collection must have an odd number of 
path elements')\n elif ((num_elements % 2) == 1):\n raise ValueError('A document must have an even number of path elements')\n for element in path:\n if (not isinstance(element, six.string_types)):\n msg = BAD_PATH_TEMPLATE.format(element, type(element))\n raise ValueError(msg)", "docstring": "Verifies that a ``path`` has the correct form.\n\nChecks that all of the elements in ``path`` are strings.\n\nArgs:\npath (Tuple[str, ...]): The components in a collection or\ndocument path.\nis_collection (bool): Indicates if the ``path`` represents\na document or a collection.\n\nRaises:\nValueError: if\n\n* the ``path`` is empty\n* ``is_collection=True`` and there are an even number of elements\n* ``is_collection=False`` and there are an odd number of elements\n* an element is not a string", "source": "codesearchnet"} {"code": "def ParseMany(text):\n \n precondition.AssertType(text, Text)\n\n if compatibility.PY2:\n text = text.encode(\"utf-8\")\n\n return list(yaml.safe_load_all(text))", "docstring": "Parses many YAML documents into a list of Python objects.\n\nArgs:\ntext: A YAML source with multiple documents embedded.\n\nReturns:\nA list of Python data structures corresponding to the YAML documents.", "source": "juraj-google-style"} {"code": "def success(channel, post):\n datapacks = [('Game', post[0], True), ('Upvotes', post[2], True)]\n gui = ui_embed.UI(channel, 'Link', post[1], modulename=modulename, colour=16746496, thumbnail=post[1], datapacks=datapacks)\n return gui", "docstring": "Creates an embed UI containing the Reddit posts\n\nArgs:\nchannel (discord.Channel): The Discord channel to bind the embed to\npost (tuple): Tuples of (field, value, percentile)\n\nReturns:", "source": "codesearchnet"} {"code": "def distance(self, other):\n return distance(self.lat, self.lon, None, other.lat, other.lon, None)", "docstring": "Distance between points\n\nArgs:\nother (:obj:`Point`)\nReturns:\nfloat: Distance in km", "source": "codesearchnet"} {"code": "def _AddForwardedIps(self, forwarded_ips, interface):\n \n for address in forwarded_ips:\n self.ip_forwarding_utils.AddForwardedIp(address, interface)", "docstring": "Configure the forwarded IP address on the network interface.\n\nArgs:\nforwarded_ips: list, the forwarded IP address strings to configure.\ninterface: string, the output device to use.", "source": "juraj-google-style"} {"code": "def get_cpu_props(cls, family, arch='x86'):\n \n\n cpus = cls.get_cpus_by_arch(arch)\n try:\n return cpus.xpath('model[@name=\"{0}\"]'.format(family))[0]\n except IndexError:\n raise LagoException('No such CPU family: {0}'.format(family))", "docstring": "Get CPU info XML\n\nArgs:\nfamily(str): CPU family\narch(str): CPU arch\n\nReturns:\nlxml.etree.Element: CPU xml\n\nRaises:\n:exc:`~LagoException`: If no such CPU family exists", "source": "juraj-google-style"} {"code": "def assert_lines_equal_ignoring_whitespace(test, expected_lines, actual_lines):\n test.assertEqual(len(expected_lines), len(actual_lines), 'Mismatch in the number of lines: %d vs %d' % (len(expected_lines), len(actual_lines)))\n for expected_line, actual_line in zip(expected_lines, actual_lines):\n test.assertEqual(''.join(expected_line.split()), ''.join(actual_line.split()))", "docstring": "Assert equality in lines, ignoring all whitespace.\n\nArgs:\ntest: An instance of unittest.TestCase or its subtypes (e.g.,\nTensorFlowTestCase).\nexpected_lines: Expected lines as an iterable of strings.\nactual_lines: Actual lines as an iterable of strings.", "source": "github-repos"} {"code": "def 
__init__(self, proj_info):\n \n self._proj_info = proj_info\n self.__docfolder = DOC_FOLDER\n self.__htmlfolder = HTML_FOLDER\n\n self.conf_fpath = os.path.abspath(\n os.path.join(self.__docfolder, 'conf.py'))\n self.code_fdpath = os.path.abspath(\n os.path.join(SRC_FOLDER, self.proj_info.project_name))\n\n self._sphinx_quickstart_cmd = [\n 'sphinx-quickstart', self.__docfolder, '-p',\n self.proj_info.project_name, '-a', self.proj_info.author_fakename,\n '-v', self.proj_info.project_version, '-r',\n self.proj_info.project_version, '-l', 'en', '--ext-autodoc',\n '--makefile', '--quiet'\n ]\n self._sphinx_apidoc_cmd = [\n 'sphinx-apidoc', self.code_fdpath, '-o', self.__docfolder, '-M',\n '--force'\n ]\n\n \n self._sphinx_buildhtml_cmd = [\n 'sphinx-build', '-b', 'html', self.__docfolder, self.__htmlfolder\n ]\n\n \n mkdir_exist(self.__docfolder)\n mkdir_exist(self.__htmlfolder)", "docstring": "TODO: to be defined1.\n\nArgs:\nproj_info (ProjectInfo): TODO", "source": "juraj-google-style"} {"code": "def populate_audit_fields(self, event):\n \n event.updated = self._data\n event.original = self.get_original()._data", "docstring": "Populates the the audit JSON fields with raw data from the model, so\nall changes can be tracked and diffed.\n\nArgs:\nevent (Event): The Event instance to attach the data to\ninstance (fleaker.db.Model): The newly created/updated model", "source": "juraj-google-style"} {"code": "def _validate_isvalid_composition(self, isvalid_composition, field, value):\n \n sum_amount = 0.0\n if value['kind'] in ['mass fraction', 'mole fraction']:\n low_lim = 0.0\n up_lim = 1.0\n total_amount = 1.0\n elif value['kind'] in ['mole percent']:\n low_lim = 0.0\n up_lim = 100.0\n total_amount = 100.0\n else:\n self._error(field, 'composition kind must be \"mole percent\", \"mass fraction\", or '\n '\"mole fraction\"')\n return False\n\n for sp in value['species']:\n amount = sp['amount'][0]\n sum_amount += amount\n\n \n if amount < low_lim:\n self._error(field, 'Species ' + sp['species-name'] + ' ' +\n value['kind'] + ' must be greater than {:.1f}'.format(low_lim)\n )\n elif amount > up_lim:\n self._error(field, 'Species ' + sp['species-name'] + ' ' +\n value['kind'] + ' must be less than {:.1f}'.format(up_lim)\n )\n\n \n if not np.isclose(total_amount, sum_amount):\n self._error(field, 'Species ' + value['kind'] +\n 's do not sum to {:.1f}: '.format(total_amount) +\n '{:f}'.format(sum_amount)\n )", "docstring": "Checks for valid specification of composition.\n\nArgs:\nisvalid_composition (bool): flag from schema indicating\ncomposition to be checked.\nfield (str): 'composition'\nvalue (dict): dictionary of composition\n\nThe rule's arguments are validated against this schema:\n{'isvalid_composition': {'type': 'bool'}, 'field': {'type': 'str'},\n'value': {'type': 'dict'}}", "source": "juraj-google-style"} {"code": "def _get_url_scheme_regexes():\n output = sh(\"hg showconfig | grep '^schemes.'\", shell=True).split('\\n')\n log.debug(output)\n schemes = (l.split('.', 1)[1].split('=') for l in output if ('=' in l))\n regexes = sorted(((k, v, re.compile((v.replace('{1}', '(.*)') + '(.*)'))) for (k, v) in schemes), key=(lambda x: (len(x[0]), x)), reverse=True)\n return regexes", "docstring": "Get configured mercurial schemes and convert them to regexes\n\nReturns:\ntuple: (scheme_name, scheme_value, compiled scheme_regex)", "source": "codesearchnet"} {"code": "def increment_lessons(self, measure_vals, reward_buff_sizes=None):\n ret = {}\n if reward_buff_sizes:\n for (brain_name, buff_size) in 
reward_buff_sizes.items():\n if self._lesson_ready_to_increment(brain_name, buff_size):\n measure_val = measure_vals[brain_name]\n ret[brain_name] = self.brains_to_curriculums[brain_name].increment_lesson(measure_val)\n else:\n for (brain_name, measure_val) in measure_vals.items():\n ret[brain_name] = self.brains_to_curriculums[brain_name].increment_lesson(measure_val)\n return ret", "docstring": "Attempts to increments all the lessons of all the curriculums in this\nMetaCurriculum. Note that calling this method does not guarantee the\nlesson of a curriculum will increment. The lesson of a curriculum will\nonly increment if the specified measure threshold defined in the\ncurriculum has been reached and the minimum number of episodes in the\nlesson have been completed.\n\nArgs:\nmeasure_vals (dict): A dict of brain name to measure value.\nreward_buff_sizes (dict): A dict of brain names to the size of their\ncorresponding reward buffers.\n\nReturns:\nA dict from brain name to whether that brain's lesson number was\nincremented.", "source": "codesearchnet"} {"code": "def current_api_key():\n if app.config.get('IGNORE_AUTH'):\n return models.ApiKey(id='anonymous_superuser', secret='', superuser=True)\n ops = _get_api_key_ops()\n api_key = ops.get()\n logging.debug('Authenticated as API key=%r', api_key.id)\n return api_key", "docstring": "Determines the API key for the current request.\n\nReturns:\nThe ApiKey instance.", "source": "codesearchnet"} {"code": "def assert_finite(x, data=None, summarize=None, message=None, name=None):\n with tf.compat.v2.name_scope((name or 'assert_finite')):\n x_ = tf.get_static_value(x)\n if (x_ is not None):\n if (~ np.all(np.isfinite(x_))):\n raise ValueError(message)\n return x\n assertion = tf.compat.v1.assert_equal(tf.math.is_finite(x), tf.ones_like(x, tf.bool), data=data, summarize=summarize, message=message)\n with tf.control_dependencies([assertion]):\n return tf.identity(x)", "docstring": "Assert all elements of `x` are finite.\n\nArgs:\nx: Numeric `Tensor`.\ndata: The tensors to print out if the condition is False. 
Defaults to\nerror message and first few entries of `x`.\nsummarize: Print this many entries of each tensor.\nmessage: A string to prefix to the default message.\nname: A name for this operation (optional).\nDefaults to \"assert_finite\".\n\nReturns:\nOp raising `InvalidArgumentError` unless `x` has specified rank or lower.\nIf static checks determine `x` has correct rank, a `no_op` is returned.\n\nRaises:\nValueError: If static checks determine `x` has wrong rank.", "source": "codesearchnet"} {"code": "def encode_message(self, message):\n \n message.check_initialized()\n\n return json.dumps(message, cls=MessageJSONEncoder,\n protojson_protocol=self)", "docstring": "Encode Message instance to JSON string.\n\nArgs:\nMessage instance to encode in to JSON string.\n\nReturns:\nString encoding of Message instance in protocol JSON format.\n\nRaises:\nmessages.ValidationError if message is not initialized.", "source": "juraj-google-style"} {"code": "class RootMeanSquaredError(reduction_metrics.Mean):\n\n def __init__(self, name='root_mean_squared_error', dtype=None):\n super().__init__(name, dtype=dtype)\n self._direction = 'down'\n\n def update_state(self, y_true, y_pred, sample_weight=None):\n \n y_true = ops.convert_to_tensor(y_true, self._dtype)\n y_pred = ops.convert_to_tensor(y_pred, self._dtype)\n y_true, y_pred = squeeze_or_expand_to_same_rank(y_true, y_pred)\n error_sq = ops.square(y_pred - y_true)\n return super().update_state(error_sq, sample_weight=sample_weight)\n\n def result(self):\n return ops.sqrt(super().result())", "docstring": "Computes root mean squared error metric between `y_true` and `y_pred`.\n\nFormula:\n\n```python\nloss = sqrt(mean((y_pred - y_true) ** 2))\n```\n\nArgs:\nname: (Optional) string name of the metric instance.\ndtype: (Optional) data type of the metric result.\n\nExamples:\n\n>>> m = keras.metrics.RootMeanSquaredError()\n>>> m.update_state([[0, 1], [0, 0]], [[1, 1], [0, 0]])\n>>> m.result()\n0.5\n\n>>> m.reset_state()\n>>> m.update_state([[0, 1], [0, 0]], [[1, 1], [0, 0]],\n... sample_weight=[1, 0])\n>>> m.result()\n0.70710677\n\nUsage with `compile()` API:\n\n```python\nmodel.compile(\noptimizer='sgd',\nloss='mse',\nmetrics=[keras.metrics.RootMeanSquaredError()])\n```", "source": "github-repos"} {"code": "def send_notifications(self, notification_type, *args):\n \n\n if notification_type in self.notifications:\n for notification_id, callback in self.notifications[notification_type]:\n try:\n callback(*args)\n except:\n self.logger.exception('Problem calling notify callback!')", "docstring": "Fires off the notification for the specific event. 
Uses var args to pass in a\narbitrary list of parameter according to which notification type was fired.\n\nArgs:\nnotification_type: Type of notification to fire (String from .helpers.enums.NotificationTypes)\nargs: variable list of arguments to the callback.", "source": "juraj-google-style"} {"code": "def _get_node(self, token: str) -> dict:\n node = self.data\n for char in token:\n if char not in node:\n break\n node = node[char]\n return node", "docstring": "Retrieves the node corresponding to the given token in the Trie.\n\nArgs:\ntoken (str): The token for which the corresponding node needs to be retrieved.\n\nReturns:\ndict: The node in the Trie corresponding to the given token.", "source": "github-repos"} {"code": "def matrix_rank(a, tol=None, validate_args=False, name=None):\n with tf.compat.v1.name_scope(name, 'matrix_rank', [a, tol]):\n a = tf.convert_to_tensor(value=a, dtype_hint=tf.float32, name='a')\n assertions = _maybe_validate_matrix(a, validate_args)\n if assertions:\n with tf.control_dependencies(assertions):\n a = tf.identity(a)\n s = tf.linalg.svd(a, compute_uv=False)\n if (tol is None):\n if a.shape[(- 2):].is_fully_defined():\n m = np.max(a.shape[(- 2):].as_list())\n else:\n m = tf.reduce_max(input_tensor=tf.shape(input=a)[(- 2):])\n eps = np.finfo(a.dtype.as_numpy_dtype).eps\n tol = ((eps * tf.cast(m, a.dtype)) * tf.reduce_max(input_tensor=s, axis=(- 1), keepdims=True))\n return tf.reduce_sum(input_tensor=tf.cast((s > tol), tf.int32), axis=(- 1))", "docstring": "Compute the matrix rank; the number of non-zero SVD singular values.\n\nArguments:\na: (Batch of) `float`-like matrix-shaped `Tensor`(s) which are to be\npseudo-inverted.\ntol: Threshold below which the singular value is counted as \"zero\".\nDefault value: `None` (i.e., `eps * max(rows, cols) * max(singular_val)`).\nvalidate_args: When `True`, additional assertions might be embedded in the\ngraph.\nDefault value: `False` (i.e., no graph assertions are added).\nname: Python `str` prefixed to ops created by this function.\nDefault value: \"matrix_rank\".\n\nReturns:\nmatrix_rank: (Batch of) `int32` scalars representing the number of non-zero\nsingular values.", "source": "codesearchnet"} {"code": "def stat(self, follow_symlinks=True):\n \n return self._system.stat(\n path=self._path, client_kwargs=self._client_kwargs,\n header=self._header)", "docstring": "Return a stat_result object for this entry.\n\nThe result is cached on the os.DirEntry object.\n\nArgs:\nfollow_symlinks (bool): Follow symlinks.\nNot supported on cloud storage objects.\n\nReturns:\nos.stat_result: Stat result object", "source": "juraj-google-style"} {"code": "def _get_request(self, auth=None):\n self.request = HSRequest((auth or self.auth), self.env)\n self.request.response_callback = self.response_callback\n return self.request", "docstring": "Return an http request object\n\nauth: Auth data to use\n\nReturns:\nA HSRequest object", "source": "codesearchnet"} {"code": "def _process(self, input):\n input = re.sub('<[^>]*>', ' ', input)\n punct = list(string.punctuation)\n for symbol in punct:\n input = input.replace(symbol, (' %s ' % symbol))\n input = filter((lambda x: (x != u'')), input.lower().split(' '))\n return input", "docstring": "Takes in html-mixed body text as a string and returns a list of strings,\nlower case and with punctuation given spacing.\n\nCalled by self._gen_sentence()\n\nArgs:\ninpnut (string): body text", "source": "codesearchnet"} {"code": "def format_to_string(self, pretty: bool=False) -> str:\n trace = {}\n 
trace['traceEvents'] = self._metadata + self._events\n if pretty:\n return json.dumps(trace, indent=4, separators=(',', ': '))\n else:\n return json.dumps(trace, separators=(',', ':'))", "docstring": "Formats the chrome trace to a string.\n\nArgs:\npretty: (Optional.) If True, produce human-readable JSON output.\n\nReturns:\nA JSON-formatted string in Chrome Trace format.", "source": "github-repos"} {"code": "def resolve(self, file_path, follow_symlinks=True, allow_fd=False):\n \n if isinstance(file_path, int):\n if allow_fd and sys.version_info >= (3, 3):\n return self.get_open_file(file_path).get_object()\n raise TypeError('path should be string, bytes or '\n 'os.PathLike (if supported), not int')\n\n if follow_symlinks:\n file_path = make_string_path(file_path)\n return self.get_object_from_normpath(self.resolve_path(file_path))\n return self.lresolve(file_path)", "docstring": "Search for the specified filesystem object, resolving all links.\n\nArgs:\nfile_path: Specifies the target FakeFile object to retrieve.\nfollow_symlinks: If `False`, the link itself is resolved,\notherwise the object linked to.\nallow_fd: If `True`, `file_path` may be an open file descriptor\n\nReturns:\nThe FakeFile object corresponding to `file_path`.\n\nRaises:\nIOError: if the object is not found.", "source": "juraj-google-style"} {"code": "def concatenate(self, other):\n other = as_shape(other)\n if ((self._dims is None) or (other.dims is None)):\n return unknown_shape()\n else:\n return TensorShape((self._dims + other.dims))", "docstring": "Returns the concatenation of the dimension in `self` and `other`.\n\n*N.B.* If either `self` or `other` is completely unknown,\nconcatenation will discard information about the other shape. In\nfuture, we might support concatenation that preserves this\ninformation for use with slicing.\n\nArgs:\nother: Another `TensorShape`.\n\nReturns:\nA `TensorShape` whose dimensions are the concatenation of the\ndimensions in `self` and `other`.", "source": "codesearchnet"} {"code": "def get_policies(self):\n prefix = (_IDENTITY_NS + _POLICY_NS)\n policylist_list = [_create_from_bytes(d, identity_pb2.PolicyList) for (_, d) in self._state_view.leaves(prefix=prefix)]\n policies = []\n for policy_list in policylist_list:\n for policy in policy_list.policies:\n policies.append(policy)\n return sorted(policies, key=(lambda p: p.name))", "docstring": "Returns all the Policies under the Identity namespace.\n\nReturns:\n(list): A list containing all the Policies under the Identity\nnamespace.", "source": "codesearchnet"} {"code": "def chr_range(*args, **kw):\n if (len(args) == 1):\n (stop,) = args\n (start, step) = (0, 1)\n elif (len(args) == 2):\n (start, stop) = args\n step = 1\n elif (len(args) == 3):\n (start, stop, step) = args\n else:\n raise ValueError('incorrect args')\n chr_ = six.unichr\n base = ord(kw.get('base', 'a'))\n if isinstance(start, int):\n start = (base + start)\n if isinstance(stop, int):\n stop = (base + stop)\n if isinstance(start, six.string_types):\n start = ord(start)\n if isinstance(stop, six.string_types):\n stop = ord(stop)\n if (step is None):\n step = 1\n list_ = list(map(six.text_type, map(chr_, range(start, stop, step))))\n return list_", "docstring": "r\"\"\"\nLike range but returns characters\n\nArgs:\nstart (None): (default = None)\nstop (None): (default = None)\nstep (None): (default = None)\n\nKwargs:\nbase (str): charater to start with (default='a')\n\nReturns:\nlist: list of characters\n\nCommandLine:\npython -m utool.util_str 
--exec-chr_range\n\nExample:\n>>> # ENABLE_DOCTEST\n>>> from utool.util_str import * # NOQA\n>>> import utool as ut\n>>> args = (5,)\n>>> result = ut.repr2(chr_range(2, base='a'))\n>>> print(chr_range(0, 5))\n>>> print(chr_range(0, 50))\n>>> print(chr_range(0, 5, 2))\n>>> print(result)\n['a', 'b']", "source": "codesearchnet"} {"code": "def create_and_fill_np_array(start_or_end_logits, dataset, max_len):\n step = 0\n logits_concat = np.full((len(dataset), max_len), -100, dtype=np.float64)\n for i, output_logit in enumerate(start_or_end_logits):\n batch_size = output_logit.shape[0]\n cols = output_logit.shape[1]\n if step + batch_size < len(dataset):\n logits_concat[step:step + batch_size, :cols] = output_logit\n else:\n logits_concat[step:, :cols] = output_logit[:len(dataset) - step]\n step += batch_size\n return logits_concat", "docstring": "Create and fill numpy array of size len_of_validation_data * max_length_of_output_tensor\n\nArgs:\nstart_or_end_logits(:obj:`tensor`):\nThis is the output predictions of the model. We can only enter either start or end logits.\neval_dataset: Evaluation dataset\nmax_len(:obj:`int`):\nThe maximum length of the output tensor. ( See the model.eval() part for more details )", "source": "github-repos"} {"code": "def is_attribute_applicable_to_object_type(self, attribute, object_type):\n rule_set = self._attribute_rule_sets.get(attribute)\n if (object_type in rule_set.applies_to_object_types):\n return True\n else:\n return False", "docstring": "Check if the attribute is supported by the given object type.\n\nArgs:\nattribute (string): The name of the attribute (e.g., 'Name').\nRequired.\nobject_type (ObjectType): An ObjectType enumeration\n(e.g., ObjectType.SYMMETRIC_KEY). Required.\nReturns:\nbool: True if the attribute is applicable to the object type.\nFalse otherwise.", "source": "codesearchnet"} {"code": "def _new_ass_hierarchy(self, file_ass):\n ret_struct = {'source': '', 'subhierarchy': {}, 'attrs': {}, 'snippets': {}}\n ret_struct['source'] = file_ass['source']\n self._ass_refresh_attrs(ret_struct, file_ass)\n for (name, subhierarchy) in file_ass['subhierarchy'].items():\n ret_struct['subhierarchy'][name] = self._new_ass_hierarchy(subhierarchy)\n return ret_struct", "docstring": "Returns a completely new cache hierarchy for given assistant file.\n\nArgs:\nfile_ass: the assistant from filesystem hierarchy to create cache hierarchy for\n(for format see what refresh_role accepts)\nReturns:\nthe newly created cache hierarchy", "source": "codesearchnet"} {"code": "def sample(self, n_samples):\n \n if self.tau > 1 or self.tau < -1:\n raise ValueError(\"The range for correlation measure is [-1,1].\")\n\n v = np.random.uniform(0, 1, n_samples)\n c = np.random.uniform(0, 1, n_samples)\n\n u = self.percent_point(c, v)\n return np.column_stack((u, v))", "docstring": "Generate specified `n_samples` of new data from model. 
`v~U[0,1],v~C^-1(u|v)`\n\nArgs:\nn_samples: `int`, amount of samples to create.\n\nReturns:\nnp.ndarray: Array of length `n_samples` with generated data from the model.", "source": "juraj-google-style"} {"code": "def _construct_punctuation_token(self, d: Dict, nlp) -> List[Dict]:\n \n\n result = []\n if not d[\"token\"]:\n this_token = {attrs.IS_PUNCT: True}\n elif len(d[\"token\"]) == 1:\n this_token = {attrs.ORTH: d[\"token\"][0]}\n else:\n global FLAG_ID\n punct_set = set(d[\"token\"])\n\n def is_selected_punct(x):\n return x in punct_set\n\n FLAG_DICT[FLAG_ID] = nlp.vocab.add_flag(is_selected_punct)\n this_token = {FLAG_DICT[FLAG_ID]: True}\n FLAG_ID += 1\n result.append(this_token)\n result = self._add_common_constrain(result, d)\n return result", "docstring": "Construct a shape token\nArgs:\nd: Dict\nnlp\n\nReturns: List[Dict]", "source": "juraj-google-style"} {"code": "def condense(input_string):\n try:\n assert isinstance(input_string, basestring)\n except AssertionError:\n raise TypeError\n removed_leading_whitespace = re.sub('>\\\\s+', '>', input_string).strip()\n removed_trailing_whitespace = re.sub('\\\\s+<', '<', removed_leading_whitespace).strip()\n return removed_trailing_whitespace", "docstring": "Trims leadings and trailing whitespace between tags in an html document\n\nArgs:\ninput_string: A (possible unicode) string representing HTML.\n\nReturns:\nA (possibly unicode) string representing HTML.\n\nRaises:\nTypeError: Raised if input_string isn't a unicode string or string.", "source": "codesearchnet"} {"code": "def get_num_bytes(self, batch: Sequence[numpy.ndarray]) -> int:\n return sum((sys.getsizeof(element) for element in batch))", "docstring": "Returns:\nThe number of bytes of data for a batch of numpy arrays.", "source": "github-repos"} {"code": "def reorder_resource_views(self, resource_views):\n if (not isinstance(resource_views, list)):\n raise HDXError('ResourceViews should be a list!')\n ids = list()\n for resource_view in resource_views:\n if isinstance(resource_view, str):\n resource_view_id = resource_view\n else:\n resource_view_id = resource_view['id']\n if (is_valid_uuid(resource_view_id) is False):\n raise HDXError(('%s is not a valid resource view id!' 
% resource_view))\n ids.append(resource_view_id)\n (_, result) = self._read_from_hdx('resource view', self.data['id'], 'id', ResourceView.actions()['reorder'], order=ids)", "docstring": "Order resource views in resource.\n\nArgs:\nresource_views (List[Union[ResourceView,Dict,str]]): A list of either resource view ids or resource views metadata from ResourceView objects or dictionaries\n\nReturns:\nNone", "source": "codesearchnet"} {"code": "def tokens(self, tokenset='internal'):\n toks = self.get('tokens', {}).get(tokenset)\n if (toks is not None):\n if isinstance(toks, stringtypes):\n toks = YyTokenLattice.from_string(toks)\n elif isinstance(toks, Sequence):\n toks = YyTokenLattice.from_list(toks)\n return toks", "docstring": "Deserialize and return a YyTokenLattice object for the\ninitial or internal token set, if provided, from the YY\nformat or the JSON-formatted data; otherwise return the\noriginal string.\n\nArgs:\ntokenset (str): return `'initial'` or `'internal'` tokens\n(default: `'internal'`)\nReturns:\n:class:`YyTokenLattice`", "source": "codesearchnet"} {"code": "def c_to_f(temperature):\n if temperature is None:\n return None\n return temperature * 9 / 5 + 32", "docstring": "Converts temperature from celcius to fahrenheit\n\nArgs:\ntemperature: floating point representing the temperature in celcius\nReturns: temperature in fahrenheit", "source": "github-repos"} {"code": "def get_files_in_branch(profile, branch_sha):\n tree_sha = get_commit_tree(profile, branch_sha)\n files = get_files_in_tree(profile, tree_sha)\n tree = [prepare(x) for x in files]\n return tree", "docstring": "Get all files in a branch's tree.\n\nArgs:\n\nprofile\nA profile generated from ``simplygithub.authentication.profile``.\nSuch profiles tell this module (i) the ``repo`` to connect to,\nand (ii) the ``token`` to connect with.\n\nbranch_sha\nThe SHA a branch's HEAD points to.\n\nReturns:\nA list of dicts containing info about each blob in the tree.", "source": "codesearchnet"} {"code": "def top_1_tpu(inputs):\n \n inputs_max = tf.reduce_max(inputs, axis=-1, keepdims=True)\n mask = tf.to_int32(tf.equal(inputs_max, inputs))\n index = tf.range(tf.shape(inputs)[-1]) * mask\n return tf.squeeze(inputs_max, -1), tf.reduce_max(index, axis=-1)", "docstring": "find max and argmax over the last dimension.\n\nWorks well on TPU\n\nArgs:\ninputs: A tensor with shape [..., depth]\n\nReturns:\nvalues: a Tensor with shape [...]\nindices: a Tensor with shape [...]", "source": "juraj-google-style"} {"code": "def validate_inputs(x, y):\n if isinstance(x, iterator_ops.Iterator) or isinstance(y, iterator_ops.Iterator):\n raise ValueError('`DistributionStrategy` does not support inputs of type Iterator. 
You must pass a `tf.data.Dataset` object or a numpy array as input.')", "docstring": "Validate inputs when using DistributionStrategy.\n\nArgs:\nx: Model Inputs.\ny: Model Targets.\n\nRaises:\nValueError: if input is not a Dataset or a numpy array(when we use\nMirroredStrategy).", "source": "github-repos"} {"code": "def _get_environment_updates(self, display_all_distributions=False):\n updates = []\n for distribution in self.pip.get_installed_distributions():\n versions = self.get_available_versions(distribution.project_name)\n max_version = (max(versions.keys()) if versions else UNKNOW_NUM)\n update = None\n distribution_version = self._parse_version(distribution.version)\n if (versions and (max_version > distribution_version)):\n update = Update(distribution.project_name, distribution.version, versions[max_version], prelease=max_version[(- 1)])\n elif (display_all_distributions and (max_version == distribution_version)):\n update = Update(distribution.project_name, distribution.version, versions[max_version])\n elif display_all_distributions:\n update = Update(distribution.project_name, distribution.version, UNKNOWN)\n if update:\n updates.append(update)\n return sorted(updates, key=(lambda x: x.name))", "docstring": "Check all pacakges installed in the environment to see if there are\nany updates availalble.\n\nArgs:\ndisplay_all_distributions (bool): Return distribution even if it is\nup-to-date. Defaults to ``False``.\n\nReturns:\nlist: A list of Update objects ordered based on ``instance.name``.", "source": "codesearchnet"} {"code": "def experimental_make_numpy_dataset(self, numpy_input, session=None):\n _require_cross_replica_or_default_context_extended(self)\n return self._experimental_make_numpy_dataset(numpy_input, session=session)", "docstring": "Makes a dataset for input provided via a numpy array.\n\nThis avoids adding `numpy_input` as a large constant in the graph,\nand copies the data to the machine or machines that will be processing\nthe input.\n\nArgs:\nnumpy_input: A nest of NumPy input arrays that will be distributed evenly\nacross all replicas. 
Note that lists of Numpy arrays are stacked, as\nthat is normal `tf.data.Dataset` behavior.\nsession: (TensorFlow v1.x graph execution only) A session used for\ninitialization.\n\nReturns:\nA `tf.data.Dataset` representing `numpy_input`.", "source": "github-repos"} {"code": "def default_pass_manager(basis_gates, coupling_map, initial_layout, seed_transpiler):\n pass_manager = PassManager()\n pass_manager.property_set['layout'] = initial_layout\n pass_manager.append(Unroller(basis_gates))\n pass_manager.append(TrivialLayout(coupling_map), condition=(lambda property_set: (not property_set['layout'])))\n pass_manager.append(CheckMap(coupling_map))\n pass_manager.append(DenseLayout(coupling_map), condition=(lambda property_set: (not property_set['is_swap_mapped'])))\n pass_manager.append(FullAncillaAllocation(coupling_map))\n pass_manager.append(EnlargeWithAncilla())\n pass_manager.append(Unroll3qOrMore())\n pass_manager.append(LegacySwap(coupling_map, trials=20, seed=seed_transpiler))\n pass_manager.append(Decompose(SwapGate))\n pass_manager.append(CXDirection(coupling_map))\n pass_manager.append(Unroller(['u1', 'u2', 'u3', 'id', 'cx']))\n simplification_passes = [Optimize1qGates(), CXCancellation(), RemoveResetInZeroState()]\n pass_manager.append((simplification_passes + [Depth(), FixedPoint('depth')]), do_while=(lambda property_set: (not property_set['depth_fixed_point'])))\n return pass_manager", "docstring": "The default pass manager that maps to the coupling map.\n\nArgs:\nbasis_gates (list[str]): list of basis gate names supported by the target.\ncoupling_map (CouplingMap): coupling map to target in mapping.\ninitial_layout (Layout or None): initial layout of virtual qubits on physical qubits\nseed_transpiler (int or None): random seed for stochastic passes.\n\nReturns:\nPassManager: A pass manager to map and optimize.", "source": "codesearchnet"} {"code": "def _get_match(self, key):\n \n\n return self._get_string_match(key=key) or \\\n self._get_non_string_match(key=key)", "docstring": "Gets a MatchObject for the given key.\n\nArgs:\nkey (str): Key of the property to look-up.\n\nReturn:\nMatchObject: The discovered match.", "source": "juraj-google-style"} {"code": "def _ParseDataStreamWithParser(\n self, parser_mediator, parser, file_entry, data_stream_name):\n \n file_object = file_entry.GetFileObject(data_stream_name=data_stream_name)\n if not file_object:\n raise RuntimeError(\n 'Unable to retrieve file-like object from file entry.')\n\n try:\n self._ParseFileEntryWithParser(\n parser_mediator, parser, file_entry, file_object=file_object)\n\n finally:\n file_object.close()", "docstring": "Parses a data stream of a file entry with a specific parser.\n\nArgs:\nparser_mediator (ParserMediator): parser mediator.\nparser (BaseParser): parser.\nfile_entry (dfvfs.FileEntry): file entry.\ndata_stream_name (str): data stream name.\n\nRaises:\nRuntimeError: if the file-like object is missing.", "source": "juraj-google-style"} {"code": "def Process(self, parser_mediator, root_item=None, **kwargs):\n \n \n super(DocumentSummaryInformationOLECFPlugin, self).Process(\n parser_mediator, **kwargs)\n\n if not root_item:\n raise ValueError('Root item not set.')\n\n root_creation_time, root_modification_time = self._GetTimestamps(root_item)\n\n for item_name in self.REQUIRED_ITEMS:\n item = root_item.get_sub_item_by_name(item_name)\n if not item:\n continue\n\n summary_information = OLECFDocumentSummaryInformation(item)\n event_data = summary_information.GetEventData(\n 
data_type='olecf:document_summary_info')\n event_data.name = 'Document Summary Information'\n\n if root_creation_time:\n date_time = dfdatetime_filetime.Filetime(\n timestamp=root_creation_time)\n event = OLECFDocumentSummaryInformationEvent(\n date_time, definitions.TIME_DESCRIPTION_CREATION)\n parser_mediator.ProduceEventWithEventData(event, event_data)\n\n if root_modification_time:\n date_time = dfdatetime_filetime.Filetime(\n timestamp=root_modification_time)\n event = OLECFDocumentSummaryInformationEvent(\n date_time, definitions.TIME_DESCRIPTION_MODIFICATION)\n parser_mediator.ProduceEventWithEventData(event, event_data)", "docstring": "Parses a document summary information OLECF item.\n\nArgs:\nparser_mediator (ParserMediator): mediates interactions between parsers\nand other components, such as storage and dfvfs.\nroot_item (Optional[pyolecf.item]): root item of the OLECF file.\n\nRaises:\nValueError: If the root item is not set.", "source": "juraj-google-style"} {"code": "async def send_message(self, name, level, message):\n \n\n if name not in self.services:\n raise ArgumentError(\"Unknown service name\", short_name=name)\n\n msg = self.services[name]['state'].post_message(level, message)\n await self._notify_update(name, 'new_message', msg.to_dict())", "docstring": "Post a message for a service.\n\nArgs:\nname (string): The short name of the service to query\nlevel (int): The level of the message (info, warning, error)\nmessage (string): The message contents", "source": "juraj-google-style"} {"code": "def compiled_sub_dn(self, prepend):\n \n prepend = prepend.strip()\n if prepend == '':\n return self.config.get('LDAP_BASE_DN')\n return '{prepend},{base}'.format(\n prepend=prepend,\n base=self.config.get('LDAP_BASE_DN')\n )", "docstring": "Returns:\nstr: A DN with the DN Base appended to the end.\n\nArgs:\nprepend (str): The dn to prepend to the base.", "source": "juraj-google-style"} {"code": "def removeUserGroups(self, users=None):\n \n admin = None\n userCommunity = None\n portal = None\n groupAdmin = None\n user = None\n userCommData = None\n group = None\n try:\n\n admin = arcrest.manageorg.Administration(securityHandler=self._securityHandler)\n if users is None:\n print (\"You have selected to remove all users groups, you must modify the code to do this\")\n usersObj = []\n commUsers = admin.portals.portalSelf.users(start=1, num=100)\n usersObj = commUsers['users']\n\n return\n else:\n usersObj = []\n userStr = users.split(',')\n for user in userStr:\n try:\n user = admin.community.users.user(str(user).strip())\n usersObj.append(user)\n except:\n print (\"%s does not exist\" % str(user).strip())\n if usersObj:\n for userCommData in usersObj:\n print (\"Loading groups for user: %s\" % userCommData.username)\n\n if userCommData.groups:\n for group in userCommData.groups:\n groupObj = admin.community.groups.group(groupId=group['id'])\n if groupObj.owner == userCommData.username:\n print (groupObj.delete())\n else:\n print (\"No Groups Found\")\n except:\n line, filename, synerror = trace()\n raise common.ArcRestHelperError({\n \"function\": \"removeUserGroups\",\n \"line\": line,\n \"filename\": filename,\n \"synerror\": synerror,\n }\n )\n finally:\n admin = None\n userCommunity = None\n portal = None\n groupAdmin = None\n user = None\n userCommData = None\n group = None\n\n del admin\n del userCommunity\n del portal\n del groupAdmin\n del user\n del userCommData\n del group\n\n gc.collect()", "docstring": "Removes users' groups.\n\nArgs:\nusers (str): A comma delimited list 
of user names.\nDefaults to ``None``.\n\nWarning:\nWhen ``users`` is not provided (``None``), all users\nin the organization will have their groups deleted!", "source": "juraj-google-style"} {"code": "def find_in_mailbox(cls, session, mailbox_or_id):\n if hasattr(mailbox_or_id, 'id'):\n mailbox_or_id = mailbox_or_id.id\n return cls(('/mailboxes/%d/users.json' % mailbox_or_id), session=session)", "docstring": "Get the users that are associated to a Mailbox.\n\nArgs:\nsession (requests.sessions.Session): Authenticated session.\nmailbox_or_id (MailboxRef or int): Mailbox of the ID of the\nmailbox to get the folders for.\n\nReturns:\nRequestPaginator(output_type=helpscout.models.User): Users\niterator.", "source": "codesearchnet"} {"code": "def decode_array(bytestring: bytes) -> np.ndarray:\n return tf.make_ndarray(_CLS.FromString(bytestring))", "docstring": "Decodes a bytestring into a numpy array.\n\nThe bytestring should be a serialized `TensorProto` instance. For more details\nsee `tf.make_tensor_proto`.\n\nArgs:\nbytestring: The serialized `TensorProto`.\n\nReturns:\nA numpy array.", "source": "github-repos"} {"code": "def decode_field(self, field, value):\n \n if isinstance(field, messages.EnumField):\n try:\n return field.type(value)\n except TypeError:\n raise messages.DecodeError(\n 'Invalid enum value \"%s\"' % (value or ''))\n\n elif isinstance(field, messages.BytesField):\n try:\n return base64.b64decode(value)\n except (binascii.Error, TypeError) as err:\n raise messages.DecodeError('Base64 decoding error: %s' % err)\n\n elif isinstance(field, message_types.DateTimeField):\n try:\n return util.decode_datetime(value)\n except ValueError as err:\n raise messages.DecodeError(err)\n\n elif (isinstance(field, messages.MessageField) and\n issubclass(field.type, messages.Message)):\n return self.__decode_dictionary(field.type, value)\n\n elif (isinstance(field, messages.FloatField) and\n isinstance(value, (six.integer_types, six.string_types))):\n try:\n return float(value)\n except: \n pass\n\n elif (isinstance(field, messages.IntegerField) and\n isinstance(value, six.string_types)):\n try:\n return int(value)\n except: \n pass\n\n return value", "docstring": "Decode a JSON value to a python value.\n\nArgs:\nfield: A ProtoRPC field instance.\nvalue: A serialized JSON value.\n\nReturn:\nA Python value compatible with field.", "source": "juraj-google-style"} {"code": "def pnum_to_group(mesh_shape, group_dims, pnum):\n coord = pnum_to_processor_coordinates(mesh_shape, pnum)\n remaining_shape = Shape([d for (i, d) in enumerate(mesh_shape) if (i not in group_dims)])\n remaining_coord = [d for (i, d) in enumerate(coord) if (i not in group_dims)]\n return processor_coordinates_to_pnum(remaining_shape, remaining_coord)", "docstring": "Group number for grouped allreduce.\n\nArgs:\nmesh_shape: a Shape\ngroup_dims: a list of integers (the dimensions reduced over)\npnum: an integer\n\nReturns:\nan integer", "source": "codesearchnet"} {"code": "def exit_handler(signum, frame):\n \n\n LOGGER.debug('signal {} was caught'.format(signum))\n sys.exit(128 + signum)", "docstring": "Catch SIGTERM and SIGHUP and call \"sys.exit\" which raises\n\"SystemExit\" exception.\nThis will trigger all the cleanup code defined in ContextManagers\nand \"finally\" statements.\n\nFor more details about the arguments see \"signal\" documentation.\n\nArgs:\nsignum(int): The signal's number\nframe(frame): The current stack frame, can be None", "source": "juraj-google-style"} {"code": "def GetSubClasses():\n return 
utils.invert_dict(GetSuperClasses())", "docstring": "Get a reverse Python type hierarchy mapping.\n\nThis generates a dictionary that can be used to look up the (known)\nsubclasses of a type in the abstract base class hierarchy.\n\nReturns:\nA dictionary mapping a type, as string, to a list of direct\nsubclasses (also as strings).\nE.g. \"Sized\" -> [\"Set\", \"Mapping\", \"MappingView\", \"Sequence\"].", "source": "github-repos"} {"code": "def findall_operations(self, predicate: Callable[([ops.Operation], bool)]) -> Iterable[Tuple[(int, ops.Operation)]]:\n for (index, moment) in enumerate(self._moments):\n for op in moment.operations:\n if predicate(op):\n (yield (index, op))", "docstring": "Find the locations of all operations that satisfy a given condition.\n\nThis returns an iterator of (index, operation) tuples where each\noperation satisfies op_cond(operation) is truthy. The indices are\nin order of the moments and then order of the ops within that moment.\n\nArgs:\npredicate: A method that takes an Operation and returns a Truthy\nvalue indicating the operation meets the find condition.\n\nReturns:\nAn iterator (index, operation)'s that satisfy the op_condition.", "source": "codesearchnet"} {"code": "def identify_link_type(filename):\n mime_type = mimetypes.guess_type(filename)[0]\n if (not mime_type):\n return\n if (mime_type == 'text/css'):\n return LinkType.css\n elif (mime_type == 'application/javascript'):\n return LinkType.javascript\n elif ((mime_type == 'text/html') or mime_type.endswith('xml')):\n return LinkType.html\n elif (mime_type.startswith('video') or mime_type.startswith('image') or mime_type.startswith('audio') or mime_type.endswith('shockwave-flash')):\n return LinkType.media", "docstring": "Return link type guessed by filename extension.\n\nReturns:\nstr: A value from :class:`.item.LinkType`.", "source": "codesearchnet"} {"code": "def _convert_to_compatible_tensor(value, target, error_prefix):\n try:\n tensor = tf_v1.convert_to_tensor_or_indexed_slices(value, target.dtype)\n except TypeError as e:\n raise TypeError(('%s: %s' % (error_prefix, e)))\n if (_is_sparse(tensor) != _is_sparse(target)):\n if _is_sparse(tensor):\n raise TypeError(('%s: Is sparse. Expected dense.' % error_prefix))\n else:\n raise TypeError(('%s: Is dense. Expected sparse.' 
% error_prefix))\n if (not tensor.get_shape().is_compatible_with(target.get_shape())):\n raise TypeError(('%s: Shape %r is incompatible with %r' % (error_prefix, tensor.get_shape(), target.get_shape())))\n return tensor", "docstring": "Converts `value` into a tensor that can be feed into `tensor_info`.\n\nArgs:\nvalue: A value to convert into Tensor or SparseTensor.\ntarget: An object returned by `parse_tensor_info_map`.\nerror_prefix: A string to prefix on raised TypeErrors.\n\nRaises:\nTypeError: If it fails to convert.\n\nReturns:\nA Tensor or SparseTensor compatible with tensor_info.", "source": "codesearchnet"} {"code": "def forward(self, x, prev_bin, prev_bin_embedding=None, interpolate=True):\n if prev_bin_embedding is not None:\n if interpolate:\n prev_bin_embedding = nn.functional.interpolate(prev_bin_embedding, x.shape[-2:], mode='bilinear', align_corners=True)\n x = x + prev_bin_embedding\n x = self.conv1(x)\n x = self.act1(x)\n x = self.conv2(x)\n attractors = self.act2(x)\n height, width = attractors.shape[-2:]\n bin_centers = nn.functional.interpolate(prev_bin, (height, width), mode='bilinear', align_corners=True)\n if not self.memory_efficient:\n func = {'mean': torch.mean, 'sum': torch.sum}[self.kind]\n delta_c = func(inv_attractor(attractors.unsqueeze(2) - bin_centers.unsqueeze(1)), dim=1)\n else:\n delta_c = torch.zeros_like(bin_centers, device=bin_centers.device)\n for i in range(self.n_attractors):\n delta_c += inv_attractor(attractors[:, i, ...].unsqueeze(1) - bin_centers)\n if self.kind == 'mean':\n delta_c = delta_c / self.n_attractors\n bin_new_centers = bin_centers + delta_c\n bin_centers = bin_new_centers\n return (bin_new_centers, bin_centers)", "docstring": "The forward pass of the attractor layer. This layer predicts the new bin centers based on the previous bin centers\nand the attractor points (the latter are predicted by the MLP).\n\nArgs:\nx (`torch.Tensor` of shape (batch_size, num_channels, height, width)`):\nFeature block.\nprev_bin (`torch.Tensor` of shape (batch_size, prev_num_bins, height, width)`):\nPrevious bin centers normed.\nprev_bin_embedding (`torch.Tensor`, *optional*):\nOptional previous bin embeddings.\ninterpolate (`bool`, *optional*, defaults to `True`):\nWhether to interpolate the previous bin embeddings to the size of the input features.\n\nReturns:\n`Tuple[`torch.Tensor`, `torch.Tensor`]:\nNew bin centers unbounded. Two outputs just to keep the API consistent with the normed version.", "source": "github-repos"} {"code": "def _ensure_tf_install():\n try:\n import tensorflow.compat.v2 as tf\n except ImportError:\n print('\\n\\nFailed to import TensorFlow. Please note that TensorFlow is not installed by default when you install TF Quant Finance library. This is so that users can decide whether to install the GPU-enabled TensorFlow package. To use TF Quant Finance library, please install the most recent version of TensorFlow, by following instructions at https:\n raise\n import distutils.version\n if distutils.version.LooseVersion(tf.__version__) < distutils.version.LooseVersion(_REQUIRED_TENSORFLOW_VERSION):\n raise ImportError('This version of TF Quant Finance library requires TensorFlow version >= {required}; Detected an installation of version {present}. 
Please upgrade TensorFlow to proceed.'.format(required=_REQUIRED_TENSORFLOW_VERSION, present=tf.__version__))", "docstring": "Attempt to import tensorflow, and ensure its version is sufficient.\n\nRaises:\nImportError: if either tensorflow is not importable or its version is\ninadequate.", "source": "github-repos"} {"code": "def locked_put(self, credentials):\n filters = {self.key_name: self.key_value}\n query = self.session.query(self.model_class).filter_by(**filters)\n entity = query.first()\n if (not entity):\n entity = self.model_class(**filters)\n setattr(entity, self.property_name, credentials)\n self.session.add(entity)", "docstring": "Write a credentials to the SQLAlchemy datastore.\n\nArgs:\ncredentials: :class:`oauth2client.Credentials`", "source": "codesearchnet"} {"code": "def set_row_count(self, count):\n current_row_count = self.row_count()\n current_column_count = self.column_count()\n if (count > current_row_count):\n cl = (TableEditableItem if self._editable else TableItem)\n for i in range(current_row_count, count):\n tr = TableRow()\n for c in range(0, current_column_count):\n tr.append(cl(), str(c))\n if self._editable:\n tr.children[str(c)].onchange.connect(self.on_item_changed, int(i), int(c))\n self.append(tr, str(i))\n self._update_first_row()\n elif (count < current_row_count):\n for i in range(count, current_row_count):\n self.remove_child(self.children[str(i)])", "docstring": "Sets the table row count.\n\nArgs:\ncount (int): number of rows", "source": "codesearchnet"} {"code": "def union(self, other):\n if (not hasattr(other, '__iter__')):\n other = [other]\n bounds = self.bounds[:]\n for range in other:\n bounds += range.bounds\n bounds = self._union(bounds)\n range = VersionRange(None)\n range.bounds = bounds\n return range", "docstring": "OR together version ranges.\n\nCalculates the union of this range with one or more other ranges.\n\nArgs:\nother: VersionRange object (or list of) to OR with.\n\nReturns:\nNew VersionRange object representing the union.", "source": "codesearchnet"} {"code": "def create_string_array(self, key, value):\n \n data = None\n if key is not None and value is not None:\n if isinstance(value, (list)):\n data = self.db.create(key.strip(), json.dumps(value))\n else:\n \n data = self.db.create(key.strip(), value)\n else:\n self.tcex.log.warning(u'The key or value field was None.')\n return data", "docstring": "Create method of CRUD operation for string array data.\n\nArgs:\nkey (string): The variable to write to the DB.\nvalue (any): The data to write to the DB.\n\nReturns:\n(string): Result of DB write.", "source": "juraj-google-style"} {"code": "def multithread_predict_dataflow(dataflows, model_funcs):\n num_worker = len(model_funcs)\n assert (len(dataflows) == num_worker)\n if (num_worker == 1):\n return predict_dataflow(dataflows[0], model_funcs[0])\n kwargs = ({'thread_name_prefix': 'EvalWorker'} if (sys.version_info.minor >= 6) else {})\n with ThreadPoolExecutor(max_workers=num_worker, **kwargs) as executor, tqdm.tqdm(total=sum([df.size() for df in dataflows])) as pbar:\n futures = []\n for (dataflow, pred) in zip(dataflows, model_funcs):\n futures.append(executor.submit(predict_dataflow, dataflow, pred, pbar))\n all_results = list(itertools.chain(*[fut.result() for fut in futures]))\n return all_results", "docstring": "Running multiple `predict_dataflow` in multiple threads, and aggregate the results.\n\nArgs:\ndataflows: a list of DataFlow to be used in :func:`predict_dataflow`\nmodel_funcs: a list of callable to be used in 
:func:`predict_dataflow`\n\nReturns:\nlist of dict, in the format used by\n`DetectionDataset.eval_or_save_inference_results`", "source": "codesearchnet"} {"code": "def _grappler_config(self, optimizers=None):\n if not optimizers:\n optimizers = []\n if not self.experimental_new_converter:\n optimizers.append('constfold')\n is_only_flex_enabled = set([OpsSet.SELECT_TF_OPS]) == set(self.target_spec.supported_ops)\n if is_only_flex_enabled:\n optimizers.append('layout')\n return _get_grappler_config(optimizers)", "docstring": "Creates a tf.compat.v1.ConfigProto for configuring Grappler.\n\nArgs:\noptimizers: List of strings that represents the list of optimizers.\n\nReturns:\ntf.ConfigProto.", "source": "github-repos"} {"code": "def scatter_min(self, sparse_delta, use_locking=False, name=None):\n if not isinstance(sparse_delta, indexed_slices.IndexedSlices):\n raise TypeError(f'Argument `sparse_delta` must be a `tf.IndexedSlices`. Received arg: {sparse_delta}')\n return self._lazy_read(gen_resource_variable_ops.resource_scatter_min(self.handle, sparse_delta.indices, ops.convert_to_tensor(sparse_delta.values, self.dtype), name=name))", "docstring": "Updates this variable with the min of `tf.IndexedSlices` and itself.\n\nArgs:\nsparse_delta: `tf.IndexedSlices` to use as an argument of min with this\nvariable.\nuse_locking: If `True`, use locking during the operation.\nname: the name of the operation.\n\nReturns:\nThe updated variable.\n\nRaises:\nTypeError: if `sparse_delta` is not an `IndexedSlices`.", "source": "github-repos"} {"code": "def GetProgressTrackerSymbols(self):\n return self._progress_tracker_symbols", "docstring": "Returns the progress tracker characters object.\n\nReturns:\nA ProgressTrackerSymbols object for the console output device.", "source": "github-repos"} {"code": "def _remove_double_brackets(text):\n \n def replacement_fn(s):\n if u\":\" in s:\n \n return \"\"\n \n bar_pos = s.find(u\"|\")\n if bar_pos == -1:\n return s\n return s[bar_pos + 1:]\n return _find_and_replace(text, u\"[[\", u\"]]\", replacement_fn)", "docstring": "Remove double brackets (internal links) but leave the viewable text.\n\nArgs:\ntext: a unicode string\nReturns:\na unicode string", "source": "juraj-google-style"} {"code": "def forward(self, seq_length=None, position=None):\n if position is None and seq_length is None:\n raise ValueError('Either position or seq_length must be provided')\n if position is None:\n position = torch.arange(seq_length, dtype=torch.float32, device=self.inv_timescales.device).unsqueeze(0)\n elif position.ndim != 2:\n raise ValueError(f'position must be 2-dimensional, got shape {position.shape}')\n scaled_time = position.view(*position.shape, 1) * self.inv_timescales.view(1, 1, -1)\n signal = torch.cat([torch.sin(scaled_time), torch.cos(scaled_time)], dim=2)\n signal = F.pad(signal, (0, 0, 0, self.embedding_dims % 2))\n return signal", "docstring": "Generates a Tensor of sinusoids with different frequencies.\n\nArgs:\nseq_length: an optional Python int defining the output sequence length.\nif the `position` argument is specified.\nposition: [B, seq_length], optional position for each token in the\nsequence, only required when the sequence is packed.\n\nReturns:\n[B, seqlen, D] if `position` is specified, else [1, seqlen, D]", "source": "github-repos"} {"code": "def equals(self, rhs):\n try:\n return (round((rhs - self._float_value), self._places) == 0)\n except TypeError:\n return False", "docstring": "Check to see if RHS is almost equal to float_value\n\nArgs:\nrhs: the 
value to compare to float_value\n\nReturns:\nbool", "source": "codesearchnet"} {"code": "def add_timeout_arg(a_func, timeout, **kwargs):\n\n def inner(*args):\n 'Updates args with the timeout.'\n updated_args = (args + (timeout,))\n return a_func(*updated_args, **kwargs)\n return inner", "docstring": "Updates a_func so that it gets called with the timeout as its final arg.\n\nThis converts a callable, a_func, into another callable with an additional\npositional arg.\n\nArgs:\na_func (callable): a callable to be updated\ntimeout (int): to be added to the original callable as it final positional\narg.\nkwargs: Addtional arguments passed through to the callable.\n\nReturns:\ncallable: the original callable updated to the timeout arg", "source": "codesearchnet"} {"code": "def get_relative_name(prefix: str, absolute_name: str) -> str:\n if absolute_name.startswith('.'):\n return absolute_name\n prefix_path: list[str] = prefix.split('.') if prefix else []\n name_path: list[str] = absolute_name.split('.') if absolute_name else []\n num_match = 0\n for prefix_seg, name_seg in zip(prefix_path, name_path):\n if prefix_seg != name_seg:\n break\n num_match += 1\n if not num_match:\n return absolute_name\n name = '.'.join(name_path[num_match:])\n ndots = len(prefix_path) - num_match\n if ndots > 0:\n name = '.' * (ndots + 1) + name\n return name", "docstring": "Transfoms an absolute name to a relative one based on the given prefix.\n\nArgs:\nprefix: A dotted name, e.g. foo.bar.baz\nabsolute_name: A fully-qualified name, e.g. foo.bar.baz.x\n\nReturns:\nThe absolute name with the prefix removed, with a leading dot added\nfor each segment of the prefix not present in the absolute name.\ne.g. foo.bar.baz + foo.bar.hello.world -> ..hello.world\nIf the prefix is disjoint from the absolute name, the absolute name is\nreturned verbatim.\ne.g. foo.bar.baz + hello.world -> hello.world\nIf the given absolute name has one or more leading dots, it is returned\nverbatim.\ne.g. 
foo.bar + ..hello.world -> ..hello.world", "source": "github-repos"} {"code": "def sort_models(self):\n model_names = [table.name for table in self.Base.metadata.sorted_tables if (table.name in self.models)]\n logger.debug('Unsorted models: %s', model_names)\n model_count = len(model_names)\n swapped = True\n sort_round = 0\n while swapped:\n sort_round += 1\n logger.debug('Sorting round: %d (%s)', sort_round, model_names)\n sorted_models = []\n for i in range(model_count):\n model = self.models[model_names[i]]\n for foreign_model_name in model.foreign_models:\n if (foreign_model_name not in sorted_models):\n sorted_models.append(foreign_model_name)\n if (model.name not in sorted_models):\n sorted_models.append(model.name)\n if (model_names == sorted_models):\n swapped = False\n model_names = sorted_models\n logger.debug('Sorted models: %s (%d rounds)', model_names, sort_round)\n return model_names", "docstring": "Sorts the database models appropriately based on their relationships so that we load our data\nin the appropriate order.\n\nReturns:\nA sorted list containing the names of the models.", "source": "codesearchnet"} {"code": "def KillOldFlows(self):\n if (not self.IsRunning()):\n return False\n start_time = self.Get(self.Schema.LAST_RUN_TIME)\n lifetime = self.Get(self.Schema.CRON_ARGS).lifetime\n elapsed = (rdfvalue.RDFDatetime.Now() - start_time)\n if (lifetime and (elapsed > lifetime)):\n self.StopCurrentRun()\n stats_collector_instance.Get().IncrementCounter('cron_job_timeout', fields=[self.urn.Basename()])\n stats_collector_instance.Get().RecordEvent('cron_job_latency', elapsed.seconds, fields=[self.urn.Basename()])\n return True\n return False", "docstring": "Disable cron flow if it has exceeded CRON_ARGS.lifetime.\n\nReturns:\nbool: True if the flow is was killed.", "source": "codesearchnet"} {"code": "def dimension_name(dimension):\n \n if isinstance(dimension, Dimension):\n return dimension.name\n elif isinstance(dimension, basestring):\n return dimension\n elif isinstance(dimension, tuple):\n return dimension[0]\n elif isinstance(dimension, dict):\n return dimension['name']\n elif dimension is None:\n return None\n else:\n raise ValueError('%s type could not be interpreted as Dimension. 
'\n 'Dimensions must be declared as a string, tuple, '\n 'dictionary or Dimension type.'\n % type(dimension).__name__)", "docstring": "Return the Dimension.name for a dimension-like object.\n\nArgs:\ndimension: Dimension or dimension string, tuple or dict\n\nReturns:\nThe name of the Dimension or what would be the name if the\ninput as converted to a Dimension.", "source": "juraj-google-style"} {"code": "def get_heat_capacity(self, temperature, structure, n, u, cutoff=100.0):\n k = 1.38065e-23\n kt = (k * temperature)\n hbar_w = (1.05457e-34 * self.omega(structure, n, u))\n if (hbar_w > (kt * cutoff)):\n return 0.0\n c = (k * ((hbar_w / kt) ** 2))\n c *= (np.exp((hbar_w / kt)) / ((np.exp((hbar_w / kt)) - 1) ** 2))\n return (c * 6.022e+23)", "docstring": "Gets the directional heat capacity for a higher order tensor\nexpansion as a function of direction and polarization.\n\nArgs:\ntemperature (float): Temperature in kelvin\nstructure (float): Structure to be used in directional heat\ncapacity determination\nn (3x1 array-like): direction for Cv determination\nu (3x1 array-like): polarization direction, note that\nno attempt for verification of eigenvectors is made\ncutoff (float): cutoff for scale of kt / (hbar * omega)\nif lower than this value, returns 0", "source": "codesearchnet"} {"code": "def path_fraction_id_offset(points, fraction, relative_offset=False):\n if (not (0.0 <= fraction <= 1.0)):\n raise ValueError(('Invalid fraction: %.3f' % fraction))\n pts = np.array(points)[(:, COLS.XYZ)]\n lengths = np.linalg.norm(np.diff(pts, axis=0), axis=1)\n cum_lengths = np.cumsum(lengths)\n offset = (cum_lengths[(- 1)] * fraction)\n seg_id = np.argmin((cum_lengths < offset))\n if (seg_id > 0):\n offset -= cum_lengths[(seg_id - 1)]\n if relative_offset:\n offset /= lengths[seg_id]\n return (seg_id, offset)", "docstring": "Find the segment which corresponds to the fraction\nof the path length along the piecewise linear curve which\nis constructed from the set of points.\n\nArgs:\npoints: an iterable of indexable objects with indices\n0, 1, 2 correspoding to 3D cartesian coordinates\nfraction: path length fraction (0.0 <= fraction <= 1.0)\nrelative_offset: return absolute or relative segment distance\n\nReturns:\n(segment ID, segment offset) pair.", "source": "codesearchnet"} {"code": "def remove_backup(name):\n \n if name not in list_backups():\n log.debug('Backup already removed: %s', name)\n return True\n\n ps_cmd = ['Remove-WebConfigurationBackup',\n '-Name', \"'{0}'\".format(name)]\n\n cmd_ret = _srvmgr(ps_cmd)\n\n if cmd_ret['retcode'] != 0:\n msg = 'Unable to remove web configuration: {0}\\nError: {1}' \\\n ''.format(name, cmd_ret['stderr'])\n raise CommandExecutionError(msg)\n\n return name not in list_backups()", "docstring": "Remove an IIS Configuration backup from the System.\n\n.. versionadded:: 2017.7.0\n\nArgs:\nname (str): The name of the backup to remove\n\nReturns:\nbool: True if successful, otherwise False\n\nCLI Example:\n\n.. code-block:: bash\n\nsalt '*' win_iis.remove_backup backup_20170209", "source": "juraj-google-style"} {"code": "def delete(self, branch, commit_message, **kwargs):\n \n file_path = self.get_id().replace('/', '%2F')\n self.manager.delete(file_path, branch, commit_message, **kwargs)", "docstring": "Delete the file from the server.\n\nArgs:\nbranch (str): Branch from which the file will be removed\ncommit_message (str): Commit message for the deletion\n**kwargs: Extra options to send to the server (e.g. 
sudo)\n\nRaises:\nGitlabAuthenticationError: If authentication is not correct\nGitlabDeleteError: If the server cannot perform the request", "source": "juraj-google-style"} {"code": "def FromLdapToTimestamp(self, ldap_ts_string):\n if isinstance(ldap_ts_string, bytes):\n ldap_ts_string = ldap_ts_string.decode('utf-8')\n try:\n if self.conf.get('ad'):\n t = time.strptime(ldap_ts_string, '%Y%m%d%H%M%S.0Z')\n else:\n t = time.strptime(ldap_ts_string, '%Y%m%d%H%M%SZ')\n except ValueError:\n m = re.match('([0-9]*)(\\\\.[0-9]*)?(Z)', ldap_ts_string)\n if m:\n ldap_ts_string = m.group(1) + m.group(3)\n if self.conf.get('ad'):\n t = time.strptime(ldap_ts_string, '%Y%m%d%H%M%S.0Z')\n else:\n t = time.strptime(ldap_ts_string, '%Y%m%d%H%M%SZ')\n return int(calendar.timegm(t))", "docstring": "Transforms a LDAP timestamp into the nss_cache internal timestamp.\n\nArgs:\nldap_ts_string: An LDAP timestamp string in the format %Y%m%d%H%M%SZ\n\nReturns:\nnumber of seconds since epoch.", "source": "github-repos"} {"code": "def AddToBalance(self, assetId, fixed8_val):\n found = False\n for (key, balance) in self.Balances.items():\n if (key == assetId):\n self.Balances[assetId] = (self.Balances[assetId] + fixed8_val)\n found = True\n if (not found):\n self.Balances[assetId] = fixed8_val", "docstring": "Add amount to the specified balance.\n\nArgs:\nassetId (UInt256):\nfixed8_val (Fixed8): amount to add.", "source": "codesearchnet"} {"code": "def _validate_isvalid_composition(self, isvalid_composition, field, value):\n sum_amount = 0.0\n if (value['kind'] in ['mass fraction', 'mole fraction']):\n low_lim = 0.0\n up_lim = 1.0\n total_amount = 1.0\n elif (value['kind'] in ['mole percent']):\n low_lim = 0.0\n up_lim = 100.0\n total_amount = 100.0\n else:\n self._error(field, 'composition kind must be \"mole percent\", \"mass fraction\", or \"mole fraction\"')\n return False\n for sp in value['species']:\n amount = sp['amount'][0]\n sum_amount += amount\n if (amount < low_lim):\n self._error(field, (((('Species ' + sp['species-name']) + ' ') + value['kind']) + ' must be greater than {:.1f}'.format(low_lim)))\n elif (amount > up_lim):\n self._error(field, (((('Species ' + sp['species-name']) + ' ') + value['kind']) + ' must be less than {:.1f}'.format(up_lim)))\n if (not np.isclose(total_amount, sum_amount)):\n self._error(field, ((('Species ' + value['kind']) + 's do not sum to {:.1f}: '.format(total_amount)) + '{:f}'.format(sum_amount)))", "docstring": "Checks for valid specification of composition.\n\nArgs:\nisvalid_composition (bool): flag from schema indicating\ncomposition to be checked.\nfield (str): 'composition'\nvalue (dict): dictionary of composition\n\nThe rule's arguments are validated against this schema:\n{'isvalid_composition': {'type': 'bool'}, 'field': {'type': 'str'},\n'value': {'type': 'dict'}}", "source": "codesearchnet"} {"code": "def keras_serializable(cls):\n initializer = cls.__init__\n config_class = getattr(cls, 'config_class', None)\n if config_class is None:\n raise AttributeError('Must set `config_class` to use @keras_serializable')\n\n @functools.wraps(initializer)\n def wrapped_init(self, *args, **kwargs):\n config = args[0] if args and isinstance(args[0], PretrainedConfig) else kwargs.pop('config', None)\n if isinstance(config, dict):\n config = config_class.from_dict(config)\n initializer(self, config, *args, **kwargs)\n elif isinstance(config, PretrainedConfig):\n if len(args) > 0:\n initializer(self, *args, **kwargs)\n else:\n initializer(self, config, *args, **kwargs)\n else:\n 
raise ValueError('Must pass either `config` (PretrainedConfig) or `config` (dict)')\n self._config = config\n self._kwargs = kwargs\n cls.__init__ = wrapped_init\n if not hasattr(cls, 'get_config'):\n raise TypeError('Only use @keras_serializable on keras.layers.Layer subclasses')\n if hasattr(cls.get_config, '_is_default'):\n\n def get_config(self):\n cfg = super(cls, self).get_config()\n cfg['config'] = self._config.to_dict()\n cfg.update(self._kwargs)\n return cfg\n cls.get_config = get_config\n cls._keras_serializable = True\n if hasattr(keras.utils, 'register_keras_serializable'):\n cls = keras.utils.register_keras_serializable()(cls)\n return cls", "docstring": "Decorate a Keras Layer class to support Keras serialization.\n\nThis is done by:\n\n1. Adding a `transformers_config` dict to the Keras config dictionary in `get_config` (called by Keras at\nserialization time.\n2. Wrapping `__init__` to accept that `transformers_config` dict (passed by Keras at deserialization time) and\nconvert it to a config object for the actual layer initializer.\n3. Registering the class as a custom object in Keras (if the Tensorflow version supports this), so that it does not\nneed to be supplied in `custom_objects` in the call to `keras.models.load_model`.\n\nArgs:\ncls (a `keras.layers.Layers subclass`):\nTypically a `TF.MainLayer` class in this project, in general must accept a `config` argument to its\ninitializer.\n\nReturns:\nThe same class object, with modifications for Keras deserialization.", "source": "github-repos"} {"code": "def outputZip(self, figtype='png'):\n from zipfile import ZipFile\n with ZipFile((self.outfile + '.zip'), 'w') as zipcontainer:\n zipcontainer.writestr('summary.txt', '\n c = count(1)\n for section in self.sections:\n section.sectionOutZip(zipcontainer, 's{}_{}/'.format(next(c), section.title.replace(' ', '_')), figtype=figtype)", "docstring": "Outputs the report in a zip container.\nFigs and tabs as pngs and excells.\n\nArgs:\nfigtype (str): Figure type of images in the zip folder.", "source": "codesearchnet"} {"code": "def exp(array, ty):\n weld_obj = WeldObject(encoder_, decoder_)\n array_var = weld_obj.update(array)\n if isinstance(array, WeldObject):\n array_var = array.obj_id\n weld_obj.dependencies[array_var] = array\n weld_template = '\\n map(\\n %(array)s,\\n |ele: %(ty)s| exp(ele)\\n )\\n '\n weld_obj.weld_code = (weld_template % {'array': array_var, 'ty': ty})\n return weld_obj", "docstring": "Computes the per-element exponenet of the passed-in array.\n\nArgs:\narray (WeldObject / Numpy.ndarray): Input array\nty (WeldType): Type of each element in the input array\n\nReturns:\nA WeldObject representing this computation", "source": "codesearchnet"} {"code": "def describe(self, model_name):\n \n model_yaml = yaml.safe_dump(self.get_model_details(model_name), default_flow_style=False)\n print(model_yaml)", "docstring": "Print information of a specified model.\n\nArgs:\nmodel_name: the name of the model to print details on.", "source": "juraj-google-style"} {"code": "def getSubjectInfo(self, subject, vendorSpecific=None):\n \n response = self.getSubjectInfoResponse(subject, vendorSpecific)\n return self._read_dataone_type_response(response, 'SubjectInfo')", "docstring": "See Also: getSubjectInfoResponse()\n\nArgs:\nsubject:\nvendorSpecific:\n\nReturns:", "source": "juraj-google-style"} {"code": "def get_available_references(self, datas):\n names = []\n for (k, v) in datas.items():\n if k.startswith(RULE_REFERENCE):\n names.append(k[(len(RULE_REFERENCE) + 1):])\n 
return names", "docstring": "Get available manifest reference names.\n\nEvery rules starting with prefix from ``nomenclature.RULE_REFERENCE``\nare available references.\n\nOnly name validation is performed on these references.\n\nArguments:\ndatas (dict): Data where to search for reference declarations.\n\nReturns:\nlist: List of every available reference names. This is the real\nname unprefixed.", "source": "codesearchnet"} {"code": "def crcMeterRead(self, raw_read, def_buf):\n \n try:\n if len(raw_read) == 0:\n ekm_log(\"(\" + self.m_context + \") Empty return read.\")\n return False\n sent_crc = self.calc_crc16(raw_read[1:-2])\n logstr = \"(\" + self.m_context + \")CRC sent = \" + str(def_buf[\"crc16\"][MeterData.StringValue])\n logstr += \" CRC calc = \" + sent_crc\n ekm_log(logstr)\n if int(def_buf[\"crc16\"][MeterData.StringValue], 16) == int(sent_crc, 16):\n return True\n\n \n \n \n \n \n \n except struct.error:\n ekm_log(str(sys.exc_info()))\n for frame in traceback.extract_tb(sys.exc_info()[2]):\n fname, lineno, fn, text = frame\n ekm_log(\"Error in %s on line %d\" % (fname, lineno))\n return False\n\n except TypeError:\n ekm_log(str(sys.exc_info()))\n for frame in traceback.extract_tb(sys.exc_info()[2]):\n fname, lineno, fn, text = frame\n ekm_log(\"Error in %s on line %d\" % (fname, lineno))\n return False\n\n except ValueError:\n ekm_log(str(sys.exc_info()))\n for frame in traceback.extract_tb(sys.exc_info()[2]):\n fname, lineno, fn, text = frame\n ekm_log(\"Error in %s on line %d\" % (fname, lineno))\n return False\n\n return False", "docstring": "Internal read CRC wrapper.\n\nArgs:\nraw_read (str): Bytes with implicit string cast from serial read\ndef_buf (SerialBlock): Populated read buffer.\n\nReturns:\nbool: True if passed CRC equals calculated CRC.", "source": "juraj-google-style"} {"code": "def __init__(self, input_size: int, num_experts: int, top_k: int):\n super().__init__()\n self.num_experts = num_experts\n self.input_size = input_size\n self.top_k = top_k\n self.layer = nn.Linear(input_size, num_experts, bias=False)", "docstring": "Initialize the top-k gating mechanism.\nArgs:\ninput_size (`int`):\nSize of the input.\nnum_experts (`int`):\nNumber of experts.\ntop_k (`int`):\nNumber of top experts to select.", "source": "github-repos"} {"code": "def _parse_symbol(self, sym):\n \n \n \n special = {\"Hw\": \"H\", \"Ow\": \"O\", \"Wat\": \"O\",\n \"wat\": \"O\", \"OH\": \"\", \"OH2\": \"\", \"NO3\": \"N\"}\n\n parsed_sym = None\n \n \n \n m_sp = re.match(\"|\".join(special.keys()), sym)\n if m_sp:\n parsed_sym = special[m_sp.group()]\n elif Element.is_valid_symbol(sym[:2].title()):\n parsed_sym = sym[:2].title()\n elif Element.is_valid_symbol(sym[0].upper()):\n parsed_sym = sym[0].upper()\n else:\n m = re.match(r\"w?[A-Z][a-z]*\", sym)\n if m:\n parsed_sym = m.group()\n\n if parsed_sym is not None and (m_sp or not re.match(r\"{}\\d*\".format(parsed_sym), sym)):\n msg = \"{} parsed as {}\".format(sym, parsed_sym)\n warnings.warn(msg)\n self.errors.append(msg)\n\n return parsed_sym", "docstring": "Parse a string with a symbol to extract a string representing an element.\n\nArgs:\nsym (str): A symbol to be parsed.\n\nReturns:\nA string with the parsed symbol. 
None if no parsing was possible.", "source": "juraj-google-style"} {"code": "def _collect_layer_statistics(self) -> Dict[str, Dict[str, float]]:\n layer_statistics = collections.defaultdict(lambda: collections.defaultdict(list))\n initialize = True\n for tensor_data in self._data_gen():\n self._set_input_tensors(self._quant_interpreter, tensor_data, initialize)\n initialize = False\n self._quant_interpreter.invoke()\n for tensor_detail in self._get_numeric_verify_tensor_details():\n tensor_name = tensor_detail['name']\n diffs = self._quant_interpreter.get_tensor(tensor_detail['index'])\n for metric_name, metric_fn in self._layer_debug_metrics.items():\n layer_statistics[tensor_name][metric_name].append(metric_fn(diffs))\n if self._debug_options.layer_direct_compare_metrics is not None:\n for tensor_detail in self._get_numeric_verify_tensor_details():\n tensor_name = tensor_detail['name']\n op_idx = self._defining_op[tensor_detail['index']]\n op_detail = self._quant_interpreter._get_op_details(op_idx)\n q_idx, f_idx = op_detail['inputs']\n quant_input_detail = self._quant_interpreter._get_tensor_details(q_idx, subgraph_index=0)\n for metric_name, metric_fn in self._debug_options.layer_direct_compare_metrics.items():\n layer_statistics[tensor_name][metric_name].append(metric_fn(self._quant_interpreter.get_tensor(f_idx), self._quant_interpreter.get_tensor(q_idx), quant_input_detail['quantization_parameters']['scales'][0], quant_input_detail['quantization_parameters']['zero_points'][0]))\n for metrics in layer_statistics.values():\n for metric_name in metrics:\n metrics[metric_name] = np.nanmean(metrics[metric_name])\n return layer_statistics", "docstring": "Collects layer statistics by applying layer debug metrics.\n\nFor all data from the given RepresentativeDataset, collect statistics per\nexample by getting the NumericVerify op results in _quant_interpreter\nand calculating layer debug metrics on the results.\n\nReturns:\naggregated per-layer statistics of NumericVerify results.\n{layer_name: {metric_name: metric}}", "source": "github-repos"} {"code": "def register_actor(name, actor_handle):\n \n if not isinstance(name, str):\n raise TypeError(\"The name argument must be a string.\")\n if not isinstance(actor_handle, ray.actor.ActorHandle):\n raise TypeError(\"The actor_handle argument must be an ActorHandle \"\n \"object.\")\n actor_name = _calculate_key(name)\n pickled_state = pickle.dumps(actor_handle)\n\n \n already_exists = _internal_kv_put(actor_name, pickled_state)\n if already_exists:\n \n \n actor_handle._ray_new_actor_handles.pop()\n raise ValueError(\n \"Error: the actor with name={} already exists\".format(name))", "docstring": "Register a named actor under a string key.\n\nArgs:\nname: The name of the named actor.\nactor_handle: The actor object to be associated with this name", "source": "juraj-google-style"} {"code": "def parent(self):\n if (len(self._path) == 1):\n return None\n else:\n parent_path = self._path[:(- 1)]\n return self._client.document(*parent_path)", "docstring": "Document that owns the current collection.\n\nReturns:\nOptional[~.firestore_v1beta1.document.DocumentReference]: The\nparent document, if the current collection is not a\ntop-level collection.", "source": "codesearchnet"} {"code": "def view(molecule, viewer=settings['defaults']['viewer'], use_curr_dir=False):\n try:\n molecule.view(viewer=viewer, use_curr_dir=use_curr_dir)\n except AttributeError:\n if pd.api.types.is_list_like(molecule):\n cartesian_list = molecule\n else:\n raise 
ValueError('Argument is neither list nor Cartesian.')\n if use_curr_dir:\n TEMP_DIR = os.path.curdir\n else:\n TEMP_DIR = tempfile.gettempdir()\n\n def give_filename(i):\n filename = (('ChemCoord_list_' + str(i)) + '.molden')\n return os.path.join(TEMP_DIR, filename)\n i = 1\n while os.path.exists(give_filename(i)):\n i = (i + 1)\n to_molden(cartesian_list, buf=give_filename(i))\n\n def open_file(i):\n 'Open file and close after being finished.'\n try:\n subprocess.check_call([viewer, give_filename(i)])\n except (subprocess.CalledProcessError, FileNotFoundError):\n raise\n finally:\n if use_curr_dir:\n pass\n else:\n os.remove(give_filename(i))\n Thread(target=open_file, args=(i,)).start()", "docstring": "View your molecule or list of molecules.\n\n.. note:: This function writes a temporary file and opens it with\nan external viewer.\nIf you modify your molecule afterwards you have to recall view\nin order to see the changes.\n\nArgs:\nmolecule: Can be a cartesian, or a list of cartesians.\nviewer (str): The external viewer to use. The default is\nspecified in settings.viewer\nuse_curr_dir (bool): If True, the temporary file is written to\nthe current diretory. Otherwise it gets written to the\nOS dependendent temporary directory.\n\nReturns:\nNone:", "source": "codesearchnet"} {"code": "def visit(self, visitor):\n visited = set()\n self._root_transform().visit(visitor, self, visited)", "docstring": "Visits depth-first every node of a pipeline's DAG.\n\nRunner-internal implementation detail; no backwards-compatibility guarantees\n\nArgs:\nvisitor (~apache_beam.pipeline.PipelineVisitor):\n:class:`~apache_beam.pipeline.PipelineVisitor` object whose callbacks\nwill be called for each node visited. See\n:class:`~apache_beam.pipeline.PipelineVisitor` comments.\n\nRaises:\nTypeError: if node is specified and is not a\n:class:`~apache_beam.pvalue.PValue`.\n~apache_beam.error.PipelineError: if node is specified and does not\nbelong to this pipeline instance.", "source": "github-repos"} {"code": "def _iceberg_io_read_test_preprocessor(test_spec: dict, expected: List[str], env: TestEnvironment):\n if (pipeline := test_spec.get('pipeline', None)):\n for transform in pipeline.get('transforms', []):\n if transform.get('type', '') == 'ReadFromIceberg':\n config = transform['config']\n db_name, table_name, field_value_dynamic_destinations = config['table'].split('.')\n transform['type'] = 'Create'\n transform['config'] = {k: v for k, v in config.items() if k.startswith('__')}\n transform['config']['elements'] = INPUT_TABLES[str(db_name), str(table_name), str(field_value_dynamic_destinations)]\n return test_spec", "docstring": "Preprocessor for tests that involve reading from Iceberg.\n\nThis preprocessor replaces any ReadFromIceberg transform with a Create\ntransform that reads from a predefined in-memory dictionary. 
This allows\nthe test to verify the pipeline's correctness without relying on Iceberg\ntables stored externally.\n\nArgs:\ntest_spec: The dictionary representation of the YAML pipeline specification.\nexpected: A list of strings representing the expected output of the\npipeline.\nenv: The TestEnvironment object providing utilities for creating temporary\nfiles.\n\nReturns:\nThe modified test_spec dictionary with ReadFromIceberg transforms replaced.", "source": "github-repos"} {"code": "def get_path(url):\n \n\n if url not in URLHelper.__cache:\n URLHelper.__cache[url] = urlparse(url)\n\n return URLHelper.__cache[url].path", "docstring": "Get the path (e.g. /page/23) of the given URL.\n\nArgs:\nurl (str): The URL to get the path from.\n\nReturns:\nstr: The path", "source": "juraj-google-style"} {"code": "def update(self, friendly_name=None, description=None, expiry=None, schema=None):\n \n self._load_info()\n if friendly_name is not None:\n self._info['friendlyName'] = friendly_name\n if description is not None:\n self._info['description'] = description\n if expiry is not None:\n if isinstance(expiry, datetime.datetime):\n expiry = calendar.timegm(expiry.utctimetuple()) * 1000\n self._info['expirationTime'] = expiry\n if schema is not None:\n if isinstance(schema, _schema.Schema):\n schema = schema._bq_schema\n self._info['schema'] = {'fields': schema}\n try:\n self._api.table_update(self._name_parts, self._info)\n except datalab.utils.RequestException:\n \n self._info = None\n except Exception as e:\n raise e", "docstring": "Selectively updates Table information.\n\nAny parameters that are omitted or None are not updated.\n\nArgs:\nfriendly_name: if not None, the new friendly name.\ndescription: if not None, the new description.\nexpiry: if not None, the new expiry time, either as a DateTime or milliseconds since epoch.\nschema: if not None, the new schema: either a list of dictionaries or a Schema.", "source": "juraj-google-style"} {"code": "def execute_no_wait(self, cmd, walltime=2, envs={}):\n \n\n \n stdin, stdout, stderr = self.ssh_client.exec_command(\n self.prepend_envs(cmd, envs), bufsize=-1, timeout=walltime\n )\n return None, stdout, stderr", "docstring": "Execute asynchronously without waiting for exitcode\n\nArgs:\n- cmd (string): Commandline string to be executed on the remote side\n- walltime (int): timeout to exec_command\n\nKWargs:\n- envs (dict): A dictionary of env variables\n\nReturns:\n- None, stdout (readable stream), stderr (readable stream)\n\nRaises:\n- ChannelExecFailed (reason)", "source": "juraj-google-style"} {"code": "def trace_tensor(tensor, tracepoint_name=None):\n if tracepoint_name is None:\n tracepoint_name = tensor.name\n tensor.graph.get_collection(_TENSOR_TRACER_COLLECTION)\n tensor.graph.add_to_collection(_TENSOR_TRACER_COLLECTION, (tensor, tracepoint_name))\n return tensor", "docstring": "Programmatic interface to trace a tensor with Tensor Tracer.\n\nTensor Tracer, by default, traces all tensors in the execution. This function\ncan be used to limit traced tensors. If this function is called for a subset\nof the tensors, only those will be traced.\n\nFor example, Tensor Tracer will only trace c below.\nc = tf.MatMul(a, b)\ntensor_tracer.trace_tensor(c)\nd = tf.add(c, 1)\nArgs:\ntensor: the tensor object for which the tracing is requested.\ntracepoint_name: an optional tensor tracepoint name string. A tracepoint\nname is a Tensor Tracer internal name for the tensor. 
It is useful when\ncomparing equivalent traces from different models that have different\ntensor namings. Equivalent tensors (with different names) can be mapped\nto each other by assigning a common tracepoint_name.\n\nReturns:\nThe provided tensor.", "source": "github-repos"} {"code": "def wrap_text(text, width=80):\n \n text = re.sub(r\"\\s+\", \" \", text).strip()\n wrapper = TextWrapper(\n width=width, break_long_words=False, replace_whitespace=True\n )\n return wrapper.fill(text)", "docstring": "Wrap text lines to maximum *width* characters.\n\nWrapped text is aligned against the left text border.\n\nArgs:\ntext (str): Text to wrap.\nwidth (int): Maximum number of characters per line.\n\nReturns:\nstr: Wrapped text.", "source": "juraj-google-style"} {"code": "def insert_at_frontier(self,\n operations: ops.OP_TREE,\n start: int,\n frontier: Dict[ops.Qid, int] = None\n ) -> Dict[ops.Qid, int]:\n \n if frontier is None:\n frontier = defaultdict(lambda: 0)\n operations = tuple(ops.flatten_op_tree(operations))\n if not operations:\n return frontier\n qubits = set(q for op in operations for q in op.qubits)\n if any(frontier[q] > start for q in qubits):\n raise ValueError('The frontier for qubits on which the operations'\n 'to insert act cannot be after start.')\n\n next_moments = self.next_moments_operating_on(qubits, start)\n\n insertion_indices, _ = self._pick_inserted_ops_moment_indices(\n operations, start, frontier)\n\n self._push_frontier(frontier, next_moments)\n\n self._insert_operations(operations, insertion_indices)\n\n return frontier", "docstring": "Inserts operations inline at frontier.\n\nArgs:\noperations: the operations to insert\nstart: the moment at which to start inserting the operations\nfrontier: frontier[q] is the earliest moment in which an operation\nacting on qubit q can be placed.", "source": "juraj-google-style"} {"code": "def datetimeobj_d_b_Y_H_M_S(value):\n \n d, b, Y, t, Z = value.split()\n H, M, S = t.split(\":\")\n return datetime.datetime(\n int(Y), _months[b.lower()], int(d), int(H), int(M), int(S), tzinfo=TZ_GMT\n )", "docstring": "Convert timestamp string to a datetime object.\n\nTimestamps strings like '18 Jun 2013 12:00:00 GMT' are able to be converted\nby this function.\n\nArgs:\nvalue: A timestamp string in the format '%d %b %Y %H:%M:%S GMT'.\n\nReturns:\nA datetime object.\n\nRaises:\nValueError: If timestamp is invalid.\nKeyError: If the abbrieviated month is invalid.\n\nNote: The timezone is ignored it is simply assumed to be UTC/GMT.", "source": "juraj-google-style"} {"code": "def HandleClockSync(self, response):\n self.logger.info('Clock drift token has changed: %s.', response)\n self.distro_utils.HandleClockSync(self.logger)", "docstring": "Called when clock drift token changes.\n\nArgs:\nresponse: string, the metadata response with the new drift token value.", "source": "codesearchnet"} {"code": "def download_models(self, uniprot_acc, outdir='', force_rerun=False):\n \n downloaded = []\n subset = self.get_models(uniprot_acc)\n\n for entry in subset:\n ident = '{}_{}_{}_{}'.format(uniprot_acc, entry['template'], entry['from'], entry['to'])\n outfile = op.join(outdir, ident + '.pdb')\n\n if ssbio.utils.force_rerun(flag=force_rerun, outfile=outfile):\n response = requests.get(entry['url'])\n\n if response.status_code == 404:\n log.error('{}: 404 returned, no model available.'.format(ident))\n\n else:\n with open(outfile, 'w') as f:\n f.write(response.text)\n\n log.debug('{}: downloaded homology model'.format(ident))\n downloaded.append(outfile)\n 
else:\n downloaded.append(outfile)\n\n return downloaded", "docstring": "Download all models available for a UniProt accession number.\n\nArgs:\nuniprot_acc (str): UniProt ACC/ID\noutdir (str): Path to output directory, uses working directory if not set\nforce_rerun (bool): Force a redownload the models if they already exist\n\nReturns:\nlist: Paths to the downloaded models", "source": "juraj-google-style"} {"code": "def save_state(self, out_path):\n \n\n state = self.dump_state()\n\n \n \n state = _clean_intenum(state)\n\n with open(out_path, \"w\") as outfile:\n json.dump(state, outfile, indent=4)", "docstring": "Save the current state of this emulated object to a file.\n\nArgs:\nout_path (str): The path to save the dumped state of this emulated\nobject.", "source": "juraj-google-style"} {"code": "def unsubscribe(self, future):\n \n assert future not in self._pending_unsubscribes, \\\n \"%r has already been unsubscribed from\" % \\\n self._pending_unsubscribes[future]\n subscribe = self._requests[future]\n self._pending_unsubscribes[future] = subscribe\n \n self._subscriptions.pop(subscribe.id)\n request = Unsubscribe(subscribe.id)\n request.set_callback(self._q.put)\n try:\n controller = self.get_controller(subscribe.path[0])\n except ValueError:\n \n pass\n else:\n self.handle_request(controller, request)", "docstring": "Terminates the subscription given by a future\n\nArgs:\nfuture (Future): The future of the original subscription", "source": "juraj-google-style"} {"code": "def sample(self, fields=None, count=5, sampling=None, use_cache=True, dialect=None, billing_tier=None):\n from . import _query\n sql = self._repr_sql_()\n return _query.Query.sampling_query(sql, context=self._context, count=count, fields=fields, sampling=sampling).results(use_cache=use_cache, dialect=dialect, billing_tier=billing_tier)", "docstring": "Retrieves a sampling of data from the table.\n\nArgs:\nfields: an optional list of field names to retrieve.\ncount: an optional count of rows to retrieve which is used if a specific\nsampling is not specified.\nsampling: an optional sampling strategy to apply to the table.\nuse_cache: whether to use cached results or not.\ndialect : {'legacy', 'standard'}, default 'legacy'\n'legacy' : Use BigQuery's legacy SQL dialect.\n'standard' : Use BigQuery's standard SQL (beta), which is\ncompliant with the SQL 2011 standard.\nbilling_tier: Limits the billing tier for this job. Queries that have resource\nusage beyond this tier will fail (without incurring a charge). If unspecified, this\nwill be set to your project default. 
This can also be used to override your\nproject-wide default billing tier on a per-query basis.\nReturns:\nA QueryResultsTable object containing the resulting data.\nRaises:\nException if the sample query could not be executed or query response was malformed.", "source": "codesearchnet"} {"code": "def project_hidden(x, projection_tensors, hidden_size, num_blocks):\n (batch_size, latent_dim, _) = common_layers.shape_list(x)\n x = tf.reshape(x, shape=[1, (- 1), hidden_size])\n x_tiled = tf.reshape(tf.tile(x, multiples=[num_blocks, 1, 1]), shape=[num_blocks, (- 1), hidden_size])\n x_projected = tf.matmul(x_tiled, projection_tensors)\n x_projected = tf.transpose(x_projected, perm=[1, 0, 2])\n x_4d = tf.reshape(x_projected, [batch_size, latent_dim, num_blocks, (- 1)])\n return x_4d", "docstring": "Project encoder hidden state under num_blocks using projection tensors.\n\nArgs:\nx: Encoder hidden state of shape [batch_size, latent_dim, hidden_size].\nprojection_tensors: Projection tensors used to project the hidden state.\nhidden_size: Dimension of the latent space.\nnum_blocks: Number of blocks in DVQ.\n\nReturns:\nx_projected: Projected states of shape [batch_size, latent_dim, num_blocks,\nhidden_size / num_blocks].", "source": "codesearchnet"} {"code": "async def wait(self, timeout=None):\n \n\n await asyncio.wait_for(self._future, timeout)\n\n if self._exception is not None:\n self._raise_exception()\n\n return self._result", "docstring": "Wait for this operation to finish.\n\nYou can specify an optional timeout that defaults to no timeout if\nNone is passed. The result of the operation is returned from this\nmethod. If the operation raised an exception, it is reraised from this\nmethod.\n\nArgs:\ntimeout (float): The maximum number of seconds to wait before timing\nout.", "source": "juraj-google-style"} {"code": "def remove(self):\n removes = 0\n for (path, info) in self._make_iter(search='depth'):\n if info.is_dir:\n self.fs.removetree(path)\n else:\n self.fs.remove(path)\n removes += 1\n return removes", "docstring": "Removed all matched paths.\n\nReturns:\nint: Number of file and directories removed.\n\nExample:\n>>> import fs\n>>> fs.open_fs('~/projects/my_project').glob('**/*.pyc').remove()\n29", "source": "codesearchnet"} {"code": "def _CreateMethod(self, method_name):\n \n soap_service_method = self.zeep_client.service[method_name]\n\n def MakeSoapRequest(*args):\n AddToUtilityRegistry('zeep')\n soap_headers = self._GetZeepFormattedSOAPHeaders()\n packed_args = self._PackArguments(method_name, args)\n try:\n return soap_service_method(\n *packed_args, _soapheaders=soap_headers)['body']['rval']\n except zeep.exceptions.Fault as e:\n error_list = ()\n if e.detail is not None:\n underlying_exception = e.detail.find(\n '{%s}ApiExceptionFault' % self._GetBindingNamespace())\n fault_type = self.zeep_client.get_element(\n '{%s}ApiExceptionFault' % self._GetBindingNamespace())\n fault = fault_type.parse(\n underlying_exception, self.zeep_client.wsdl.types)\n error_list = fault.errors or error_list\n raise googleads.errors.GoogleAdsServerFault(\n e.detail, errors=error_list, message=e.message)\n return MakeSoapRequest", "docstring": "Create a method wrapping an invocation to the SOAP service.\n\nArgs:\nmethod_name: A string identifying the name of the SOAP method to call.\n\nReturns:\nA callable that can be used to make the desired SOAP request.", "source": "juraj-google-style"} {"code": "def get_grouping_from_attentions(attentions: Tuple[tf.Tensor], hw_shape: Tuple[int]) -> tf.Tensor:\n 
attn_maps = []\n prev_attn_masks = None\n for attn_masks in attentions:\n attn_masks = tf.transpose(attn_masks, perm=(0, 2, 1))\n if prev_attn_masks is None:\n prev_attn_masks = attn_masks\n else:\n prev_attn_masks = tf.matmul(prev_attn_masks, attn_masks)\n cur_attn_map = resize_attention_map(tf.transpose(prev_attn_masks, perm=(0, 2, 1)), *hw_shape)\n attn_maps.append(cur_attn_map)\n final_grouping = attn_maps[-1]\n return tf.stop_gradient(final_grouping)", "docstring": "Args:\nattentions (`tuple(tf.Tensor)`): tuple of attention maps returned by `TFGroupViTVisionTransformer`\nhw_shape (`tuple(int)`): height and width of the output attention map\nReturns:\n`tf.Tensor`: the attention map of shape [batch_size, groups, height, width]", "source": "github-repos"} {"code": "def retry(transport: 'UDPTransport', messagedata: bytes, message_id: UDPMessageID, recipient: Address, stop_event: Event, timeout_backoff: Iterable[int]) -> bool:\n async_result = transport.maybe_sendraw_with_result(recipient, messagedata, message_id)\n event_quit = event_first_of(async_result, stop_event)\n for timeout in timeout_backoff:\n if (event_quit.wait(timeout=timeout) is True):\n break\n log.debug('retrying message', node=pex(transport.raiden.address), recipient=pex(recipient), msgid=message_id)\n transport.maybe_sendraw_with_result(recipient, messagedata, message_id)\n return async_result.ready()", "docstring": "Send messagedata until it's acknowledged.\n\nExit when:\n\n- The message is delivered.\n- Event_stop is set.\n- The iterator timeout_backoff runs out.\n\nReturns:\nbool: True if the message was acknowledged, False otherwise.", "source": "codesearchnet"} {"code": "def register_defs(self, def_list, **kwargs):\n \n for item in def_list:\n if isinstance(item, tuple):\n self.register_rml_def(*item, **kwargs)\n elif isinstance(item, dict):\n cp_kwargs = kwargs.copy()\n item.update(kwargs)\n self.register_rml_def(**item)", "docstring": "Registers a list of RML definition objects\n\nArgs:\n-----\ndef_list: list of objects defining the RML definitions", "source": "juraj-google-style"} {"code": "def _parse_expiry(response_data):\n \n expires_in = response_data.get('expires_in', None)\n\n if expires_in is not None:\n return _helpers.utcnow() + datetime.timedelta(\n seconds=expires_in)\n else:\n return None", "docstring": "Parses the expiry field from a response into a datetime.\n\nArgs:\nresponse_data (Mapping): The JSON-parsed response data.\n\nReturns:\nOptional[datetime]: The expiration or ``None`` if no expiration was\nspecified.", "source": "juraj-google-style"} {"code": "def get_all_for_resource(identifier, configuration=None):\n resourceview = ResourceView(configuration=configuration)\n (success, result) = resourceview._read_from_hdx('resource view', identifier, 'id', ResourceView.actions()['list'])\n resourceviews = list()\n if success:\n for resourceviewdict in result:\n resourceview = ResourceView(resourceviewdict, configuration=configuration)\n resourceviews.append(resourceview)\n return resourceviews", "docstring": "Read all resource views for a resource given by identifier from HDX and returns list of ResourceView objects\n\nArgs:\nidentifier (str): Identifier of resource\nconfiguration (Optional[Configuration]): HDX configuration. 
Defaults to global configuration.\n\nReturns:\nList[ResourceView]: List of ResourceView objects", "source": "codesearchnet"} {"code": "def get_pipe_series_output(commands: Sequence[str], stdinput: BinaryIO=None) -> bytes:\n processes = []\n for i in range(len(commands)):\n if (i == 0):\n processes.append(subprocess.Popen(shlex.split(commands[i]), stdin=subprocess.PIPE, stdout=subprocess.PIPE))\n else:\n processes.append(subprocess.Popen(shlex.split(commands[i]), stdin=processes[(i - 1)].stdout, stdout=subprocess.PIPE))\n return processes[(len(processes) - 1)].communicate(stdinput)[0]", "docstring": "Get the output from a piped series of commands.\n\nArgs:\ncommands: sequence of command strings\nstdinput: optional ``stdin`` data to feed into the start of the pipe\n\nReturns:\n``stdout`` from the end of the pipe", "source": "codesearchnet"} {"code": "def copyfile(src, dst, follow_symlinks=True):\n (src, src_is_storage) = format_and_is_storage(src)\n (dst, dst_is_storage) = format_and_is_storage(dst)\n if ((not src_is_storage) and (not dst_is_storage)):\n return shutil_copyfile(src, dst, follow_symlinks=follow_symlinks)\n with handle_os_exceptions():\n try:\n if ((not hasattr(dst, 'read')) and (not isdir(dirname(dst)))):\n raise IOError((\"No such file or directory: '%s'\" % dst))\n except ObjectPermissionError:\n pass\n _copy(src, dst, src_is_storage, dst_is_storage)", "docstring": "Copies a source file to a destination file.\n\nEquivalent to \"shutil.copyfile\".\n\nSource and destination can also be binary opened file-like objects.\n\nArgs:\nsrc (path-like object or file-like object): Source file.\ndst (path-like object or file-like object): Destination file.\nfollow_symlinks (bool): Follow symlinks.\nNot supported on cloud storage objects.\n\nRaises:\nIOError: Destination directory not found.", "source": "codesearchnet"} {"code": "async def __anit__(self, core, node):\n \n await s_base.Base.__anit__(self)\n\n self.core = core\n\n self.node = node\n self.iden = node.name()\n\n self.borked = None\n\n self.info = await node.dict()\n self.info.setdefault('owner', 'root')\n self.info.setdefault('layers', ())\n\n self.layers = []\n\n for iden in self.info.get('layers'):\n\n layr = core.layers.get(iden)\n\n if layr is None:\n self.borked = iden\n logger.warning('view %r has missing layer %r' % (self.iden, iden))\n continue\n\n if not self.layers and layr.readonly:\n self.borked = iden\n raise s_exc.ReadOnlyLayer(mesg=f'First layer {iden} must not be read-only')\n\n self.layers.append(layr)", "docstring": "Async init the view.\n\nArgs:\ncore (Cortex): The cortex that owns the view.\nnode (HiveNode): The hive node containing the view info.", "source": "juraj-google-style"} {"code": "def add_callback(self, name, func):\n \n\n if name not in self.callbacks:\n raise ValueError(\"Unknown callback name: %s\" % name)\n\n self.callbacks[name].add(func)", "docstring": "Add a callback when Device events happen\n\nArgs:\nname (str): currently support 'on_scan' and 'on_disconnect'\nfunc (callable): the function that should be called", "source": "juraj-google-style"} {"code": "def lsdirs(root=\".\", **kwargs):\n \n paths = ls(root=root, **kwargs)\n if isfile(root):\n return []\n return [_path for _path in paths if isdir(path(root, _path))]", "docstring": "Return only subdirectories from a directory listing.\n\nArguments:\n\nroot (str): Path to directory. 
Can be relative or absolute.\n**kwargs: Any additional arguments to be passed to ls().\n\nReturns:\n\nlist of str: A list of directory paths.\n\nRaises:\n\nOSError: If root directory does not exist.", "source": "juraj-google-style"} {"code": "def CredibleInterval(pmf, percentage=90):\n cdf = pmf.MakeCdf()\n prob = ((1 - (percentage / 100.0)) / 2)\n interval = (cdf.Value(prob), cdf.Value((1 - prob)))\n return interval", "docstring": "Computes a credible interval for a given distribution.\n\nIf percentage=90, computes the 90% CI.\n\nArgs:\npmf: Pmf object representing a posterior distribution\npercentage: float between 0 and 100\n\nReturns:\nsequence of two floats, low and high", "source": "codesearchnet"} {"code": "def __call__(self, shape, dtype=None):\n dtype = standardize_dtype(dtype)\n frame_length, input_channels, fft_length = shape\n win = None\n scaling = 1\n if self.window is not None:\n win = self.window\n if isinstance(win, str):\n win = scipy.signal.get_window(win, frame_length, self.periodic)\n win = ops.convert_to_tensor(win, dtype=dtype)\n if len(win.shape) != 1 or win.shape[-1] != frame_length:\n raise ValueError(f'The shape of `window` must be equal to [frame_length].Received: window shape={win.shape}')\n win = ops.reshape(win, [frame_length, 1, 1])\n if self.scaling == 'density':\n scaling = ops.sqrt(ops.sum(ops.square(win)))\n elif self.scaling == 'spectrum':\n scaling = ops.sum(ops.abs(win))\n _fft_length = (fft_length - 1) * 2\n freq = ops.reshape(ops.arange(fft_length, dtype=dtype), (1, 1, fft_length)) / _fft_length\n time = ops.reshape(ops.arange(frame_length, dtype=dtype), (frame_length, 1, 1))\n args = -2 * time * freq * ops.arccos(ops.cast(-1, dtype))\n if self.side == 'real':\n kernel = ops.cast(ops.cos(args), dtype)\n else:\n kernel = ops.cast(ops.sin(args), dtype)\n if win is not None:\n kernel = kernel * win / scaling\n return kernel", "docstring": "Returns a tensor object initialized as specified by the initializer.\n\nThe shape is assumed to be `(T, 1, F // 2 + 1)`, where `T` is the size\nof the given window, and `F` is the number of frequency bands. Only half\nthe frequency bands are used, which is a common practice in STFT,\nbecause the second half are the conjugates of the first half in\na reversed order.\n\nArgs:\nshape: Shape of the tensor.\ndtype: Optional dtype of the tensor. Only numeric or boolean dtypes\nare supported. 
If not specified, `keras.backend.floatx()`\nis used, which defaults to `float32` unless you configured it\notherwise (via `keras.backend.set_floatx(float_dtype)`).", "source": "github-repos"} {"code": "def _CheckStatusAnalysisProcess(self, pid):\n self._RaiseIfNotRegistered(pid)\n if (pid in self._completed_analysis_processes):\n status_indicator = definitions.STATUS_INDICATOR_COMPLETED\n process_status = {'processing_status': status_indicator}\n used_memory = 0\n else:\n process = self._processes_per_pid[pid]\n process_status = self._QueryProcessStatus(process)\n if (process_status is None):\n process_is_alive = False\n else:\n process_is_alive = True\n process_information = self._process_information_per_pid[pid]\n used_memory = (process_information.GetUsedMemory() or 0)\n if (self._worker_memory_limit and (used_memory > self._worker_memory_limit)):\n logger.warning('Process: {0:s} (PID: {1:d}) killed because it exceeded the memory limit: {2:d}.'.format(process.name, pid, self._worker_memory_limit))\n self._KillProcess(pid)\n if isinstance(process_status, dict):\n self._rpc_errors_per_pid[pid] = 0\n status_indicator = process_status.get('processing_status', None)\n if (status_indicator == definitions.STATUS_INDICATOR_COMPLETED):\n self._completed_analysis_processes.add(pid)\n else:\n rpc_errors = (self._rpc_errors_per_pid.get(pid, 0) + 1)\n self._rpc_errors_per_pid[pid] = rpc_errors\n if (rpc_errors > self._MAXIMUM_RPC_ERRORS):\n process_is_alive = False\n if process_is_alive:\n rpc_port = process.rpc_port.value\n logger.warning('Unable to retrieve process: {0:s} (PID: {1:d}) status via RPC socket: http://localhost:{2:d}'.format(process.name, pid, rpc_port))\n processing_status_string = 'RPC error'\n status_indicator = definitions.STATUS_INDICATOR_RUNNING\n else:\n processing_status_string = 'killed'\n status_indicator = definitions.STATUS_INDICATOR_KILLED\n process_status = {'processing_status': processing_status_string}\n self._UpdateProcessingStatus(pid, process_status, used_memory)\n if (status_indicator in definitions.ERROR_STATUS_INDICATORS):\n logger.error('Process {0:s} (PID: {1:d}) is not functioning correctly. 
Status code: {2!s}.'.format(process.name, pid, status_indicator))\n self._TerminateProcessByPid(pid)", "docstring": "Checks the status of an analysis process.\n\nArgs:\npid (int): process ID (PID) of a registered analysis process.\n\nRaises:\nKeyError: if the process is not registered with the engine.", "source": "codesearchnet"} {"code": "def __init__(\n self, data_stream=None, inode=None, location=None, parent=None, **kwargs):\n \n \n \n if (inode is None and not location) or not parent:\n raise ValueError('Missing inode and location, or parent value.')\n\n super(TSKPathSpec, self).__init__(parent=parent, **kwargs)\n self.data_stream = data_stream\n self.inode = inode\n self.location = location", "docstring": "Initializes a path specification.\n\nNote that the TSK path specification must have a parent.\n\nArgs:\ndata_stream (Optional[str]): data stream name, where None indicates\nthe default data stream.\ninode (Optional[int]): inode.\nlocation (Optional[str]): location.\nparent (Optional[PathSpec]): parent path specification.\n\nRaises:\nValueError: when inode and location, or parent are not set.", "source": "juraj-google-style"} {"code": "def recordbatch(self, auth, resource, entries, defer=False):\n \n return self._call('recordbatch', auth, [resource, entries], defer)", "docstring": "Records a list of historical entries to the resource specified.\n\nCalls a function that bulids a request that writes a list of historical entries to the\nspecified resource.\n\nArgs:\nauth: Takes the device cik\nresource: Takes the dataport alias or rid.\nentries: A list of entries to write to the resource.", "source": "juraj-google-style"} {"code": "def observations_np(self, boundary=20):\n \n list_observations_np_ts = [t.observations_np for t in self.trajectories]\n \n OBS = list_observations_np_ts[0].shape[1:] \n\n num_time_steps = [t.num_time_steps for t in self.trajectories]\n t_max = max(num_time_steps)\n \n boundary = int(boundary)\n bucket_length = boundary * int(np.ceil(float(t_max) / boundary))\n\n def padding_config(obs):\n \n num_to_pad = bucket_length + 1 - obs.shape[0]\n return [(0, num_to_pad)] + [(0, 0)] * len(OBS)\n\n return np.stack([\n np.pad(obs, padding_config(obs), \"constant\")\n for obs in list_observations_np_ts]), num_time_steps", "docstring": "Pads the observations in all the trajectories and returns them.\n\nArgs:\nboundary: integer, Observations will be padded to (n * boundary) + 1 where\nn is an integer.\n\nReturns:\na tuple(padded_observations, time_steps), with shapes:\npadded_observations: (self.batch_size, n * boundary + 1) + OBS\ntime_steps: integer list of length = self.batch_size", "source": "juraj-google-style"} {"code": "def __init__(self, criterion, description=None, font_attr=DEFAULT_TENSOR_ELEMENT_HIGHLIGHT_FONT_ATTR):\n self.criterion = criterion\n self.description = description\n self.font_attr = font_attr", "docstring": "Constructor of HighlightOptions.\n\nArgs:\ncriterion: (callable) A callable of the following signature:\ndef to_highlight(X):\n# Args:\n# X: The tensor to highlight elements in.\n#\n# Returns:\n# (boolean ndarray) A boolean ndarray of the same shape as X\n# indicating which elements are to be highlighted (iff True).\nThis callable will be used as the argument of np.argwhere() to\ndetermine which elements of the tensor are to be highlighted.\ndescription: (str) Description of the highlight criterion embodied by\ncriterion.\nfont_attr: (str) Font attribute to be applied to the\nhighlighted elements.", "source": "github-repos"} {"code": "def 
__intervals_from_tops(self, tops, values, basis, components, field=None, ignore_nan=True):\n length = float(basis.size)\n (start, stop) = (basis[0], basis[(- 1)])\n tops = [(start + ((p / (length - 1)) * (stop - start))) for p in tops]\n bases = (tops[1:] + [stop])\n list_of_Intervals = []\n for (i, t) in enumerate(tops):\n (v, c, d) = (values[i], [], {})\n if (ignore_nan and np.isnan(v)):\n continue\n if (field is not None):\n d = {field: v}\n if (components is not None):\n try:\n c = [deepcopy(components[int(v)])]\n except IndexError:\n c = []\n if (c and (c[0] is None)):\n c = []\n interval = Interval(t, bases[i], data=d, components=c)\n list_of_Intervals.append(interval)\n return list_of_Intervals", "docstring": "Private method. Take a sequence of tops in an arbitrary dimension,\nand provide a list of intervals from which a striplog can be made.\n\nThis is only intended to be used by ``from_image()``.\n\nArgs:\ntops (iterable). A list of floats.\nvalues (iterable). A list of values to look up.\nbasis (iterable). A list of components.\ncomponents (iterable). A list of Components.\n\nReturns:\nList. A list of Intervals.", "source": "codesearchnet"} {"code": "def cw_ssim_value(self, target, width=30):\n \n if not isinstance(target, SSIMImage):\n target = SSIMImage(target, size=self.img.size)\n\n \n widths = np.arange(1, width+1)\n\n \n sig1 = np.asarray(self.img.img_gray.getdata())\n sig2 = np.asarray(target.img_gray.getdata())\n\n \n cwtmatr1 = signal.cwt(sig1, signal.ricker, widths)\n cwtmatr2 = signal.cwt(sig2, signal.ricker, widths)\n\n \n c1c2 = np.multiply(abs(cwtmatr1), abs(cwtmatr2))\n c1_2 = np.square(abs(cwtmatr1))\n c2_2 = np.square(abs(cwtmatr2))\n num_ssim_1 = 2 * np.sum(c1c2, axis=0) + self.k\n den_ssim_1 = np.sum(c1_2, axis=0) + np.sum(c2_2, axis=0) + self.k\n\n \n c1c2_conj = np.multiply(cwtmatr1, np.conjugate(cwtmatr2))\n num_ssim_2 = 2 * np.abs(np.sum(c1c2_conj, axis=0)) + self.k\n den_ssim_2 = 2 * np.sum(np.abs(c1c2_conj), axis=0) + self.k\n\n \n ssim_map = (num_ssim_1 / den_ssim_1) * (num_ssim_2 / den_ssim_2)\n\n \n index = np.average(ssim_map)\n return index", "docstring": "Compute the complex wavelet SSIM (CW-SSIM) value from the reference\nimage to the target image.\n\nArgs:\ntarget (str or PIL.Image): Input image to compare the reference image\nto. This may be a PIL Image object or, to save time, an SSIMImage\nobject (e.g. 
the img member of another SSIM object).\nwidth: width for the wavelet convolution (default: 30)\n\nReturns:\nComputed CW-SSIM float value.", "source": "juraj-google-style"} {"code": "def __init__(self, station_code, DST=False):\n \n filename = env.WEATHER_DATA_PATH + '/' + _basename(station_code)\n self.csvfile = None\n try:\n self.csvfile = open(filename)\n except IOError:\n logger.info(\"File not found\")\n download_extract(_eere_url(station_code))\n self.csvfile = open(filename)\n logging.debug('opened %s', self.csvfile.name)\n fieldnames = [\"Year\", \"Month\", \"Day\", \"Hour\", \"Minute\", \"DS\",\n \"Dry-bulb (C)\", \"Dewpoint (C)\", \"Relative Humidity\",\n \"Pressure (Pa)\", \"ETR (W/m^2)\", \"ETRN (W/m^2)\",\n \"HIR (W/m^2)\", \"GHI (W/m^2)\", \"DNI (W/m^2)\",\n \"DHI (W/m^2)\", \"GHIL (lux)\", \"DNIL (lux)\", \"DFIL (lux)\",\n \"Zlum (Cd/m2)\", \"Wdir (degrees)\", \"Wspd (m/s)\",\n \"Ts cover\", \"O sky cover\", \"CeilHgt (m)\",\n \"Present Weather\", \"Pw codes\", \"Pwat (cm)\",\n \"AOD (unitless)\", \"Snow Depth (cm)\",\n \"Days since snowfall\"]\n station_meta = self.csvfile.readline().split(',')\n self.station_name = station_meta[1]\n self.CC = station_meta[3]\n self.station_fmt = station_meta[4]\n self.station_code = station_meta[5]\n self.lat = station_meta[6]\n self.lon = station_meta[7]\n self.TZ = float(station_meta[8])\n self.ELEV = station_meta[9]\n self.DST = DST\n\n if self.DST:\n geocoder = geocoders.GoogleV3()\n self.local_tz = pytz.timezone(geocoder.timezone((self.lat,\n self.lon)).zone)\n dummy = \"\"\n for _ in range(7):\n dummy += self.csvfile.readline()\n self.epw_data = csv.DictReader(self.csvfile, fieldnames=fieldnames)", "docstring": "Data for a weather station.\n\nArgs:\nstation_code (str): Station code of weather station\nDST (bool): Weather timestands in daylight savings. Default False", "source": "juraj-google-style"} {"code": "def _to_enos_networks(networks):\n \n nets = []\n for roles, network in networks:\n nets.append(network.to_enos(roles))\n logger.debug(nets)\n return nets", "docstring": "Transform the networks returned by deploy5k.\n\nArgs:\nnetworks (dict): networks returned by\n:py:func:`enoslib.infra.provider.Provider.init`", "source": "juraj-google-style"} {"code": "def get_escalatee(self, main_type, sub_type, unique_id, escalatee_id, params=None):\n \n params = params or {}\n\n return self.escalatee(main_type, sub_type, unique_id, escalatee_id, params=params)", "docstring": "Args:\nmain_type:\nsub_type:\nunique_id:\nescalatee_id:\nparams:\n\nReturn:", "source": "juraj-google-style"} {"code": "def merge_caches_on_tpu(self, local_tpu_cache_tensor):\n x = array_ops.broadcast_to(local_tpu_cache_tensor, shape=[self._tt_config.num_replicas] + local_tpu_cache_tensor.shape.as_list())\n if tensor_tracer_flags.TT_SINGLE_CORE_SUMMARIES.value:\n return x\n return tpu_ops.all_to_all(x, concat_dimension=0, split_dimension=0, split_count=self._tt_config.num_replicas, group_assignment=[list(range(self._tt_config.num_replicas))])", "docstring": "Merges the given caches on tpu.\n\nArgs:\nlocal_tpu_cache_tensor: A local tensor that needs to be merged\nby concanting data from other tpu cores.\nReturns:\nA merged tf.Tensor.", "source": "github-repos"} {"code": "class GaussianDropout(layers.Layer):\n\n def __init__(self, rate, seed=None, **kwargs):\n super().__init__(**kwargs)\n if not 0 <= rate <= 1:\n raise ValueError(f'Invalid value received for argument `rate`. Expected a float value between 0 and 1. 
Received: rate={rate}')\n self.rate = rate\n self.seed = seed\n if rate > 0:\n self.seed_generator = backend.random.SeedGenerator(seed)\n self.supports_masking = True\n self._build_at_init()\n\n def call(self, inputs, training=False):\n if training and self.rate > 0:\n stddev = math.sqrt(self.rate / (1.0 - self.rate))\n return inputs * backend.random.normal(shape=ops.shape(inputs), mean=1.0, stddev=stddev, dtype=self.compute_dtype, seed=self.seed_generator)\n return inputs\n\n def compute_output_shape(self, input_shape):\n return input_shape\n\n def get_config(self):\n base_config = super().get_config()\n config = {'rate': self.rate, 'seed': self.seed}\n return {**base_config, **config}", "docstring": "Apply multiplicative 1-centered Gaussian noise.\n\nAs it is a regularization layer, it is only active at training time.\n\nArgs:\nrate: Float, drop probability (as with `Dropout`).\nThe multiplicative noise will have\nstandard deviation `sqrt(rate / (1 - rate))`.\nseed: Integer, optional random seed to enable deterministic behavior.\n\nCall arguments:\ninputs: Input tensor (of any rank).\ntraining: Python boolean indicating whether the layer should behave in\ntraining mode (adding dropout) or in inference mode (doing nothing).", "source": "github-repos"} {"code": "def _DownloadUrl(self, url, dest_dir):\n dest_file = tempfile.NamedTemporaryFile(dir=dest_dir, delete=False)\n dest_file.close()\n dest = dest_file.name\n self.logger.info('Downloading url from %s to %s.', url, dest)\n try:\n urlretrieve.urlretrieve(url, dest)\n return dest\n except (httpclient.HTTPException, socket.error, urlerror.URLError) as e:\n self.logger.warning('Could not download %s. %s.', url, str(e))\n except Exception as e:\n self.logger.warning('Exception downloading %s. %s.', url, str(e))\n return None", "docstring": "Download a script from a given URL.\n\nArgs:\nurl: string, the URL to download.\ndest_dir: string, the path to a directory for storing metadata scripts.\n\nReturns:\nstring, the path to the file storing the metadata script.", "source": "codesearchnet"} {"code": "def get_metrics_namespace(self) -> str:\n return 'BeamML_HuggingFacePipelineModelHandler'", "docstring": "Returns:\nA namespace for metrics collected by the RunInference transform.", "source": "github-repos"} {"code": "def apply_operation_back(self, op, qargs=None, cargs=None, condition=None):\n qargs = (qargs or [])\n cargs = (cargs or [])\n all_cbits = self._bits_in_condition(condition)\n all_cbits.extend(cargs)\n self._check_condition(op.name, condition)\n self._check_bits(qargs, self.output_map)\n self._check_bits(all_cbits, self.output_map)\n self._add_op_node(op, qargs, cargs, condition)\n al = [qargs, all_cbits]\n for q in itertools.chain(*al):\n ie = list(self._multi_graph.predecessors(self.output_map[q]))\n if (len(ie) != 1):\n raise DAGCircuitError('output node has multiple in-edges')\n self._multi_graph.add_edge(ie[0], self._id_to_node[self._max_node_id], name=('%s[%s]' % (q[0].name, q[1])), wire=q)\n self._multi_graph.remove_edge(ie[0], self.output_map[q])\n self._multi_graph.add_edge(self._id_to_node[self._max_node_id], self.output_map[q], name=('%s[%s]' % (q[0].name, q[1])), wire=q)\n return self._id_to_node[self._max_node_id]", "docstring": "Apply an operation to the output of the circuit.\n\nArgs:\nop (Instruction): the operation associated with the DAG node\nqargs (list[tuple]): qubits that op will be applied to\ncargs (list[tuple]): cbits that op will be applied to\ncondition (tuple or None): optional condition 
(ClassicalRegister, int)\n\nReturns:\nDAGNode: the current max node\n\nRaises:\nDAGCircuitError: if a leaf node is connected to multiple outputs", "source": "codesearchnet"} {"code": "def markers(self, values):\n \n if not isinstance(values, list):\n raise TypeError(\"Markers must be a list of objects\")\n\n self.options[\"markers\"] = values", "docstring": "Set the markers.\n\nArgs:\nvalues (list): list of marker objects.\n\nRaises:\nValueError: Markers must be a list of objects.", "source": "juraj-google-style"} {"code": "def read_profile(name):\n \n config = configparser.ConfigParser()\n config.read(CONFIG_FILE)\n profile = config[name]\n repo = profile[\"repo\"]\n token = profile[\"token\"]\n return {\"repo\": repo, \"token\": token}", "docstring": "Get a named profile from the CONFIG_FILE.\n\nArgs:\n\nname\nThe name of the profile to load.\n\nReturns:\nA dictionary with the profile's ``repo`` and ``token`` values.", "source": "juraj-google-style"} {"code": "def _init_net_specs(conf):\n for (net_name, net_spec) in conf.get('nets', {}).items():\n net_spec['name'] = net_name\n net_spec['mapping'] = {}\n net_spec.setdefault('type', 'nat')\n return conf", "docstring": "Given a configuration specification, initializes all the net\ndefinitions in it so they can be used comfortably\n\nArgs:\nconf (dict): Configuration specification\n\nReturns:\ndict: the adapted new conf", "source": "codesearchnet"} {"code": "def open(self, filepath):\n \n with io.open(filepath, 'r', encoding='utf-8') as fp:\n content = fp.read()\n return content", "docstring": "Open settings backend to return its content\n\nArgs:\nfilepath (str): Settings object, depends from backend\n\nReturns:\nstring: File content.", "source": "juraj-google-style"} {"code": "def generate_code(max_length, max_nest, ops):\n stack = []\n\n def fetch_one():\n if stack:\n return stack.pop()\n else:\n value = random.randint((10 ** (max_length - 1)), ((10 ** max_length) - 1))\n code = str(value)\n return (value, code)\n\n def fetch(num_operands):\n (values, codes) = zip(*[fetch_one() for _ in six.moves.range(num_operands)])\n return (values, codes)\n for _ in six.moves.range(max_nest):\n op = random.choice(ops)\n (values, codes) = fetch(op.num_operands)\n new_value = op.eval(values)\n new_code = op.get_code(codes)\n stack.append((new_value, (('(' + new_code) + ')')))\n (final_value, final_code) = stack.pop()\n final_code = final_code[1:(- 1)]\n final_code.strip('()')\n if (not op.is_memory):\n final_value = (int(final_value) % (10 ** (max_length + 1)))\n return (str(final_value), final_code)", "docstring": "Generates code samples.\n\nArgs:\nmax_length: int. max literal length.\nmax_nest: int. max nesting level.\nops: CodeOp. set of allowable operations.\n\nReturns:\n1. (str) output value.\n2. 
(str) Code operation.", "source": "codesearchnet"} {"code": "def events(self, institute, case=None, variant_id=None, level=None, comments=False, panel=None):\n query = {}\n if variant_id:\n if comments:\n LOG.debug('Fetching all comments for institute {0} case {1} variant {2}'.format(institute['_id'], case['_id'], variant_id))\n query = {'$or': [{'category': 'variant', 'variant_id': variant_id, 'verb': 'comment', 'level': 'global'}, {'category': 'variant', 'variant_id': variant_id, 'institute': institute['_id'], 'case': case['_id'], 'verb': 'comment', 'level': 'specific'}]}\n else:\n query['institute'] = institute['_id']\n query['category'] = 'variant'\n query['variant_id'] = variant_id\n query['case'] = case['_id']\n else:\n query['institute'] = institute['_id']\n if panel:\n query['panel'] = panel\n else:\n query['category'] = 'case'\n if case:\n query['case'] = case['_id']\n if comments:\n query['verb'] = 'comment'\n return self.event_collection.find(query).sort('created_at', pymongo.DESCENDING)", "docstring": "Fetch events from the database.\n\nArgs:\ninstitute (dict): A institute\ncase (dict): A case\nvariant_id (str, optional): global variant id\nlevel (str, optional): restrict comments to 'specific' or 'global'\ncomments (bool, optional): restrict events to include only comments\npanel (str): A panel name\n\nReturns:\npymongo.Cursor: Query result", "source": "codesearchnet"} {"code": "def _assert_validators(self, validators):\n for validator in sorted(validators, key=(lambda validator: validator.insertion_index)):\n try:\n validator.verify(self)\n except _exceptions.ValidationError as e:\n message = validator.print_flags_with_values(self)\n raise _exceptions.IllegalFlagValueError(('%s: %s' % (message, str(e))))", "docstring": "Asserts if all validators in the list are satisfied.\n\nIt asserts validators in the order they were created.\n\nArgs:\nvalidators: Iterable(validators.Validator), validators to be\nverified.\nRaises:\nAttributeError: Raised if validators work with a non-existing flag.\nIllegalFlagValueError: Raised if validation fails for at least one\nvalidator.", "source": "codesearchnet"} {"code": "def _freeze_keras_model(self):\n input_signature = None\n if not isinstance(self._keras_model.call, _def_function.Function):\n input_signature = _model_input_signature(self._keras_model, keep_original_batch_size=True)\n func = _trace_model_call(self._keras_model, input_signature)\n concrete_func = func.get_concrete_function()\n self._funcs = [concrete_func]\n frozen_func, graph_def = _convert_to_constants.convert_variables_to_constants_v2_as_graph(self._funcs[0], lower_control_flow=False)\n input_tensors = [tensor for tensor in frozen_func.inputs if tensor.dtype != _dtypes.resource]\n output_tensors = frozen_func.outputs\n return (graph_def, input_tensors, output_tensors, frozen_func)", "docstring": "Freeze Keras model to frozen graph.\n\nReturns:\ngraph_def: The frozen GraphDef.\ninput_tensors: List of input tensors.\noutput_tensors: List of output tensors.\nfrozen_func: The frozen ConcreteFunction.", "source": "github-repos"} {"code": "def _testSimpleHelper(self, dtype, test_cases):\n current_test_case = []\n dataset = dataset_ops.Dataset.from_generator(lambda: current_test_case, dtype).unique()\n for test_case, expected in test_cases:\n current_test_case = test_case\n self.assertDatasetProduces(dataset, [compat.as_bytes(element) if dtype == dtypes.string else element for element in expected])", "docstring": "Test the `unique()` transformation on a list of test 
cases.\n\nArgs:\ndtype: The `dtype` of the elements in each test case.\ntest_cases: A list of pairs of lists. The first component is the test\ninput that will be passed to the transformation; the second component is\nthe expected sequence of outputs from the transformation.", "source": "github-repos"} {"code": "def tv(self, **kwargs):\n path = self._get_path('tv')\n response = self._GET(path, kwargs)\n self._set_attrs_to_values(response)\n return response", "docstring": "Search for TV shows by title.\n\nArgs:\nquery: CGI escpaed string.\npage: (optional) Minimum value of 1. Expected value is an integer.\nlanguage: (optional) ISO 639-1 code.\nfirst_air_date_year: (optional) Filter the results to only match\nshows that have a air date with with value.\nsearch_type: (optional) By default, the search type is 'phrase'.\nThis is almost guaranteed the option you will want.\nIt's a great all purpose search type and by far the\nmost tuned for every day querying. For those wanting\nmore of an \"autocomplete\" type search, set this\noption to 'ngram'.\n\nReturns:\nA dict respresentation of the JSON returned from the API.", "source": "codesearchnet"} {"code": "def remove_accent_marks(text, excluded=None):\n \n if excluded is None:\n excluded = set()\n\n return unicodedata.normalize(\n 'NFKC', ''.join(\n c for c in unicodedata.normalize(\n 'NFKD', text) if unicodedata.category(c) != 'Mn' or c in excluded))", "docstring": "Remove accent marks from input text.\n\nThis function removes accent marks in the text, but leaves\nunicode characters defined in the 'excluded' parameter.\n\nArgs:\ntext: The text to be processed.\nexcluded: Set of unicode characters to exclude.\n\nReturns:\nThe text without accent marks.", "source": "juraj-google-style"} {"code": "def _get_extras(self):\n extra_parts = ['']\n for value in self._unknown_keys.values():\n extra_parts.append(value)\n extra_parts.append(self._known_keys[_InstrumentationKnownStatusKeys.STREAM])\n extra_parts.append(self._known_keys[_InstrumentationKnownResultKeys.SHORTMSG])\n extra_parts.append(self._known_keys[_InstrumentationKnownResultKeys.LONGMSG])\n extra_parts.append(self._known_keys[_InstrumentationKnownStatusKeys.ERROR])\n if self._known_keys[_InstrumentationKnownStatusKeys.STACK] not in self._known_keys[_InstrumentationKnownStatusKeys.STREAM]:\n extra_parts.append(self._known_keys[_InstrumentationKnownStatusKeys.STACK])\n return '\\n'.join(filter(None, extra_parts))", "docstring": "Gets the output for the extras section of the TestResultRecord.\n\nReturns:\nA string to set for a TestResultRecord's extras.", "source": "github-repos"} {"code": "def resolve_lookups(variable, context, provider):\n resolved_lookups = {}\n for lookup in variable.lookups:\n try:\n handler = LOOKUP_HANDLERS[lookup.type]\n except KeyError:\n raise UnknownLookupType(lookup)\n try:\n resolved_lookups[lookup] = handler(value=lookup.input, context=context, provider=provider)\n except Exception as e:\n raise FailedVariableLookup(variable.name, lookup, e)\n return resolved_lookups", "docstring": "Resolve a set of lookups.\n\nArgs:\nvariable (:class:`stacker.variables.Variable`): The variable resolving\nit's lookups.\ncontext (:class:`stacker.context.Context`): stacker context\nprovider (:class:`stacker.provider.base.BaseProvider`): subclass of the\nbase provider\n\nReturns:\ndict: dict of Lookup -> resolved value", "source": "codesearchnet"} {"code": "def sequence_beam_search(symbols_to_logits_fn, initial_ids, initial_cache, vocab_size, beam_size, alpha, max_decode_length, 
eos_id):\n batch_size = tf.shape(initial_ids)[0]\n sbs = SequenceBeamSearch(symbols_to_logits_fn, vocab_size, batch_size, beam_size, alpha, max_decode_length, eos_id)\n return sbs.search(initial_ids, initial_cache)", "docstring": "Search for sequence of subtoken ids with the largest probability.\n\nArgs:\nsymbols_to_logits_fn: A function that takes in ids, index, and cache as\narguments. The passed in arguments will have shape:\nids -> [batch_size * beam_size, index]\nindex -> [] (scalar)\ncache -> nested dictionary of tensors [batch_size * beam_size, ...]\nThe function must return logits and new cache.\nlogits -> [batch * beam_size, vocab_size]\nnew cache -> same shape/structure as inputted cache\ninitial_ids: Starting ids for each batch item.\nint32 tensor with shape [batch_size]\ninitial_cache: dict containing starting decoder variables information\nvocab_size: int size of tokens\nbeam_size: int number of beams\nalpha: float defining the strength of length normalization\nmax_decode_length: maximum length to decoded sequence\neos_id: int id of eos token, used to determine when a sequence has finished\n\nReturns:\nTop decoded sequences [batch_size, beam_size, max_decode_length]\nsequence scores [batch_size, beam_size]", "source": "codesearchnet"} {"code": "def send(self, cumulative_counters=None, gauges=None, counters=None):\n if ((not gauges) and (not cumulative_counters) and (not counters)):\n return\n data = {'cumulative_counter': cumulative_counters, 'gauge': gauges, 'counter': counters}\n _logger.debug('Sending datapoints to SignalFx: %s', data)\n for (metric_type, datapoints) in data.items():\n if (not datapoints):\n continue\n if (not isinstance(datapoints, list)):\n raise TypeError('Datapoints not of type list %s', datapoints)\n for datapoint in datapoints:\n self._add_extra_dimensions(datapoint)\n self._add_to_queue(metric_type, datapoint)\n self._start_thread()", "docstring": "Send the given metrics to SignalFx.\n\nArgs:\ncumulative_counters (list): a list of dictionaries representing the\ncumulative counters to report.\ngauges (list): a list of dictionaries representing the gauges to\nreport.\ncounters (list): a list of dictionaries representing the counters\nto report.", "source": "codesearchnet"} {"code": "def short(self, url):\n \n url = self.clean_url(url)\n shorten_url = f'{self.api_url}v1/shorten'\n payload = {\n 'domain': getattr(self, 'domain', 'adf.ly'),\n 'advert_type': getattr(self, 'type', 'int'),\n 'group_id': getattr(self, 'group_id', None),\n 'key': self.api_key,\n 'user_id': self.user_id,\n 'url': url,\n }\n response = self._post(shorten_url, data=payload)\n if not response.ok:\n raise BadAPIResponseException(response.content)\n\n try:\n data = response.json()\n except json.decoder.JSONDecodeError:\n raise BadAPIResponseException('API response could not be decoded')\n\n if data.get('errors'):\n errors = ','.join(i['msg'] for i in data['errors'])\n raise ShorteningErrorException(errors)\n\n if not data.get('data'):\n raise BadAPIResponseException(response.content)\n\n return data['data'][0]['short_url']", "docstring": "Short implementation for Adf.ly\nArgs:\nurl: the URL you want to shorten\n\nReturns:\nA string containing the shortened URL\n\nRaises:\nBadAPIResponseException: If the data is malformed or we got a bad\nstatus code on API response\nShorteningErrorException: If the API Returns an error as response", "source": "juraj-google-style"} {"code": "def attention_lm_moe_prepare_decoder(targets, hparams):\n \n targets_pad_mask = 
common_attention.embedding_to_padding(targets)\n with tf.name_scope(\"pad_remover\"):\n \n \n \n pad_remover = expert_utils.PadRemover(targets_pad_mask)\n\n if hparams.prepend_mode == \"prepend_inputs_full_attention\":\n decoder_self_attention_bias = (\n common_attention.attention_bias_prepend_inputs_full_attention(\n targets_pad_mask))\n else:\n decoder_self_attention_bias = (\n common_attention.attention_bias_lower_triangle(tf.shape(targets)[1]))\n decoder_input = common_layers.shift_right_3d(targets)\n if hparams.pos == \"timing\":\n decoder_input = common_attention.add_timing_signal_1d(decoder_input)\n return (decoder_input, decoder_self_attention_bias, pad_remover)", "docstring": "Prepare one shard of the model for the decoder.\n\nArgs:\ntargets: a Tensor.\nhparams: run hyperparameters\n\nReturns:\ndecoder_input: a Tensor, bottom of decoder stack\ndecoder_self_attention_bias: a Tensor, containing large negative values\nto implement masked attention and possibly biases for diagonal alignments\npad_remover (expert_utils.PadRemover): an util object to remove padding", "source": "juraj-google-style"} {"code": "def add_error(self, error, critical=False):\n self.errors.append((error, critical))", "docstring": "Adds an error to the state.\n\nArgs:\nerror: The text that will be added to the error list.\ncritical: If set to True and the error is checked with check_errors, will\ndfTimewolf will abort.", "source": "codesearchnet"} {"code": "def ascii_tree(self, no_types: bool=False, val_count: bool=False) -> str:\n return self.schema._ascii_tree('', no_types, val_count)", "docstring": "Generate ASCII art representation of the schema tree.\n\nArgs:\nno_types: Suppress output of data type info.\nval_count: Show accumulated validation counts.\n\nReturns:\nString with the ASCII tree.", "source": "codesearchnet"} {"code": "def _ConvertToTimestamp(self, date, time, timezone):\n if ((not date) and (not time)):\n raise errors.TimestampError('Unable to extract timestamp from McAfee AV logline.')\n try:\n time_string = '{0:s} {1:s}'.format(date, time)\n except UnicodeDecodeError:\n raise errors.TimestampError('Unable to form a timestamp string.')\n return timelib.Timestamp.FromTimeString(time_string, timezone=timezone)", "docstring": "Converts date and time values into a timestamp.\n\nThe date and time are made up of two strings, the date and the time,\nseparated by a tab. The time is in local time. 
The month and day can\nbe either 1 or 2 characters long, e.g.: 7/30/2013\\\\t10:22:48 AM\n\nArgs:\ndate (str): date.\ntime (str): time.\ntimezone (pytz.timezone): timezone of the date and time.\n\nReturns:\nint: a timestamp integer containing the number of micro seconds since\nJanuary 1, 1970, 00:00:00 UTC.\n\nRaises:\nTimestampError: if the timestamp is badly formed or unable to transfer\nthe supplied date and time into a timestamp.", "source": "codesearchnet"} {"code": "def compute_ngrams(word_list, S=3, T=3):\n \n _ngrams = []\n if isinstance(word_list, str):\n word_list = [word_list]\n for word in word_list:\n for n in range(S, T+1):\n _ngrams += zip(*(word[i:] for i in range(n)))\n return [''.join(_ngram) for _ngram in _ngrams]", "docstring": "Compute NGrams in the word_list from [S-T)\nArgs:\nword_list (list): A list of words to compute ngram set from\nS (int): The smallest NGram (default=3)\nT (int): The biggest NGram (default=3)", "source": "juraj-google-style"} {"code": "def get_enterprise_user_id(self, obj):\n \n \n \n enterprise_learner = EnterpriseCustomerUser.objects.filter(user_id=obj.id).first()\n\n return enterprise_learner and enterprise_learner.id", "docstring": "Get enterprise user id from user object.\n\nArguments:\nobj (User): Django User object\n\nReturns:\n(int): Primary Key identifier for enterprise user object.", "source": "juraj-google-style"} {"code": "def euler_angles_1q(unitary_matrix):\n \n if unitary_matrix.shape != (2, 2):\n raise QiskitError(\"euler_angles_1q: expected 2x2 matrix\")\n phase = la.det(unitary_matrix)**(-1.0/2.0)\n U = phase * unitary_matrix \n \n \n \n \n \n \n if abs(U[0, 0]) > _CUTOFF_PRECISION:\n theta = 2 * math.acos(abs(U[0, 0]))\n else:\n theta = 2 * math.asin(abs(U[1, 0]))\n \n phase11 = 0.0\n phase10 = 0.0\n if abs(math.cos(theta/2.0)) > _CUTOFF_PRECISION:\n phase11 = U[1, 1] / math.cos(theta/2.0)\n if abs(math.sin(theta/2.0)) > _CUTOFF_PRECISION:\n phase10 = U[1, 0] / math.sin(theta/2.0)\n phiplambda = 2 * math.atan2(np.imag(phase11), np.real(phase11))\n phimlambda = 2 * math.atan2(np.imag(phase10), np.real(phase10))\n phi = 0.0\n if abs(U[0, 0]) > _CUTOFF_PRECISION and abs(U[1, 0]) > _CUTOFF_PRECISION:\n phi = (phiplambda + phimlambda) / 2.0\n lamb = (phiplambda - phimlambda) / 2.0\n else:\n if abs(U[0, 0]) < _CUTOFF_PRECISION:\n lamb = -phimlambda\n else:\n lamb = phiplambda\n \n Rzphi = np.array([[np.exp(-1j*phi/2.0), 0],\n [0, np.exp(1j*phi/2.0)]], dtype=complex)\n Rytheta = np.array([[np.cos(theta/2.0), -np.sin(theta/2.0)],\n [np.sin(theta/2.0), np.cos(theta/2.0)]], dtype=complex)\n Rzlambda = np.array([[np.exp(-1j*lamb/2.0), 0],\n [0, np.exp(1j*lamb/2.0)]], dtype=complex)\n V = np.dot(Rzphi, np.dot(Rytheta, Rzlambda))\n if la.norm(V - U) > _CUTOFF_PRECISION:\n raise QiskitError(\"euler_angles_1q: incorrect result\")\n return theta, phi, lamb", "docstring": "Compute Euler angles for a single-qubit gate.\n\nFind angles (theta, phi, lambda) such that\nunitary_matrix = phase * Rz(phi) * Ry(theta) * Rz(lambda)\n\nArgs:\nunitary_matrix (ndarray): 2x2 unitary matrix\n\nReturns:\ntuple: (theta, phi, lambda) Euler angles of SU(2)\n\nRaises:\nQiskitError: if unitary_matrix not 2x2, or failure", "source": "juraj-google-style"} {"code": "def collective_manager_ids_from_op(op):\n if op.type == 'CollectiveReduce':\n try:\n return [op.get_attr('_collective_manager_id')]\n except ValueError:\n pass\n elif op.type == 'StatefulPartitionedCall':\n try:\n return op.get_attr(utils.COLLECTIVE_MANAGER_IDS)\n except ValueError:\n pass\n return []", 
"docstring": "Returns CollectiveManager ID from the op if one exists, else None.\n\nCollectiveManager adds collective and no_op operations tagged with an ID,\nunique to the manager object. This function extracts that ID, or None, if the\nnode was not generated by a CollectiveManager.\n\nArgs:\nop: `Operation` to get the collective manager ID from.\n\nReturns:\nList of CollectiveManager IDs used by the op.", "source": "github-repos"} {"code": "def start(authkey, queues, mode='local'):\n \n global mgr, qdict, kdict\n qdict.clear()\n kdict.clear()\n for q in queues:\n qdict[q] = JoinableQueue()\n\n TFManager.register('get_queue', callable=lambda qname: _get_queue(qname))\n TFManager.register('get', callable=lambda key: _get(key))\n TFManager.register('set', callable=lambda key, value: _set(key, value))\n if mode == 'remote':\n mgr = TFManager(address=('', 0), authkey=authkey)\n else:\n mgr = TFManager(authkey=authkey)\n mgr.start()\n return mgr", "docstring": "Create a new multiprocess.Manager (or return existing one).\n\nArgs:\n:authkey: string authorization key\n:queues: *INTERNAL_USE*\n:mode: 'local' indicates that the manager will only be accessible from the same host, otherwise remotely accessible.\n\nReturns:\nA TFManager instance, which is also cached in local memory of the Python worker process.", "source": "juraj-google-style"} {"code": "def _constant_to_value(self, pyval, subst, get_node):\n if isinstance(pyval, str):\n return self.build_concrete_value(pyval, str)\n elif isinstance(pyval, bytes):\n return self.build_concrete_value(pyval, bytes)\n elif isinstance(pyval, bool):\n return self.true if pyval else self.false\n elif isinstance(pyval, int) and -1 <= pyval <= _MAX_IMPORT_DEPTH:\n return self.build_concrete_value(pyval, int)\n elif pyval.__class__ in self.primitive_classes:\n return self.primitive_instances[pyval.__class__]\n elif pyval.__class__ is frozenset:\n return self._frozenset_literal_to_value(pyval)\n elif isinstance(pyval, (pycnite.types.CodeTypeBase, blocks.OrderedCode)):\n return abstract.ConcreteValue(pyval, self.primitive_classes[types.CodeType], self.ctx)\n elif pyval is super:\n return special_builtins.Super.make(self.ctx)\n elif pyval is object:\n return special_builtins.Object.make(self.ctx)\n elif pyval.__class__ is type:\n try:\n return self.lookup_value(*self._type_to_name(pyval), subst)\n except (KeyError, AttributeError):\n log.debug('Failed to find pytd', exc_info=True)\n raise\n elif isinstance(pyval, abstract_utils.AsInstance):\n cls = pyval.cls\n if isinstance(cls, pytd.LateType):\n actual = self._load_late_type(cls)\n if not isinstance(actual, pytd.ClassType):\n return self.unsolvable\n cls = actual.cls\n if isinstance(cls, pytd.ClassType):\n cls = cls.cls\n if isinstance(cls, pytd.GenericType) and cls.name == 'typing.ClassVar':\n param, = cls.parameters\n return self.constant_to_value(abstract_utils.AsInstance(param), subst)\n elif isinstance(cls, pytd.GenericType) or (isinstance(cls, pytd.Class) and cls.template):\n if isinstance(cls, pytd.Class):\n params = tuple((t.type_param.upper_value for t in cls.template))\n cls = pytd.GenericType(base_type=pytd.ClassType(cls.name, cls), parameters=params)\n return self._pytd_generic_type_to_instance_value(cls, subst, get_node)\n elif isinstance(cls, pytd.Class):\n return self._pytd_class_to_instance_value(cls, subst)\n elif isinstance(cls, pytd.Literal):\n return self._get_literal_value(cls.value, subst)\n else:\n return self.constant_to_value(cls, subst)\n elif isinstance(pyval, pytd.Node):\n return 
self._pytd_constant_to_value(pyval, subst, get_node)\n elif pyval.__class__ is tuple:\n return self._tuple_literal_to_value(pyval)\n else:\n raise NotImplementedError(f\"Can't convert constant {type(pyval)} {pyval!r}\")", "docstring": "Create a BaseValue that represents a python constant.\n\nThis supports both constant from code constant pools and PyTD constants such\nas classes. This also supports builtin python objects such as int and float.\n\nArgs:\npyval: The python or PyTD value to convert.\nsubst: The current type parameters.\nget_node: A getter function for the current node.\n\nReturns:\nA Value that represents the constant, or None if we couldn't convert.\nRaises:\nNotImplementedError: If we don't know how to convert a value.\nTypeParameterError: If we can't find a substitution for a type parameter.", "source": "github-repos"} {"code": "def _get_cached_response_from_django_cache(key):\n \n if TieredCache._should_force_django_cache_miss():\n return CachedResponse(is_found=False, key=key, value=None)\n\n cached_value = django_cache.get(key, _CACHE_MISS)\n is_found = cached_value is not _CACHE_MISS\n return CachedResponse(is_found, key, cached_value)", "docstring": "Retrieves a CachedResponse for the given key from the django cache.\n\nIf the request was set to force cache misses, then this will always\nreturn a cache miss response.\n\nArgs:\nkey (string)\n\nReturns:\nA CachedResponse with is_found status and value.", "source": "juraj-google-style"} {"code": "def equivalent_to(std_function):\n\n def decorate(cos_function):\n 'Decorator argument handler'\n\n @wraps(cos_function)\n def decorated(path, *args, **kwargs):\n 'Decorated function'\n path = fsdecode(path).replace('\\\\', '/')\n if is_storage(path):\n with handle_os_exceptions():\n return cos_function(path, *args, **kwargs)\n return std_function(path, *args, **kwargs)\n return decorated\n return decorate", "docstring": "Decorates a cloud object compatible function\nto provides fall back to standard function if\nused on local files.\n\nArgs:\nstd_function (function): standard function to\nused with local files.\n\nReturns:\nfunction: new function", "source": "codesearchnet"} {"code": "def StartsWithIgnoreCase(self, value):\n \n self._awql = self._CreateSingleValueCondition(value,\n 'STARTS_WITH_IGNORE_CASE')\n return self._query_builder", "docstring": "Sets the type of the WHERE clause as \"starts with ignore case\".\n\nArgs:\nvalue: The value to be used in the WHERE condition.\n\nReturns:\nThe query builder that this WHERE builder links to.", "source": "juraj-google-style"} {"code": "def get_geometry(self):\n return (self.thet0, self.thet, self.phi0, self.phi, self.alpha, self.beta)", "docstring": "A convenience function to get the geometry variables.\n\nReturns:\nA tuple containing (thet0, thet, phi0, phi, alpha, beta).\nSee the Scatterer class documentation for a description of these\nangles.", "source": "codesearchnet"} {"code": "def predict_proba(self, L):\n \n self._set_constants(L)\n\n L_aug = self._get_augmented_label_matrix(L)\n mu = np.clip(self.mu.detach().clone().numpy(), 0.01, 0.99)\n\n \n if len(self.deps) > 0:\n jtm = np.zeros(L_aug.shape[1])\n\n \n for i in self.c_tree.nodes():\n node = self.c_tree.node[i]\n jtm[node[\"start_index\"] : node[\"end_index\"]] = 1\n\n \n for i, j in self.c_tree.edges():\n edge = self.c_tree[i][j]\n jtm[edge[\"start_index\"] : edge[\"end_index\"]] = 1\n else:\n jtm = np.ones(L_aug.shape[1])\n\n \n X = np.exp(L_aug @ np.diag(jtm) @ np.log(mu) + np.log(self.p))\n Z = 
np.tile(X.sum(axis=1).reshape(-1, 1), self.k)\n return X / Z", "docstring": "Returns the [n,k] matrix of label probabilities P(Y | \\lambda)\n\nArgs:\nL: An [n,m] scipy.sparse label matrix with values in {0,1,...,k}", "source": "juraj-google-style"} {"code": "def get_points_and_weights(w_func=(lambda x: np.ones(x.shape)), left=(- 1.0), right=1.0, num_points=5, n=4096):\n dx = ((float(right) - left) / n)\n z = np.hstack(np.linspace((left + (0.5 * dx)), (right - (0.5 * dx)), n))\n w = (dx * w_func(z))\n (a, b) = discrete_gautschi(z, w, num_points)\n alpha = a\n beta = np.sqrt(b)\n J = np.diag(alpha)\n J += np.diag(beta, k=(- 1))\n J += np.diag(beta, k=1)\n (points, v) = np.linalg.eigh(J)\n ind = points.argsort()\n points = points[ind]\n weights = ((v[(0, :)] ** 2) * w.sum())\n weights = weights[ind]\n return (points, weights)", "docstring": "Quadrature points and weights for a weighting function.\n\nPoints and weights for approximating the integral\nI = \\int_left^right f(x) w(x) dx\ngiven the weighting function w(x) using the approximation\nI ~ w_i f(x_i)\n\nArgs:\nw_func: The weighting function w(x). Must be a function that takes\none argument and is valid over the open interval (left, right).\nleft: The left boundary of the interval\nright: The right boundary of the interval\nnum_points: number of integration points to return\nn: the number of points to evaluate w_func at.\n\nReturns:\nA tuple (points, weights) where points is a sorted array of the\npoints x_i and weights gives the corresponding weights w_i.", "source": "codesearchnet"} {"code": "def _matches_version(actual_version, required_version):\n if actual_version is None:\n return False\n actual_version = actual_version.strip()\n required_version = required_version.strip()\n return actual_version.startswith(required_version)", "docstring": "Checks whether some version meets the requirements.\n\nAll elements of the required_version need to be present in the\nactual_version.\n\nrequired_version actual_version result\n-----------------------------------------\n1 1.1 True\n1.2 1 False\n1.2 1.3 False\n1 True\n\nArgs:\nrequired_version: The version specified by the user.\nactual_version: The version detected from the CUDA installation.\nReturns: Whether the actual version matches the required one.", "source": "github-repos"} {"code": "def normalize(self, mode='max', value=1):\n if (mode.lower() == 'sum'):\n factor = np.sum(self.y, axis=0)\n elif (mode.lower() == 'max'):\n factor = np.max(self.y, axis=0)\n else:\n raise ValueError(('Unsupported normalization mode %s!' % mode))\n self.y /= (factor / value)", "docstring": "Normalize the spectrum with respect to the sum of intensity\n\nArgs:\nmode (str): Normalization mode. Supported modes are \"max\" (set the\nmax y value to value, e.g., in XRD patterns), \"sum\" (set the\nsum of y to a value, i.e., like a probability density).\nvalue (float): Value to normalize to. 
Defaults to 1.", "source": "codesearchnet"} {"code": "def __init__(self, identifier=None):\n \n super(SessionCompletion, self).__init__()\n self.aborted = False\n self.analysis_reports_counter = None\n self.event_labels_counter = None\n self.identifier = identifier\n self.parsers_counter = None\n self.timestamp = None", "docstring": "Initializes a session completion attribute container.\n\nArgs:\nidentifier (Optional[str]): unique identifier of the session.\nThe identifier should match that of the corresponding\nsession start information.", "source": "juraj-google-style"} {"code": "def spawn(self, function, *args, **kwargs):\n assert (self.state != STOPPED), \"Can't spawn when process stopped\"\n spawned = Spawned(function, args, kwargs)\n self._spawned.append(spawned)\n self._spawn_count += 1\n if (self._spawn_count > SPAWN_CLEAR_COUNT):\n self._clear_spawn_list()\n return spawned", "docstring": "Runs the function in a worker thread, returning a Result object\n\nArgs:\nfunction: Function to run\nargs: Positional arguments to run the function with\nkwargs: Keyword arguments to run the function with\n\nReturns:\nSpawned: Something you can call wait(timeout) on to see when it's\nfinished executing", "source": "codesearchnet"} {"code": "def from_prev_calc(cls, prev_calc_dir, copy_wavecar=True, mode='DIAG', nbands_factor=5, ncores=16, **kwargs):\n (vasprun, outcar) = get_vasprun_outcar(prev_calc_dir)\n prev_incar = vasprun.incar\n structure = vasprun.final_structure\n nbands = int(vasprun.parameters['NBANDS'])\n if (mode.upper() == 'DIAG'):\n nbands = int((np.ceil(((nbands * nbands_factor) / ncores)) * ncores))\n files_to_transfer = {}\n if copy_wavecar:\n for fname in ('WAVECAR', 'WAVEDER', 'WFULL'):\n w = sorted(glob.glob(str((Path(prev_calc_dir) / (fname + '*')))))\n if w:\n if (fname == 'WFULL'):\n for f in w:\n fname = Path(f).name\n fname = fname.split('.')[0]\n files_to_transfer[fname] = f\n else:\n files_to_transfer[fname] = str(w[(- 1)])\n return cls(structure=structure, prev_incar=prev_incar, nbands=nbands, mode=mode, files_to_transfer=files_to_transfer, **kwargs)", "docstring": "Generate a set of Vasp input files for GW or BSE calculations from a\ndirectory of previous Exact Diag Vasp run.\n\nArgs:\nprev_calc_dir (str): The directory contains the outputs(\nvasprun.xml of previous vasp run.\ncopy_wavecar: Whether to copy the old WAVECAR, WAVEDER and\nassociated files. Defaults to True.\nmode (str): Supported modes are \"STATIC\", \"DIAG\" (default), \"GW\",\nand \"BSE\".\nnbands_factor (int): Multiplicative factor for NBANDS. Only applies\nif mode==\"DIAG\". Need to be tested for convergence.\nncores (int): numbers of cores you do calculations. VASP will alter\nNBANDS if it was not dividable by ncores. 
Only applies\nif mode==\"DIAG\".\n\\\\*\\\\*kwargs: All kwargs supported by MVLGWSet,\nother than structure, prev_incar and mode, which\nare determined from the prev_calc_dir.", "source": "codesearchnet"} {"code": "def lease(self, items):\n self._manager.leaser.add(items)\n self._manager.maybe_pause_consumer()", "docstring": "Add the given messages to lease management.\n\nArgs:\nitems(Sequence[LeaseRequest]): The items to lease.", "source": "codesearchnet"} {"code": "def create_unique_autosave_filename(self, filename, autosave_dir):\n \n basename = osp.basename(filename)\n autosave_filename = osp.join(autosave_dir, basename)\n if autosave_filename in self.name_mapping.values():\n counter = 0\n root, ext = osp.splitext(basename)\n while autosave_filename in self.name_mapping.values():\n counter += 1\n autosave_basename = '{}-{}{}'.format(root, counter, ext)\n autosave_filename = osp.join(autosave_dir, autosave_basename)\n return autosave_filename", "docstring": "Create unique autosave file name for specified file name.\n\nArgs:\nfilename (str): original file name\nautosave_dir (str): directory in which autosave files are stored", "source": "juraj-google-style"} {"code": "def select_action(self, state_key, next_action_list):\n \n if self.q_df is None or self.q_df.shape[0] == 0:\n return random.choice(next_action_list)\n\n next_action_b_df = self.__calculate_boltzmann_factor(state_key, next_action_list)\n\n if next_action_b_df.shape[0] == 1:\n return next_action_b_df[\"action_key\"].values[0]\n\n prob = np.random.random()\n next_action_b_df = next_action_b_df.sort_values(by=[\"boltzmann_factor\"])\n\n i = 0\n while prob > next_action_b_df.iloc[i, :][\"boltzmann_factor\"] + next_action_b_df.iloc[i + 1, :][\"boltzmann_factor\"]:\n i += 1\n if i + 1 >= next_action_b_df.shape[0]:\n break\n\n max_b_action_key = next_action_b_df.iloc[i, :][\"action_key\"]\n return max_b_action_key", "docstring": "Select action by Q(state, action).\n\nConcreat method for boltzmann distribution.\n\nArgs:\nstate_key: The key of state.\nnext_action_list: The possible action in `self.t+1`.\nIf the length of this list is 0, all action should be possible.\n\nReturns:\nThe key of action.", "source": "juraj-google-style"} {"code": "def DoesNotContain(self, value):\n \n self._awql = self._CreateSingleValueCondition(value, 'DOES_NOT_CONTAIN')\n return self._query_builder", "docstring": "Sets the type of the WHERE clause as \"does not contain\".\n\nArgs:\nvalue: The value to be used in the WHERE condition.\n\nReturns:\nThe query builder that this WHERE builder links to.", "source": "juraj-google-style"} {"code": "def date_to_integer(date):\n if (pd and isinstance(date, pd.Timestamp)):\n try:\n date = date.to_datetime64()\n except:\n date = date.to_datetime()\n if isinstance(date, np.datetime64):\n return date.astype('datetime64[ms]').astype(float)\n elif isinstance(date, cftime_types):\n return cftime_to_timestamp(date, 'ms')\n if hasattr(date, 'timetuple'):\n dt_int = (calendar.timegm(date.timetuple()) * 1000)\n else:\n raise ValueError('Datetime type not recognized')\n return dt_int", "docstring": "Converts support date types to milliseconds since epoch\n\nAttempts highest precision conversion of different datetime\nformats to milliseconds since the epoch (1970-01-01 00:00:00).\nIf datetime is a cftime with a non-standard calendar the\ncaveats described in hv.core.util.cftime_to_timestamp apply.\n\nArgs:\ndate: Date- or datetime-like object\n\nReturns:\nMilliseconds since 1970-01-01 00:00:00", "source": "codesearchnet"} 
{"code": "def poisson_source(rate, iterable, target):\n if (rate <= 0.0):\n raise ValueError('poisson_source rate {} is not positive'.format(rate))\n it = iter(iterable)\n for item in it:\n duration = random.expovariate(rate)\n sleep(duration)\n try:\n target.send(item)\n except StopIteration:\n return prepend(item, it)\n return empty_iter()", "docstring": "Send events at random times with uniform probability.\n\nArgs:\nrate: The average number of events to send per second.\niterable: A series of items which will be sent to the target one by one.\ntarget: The target coroutine or sink.\n\nReturns:\nAn iterator over any remaining items.", "source": "codesearchnet"} {"code": "async def _handle_conversation_delta(self, conversation):\n conv_id = conversation.conversation_id.id\n conv = self._conv_dict.get(conv_id, None)\n if (conv is None):\n (await self._get_or_fetch_conversation(conv_id))\n else:\n conv.update_conversation(conversation)", "docstring": "Receive Conversation delta and create or update the conversation.\n\nArgs:\nconversation: hangouts_pb2.Conversation instance\n\nRaises:\nNetworkError: A request to fetch the complete conversation failed.", "source": "codesearchnet"} {"code": "def open_street_map_geoloc_link(data):\n \n if isinstance(data, str):\n lat_lon = ip_geoloc(data)\n if lat_lon is None:\n return ''\n lat, lon = lat_lon\n else:\n lat, lon = data\n return 'https:\n '?query=%s%%2C%s", "docstring": "Get a link to open street map pointing on this IP's geolocation.\n\nArgs:\ndata (str/tuple): IP address or (latitude, longitude).\n\nReturns:\nstr: a link to open street map pointing on this IP's geolocation.", "source": "juraj-google-style"} {"code": "def extract_list_from_list_of_dict(list_of_dict, key):\n \n \n result = list()\n for dictionary in list_of_dict:\n result.append(dictionary[key])\n return result", "docstring": "Extract a list by looking up key in each member of a list of dictionaries\n\nArgs:\nlist_of_dict (List[DictUpperBound]): List of dictionaries\nkey (Any): Key to find in each dictionary\n\nReturns:\nList: List containing values returned from each dictionary", "source": "juraj-google-style"} {"code": "def DeleteRecords(cls, ids, token):\n \n with data_store.DB.GetMutationPool() as mutation_pool:\n mutation_pool.QueueDeleteRecords(ids)", "docstring": "Delete records identified by ids.\n\nArgs:\nids: A list of ids provided by ClaimRecords.\ntoken: The database access token to delete with.\n\nRaises:\nLockError: If the queue is not locked.", "source": "juraj-google-style"} {"code": "def days(start, end=None):\n \n return iterate.between(start, datetime.timedelta(days=1), end)", "docstring": "Iterate over the days between the given datetime_tzs.\n\nArgs:\nstart: datetime_tz to start from.\nend: (Optional) Date to end at, if not given the iterator will never\nterminate.\n\nReturns:\nAn iterator which generates datetime_tz objects a day apart.", "source": "juraj-google-style"} {"code": "def parse_genetic_models(models_info, case_id):\n \n genetic_models = []\n if models_info:\n for family_info in models_info.split(','):\n splitted_info = family_info.split(':')\n if splitted_info[0] == case_id:\n genetic_models = splitted_info[1].split('|')\n\n return genetic_models", "docstring": "Parse the genetic models entry of a vcf\n\nArgs:\nmodels_info(str): The raw vcf information\ncase_id(str)\n\nReturns:\ngenetic_models(list)", "source": "juraj-google-style"} {"code": "def unwrap(data_type):\n unwrapped_nullable = False\n unwrapped_alias = False\n while 
(is_alias(data_type) or is_nullable_type(data_type)):\n if is_nullable_type(data_type):\n unwrapped_nullable = True\n if is_alias(data_type):\n unwrapped_alias = True\n data_type = data_type.data_type\n return (data_type, unwrapped_nullable, unwrapped_alias)", "docstring": "Convenience method to unwrap all Aliases and Nullables from around a\nDataType. This checks for nullable wrapping aliases, as well as aliases\nwrapping nullables.\n\nArgs:\ndata_type (DataType): The target to unwrap.\n\nReturn:\nTuple[DataType, bool, bool]: The underlying data type; a bool that is\nset if a nullable was present; a bool that is set if an alias was\npresent.", "source": "codesearchnet"} {"code": "def line(self, x0, y0, x1, y1, char):\n if (x0 > x1):\n (x1, x0) = (x0, x1)\n (y1, y0) = (y0, y1)\n dx = (x1 - x0)\n dy = (y1 - y0)\n if ((dx == 0) and (dy == 0)):\n self.point(x0, y0, char)\n elif (abs(dx) >= abs(dy)):\n for x in range(x0, (x1 + 1)):\n if (dx == 0):\n y = y0\n else:\n y = (y0 + int(round((((x - x0) * dy) / float(dx)))))\n self.point(x, y, char)\n elif (y0 < y1):\n for y in range(y0, (y1 + 1)):\n if (dy == 0):\n x = x0\n else:\n x = (x0 + int(round((((y - y0) * dx) / float(dy)))))\n self.point(x, y, char)\n else:\n for y in range(y1, (y0 + 1)):\n if (dy == 0):\n x = x0\n else:\n x = (x1 + int(round((((y - y1) * dx) / float(dy)))))\n self.point(x, y, char)", "docstring": "Create a line on ASCII canvas.\n\nArgs:\nx0 (int): x coordinate where the line should start.\ny0 (int): y coordinate where the line should start.\nx1 (int): x coordinate where the line should end.\ny1 (int): y coordinate where the line should end.\nchar (str): character to draw the line with.", "source": "codesearchnet"} {"code": "def is_empty(self):\n return self._empty", "docstring": "Determines whether or not anything has been parsed with this\ninstrumentation block.\n\nReturns:\nA boolean indicating whether or not this instrumentation block\nhas parsed and contains any output.", "source": "github-repos"} {"code": "def FromEvent(cls, service_event):\n (_, _, name) = service_event.key_path.rpartition(WindowsService._REGISTRY_KEY_PATH_SEPARATOR)\n service_type = service_event.regvalue.get('Type', '')\n image_path = service_event.regvalue.get('ImagePath', '')\n start_type = service_event.regvalue.get('Start', '')\n service_dll = service_event.regvalue.get('ServiceDll', '')\n object_name = service_event.regvalue.get('ObjectName', '')\n if service_event.pathspec:\n source = (service_event.pathspec.location, service_event.key_path)\n else:\n source = ('Unknown', 'Unknown')\n return cls(name=name, service_type=service_type, image_path=image_path, start_type=start_type, object_name=object_name, source=source, service_dll=service_dll)", "docstring": "Creates a service object from an event.\n\nArgs:\nservice_event (EventObject): event to create a new service object from.\n\nReturns:\nWindowsService: service.", "source": "codesearchnet"} {"code": "def ptb_producer(raw_data, batch_size, num_steps, name=None):\n with tf.name_scope(name, 'PTBProducer', [raw_data, batch_size, num_steps]):\n raw_data = tf.convert_to_tensor(raw_data, name='raw_data', dtype=tf.int32)\n data_len = tf.size(raw_data)\n batch_len = (data_len // batch_size)\n data = tf.reshape(raw_data[0:(batch_size * batch_len)], [batch_size, batch_len])\n epoch_size = ((batch_len - 1) // num_steps)\n assertion = tf.assert_positive(epoch_size, message='epoch_size == 0, decrease batch_size or num_steps')\n with tf.control_dependencies([assertion]):\n epoch_size = tf.identity(epoch_size, 
name='epoch_size')\n i = tf.train.range_input_producer(epoch_size, shuffle=False).dequeue()\n x = tf.strided_slice(data, [0, (i * num_steps)], [batch_size, ((i + 1) * num_steps)])\n x.set_shape([batch_size, num_steps])\n y = tf.strided_slice(data, [0, ((i * num_steps) + 1)], [batch_size, (((i + 1) * num_steps) + 1)])\n y.set_shape([batch_size, num_steps])\n return (x, y)", "docstring": "Iterate on the raw PTB data.\n\nThis chunks up raw_data into batches of examples and returns Tensors that\nare drawn from these batches.\n\nArgs:\nraw_data: one of the raw data outputs from ptb_raw_data.\nbatch_size: int, the batch size.\nnum_steps: int, the number of unrolls.\nname: the name of this operation (optional).\n\nReturns:\nA pair of Tensors, each shaped [batch_size, num_steps]. The second element\nof the tuple is the same data time-shifted to the right by one.\n\nRaises:\ntf.errors.InvalidArgumentError: if batch_size or num_steps are too high.", "source": "codesearchnet"} {"code": "def preconnect(self, size=-1):\n \n if size == -1 and self.max_size == -1:\n raise ClientError(\"size=-1 not allowed with pool max_size=-1\")\n limit = min(size, self.max_size) if size != -1 else self.max_size\n clients = yield [self.get_connected_client() for _ in range(0, limit)]\n for client in clients:\n self.release_client(client)", "docstring": "(pre)Connects some or all redis clients inside the pool.\n\nArgs:\nsize (int): number of redis clients to build and to connect\n(-1 means all clients if pool max_size > -1)\n\nRaises:\nClientError: when size == -1 and pool max_size == -1", "source": "juraj-google-style"} {"code": "def raster_reclassify(srcfile, v_dict, dstfile, gdaltype=GDT_Float32):\n src_r = RasterUtilClass.read_raster(srcfile)\n src_data = src_r.data\n dst_data = numpy.copy(src_data)\n if ((gdaltype == GDT_Float32) and (src_r.dataType != GDT_Float32)):\n gdaltype = src_r.dataType\n no_data = src_r.noDataValue\n new_no_data = DEFAULT_NODATA\n if (gdaltype in [GDT_Unknown, GDT_Byte, GDT_UInt16, GDT_UInt32]):\n new_no_data = 0\n if (not MathClass.floatequal(new_no_data, src_r.noDataValue)):\n if (src_r.noDataValue not in v_dict):\n v_dict[src_r.noDataValue] = new_no_data\n no_data = new_no_data\n for (k, v) in iteritems(v_dict):\n dst_data[(src_data == k)] = v\n RasterUtilClass.write_gtiff_file(dstfile, src_r.nRows, src_r.nCols, dst_data, src_r.geotrans, src_r.srs, no_data, gdaltype)", "docstring": "Reclassify raster by given classifier dict.\n\nArgs:\nsrcfile: source raster file.\nv_dict: classifier dict.\ndstfile: destination file path.\ngdaltype (:obj:`pygeoc.raster.GDALDataType`): GDT_Float32 as default.", "source": "codesearchnet"} {"code": "def get_embedded_object(self, signature_id):\n request = self._get_request()\n return request.get((self.EMBEDDED_OBJECT_GET_URL + signature_id))", "docstring": "Retrieves a embedded signing object\n\nRetrieves an embedded object containing a signature url that can be opened in an iFrame.\n\nArgs:\n\nsignature_id (str): The id of the signature to get a signature url for\n\nReturns:\nAn Embedded object", "source": "codesearchnet"} {"code": "def fastcc_consistent_subset(model, epsilon, solver):\n reaction_set = set(model.reactions)\n return reaction_set.difference(fastcc(model, epsilon, solver))", "docstring": "Return consistent subset of model.\n\nThe largest consistent subset is returned as\na set of reaction names.\n\nArgs:\nmodel: :class:`MetabolicModel` to solve.\nepsilon: Flux threshold value.\nsolver: LP solver instance to use.\n\nReturns:\nSet of reaction 
IDs in the consistent reaction subset.", "source": "codesearchnet"} {"code": "def clone(self, *args, **overrides):\n clone = super(NdLayout, self).clone(*args, **overrides)\n clone._max_cols = self._max_cols\n clone.id = self.id\n return clone", "docstring": "Clones the NdLayout, overriding data and parameters.\n\nArgs:\ndata: New data replacing the existing data\nshared_data (bool, optional): Whether to use existing data\nnew_type (optional): Type to cast object to\n*args: Additional arguments to pass to constructor\n**overrides: New keyword arguments to pass to constructor\n\nReturns:\nCloned NdLayout object", "source": "codesearchnet"} {"code": "def notify(self, subsystem, recipient, subject, body_html, body_text):\n if (not re.match(self.validation, recipient, re.I)):\n raise ValueError('Invalid recipient provided')\n if recipient.startswith('#'):\n target_type = 'channel'\n elif (recipient.find('@') != (- 1)):\n target_type = 'user'\n else:\n self.log.error('Unknown contact type for Slack: {}'.format(recipient))\n return\n try:\n self._send_message(target_type=target_type, target=recipient, message=body_text, title=subject)\n except SlackError as ex:\n self.log.error('Failed sending message to {}: {}'.format(recipient, ex))", "docstring": "You can send messages either to channels or to private groups by using the following formats\n\n#channel-name\n@username-direct-message\n\nArgs:\nsubsystem (`str`): Name of the subsystem originating the notification\nrecipient (`str`): Recipient\nsubject (`str`): Subject / title of the notification, not used for this notifier\nbody_html (`str`): HTML formatted version of the message, not used for this notifier\nbody_text (`str`): Text formatted version of the message\n\nReturns:\n`None`", "source": "codesearchnet"} {"code": "def cancel(**kwargs):\n task_list = _query(**kwargs)\n for task in task_list:\n task.status = WorkQueue.CANCELED\n task.finished = datetime.datetime.utcnow()\n db.session.add(task)\n return len(task_list)", "docstring": "Cancels work items based on their criteria.\n\nArgs:\n**kwargs: Same parameters as the query() method.\n\nReturns:\nThe number of tasks that were canceled.", "source": "codesearchnet"} {"code": "def __eq__(self, other: Any) -> bool:\n if isinstance(other, str):\n return self.path == other\n return isinstance(other, KeyPath) and self.keys == other.keys", "docstring": "Equality check.\n\nArgs:\nother: A string or a KeyPath.\n\nReturns:\nWhether JSON-path representation (either absolute or relative form)\nof current path equals to other.", "source": "github-repos"} {"code": "def device_name(self):\n return self._device_name", "docstring": "Name of the device that the tensor belongs to.\n\nReturns:\n(`str`) device name.", "source": "github-repos"} {"code": "def remove_unused_links(self, used):\n unused = []\n self._execute('SELECT * FROM {}'.format(self.LINK_STATE_TABLE))\n for row in self.cursor:\n (relpath, inode, mtime) = row\n inode = self._from_sqlite(inode)\n path = os.path.join(self.root_dir, relpath)\n if (path in used):\n continue\n if (not os.path.exists(path)):\n continue\n actual_inode = get_inode(path)\n (actual_mtime, _) = get_mtime_and_size(path)\n if ((inode == actual_inode) and (mtime == actual_mtime)):\n logger.debug(\"Removing '{}' as unused link.\".format(path))\n remove(path)\n unused.append(relpath)\n for relpath in unused:\n cmd = 'DELETE FROM {} WHERE path = \"{}\"'\n self._execute(cmd.format(self.LINK_STATE_TABLE, relpath))", "docstring": "Removes all saved links except the ones that are 
used.\n\nArgs:\nused (list): list of used links that should not be removed.", "source": "codesearchnet"} {"code": "def allow_partial(allow: Optional[bool]=True) -> ContextManager[None]:\n return thread_local.thread_local_value_scope(_TLS_ALLOW_PARTIAL, allow, None)", "docstring": "Returns a context manager that allows partial values in scope.\n\nThis function is thread-safe and can be nested. In the nested use case, the\nallow flag of immediate parent context is effective.\n\nExample::\n\n@pg.members([\n('x', pg.typing.Int()),\n('y', pg.typing.Int())\n])\nclass A(pg.Object):\npass\n\nwith pg.allow_partial(True):\na = A(x=1) # Missing `y`, but OK\nwith pg.allow_partial(False):\na.rebind(x=pg.MISSING_VALUE) # NOT OK\na.rebind(x=pg.MISSING_VALUE) # OK\n\nArgs:\nallow: If True, allow partial symbolic values in scope.\nIf False, do not allow partial symbolic values in scope even if\nindividual objects allow so. If None, honor object-level\n`allow_partial` property.\n\nReturns:\nA context manager that allows/disallow partial symbolic values in scope.\nAfter leaving the scope, the `allow_partial` state of individual objects\nwill remain intact.", "source": "github-repos"} {"code": "def update_estimator_from_task(estimator, task_id, task_type):\n if (task_type is None):\n return\n if (task_type.lower() == 'training'):\n training_job = (\"{{ ti.xcom_pull(task_ids='%s')['Training']['TrainingJobName'] }}\" % task_id)\n job_name = training_job\n elif (task_type.lower() == 'tuning'):\n training_job = (\"{{ ti.xcom_pull(task_ids='%s')['Tuning']['BestTrainingJob']['TrainingJobName'] }}\" % task_id)\n job_name = (\"{{ ti.xcom_pull(task_ids='%s')['Tuning']['TrainingJobDefinition']['StaticHyperParameters']['sagemaker_job_name'].strip('%s') }}\" % (task_id, '\"'))\n else:\n raise ValueError(\"task_type must be either 'training', 'tuning' or None.\")\n estimator._current_job_name = training_job\n if isinstance(estimator, sagemaker.estimator.Framework):\n update_submit_s3_uri(estimator, job_name)", "docstring": "Update training job of the estimator from a task in the DAG\n\nArgs:\nestimator (sagemaker.estimator.EstimatorBase): The estimator to update\ntask_id (str): The task id of any airflow.contrib.operators.SageMakerTrainingOperator or\nairflow.contrib.operators.SageMakerTuningOperator that generates training jobs in the DAG.\ntask_type (str): Whether the task is from SageMakerTrainingOperator or SageMakerTuningOperator. 
Values can be\n'training', 'tuning' or None (which means training job is not from any task).", "source": "codesearchnet"} {"code": "def scale_translation(self, trans_scale_factor: float) -> Rigid:\n return self.apply_trans_fn(lambda t: t * trans_scale_factor)", "docstring": "Scales the translation by a constant factor.\n\nArgs:\ntrans_scale_factor:\nThe constant factor\nReturns:\nA transformation object with a scaled translation.", "source": "github-repos"} {"code": "def mac_app_exists(app):\n\t\n\n\tAPP_CHECK_APPLESCRIPT = \n\n\twith open('/tmp/app_check.AppleScript', 'w') as f:\n\t\tf.write(APP_CHECK_APPLESCRIPT % app)\n\n\tapp_check_proc = sp.Popen(\n\t\t['osascript', '-e', '/tmp/app_check.AppleScript'])\n\n\tif app_check_proc.wait() != 0:\n\t\treturn False\n\n\telse:\n\t\treturn True", "docstring": "Check if 'app' is installed (OS X).\n\nCheck if the given applications is installed on this OS X system.\n\nArgs:\napp (str): The application name.\n\nReturns:\nbool: Is the app installed or not?", "source": "juraj-google-style"} {"code": "def key_for_namespace(cls, namespace):\n if namespace:\n return model.Key(cls.KIND_NAME, namespace)\n else:\n return model.Key(cls.KIND_NAME, cls.EMPTY_NAMESPACE_ID)", "docstring": "Return the Key for a namespace.\n\nArgs:\nnamespace: A string giving the namespace whose key is requested.\n\nReturns:\nThe Key for the namespace.", "source": "codesearchnet"} {"code": "def RemoveKeywordsForName(self, name, keywords):\n data_store.DB.IndexRemoveKeywordsForName(self.urn, name, keywords)", "docstring": "Removes keywords for a name.\n\nArgs:\nname: A name which should not be associated with some keywords anymore.\nkeywords: A collection of keywords.", "source": "codesearchnet"} {"code": "def _verify_static_batch_size_equality(tensors, columns):\n expected_batch_size = None\n for i in range(0, len(tensors)):\n batch_size = tensor_shape.Dimension(tensor_shape.dimension_value(tensors[i].shape[0]))\n if batch_size.value is not None:\n if expected_batch_size is None:\n bath_size_column_index = i\n expected_batch_size = batch_size\n elif not expected_batch_size.is_compatible_with(batch_size):\n raise ValueError('Batch size (first dimension) of each feature must be same. Batch size of columns ({}, {}): ({}, {})'.format(columns[bath_size_column_index].name, columns[i].name, expected_batch_size, batch_size))", "docstring": "Verify equality between static batch sizes.\n\nArgs:\ntensors: iterable of input tensors.\ncolumns: Corresponding feature columns.\n\nRaises:\nValueError: in case of mismatched batch sizes.", "source": "github-repos"} {"code": "def usergroups_create(self, *, name: str, **kwargs) -> SlackResponse:\n \n self._validate_xoxp_token()\n kwargs.update({\"name\": name})\n return self.api_call(\"usergroups.create\", json=kwargs)", "docstring": "Create a User Group\n\nArgs:\nname (str): A name for the User Group. Must be unique among User Groups.\ne.g. 
'My Test Team'", "source": "juraj-google-style"} {"code": "def update_port_monitor(self, resource, timeout=(- 1)):\n data = resource.copy()\n if ('type' not in data):\n data['type'] = 'port-monitor'\n uri = '{}{}'.format(self.data['uri'], self.PORT_MONITOR_PATH)\n return self._helper.update(data, uri=uri, timeout=timeout)", "docstring": "Updates the port monitor configuration of a logical interconnect.\n\nArgs:\nresource: Port monitor configuration.\n\nReturns:\ndict: Port monitor configuration.", "source": "codesearchnet"} {"code": "def replaceext(filepath, new_ext):\n \n if new_ext and new_ext[0] != '.':\n new_ext = '.' + new_ext\n\n root, ext = os.path.splitext(safepath(filepath))\n return root + new_ext", "docstring": "Replace any existing file extension with a new one\n\nExample::\n\n>>> replaceext('/foo/bar.txt', 'py')\n'/foo/bar.py'\n>>> replaceext('/foo/bar.txt', '.doc')\n'/foo/bar.doc'\n\nArgs:\nfilepath (str, path): file path\nnew_ext (str): new file extension; if a leading dot is not included,\nit will be added.\n\nReturns:\nTuple[str]", "source": "juraj-google-style"} {"code": "def update_nsval(self, *, nsval: str=None, ns: str=None, val: str=None) -> None:\n if ((not (ns and val)) and nsval):\n (ns, val) = nsval.split(':', 1)\n elif ((not (ns and val)) and (not nsval)):\n log.error('Did not update NSArg - no ns:val or nsval provided')\n self.namespace = ns\n self.value = val", "docstring": "Update Namespace and valueast.\n\nArgs:\nnsval: e.g. HGNC:AKT1\nns: namespace\nval: value of entity", "source": "codesearchnet"} {"code": "def calc_copulas(self,\n output_file,\n model_names=(\"start-time\", \"translation-x\", \"translation-y\"),\n label_columns=(\"Start_Time_Error\", \"Translation_Error_X\", \"Translation_Error_Y\")):\n \n if len(self.data['train']) == 0:\n self.load_data()\n groups = self.data[\"train\"][\"member\"][self.group_col].unique()\n copulas = {}\n label_columns = list(label_columns)\n for group in groups:\n print(group)\n group_data = self.data[\"train\"][\"total_group\"].loc[\n self.data[\"train\"][\"total_group\"][self.group_col] == group]\n group_data = group_data.dropna()\n group_data.reset_index(drop=True, inplace=True)\n copulas[group] = {}\n copulas[group][\"mean\"] = group_data[label_columns].mean(axis=0).values\n copulas[group][\"cov\"] = np.cov(group_data[label_columns].values.T)\n copulas[group][\"model_names\"] = list(model_names)\n del group_data\n pickle.dump(copulas, open(output_file, \"w\"), pickle.HIGHEST_PROTOCOL)", "docstring": "Calculate a copula multivariate normal distribution from the training data for each group of ensemble members.\nDistributions are written to a pickle file for later use.\nArgs:\noutput_file: Pickle file\nmodel_names: Names of the tracking models\nlabel_columns: Names of the data columns used for labeling\nReturns:", "source": "juraj-google-style"} {"code": "def visibility(self, value=9999.0):\n \n if value is not None:\n try:\n value = float(value)\n except ValueError:\n raise ValueError('value {} need to be of type float '\n 'for field `visibility`'.format(value))\n\n self._visibility = value", "docstring": "Corresponds to IDD Field `visibility` This is the value for\nvisibility in km. 
(Horizontal visibility at the time indicated.)\n\nArgs:\nvalue (float): value for IDD Field `visibility`\nUnit: km\nMissing value: 9999.0\nif `value` is None it will not be checked against the\nspecification and is assumed to be a missing value\n\nRaises:\nValueError: if `value` is not a valid value", "source": "juraj-google-style"} {"code": "def _resize_for_patching(self, image: np.array, target_resolution: tuple, resample, input_data_format: ChannelDimension) -> np.array:\n new_height, new_width = get_patch_output_size(image, target_resolution, input_data_format)\n resized_image = resize(image, (new_height, new_width), resample=resample, input_data_format=input_data_format)\n return resized_image", "docstring": "Resizes an image to a target resolution while maintaining aspect ratio.\n\nArgs:\nimage (np.array):\nThe input image.\ntarget_resolution (tuple):\nThe target resolution (height, width) of the image.\nresample (`PILImageResampling`):\nResampling filter to use if resizing the image.\ninput_data_format (`ChannelDimension` or `str`):\nThe channel dimension format of the input image.\n\nReturns:\nnp.array: The resized and padded image.", "source": "github-repos"} {"code": "def get_template_edit_url(self, template_id):\n request = self._get_request()\n return request.get((self.EMBEDDED_TEMPLATE_EDIT_URL + template_id))", "docstring": "Retrieves a embedded template for editing\n\nRetrieves an embedded object containing a template url that can be opened in an iFrame.\n\nArgs:\n\ntemplate_id (str): The id of the template to get a signature url for\n\nReturns:\nAn Embedded object", "source": "codesearchnet"} {"code": "def find_next(self, *strings, **kwargs):\n \n start = kwargs.pop(\"start\", None)\n keys_only = kwargs.pop(\"keys_only\", False)\n staht = start if start is not None else self.cursor\n for start, stop in [(staht, len(self)), (0, staht)]:\n for i in range(start, stop):\n for string in strings:\n if string in self[i]:\n tup = (i, self[i])\n self.cursor = i + 1\n if keys_only: return i\n return tup", "docstring": "From the editor's current cursor position find the next instance of the\ngiven string.\n\nArgs:\nstrings (iterable): String or strings to search for\n\nReturns:\ntup (tuple): Tuple of cursor position and line or None if not found\n\nNote:\nThis function cycles the entire editor (i.e. cursor to length of\neditor to zero and back to cursor position).", "source": "juraj-google-style"} {"code": "def __init__(self, value: T, proxy: Optional[T]=None):\n if proxy is None:\n proxy = value\n super().__init__('constant', proxy)\n self._value = value", "docstring": "Initialize a constant expression.\n\nArgs:\nvalue: The constant value to be produced by this expression.\nproxy: (Optional) a proxy object with same type as `value` to use for\nrapid type checking at pipeline construction time. 
If not provided,\n`value` will be used directly.", "source": "github-repos"} {"code": "def decode_image_tokens(self, image_tokens: torch.Tensor):\n decoded_image = self.model.vqmodel.decode(image_tokens)\n decoded_image = decoded_image.permute(0, 2, 3, 1)\n return decoded_image", "docstring": "Decodes generated image tokens from language model to continuous pixel values\nwith VQGAN module via upsampling.\nArgs:\nimage_tokens (`torch.LongTensor` of shape `(batch_size, num_of_tokens)`):\nThe tensors corresponding to the input images.", "source": "github-repos"} {"code": "def key_vals_dict_to_tuple_list(key_vals_dict, fill=float('nan')):\n tuple_list = []\n if (not key_vals_dict):\n return tuple_list\n vlen = max([len(vs) for vs in itertools.chain(*key_vals_dict.values())])\n for (k, vs) in key_vals_dict.items():\n try:\n tuple_list.extend([((k + tuple(v)) + ((fill,) * (vlen - len(v)))) for v in vs])\n except TypeError:\n tuple_list.extend([(((k,) + tuple(v)) + ((fill,) * (vlen - len(v)))) for v in vs])\n return tuple_list", "docstring": "Convert ``key_vals_dict`` to `tuple_list``.\n\nArgs:\nkey_vals_dict (dict): The first parameter.\nfill: a value to fill missing data\n\nReturns:\nA list of tuples", "source": "codesearchnet"} {"code": "def save_publication(pub):\n _assert_obj_type(pub)\n _get_handler().store_object(pub)\n return pub.to_comm(light_request=True)", "docstring": "Save `pub` into database and into proper indexes.\n\nAttr:\npub (obj): Instance of the :class:`.DBPublication`.\n\nReturns:\nobj: :class:`.DBPublication` without data.\n\nRaises:\nInvalidType: When the `pub` is not instance of :class:`.DBPublication`.\nUnindexablePublication: When there is no index (property) which can be\nused to index `pub` in database.", "source": "codesearchnet"} {"code": "def __init__(self, watch_paths, on_changed=None, interval=1.0, recursive=True):\n \n if isinstance(watch_paths, basestring):\n watch_paths = [watch_paths]\n\n watch_paths = [os.path.abspath(path) for path in watch_paths]\n for path in watch_paths:\n if not os.path.exists(path) or not os.path.isdir(path):\n raise MissingFolderError(path)\n\n self.watch_paths = watch_paths\n self.interval = interval * 1000.0\n self.recursive = recursive\n self.periodic_callback = PeriodicCallback(self.check_fs_events, self.interval)\n self.on_changed = on_changed\n self.observer = Observer()\n for path in self.watch_paths:\n self.observer.schedule(\n WatcherEventHandler(self),\n path,\n self.recursive\n )\n self.started = False\n self.fs_event_queue = Queue()", "docstring": "Constructor.\n\nArgs:\nwatch_paths: A list of filesystem paths to watch for changes.\non_changed: Callback to call when one or more changes to the watch path are detected.\ninterval: The minimum interval at which to notify about changes (in seconds).\nrecursive: Should the watch path be monitored recursively for changes?", "source": "juraj-google-style"} {"code": "def hardware_status(self):\n stat = structs.JLinkHardwareStatus()\n res = self._dll.JLINKARM_GetHWStatus(ctypes.byref(stat))\n if (res == 1):\n raise errors.JLinkException('Error in reading hardware status.')\n return stat", "docstring": "Retrieves and returns the hardware status.\n\nArgs:\nself (JLink): the ``JLink`` instance\n\nReturns:\nA ``JLinkHardwareStatus`` describing the J-Link hardware.", "source": "codesearchnet"} {"code": "def set_acl(self, role, users):\n acl_updates = [{'user': user, 'role': role} for user in users]\n r = fapi.update_repository_method_acl(self.namespace, self.name, self.snapshot_id, 
acl_updates, self.api_url)\n fapi._check_response_code(r, 200)", "docstring": "Set permissions for this method.\n\nArgs:\nrole (str): Access level\none of {one of \"OWNER\", \"READER\", \"WRITER\", \"NO ACCESS\"}\nusers (list(str)): List of users to give role to", "source": "codesearchnet"} {"code": "def _entry_allocated_bitmap(self, entry_number):\n \n index, offset = divmod(entry_number, 8)\n return bool(self._bitmap[index] & (1 << offset))", "docstring": "Checks if a particular index is allocated.\n\nArgs:\nentry_number (int): Index to verify\n\nReturns:\nbool: True if it is allocated, False otherwise.", "source": "juraj-google-style"} {"code": "def expand_dims(a, axis):\n \n if hasattr(a, 'expand_dims') and hasattr(type(a), '__array_interface__'):\n return a.expand_dims(axis)\n else:\n return np.expand_dims(a, axis)", "docstring": "Insert a new axis, corresponding to a given position in the array shape\n\nArgs:\na (array_like): Input array.\naxis (int): Position (amongst axes) where new axis is to be inserted.", "source": "juraj-google-style"} {"code": "def fill(self, background_shape, img):\n background_shape = tuple(background_shape)\n return self._fill(background_shape, img)", "docstring": "Return a proper background image of background_shape, given img.\n\nArgs:\nbackground_shape (tuple): a shape (h, w)\nimg: an image\nReturns:\na background image", "source": "codesearchnet"} {"code": "def get_display_name(self, room=None):\n if room:\n try:\n return room.members_displaynames[self.user_id]\n except KeyError:\n return self.user_id\n if (not self.displayname):\n self.displayname = self.api.get_display_name(self.user_id)\n return (self.displayname or self.user_id)", "docstring": "Get this user's display name.\n\nArgs:\nroom (Room): Optional. When specified, return the display name of the user\nin this room.\n\nReturns:\nThe display name. Defaults to the user ID if not set.", "source": "codesearchnet"} {"code": "def resize(self, image: np.ndarray, size: Dict[str, int], resample: PILImageResampling=PILImageResampling.BICUBIC, data_format: Optional[Union[str, ChannelDimension]]=None, input_data_format: Optional[Union[str, ChannelDimension]]=None, **kwargs) -> np.ndarray:\n default_to_square = True\n if 'shortest_edge' in size:\n size = size['shortest_edge']\n default_to_square = False\n elif 'height' in size and 'width' in size:\n size = (size['height'], size['width'])\n else:\n raise ValueError(\"Size must contain either 'shortest_edge' or 'height' and 'width'.\")\n output_size = get_resize_output_image_size(image, size=size, default_to_square=default_to_square, input_data_format=input_data_format)\n return resize(image, size=output_size, resample=resample, data_format=data_format, input_data_format=input_data_format, **kwargs)", "docstring": "Resize an image. The shortest edge of the image is resized to size[\"shortest_edge\"], with the longest edge\nresized to keep the input aspect ratio.\n\nArgs:\nimage (`np.ndarray`):\nImage to resize.\nsize (`Dict[str, int]`):\nSize of the output image.\nresample (`PILImageResampling`, *optional*, defaults to `PILImageResampling.BICUBIC`):\nResampling filter to use when resiizing the image.\ndata_format (`str` or `ChannelDimension`, *optional*):\nThe channel dimension format of the image. If not provided, it will be the same as the input image.\ninput_data_format (`ChannelDimension` or `str`, *optional*):\nThe channel dimension format of the input image. 
If not provided, it will be inferred.", "source": "github-repos"} {"code": "def join(self):\n self._cluster.join()", "docstring": "Blocks until all the scheduled functions have finished execution.\n\nIf any previously scheduled function raises an error, `join` will fail by\nraising any one of those errors, and clear the errors collected so far. If\nthis happens, some of the previously scheduled functions may have not been\nexecuted. Users can call `fetch` on the returned\n`tf.distribute.experimental.coordinator.RemoteValue` to inspect if they have\nexecuted, failed, or cancelled. If some that have been cancelled need to be\nrescheduled, users should call `schedule` with the function again.\n\nWhen `join` returns or raises, it guarantees that there is no function that\nis still being executed.\n\nRaises:\nException: one of the exceptions caught by the coordinator by any\npreviously scheduled function since the last time an error was thrown or\nsince the beginning of the program.", "source": "github-repos"} {"code": "def set_tuple_shapes(self, tuple_shapes):\n if len(tuple_shapes) != self.number_of_tuple_elements:\n raise ValueError(f'tuple_shapes is {str(tuple_shapes)}, but must be a list of length {self.number_of_tuple_elements}')\n try:\n tuple_shapes = [tensor_shape.as_shape(shape) for shape in tuple_shapes]\n except (ValueError, TypeError) as e:\n raise TypeError(f'tuple_shapes is {str(tuple_shapes)}, but must be a list of elements each convertible to TensorShape: got error {str(e)}') from e\n if self._frozen:\n for frozen, updated in zip(self._tuple_shapes, tuple_shapes):\n if frozen != updated:\n raise ValueError(f'Trying to update InfeedQueue with frozen configuration with an incompatible shape. Frozen shapes are {str(self._tuple_shapes)}, updated shapes are {str(tuple_shapes)}')\n else:\n self._tuple_shapes = tuple_shapes\n self._validate()", "docstring": "Sets the shape of each element of the queue.\n\ntuple_shapes must be a list of length\nself.number_of_tuple_elements, and each element must be\nconvertible to a TensorShape.\n\nArgs:\ntuple_shapes: the shapes of each queue element.\n\nRaises:\nValueError: if tuple_shapes is not of length\nself.number_of_tuple_elements.\nTypeError: if an element of tuple_shapes cannot be converted to\na TensorShape.", "source": "github-repos"} {"code": "def selection_error_control(self, form_info):\n \n keys, names = self.return_selected_form_items(form_info['ChannelList'])\n chosen_channels_number = len(keys)\n\n if form_info['new_channel'] and chosen_channels_number < 2:\n return False, _(\n u\"You should choose at least two channel to merge operation at a new channel.\")\n elif form_info['existing_channel'] and chosen_channels_number == 0:\n return False, _(\n u\"You should choose at least one channel to merge operation with existing channel.\")\n elif form_info['find_chosen_channel'] and chosen_channels_number != 1:\n return False, _(u\"You should choose one channel for split operation.\")\n\n return True, None", "docstring": "It controls the selection from the form according\nto the operations, and returns an error message\nif it does not comply with the rules.\n\nArgs:\nform_info: Channel or subscriber form from the user\n\nReturns: True or False\nerror message", "source": "juraj-google-style"} {"code": "def unparse_range(obj):\n if isinstance(obj, (int, long)):\n return str(obj)\n if isinstance(obj, tuple):\n arg = (str(obj[0]) + '-')\n if (len(obj) > 1):\n arg += str(obj[1])\n return arg\n raise ValueError('Must be an integer or tuple')", 
"docstring": "Unparse a range argument.\n\nArgs:\nobj: An article range. There are a number of valid formats; an integer\nspecifying a single article or a tuple specifying an article range.\nIf the range doesn't give a start article then all articles up to\nthe specified last article are included. If the range doesn't\nspecify a last article then all articles from the first specified\narticle up to the current last article for the group are included.\n\nReturns:\nThe range as a string that can be used by an NNTP command.\n\nNote: Sample valid formats.\n4678\n(,5234)\n(4245,)\n(4245, 5234)", "source": "codesearchnet"} {"code": "def _scalar_operations(self, axis, scalar, func):\n if isinstance(scalar, (list, np.ndarray, pandas.Series)):\n new_index = (self.index if (axis == 0) else self.columns)\n\n def list_like_op(df):\n if (axis == 0):\n df.index = new_index\n else:\n df.columns = new_index\n return func(df)\n new_data = self._map_across_full_axis(axis, self._prepare_method(list_like_op))\n return self.__constructor__(new_data, self.index, self.columns)\n else:\n return self._map_partitions(self._prepare_method(func))", "docstring": "Handler for mapping scalar operations across a Manager.\n\nArgs:\naxis: The axis index object to execute the function on.\nscalar: The scalar value to map.\nfunc: The function to use on the Manager with the scalar.\n\nReturns:\nA new QueryCompiler with updated data and new index.", "source": "codesearchnet"} {"code": "def get_examples_per_second_hook(every_n_steps=100, batch_size=128, warm_steps=5, **kwargs):\n return hooks.ExamplesPerSecondHook(every_n_steps=every_n_steps, batch_size=batch_size, warm_steps=warm_steps)", "docstring": "Function to get ExamplesPerSecondHook.\n\nArgs:\nevery_n_steps: `int`, print current and average examples per second every\nN steps.\nbatch_size: `int`, total batch size used to calculate examples/second from\nglobal time.\nwarm_steps: skip this number of steps before logging and running average.\n**kwargs: a dictionary of arguments to ExamplesPerSecondHook.\n\nReturns:\nReturns a ProfilerHook that writes out timelines that can be loaded into\nprofiling tools like chrome://tracing.", "source": "codesearchnet"} {"code": "def add_test_class(self, clazz, config=None, tests=None, name_suffix=None):\n if self._test_selector:\n cls_name = clazz.__name__\n if (cls_name, name_suffix) in self._test_selector:\n tests = self._test_selector[cls_name, name_suffix]\n elif cls_name in self._test_selector:\n tests = self._test_selector[cls_name]\n else:\n logging.info('Skipping test class %s due to CLI argument `tests`.', cls_name)\n return\n if not config:\n config = self._config\n self._runner.add_test_class(config, clazz, tests, name_suffix)", "docstring": "Adds a test class to the suite.\n\nArgs:\nclazz: class, a Mobly test class.\nconfig: config_parser.TestRunConfig, the config to run the class with. If\nnot specified, the default config passed from google3 infra is used.\ntests: list of strings, names of the tests to run in this test class, in\nthe execution order. 
Or a string with prefix `re:` for full regex match\nof test cases; all matched test cases will be executed; an error is\nraised if no match is found.\nIf not specified, all tests in the class are executed.\nCLI argument `tests` takes precedence over this argument.\nname_suffix: string, suffix to append to the class name for reporting.\nThis is used for differentiating the same class executed with different\nparameters in a suite.", "source": "github-repos"} {"code": "def read(self, input_stream, kmip_version=enums.KMIPVersion.KMIP_1_0):\n super(ArchiveResponsePayload, self).read(input_stream, kmip_version=kmip_version)\n local_stream = utils.BytearrayStream(input_stream.read(self.length))\n if self.is_tag_next(enums.Tags.UNIQUE_IDENTIFIER, local_stream):\n self._unique_identifier = primitives.TextString(tag=enums.Tags.UNIQUE_IDENTIFIER)\n self._unique_identifier.read(local_stream, kmip_version=kmip_version)\n self.is_oversized(local_stream)", "docstring": "Read the data encoding the Archive response payload and decode it\ninto its constituent parts.\n\nArgs:\ninput_stream (stream): A data stream containing encoded object\ndata, supporting a read method; usually a BytearrayStream\nobject.\nkmip_version (KMIPVersion): An enumeration defining the KMIP\nversion with which the object will be decoded. Optional,\ndefaults to KMIP 1.0.\n\nRaises:\nValueError: Raised if the data attribute is missing from the\nencoded payload.", "source": "codesearchnet"} {"code": "def italic(self, action):\n \n if action =='on':\n action = '4'\n elif action=='off':\n action = '5'\n else:\n raise RuntimeError('Invalid action for function italic. Options are on and off')\n self.send(chr(27)+action)", "docstring": "Enable/cancel italic printing\n\nArgs:\naction: Enable or disable italic printing. 
Options are 'on' and 'off'\nReturns:\nNone\nRaises:\nRuntimeError: Invalid action.", "source": "juraj-google-style"} {"code": "def __init__(\n self,\n expr,\n grouping_column_names,\n column_names,\n grouping_column_types,\n column_types):\n \n self.expr = expr\n self.grouping_column_name = grouping_column_names\n self.column_names = column_names\n self.grouping_column_types = grouping_column_types\n self.column_types = column_types\n\n if isinstance(self.column_types, list):\n if len(self.column_types) == 1:\n column_types = self.column_types[0]\n else:\n column_types = WeldStruct(self.column_types)\n\n if len(self.grouping_column_types) == 1:\n grouping_column_types = self.grouping_column_types[0]\n else:\n grouping_column_types = WeldStruct(self.grouping_column_types)\n self.weld_type = WeldStruct([grouping_column_types, column_types])", "docstring": "Summary\n\nArgs:\nexpr (TYPE): Description\ngrouping_column_name (TYPE): Description\ncolumn_names (TYPE): Description\ngrouping_column_type (TYPE): Description\ncolumn_types (TYPE): Description", "source": "juraj-google-style"} {"code": "def __init__(self, tcex, main_type, api_type, sub_type, api_entity, owner):\n \n self._tcex = tcex\n self._data = {}\n\n self._owner = owner\n self._type = main_type\n self._api_sub_type = sub_type\n self._api_type = api_type\n self._unique_id = None\n self._api_entity = api_entity\n\n self._utils = TcExUtils()\n self._tc_requests = TiTcRequest(self._tcex)", "docstring": "Initialize Class Properties.\n\nArgs:\ntcex:\nmain_type:\napi_type:\nsub_type:\napi_entity:", "source": "juraj-google-style"} {"code": "def get(self, key):\n match = self._get_match(key=key)\n if (not match):\n return None\n return self._get_value_from_match(key=key, match=match)", "docstring": "Gets the value of the property of the given key.\n\nArgs:\nkey (str): Key of the property to look-up.", "source": "codesearchnet"} {"code": "def print_projects(projects=None):\n grouped_by = {}\n if (not projects):\n print(\"Your selection didn't include any projects for this experiment.\")\n return\n for name in projects:\n prj = projects[name]\n if (prj.GROUP not in grouped_by):\n grouped_by[prj.GROUP] = []\n grouped_by[prj.GROUP].append('{name}/{group}'.format(name=prj.NAME, group=prj.GROUP))\n for name in grouped_by:\n print('group: {0}'.format(name))\n group_projects = sorted(grouped_by[name])\n for prj in group_projects:\n prj_cls = projects[prj]\n version_str = None\n if hasattr(prj_cls, 'versions'):\n version_str = ', '.join(prj_cls.versions())\n project_id = '{0}/{1}'.format(prj_cls.NAME, prj_cls.GROUP)\n project_str = ' name: {id:<32} version: {version:<24} source: {src}'.format(id=str(project_id), version=str(prj_cls.VERSION), src=str(prj_cls.SRC_FILE))\n print(project_str)\n if prj_cls.__doc__:\n docstr = prj_cls.__doc__.strip('\\n ')\n print(' description: {desc}'.format(desc=docstr))\n if version_str:\n print(' versions: {versions}'.format(versions=version_str))\n print()", "docstring": "Print a list of projects registered for that experiment.\n\nArgs:\nexp: The experiment to print all projects for.", "source": "codesearchnet"} {"code": "def _CalculateElementsDataSize(self, context):\n \n elements_data_size = None\n\n if self._HasElementsDataSize():\n elements_data_size = self._EvaluateElementsDataSize(context)\n\n elif self._HasNumberOfElements():\n element_byte_size = self._element_data_type_definition.GetByteSize()\n if element_byte_size is not None:\n number_of_elements = self._EvaluateNumberOfElements(context)\n 
elements_data_size = number_of_elements * element_byte_size\n\n return elements_data_size", "docstring": "Calculates the elements data size.\n\nArgs:\ncontext (Optional[DataTypeMapContext]): data type map context, used to\ndetermine the size hint.\n\nReturns:\nint: the elements data size or None if not available.", "source": "juraj-google-style"} {"code": "def normalize_input(data: str) -> typing.Tuple[str, typing.Set[int]]:\n chunks = data.replace('\\n', utils.SEP).strip().split(utils.SEP)\n chunk_lengths = [len(chunk) for chunk in chunks]\n sep_indices = set(itertools.accumulate(chunk_lengths, lambda x, y: x + y))\n sentence = ''.join(chunks)\n return (sentence, sep_indices)", "docstring": "Normalizes the input to one line with separators.\n\nArgs:\ndata(str): Source input\n\nReturns:\ntyping.Tuple[str, typing.Set[int]]: A tuple of the sentence and the\nseparator indices.", "source": "github-repos"} {"code": "def delete(self, addon_id, data={}, **kwargs):\n return super(Addon, self).delete(addon_id, data, **kwargs)", "docstring": "Delete addon for given id\n\nArgs:\naddon_id : Id for which addon object has to be deleted", "source": "codesearchnet"} {"code": "def kill_all_processes(self, check_alive=True, allow_graceful=False):\n \n \n \n \n \n \n if ray_constants.PROCESS_TYPE_RAYLET in self.all_processes:\n self._kill_process_type(\n ray_constants.PROCESS_TYPE_RAYLET,\n check_alive=check_alive,\n allow_graceful=allow_graceful)\n\n \n \n for process_type in list(self.all_processes.keys()):\n self._kill_process_type(\n process_type,\n check_alive=check_alive,\n allow_graceful=allow_graceful)", "docstring": "Kill all of the processes.\n\nNote that This is slower than necessary because it calls kill, wait,\nkill, wait, ... instead of kill, kill, ..., wait, wait, ...\n\nArgs:\ncheck_alive (bool): Raise an exception if any of the processes were\nalready dead.", "source": "juraj-google-style"} {"code": "def sample(self, nmr_samples, burnin=0, thinning=1):\n if ((not thinning) or (thinning < 1)):\n thinning = 1\n if ((not burnin) or (burnin < 0)):\n burnin = 0\n max_samples_per_batch = max((1000 \n with self._logging(nmr_samples, burnin, thinning):\n if (burnin > 0):\n for (batch_start, batch_end) in split_in_batches(burnin, max_samples_per_batch):\n self._sample((batch_end - batch_start), return_output=False)\n if (nmr_samples > 0):\n outputs = []\n for (batch_start, batch_end) in split_in_batches(nmr_samples, max_samples_per_batch):\n outputs.append(self._sample((batch_end - batch_start), thinning=thinning))\n return SimpleSampleOutput(*[np.concatenate([o[ind] for o in outputs], axis=(- 1)) for ind in range(3)])", "docstring": "Take additional samples from the given likelihood and prior, using this sampler.\n\nThis method can be called multiple times in which the sample state is stored in between.\n\nArgs:\nnmr_samples (int): the number of samples to return\nburnin (int): the number of samples to discard before returning samples\nthinning (int): how many sample we wait before storing a new one. This will draw extra samples such that\nthe total number of samples generated is ``nmr_samples * (thinning)`` and the number of samples\nstored is ``nmr_samples``. 
If set to one or lower we store every sample after the burn in.\n\nReturns:\nSamplingOutput: the sample output object", "source": "codesearchnet"} {"code": "def _select_helper(args, kwargs):\n if (len(args) > 1):\n raise TypeError('select accepts at most ONE positional argument.')\n if ((len(args) > 0) and (len(kwargs) > 0)):\n raise TypeError('select accepts EITHER a positional argument, OR keyword arguments (not both).')\n if ((len(args) == 0) and (len(kwargs) == 0)):\n raise TypeError('select requires EITHER a positional argument, OR keyword arguments.')\n if args:\n arg = args[0]\n if isinstance(arg, dict):\n selector = arg\n elif isinstance(arg, string_types):\n selector = dict(name=arg)\n elif (isinstance(arg, type) and issubclass(arg, Model)):\n selector = {'type': arg}\n else:\n raise TypeError('selector must be a dictionary, string or plot object.')\n elif ('selector' in kwargs):\n if (len(kwargs) == 1):\n selector = kwargs['selector']\n else:\n raise TypeError(\"when passing 'selector' keyword arg, not other keyword args may be present\")\n else:\n selector = kwargs\n return selector", "docstring": "Allow flexible selector syntax.\n\nReturns:\ndict", "source": "codesearchnet"} {"code": "def find_sorted_task_dependencies(task, task_name, task_id):\n log.info('find_sorted_task_dependencies {} {}'.format(task_name, task_id))\n cot_input_dependencies = [_craft_dependency_tuple(task_name, task_type, task_id) for (task_type, task_id) in task['extra'].get('chainOfTrust', {}).get('inputs', {}).items()]\n upstream_artifacts_dependencies = [_craft_dependency_tuple(task_name, artifact_dict['taskType'], artifact_dict['taskId']) for artifact_dict in task.get('payload', {}).get('upstreamArtifacts', [])]\n dependencies = [*cot_input_dependencies, *upstream_artifacts_dependencies]\n dependencies = _sort_dependencies_by_name_then_task_id(dependencies)\n parent_task_id = (get_parent_task_id(task) or get_decision_task_id(task))\n parent_task_type = 'parent'\n parent_tuple = _craft_dependency_tuple(task_name, parent_task_type, parent_task_id)\n dependencies.insert(0, parent_tuple)\n log.info('found dependencies: {}'.format(dependencies))\n return dependencies", "docstring": "Find the taskIds of the chain of trust dependencies of a given task.\n\nArgs:\ntask (dict): the task definition to inspect.\ntask_name (str): the name of the task, for logging and naming children.\ntask_id (str): the taskId of the task.\n\nReturns:\nlist: tuples associating dependent task ``name`` to dependent task ``taskId``.", "source": "codesearchnet"} {"code": "def rabi_oscillations(sampler: sim.Sampler, qubit: devices.GridQubit, max_angle: float=(2 * np.pi), *, repetitions: int=1000, num_points: int=200) -> RabiResult:\n theta = sympy.Symbol('theta')\n circuit = circuits.Circuit.from_ops((ops.X(qubit) ** theta))\n circuit.append(ops.measure(qubit, key='z'))\n sweep = study.Linspace(key='theta', start=0.0, stop=(max_angle / np.pi), length=num_points)\n results = sampler.run_sweep(circuit, params=sweep, repetitions=repetitions)\n angles = np.linspace(0.0, max_angle, num_points)\n excited_state_probs = np.zeros(num_points)\n for i in range(num_points):\n excited_state_probs[i] = np.mean(results[i].measurements['z'])\n return RabiResult(angles, excited_state_probs)", "docstring": "Runs a Rabi oscillation experiment.\n\nRotates a qubit around the x-axis of the Bloch sphere by a sequence of Rabi\nangles evenly spaced between 0 and max_angle. 
For each rotation, repeat\nthe circuit a number of times and measure the average probability of the\nqubit being in the |1> state.\n\nArgs:\nsampler: The quantum engine or simulator to run the circuits.\nqubit: The qubit under test.\nmax_angle: The final Rabi angle in radians.\nrepetitions: The number of repetitions of the circuit for each Rabi\nangle.\nnum_points: The number of Rabi angles.\n\nReturns:\nA RabiResult object that stores and plots the result.", "source": "codesearchnet"} {"code": "def method_exists(cls, method):\n methods = cls.API_METHODS\n for key in method.split('.'):\n methods = methods.get(key)\n if (methods is None):\n break\n if isinstance(methods, str):\n logger.debug('%r: %r', method, methods)\n return True\n return False", "docstring": "Whether a given method exists in the known API.\n\nArguments:\nmethod (:py:class:`str`): The name of the method.\n\nReturns:\n:py:class:`bool`: Whether the method is in the known API.", "source": "codesearchnet"} {"code": "def _definition_from_example(example):\n \n assert isinstance(example, dict)\n\n def _has_simple_type(value):\n accepted = (str, int, float, bool)\n return isinstance(value, accepted)\n\n definition = {\n 'type': 'object',\n 'properties': {},\n }\n for key, value in example.items():\n if not _has_simple_type(value):\n raise Exception(\"Not implemented yet\")\n ret_value = None\n if isinstance(value, str):\n ret_value = {'type': 'string'}\n elif isinstance(value, int):\n ret_value = {'type': 'integer', 'format': 'int64'}\n elif isinstance(value, float):\n ret_value = {'type': 'number', 'format': 'double'}\n elif isinstance(value, bool):\n ret_value = {'type': 'boolean'}\n else:\n raise Exception(\"Not implemented yet\")\n definition['properties'][key] = ret_value\n\n return definition", "docstring": "Generates a swagger definition json from a given example\nWorks only for simple types in the dict\n\nArgs:\nexample: The example for which we want a definition\nType is DICT\n\nReturns:\nA dict that is the swagger definition json", "source": "juraj-google-style"} {"code": "def get_integrated_diff(self, ind, radius, nbins=1):\n if (not self.is_spin_polarized):\n radii = [((radius / nbins) * (i + 1)) for i in range(nbins)]\n data = np.zeros((nbins, 2))\n data[(:, 0)] = radii\n return data\n struct = self.structure\n a = self.dim\n if ((ind not in self._distance_matrix) or (self._distance_matrix[ind]['max_radius'] < radius)):\n coords = []\n for (x, y, z) in itertools.product(*[list(range(i)) for i in a]):\n coords.append([(x / a[0]), (y / a[1]), (z / a[2])])\n sites_dist = struct.lattice.get_points_in_sphere(coords, struct[ind].coords, radius)\n self._distance_matrix[ind] = {'max_radius': radius, 'data': np.array(sites_dist)}\n data = self._distance_matrix[ind]['data']\n inds = (data[(:, 1)] <= radius)\n dists = data[(inds, 1)]\n data_inds = np.rint((np.mod(list(data[(inds, 0)]), 1) * np.tile(a, (len(dists), 1)))).astype(int)\n vals = [self.data['diff'][(x, y, z)] for (x, y, z) in data_inds]\n (hist, edges) = np.histogram(dists, bins=nbins, range=[0, radius], weights=vals)\n data = np.zeros((nbins, 2))\n data[(:, 0)] = edges[1:]\n data[(:, 1)] = [(sum(hist[0:(i + 1)]) / self.ngridpts) for i in range(nbins)]\n return data", "docstring": "Get integrated difference of atom index ind up to radius. This can be\nan extremely computationally intensive process, depending on how many\ngrid points are in the VolumetricData.\n\nArgs:\nind (int): Index of atom.\nradius (float): Radius of integration.\nnbins (int): Number of bins. 
Defaults to 1. This allows one to\nobtain the charge integration up to a list of the cumulative\ncharge integration values for radii for [radius/nbins,\n2 * radius/nbins, ....].\n\nReturns:\nDifferential integrated charge as a np array of [[radius, value],\n...]. Format is for ease of plotting. E.g., plt.plot(data[:,0],\ndata[:,1])", "source": "codesearchnet"} {"code": "def get_cpu_isa_version():\n key = 'cpu_isa'\n out, err = run_shell_cmd(cmds_all[PLATFORM.lower()][key])\n if err and FLAGS.debug:\n print('Error in detecting supported ISA:\\n %s' % str(err))\n ret_val = out\n required_isa = ['avx', 'avx2', 'avx512f', 'sse4', 'sse4_1']\n found = []\n missing = []\n for isa in required_isa:\n for sys_isa in ret_val.split(b' '):\n if isa == sys_isa:\n if isa not in found:\n found.append(isa)\n missing = list(set(required_isa) - set(found))\n return (found, missing)", "docstring": "Retrieves all Instruction Set Architecture(ISA) available.\n\nRequired ISA(s): 'avx', 'avx2', 'avx512f', 'sse4', 'sse4_1'\n\nReturns:\nTuple\n(list of available ISA, list of missing ISA)", "source": "github-repos"} {"code": "def __init__(self, action):\n _check_type(action, str)\n self.action = action", "docstring": "Constructor.\n\nArgs:\naction: (`OnSessionInitAction`) Debugger action to take on session init.", "source": "github-repos"} {"code": "def price(self, market: pmd.ProcessedMarketData, name: Optional[str]=None) -> types.FloatTensor:\n name = name or self._name + '_price'\n with tf.name_scope(name):\n discount_curve = get_discount_curve(self._discount_curve_type, market, self._mask)\n discount_factors = discount_curve.discount_factor(self._coupon_end_dates)\n _, cashflows = self.cashflows(market, past_fixing=self._past_fixing)\n cashflow_pvs = cashflows * discount_factors\n return tf.math.reduce_sum(cashflow_pvs, axis=1)", "docstring": "Returns the present value of the stream on the valuation date.\n\nArgs:\nmarket: An instance of `ProcessedMarketData`.\nname: Python str. The name to give to the ops created by this function.\nDefault value: `None` which maps to 'price'.\n\nReturns:\nA `Tensor` of shape `batch_shape` containing the modeled price of each\nstream based on the input market data.", "source": "github-repos"} {"code": "def volumes(self):\n if (not self.__volumes):\n self.__volumes = Volumes(self.__connection)\n return self.__volumes", "docstring": "Gets the Volumes API client.\n\nReturns:\nVolumes:", "source": "codesearchnet"} {"code": "def parse_config_for_selected_keys(content, keys):\n config_items = {key: None for key in keys}\n if (not content):\n return (config_items, content)\n stripped = content.strip()\n if (len(stripped) == 0):\n return ({}, None)\n elif (stripped[0] == '{'):\n config = json.loads(content)\n else:\n config = yaml.load(content)\n if (not isinstance(config, dict)):\n raise ValueError('Invalid config.')\n for key in keys:\n config_items[key] = config.pop(key, None)\n if (not config):\n return (config_items, None)\n if (stripped[0] == '{'):\n content_out = json.dumps(config, indent=4)\n else:\n content_out = yaml.dump(config, default_flow_style=False)\n return (config_items, content_out)", "docstring": "Parse a config from a magic cell body for selected config keys.\n\nFor example, if 'content' is:\nconfig_item1: value1\nconfig_item2: value2\nconfig_item3: value3\nand 'keys' are: [config_item1, config_item3]\n\nThe results will be a tuple of\n1. The parsed config items (dict): {config_item1: value1, config_item3: value3}\n2. 
The remaining content (string): config_item2: value2\n\nArgs:\ncontent: the input content. A string. It has to be a yaml or JSON string.\nkeys: a list of keys to retrieve from content. Note that it only checks top level keys\nin the dict.\n\nReturns:\nA tuple. First is the parsed config including only selected keys. Second is\nthe remaining content.\n\nRaises:\nException if the content is not a valid yaml or JSON string.", "source": "codesearchnet"} {"code": "def _is_txn_to_replay(self, txn_id, possible_successor, already_seen):\n is_successor = self._is_predecessor_of_possible_successor(txn_id, possible_successor)\n in_different_batch = (not self._is_in_same_batch(txn_id, possible_successor))\n has_not_been_seen = (possible_successor not in already_seen)\n return (is_successor and in_different_batch and has_not_been_seen)", "docstring": "Decide if possible_successor should be replayed.\n\nArgs:\ntxn_id (str): Id of txn in failed batch.\npossible_successor (str): Id of txn to possibly replay.\nalready_seen (list): A list of possible_successors that have\nbeen replayed.\n\nReturns:\n(bool): If the possible_successor should be replayed.", "source": "codesearchnet"} {"code": "def __init__(self, resolver, mets_url, src_dir=None, skip=None, download=False, page_strictness='strict'):\n \n self.report = ValidationReport()\n self.skip = skip if skip else []\n log.debug('resolver=%s mets_url=%s src_dir=%s', resolver, mets_url, src_dir)\n self.resolver = resolver\n self.mets_url = mets_url\n self.download = download\n self.page_strictness = page_strictness\n\n self.src_dir = src_dir\n if mets_url is None and src_dir is not None:\n mets_url = '%s/mets.xml' % src_dir\n self.workspace = None\n self.mets = None", "docstring": "Construct a new WorkspaceValidator.\n\nArgs:\nresolver (Resolver):\nmets_url (string):\nsrc_dir (string):\nskip (list):\ndownload (boolean):\npage_strictness (\"strict\"|\"lax\"|\"fix\"|\"off\"):", "source": "juraj-google-style"} {"code": "def _create_conversion_trie(strict):\n t = pygtrie.CharTrie()\n for (beta, uni) in _map.BETACODE_MAP.items():\n if strict:\n t[beta] = uni\n else:\n diacritics = beta[1:]\n perms = itertools.permutations(diacritics)\n for perm in perms:\n perm_str = (beta[0] + ''.join(perm))\n t[perm_str.lower()] = uni\n t[perm_str.upper()] = uni\n return t", "docstring": "Create the trie for betacode conversion.\n\nArgs:\ntext: The beta code text to convert. All of this text must be betacode.\nstrict: Flag to allow for flexible diacritic order on input.\n\nReturns:\nThe trie for conversion.", "source": "codesearchnet"} {"code": "def __init__(self, item_type=None, min_length=None, max_length=None, empty=True):\n \n super(TupleTypeChecker, self).__init__(\n iter_type=tuple, item_type=item_type, min_length=min_length, max_length=max_length, empty=empty\n )", "docstring": "Initialization method.\n\nArgs:\nitem_type (type): the type of the items inside the tuple.\nmin_length (int): minimum length of the tuple (included).\nmax_length (int): maximum length of the tuple (included).\nempty (bool): whether empty tuple is allowed.", "source": "juraj-google-style"} {"code": "def _get_attributes(self):\n return map((lambda i, c: (i[1], c[1])), self._get_instance_attributes(), self.get_class_attributes())", "docstring": "Return a generator for instance and class attribute.\n\n.. 
code-block:: python3\n\nfor instance_attribute, class_attribute in self._get_attributes():\nprint(\"Instance Attribute: {}\".format(instance_attribute))\nprint(\"Class Attribute: {}\".format(class_attribute))\n\nReturns:\ngenerator: Tuples with instance attribute and class attribute", "source": "codesearchnet"} {"code": "def _get_var_info(var, prev_tensor_name=None):\n if checkpoint_utils._is_variable(var):\n current_var_name = _infer_var_name([var])\n elif isinstance(var, list) and all((checkpoint_utils._is_variable(v) for v in var)):\n current_var_name = _infer_var_name(var)\n elif isinstance(var, variables_lib.PartitionedVariable):\n current_var_name = _infer_var_name([var])\n var = var._get_variable_list()\n else:\n raise TypeError('var MUST be one of the following: a Variable, list of Variable or PartitionedVariable, but is {}'.format(type(var)))\n if not prev_tensor_name:\n prev_tensor_name = current_var_name\n return (prev_tensor_name, var)", "docstring": "Helper method for standarizing Variable and naming.\n\nArgs:\nvar: Current graph's variable that needs to be warm-started (initialized).\nCan be either of the following: (i) `Variable` (ii) `ResourceVariable`\n(iii) list of `Variable`: The list must contain slices of the same larger\nvariable. (iv) `PartitionedVariable`\nprev_tensor_name: Name of the tensor to lookup in provided `prev_ckpt`. If\nNone, we lookup tensor with same name as given `var`.\n\nReturns:\nA tuple of the Tensor name and var.", "source": "github-repos"} {"code": "class ConvNextStage(nn.Module):\n\n def __init__(self, config, in_channels, out_channels, kernel_size=2, stride=2, depth=2, drop_path_rates=None):\n super().__init__()\n if in_channels != out_channels or stride > 1:\n self.downsampling_layer = nn.Sequential(ConvNextLayerNorm(in_channels, eps=1e-06, data_format='channels_first'), nn.Conv2d(in_channels, out_channels, kernel_size=kernel_size, stride=stride))\n else:\n self.downsampling_layer = nn.Identity()\n drop_path_rates = drop_path_rates or [0.0] * depth\n self.layers = nn.Sequential(*[ConvNextLayer(config, dim=out_channels, drop_path=drop_path_rates[j]) for j in range(depth)])\n\n def forward(self, hidden_states: torch.FloatTensor) -> torch.Tensor:\n hidden_states = self.downsampling_layer(hidden_states)\n hidden_states = self.layers(hidden_states)\n return hidden_states", "docstring": "ConvNeXT stage, consisting of an optional downsampling layer + multiple residual blocks.\n\nArgs:\nconfig ([`ConvNextConfig`]): Model configuration class.\nin_channels (`int`): Number of input channels.\nout_channels (`int`): Number of output channels.\ndepth (`int`): Number of residual blocks.\ndrop_path_rates(`List[float]`): Stochastic depth rates for each layer.", "source": "github-repos"} {"code": "def allclose_up_to_global_phase(\n a: np.ndarray,\n b: np.ndarray,\n *,\n rtol: float = 1.e-5,\n atol: float = 1.e-8,\n equal_nan: bool = False\n) -> bool:\n \n\n a, b = transformations.match_global_phase(a, b)\n\n \n return np.allclose(a=a, b=b, rtol=rtol, atol=atol, equal_nan=equal_nan)", "docstring": "Determines if a ~= b * exp(i t) for some t.\n\nArgs:\na: A numpy array.\nb: Another numpy array.\nrtol: Relative error tolerance.\natol: Absolute error tolerance.\nequal_nan: Whether or not NaN entries should be considered equal to\nother NaN entries.", "source": "juraj-google-style"} {"code": "def do_IDENT(self, service_name: str, source: list, *args, **kwargs) -> None:\n \n self.logger.info(' IDENT %s as %s', service_name, source)\n 
self.messaging._address_map[service_name] = source", "docstring": "Perform identification of a service to a binary representation.\n\nArgs:\nservice_name: human readable name for service\nsource: zmq representation for the socket source", "source": "juraj-google-style"} {"code": "def add_mount_point(self, path, total_size=None):\n path = self.absnormpath(path)\n if (path in self.mount_points):\n self.raise_os_error(errno.EEXIST, path)\n self._last_dev += 1\n self.mount_points[path] = {'idev': self._last_dev, 'total_size': total_size, 'used_size': 0}\n root_dir = (self.root if (path == self.root.name) else self.create_dir(path))\n root_dir.st_dev = self._last_dev\n return self.mount_points[path]", "docstring": "Add a new mount point for a filesystem device.\nThe mount point gets a new unique device number.\n\nArgs:\npath: The root path for the new mount path.\n\ntotal_size: The new total size of the added filesystem device\nin bytes. Defaults to infinite size.\n\nReturns:\nThe newly created mount point dict.\n\nRaises:\nOSError: if trying to mount an existing mount point again.", "source": "codesearchnet"} {"code": "def embedding_lookup(self, features: Any, weights: Optional[Any]=None) -> Any:\n return cpu_embedding_lookup(features, weights, self.embedding_tables, self._feature_config)", "docstring": "Apply standard lookup ops on CPU.\n\nArgs:\nfeatures: A nested structure of `tf.Tensor`s, `tf.SparseTensor`s or\n`tf.RaggedTensor`s, with the same structure as `feature_config`. Inputs\nwill be downcast to `tf.int32`. Only one type out of `tf.SparseTensor`\nor `tf.RaggedTensor` is supported per call.\nweights: If not `None`, a nested structure of `tf.Tensor`s,\n`tf.SparseTensor`s or `tf.RaggedTensor`s, matching the above, except\nthat the tensors should be of float type (and they will be downcast to\n`tf.float32`). For `tf.SparseTensor`s we assume the `indices` are the\nsame for the parallel entries from `features` and similarly for\n`tf.RaggedTensor`s we assume the row_splits are the same.\n\nReturns:\nA nested structure of Tensors with the same structure as input features.", "source": "github-repos"} {"code": "def get_event_from_name(self, event_name):\n \n return next((e for e in self.events if e.name == event_name), None)", "docstring": "Return an event from a name\nArgs:\nevent_name (str): name of the event\nReturns:\nEvent", "source": "juraj-google-style"} {"code": "def _default_tolerance(dtype):\n if dtype == np.float16:\n return 0.005\n elif dtype in (np.float32, np.complex64):\n return 0.001\n elif dtype in (np.float64, np.complex128):\n return 1e-05\n else:\n return None", "docstring": "Returns a sensible default tolerance for comparing results of a given type.\n\nArgs:\ndtype: A datatype.", "source": "github-repos"} {"code": "def set_property(self, name, value, update_session=True):\n \n if type(value) == datetime:\n value = value.isoformat()\n else:\n value = value\n\n try:\n prop = self.get_property(name)\n if prop.value == value:\n return False\n\n prop.value = value\n\n except AttributeError:\n prop = ResourceProperty()\n prop.resource_id = self.id\n prop.name = name\n prop.value = value\n\n if update_session:\n db.session.add(prop)\n\n return True", "docstring": "Create or set the value of a property. Returns `True` if the property was created or updated, or `False` if\nthere were no changes to the value of the property.\n\nArgs:\nname (str): Name of the property to create or update\nvalue (any): Value of the property. 
This can be any type of JSON serializable data\nupdate_session (bool): Automatically add the change to the SQLAlchemy session. Default: True\n\nReturns:\n`bool`", "source": "juraj-google-style"} {"code": "def in_coord_list(coord_list, coord, atol=1e-08):\n return (len(find_in_coord_list(coord_list, coord, atol=atol)) > 0)", "docstring": "Tests if a particular coord is within a coord_list.\n\nArgs:\ncoord_list: List of coords to test\ncoord: Specific coordinates\natol: Absolute tolerance. Defaults to 1e-8. Accepts both scalar and\narray.\n\nReturns:\nTrue if coord is in the coord list.", "source": "codesearchnet"} {"code": "def load_flag_values(self, flags=None):\n if (flags is None):\n flags = self._flags\n for keyval in flags.config_value:\n (k, v) = keyval.split('=', 1)\n v = (self._modules['yaml'].load(v) if isinstance(v, str) else v)\n k = (k.decode() if isinstance(k, bytes) else k)\n v = (v.decode() if isinstance(v, bytes) else v)\n self._flag_values.setdefault(k, v)", "docstring": "Load flag values given from command line flags.\n\nArgs:\nflags: An argparse Namespace containing the command line flags.", "source": "codesearchnet"} {"code": "def intersect_curves(nodes1, nodes2):\n nodes1 = _curve_helpers.full_reduce(nodes1)\n nodes2 = _curve_helpers.full_reduce(nodes2)\n (_, num_nodes1) = nodes1.shape\n (_, num_nodes2) = nodes2.shape\n swapped = False\n if (num_nodes1 > num_nodes2):\n (nodes1, nodes2) = (nodes2, nodes1)\n swapped = True\n coeffs = normalize_polynomial(to_power_basis(nodes1, nodes2))\n if np.all((coeffs == 0.0)):\n raise NotImplementedError(_COINCIDENT_ERR)\n _check_non_simple(coeffs)\n t_vals = roots_in_unit_interval(coeffs)\n final_s = []\n final_t = []\n for t_val in t_vals:\n ((x_val,), (y_val,)) = _curve_helpers.evaluate_multi(nodes2, np.asfortranarray([t_val]))\n s_val = locate_point(nodes1, x_val, y_val)\n if (s_val is not None):\n _resolve_and_add(nodes1, s_val, final_s, nodes2, t_val, final_t)\n result = np.zeros((2, len(final_s)), order='F')\n if swapped:\n (final_s, final_t) = (final_t, final_s)\n result[(0, :)] = final_s\n result[(1, :)] = final_t\n return result", "docstring": "r\"\"\"Intersect two parametric B |eacute| zier curves.\n\nArgs:\nnodes1 (numpy.ndarray): The nodes in the first curve.\nnodes2 (numpy.ndarray): The nodes in the second curve.\n\nReturns:\nnumpy.ndarray: ``2 x N`` array of intersection parameters.\nEach row contains a pair of values :math:`s` and :math:`t`\n(each in :math:`\\left[0, 1\\right]`) such that the curves\nintersect: :math:`B_1(s) = B_2(t)`.\n\nRaises:\nNotImplementedError: If the \"intersection polynomial\" is\nall zeros -- which indicates coincident curves.", "source": "codesearchnet"} {"code": "def time_to_jump( self ):\n \n k_tot = rate_prefactor * np.sum( self.p )\n return -( 1.0 / k_tot ) * math.log( random.random() )", "docstring": "The timestep until the next jump.\n\nArgs:\nNone\n\nReturns:\n(Float): The timestep until the next jump.", "source": "juraj-google-style"} {"code": "def find(self, package, **kwargs):\n \n if not exists(package):\n return None\n name, path = None, None\n enforce_init = kwargs.pop('enforce_init', True)\n if isdir(package):\n if isfile(join(package, '__init__.py')) or not enforce_init:\n name, path = basename(package), package\n elif isfile(package) and package.endswith('.py'):\n name, path = splitext(basename(package))[0], package\n if name and path:\n return PackageSpec(name, path)\n return None", "docstring": "Find method.\n\nArgs:\npackage (str): package to find.\n**kwargs (): 
additional keyword arguments.\n\nReturns:\nPackageSpec: the PackageSpec corresponding to the package, or None.", "source": "juraj-google-style"} {"code": "def server_hardware(self):\n if (not self.__server_hardware):\n self.__server_hardware = ServerHardware(self.__connection)\n return self.__server_hardware", "docstring": "Gets the ServerHardware API client.\n\nReturns:\nServerHardware:", "source": "codesearchnet"} {"code": "def metamodel_from_str(lang_desc, metamodel=None, **kwargs):\n \n\n if not metamodel:\n metamodel = TextXMetaModel(**kwargs)\n\n language_from_str(lang_desc, metamodel)\n\n return metamodel", "docstring": "Creates a new metamodel from the textX description given as a string.\n\nArgs:\nlang_desc(str): A textX language description.\nmetamodel(TextXMetaModel): A metamodel that should be used.\nother params: See TextXMetaModel.", "source": "juraj-google-style"} {"code": "def _handle_deferred_dependencies(self, name, trackable):\n self._maybe_initialize_trackable()\n trackable._maybe_initialize_trackable()\n deferred_dependencies_list = self._deferred_dependencies.pop(name, ())\n for checkpoint_position in sorted(deferred_dependencies_list, key=lambda restore: restore.checkpoint.restore_uid, reverse=True):\n checkpoint_position.restore(trackable)\n for name_based_restore in sorted(self._self_name_based_restores, key=lambda checkpoint: checkpoint.restore_uid, reverse=True):\n trackable._name_based_attribute_restore(name_based_restore)", "docstring": "Pop and load any deferred checkpoint restores into `trackable`.\n\nThis method does not add a new dependency on `trackable`, but it does\ncheck if any outstanding/deferred dependencies have been queued waiting for\nthis dependency to be added (matched based on `name`). If so,\n`trackable` and its dependencies are restored. The restorations are\nconsidered fulfilled and so are deleted.\n\n`_track_trackable` is more appropriate for adding a\nnormal/unconditional dependency, and includes handling for deferred\nrestorations. This method allows objects such as `Optimizer` to use the same\nrestoration logic while managing conditional dependencies themselves, by\noverriding `_checkpoint_dependencies` and `_lookup_dependency` to change the\nobject's dependencies based on the context it is saved/restored in (a single\noptimizer instance can have state associated with multiple graphs).\n\nArgs:\nname: The name of the dependency within this object (`self`), used to\nmatch `trackable` with values saved in a checkpoint.\ntrackable: The Trackable object to restore (inheriting from `Trackable`).", "source": "github-repos"} {"code": "def seq(self, value):\n \n if value == self._defaults['seq'] and 'seq' in self._values:\n del self._values['seq']\n else:\n self._values['seq'] = value", "docstring": "The seq property.\n\nArgs:\nvalue (string). 
the property value.", "source": "juraj-google-style"} {"code": "def sample_point(input_features: torch.Tensor, point_coordinates: torch.Tensor, add_dim=False, **kwargs) -> torch.Tensor:\n if point_coordinates.dim() == 3:\n add_dim = True\n point_coordinates = point_coordinates.unsqueeze(2)\n point_features = torch.nn.functional.grid_sample(input_features, 2.0 * point_coordinates - 1.0, **kwargs)\n if add_dim:\n point_features = point_features.squeeze(3)\n return point_features", "docstring": "A wrapper around `torch.nn.functional.grid_sample` to support 3D point_coordinates tensors.\n\nArgs:\ninput_features (`torch.Tensor` of shape (batch_size, channels, height, width)):\nA tensor that contains features map on a height * width grid\npoint_coordinates (`torch.Tensor` of shape (batch_size, num_points, 2) or (batch_size, grid_height, grid_width,:\n2)):\nA tensor that contains [0, 1] * [0, 1] normalized point coordinates\nadd_dim (`bool`):\nboolean value to keep track of added dimension\n\nReturns:\npoint_features (`torch.Tensor` of shape (batch_size, channels, num_points) or (batch_size, channels,\nheight_grid, width_grid):\nA tensor that contains features for points in `point_coordinates`.", "source": "github-repos"} {"code": "def receive_datagram(self, data, address):\n \n\n \n if not self.app:\n logger.debug(\"Packet received\", address, data)\n return False\n\n \n \n try:\n response = self.app.handle_message(data, address)\n except Exception as err:\n logger.error(\"Error processing message from \" + str(address) +\n \":\" + str(data))\n logger.error(traceback.format_exc())\n return False\n\n \n \n if response:\n self.send_datagram(response, address)", "docstring": "Executes when UDP data has been received and sends the packet data\nto our app to process the request.\n\nArgs:\ndata (str): The raw serialized packet data received.\naddress (tuple): The address and port of the origin of the received\npacket. E.g. (address, port).\n\nReturns:\nNone", "source": "juraj-google-style"} {"code": "def ensure_image_is_hex(input_path):\n family = utilities.get_family('module_settings.json')\n target = family.platform_independent_target()\n build_dir = target.build_dirs()['build']\n if (platform.system() == 'Windows'):\n env = Environment(tools=['mingw'], ENV=os.environ)\n else:\n env = Environment(tools=['default'], ENV=os.environ)\n input_path = str(input_path)\n image_name = os.path.basename(input_path)\n (root, ext) = os.path.splitext(image_name)\n if (len(ext) == 0):\n raise BuildError('Unknown file format or missing file extension in ensure_image_is_hex', file_name=input_path)\n file_format = ext[1:]\n if (file_format == 'hex'):\n return input_path\n if (file_format == 'elf'):\n new_file = os.path.join(build_dir, (root + '.hex'))\n if (new_file not in CONVERTED_HEX_FILES):\n env.Command(new_file, input_path, action=Action('arm-none-eabi-objcopy -O ihex $SOURCE $TARGET', 'Creating intel hex file from: $SOURCE'))\n CONVERTED_HEX_FILES.add(new_file)\n return new_file\n raise BuildError('Unknown file format extension in ensure_image_is_hex', file_name=input_path, extension=file_format)", "docstring": "Return a path to a hex version of a firmware image.\n\nIf the input file is already in hex format then input_path\nis returned and nothing is done. 
If it is not in hex format\nthen an SCons action is added to convert it to hex and the\ntarget output file path is returned.\n\nA cache is kept so that each file is only converted once.\n\nArgs:\ninput_path (str): A path to a firmware image.\n\nReturns:\nstr: The path to a hex version of input_path, this may\nbe equal to input_path if it is already in hex format.", "source": "codesearchnet"} {"code": "def __init__(self, recipe=None, project=None, user=None, service=None, client=None, filepath=None, key=None, verbose=False, trace_print=False, trace_file=False):\n starthinker_trace_start(trace_print, trace_file)\n self.recipe = recipe or {}\n self.verbose = verbose\n self.filepath = filepath\n if 'setup' not in self.recipe:\n self.recipe['setup'] = {}\n if 'auth' not in self.recipe['setup']:\n self.recipe['setup']['auth'] = {}\n if service:\n self.recipe['setup']['auth']['service'] = service\n if client:\n self.recipe['setup']['auth']['client'] = client\n if user:\n self.recipe['setup']['auth']['user'] = user\n if project:\n self.recipe['setup']['id'] = project\n if key:\n self.recipe['setup']['key'] = key\n self.project = self.recipe['setup'].get('project', self.recipe['setup'].get('id'))\n self.key = self.recipe['setup'].get('key')\n self.timezone = ZoneInfo(self.recipe['setup'].get('timezone', 'America/Los_Angeles'))\n self.now = datetime.now(self.timezone)\n self.date = self.now.date()\n self.hour = self.now.hour\n if self.verbose:\n print('DATE:', self.now.date())\n print('HOUR:', self.now.hour)", "docstring": "Used in StarThinker scripts as programmatic entry point.\n\nArgs:\n* recipe: (dict) JSON object representing the recipe\n* project: (string) See module description.\n* user: (string) See module description.\n* service: (string) See module description.\n* client: (string) See module description.\n* key: (string) See module description.\n* verbose: (boolean) See module description.\n* trace_print: (boolean) True if writing execution trace to stdout.\n* trace_file: (boolean) True if writing execution trace to file.\n* args: (dict) dictionary of arguments (used with argParse).\n\nReturns:\nNothing.", "source": "github-repos"} {"code": "def get_selector(self, name):\n try:\n return self.matcher.by_name[name]\n except (AttributeError, KeyError):\n if (self.base is not None):\n return self.base.get_selector(name)\n else:\n raise KeyError(\"No selector found for style '{}'\".format(name))", "docstring": "Find a selector mapped to a style in this or a base style sheet.\n\nArgs:\nname (str): a style name\n\nReturns:\n:class:`.Selector`: the selector mapped to the style `name`\n\nRaises:\nKeyError: if the style `name` was not found in this or a base\nstyle sheet", "source": "codesearchnet"} {"code": "def read_samples(self, sr=None, offset=0, duration=None):\n read_duration = self.duration\n if ((offset > 0) and (read_duration is not None)):\n read_duration -= offset\n if (duration is not None):\n if (read_duration is None):\n read_duration = duration\n else:\n read_duration = min(duration, read_duration)\n return self.track.read_samples(sr=sr, offset=(self.start + offset), duration=read_duration)", "docstring": "Read the samples of the utterance.\n\nArgs:\nsr (int): If None uses the sampling rate given by the track,\notherwise resamples to the given sampling rate.\noffset (float): Offset in seconds to read samples from.\nduration (float): If not ``None`` read only this\nnumber of seconds in maximum.\n\nReturns:\nnp.ndarray: A numpy array containing the samples\nas a floating point 
(numpy.float32) time series.", "source": "codesearchnet"} {"code": "def ip_network(address, strict=True):\n \n try:\n return IPv4Network(address, strict)\n except (AddressValueError, NetmaskValueError):\n pass\n\n try:\n return IPv6Network(address, strict)\n except (AddressValueError, NetmaskValueError):\n pass\n\n raise ValueError('%r does not appear to be an IPv4 or IPv6 network' %\n address)", "docstring": "Take an IP string/int and return an object of the correct type.\n\nArgs:\naddress: A string or integer, the IP network. Either IPv4 or\nIPv6 networks may be supplied; integers less than 2**32 will\nbe considered to be IPv4 by default.\n\nReturns:\nAn IPv4Network or IPv6Network object.\n\nRaises:\nValueError: if the string passed isn't either a v4 or a v6\naddress. Or if the network has host bits set.", "source": "juraj-google-style"} {"code": "def __init__(self, field=None):\n \n super().__init__(action_type=ActionType.OFPAT_SET_FIELD)\n self.field = OxmTLV() if field is None else field", "docstring": "Create a ActionSetField with the optional parameters below.\n\nArgs:\nlength (int): length padded to 64 bits, followed by exactly\noxm_len bytes containing a single OXM TLV, then\nexactly ((oxm_len + 4) + 7)/8*8 - (oxm_len + 4)\n(between 0 and 7) bytes of all-zero bytes\nfield (:class:`OxmTLV`): OXM field and value.", "source": "juraj-google-style"} {"code": "def sample(self, n):\n \n total = bq.Query('select count(*) from %s' %\n self._get_source()).execute().result()[0].values()[0]\n if n > total:\n raise ValueError('sample larger than population')\n sampling = bq.Sampling.random(percent=n * 100.0 / float(total))\n if self._query is not None:\n source = self._query\n else:\n source = 'SELECT * FROM `%s`' % self._table\n sample = bq.Query(source).execute(sampling=sampling).result()\n df = sample.to_dataframe()\n return df", "docstring": "Samples data into a Pandas DataFrame. Note that it calls BigQuery so it will\nincur cost.\n\nArgs:\nn: number of sampled counts. 
Note that the number of counts returned is approximated.\nReturns:\nA dataframe containing sampled data.\nRaises:\nException if n is larger than number of rows.", "source": "juraj-google-style"} {"code": "def UpdateBudget(self, client_customer_id, budget_id, micro_amount,\n delivery_method):\n \n self.client.SetClientCustomerId(client_customer_id)\n operations = [{\n 'operator': 'SET',\n 'operand': {\n 'budgetId': budget_id,\n 'amount': {\n 'microAmount': micro_amount\n },\n 'deliveryMethod': delivery_method\n }\n }]\n self.client.GetService('BudgetService').mutate(operations)", "docstring": "Update a Budget with the given budgetId.\n\nArgs:\nclient_customer_id: str Client Customer Id used to update Budget.\nbudget_id: str Id of the budget to be updated.\nmicro_amount: str New value for the microAmount field.\ndelivery_method: str New value for the deliveryMethod field.", "source": "juraj-google-style"} {"code": "def _convert_row_partition(cls, partition, name, dtype=None, dtype_hint=None):\n if dtype_hint is None:\n dtype_hint = dtypes.int64\n if isinstance(partition, np.ndarray) and partition.dtype == np.int32 and (dtype is None):\n partition = ops.convert_to_tensor(partition, name=name)\n else:\n partition = tensor_conversion.convert_to_tensor_v2(partition, dtype_hint=dtype_hint, dtype=dtype, name=name)\n if partition.dtype not in (dtypes.int32, dtypes.int64):\n raise ValueError('%s must have dtype int32 or int64' % name)\n return partition", "docstring": "Converts `partition` to Tensors.\n\nArgs:\npartition: A row-partitioning tensor for the `RowPartition` being\nconstructed. I.e., one of: row_splits, row_lengths, row_starts,\nrow_limits, value_rowids, uniform_row_length.\nname: The name of the row-partitioning tensor.\ndtype: Optional dtype for the RowPartition. If missing, the type\nis inferred from the type of `uniform_row_length`, dtype_hint,\nor tf.int64.\ndtype_hint: Optional dtype for the RowPartition, used when dtype\nis None. 
In some cases, a caller may not have a dtype in mind when\nconverting to a tensor, so dtype_hint can be used as a soft preference.\nIf the conversion to `dtype_hint` is not possible, this argument has no\neffect.\n\nReturns:\nA tensor equivalent to partition.\n\nRaises:\nValueError: if dtype is not int32 or int64.", "source": "github-repos"} {"code": "def get_value_by_row_col(self, row, col):\n \n if row < 0 or row >= self.nRows or col < 0 or col >= self.nCols:\n raise ValueError(\"The row or col must be >=0 and less than \"\n \"nRows (%d) or nCols (%d)!\" % (self.nRows, self.nCols))\n else:\n value = self.data[int(round(row))][int(round(col))]\n if value == self.noDataValue:\n return None\n else:\n return value", "docstring": "Get raster value by (row, col).\n\nArgs:\nrow: row number.\ncol: col number.\n\nReturns:\nraster value, None if the input are invalid.", "source": "juraj-google-style"} {"code": "def create_local_scope_from_def_args(self, call_args, def_args, line_number, saved_function_call_index):\n for i in range(len(call_args)):\n def_arg_local_name = def_args[i]\n def_arg_temp_name = ((('temp_' + str(saved_function_call_index)) + '_') + def_args[i])\n local_scope_node = RestoreNode(((def_arg_local_name + ' = ') + def_arg_temp_name), def_arg_local_name, [def_arg_temp_name], line_number=line_number, path=self.filenames[(- 1)])\n self.nodes[(- 1)].connect(local_scope_node)\n self.nodes.append(local_scope_node)", "docstring": "Create the local scope before entering the body of a function call.\n\nArgs:\ncall_args(list[ast.Name]): Of the call being made.\ndef_args(ast_helper.Arguments): Of the definition being called.\nline_number(int): Of the def of the function call about to be entered into.\nsaved_function_call_index(int): Unique number for each call.\n\nNote: We do not need a connect_if_allowed because of the\npreceding call to save_def_args_in_temp.", "source": "codesearchnet"} {"code": "def _ScanFileSystem(self, scan_node, base_path_specs):\n \n if not scan_node or not scan_node.path_spec:\n raise errors.ScannerError('Invalid or missing file system scan node.')\n\n file_system = resolver.Resolver.OpenFileSystem(scan_node.path_spec)\n if not file_system:\n return\n\n try:\n path_resolver = windows_path_resolver.WindowsPathResolver(\n file_system, scan_node.path_spec.parent)\n\n if self._ScanFileSystemForWindowsDirectory(path_resolver):\n base_path_specs.append(scan_node.path_spec)\n\n finally:\n file_system.Close()", "docstring": "Scans a file system scan node for file systems.\n\nThis method checks if the file system contains a known Windows directory.\n\nArgs:\nscan_node (SourceScanNode): file system scan node.\nbase_path_specs (list[PathSpec]): file system base path specifications.\n\nRaises:\nScannerError: if the scan node is invalid.", "source": "juraj-google-style"} {"code": "def _WriteIfcfg(self, interfaces, logger):\n for interface in interfaces:\n interface_config = os.path.join(self.network_path, ('ifcfg-%s' % interface))\n interface_content = ['\n with open(interface_config, 'w') as interface_file:\n interface_file.write('\\n'.join(interface_content))\n logger.info('Created ifcfg file for interface %s.', interface)", "docstring": "Write ifcfg files for multi-NIC support.\n\nOverwrites the files. 
This allows us to update ifcfg-* in the future.\nDisable the network setup to override this behavior and customize the\nconfigurations.\n\nArgs:\ninterfaces: list of string, the output device names to enable.\nlogger: logger object, used to write to SysLog and serial port.", "source": "codesearchnet"} {"code": "def run_command(self, command, arg=None, is_eval=False, member_id=None):\n logger.debug('run_command({command}, {arg}, {is_eval}, {member_id})'.format(**locals()))\n mode = ((is_eval and 'eval') or 'command')\n hostname = None\n if isinstance(member_id, int):\n hostname = self.member_id_to_host(member_id)\n result = getattr(self.connection(hostname=hostname).admin, mode)(command, arg)\n logger.debug('command result: {result}'.format(result=result))\n return result", "docstring": "run command on replica set\nif member_id is specified command will be execute on this server\nif member_id is not specified command will be execute on the primary\n\nArgs:\ncommand - command string\narg - command argument\nis_eval - if True execute command as eval\nmember_id - member id\n\nreturn command's result", "source": "codesearchnet"} {"code": "def _GetDaysPerMonth(self, year, month):\n if (month not in range(1, 13)):\n raise ValueError('Month value out of bounds.')\n days_per_month = self._DAYS_PER_MONTH[(month - 1)]\n if ((month == 2) and self._IsLeapYear(year)):\n days_per_month += 1\n return days_per_month", "docstring": "Retrieves the number of days in a month of a specific year.\n\nArgs:\nyear (int): year e.g. 1970.\nmonth (int): month, where 1 represents January.\n\nReturns:\nint: number of days in the month.\n\nRaises:\nValueError: if the month value is out of bounds.", "source": "codesearchnet"} {"code": "def _hexvalue_to_hsv(hexvalue):\n \n h = int(hexvalue[7:10], 16) / 360\n s = int(hexvalue[10:12], 16) / 255\n v = int(hexvalue[12:14], 16) / 255\n\n return (h, s, v)", "docstring": "Converts the hexvalue used by tuya for colour representation into\nan HSV value.\n\nArgs:\nhexvalue(string): The hex representation generated by BulbDevice._rgb_to_hexvalue()", "source": "juraj-google-style"} {"code": "def zoom_blur(x, severity=1):\n c = [np.arange(1, 1.11, 0.01), np.arange(1, 1.16, 0.01), np.arange(1, 1.21, 0.02), np.arange(1, 1.26, 0.02), np.arange(1, 1.31, 0.03)][(severity - 1)]\n x = (np.array(x) / 255.0).astype(np.float32)\n out = np.zeros_like(x)\n for zoom_factor in c:\n out += clipped_zoom(x, zoom_factor)\n x = ((x + out) / (len(c) + 1))\n x_clip = (np.clip(x, 0, 1) * 255)\n return around_and_astype(x_clip)", "docstring": "Zoom blurring to images.\n\nApplying zoom blurring to images by zooming the central part of the images.\n\nArgs:\nx: numpy array, uncorrupted image, assumed to have uint8 pixel in [0,255].\nseverity: integer, severity of corruption.\n\nReturns:\nnumpy array, image with uint8 pixels in [0,255]. 
Applied zoom blur.", "source": "codesearchnet"} {"code": "def get_shreds(self, feature_extractors, sheet_name):\n if (self._shreds is None):\n shreds = []\n (_, contours, _) = cv2.findContours(self._foreground_mask, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)\n for (i, contour) in enumerate(contours):\n shred = self._make_shred(contour, i, feature_extractors, sheet_name)\n if (shred is not None):\n shreds.append(shred)\n self._shreds = shreds\n return self._shreds", "docstring": "Detects shreds in the current sheet and constructs Shred instances.\n\nCaches the results for further invocations.\n\nArgs:\nfeature_extractors: iterable of AbstractShredFeature instances to\nuse for shreds feature assignment.\nsheet_name: string, included in shred attributes.\n\nReturns:\nlist of Shred instances.", "source": "codesearchnet"} {"code": "def get_wells(self, uwis=None):\n if (uwis is None):\n return Project(self.__list)\n return Project([w for w in self if (w.uwi in uwis)])", "docstring": "Returns a new Project with only the wells named by UWI.\n\nArgs:\nuwis (list): list or tuple of UWI strings.\n\nReturns:\nproject.", "source": "codesearchnet"} {"code": "def get_collection(self, name, scope=None) -> list[Any]:\n with self._lock:\n collection = self._collections.get(name, None)\n if collection is None:\n return []\n if scope is None:\n return list(collection)\n else:\n c = []\n regex = re.compile(scope)\n for item in collection:\n try:\n if regex.match(item.name):\n c.append(item)\n except AttributeError:\n pass\n return c", "docstring": "Returns a list of values in the collection with the given `name`.\n\nThis is different from `get_collection_ref()` which always returns the\nactual collection list if it exists in that it returns a new list each time\nit is called.\n\nArgs:\nname: The key for the collection. For example, the `GraphKeys` class\ncontains many standard names for collections.\nscope: (Optional.) A string. If supplied, the resulting list is filtered\nto include only items whose `name` attribute matches `scope` using\n`re.match`. Items without a `name` attribute are never returned if a\nscope is supplied. The choice of `re.match` means that a `scope` without\nspecial tokens filters by prefix.\n\nReturns:\nThe list of values in the collection with the given `name`, or\nan empty list if no value has been added to that collection. 
The\nlist contains the values in the order under which they were\ncollected.", "source": "github-repos"} {"code": "def add_input(self, input_):\n \n if not isinstance(input_, Input):\n raise TypeError('`input_` must be a Input instance')\n self.inputs.append(input_)", "docstring": "Adds an input to a Transaction's list of inputs.\n\nArgs:\ninput_ (:class:`~bigchaindb.common.transaction.\nInput`): An Input to be added to the Transaction.", "source": "juraj-google-style"} {"code": "def swo_flush(self, num_bytes=None):\n \n if num_bytes is None:\n num_bytes = self.swo_num_bytes()\n\n buf = ctypes.c_uint32(num_bytes)\n res = self._dll.JLINKARM_SWO_Control(enums.JLinkSWOCommands.FLUSH,\n ctypes.byref(buf))\n if res < 0:\n raise errors.JLinkException(res)\n\n return None", "docstring": "Flushes data from the SWO buffer.\n\nAfter this method is called, the flushed part of the SWO buffer is\nempty.\n\nIf ``num_bytes`` is not present, flushes all data currently in the SWO\nbuffer.\n\nArgs:\nself (JLink): the ``JLink`` instance\nnum_bytes (int): the number of bytes to flush\n\nReturns:\n``None``\n\nRaises:\nJLinkException: on error", "source": "juraj-google-style"} {"code": "def resolve_theme(self, name):\n \n if name not in settings.CODEMIRROR_THEMES:\n msg = (\"Given theme name '{}' does not exists in \"\n \"'settings.CODEMIRROR_THEMES'.\")\n raise UnknowThemeError(msg.format(name))\n\n return settings.CODEMIRROR_THEMES.get(name)", "docstring": "From given theme name, return theme file path from\n``settings.CODEMIRROR_THEMES`` map.\n\nArguments:\nname (string): Theme name.\n\nRaises:\nKeyError: When given name does not exist in\n``settings.CODEMIRROR_THEMES``.\n\nReturns:\nstring: Theme file path.", "source": "juraj-google-style"} {"code": "def _GetNextPath(self):\n paths = sorted((path for path in io_wrapper.ListDirectoryAbsolute(self._directory) if self._path_filter(path)))\n if (not paths):\n return None\n if (self._path is None):\n return paths[0]\n if ((not io_wrapper.IsCloudPath(paths[0])) and (not self._ooo_writes_detected)):\n current_path_index = bisect.bisect_left(paths, self._path)\n ooo_check_start = max(0, (current_path_index - self._OOO_WRITE_CHECK_COUNT))\n for path in paths[ooo_check_start:current_path_index]:\n if self._HasOOOWrite(path):\n self._ooo_writes_detected = True\n break\n next_paths = list((path for path in paths if ((self._path is None) or (path > self._path))))\n if next_paths:\n return min(next_paths)\n else:\n return None", "docstring": "Gets the next path to load from.\n\nThis function also does the checking for out-of-order writes as it iterates\nthrough the paths.\n\nReturns:\nThe next path to load events from, or None if there are no more paths.", "source": "codesearchnet"} {"code": "def __init__(self, sess, bad_init_action=None, bad_run_start_action=None, bad_debug_urls=None):\n self._bad_init_action = bad_init_action\n self._bad_run_start_action = bad_run_start_action\n self._bad_debug_urls = bad_debug_urls\n framework.BaseDebugWrapperSession.__init__(self, sess)", "docstring": "Constructor.\n\nArgs:\nsess: The TensorFlow Session object to be wrapped.\nbad_init_action: (str) bad action value to be returned during the\non-session-init callback.\nbad_run_start_action: (str) bad action value to be returned during the\nthe on-run-start callback.\nbad_debug_urls: Bad URL values to be returned during the on-run-start\ncallback.", "source": "github-repos"} {"code": "def assert_eventual(self, func, required, allowed, timeout_secs=300.0):\n required = set(required)\n 
assert required\n seen = set()\n start_time = time.time()\n while timeout_secs is None or time.time() - start_time < timeout_secs:\n if seen == required:\n return\n value = func()\n if value not in allowed:\n self.fail(msg=f'Disallowed value: {value}.')\n if value in required:\n seen.add(value)\n missing = [v for v in required if v not in seen]\n self.fail(msg=f'Timed out. Missing values: {str([str(v) for v in missing])}.')", "docstring": "Tests that calls to the given function meet required and allowed values.\n\nArgs:\nfunc: function to test.\nrequired: iterable of required values. Must be hashable and non-empty.\nallowed: iterable of allowed values. Must be hashable and non-empty.\ntimeout_secs: fails if more than this time is required.", "source": "github-repos"} {"code": "def _translate_to_fulltype_for_flat_tensors(spec: type_spec.TypeSpec) -> List[full_type_pb2.FullTypeDef]:\n if isinstance(spec, RaggedTensorSpec):\n dt = spec.dtype\n elem_t = _DT_TO_FT.get(dt)\n if elem_t is None:\n logging.vlog(1, 'dtype %s that has no conversion to fulltype.', dt)\n elif elem_t == full_type_pb2.TFT_LEGACY_VARIANT:\n logging.vlog(1, 'Ragged tensors containing variants are not supported.', dt)\n else:\n assert len(spec._flat_tensor_specs) == 1\n return [full_type_pb2.FullTypeDef(type_id=full_type_pb2.TFT_RAGGED, args=[full_type_pb2.FullTypeDef(type_id=elem_t)])]\n return [full_type_pb2.FullTypeDef(type_id=full_type_pb2.TFT_UNSET) for t in spec._flat_tensor_specs]", "docstring": "Convert a TypeSpec to a list of FullTypeDef.\n\nThe FullTypeDef created corresponds to the encoding used with datasets\n(and map_fn) that uses variants (and not FullTypeDef corresponding to the\ndefault \"component\" encoding).\n\nCurrently, the only use of this is for information about the contents of\nragged tensors, so only ragged tensors return useful full type information\nand other types return TFT_UNSET. While this could be improved in the future,\nthis function is intended for temporary use and expected to be removed\nwhen type inference support is sufficient.\n\nArgs:\nspec: A TypeSpec for one element of a dataset or map_fn.\n\nReturns:\nA list of FullTypeDef corresponding to SPEC. 
The length of this list\nis always the same as the length of spec._flat_tensor_specs.", "source": "github-repos"} {"code": "def draw_layer(ax, layer):\n \n ax.set_aspect('equal', 'datalim')\n ax.plot(*layer)\n ax.axis('off')", "docstring": "Draws a layer on the given matplotlib axis.\n\nArgs:\nax (axis): the matplotlib axis to draw on\nlayer (layer): the layers to plot", "source": "juraj-google-style"} {"code": "def send_rpc_request(self, request):\n self._client_send(request)\n response = self._client_receive()\n if not response:\n raise errors.ProtocolError(self._device, errors.ProtocolError.NO_RESPONSE_FROM_SERVER)\n return self._decode_socket_response_bytes(response)", "docstring": "Sends an RPC request to the server and receives a response.\n\nArgs:\nrequest: str, the request to send the server.\n\nReturns:\nThe string of the RPC response.\n\nRaises:\nerrors.Error: if failed to send the request or receive a response.\nerrors.ProtocolError: if received an empty response from the server.\nUnicodeError: if failed to decode the received response.", "source": "github-repos"} {"code": "def expression_filter(self, name, **kwargs):\n \n\n def decorator(func):\n self.filters[name] = ExpressionFilter(name, func, **kwargs)\n\n return decorator", "docstring": "Returns a decorator function for adding an expression filter.\n\nArgs:\nname (str): The name of the filter.\n**kwargs: Variable keyword arguments for the filter.\n\nReturns:\nCallable[[Callable[[AbstractExpression, Any], AbstractExpression]]]: A decorator\nfunction for adding an expression filter.", "source": "juraj-google-style"} {"code": "def set_all_xlims(self, xlim, dx, xscale, fontsize=None):\n self._set_all_lims('x', xlim, dx, xscale, fontsize)\n return", "docstring": "Set limits and ticks for x axis for whole figure.\n\nThis will set x axis limits and tick marks for the entire figure.\nIt can be overridden in the SinglePlot class.\n\nArgs:\nxlim (len-2 list of floats): The limits for the axis.\ndx (float): Amount to increment by between the limits.\nxscale (str): Scale of the axis. Either `log` or `lin`.\nfontsize (int, optional): Set fontsize for x axis tick marks.\nDefault is None.", "source": "codesearchnet"} {"code": "def match(self, part: ProcessorPart) -> bool:\n ...", "docstring": "Returns True if `part` should be processed by this part processor.\n\nReturns False if it sure that the part processor will not process the input\npart and that the part processor should pass the part as is.\n\nNOTE: the part processor `__call__` implementation should always skip the\npart (i.e. return the part as is) when `match` returns False.\n\nA typical example are part processors that are type-dependent, e.g. a part\nprocessor that parses a specific proto from the part or that only parses\ntext.\n\nArgs:\npart: the part to check.\n\nReturns:\nFalse if the part has no chance of being processed by this part\nprocessor. 
True otherwise.", "source": "github-repos"} {"code": "def remove_snippet_client(self, name):\n if name not in self._snippet_clients:\n raise Error(self._device, MISSING_SNIPPET_CLIENT_MSG % name)\n client = self._snippet_clients.pop(name)\n client.stop()", "docstring": "Removes a snippet client from management.\n\nArgs:\nname: string, the name of the snippet client to remove.\n\nRaises:\nError: if no snippet client is managed under the specified name.", "source": "github-repos"} {"code": "def _get_num_multimodal_tokens(self, image_sizes=None, **kwargs):\n vision_data = {}\n if image_sizes is not None:\n images_kwargs = InternVLProcessorKwargs._defaults.get('images_kwargs', {})\n images_kwargs.update(kwargs)\n num_image_patches = [self.image_processor.get_number_of_image_tokens(*image_size, images_kwargs) for image_size in image_sizes]\n num_image_tokens = [2 + self.image_seq_length * num_patches for num_patches in num_image_patches]\n vision_data.update({'num_image_tokens': num_image_tokens, 'num_image_patches': num_image_patches})\n return MultiModalData(**vision_data)", "docstring": "Computes the number of placeholder tokens needed for multimodal inputs with the given sizes.\n\nArgs:\nimage_sizes (`List[List[int]]`, *optional*):\nThe input sizes formatted as (height, width) per each image.\n\nReturns:\n`MultiModalData`: A `MultiModalData` object holding number of tokens per each of the provided\ninput modalities, along with other useful data.", "source": "github-repos"} {"code": "def plot_dendrogram(ax, obj, show_diameters=True):\n \n \n dnd = Dendrogram(obj, show_diameters=show_diameters)\n dnd.generate()\n\n \n \n \n\n _render_dendrogram(dnd, ax, 0.)\n\n ax.set_title('Morphology Dendrogram')\n ax.set_xlabel('micrometers (um)')\n ax.set_ylabel('micrometers (um)')\n\n ax.set_aspect('auto')\n ax.legend()", "docstring": "Dendrogram of `obj`\n\nArgs:\nobj: Neuron or tree \\\nneurom.Neuron, neurom.Tree\nshow_diameters : boolean \\\nDetermines if node diameters will \\\nbe show or not.", "source": "juraj-google-style"} {"code": "def _price_lognormal_rate(self, valuation_date, market, pricing_context):\n discount_curve = market.discount_curve\n discount_factors = tf.where(self._payment_dates > valuation_date, discount_curve.get_discount_factor(self._payment_dates), 0.0)\n forward_rates = self._get_forward_rate(valuation_date, market)\n if pricing_context is None:\n volatility_surface = market.volatility_curve\n black_vols = volatility_surface.interpolate(self._reset_dates, self._strike, self._term)\n else:\n black_vols = tf.convert_to_tensor(pricing_context, dtype=self._dtype)\n expiry_times = dates.daycount_actual_365_fixed(start_date=valuation_date, end_date=self._reset_dates, dtype=self._dtype)\n caplet_prices = black_scholes.option_price(forwards=forward_rates, strikes=self._strike, volatilities=black_vols, expiries=expiry_times, is_call_options=self._is_cap)\n intrinsic_value = tf.where(self._is_cap, tf.math.maximum(forward_rates - self._strike, 0.0), tf.math.maximum(self._strike - forward_rates, 0))\n caplet_prices = tf.where(self._payment_dates < valuation_date, tf.constant(0.0, dtype=self._dtype), tf.where(self._accrual_start_dates < valuation_date, intrinsic_value, caplet_prices))\n caplet_prices = self._notional * self._daycount_fractions * caplet_prices\n return discount_factors * caplet_prices", "docstring": "Computes caplet/floorlet prices using lognormal model for forward rates.\n\nThe function computes individual caplet prices for the batch of caps/floors\nusing the lognormal 
model for the forward rates. If the volatilities are\nsupplied (through the input `pricing_context`) then they are used as\nforward rate volatilities. Otherwise, volatilities are extracted using the\nvolatility surface for `market`.\n\nArgs:\nvaluation_date: A scalar `DateTensor` specifying the date on which\nvaluation is desired.\nmarket: A namedtuple of type `InterestRateMarket` which contains the\nnecessary information for pricing the Cap/Floor.\npricing_context: An optional input containing the black volatility for\nthe forward rates.\n\nReturns:\nA Rank 1 `Tensor` of real type containing the price of each caplet\n(or floorlet) based on the lognormal model for forward rates.", "source": "github-repos"} {"code": "def _put_into_indexes(self, obj):\n no_of_used_indexes = 0\n for (field_name, db_index) in list(self._get_db_fields(obj)):\n attr_value = getattr(obj, field_name)\n if (attr_value is None):\n continue\n container = db_index.get(attr_value, None)\n if (container is None):\n container = OOTreeSet()\n db_index[attr_value] = container\n container.insert(obj)\n no_of_used_indexes += 1\n if (no_of_used_indexes <= 0):\n raise UnindexableObject('You have to use at least one of the identifiers!')", "docstring": "Put publication into all indexes.\n\nAttr:\nobj (obj): Indexable object.\n\nRaises:\nUnindexableObject: When there is no index (property) which can be\nused to index `obj` in database.", "source": "codesearchnet"} {"code": "def create_resource(self, parent_id=\"\"):\n \n resource_name = self.trigger_settings.get('resource', '')\n resource_name = resource_name.replace('/', '')\n if not self.resource_id:\n created_resource = self.client.create_resource(\n restApiId=self.api_id, parentId=parent_id, pathPart=resource_name)\n self.resource_id = created_resource['id']\n self.log.info(\"Successfully created resource\")\n else:\n self.log.info(\"Resource already exists. To update resource please delete existing resource: %s\",\n resource_name)", "docstring": "Create the specified resource.\n\nArgs:\nparent_id (str): The resource ID of the parent resource in API Gateway", "source": "juraj-google-style"} {"code": "def FlowAccumFromProps(props, weights=None, in_place=False):\n if (type(props) is not rd3array):\n raise Exception('A richdem.rd3array or numpy.ndarray is required!')\n if ((weights is not None) and in_place):\n accum = rdarray(weights, no_data=(- 1))\n elif ((weights is not None) and (not in_place)):\n accum = rdarray(weights, copy=True, meta_obj=props, no_data=(- 1))\n elif (weights is None):\n accum = rdarray(np.ones(shape=props.shape[0:2], dtype='float64'), meta_obj=props, no_data=(- 1))\n else:\n raise Exception('Execution should never reach this point!')\n if (accum.dtype != 'float64'):\n raise Exception(\"Accumulation array must be of type 'float64'!\")\n accumw = accum.wrap()\n _AddAnalysis(accum, 'FlowAccumFromProps(dem, weights={weights}, in_place={in_place})'.format(weights=('None' if (weights is None) else 'weights'), in_place=in_place))\n _richdem.FlowAccumulation(props.wrap(), accumw)\n accum.copyFromWrapped(accumw)\n return accum", "docstring": "Calculates flow accumulation from flow proportions.\n\nArgs:\nprops (rdarray): An elevation model\nweights (rdarray): Flow accumulation weights to use. This is the\namount of flow generated by each cell. If this is\nnot provided, each cell will generate 1 unit of\nflow.\nin_place (bool): If True, then `weights` is modified in place. 
An\naccumulation matrix is always returned, but it will\njust be a view of the modified data if `in_place`\nis True.\n\nReturns:\nA flow accumulation array. If `weights` was provided and `in_place` was\nTrue, then this matrix is a view of the modified data.", "source": "codesearchnet"} {"code": "def parse(file_contents, file_name):\n env = Environment()\n result = ''\n try:\n env.parse(file_contents)\n except Exception:\n (_, exc_value, _) = sys.exc_info()\n result += 'ERROR: Jinja2 Template File: {0}'.format(file_name)\n result += (repr(exc_value) + '\\n')\n return result", "docstring": "Takes a list of files which are assumed to be jinja2 templates and tries to\nparse the contents of the files\n\nArgs:\nfile_contents (str): File contents of a jinja file\n\nRaises:\nException: An exception is raised if the contents of the file cannot be\nparsed.", "source": "codesearchnet"} {"code": "def _get_mpr_view(self, connection, table):\n logger.debug('Looking for view of the table.\\n table: {}'.format(table.vid))\n view = self.get_view_name(table)\n view_exists = self._relation_exists(connection, view)\n if view_exists:\n logger.debug('View of the table exists.\\n table: {}, view: {}'.format(table.vid, view))\n return view\n raise MissingViewError('sqlite database does not have view for {} table.'.format(table.vid))", "docstring": "Finds and returns view name in the sqlite db represented by given connection.\n\nArgs:\nconnection: connection to sqlite db where to look for partition table.\ntable (orm.Table):\n\nRaises:\nMissingViewError: if database does not have partition table.\n\nReturns:\nstr: database table storing partition data.", "source": "codesearchnet"} {"code": "def bitwise_not(x):\n if any_symbolic_tensors((x,)):\n return BitwiseNot().symbolic_call(x)\n return backend.numpy.bitwise_not(x)", "docstring": "Compute bit-wise inversion, or bit-wise NOT, element-wise.\n\nComputes the bit-wise NOT of the underlying binary representation of the\nintegers in the input arrays. This ufunc implements the C/Python operator\n`~`.\n\nArgs:\nx: Input integer tensor.\n\nReturns:\nResult tensor.", "source": "github-repos"} {"code": "def branch(self):\n cmd = ['git', 'symbolic-ref', '--short', 'HEAD']\n try:\n output = self.sh(cmd, shell=False, ignore_error=True).rstrip()\n except subprocess.CalledProcessError as e:\n log.exception(e)\n return ('\n if output.startswith('fatal: ref HEAD is not a symbolic ref'):\n output = ('\n return output", "docstring": "Determine the branch name of the working directory of this Repository\n\nReturns:\nstr: branch name (``git symbolic-ref --short HEAD``)", "source": "codesearchnet"} {"code": "def get_submissions_for_student_item(request, course_id, student_id, item_id):\n student_item_dict = dict(course_id=course_id, student_id=student_id, item_id=item_id)\n context = dict(**student_item_dict)\n try:\n submissions = get_submissions(student_item_dict)\n context['submissions'] = submissions\n except SubmissionRequestError:\n context['error'] = 'The specified student item was not found.'\n return render_to_response('submissions.html', context)", "docstring": "Retrieve all submissions associated with the given student item.\n\nDeveloper utility for accessing all the submissions associated with a\nstudent item. 
The student item is specified by the unique combination of\ncourse, student, and item.\n\nArgs:\nrequest (dict): The request.\ncourse_id (str): The course id for this student item.\nstudent_id (str): The student id for this student item.\nitem_id (str): The item id for this student item.\n\nReturns:\nHttpResponse: The response object for this request. Renders a simple\ndevelopment page with all the submissions related to the specified\nstudent item.", "source": "codesearchnet"} {"code": "def _prepare_init_params_from_job_description(cls, job_details, model_channel_name=None):\n \n init_params = dict()\n\n init_params['role'] = job_details['RoleArn']\n init_params['train_instance_count'] = job_details['ResourceConfig']['InstanceCount']\n init_params['train_instance_type'] = job_details['ResourceConfig']['InstanceType']\n init_params['train_volume_size'] = job_details['ResourceConfig']['VolumeSizeInGB']\n init_params['train_max_run'] = job_details['StoppingCondition']['MaxRuntimeInSeconds']\n init_params['input_mode'] = job_details['AlgorithmSpecification']['TrainingInputMode']\n init_params['base_job_name'] = job_details['TrainingJobName']\n init_params['output_path'] = job_details['OutputDataConfig']['S3OutputPath']\n init_params['output_kms_key'] = job_details['OutputDataConfig']['KmsKeyId']\n\n has_hps = 'HyperParameters' in job_details\n init_params['hyperparameters'] = job_details['HyperParameters'] if has_hps else {}\n\n if 'TrainingImage' in job_details['AlgorithmSpecification']:\n init_params['image'] = job_details['AlgorithmSpecification']['TrainingImage']\n elif 'AlgorithmName' in job_details['AlgorithmSpecification']:\n init_params['algorithm_arn'] = job_details['AlgorithmSpecification']['AlgorithmName']\n else:\n raise RuntimeError('Invalid AlgorithmSpecification. Either TrainingImage or '\n 'AlgorithmName is expected. 
None was found.')\n\n if 'MetricDefinitons' in job_details['AlgorithmSpecification']:\n init_params['metric_definitions'] = job_details['AlgorithmSpecification']['MetricsDefinition']\n\n if 'EnableInterContainerTrafficEncryption' in job_details:\n init_params['encrypt_inter_container_traffic'] = \\\n job_details['EnableInterContainerTrafficEncryption']\n\n subnets, security_group_ids = vpc_utils.from_dict(job_details.get(vpc_utils.VPC_CONFIG_KEY))\n if subnets:\n init_params['subnets'] = subnets\n if security_group_ids:\n init_params['security_group_ids'] = security_group_ids\n\n if 'InputDataConfig' in job_details and model_channel_name:\n for channel in job_details['InputDataConfig']:\n if channel['ChannelName'] == model_channel_name:\n init_params['model_channel_name'] = model_channel_name\n init_params['model_uri'] = channel['DataSource']['S3DataSource']['S3Uri']\n break\n\n return init_params", "docstring": "Convert the job description to init params that can be handled by the class constructor\n\nArgs:\njob_details: the returned job details from a describe_training_job API call.\nmodel_channel_name (str): Name of the channel where pre-trained model data will be downloaded.\n\nReturns:\ndictionary: The transformed init_params", "source": "juraj-google-style"} {"code": "def sync_l(self, option: str = 'all') -> None:\n \n if option in ['system', 'vendor', 'oem', 'data', 'all']:\n self._execute('-s', self.device_sn, 'sync', '-l', option)\n else:\n raise ValueError('There is no option named: {!r}.'.format(option))", "docstring": "List but don't copy.\n\nArgs:\noption: 'system', 'vendor', 'oem', 'data', 'all'", "source": "juraj-google-style"} {"code": "def parse_geometry(ml_log, log=None, ml_version='2016.12', print_output=False):\n \n \n aabb = {}\n geometry = {'aabb':aabb}\n with open(ml_log) as fread:\n for line in fread:\n if 'Mesh Bounding Box min' in line: \n geometry['aabb']['min'] = (line.split()[4:7])\n geometry['aabb']['min'] = [util.to_float(val) for val in geometry['aabb']['min']]\n if 'Mesh Bounding Box max' in line: \n geometry['aabb']['max'] = (line.split()[4:7])\n geometry['aabb']['max'] = [util.to_float(val) for val in geometry['aabb']['max']]\n if 'Mesh Bounding Box Size' in line: \n geometry['aabb']['size'] = (line.split()[4:7])\n geometry['aabb']['size'] = [util.to_float(val) for val in geometry['aabb']['size']]\n if 'Mesh Bounding Box Diag' in line: \n geometry['aabb']['diagonal'] = util.to_float(line.split()[4])\n if 'Mesh Volume' in line:\n geometry['volume_mm3'] = util.to_float(line.split()[3])\n geometry['volume_cm3'] = geometry['volume_mm3'] * 0.001\n if 'Mesh Surface' in line:\n if ml_version == '1.3.4BETA':\n geometry['area_mm2'] = util.to_float(line.split()[3])\n else:\n geometry['area_mm2'] = util.to_float(line.split()[4])\n geometry['area_cm2'] = geometry['area_mm2'] * 0.01\n if 'Mesh Total Len of' in line:\n if 'including faux edges' in line:\n geometry['total_edge_length_incl_faux'] = util.to_float(\n line.split()[7])\n else:\n geometry['total_edge_length'] = util.to_float(\n line.split()[7])\n if 'Thin shell barycenter' in line:\n geometry['barycenter'] = (line.split()[3:6])\n geometry['barycenter'] = [util.to_float(val) for val in geometry['barycenter']]\n if 'Thin shell (faces) barycenter' in line: \n geometry['barycenter'] = (line.split()[4:7])\n geometry['barycenter'] = [util.to_float(val) for val in geometry['barycenter']]\n if 'Vertices barycenter' in line: \n geometry['vert_barycenter'] = (line.split()[2:5])\n geometry['vert_barycenter'] = 
[util.to_float(val) for val in geometry['vert_barycenter']]\n if 'Center of Mass' in line:\n geometry['center_of_mass'] = (line.split()[4:7])\n geometry['center_of_mass'] = [util.to_float(val) for val in geometry['center_of_mass']]\n if 'Inertia Tensor' in line:\n geometry['inertia_tensor'] = []\n for val in range(3):\n row = (next(fread, val).split()[1:4])\n row = [util.to_float(b) for b in row]\n geometry['inertia_tensor'].append(row)\n if 'Principal axes' in line:\n geometry['principal_axes'] = []\n for val in range(3):\n row = (next(fread, val).split()[1:4])\n row = [util.to_float(b) for b in row]\n geometry['principal_axes'].append(row)\n if 'axis momenta' in line:\n geometry['axis_momenta'] = (next(fread).split()[1:4])\n geometry['axis_momenta'] = [util.to_float(val) for val in geometry['axis_momenta']]\n break \n for key, value in geometry.items():\n if log is not None:\n log_file = open(log, 'a')\n log_file.write('{:27} = {}\\n'.format(key, value))\n log_file.close()\n elif print_output:\n print('{:27} = {}'.format(key, value))\n return geometry", "docstring": "Parse the ml_log file generated by the measure_geometry function.\n\nWarnings: Not all keys may exist if mesh is not watertight or manifold\n\nArgs:\nml_log (str): MeshLab log file to parse\nlog (str): filename to log output", "source": "juraj-google-style"} {"code": "def DeregisterAttributeContainer(cls, attribute_container_class):\n \n container_type = attribute_container_class.CONTAINER_TYPE.lower()\n if container_type not in cls._attribute_container_classes:\n raise KeyError(\n 'Attribute container class not set for container type: '\n '{0:s}.'.format(attribute_container_class.CONTAINER_TYPE))\n\n del cls._attribute_container_classes[container_type]", "docstring": "Deregisters an attribute container class.\n\nThe attribute container classes are identified based on their lower case\ncontainer type.\n\nArgs:\nattribute_container_class (type): attribute container class.\n\nRaises:\nKeyError: if attribute container class is not set for\nthe corresponding container type.", "source": "juraj-google-style"} {"code": "def convert_tokens_to_ids(self, tokens: Union[str, list[str]]) -> Union[int, list[int]]:\n if tokens is None:\n return None\n if isinstance(tokens, str):\n return self._convert_token_to_id_with_added_voc(tokens)\n ids = []\n for token in tokens:\n ids.append(self._convert_token_to_id_with_added_voc(token))\n return ids", "docstring": "Converts a token string (or a sequence of tokens) in a single integer id (or a sequence of ids), using the\nvocabulary.\n\nArgs:\ntokens (`str` or `List[str]`): One or several token(s) to convert to token id(s).\n\nReturns:\n`int` or `List[int]`: The token id or list of token ids.", "source": "github-repos"} {"code": "def _prefix_from_prefix_string(cls, prefixlen_str):\n \n \n \n if not _BaseV4._DECIMAL_DIGITS.issuperset(prefixlen_str):\n cls._report_invalid_netmask(prefixlen_str)\n try:\n prefixlen = int(prefixlen_str)\n except ValueError:\n cls._report_invalid_netmask(prefixlen_str)\n if not (0 <= prefixlen <= cls._max_prefixlen):\n cls._report_invalid_netmask(prefixlen_str)\n return prefixlen", "docstring": "Return prefix length from a numeric string\n\nArgs:\nprefixlen_str: The string to be converted\n\nReturns:\nAn integer, the prefix length.\n\nRaises:\nNetmaskValueError: If the input is not a valid netmask", "source": "juraj-google-style"} {"code": "def pretty_str(something, indent=0):\n if isinstance(something, CodeEntity):\n return something.pretty_str(indent=indent)\n else:\n 
return ((' ' * indent) + repr(something))", "docstring": "Return a human-readable string representation of an object.\n\nUses `pretty_str` if the given value is an instance of\n`CodeEntity` and `repr` otherwise.\n\nArgs:\nsomething: Some value to convert.\n\nKwargs:\nindent (int): The amount of spaces to use as indentation.", "source": "codesearchnet"} {"code": "def match_validator(expression):\n \n if isinstance(expression, str):\n compiled = re.compile(expression)\n elif hasattr(expression, 'match'):\n \n compiled = expression\n else:\n raise TypeError(\n 'Provided match is neither a string nor has a match method '\n '(like re expressions)'\n )\n\n def validator(value):\n if not compiled.match(value):\n \n raise ValidationError(\n \"{} does not match pattern: {}\".format(\n value,\n compiled.pattern\n if hasattr(compiled, 'pattern')\n else compiled\n )\n )\n\n return validator", "docstring": "Return validator function that will check if value matches given expression.\n\nArgs:\nexpression: if string then this will be converted to regular expression\nusing ``re.compile``. Can also be any object that has ``match()``\nmethod like an already compiled regular expression or custom\nmatching object/class.", "source": "juraj-google-style"} {"code": "def scored_to_phenotype(self, phenotypes):\n\n def _apply_score(scored_calls, phenotypes):\n present = sorted(list((set(phenotypes) & set(scored_calls.keys()))))\n total = sum([scored_calls[x] for x in present])\n if (total > 1):\n raise ValueError('You cannot extract phenotypes from scores if they are not mutually exclusive')\n if (total == 0):\n return np.nan\n for label in present:\n if (scored_calls[label] == 1):\n return label\n raise ValueError('Should have hit an exit criteria already')\n output = self.copy()\n output['phenotype_label'] = output.apply((lambda x: _apply_score(x['scored_calls'], phenotypes)), 1)\n output['phenotype_calls'] = output.apply((lambda x: dict([(y, (1 if (x['phenotype_label'] == y) else 0)) for y in phenotypes])), 1)\n return output", "docstring": "Convert binary phenotypes to mutually exclusive phenotypes.\nIf none of the phenotypes are set, then phenotype_label becomes nan\nIf any of the phenotypes are multiply set then it throws a fatal error.\n\nArgs:\nphenotypes (list): a list of scored_names to convert to phenotypes\n\nReturns:\nCellDataFrame", "source": "codesearchnet"} {"code": "def resolve_mode(self, name):\n if (name not in settings.CODEMIRROR_MODES):\n msg = \"Given config name '{}' does not exist in 'settings.CODEMIRROR_MODES'.\"\n raise UnknowModeError(msg.format(name))\n return settings.CODEMIRROR_MODES.get(name)", "docstring": "From given mode name, return mode file path from\n``settings.CODEMIRROR_MODES`` map.\n\nArguments:\nname (string): Mode name.\n\nRaises:\nKeyError: When given name does not exist in\n``settings.CODEMIRROR_MODES``.\n\nReturns:\nstring: Mode file path.", "source": "codesearchnet"} {"code": "def aggregate(self, index):\n if isinstance(index, string_types):\n col_df_grouped = self.col_df.groupby(self.df[index])\n else:\n self.col_df.index = pd.MultiIndex.from_arrays([self.df[i] for i in index])\n col_df_grouped = self.col_df.groupby(level=index)\n self.col_df.index = self.df.index\n self.reduced_df = pd.DataFrame({colred: col_df_grouped[colred.column].agg(colred.agg_func) for colred in self.column_reductions})\n reduced_dfs = []\n for cf in self.column_functions:\n reduced_dfs.append(cf.apply_and_name(self))\n return pd.concat(reduced_dfs, axis=1)", "docstring": "Performs a groupby of the unique 
Columns by index, as constructed from self.df.\n\nArgs:\nindex (str, or pd.Index): Index or column name of self.df.\n\nReturns:\npd.DataFrame: A dataframe, aggregated by index, that contains the result\nof the various ColumnFunctions, and named accordingly.", "source": "codesearchnet"} {"code": "def set_uid(self, uid, schema=None):\n \n try:\n uid, schema = author_id_normalize_and_schema(uid, schema)\n except UnknownUIDSchema:\n \n \n \n pass\n\n self._ensure_field('ids', [])\n self.obj['ids'] = [id_ for id_ in self.obj['ids'] if id_.get('schema') != schema]\n self._add_uid(uid, schema)", "docstring": "Set a unique ID.\n\nIf a UID of a given schema already exists in a record it will\nbe overwritten, otherwise it will be appended to the record.\n\nArgs:\nuid (string): unique identifier.\nschema (Optional[string]): schema of the unique identifier. If\n``None``, the schema will be guessed based on the shape of\n``uid``.\n\nRaises:\nSchemaUIDConflict: it UID and schema are not matching", "source": "juraj-google-style"} {"code": "def text_filepaths_for_task(self, tmp_dir, task_id):\n assert (task_id >= 0)\n assert (task_id < (self.num_train_shards + self.num_dev_shards))\n if (task_id < self.num_train_shards):\n return [f for (i, f) in enumerate(self.train_text_filepaths(tmp_dir)) if ((i % self.num_train_shards) == task_id)]\n else:\n return [f for (i, f) in enumerate(self.dev_text_filepaths(tmp_dir)) if ((i % self.num_dev_shards) == (task_id - self.num_train_shards))]", "docstring": "List of input filepaths for a particular training or dev shard.\n\nArgs:\ntmp_dir: a string\ntask_id: an integer less than self.num_shards\nReturns:\na list of tuples (filepath, start_pos, num_bytes)", "source": "codesearchnet"} {"code": "def __response_message_descriptor(self, message_type, method_id):\n descriptor = {'200': {'description': 'A successful response'}}\n if (message_type != message_types.VoidMessage()):\n self.__parser.add_message(message_type.__class__)\n self.__response_schema[method_id] = self.__parser.ref_for_message_type(message_type.__class__)\n descriptor['200']['schema'] = {'$ref': '\n return dict(descriptor)", "docstring": "Describes the response.\n\nArgs:\nmessage_type: messages.Message class, The message to describe.\nmethod_id: string, Unique method identifier (e.g. 
'myapi.items.method')\n\nReturns:\nDictionary describing the response.", "source": "codesearchnet"} {"code": "def __init__(self, feature_dict, length=None, **kwargs):\n \n \n \n \n self._length = length\n super(SequenceDict, self).__init__(feature_dict, **kwargs)", "docstring": "Construct a sequence dict.\n\nArgs:\nfeature_dict: `dict`, the features to wrap\nlength: `int`, length of the sequence if static and known in advance\n**kwargs: `dict`, constructor kwargs of `tfds.features.FeaturesDict`", "source": "juraj-google-style"} {"code": "def to_json_string(self, use_diff: bool=True) -> str:\n if use_diff is True:\n config_dict = self.to_diff_dict()\n else:\n config_dict = self.to_dict()\n return json.dumps(config_dict, indent=2, sort_keys=True) + '\\n'", "docstring": "Serializes this instance to a JSON string.\n\nArgs:\nuse_diff (`bool`, *optional*, defaults to `True`):\nIf set to `True`, only the difference between the config instance and the default `PretrainedConfig()`\nis serialized to JSON string.\n\nReturns:\n`str`: String containing all the attributes that make up this configuration instance in JSON format.", "source": "github-repos"} {"code": "def save_pickle(obj, outfile, protocol=2):\n \n with open(outfile, 'wb') as f:\n pickle.dump(obj, f, protocol=protocol)\n\n return outfile", "docstring": "Save the object as a pickle file\n\nArgs:\noutfile (str): Filename\nprotocol (int): Pickle protocol to use. Default is 2 to remain compatible with Python 2\n\nReturns:\nstr: Path to pickle file", "source": "juraj-google-style"} {"code": "def create_s3_event(app_name, env, region, bucket, triggers):\n session = boto3.Session(profile_name=env, region_name=region)\n s3_client = session.client('s3')\n lambda_alias_arn = get_lambda_alias_arn(app_name, env, region)\n LOG.debug('Lambda ARN for lambda function %s is %s.', app_name, lambda_alias_arn)\n LOG.debug('Creating S3 events for bucket %s', bucket)\n principal = 's3.amazonaws.com'\n statement_id = '{}_s3_{}'.format(app_name, bucket).replace('.', '')\n source_arn = 'arn:aws:s3:::{}'.format(bucket)\n add_lambda_permissions(function=lambda_alias_arn, env=env, region=region, principal=principal, statement_id=statement_id, source_arn=source_arn)\n template_kwargs = {'lambda_arn': lambda_alias_arn, 'triggers': triggers}\n config = get_template(template_file='infrastructure/lambda/s3_event.json.j2', **template_kwargs)\n s3_client.put_bucket_notification_configuration(Bucket=bucket, NotificationConfiguration=json.loads(config))\n LOG.info('Created lambda %s S3 event on bucket %s', app_name, bucket)", "docstring": "Create S3 lambda events from triggers\n\nArgs:\napp_name (str): name of the lambda function\nenv (str): Environment/Account for lambda function\nregion (str): AWS region of the lambda function\ntriggers (list): List of triggers from the settings", "source": "codesearchnet"} {"code": "def wait(self, duration=None, count=0):\n \n start = time.time()\n total = 0\n while True:\n type, result = self._recv(MSG, PING, OK)\n if type is MSG:\n total += 1\n if self._handle_msg(result) is False:\n break\n\n if count and total >= count:\n break\n\n elif type is PING:\n self._handle_ping()\n\n if duration and time.time() - start > duration:\n break", "docstring": "Publish publishes the data argument to the given subject.\n\nArgs:\nduration (float): will wait for the given number of seconds\ncount (count): stop of wait after n messages from any subject", "source": "juraj-google-style"} {"code": "def operate_multi(self, points):\n \n points = 
np.array(points)\n affine_points = np.concatenate(\n [points, np.ones(points.shape[:-1] + (1,))], axis=-1)\n return np.inner(affine_points, self.affine_matrix)[..., :-1]", "docstring": "Apply the operation on a list of points.\n\nArgs:\npoints: List of Cartesian coordinates\n\nReturns:\nNumpy array of coordinates after operation", "source": "juraj-google-style"} {"code": "def ip(self, value):\n \n if value == self._defaults['ai.location.ip'] and 'ai.location.ip' in self._values:\n del self._values['ai.location.ip']\n else:\n self._values['ai.location.ip'] = value", "docstring": "The ip property.\n\nArgs:\nvalue (string). the property value.", "source": "juraj-google-style"} {"code": "def _make_parser_func(sep):\n \n\n def parser_func(\n filepath_or_buffer,\n sep=sep,\n delimiter=None,\n header=\"infer\",\n names=None,\n index_col=None,\n usecols=None,\n squeeze=False,\n prefix=None,\n mangle_dupe_cols=True,\n dtype=None,\n engine=None,\n converters=None,\n true_values=None,\n false_values=None,\n skipinitialspace=False,\n skiprows=None,\n nrows=None,\n na_values=None,\n keep_default_na=True,\n na_filter=True,\n verbose=False,\n skip_blank_lines=True,\n parse_dates=False,\n infer_datetime_format=False,\n keep_date_col=False,\n date_parser=None,\n dayfirst=False,\n iterator=False,\n chunksize=None,\n compression=\"infer\",\n thousands=None,\n decimal=b\".\",\n lineterminator=None,\n quotechar='\"',\n quoting=0,\n escapechar=None,\n comment=None,\n encoding=None,\n dialect=None,\n tupleize_cols=None,\n error_bad_lines=True,\n warn_bad_lines=True,\n skipfooter=0,\n doublequote=True,\n delim_whitespace=False,\n low_memory=True,\n memory_map=False,\n float_precision=None,\n ):\n _, _, _, kwargs = inspect.getargvalues(inspect.currentframe())\n if not kwargs.get(\"sep\", sep):\n kwargs[\"sep\"] = \"\\t\"\n return _read(**kwargs)\n\n return parser_func", "docstring": "Creates a parser function from the given sep.\n\nArgs:\nsep: The separator default to use for the parser.\n\nReturns:\nA function object.", "source": "juraj-google-style"} {"code": "def from_raw(self, robj: RawObject) -> RootNode:\n cooked = self.schema.from_raw(robj)\n return RootNode(cooked, self.schema, cooked.timestamp)", "docstring": "Create an instance node from a raw data tree.\n\nArgs:\nrobj: Dictionary representing a raw data tree.\n\nReturns:\nRoot instance node.", "source": "codesearchnet"} {"code": "def _generate(cls, strategy, params):\n if cls._meta.abstract:\n raise errors.FactoryError(('Cannot generate instances of abstract factory %(f)s; Ensure %(f)s.Meta.model is set and %(f)s.Meta.abstract is either not set or False.' 
% dict(f=cls.__name__)))\n step = builder.StepBuilder(cls._meta, params, strategy)\n return step.build()", "docstring": "generate the object.\n\nArgs:\nparams (dict): attributes to use for generating the object\nstrategy: the strategy to use", "source": "codesearchnet"} {"code": "def device_ids(self):\n if self._device_ids is None:\n with ops.init_scope():\n device_ids_list = []\n for index, device in enumerate(self.components):\n with ops.device(device):\n device_ids_list.append(array_ops.identity(constant_op.constant(index)))\n self._device_ids = self.pack(device_ids_list)\n return self._device_ids", "docstring": "A parallel tensor with scalar integers numbering component devices.\n\nEach device ID is placed on its corresponding device, in the same order as\nthe `components` constructor argument.\n\nReturns:\nA parallel tensor containing 0 on the first device, 1 on the second, etc.", "source": "github-repos"} {"code": "def dft_task(cls, mol, xc=\"b3lyp\", **kwargs):\n \n t = NwTask.from_molecule(mol, theory=\"dft\", **kwargs)\n t.theory_directives.update({\"xc\": xc,\n \"mult\": t.spin_multiplicity})\n return t", "docstring": "A class method for quickly creating DFT tasks with optional\ncosmo parameter .\n\nArgs:\nmol: Input molecule\nxc: Exchange correlation to use.\n\\\\*\\\\*kwargs: Any of the other kwargs supported by NwTask. Note the\ntheory is always \"dft\" for a dft task.", "source": "juraj-google-style"} {"code": "def from_class(cls, target_class):\n \n module_name = target_class.__module__\n class_name = target_class.__name__\n return cls(module_name, \"__init__\", class_name)", "docstring": "Create a FunctionDescriptor from a class.\n\nArgs:\ncls: Current class which is required argument for classmethod.\ntarget_class: the python class used to create the function\ndescriptor.\n\nReturns:\nThe FunctionDescriptor instance created according to the class.", "source": "juraj-google-style"} {"code": "def __init__(self, app, env, region, prop_path, artifact_path, artifact_version, primary_region='us-east-1'):\n \n self.app_name = app\n self.env = env\n self.region = region\n self.artifact_path = artifact_path\n self.version = artifact_version\n self.properties = get_properties(prop_path, env=self.env, region=self.region)\n self.s3props = self.properties['s3']\n generated = get_details(app=app, env=env, region=region)\n\n include_region = True\n if self.region == primary_region:\n include_region = False\n if self.s3props.get('shared_bucket_master'):\n self.bucket = generated.shared_s3_app_bucket(include_region=include_region)\n self.s3path = app\n elif self.s3props.get('shared_bucket_target'):\n shared_app = self.s3props['shared_bucket_target']\n newgenerated = get_details(app=shared_app, env=env, region=region)\n self.bucket = newgenerated.shared_s3_app_bucket(include_region=include_region)\n self.s3path = app\n else:\n self.bucket = generated.s3_app_bucket(include_region=include_region)\n self.s3path = self.s3props['path'].lstrip('/')\n\n self.s3_version_uri = ''\n self.s3_latest_uri = ''\n self.setup_pathing()", "docstring": "S3 deployment object.\n\nArgs:\napp (str): Application name\nenv (str): Environment/Account\nregion (str): AWS Region\nprop_path (str): Path of environment property file\nartifact_path (str): Path to tar.gz artifact\nprimary_region (str): The primary region for the application.", "source": "juraj-google-style"} {"code": "def _get_css_files(cls, extra_files):\n packager = Packager()\n css_packages = getattr(cls, 'css_packages', {})\n return dict(((media_target, 
cls._get_media_files(packager=packager, media_packages=media_packages, media_type='css', extra_files=extra_files.get(media_target, []))) for (media_target, media_packages) in six.iteritems(css_packages)))", "docstring": "Return all CSS files from the Media class.\n\nArgs:\nextra_files (dict):\nThe contents of the Media class's original :py:attr:`css`\nattribute, if one was provided.\n\nReturns:\ndict:\nThe CSS media types and files to return for the :py:attr:`css`\nattribute.", "source": "codesearchnet"} {"code": "def populate_development(version):\n with open(DEVELOPMENT_TEMPLATE, 'r') as file_obj:\n template = file_obj.read()\n contents = template.format(revision=version, rtd_version=version)\n with open(DEVELOPMENT_FILE, 'w') as file_obj:\n file_obj.write(contents)", "docstring": "Populates ``DEVELOPMENT.rst`` with release-specific data.\n\nThis is because ``DEVELOPMENT.rst`` is used in the Sphinx documentation.\n\nArgs:\nversion (str): The current version.", "source": "codesearchnet"} {"code": "def parse_cartouche_text(lines):\n indent_lines = unindent(lines)\n indent_lines = pad_blank_lines(indent_lines)\n indent_lines = first_paragraph_indent(indent_lines)\n indent_paragraphs = gather_lines(indent_lines)\n parse_tree = group_paragraphs(indent_paragraphs)\n syntax_tree = extract_structure(parse_tree)\n result = syntax_tree.render_rst()\n ensure_terminal_blank(result)\n return result", "docstring": "Parse text in cartouche format and return a reStructuredText equivalent\n\nArgs:\nlines: A sequence of strings representing the lines of a single\ndocstring as read from the source by Sphinx. This string should be\nin a format that can be parsed by cartouche.\n\nReturns:\nA list of lines containing the transformed docstring as\nreStructuredText as produced by cartouche.\n\nRaises:\nRuntimeError: If the docstring cannot be parsed.", "source": "codesearchnet"} {"code": "def sampler(sample_function: Callable) -> Callable:\n\n def generate_sampler(continuous_pulse: Callable) -> Callable:\n 'Return a decorated sampler function.'\n\n @functools.wraps(continuous_pulse)\n def call_sampler(duration: int, *args, **kwargs) -> commands.SamplePulse:\n 'Replace the call to the continuous function with a call to the sampler applied\\n to the anlytic pulse function.'\n sampled_pulse = sample_function(continuous_pulse, duration, *args, **kwargs)\n return np.asarray(sampled_pulse, dtype=np.complex_)\n call_sampler = _update_annotations(call_sampler)\n call_sampler = _update_docstring(call_sampler, sample_function)\n call_sampler.__dict__.pop('__wrapped__')\n return commands.functional_pulse(call_sampler)\n return generate_sampler", "docstring": "Sampler decorator base method.\n\nSamplers are used for converting an continuous function to a discretized pulse.\n\nThey operate on a function with the signature:\n`def f(times: np.ndarray, *args, **kwargs) -> np.ndarray`\nWhere `times` is a numpy array of floats with length n_times and the output array\nis a complex numpy array with length n_times. 
The output of the decorator is an\ninstance of `FunctionalPulse` with signature:\n`def g(duration: int, *args, **kwargs) -> SamplePulse`\n\nNote if your continuous pulse function outputs a `complex` scalar rather than a\n`np.ndarray`, you should first vectorize it before applying a sampler.\n\n\nThis class implements the sampler boilerplate for the sampler.\n\nArgs:\nsample_function: A sampler function to be decorated.", "source": "codesearchnet"} {"code": "def get_object(self, file_path):\n \n file_path = make_string_path(file_path)\n file_path = self.absnormpath(self._original_path(file_path))\n return self.get_object_from_normpath(file_path)", "docstring": "Search for the specified filesystem object within the fake\nfilesystem.\n\nArgs:\nfile_path: Specifies the target FakeFile object to retrieve.\n\nReturns:\nThe FakeFile object corresponding to `file_path`.\n\nRaises:\nIOError: if the object is not found.", "source": "juraj-google-style"} {"code": "def concat(self, array_like):\n arr = list(array_like)\n if (len(set([x.microns_per_pixel for x in arr])) != 1):\n raise ValueError('Multiple microns per pixel set')\n cdf = CellDataFrame(pd.concat([pd.DataFrame(x) for x in arr]))\n cdf.microns_per_pixel = arr[0].microns_per_pixel\n return cdf", "docstring": "Concatenate multiple CellDataFrames\n\nThrows an error if the microns_per_pixel is not uniform across the frames\n\nArgs:\narray_like (list): a list of 1 or more CellDataFrames\n\nReturns:\nCellDataFrame", "source": "codesearchnet"} {"code": "def s_url(self, path, method=None, type_cast=None):\n if (not type_cast):\n type_cast = {}\n\n def decorator(function):\n self.s_add(path, function, method, type_cast)\n return function\n return decorator", "docstring": "Decorator for registering a simple path.\n\nArgs:\npath (str): Path to be matched.\nmethod (str, optional): Usually used to define one of GET, POST,\nPUT, DELETE. You may use whatever fits your situation though.\nDefaults to None.\ntype_cast (dict, optional): Mapping between the param name and\none of `int`, `float` or `bool`. The value reflected by the\nprovided param name will then be cast to the given type.\nDefaults to None.", "source": "codesearchnet"} {"code": "def mode_group(self):\n hmodegroup = self._libinput.libinput_event_tablet_pad_get_mode_group(self._handle)\n return TabletPadModeGroup(hmodegroup, self._libinput)", "docstring": "The mode group that the button, ring, or strip that\ntriggered this event is considered in.\n\nThe mode is a virtual grouping of functionality, usually based on some\nvisual feedback like LEDs on the pad. 
See `Tablet pad modes`_\nfor details.\n\nReturns:\n~libinput.define.TabletPadModeGroup: The mode group of the button,\nring or strip that caused this event.", "source": "codesearchnet"} {"code": "def trees_by_subpath(self, sub_path):\n \n matches = (\n self.path_db[tree_path].keys()\n for tree_path in self.path_db.iterkeys()\n if tree_path.startswith(sub_path)\n )\n\n return set(sum(matches, []))", "docstring": "Search trees by `sub_path` using ``Tree.path.startswith(sub_path)``\ncomparison.\n\nArgs:\nsub_path (str): Part of the :attr:`.Tree.path` property of\n:class:`.Tree`.\n\nReturns:\nset: Set of matching :class:`Tree` instances.", "source": "juraj-google-style"} {"code": "def random_get_instance() -> tcod.random.Random:\n return tcod.random.Random._new_from_cdata(ffi.cast('mersenne_data_t*', lib.TCOD_random_get_instance()))", "docstring": "Return the default Random instance.\n\nReturns:\nRandom: A Random instance using the default random number generator.", "source": "codesearchnet"} {"code": "def crop(self, extent, copy=False):\n \n try:\n if extent[0] is None:\n extent = (self.start.z, extent[1])\n if extent[1] is None:\n extent = (extent[0], self.stop.z)\n except:\n m = \"You must provide a 2-tuple for the new extents. Use None for\"\n m += \" the existing start or stop.\"\n raise StriplogError(m)\n\n first_ix = self.read_at(extent[0], index=True)\n last_ix = self.read_at(extent[1], index=True)\n\n first = self[first_ix].split_at(extent[0])[1]\n last = self[last_ix].split_at(extent[1])[0]\n\n new_list = self.__list[first_ix:last_ix+1].copy()\n new_list[0] = first\n new_list[-1] = last\n\n if copy:\n return Striplog(new_list)\n else:\n self.__list = new_list\n return", "docstring": "Crop to a new depth range.\n\nArgs:\nextent (tuple): The new start and stop depth. 
Must be 'inside'\nexisting striplog.\ncopy (bool): Whether to operate in place or make a copy.\n\nReturns:\nOperates in place by deault; if copy is True, returns a striplog.", "source": "juraj-google-style"} {"code": "def delete(self, remove_tombstone=True):\n\n\t\t\n\n\t\tresponse = self.repo.api.http_request('DELETE', self.uri)\n\n\t\t\n\t\tif response.status_code == 204:\n\t\t\t\n\t\t\tself._empty_resource_attributes()\n\n\t\tif remove_tombstone:\n\t\t\tself.repo.api.http_request('DELETE', '%s/fcr:tombstone' % self.uri)\n\n\t\treturn True", "docstring": "Method to delete resources.\n\nArgs:\nremove_tombstone (bool): If True, will remove tombstone at uri/fcr:tombstone when removing resource.\n\nReturns:\n(bool)", "source": "juraj-google-style"} {"code": "def _convert_single_op_hint_to_stub(call, graph_def, function_def_nodes=None, is_last_run=True):\n if function_def_nodes is None:\n function_def_nodes = set()\n name_to_input_name, name_to_node, name_to_seq_num = _extract_graph_summary(graph_def)\n input_names, output_names = call.flattened_inputs_and_outputs()\n reachable_by_input = _bfs_for_reachable_nodes(input_names, name_to_input_name)\n reachable_by_output = _bfs_for_reachable_nodes(output_names, name_to_input_name)\n output_nodes_set = set(output_names)\n nodes_after_fuse = []\n nodes_deleted_by_fuse = set()\n for node in graph_def.node:\n n = _tensor_name_base(node.name)\n if n in reachable_by_output:\n if n not in reachable_by_input and n not in output_nodes_set:\n nodes_deleted_by_fuse.add(n)\n elif n not in reachable_by_input and n not in function_def_nodes:\n nodes_after_fuse.append(n)\n elif not is_last_run:\n nodes_after_fuse.append(n)\n out = _graph_pb2.GraphDef()\n reachable_by_input_sorted = sorted(list(reachable_by_input), key=lambda n: name_to_seq_num[n])\n for node in reachable_by_input_sorted:\n out.node.extend([_copy.deepcopy(name_to_node[node])])\n sorted_input_indices = list(call.inputs.keys())\n sorted_input_indices.sort()\n sorted_output_indices = list(call.outputs.keys())\n sorted_output_indices.sort()\n new_node = _node_def_pb2.NodeDef()\n optional_input_node = _node_def_pb2.NodeDef()\n optional_input_node.name = 'Const' + str(_uuid.uuid1().hex)\n optional_input_node.op = 'Const'\n optional_input_node.attr['dtype'].CopyFrom(_attr_value_pb2.AttrValue(type=_dtypes.float32.as_datatype_enum))\n optional_input_node.attr['value'].CopyFrom(_attr_value_pb2.AttrValue(tensor=_tensor_util.make_tensor_proto([-1], _dtypes.float32, [1])))\n out.node.extend([optional_input_node])\n max_index = max(sorted_input_indices) + 1\n for cur_index in range(max_index):\n if cur_index in sorted_input_indices:\n inputs = call.inputs[cur_index]\n input_name = inputs.aggregate_and_return_name_for_input(out)\n new_node.input.append(input_name)\n else:\n new_node.input.append(optional_input_node.name)\n new_node.attr[OpHint.TFLITE_INPUT_INDICES].list.i.extend(sorted_input_indices)\n new_node.op = call.function_name\n new_node.name = call.uuid\n out.node.extend([new_node])\n output_dtypes = []\n max_output_index = max(sorted_output_indices) + 1\n for cur_index in range(max_output_index):\n if cur_index in sorted_output_indices:\n output = call.outputs[cur_index]\n output_dtype = output.aggregate_and_return_name_for_output(new_node.name, cur_index, out)\n else:\n output_dtype = optional_input_node.attr['type'].i\n output_dtypes.append(output_dtype)\n new_node.attr['_output_types'].list.type[:] = output_dtypes\n new_node.attr['_output_quantized'].b = False\n for n in nodes_after_fuse:\n 
should_keep = True\n for input_name in name_to_input_name[n]:\n if input_name in nodes_deleted_by_fuse:\n should_keep = False\n if should_keep:\n out.node.extend([_copy.deepcopy(name_to_node[n])])\n out.library.CopyFrom(graph_def.library)\n out.versions.CopyFrom(graph_def.versions)\n return out", "docstring": "Given a graph_def, converts `call` into a stub and returns a new graph_def.\n\nArgs:\ncall: A single function call to be converted.\ngraph_def: A graph_def to use as input (that has call obviously).\nfunction_def_nodes: Nodes inside the function def those are not connected to\nthe graph.\nis_last_run: Whether it is the last run for a given pass (for OpHint has\nchildren).\n\nReturns:\nA new transformed graph-def that has call as a stub (single op).\n\nNote: after this process, the graph_def can no longer be loaded into\nthe tensorflow runtime, so all future manipulations are done in graph_def\nlevel.", "source": "github-repos"} {"code": "def _usage_id_from_node(self, node, parent_id, id_generator=None):\n \n if id_generator is not None:\n warnings.warn(\n \"Passing an id_generator directly is deprecated \"\n \"in favor of constructing the Runtime with the id_generator\",\n DeprecationWarning,\n stacklevel=3,\n )\n\n id_generator = id_generator or self.id_generator\n\n block_type = node.tag\n \n node.attrib.pop('xblock-family', None)\n \n def_id = id_generator.create_definition(block_type)\n usage_id = id_generator.create_usage(def_id)\n keys = ScopeIds(None, block_type, def_id, usage_id)\n block_class = self.mixologist.mix(self.load_block_type(block_type))\n \n aside_children = []\n for child in node.iterchildren():\n \n xblock_family = child.attrib.pop('xblock-family', None)\n if xblock_family:\n xblock_family = self._family_id_to_superclass(xblock_family)\n if issubclass(xblock_family, XBlockAside):\n aside_children.append(child)\n \n for child in aside_children:\n self._aside_from_xml(child, def_id, usage_id, id_generator)\n node.remove(child)\n block = block_class.parse_xml(node, self, keys, id_generator)\n block.parent = parent_id\n block.save()\n return usage_id", "docstring": "Create a new usage id from an XML dom node.\n\nArgs:\nnode (lxml.etree.Element): The DOM node to interpret.\nparent_id: The usage ID of the parent block\nid_generator (IdGenerator): The :class:`.IdGenerator` to use\nfor creating ids", "source": "juraj-google-style"} {"code": "def _clone_helper(op_to_clone, variant_tensor_ops):\n remap_dict = {}\n for input_tensor in op_to_clone.inputs:\n input_tensor_op = input_tensor.op\n if input_tensor_op in variant_tensor_ops:\n recursive_map = _clone_helper(input_tensor_op, variant_tensor_ops)\n remap_dict.update(recursive_map)\n inputs_list = []\n for input_tensor in op_to_clone.inputs:\n input_tensor_op = input_tensor.op\n if input_tensor_op in remap_dict:\n remapped_input = remap_dict[input_tensor_op].outputs[0]\n inputs_list.append(remapped_input)\n else:\n inputs_list.append(input_tensor_op.outputs[input_tensor.value_index])\n g = ops.get_default_graph()\n new_op = g.create_op(op_to_clone.type, inputs_list, [o.dtype for o in op_to_clone.outputs], name=op_to_clone.name, attrs=op_to_clone.node_def.attr, op_def=_get_op_def(op_to_clone))\n remap_dict[op_to_clone] = new_op\n return remap_dict", "docstring": "Helper method that recursively clones `op_to_clone`.\n\nArgs:\nop_to_clone: The op we want to clone.\nvariant_tensor_ops: A list of ops that we have to clone along the way.\n\nReturns:\nA dictionary mapping old_ops to new_ops created. 
Includes op_to_clone\nas a key.", "source": "github-repos"} {"code": "def create_snapshot(self, volume_id_or_uri, snapshot, timeout=-1):\n \n uri = self.__build_volume_snapshot_uri(volume_id_or_uri)\n\n return self._client.create(snapshot, uri=uri, timeout=timeout, default_values=self.DEFAULT_VALUES_SNAPSHOT)", "docstring": "Creates a snapshot for the specified volume.\n\nArgs:\nvolume_id_or_uri:\nCan be either the volume ID or the volume URI.\nsnapshot (dict):\nObject to create.\ntimeout:\nTimeout in seconds. Wait for task completion by default. The timeout does not abort the operation in\nOneView, just stops waiting for its completion.\n\nReturns:\ndict: Storage volume.", "source": "juraj-google-style"} {"code": "def get_all_profiles(store='local'):\n \n return {\n 'Domain Profile': get_all_settings(profile='domain', store=store),\n 'Private Profile': get_all_settings(profile='private', store=store),\n 'Public Profile': get_all_settings(profile='public', store=store)\n }", "docstring": "Gets all properties for all profiles in the specified store\n\nArgs:\n\nstore (str):\nThe store to use. This is either the local firewall policy or the\npolicy defined by local group policy. Valid options are:\n\n- lgpo\n- local\n\nDefault is ``local``\n\nReturns:\ndict: A dictionary containing the specified settings for each profile", "source": "juraj-google-style"} {"code": "def _plot(self, axes_list):\n \n\n axes = axes_list[0]\n\n if self.plot_settings:\n axes.imshow(self.data['image_data'], cmap=self.plot_settings['cmap'], interpolation=self.plot_settings['interpol'], extent=self.data['extent'])\n axes.set_xlabel(self.plot_settings['xlabel'])\n axes.set_ylabel(self.plot_settings['ylabel'])\n axes.set_title(self.plot_settings['title'])\n\n self._update(axes_list)", "docstring": "Plots a dot on top of each selected NV, with a corresponding number denoting the order in which the NVs are\nlisted.\nPrecondition: must have an existing image in figure_list[0] to plot over\nArgs:\nfigure_list:", "source": "juraj-google-style"} {"code": "def is_enrolled(self, username, course_run_id):\n enrollment = self.get_course_enrollment(username, course_run_id)\n return ((enrollment is not None) and enrollment.get('is_active', False))", "docstring": "Query the enrollment API and determine if a learner is enrolled in a course run.\n\nArgs:\nusername (str): The username by which the user goes on the OpenEdX platform\ncourse_run_id (str): The string value of the course's unique identifier\n\nReturns:\nbool: Indicating whether the user is enrolled in the course run. 
Returns False under any errors.", "source": "codesearchnet"} {"code": "def run(self, data):\n result_type = namedtuple('Result', 'code messages')\n if (self.passes is True):\n result = result_type(Checker.Code.PASSED, '')\n elif (self.passes is False):\n if self.allow_failure:\n result = result_type(Checker.Code.IGNORED, '')\n else:\n result = result_type(Checker.Code.FAILED, '')\n else:\n try:\n result = self.check(data, **self.arguments)\n messages = ''\n if isinstance(result, tuple):\n (result, messages) = result\n if (result not in Checker.Code):\n result = (Checker.Code.PASSED if bool(result) else Checker.Code.FAILED)\n if ((result == Checker.Code.FAILED) and self.allow_failure):\n result = Checker.Code.IGNORED\n result = result_type(result, messages)\n except NotImplementedError:\n result = result_type(Checker.Code.NOT_IMPLEMENTED, '')\n self.result = result", "docstring": "Run the check method and format the result for analysis.\n\nArgs:\ndata (DSM/DMM/MDM): DSM/DMM/MDM instance to check.\n\nReturns:\ntuple (int, str): status constant from Checker class and messages.", "source": "codesearchnet"} {"code": "def from_celery(cls, worker_name, job_dict, celery_app):\n \n if not isinstance(job_dict, dict) or 'id' not in job_dict:\n raise JobStatInvalid('The job description is missing important fields.')\n\n async_result = AsyncResult(id=job_dict['id'], app=celery_app)\n a_info = async_result.info if isinstance(async_result.info, dict) else None\n\n return JobStats(\n name=a_info.get('name', '') if a_info is not None else '',\n job_id=job_dict['id'],\n job_type=a_info.get('type', '') if a_info is not None else '',\n workflow_id=a_info.get('workflow_id', '') if a_info is not None else '',\n queue=a_info.get('queue', '') if a_info is not None else '',\n start_time=a_info.get('start_time', None) if a_info is not None else None,\n arguments=a_info.get('arguments', {}) if a_info is not None else {},\n acknowledged=job_dict['acknowledged'],\n func_name=job_dict['type'],\n hostname=job_dict['hostname'],\n worker_name=worker_name,\n worker_pid=job_dict['worker_pid'],\n routing_key=job_dict['delivery_info']['routing_key']\n )", "docstring": "Create a JobStats object from the dictionary returned by celery.\n\nArgs:\nworker_name (str): The name of the worker this jobs runs on.\njob_dict (dict): The dictionary as returned by celery.\ncelery_app: Reference to a celery application object.\n\nReturns:\nJobStats: A fully initialized JobStats object.", "source": "juraj-google-style"} {"code": "def _input_valid(input_, operation, message, output_condition_uri=None):\n ccffill = input_.fulfillment\n try:\n parsed_ffill = Fulfillment.from_uri(ccffill.serialize_uri())\n except (TypeError, ValueError, ParsingError, ASN1DecodeError, ASN1EncodeError):\n return False\n if (operation == Transaction.CREATE):\n output_valid = True\n else:\n output_valid = (output_condition_uri == ccffill.condition_uri)\n message = sha3_256(message.encode())\n if input_.fulfills:\n message.update('{}{}'.format(input_.fulfills.txid, input_.fulfills.output).encode())\n ffill_valid = parsed_ffill.validate(message=message.digest())\n return (output_valid and ffill_valid)", "docstring": "Validates a single Input against a single Output.\n\nNote:\nIn case of a `CREATE` Transaction, this method\ndoes not validate against `output_condition_uri`.\n\nArgs:\ninput_ (:class:`~bigchaindb.common.transaction.\nInput`) The Input to be signed.\noperation (str): The type of Transaction.\nmessage (str): The fulfillment message.\noutput_condition_uri 
(str, optional): An Output to check the\nInput against.\n\nReturns:\nbool: If the Input is valid.", "source": "codesearchnet"} {"code": "def submit(self, cmd_string, blocksize, tasks_per_node, job_name=\"parsl\"):\n \n if not self.resources:\n cur_timestamp = str(time.time() * 1000).split(\".\")[0]\n job_name = \"{0}-{1}\".format(job_name, cur_timestamp)\n\n if not self.deployment_name:\n deployment_name = '{}-deployment'.format(job_name)\n else:\n deployment_name = '{}-{}-deployment'.format(self.deployment_name,\n cur_timestamp)\n\n formatted_cmd = template_string.format(command=cmd_string,\n worker_init=self.worker_init)\n\n self.deployment_obj = self._create_deployment_object(job_name,\n self.image,\n deployment_name,\n cmd_string=formatted_cmd,\n replicas=self.init_blocks,\n volumes=self.persistent_volumes)\n logger.debug(\"Deployment name :{}\".format(deployment_name))\n self._create_deployment(self.deployment_obj)\n self.resources[deployment_name] = {'status': 'RUNNING',\n 'pods': self.init_blocks}\n\n return deployment_name", "docstring": "Submit a job\nArgs:\n- cmd_string :(String) - Name of the container to initiate\n- blocksize :(float) - Number of replicas\n- tasks_per_node (int) : command invocations to be launched per node\n\nKwargs:\n- job_name (String): Name for job, must be unique\nReturns:\n- None: At capacity, cannot provision more\n- job_id: (string) Identifier for the job", "source": "juraj-google-style"} {"code": "def memory_usage(self, string=False):\n \n if string:\n n = getsizeof(self)\n return ' '.join((str(s) for s in convert_bytes(n)))\n return self.info()['size']", "docstring": "Get the memory usage estimate of the container.\n\nArgs:\nstring (bool): Human readable string (default false)\n\nSee Also:\n:func:`~exa.core.container.Container.info`", "source": "juraj-google-style"} {"code": "def get_body(name):\n try:\n (body, propag) = _bodies[name.lower()]\n body.propagate = propag.propagate\n except KeyError as e:\n raise UnknownBodyError(e.args[0])\n return body", "docstring": "Retrieve a given body orbits and parameters\n\nArgs:\nname (str): Object name\nReturn:\nBody:", "source": "codesearchnet"} {"code": "def truncate(text, max_len=350, end='...'):\n if (len(text) <= max_len):\n return text\n return (text[:max_len].rsplit(' ', maxsplit=1)[0] + end)", "docstring": "Truncate the supplied text for display.\n\nArguments:\ntext (:py:class:`str`): The text to truncate.\nmax_len (:py:class:`int`, optional): The maximum length of the\ntext before truncation (defaults to 350 characters).\nend (:py:class:`str`, optional): The ending to use to show that\nthe text was truncated (defaults to ``'...'``).\n\nReturns:\n:py:class:`str`: The truncated text.", "source": "codesearchnet"} {"code": "def validate_labels(known_classes, passed_labels, argument_name):\n known_classes = np.array(known_classes)\n passed_labels = np.array(passed_labels)\n (unique_labels, unique_indexes) = np.unique(passed_labels, return_index=True)\n if (len(passed_labels) != len(unique_labels)):\n indexes = np.arange(0, len(passed_labels))\n duplicate_indexes = indexes[(~ np.in1d(indexes, unique_indexes))]\n duplicate_labels = [str(x) for x in passed_labels[duplicate_indexes]]\n msg = 'The following duplicate labels were passed into {0}: {1}'.format(argument_name, ', '.join(duplicate_labels))\n raise ValueError(msg)\n passed_labels_absent = (~ np.in1d(passed_labels, known_classes))\n if np.any(passed_labels_absent):\n absent_labels = [str(x) for x in passed_labels[passed_labels_absent]]\n msg = 'The 
following labels were passed into {0}, but were not found in labels: {1}'.format(argument_name, ', '.join(absent_labels))\n raise ValueError(msg)\n return", "docstring": "Validates the labels passed into the true_labels or pred_labels\narguments in the plot_confusion_matrix function.\n\nRaises a ValueError exception if any of the passed labels are not in the\nset of known classes or if there are duplicate labels. Otherwise returns\nNone.\n\nArgs:\nknown_classes (array-like):\nThe classes that are known to appear in the data.\npassed_labels (array-like):\nThe labels that were passed in through the argument.\nargument_name (str):\nThe name of the argument being validated.\n\nExample:\n>>> known_classes = [\"A\", \"B\", \"C\"]\n>>> passed_labels = [\"A\", \"B\"]\n>>> validate_labels(known_classes, passed_labels, \"true_labels\")", "source": "codesearchnet"} {"code": "def WriteOutput(title, locations, limit, f):\n \n output_prefix = % locals()\n\n output_suffix = % locals()\n\n f.write(transitfeed.EncodeUnicode(output_prefix))\n for source, destination in zip(locations[0:limit], locations[1:limit + 1]):\n f.write(transitfeed.EncodeUnicode(\"