{"code": "def FromMany(cls, samples):\n \n if not samples:\n raise ValueError(\"Empty `samples` argument\")\n\n \n \n cpu_percent = sum(sample.cpu_percent for sample in samples) / len(samples)\n\n return CpuSample(\n timestamp=max(sample.timestamp for sample in samples),\n cpu_percent=cpu_percent,\n user_cpu_time=max(sample.user_cpu_time for sample in samples),\n system_cpu_time=max(sample.system_cpu_time for sample in samples))", "docstring": "Constructs a single sample that best represents a list of samples.\n\nArgs:\nsamples: An iterable collection of `CpuSample` instances.\n\nReturns:\nA `CpuSample` instance representing `samples`.\n\nRaises:\nValueError: If `samples` is empty.", "source": "juraj-google-style"} {"code": "def timestamp(stamp, tolerance=150):\n \n try:\n tolerance = datetime.timedelta(0, tolerance)\n timestamp_low = dateutil.parser.parse(stamp)\n timestamp_high = timestamp_low + tolerance\n now = datetime.datetime.now(timestamp_low.tzinfo)\n except ValueError:\n return False\n\n return now >= timestamp_low and now <= timestamp_high", "docstring": "Validate timestamp specified by request.\n\nSee `validate.request` for additional info.\n\nArgs:\nstamp: str. Time request was made as ISO 8601 timestamp.\ntolerance: int. Number of seconds request remains valid from timestamp.\n\nReturns\nbool: True if valid, False otherwise.", "source": "juraj-google-style"} {"code": "def _set_initial_contents(self, contents):\n contents = self._encode_contents(contents)\n changed = (self._byte_contents != contents)\n st_size = len(contents)\n if self._byte_contents:\n self.size = 0\n current_size = (self.st_size or 0)\n self.filesystem.change_disk_usage((st_size - current_size), self.name, self.st_dev)\n self._byte_contents = contents\n self.st_size = st_size\n self.epoch += 1\n return changed", "docstring": "Sets the file contents and size.\nCalled internally after initial file creation.\n\nArgs:\ncontents: string, new content of file.\n\nReturns:\nTrue if the contents have been changed.\n\nRaises:\nIOError: if the st_size is not a non-negative integer,\nor if st_size exceeds the available file system space", "source": "codesearchnet"} {"code": "def get_all_users(configuration=None, **kwargs):\n \n \n user = User(configuration=configuration)\n user['id'] = 'all users' \n result = user._write_to_hdx('list', kwargs, 'id')\n users = list()\n if result:\n for userdict in result:\n user = User(userdict, configuration=configuration)\n users.append(user)\n else:\n logger.debug(result)\n return users", "docstring": "Get all users in HDX\n\nArgs:\nconfiguration (Optional[Configuration]): HDX configuration. Defaults to global configuration.\n**kwargs: See below\nq (str): Restrict to names containing a string. Defaults to all users.\norder_by (str): Field by which to sort - any user field or edits (number_of_edits). 
Defaults to 'name'.\n\nReturns:\nList[User]: List of all users in HDX", "source": "juraj-google-style"} {"code": "def deprecated_graph_mode_only(func: Union[_TC, _F]) -> Union[_TC, _F]:\n if tf_inspect.isclass(func):\n setup = func.__dict__.get('setUp')\n if setup is not None:\n setattr(func, 'setUp', deprecated_graph_mode_only(setup))\n for name, value in func.__dict__.copy().items():\n if callable(value) and name.startswith(unittest.TestLoader.testMethodPrefix):\n setattr(func, name, deprecated_graph_mode_only(value))\n return func\n\n def decorated(*args, **kwargs):\n if context.executing_eagerly():\n with context.graph_mode():\n return func(*args, **kwargs)\n else:\n return func(*args, **kwargs)\n return tf_decorator.make_decorator(func, decorated)", "docstring": "Execute the decorated test in graph mode.\n\nThis is a decorator intended to be applied to tests that are not compatible\nwith eager mode. When this decorator is applied, the test body will be run in\nan environment where API calls construct graphs instead of executing eagerly.\n\n`deprecated_graph_mode_only`, `run_v1_only`, `run_v2_only`, and\n`run_in_graph_and_eager_modes` are available decorators for different\nv1/v2/eager/graph combinations.\n\nArgs:\nfunc: function or class to be annotated.\nIf `func` is a function this returns the decorator applied to `func`.\nIf `func` is a unit test class this returns that class with the decorator\napplied to all test functions within that class.\n\nReturns:\nReturns a function or class that will run the decorated test(s)\nin graph mode.", "source": "github-repos"} {"code": "def isOpeningTag(self):\n if (self.isTag() and (not self.isComment()) and (not self.isEndTag()) and (not self.isNonPairTag())):\n return True\n return False", "docstring": "Detect whether this tag is opening or not.\n\nReturns:\nbool: True if it is opening.", "source": "codesearchnet"} {"code": "def create_initial_tree(channel):\n \n \n config.LOGGER.info(\" Setting up initial channel structure... \")\n tree = ChannelManager(channel)\n\n \n config.LOGGER.info(\" Validating channel structure...\")\n channel.print_tree()\n tree.validate()\n config.LOGGER.info(\" Tree is valid\\n\")\n return tree", "docstring": "create_initial_tree: Create initial tree structure\nArgs:\nchannel (Channel): channel to construct\nReturns: tree manager to run rest of steps", "source": "juraj-google-style"} {"code": "def delete_issue(self, issue_id, params=None):\n \n return self._delete(self.API_URL + 'issue/{}'.format(issue_id), params=params)", "docstring": "Deletes an individual issue.\n\nIf the issue has sub-tasks you must set the deleteSubtasks=true parameter to delete the issue. You cannot delete\nan issue without deleting its sub-tasks.\n\nArgs:\nissue_id:\nparams:\n\nReturns:", "source": "juraj-google-style"} {"code": "def add_string(self, data):\n \n lines = []\n while data:\n match = self._line_end_re.search(data)\n if match is None:\n chunk = data\n else:\n chunk = data[:match.end()]\n\n data = data[len(chunk):]\n\n if self._buf and self._buf[-1].endswith(b('\\r')) and not chunk.startswith(b('\\n')):\n \n \n \n \n \n \n \n \n \n \n lines.append(self._finish_line())\n\n self._buf.append(chunk)\n if chunk.endswith(b('\\n')):\n lines.append(self._finish_line())\n\n return lines", "docstring": "Process some data splitting it into complete lines and buffering the rest\n\nArgs:\ndata: A `str` in Python 2 or `bytes` in Python 3\nReturns:\nlist of complete lines ending with a carriage return (eg. 
a progress\nbar) or a newline.", "source": "juraj-google-style"} {"code": "def ParseOptions(cls, options, configuration_object):\n \n if not isinstance(configuration_object, tools.CLITool):\n raise errors.BadConfigObject(\n 'Configuration object is not an instance of CLITool')\n\n process_memory_limit = cls._ParseNumericOption(\n options, 'process_memory_limit')\n\n if process_memory_limit and process_memory_limit < 0:\n raise errors.BadConfigOption(\n 'Invalid process memory limit value cannot be negative.')\n\n setattr(configuration_object, '_process_memory_limit', process_memory_limit)", "docstring": "Parses and validates options.\n\nArgs:\noptions (argparse.Namespace): parser options.\nconfiguration_object (CLITool): object to be configured by the argument\nhelper.\n\nRaises:\nBadConfigObject: when the configuration object is of the wrong type.\nBadConfigOption: when a configuration parameter fails validation.", "source": "juraj-google-style"} {"code": "def _send_impression_event(self, experiment, variation, user_id, attributes):\n impression_event = self.event_builder.create_impression_event(experiment, variation.id, user_id, attributes)\n self.logger.debug(('Dispatching impression event to URL %s with params %s.' % (impression_event.url, impression_event.params)))\n try:\n self.event_dispatcher.dispatch_event(impression_event)\n except:\n self.logger.exception('Unable to dispatch impression event!')\n self.notification_center.send_notifications(enums.NotificationTypes.ACTIVATE, experiment, user_id, attributes, variation, impression_event)", "docstring": "Helper method to send impression event.\n\nArgs:\nexperiment: Experiment for which impression event is being sent.\nvariation: Variation picked for user for the given experiment.\nuser_id: ID for user.\nattributes: Dict representing user attributes and values which need to be recorded.", "source": "codesearchnet"} {"code": "def tag(self, image, repository, tag=None, force=False):\n params = {'tag': tag, 'repo': repository, 'force': (1 if force else 0)}\n url = self._url('/images/{0}/tag', image)\n res = self._post(url, params=params)\n self._raise_for_status(res)\n return (res.status_code == 201)", "docstring": "Tag an image into a repository. 
Similar to the ``docker tag`` command.\n\nArgs:\nimage (str): The image to tag\nrepository (str): The repository to set for the tag\ntag (str): The tag name\nforce (bool): Force\n\nReturns:\n(bool): ``True`` if successful\n\nRaises:\n:py:class:`docker.errors.APIError`\nIf the server returns an error.\n\nExample:\n\n>>> client.tag('ubuntu', 'localhost:5000/ubuntu', 'latest',\nforce=True)", "source": "codesearchnet"} {"code": "def print_layer_summary_with_connections(layer):\n try:\n output_shape = layer.output_shape\n except AttributeError:\n output_shape = 'multiple'\n connections = []\n for node in layer._inbound_nodes:\n if relevant_nodes and node not in relevant_nodes:\n continue\n for inbound_layer, node_index, tensor_index, _ in node.iterate_inbound():\n connections.append('{}[{}][{}]'.format(inbound_layer.name, node_index, tensor_index))\n name = layer.name\n cls_name = layer.__class__.__name__\n if not connections:\n first_connection = ''\n else:\n first_connection = connections[0]\n fields = [name + ' (' + cls_name + ')', output_shape, layer.count_params(), first_connection]\n print_row(fields, positions)\n if len(connections) > 1:\n for i in range(1, len(connections)):\n fields = ['', '', '', connections[i]]\n print_row(fields, positions)", "docstring": "Prints a summary for a single layer (including topological connections).\n\nArgs:\nlayer: target layer.", "source": "github-repos"} {"code": "def destroy_s3_event(app, env, region):\n \n\n \n \n \n \n generated = get_details(app=app, env=env)\n\n bucket = generated.s3_app_bucket()\n\n session = boto3.Session(profile_name=env, region_name=region)\n s3_client = session.client('s3')\n\n config = {}\n\n s3_client.put_bucket_notification_configuration(Bucket=bucket, NotificationConfiguration=config)\n LOG.debug(\"Deleted Lambda S3 notification\")\n\n return True", "docstring": "Destroy S3 event.\n\nArgs:\napp (str): Spinnaker Application name.\nenv (str): Deployment environment.\nregion (str): AWS region.\nReturns:\nbool: True upon successful completion.", "source": "juraj-google-style"} {"code": "def timezone(self, value=0.0):\n if (value is not None):\n try:\n value = float(value)\n except ValueError:\n raise ValueError('value {} need to be of type float for field `timezone`'.format(value))\n if (value < (- 12.0)):\n raise ValueError('value need to be greater or equal -12.0 for field `timezone`')\n if (value > 12.0):\n raise ValueError('value need to be smaller 12.0 for field `timezone`')\n self._timezone = value", "docstring": "Corresponds to IDD Field `timezone` Time relative to GMT.\n\nArgs:\nvalue (float): value for IDD Field `timezone`\nUnit: hr - not on standard units list???\nDefault value: 0.0\nvalue >= -12.0\nvalue <= 12.0\nif `value` is None it will not be checked against the\nspecification and is assumed to be a missing value\n\nRaises:\nValueError: if `value` is not a valid value", "source": "codesearchnet"} {"code": "def adjust_column_width(worksheet):\n dims = {}\n padding = 1\n for row in worksheet.rows:\n for cell in row:\n if (not cell.value):\n continue\n dims[cell.column] = max(dims.get(cell.column, 0), len(str(cell.value)))\n for (col, value) in list(dims.items()):\n worksheet.column_dimensions[col].width = (value + padding)", "docstring": "Adjust column width in worksheet.\n\nArgs:\nworksheet: worksheet to be adjusted", "source": "codesearchnet"} {"code": "def reliability_curve(self):\n total = self.frequencies['Total_Freq'].sum()\n curve = pd.DataFrame(columns=['Bin_Start', 'Bin_End', 'Bin_Center', 
'Positive_Relative_Freq', 'Total_Relative_Freq'])\n curve['Bin_Start'] = self.thresholds[:(- 1)]\n curve['Bin_End'] = self.thresholds[1:]\n curve['Bin_Center'] = (0.5 * (self.thresholds[:(- 1)] + self.thresholds[1:]))\n curve['Positive_Relative_Freq'] = (self.frequencies['Positive_Freq'] / self.frequencies['Total_Freq'])\n curve['Total_Relative_Freq'] = (self.frequencies['Total_Freq'] / total)\n return curve", "docstring": "Calculates the reliability diagram statistics. The key columns are Bin_Start and Positive_Relative_Freq\n\nReturns:\npandas.DataFrame", "source": "codesearchnet"} {"code": "def get_el_amount(self, element):\n return (sum([(self._all_comp[i][element] * abs(self._coeffs[i])) for i in range(len(self._all_comp))]) / 2)", "docstring": "Returns the amount of the element in the reaction.\n\nArgs:\nelement (Element/Specie): Element in the reaction\n\nReturns:\nAmount of that element in the reaction.", "source": "codesearchnet"} {"code": "def __init__(self, where: Optional[Callable[[base.HyperPrimitive], bool]]=None, require_hyper_name: bool=False, per_thread: bool=True, dna_spec: Optional[geno.DNASpec]=None) -> None:\n self._where = where\n self._require_hyper_name: bool = require_hyper_name\n self._name_to_hyper: Dict[str, base.HyperPrimitive] = dict()\n self._annoymous_hyper_name_accumulator = DynamicEvaluationContext._AnnoymousHyperNameAccumulator()\n self._hyper_dict = symbolic.Dict() if dna_spec is None else None\n self._dna_spec: Optional[geno.DNASpec] = dna_spec\n self._per_thread = per_thread\n self._decision_getter = None", "docstring": "Create a dynamic evaluation context.\n\nArgs:\nwhere: A callable object that decide whether a hyper primitive should be\nincluded when being instantiated under `collect`.\nIf None, all hyper primitives under `collect` will be\nincluded.\nrequire_hyper_name: If True, all hyper primitives (e.g. pg.oneof) must\ncome with a `name`. This option helps to eliminate errors when a\nfunction that contains hyper primitive definition may be called multiple\ntimes. Since hyper primitives sharing the same name will be registered\nto the same decision point, repeated call to the hyper primitive\ndefinition will not matter.\nper_thread: If True, the context manager will be applied to current thread\nonly. Otherwise, it will be applied on current process.\ndna_spec: External provided search space. If None, the dynamic evaluation\ncontext can be used to create new search space via `colelct` context\nmanager. Otherwise, current context will use the provided DNASpec to\napply decisions.", "source": "github-repos"} {"code": "def build_kw_dict(kw_list):\n kw_dict = OrderedDict()\n sorted_list = sorted(kw_list, key=(lambda x: x.get('zahlavi').encode('utf-8')))\n for keyword_data in sorted_list:\n if ('zahlavi' not in keyword_data):\n continue\n zahlavi = keyword_data['zahlavi'].encode('utf-8')\n old_record = kw_dict.get(zahlavi)\n if (not old_record):\n kw_dict[zahlavi] = keyword_data\n continue\n key = 'angl_ekvivalent'\n if ((not old_record.get(key)) and keyword_data.get(key)):\n kw_dict[zahlavi] = keyword_data\n continue\n key = 'zdroj_angl_ekvivalentu'\n if ((not old_record.get(key)) and keyword_data.get(key)):\n kw_dict[zahlavi] = keyword_data\n continue\n if (len(str(keyword_data)) > len(str(old_record))):\n kw_dict[zahlavi] = keyword_data\n continue\n return kw_dict", "docstring": "Build keyword dictionary from raw keyword data. 
Ignore invalid or\ninvalidated records.\n\nArgs:\nkw_list (list): List of dicts from :func:`read_kw_file`.\n\nReturns:\nOrderedDict: dictionary with keyword data.", "source": "codesearchnet"} {"code": "def _on_connection_error(self, connection, error_message):\n \n self._channel = None\n if isinstance(error_message, pika_errs.AMQPConnectionError):\n error_message = repr(error_message.args[0])\n _log.error(error_message)\n self.call_later(1, self.reconnect)", "docstring": "Callback invoked when the connection failed to be established.\n\nArgs:\nconnection (pika.connection.SelectConnection): The connection that\nfailed to open.\nerror_message (str): The reason the connection couldn't be opened.", "source": "juraj-google-style"} {"code": "def normalize_to_element(self, element, factor=1):\n all_comp = self._all_comp\n coeffs = self._coeffs\n current_el_amount = (sum([(all_comp[i][element] * abs(coeffs[i])) for i in range(len(all_comp))]) / 2)\n scale_factor = (factor / current_el_amount)\n self._coeffs = [(c * scale_factor) for c in coeffs]", "docstring": "Normalizes the reaction to one of the elements.\nBy default, normalizes such that the amount of the element is 1.\nAnother factor can be specified.\n\nArgs:\nelement (Element/Specie): Element to normalize to.\nfactor (float): Factor to normalize to. Defaults to 1.", "source": "codesearchnet"} {"code": "def __init__(self, logger=logging):\n \n self.logger = logger\n self.interfaces = self._CreateInterfaceMap()", "docstring": "Constructor.\n\nArgs:\nlogger: logger object, used to write to SysLog and serial port.", "source": "juraj-google-style"} {"code": "def _reset_non_empty(self, indices):\n \n reset_video_op = tf.cond(\n self._video_condition,\n lambda: tf.py_func(self._video_reset_writer, [], []),\n tf.no_op)\n with tf.control_dependencies([reset_video_op]):\n inc_op = tf.assign_add(self._episode_counter, 1)\n with tf.control_dependencies([self.history_buffer.reset(indices),\n inc_op]):\n initial_frame_dump_op = tf.cond(\n self._video_condition,\n lambda: tf.py_func(self._video_dump_frames, \n [self.history_buffer.get_all_elements()], []),\n tf.no_op)\n observ_assign_op = self._observ.assign(\n self.history_buffer.get_all_elements()[:, -1, ...])\n with tf.control_dependencies([observ_assign_op, initial_frame_dump_op]):\n reset_model_op = tf.assign(self._reset_model, tf.constant(1.0))\n with tf.control_dependencies([reset_model_op]):\n return tf.gather(self._observ.read_value(), indices)", "docstring": "Reset the batch of environments.\n\nArgs:\nindices: The batch indices of the environments to reset; defaults to all.\n\nReturns:\nBatch tensor of the new observations.", "source": "juraj-google-style"} {"code": "def get_country_by_id(self, country_id) -> 'Country':\n VALID_POSITIVE_INT.validate(country_id, 'get_country_by_id', exc=ValueError)\n if (country_id not in self._countries_by_id.keys()):\n for country in self.countries:\n if (country.country_id == country_id):\n return country\n raise ValueError(country_id)\n else:\n return self._countries_by_id[country_id]", "docstring": "Gets a country in this coalition by its ID\n\nArgs:\ncountry_id: country Id\n\nReturns: Country", "source": "codesearchnet"} {"code": "def forward(self, hidden_state, output_hidden_states: bool=False):\n all_hidden_states = []\n embedding = hidden_state\n for mod in self.mixers:\n embedding = mod(embedding)\n if output_hidden_states:\n all_hidden_states.append(embedding)\n if output_hidden_states:\n return (embedding, all_hidden_states)\n else:\n return 
(embedding, None)", "docstring": "Args:\nhidden_state (`torch.Tensor`): The input tensor.\noutput_hidden_states (`bool`, *optional*, defaults to False.):\nWhether to output the hidden states as well.\n\nReturns:\n`torch.Tensor`: The embedding. `list`: List of all hidden states if `output_hidden_states` is set to\n`True`.", "source": "github-repos"} {"code": "def transform(self, args):\n \n if self.parse_error():\n \n AliasManager.write_alias_config_hash(empty_hash=True)\n return args\n\n \n if self.detect_alias_config_change():\n self.load_full_command_table()\n self.collided_alias = AliasManager.build_collision_table(self.alias_table.sections())\n build_tab_completion_table(self.alias_table)\n else:\n self.load_collided_alias()\n\n transformed_commands = []\n alias_iter = enumerate(args, 1)\n for alias_index, alias in alias_iter:\n is_collided_alias = alias in self.collided_alias and alias_index in self.collided_alias[alias]\n \n \n is_named_arg = alias_index > 1 and args[alias_index - 2].startswith('-')\n is_named_arg_flag = alias.startswith('-')\n excluded_commands = is_alias_command(['remove', 'export'], transformed_commands)\n if not alias or is_collided_alias or is_named_arg or is_named_arg_flag or excluded_commands:\n transformed_commands.append(alias)\n continue\n\n full_alias = self.get_full_alias(alias)\n\n if self.alias_table.has_option(full_alias, 'command'):\n cmd_derived_from_alias = self.alias_table.get(full_alias, 'command')\n telemetry.set_alias_hit(full_alias)\n else:\n transformed_commands.append(alias)\n continue\n\n pos_args_table = build_pos_args_table(full_alias, args, alias_index)\n if pos_args_table:\n logger.debug(POS_ARG_DEBUG_MSG, full_alias, cmd_derived_from_alias, pos_args_table)\n transformed_commands += render_template(cmd_derived_from_alias, pos_args_table)\n\n \n for pos_arg in pos_args_table: \n next(alias_iter)\n else:\n logger.debug(DEBUG_MSG, full_alias, cmd_derived_from_alias)\n transformed_commands += shlex.split(cmd_derived_from_alias)\n\n return self.post_transform(transformed_commands)", "docstring": "Transform any aliases in args to their respective commands.\n\nArgs:\nargs: A list of space-delimited command input extracted directly from the console.\n\nReturns:\nA list of transformed commands according to the alias configuration file.", "source": "juraj-google-style"} {"code": "def _wrap_definition_section(source, width):\n \n \n index = source.index('\\n') + 1\n definitions, max_len = _get_definitions(source[index:])\n sep = '\\n' + ' ' * (max_len + 4)\n lines = [source[:index].strip()]\n for arg, desc in six.iteritems(definitions):\n wrapped_desc = sep.join(textwrap.wrap(desc, width - max_len - 4))\n lines.append(' {arg:{size}} {desc}'.format(\n arg=arg,\n size=str(max_len),\n desc=wrapped_desc\n ))\n return '\\n'.join(lines)", "docstring": "Wrap the given definition section string to the current terminal size.\n\nNote:\nAuto-adjusts the spacing between terms and definitions.\n\nArgs:\nsource: The section string to wrap.\n\nReturns:\nThe wrapped section string.", "source": "juraj-google-style"} {"code": "def actnorm_center(name, x, reverse=False, init=False):\n \n shape = common_layers.shape_list(x)\n with tf.variable_scope(name, reuse=tf.AUTO_REUSE):\n assert len(shape) == 2 or len(shape) == 4\n if len(shape) == 2:\n x_mean = tf.reduce_mean(x, [0], keepdims=True)\n b = get_variable_ddi(\"b\", (1, shape[1]), initial_value=-x_mean,\n init=init)\n elif len(shape) == 4:\n x_mean = tf.reduce_mean(x, [0, 1, 2], keepdims=True)\n b = 
get_variable_ddi(\n \"b\", (1, 1, 1, shape[3]), initial_value=-x_mean, init=init)\n\n if not reverse:\n x += b\n else:\n x -= b\n return x", "docstring": "Add a bias to x.\n\nInitialize such that the output of the first minibatch is zero centered\nper channel.\n\nArgs:\nname: scope\nx: 2-D or 4-D Tensor.\nreverse: Forward or backward operation.\ninit: data-dependent initialization.\n\nReturns:\nx_center: (x + b), if reverse is True and (x - b) otherwise.", "source": "juraj-google-style"} {"code": "def with_forward_compatibility_horizons(*horizons: Optional[tuple[int, int, int]]) -> Callable[[Callable[..., Any]], Callable[..., None]]:\n if not horizons:\n raise ValueError('Expected at least one horizon.')\n for horizon in horizons:\n if not (horizon is None or (len(horizon) == 3 and all((isinstance(x, int) for x in horizon)))):\n raise ValueError('Bad horizon value: %r' % horizon)\n\n def decorator(f: Callable[..., Any]) -> Callable[..., None]:\n if tf_inspect.isclass(f):\n raise ValueError('`with_forward_compatibility_horizons` only supports test methods.')\n\n def decorated(*args, **kwargs):\n for horizon in horizons:\n if horizon is None:\n f(*args, **kwargs)\n else:\n year, month, day = horizon\n with forward_compatibility_horizon(year, month, day):\n f(*args, **kwargs)\n return tf_decorator.make_decorator(f, decorated)\n return decorator", "docstring": "Executes the decorated test with the specified forward-compat horizons.\n\nArgs:\n*horizons: A list of (year, month, day) tuples. If the list includes\n`None`, then the test will also be run with no forward-compatibility\nhorizon set.\n\nReturns:\nA decorator that will execute the test with the specified horizons.", "source": "github-repos"} {"code": "def from_config(cls, config_dict: dict, schema_path: str=None):\n if (schema_path is None):\n schema_path = join(dirname(__file__), 'schema', 'configure_sbi.json')\n with open(schema_path, 'r') as file:\n schema = json.loads(file.read())\n validate(config_dict, schema)\n config_dict['status'] = 'created'\n if ('subarray_id' not in config_dict):\n config_dict['subarray_id'] = 'None'\n timestamp = datetime.datetime.utcnow().isoformat()\n config_dict['created'] = timestamp\n config_dict['updated'] = timestamp\n pb_list = copy.deepcopy(config_dict['processing_blocks'])\n config_dict.pop('processing_blocks', None)\n config_dict['processing_block_ids'] = []\n for pb in pb_list:\n config_dict['processing_block_ids'].append(pb['id'])\n key = SchedulingObject.get_key(SBI_KEY, config_dict['id'])\n DB.save_dict(key, config_dict, hierarchical=False)\n key = '{}:active'.format(SBI_KEY)\n DB.append_to_list(key, config_dict['id'])\n sbi = SchedulingObject(SBI_KEY, config_dict['id'])\n sbi.set_status('created')\n for pb in pb_list:\n pb['sbi_id'] = config_dict['id']\n cls._add_pb(pb)\n return cls(config_dict['id'])", "docstring": "Create an SBI object from the specified configuration dict.\n\nNOTE(BM) This should really be done as a single atomic db transaction.\n\nArgs:\nconfig_dict(dict): SBI configuration dictionary\nschema_path(str, optional): Path to the SBI config schema.", "source": "codesearchnet"} {"code": "def _readvalue(sock, buf, size):\n chunks = []\n rlen = (size + 2)\n while ((rlen - len(buf)) > 0):\n if buf:\n rlen -= len(buf)\n chunks.append(buf)\n buf = _recv(sock, RECV_SIZE)\n if (not buf):\n raise MemcacheUnexpectedCloseError()\n if (rlen == 1):\n chunks[(- 1)] = chunks[(- 1)][:(- 1)]\n else:\n chunks.append(buf[:(rlen - 2)])\n return (buf[rlen:], b''.join(chunks))", "docstring": "Read 
specified amount of bytes from the socket.\n\nRead size bytes, followed by the \"\\r\\n\" characters, from the socket,\nand return those bytes and any trailing bytes read after the \"\\r\\n\".\n\nArgs:\nsock: Socket object, should be connected.\nbuf: String, zero or more characters, returned from an earlier\ncall to _readline or _readvalue (pass an empty string on the\nfirst call).\nsize: Integer, number of bytes to read from the socket.\n\nReturns:\nA tuple of (buf, value) where value is the bytes read from the\nsocket (there will be exactly size bytes) and buf is trailing\ncharacters read after the \"\\r\\n\" following the bytes (but not\nincluding the \\r\\n).", "source": "codesearchnet"} {"code": "def List(self, request, global_params=None):\n config = self.GetMethodConfig('List')\n return self._RunMethod(config, request, global_params=global_params)", "docstring": "Lists all models in the specified dataset. Requires the READER dataset role.\n\nArgs:\nrequest: (BigqueryModelsListRequest) input message\nglobal_params: (StandardQueryParameters, default: None) global arguments\nReturns:\n(ListModelsResponse) The response message.", "source": "github-repos"} {"code": "def sort_dict(d, desc=True):\n sort = sorted(d.items(), key=(lambda x: x[1]), reverse=desc)\n return OrderedDict(sort)", "docstring": "Sort an ordered dictionary by value, descending.\n\nArgs:\nd (OrderedDict): An ordered dictionary.\ndesc (bool): If true, sort desc.\n\nReturns:\nOrderedDict: The sorted dictionary.", "source": "codesearchnet"} {"code": "def size_filter(labeled_grid, min_size):\n \n out_grid = np.zeros(labeled_grid.shape, dtype=int)\n slices = find_objects(labeled_grid)\n j = 1\n for i, s in enumerate(slices):\n box = labeled_grid[s]\n size = np.count_nonzero(box.ravel() == (i + 1))\n if size >= min_size and box.shape[0] > 1 and box.shape[1] > 1:\n out_grid[np.where(labeled_grid == i + 1)] = j\n j += 1\n return out_grid", "docstring": "Remove labeled objects that do not meet size threshold criteria.\n\nArgs:\nlabeled_grid: 2D output from label method.\nmin_size: minimum size of object in pixels.\n\nReturns:\nlabeled grid with smaller objects removed.", "source": "juraj-google-style"} {"code": "def get_filename(self, tag):\n if (tag.find('filename', recursive=False) is not None):\n return tag.filename.contents[0]\n elif (tag.find('anchorfile', recursive=False) is not None):\n return ((tag.anchorfile.contents[0] + '", "docstring": "Extract and return a documentation filename from a tag.\n\nOverride as necessary, though this default implementation probably\ncovers all the cases of interest.\n\nArgs:\ntag: A BeautifulSoup Tag that satisfies match_criterion.\n\nReturns:\nA string that would be appropriate to use as the documentation\nfilename for an entry in a Zeal database.", "source": "codesearchnet"} {"code": "def validate_is_primary(self, is_primary):\n if (is_primary and (not (self.instance and self.instance.is_verified))):\n raise serializers.ValidationError(_('Unverified email addresses may not be used as the primary address.'))\n return is_primary", "docstring": "Validate the provided 'is_primary' parameter.\n\nReturns:\nThe validated 'is_primary' value.\n\nRaises:\nserializers.ValidationError:\nIf the user attempted to mark an unverified email as\ntheir primary email address.", "source": "codesearchnet"} {"code": "def create_token_type_ids_from_sequences(self, token_ids_0: List[int], token_ids_1: Optional[List[int]]=None) -> List[int]:\n eos = [self.eos_token_id]\n if token_ids_1 is None:\n return 
len(token_ids_0 + eos) * [0]\n return len(token_ids_0 + eos + token_ids_1 + eos) * [0]", "docstring": "Create a mask from the two sequences passed to be used in a sequence-pair classification task. MyT5 does not\nmake use of token type ids, therefore a list of zeros is returned.\n\nArgs:\ntoken_ids_0 (`List[int]`):\nList of IDs.\ntoken_ids_1 (`List[int]`, *optional*):\nOptional second list of IDs for sequence pairs.\n\nReturns:\n`List[int]`: List of zeros.", "source": "github-repos"} {"code": "def render(self,\n trajectories: Tuple[NonFluents, Fluents, Fluents, Fluents, np.array],\n batch: Optional[int] = None) -> None:\n \n raise NotImplementedError", "docstring": "Renders the simulated `trajectories` for the given `batch`.\n\nArgs:\ntrajectories: NonFluents, states, actions, interms and rewards.\nbatch: Number of batches to render.", "source": "juraj-google-style"} {"code": "def visit_comparison(self, comparison: _evaluation.ComparisonNode) -> _sql_data_types.Select:\n lhs_result = self.visit(comparison.left)\n rhs_result = self.visit(comparison.right)\n lhs_subquery = lhs_result.as_operand()\n rhs_subquery = rhs_result.as_operand()\n sql_value = f'({lhs_subquery} {comparison.op} {rhs_subquery})'\n sql_alias = 'comparison_'\n return _sql_data_types.Select(select_part=_sql_data_types.RawExpression(sql_value, _sql_data_type=_sql_data_types.Boolean, _sql_alias=sql_alias), from_part=None)", "docstring": "Translates a FHIRPath comparison to Standard SQL.\n\nEach operand is expected to be a collection of a single element. Operands\ncan be strings, integers, decimals, dates, datetimes, and times. Comparison\nwill perform implicit conversion between applicable types.\n\nArgs:\ncomparison: The `Comparison` Expression node.\n\nReturns:\nA compiled Standard SQL expression.", "source": "github-repos"} {"code": "def add_delta_deltas(filterbanks, name=None):\n \n delta_filter = np.array([2, 1, 0, -1, -2])\n delta_delta_filter = scipy.signal.convolve(delta_filter, delta_filter, \"full\")\n\n delta_filter_stack = np.array(\n [[0] * 4 + [1] + [0] * 4, [0] * 2 + list(delta_filter) + [0] * 2,\n list(delta_delta_filter)],\n dtype=np.float32).T[:, None, None, :]\n\n delta_filter_stack /= np.sqrt(\n np.sum(delta_filter_stack**2, axis=0, keepdims=True))\n\n filterbanks = tf.nn.conv2d(\n filterbanks, delta_filter_stack, [1, 1, 1, 1], \"SAME\", data_format=\"NHWC\",\n name=name)\n return filterbanks", "docstring": "Compute time first and second-order derivative channels.\n\nArgs:\nfilterbanks: float32 tensor with shape [batch_size, len, num_bins, 1]\nname: scope name\n\nReturns:\nfloat32 tensor with shape [batch_size, len, num_bins, 3]", "source": "juraj-google-style"} {"code": "def NormalizePath(path):\n \n path = os.path.normpath(path)\n\n for sys_path in sys.path:\n if not sys_path:\n continue\n\n \n sys_path = os.path.join(sys_path, '')\n\n if path.startswith(sys_path):\n return path[len(sys_path):]\n\n return path", "docstring": "Removes any Python system path prefix from the given path.\n\nPython keeps almost all paths absolute. This is not what we actually\nwant to return. This loops through system paths (directories in which\nPython will load modules). 
If \"path\" is relative to one of them, the\ndirectory prefix is removed.\n\nArgs:\npath: absolute path to normalize (relative paths will not be altered)\n\nReturns:\nRelative path if \"path\" is within one of the sys.path directories or\nthe input otherwise.", "source": "juraj-google-style"} {"code": "def to_json(self, is_admin=False):\n if is_admin:\n return {'accountId': self.account_id, 'accountName': self.account_name, 'accountType': self.account_type, 'contacts': self.contacts, 'enabled': (True if (self.enabled == 1) else False), 'requiredRoles': self.required_roles, 'properties': {to_camelcase(prop.name): prop.value for prop in self.account.properties}}\n else:\n return {'accountId': self.account_id, 'accountName': self.account_name, 'contacts': self.contacts}", "docstring": "Returns a dict representation of the object\n\nArgs:\nis_admin (`bool`): If true, include information about the account that should be avaiable only to admins\n\nReturns:\n`dict`", "source": "codesearchnet"} {"code": "def gpio_properties(self):\n res = self._dll.JLINK_EMU_GPIO_GetProps(0, 0)\n if (res < 0):\n raise errors.JLinkException(res)\n num_props = res\n buf = (structs.JLinkGPIODescriptor * num_props)()\n res = self._dll.JLINK_EMU_GPIO_GetProps(ctypes.byref(buf), num_props)\n if (res < 0):\n raise errors.JLinkException(res)\n return list(buf)", "docstring": "Returns the properties of the user-controllable GPIOs.\n\nProvided the device supports user-controllable GPIOs, they will be\nreturned by this method.\n\nArgs:\nself (JLink): the ``JLink`` instance\n\nReturns:\nA list of ``JLinkGPIODescriptor`` instances totalling the number of\nrequested properties.\n\nRaises:\nJLinkException: on error.", "source": "codesearchnet"} {"code": "def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None):\n output = [self.cls_token_id] + token_ids_0 + [self.sep_token_id]\n if token_ids_1 is not None:\n output += token_ids_1 + [self.sep_token_id]\n return output", "docstring": "Build model inputs from a sequence or a pair of sequence for sequence classification tasks by concatenating and\nadding special tokens. 
A SqueezeBERT sequence has the following format:\n\n- single sequence: `[CLS] X [SEP]`\n- pair of sequences: `[CLS] A [SEP] B [SEP]`\n\nArgs:\ntoken_ids_0 (`List[int]`):\nList of IDs to which the special tokens will be added.\ntoken_ids_1 (`List[int]`, *optional*):\nOptional second list of IDs for sequence pairs.\n\nReturns:\n`List[int]`: List of [input IDs](../glossary#input-ids) with the appropriate special tokens.", "source": "github-repos"} {"code": "def size():\n try:\n assert ((os != 'nt') and sys.stdout.isatty())\n (rows, columns) = os.popen('stty size', 'r').read().split()\n except (AssertionError, AttributeError, ValueError):\n (rows, columns) = (DEFAULT_HEIGHT, DEFAULT_WIDTH)\n return (int(rows), int(columns))", "docstring": "Determines the height and width of the console window\n\nReturns:\ntuple of int: The height in lines, then width in characters", "source": "codesearchnet"} {"code": "def project_texture_on_surface(texture, surface, angle=DEFAULT_ANGLE):\n projected_surface = project_surface(surface, angle)\n (texture_x, _) = texture\n texture_y = map_texture_to_surface(texture, projected_surface)\n return (texture_x, texture_y)", "docstring": "Maps a texture onto a surface, then projects to 2D and returns a layer.\n\nArgs:\ntexture (texture): the texture to project\nsurface (surface): the surface to project onto\nangle (float): the projection angle in degrees (0 = top-down, 90 = side view)\n\nReturns:\nlayer: A layer.", "source": "codesearchnet"} {"code": "def states():\n states = {}\n fname = pkg_resources.resource_filename(__name__, 'resources/City_State_Pairs.csv')\n with open(fname, 'rU') as csvfile:\n reader = csv.reader(csvfile, delimiter=',')\n for row in reader:\n states[row[0]] = row[1]\n return states", "docstring": "Get a dictionary of Backpage city names mapped to their respective states.\n\nReturns:\ndictionary of Backpage city names mapped to their states", "source": "codesearchnet"} {"code": "def list_metadata(self, resource):\n \n self.metadata_service.set_auth(self._token_metadata)\n return self.metadata_service.list(resource)", "docstring": "List all keys associated with the given resource.\n\nArgs:\nresource (intern.resource.boss.BossResource)\n\nReturns:\n(list)\n\nRaises:\nrequests.HTTPError on a failure.", "source": "juraj-google-style"} {"code": "def __init__(self, distributed_variables=None, name=None, **unused_kwargs):\n if not ops.executing_eagerly_outside_functions():\n raise ValueError('PackedDistributedVariable should be created in eager mode.')\n if not distributed_variables:\n raise ValueError('Expect a non-empty list of variables to pack.')\n for i, var in enumerate(distributed_variables):\n if not resource_variable_ops.is_resource_variable(var):\n raise ValueError('Expect a list of ResourceVariables to pack, but the %d-th variable is %s' % (i, type(var)))\n self._distributed_variables = distributed_variables\n self._devices = [v.device for v in distributed_variables]\n with ops.init_scope():\n with ops.name_scope(name, 'Variable', skip_on_eager=False) as name:\n handle = ops.pack_eager_tensors([var.handle for var in distributed_variables])\n handle_name = ops.name_from_scope_name(name)\n unique_id = '%s_%d' % (handle_name, ops.uid())\n super(PackedDistributedVariable, self).__init__(trainable=distributed_variables[0].trainable, shape=distributed_variables[0].shape, dtype=distributed_variables[0].dtype, handle=handle, synchronization=distributed_variables[0].synchronization, constraint=distributed_variables[0].constraint, 
aggregation=distributed_variables[0].aggregation, distribute_strategy=distributed_variables[0]._distribute_strategy, name=name, unique_id=unique_id, handle_name=handle_name, graph_element=None, initial_value=None, initializer_op=None, is_initialized_op=None, cached_value=None, caching_device=None, is_distributed_variables=True)", "docstring": "Packs a list of variables which are distributed across devices.\n\nArgs:\ndistributed_variables: A list of distributed Variables to pack.\nname: Optional name for the variable. Defaults to `'Variable'` and gets\nuniquified automatically.", "source": "github-repos"} {"code": "def Validate(self, value):\n if (value is None):\n return None\n if (not isinstance(value, self.rdfclass)):\n try:\n r = self.rdfclass()\n r.FromDict(value)\n return r\n except (AttributeError, TypeError, rdfvalue.InitializeError):\n raise TypeValueError(('Value for arg %s should be an %s' % (self.name, self.rdfclass.__name__)))\n return value", "docstring": "Validate the value.\n\nArgs:\nvalue: Value is expected to be a dict-like object that a given RDFStruct\ncan be initialized from.\n\nRaises:\nTypeValueError: If the value is not a valid dict-like object that a given\nRDFStruct can be initialized from.\n\nReturns:\nA valid instance of self.rdfclass or None.", "source": "codesearchnet"} {"code": "def load_project_tests(test_path, dot_env_path=None):\n \n \n debugtalk_path = locate_debugtalk_py(test_path)\n\n if debugtalk_path:\n \n project_working_directory = os.path.dirname(debugtalk_path)\n else:\n \n project_working_directory = os.getcwd()\n\n \n sys.path.insert(0, project_working_directory)\n\n \n \n \n \n dot_env_path = dot_env_path or os.path.join(project_working_directory, \".env\")\n project_mapping[\"env\"] = load_dot_env_file(dot_env_path)\n\n if debugtalk_path:\n \n debugtalk_functions = load_debugtalk_functions()\n else:\n debugtalk_functions = {}\n\n \n\n project_mapping[\"PWD\"] = project_working_directory\n built_in.PWD = project_working_directory\n project_mapping[\"functions\"] = debugtalk_functions\n\n \n tests_def_mapping[\"api\"] = load_api_folder(os.path.join(project_working_directory, \"api\"))\n tests_def_mapping[\"PWD\"] = project_working_directory", "docstring": "load api, testcases, .env, debugtalk.py functions.\napi/testcases folder is relative to project_working_directory\n\nArgs:\ntest_path (str): test file/folder path, locate pwd from this path.\ndot_env_path (str): specified .env file path\n\nReturns:\ndict: project loaded api/testcases definitions, environments and debugtalk.py functions.", "source": "juraj-google-style"} {"code": "def predict(fqdn, result, *argl, **argd):\n out = None\n if (len(argl) > 0):\n machine = argl[0]\n if isclassifier(machine):\n out = classify_predict(fqdn, result, None, *argl, **argd)\n elif isregressor(machine):\n out = regress_predict(fqdn, result, None, *argl, **argd)\n return out", "docstring": "Analyzes the result of a generic predict operation performed by\n`sklearn`.\n\nArgs:\nfqdn (str): full-qualified name of the method that was called.\nresult: result of calling the method with `fqdn`.\nargl (tuple): positional arguments passed to the method call.\nargd (dict): keyword arguments passed to the method call.", "source": "codesearchnet"} {"code": "def verify_path(path, is_collection):\n num_elements = len(path)\n if (num_elements == 0):\n raise ValueError('Document or collection path cannot be empty')\n if is_collection:\n if ((num_elements % 2) == 0):\n raise ValueError('A collection must have an odd number of 
path elements')\n elif ((num_elements % 2) == 1):\n raise ValueError('A document must have an even number of path elements')\n for element in path:\n if (not isinstance(element, six.string_types)):\n msg = BAD_PATH_TEMPLATE.format(element, type(element))\n raise ValueError(msg)", "docstring": "Verifies that a ``path`` has the correct form.\n\nChecks that all of the elements in ``path`` are strings.\n\nArgs:\npath (Tuple[str, ...]): The components in a collection or\ndocument path.\nis_collection (bool): Indicates if the ``path`` represents\na document or a collection.\n\nRaises:\nValueError: if\n\n* the ``path`` is empty\n* ``is_collection=True`` and there are an even number of elements\n* ``is_collection=False`` and there are an odd number of elements\n* an element is not a string", "source": "codesearchnet"} {"code": "def ParseMany(text):\n \n precondition.AssertType(text, Text)\n\n if compatibility.PY2:\n text = text.encode(\"utf-8\")\n\n return list(yaml.safe_load_all(text))", "docstring": "Parses many YAML documents into a list of Python objects.\n\nArgs:\ntext: A YAML source with multiple documents embedded.\n\nReturns:\nA list of Python data structures corresponding to the YAML documents.", "source": "juraj-google-style"} {"code": "def success(channel, post):\n datapacks = [('Game', post[0], True), ('Upvotes', post[2], True)]\n gui = ui_embed.UI(channel, 'Link', post[1], modulename=modulename, colour=16746496, thumbnail=post[1], datapacks=datapacks)\n return gui", "docstring": "Creates an embed UI containing the Reddit posts\n\nArgs:\nchannel (discord.Channel): The Discord channel to bind the embed to\npost (tuple): Tuples of (field, value, percentile)\n\nReturns:", "source": "codesearchnet"} {"code": "def distance(self, other):\n return distance(self.lat, self.lon, None, other.lat, other.lon, None)", "docstring": "Distance between points\n\nArgs:\nother (:obj:`Point`)\nReturns:\nfloat: Distance in km", "source": "codesearchnet"} {"code": "def _AddForwardedIps(self, forwarded_ips, interface):\n \n for address in forwarded_ips:\n self.ip_forwarding_utils.AddForwardedIp(address, interface)", "docstring": "Configure the forwarded IP address on the network interface.\n\nArgs:\nforwarded_ips: list, the forwarded IP address strings to configure.\ninterface: string, the output device to use.", "source": "juraj-google-style"} {"code": "def get_cpu_props(cls, family, arch='x86'):\n \n\n cpus = cls.get_cpus_by_arch(arch)\n try:\n return cpus.xpath('model[@name=\"{0}\"]'.format(family))[0]\n except IndexError:\n raise LagoException('No such CPU family: {0}'.format(family))", "docstring": "Get CPU info XML\n\nArgs:\nfamily(str): CPU family\narch(str): CPU arch\n\nReturns:\nlxml.etree.Element: CPU xml\n\nRaises:\n:exc:`~LagoException`: If no such CPU family exists", "source": "juraj-google-style"} {"code": "def assert_lines_equal_ignoring_whitespace(test, expected_lines, actual_lines):\n test.assertEqual(len(expected_lines), len(actual_lines), 'Mismatch in the number of lines: %d vs %d' % (len(expected_lines), len(actual_lines)))\n for expected_line, actual_line in zip(expected_lines, actual_lines):\n test.assertEqual(''.join(expected_line.split()), ''.join(actual_line.split()))", "docstring": "Assert equality in lines, ignoring all whitespace.\n\nArgs:\ntest: An instance of unittest.TestCase or its subtypes (e.g.,\nTensorFlowTestCase).\nexpected_lines: Expected lines as an iterable of strings.\nactual_lines: Actual lines as an iterable of strings.", "source": "github-repos"} {"code": "def 
__init__(self, proj_info):\n \n self._proj_info = proj_info\n self.__docfolder = DOC_FOLDER\n self.__htmlfolder = HTML_FOLDER\n\n self.conf_fpath = os.path.abspath(\n os.path.join(self.__docfolder, 'conf.py'))\n self.code_fdpath = os.path.abspath(\n os.path.join(SRC_FOLDER, self.proj_info.project_name))\n\n self._sphinx_quickstart_cmd = [\n 'sphinx-quickstart', self.__docfolder, '-p',\n self.proj_info.project_name, '-a', self.proj_info.author_fakename,\n '-v', self.proj_info.project_version, '-r',\n self.proj_info.project_version, '-l', 'en', '--ext-autodoc',\n '--makefile', '--quiet'\n ]\n self._sphinx_apidoc_cmd = [\n 'sphinx-apidoc', self.code_fdpath, '-o', self.__docfolder, '-M',\n '--force'\n ]\n\n \n self._sphinx_buildhtml_cmd = [\n 'sphinx-build', '-b', 'html', self.__docfolder, self.__htmlfolder\n ]\n\n \n mkdir_exist(self.__docfolder)\n mkdir_exist(self.__htmlfolder)", "docstring": "TODO: to be defined1.\n\nArgs:\nproj_info (ProjectInfo): TODO", "source": "juraj-google-style"} {"code": "def populate_audit_fields(self, event):\n \n event.updated = self._data\n event.original = self.get_original()._data", "docstring": "Populates the the audit JSON fields with raw data from the model, so\nall changes can be tracked and diffed.\n\nArgs:\nevent (Event): The Event instance to attach the data to\ninstance (fleaker.db.Model): The newly created/updated model", "source": "juraj-google-style"} {"code": "def _validate_isvalid_composition(self, isvalid_composition, field, value):\n \n sum_amount = 0.0\n if value['kind'] in ['mass fraction', 'mole fraction']:\n low_lim = 0.0\n up_lim = 1.0\n total_amount = 1.0\n elif value['kind'] in ['mole percent']:\n low_lim = 0.0\n up_lim = 100.0\n total_amount = 100.0\n else:\n self._error(field, 'composition kind must be \"mole percent\", \"mass fraction\", or '\n '\"mole fraction\"')\n return False\n\n for sp in value['species']:\n amount = sp['amount'][0]\n sum_amount += amount\n\n \n if amount < low_lim:\n self._error(field, 'Species ' + sp['species-name'] + ' ' +\n value['kind'] + ' must be greater than {:.1f}'.format(low_lim)\n )\n elif amount > up_lim:\n self._error(field, 'Species ' + sp['species-name'] + ' ' +\n value['kind'] + ' must be less than {:.1f}'.format(up_lim)\n )\n\n \n if not np.isclose(total_amount, sum_amount):\n self._error(field, 'Species ' + value['kind'] +\n 's do not sum to {:.1f}: '.format(total_amount) +\n '{:f}'.format(sum_amount)\n )", "docstring": "Checks for valid specification of composition.\n\nArgs:\nisvalid_composition (bool): flag from schema indicating\ncomposition to be checked.\nfield (str): 'composition'\nvalue (dict): dictionary of composition\n\nThe rule's arguments are validated against this schema:\n{'isvalid_composition': {'type': 'bool'}, 'field': {'type': 'str'},\n'value': {'type': 'dict'}}", "source": "juraj-google-style"} {"code": "def _get_url_scheme_regexes():\n output = sh(\"hg showconfig | grep '^schemes.'\", shell=True).split('\\n')\n log.debug(output)\n schemes = (l.split('.', 1)[1].split('=') for l in output if ('=' in l))\n regexes = sorted(((k, v, re.compile((v.replace('{1}', '(.*)') + '(.*)'))) for (k, v) in schemes), key=(lambda x: (len(x[0]), x)), reverse=True)\n return regexes", "docstring": "Get configured mercurial schemes and convert them to regexes\n\nReturns:\ntuple: (scheme_name, scheme_value, compiled scheme_regex)", "source": "codesearchnet"} {"code": "def increment_lessons(self, measure_vals, reward_buff_sizes=None):\n ret = {}\n if reward_buff_sizes:\n for (brain_name, buff_size) in 
reward_buff_sizes.items():\n if self._lesson_ready_to_increment(brain_name, buff_size):\n measure_val = measure_vals[brain_name]\n ret[brain_name] = self.brains_to_curriculums[brain_name].increment_lesson(measure_val)\n else:\n for (brain_name, measure_val) in measure_vals.items():\n ret[brain_name] = self.brains_to_curriculums[brain_name].increment_lesson(measure_val)\n return ret", "docstring": "Attempts to increments all the lessons of all the curriculums in this\nMetaCurriculum. Note that calling this method does not guarantee the\nlesson of a curriculum will increment. The lesson of a curriculum will\nonly increment if the specified measure threshold defined in the\ncurriculum has been reached and the minimum number of episodes in the\nlesson have been completed.\n\nArgs:\nmeasure_vals (dict): A dict of brain name to measure value.\nreward_buff_sizes (dict): A dict of brain names to the size of their\ncorresponding reward buffers.\n\nReturns:\nA dict from brain name to whether that brain's lesson number was\nincremented.", "source": "codesearchnet"} {"code": "def current_api_key():\n if app.config.get('IGNORE_AUTH'):\n return models.ApiKey(id='anonymous_superuser', secret='', superuser=True)\n ops = _get_api_key_ops()\n api_key = ops.get()\n logging.debug('Authenticated as API key=%r', api_key.id)\n return api_key", "docstring": "Determines the API key for the current request.\n\nReturns:\nThe ApiKey instance.", "source": "codesearchnet"} {"code": "def assert_finite(x, data=None, summarize=None, message=None, name=None):\n with tf.compat.v2.name_scope((name or 'assert_finite')):\n x_ = tf.get_static_value(x)\n if (x_ is not None):\n if (~ np.all(np.isfinite(x_))):\n raise ValueError(message)\n return x\n assertion = tf.compat.v1.assert_equal(tf.math.is_finite(x), tf.ones_like(x, tf.bool), data=data, summarize=summarize, message=message)\n with tf.control_dependencies([assertion]):\n return tf.identity(x)", "docstring": "Assert all elements of `x` are finite.\n\nArgs:\nx: Numeric `Tensor`.\ndata: The tensors to print out if the condition is False. 
Defaults to\nerror message and first few entries of `x`.\nsummarize: Print this many entries of each tensor.\nmessage: A string to prefix to the default message.\nname: A name for this operation (optional).\nDefaults to \"assert_finite\".\n\nReturns:\nOp raising `InvalidArgumentError` unless `x` has specified rank or lower.\nIf static checks determine `x` has correct rank, a `no_op` is returned.\n\nRaises:\nValueError: If static checks determine `x` has wrong rank.", "source": "codesearchnet"} {"code": "def encode_message(self, message):\n \n message.check_initialized()\n\n return json.dumps(message, cls=MessageJSONEncoder,\n protojson_protocol=self)", "docstring": "Encode Message instance to JSON string.\n\nArgs:\nMessage instance to encode in to JSON string.\n\nReturns:\nString encoding of Message instance in protocol JSON format.\n\nRaises:\nmessages.ValidationError if message is not initialized.", "source": "juraj-google-style"} {"code": "class RootMeanSquaredError(reduction_metrics.Mean):\n\n def __init__(self, name='root_mean_squared_error', dtype=None):\n super().__init__(name, dtype=dtype)\n self._direction = 'down'\n\n def update_state(self, y_true, y_pred, sample_weight=None):\n \n y_true = ops.convert_to_tensor(y_true, self._dtype)\n y_pred = ops.convert_to_tensor(y_pred, self._dtype)\n y_true, y_pred = squeeze_or_expand_to_same_rank(y_true, y_pred)\n error_sq = ops.square(y_pred - y_true)\n return super().update_state(error_sq, sample_weight=sample_weight)\n\n def result(self):\n return ops.sqrt(super().result())", "docstring": "Computes root mean squared error metric between `y_true` and `y_pred`.\n\nFormula:\n\n```python\nloss = sqrt(mean((y_pred - y_true) ** 2))\n```\n\nArgs:\nname: (Optional) string name of the metric instance.\ndtype: (Optional) data type of the metric result.\n\nExamples:\n\n>>> m = keras.metrics.RootMeanSquaredError()\n>>> m.update_state([[0, 1], [0, 0]], [[1, 1], [0, 0]])\n>>> m.result()\n0.5\n\n>>> m.reset_state()\n>>> m.update_state([[0, 1], [0, 0]], [[1, 1], [0, 0]],\n... sample_weight=[1, 0])\n>>> m.result()\n0.70710677\n\nUsage with `compile()` API:\n\n```python\nmodel.compile(\noptimizer='sgd',\nloss='mse',\nmetrics=[keras.metrics.RootMeanSquaredError()])\n```", "source": "github-repos"} {"code": "def send_notifications(self, notification_type, *args):\n \n\n if notification_type in self.notifications:\n for notification_id, callback in self.notifications[notification_type]:\n try:\n callback(*args)\n except:\n self.logger.exception('Problem calling notify callback!')", "docstring": "Fires off the notification for the specific event. 
Uses var args to pass in a\narbitrary list of parameter according to which notification type was fired.\n\nArgs:\nnotification_type: Type of notification to fire (String from .helpers.enums.NotificationTypes)\nargs: variable list of arguments to the callback.", "source": "juraj-google-style"} {"code": "def _get_node(self, token: str) -> dict:\n node = self.data\n for char in token:\n if char not in node:\n break\n node = node[char]\n return node", "docstring": "Retrieves the node corresponding to the given token in the Trie.\n\nArgs:\ntoken (str): The token for which the corresponding node needs to be retrieved.\n\nReturns:\ndict: The node in the Trie corresponding to the given token.", "source": "github-repos"} {"code": "def matrix_rank(a, tol=None, validate_args=False, name=None):\n with tf.compat.v1.name_scope(name, 'matrix_rank', [a, tol]):\n a = tf.convert_to_tensor(value=a, dtype_hint=tf.float32, name='a')\n assertions = _maybe_validate_matrix(a, validate_args)\n if assertions:\n with tf.control_dependencies(assertions):\n a = tf.identity(a)\n s = tf.linalg.svd(a, compute_uv=False)\n if (tol is None):\n if a.shape[(- 2):].is_fully_defined():\n m = np.max(a.shape[(- 2):].as_list())\n else:\n m = tf.reduce_max(input_tensor=tf.shape(input=a)[(- 2):])\n eps = np.finfo(a.dtype.as_numpy_dtype).eps\n tol = ((eps * tf.cast(m, a.dtype)) * tf.reduce_max(input_tensor=s, axis=(- 1), keepdims=True))\n return tf.reduce_sum(input_tensor=tf.cast((s > tol), tf.int32), axis=(- 1))", "docstring": "Compute the matrix rank; the number of non-zero SVD singular values.\n\nArguments:\na: (Batch of) `float`-like matrix-shaped `Tensor`(s) which are to be\npseudo-inverted.\ntol: Threshold below which the singular value is counted as \"zero\".\nDefault value: `None` (i.e., `eps * max(rows, cols) * max(singular_val)`).\nvalidate_args: When `True`, additional assertions might be embedded in the\ngraph.\nDefault value: `False` (i.e., no graph assertions are added).\nname: Python `str` prefixed to ops created by this function.\nDefault value: \"matrix_rank\".\n\nReturns:\nmatrix_rank: (Batch of) `int32` scalars representing the number of non-zero\nsingular values.", "source": "codesearchnet"} {"code": "def stat(self, follow_symlinks=True):\n \n return self._system.stat(\n path=self._path, client_kwargs=self._client_kwargs,\n header=self._header)", "docstring": "Return a stat_result object for this entry.\n\nThe result is cached on the os.DirEntry object.\n\nArgs:\nfollow_symlinks (bool): Follow symlinks.\nNot supported on cloud storage objects.\n\nReturns:\nos.stat_result: Stat result object", "source": "juraj-google-style"} {"code": "def _get_request(self, auth=None):\n self.request = HSRequest((auth or self.auth), self.env)\n self.request.response_callback = self.response_callback\n return self.request", "docstring": "Return an http request object\n\nauth: Auth data to use\n\nReturns:\nA HSRequest object", "source": "codesearchnet"} {"code": "def _process(self, input):\n input = re.sub('<[^>]*>', ' ', input)\n punct = list(string.punctuation)\n for symbol in punct:\n input = input.replace(symbol, (' %s ' % symbol))\n input = filter((lambda x: (x != u'')), input.lower().split(' '))\n return input", "docstring": "Takes in html-mixed body text as a string and returns a list of strings,\nlower case and with punctuation given spacing.\n\nCalled by self._gen_sentence()\n\nArgs:\ninpnut (string): body text", "source": "codesearchnet"} {"code": "def format_to_string(self, pretty: bool=False) -> str:\n trace = {}\n 
trace['traceEvents'] = self._metadata + self._events\n if pretty:\n return json.dumps(trace, indent=4, separators=(',', ': '))\n else:\n return json.dumps(trace, separators=(',', ':'))", "docstring": "Formats the chrome trace to a string.\n\nArgs:\npretty: (Optional.) If True, produce human-readable JSON output.\n\nReturns:\nA JSON-formatted string in Chrome Trace format.", "source": "github-repos"} {"code": "def resolve(self, file_path, follow_symlinks=True, allow_fd=False):\n \n if isinstance(file_path, int):\n if allow_fd and sys.version_info >= (3, 3):\n return self.get_open_file(file_path).get_object()\n raise TypeError('path should be string, bytes or '\n 'os.PathLike (if supported), not int')\n\n if follow_symlinks:\n file_path = make_string_path(file_path)\n return self.get_object_from_normpath(self.resolve_path(file_path))\n return self.lresolve(file_path)", "docstring": "Search for the specified filesystem object, resolving all links.\n\nArgs:\nfile_path: Specifies the target FakeFile object to retrieve.\nfollow_symlinks: If `False`, the link itself is resolved,\notherwise the object linked to.\nallow_fd: If `True`, `file_path` may be an open file descriptor\n\nReturns:\nThe FakeFile object corresponding to `file_path`.\n\nRaises:\nIOError: if the object is not found.", "source": "juraj-google-style"} {"code": "def concatenate(self, other):\n other = as_shape(other)\n if ((self._dims is None) or (other.dims is None)):\n return unknown_shape()\n else:\n return TensorShape((self._dims + other.dims))", "docstring": "Returns the concatenation of the dimension in `self` and `other`.\n\n*N.B.* If either `self` or `other` is completely unknown,\nconcatenation will discard information about the other shape. In\nfuture, we might support concatenation that preserves this\ninformation for use with slicing.\n\nArgs:\nother: Another `TensorShape`.\n\nReturns:\nA `TensorShape` whose dimensions are the concatenation of the\ndimensions in `self` and `other`.", "source": "codesearchnet"} {"code": "def get_policies(self):\n prefix = (_IDENTITY_NS + _POLICY_NS)\n policylist_list = [_create_from_bytes(d, identity_pb2.PolicyList) for (_, d) in self._state_view.leaves(prefix=prefix)]\n policies = []\n for policy_list in policylist_list:\n for policy in policy_list.policies:\n policies.append(policy)\n return sorted(policies, key=(lambda p: p.name))", "docstring": "Returns all the Policies under the Identity namespace.\n\nReturns:\n(list): A list containing all the Policies under the Identity\nnamespace.", "source": "codesearchnet"} {"code": "def chr_range(*args, **kw):\n if (len(args) == 1):\n (stop,) = args\n (start, step) = (0, 1)\n elif (len(args) == 2):\n (start, stop) = args\n step = 1\n elif (len(args) == 3):\n (start, stop, step) = args\n else:\n raise ValueError('incorrect args')\n chr_ = six.unichr\n base = ord(kw.get('base', 'a'))\n if isinstance(start, int):\n start = (base + start)\n if isinstance(stop, int):\n stop = (base + stop)\n if isinstance(start, six.string_types):\n start = ord(start)\n if isinstance(stop, six.string_types):\n stop = ord(stop)\n if (step is None):\n step = 1\n list_ = list(map(six.text_type, map(chr_, range(start, stop, step))))\n return list_", "docstring": "r\"\"\"\nLike range but returns characters\n\nArgs:\nstart (None): (default = None)\nstop (None): (default = None)\nstep (None): (default = None)\n\nKwargs:\nbase (str): charater to start with (default='a')\n\nReturns:\nlist: list of characters\n\nCommandLine:\npython -m utool.util_str 
--exec-chr_range\n\nExample:\n>>> # ENABLE_DOCTEST\n>>> from utool.util_str import * # NOQA\n>>> import utool as ut\n>>> args = (5,)\n>>> result = ut.repr2(chr_range(2, base='a'))\n>>> print(chr_range(0, 5))\n>>> print(chr_range(0, 50))\n>>> print(chr_range(0, 5, 2))\n>>> print(result)\n['a', 'b']", "source": "codesearchnet"} {"code": "def create_and_fill_np_array(start_or_end_logits, dataset, max_len):\n step = 0\n logits_concat = np.full((len(dataset), max_len), -100, dtype=np.float64)\n for i, output_logit in enumerate(start_or_end_logits):\n batch_size = output_logit.shape[0]\n cols = output_logit.shape[1]\n if step + batch_size < len(dataset):\n logits_concat[step:step + batch_size, :cols] = output_logit\n else:\n logits_concat[step:, :cols] = output_logit[:len(dataset) - step]\n step += batch_size\n return logits_concat", "docstring": "Create and fill numpy array of size len_of_validation_data * max_length_of_output_tensor\n\nArgs:\nstart_or_end_logits(:obj:`tensor`):\nThis is the output predictions of the model. We can only enter either start or end logits.\neval_dataset: Evaluation dataset\nmax_len(:obj:`int`):\nThe maximum length of the output tensor. ( See the model.eval() part for more details )", "source": "github-repos"} {"code": "def is_attribute_applicable_to_object_type(self, attribute, object_type):\n rule_set = self._attribute_rule_sets.get(attribute)\n if (object_type in rule_set.applies_to_object_types):\n return True\n else:\n return False", "docstring": "Check if the attribute is supported by the given object type.\n\nArgs:\nattribute (string): The name of the attribute (e.g., 'Name').\nRequired.\nobject_type (ObjectType): An ObjectType enumeration\n(e.g., ObjectType.SYMMETRIC_KEY). Required.\nReturns:\nbool: True if the attribute is applicable to the object type.\nFalse otherwise.", "source": "codesearchnet"} {"code": "def _new_ass_hierarchy(self, file_ass):\n ret_struct = {'source': '', 'subhierarchy': {}, 'attrs': {}, 'snippets': {}}\n ret_struct['source'] = file_ass['source']\n self._ass_refresh_attrs(ret_struct, file_ass)\n for (name, subhierarchy) in file_ass['subhierarchy'].items():\n ret_struct['subhierarchy'][name] = self._new_ass_hierarchy(subhierarchy)\n return ret_struct", "docstring": "Returns a completely new cache hierarchy for given assistant file.\n\nArgs:\nfile_ass: the assistant from filesystem hierarchy to create cache hierarchy for\n(for format see what refresh_role accepts)\nReturns:\nthe newly created cache hierarchy", "source": "codesearchnet"} {"code": "def sample(self, n_samples):\n \n if self.tau > 1 or self.tau < -1:\n raise ValueError(\"The range for correlation measure is [-1,1].\")\n\n v = np.random.uniform(0, 1, n_samples)\n c = np.random.uniform(0, 1, n_samples)\n\n u = self.percent_point(c, v)\n return np.column_stack((u, v))", "docstring": "Generate specified `n_samples` of new data from model. 
`v~U[0,1],v~C^-1(u|v)`\n\nArgs:\nn_samples: `int`, amount of samples to create.\n\nReturns:\nnp.ndarray: Array of length `n_samples` with generated data from the model.", "source": "juraj-google-style"} {"code": "def _construct_punctuation_token(self, d: Dict, nlp) -> List[Dict]:\n \n\n result = []\n if not d[\"token\"]:\n this_token = {attrs.IS_PUNCT: True}\n elif len(d[\"token\"]) == 1:\n this_token = {attrs.ORTH: d[\"token\"][0]}\n else:\n global FLAG_ID\n punct_set = set(d[\"token\"])\n\n def is_selected_punct(x):\n return x in punct_set\n\n FLAG_DICT[FLAG_ID] = nlp.vocab.add_flag(is_selected_punct)\n this_token = {FLAG_DICT[FLAG_ID]: True}\n FLAG_ID += 1\n result.append(this_token)\n result = self._add_common_constrain(result, d)\n return result", "docstring": "Construct a punctuation token\nArgs:\nd: Dict\nnlp\n\nReturns: List[Dict]", "source": "juraj-google-style"} {"code": "def condense(input_string):\n try:\n assert isinstance(input_string, basestring)\n except AssertionError:\n raise TypeError\n removed_leading_whitespace = re.sub('>\\\\s+', '>', input_string).strip()\n removed_trailing_whitespace = re.sub('\\\\s+<', '<', removed_leading_whitespace).strip()\n return removed_trailing_whitespace", "docstring": "Trims leading and trailing whitespace between tags in an html document\n\nArgs:\ninput_string: A (possibly unicode) string representing HTML.\n\nReturns:\nA (possibly unicode) string representing HTML.\n\nRaises:\nTypeError: Raised if input_string isn't a unicode string or string.", "source": "codesearchnet"} {"code": "def get_num_bytes(self, batch: Sequence[numpy.ndarray]) -> int:\n return sum((sys.getsizeof(element) for element in batch))", "docstring": "Returns:\nThe number of bytes of data for a batch of numpy arrays.", "source": "github-repos"} {"code": "def reorder_resource_views(self, resource_views):\n if (not isinstance(resource_views, list)):\n raise HDXError('ResourceViews should be a list!')\n ids = list()\n for resource_view in resource_views:\n if isinstance(resource_view, str):\n resource_view_id = resource_view\n else:\n resource_view_id = resource_view['id']\n if (is_valid_uuid(resource_view_id) is False):\n raise HDXError(('%s is not a valid resource view id!' 
% resource_view))\n ids.append(resource_view_id)\n (_, result) = self._read_from_hdx('resource view', self.data['id'], 'id', ResourceView.actions()['reorder'], order=ids)", "docstring": "Order resource views in resource.\n\nArgs:\nresource_views (List[Union[ResourceView,Dict,str]]): A list of either resource view ids or resource views metadata from ResourceView objects or dictionaries\n\nReturns:\nNone", "source": "codesearchnet"} {"code": "def tokens(self, tokenset='internal'):\n toks = self.get('tokens', {}).get(tokenset)\n if (toks is not None):\n if isinstance(toks, stringtypes):\n toks = YyTokenLattice.from_string(toks)\n elif isinstance(toks, Sequence):\n toks = YyTokenLattice.from_list(toks)\n return toks", "docstring": "Deserialize and return a YyTokenLattice object for the\ninitial or internal token set, if provided, from the YY\nformat or the JSON-formatted data; otherwise return the\noriginal string.\n\nArgs:\ntokenset (str): return `'initial'` or `'internal'` tokens\n(default: `'internal'`)\nReturns:\n:class:`YyTokenLattice`", "source": "codesearchnet"} {"code": "def c_to_f(temperature):\n if temperature is None:\n return None\n return temperature * 9 / 5 + 32", "docstring": "Converts temperature from celcius to fahrenheit\n\nArgs:\ntemperature: floating point representing the temperature in celcius\nReturns: temperature in fahrenheit", "source": "github-repos"} {"code": "def get_files_in_branch(profile, branch_sha):\n tree_sha = get_commit_tree(profile, branch_sha)\n files = get_files_in_tree(profile, tree_sha)\n tree = [prepare(x) for x in files]\n return tree", "docstring": "Get all files in a branch's tree.\n\nArgs:\n\nprofile\nA profile generated from ``simplygithub.authentication.profile``.\nSuch profiles tell this module (i) the ``repo`` to connect to,\nand (ii) the ``token`` to connect with.\n\nbranch_sha\nThe SHA a branch's HEAD points to.\n\nReturns:\nA list of dicts containing info about each blob in the tree.", "source": "codesearchnet"} {"code": "def top_1_tpu(inputs):\n \n inputs_max = tf.reduce_max(inputs, axis=-1, keepdims=True)\n mask = tf.to_int32(tf.equal(inputs_max, inputs))\n index = tf.range(tf.shape(inputs)[-1]) * mask\n return tf.squeeze(inputs_max, -1), tf.reduce_max(index, axis=-1)", "docstring": "find max and argmax over the last dimension.\n\nWorks well on TPU\n\nArgs:\ninputs: A tensor with shape [..., depth]\n\nReturns:\nvalues: a Tensor with shape [...]\nindices: a Tensor with shape [...]", "source": "juraj-google-style"} {"code": "def validate_inputs(x, y):\n if isinstance(x, iterator_ops.Iterator) or isinstance(y, iterator_ops.Iterator):\n raise ValueError('`DistributionStrategy` does not support inputs of type Iterator. 
You must pass a `tf.data.Dataset` object or a numpy array as input.')", "docstring": "Validate inputs when using DistributionStrategy.\n\nArgs:\nx: Model Inputs.\ny: Model Targets.\n\nRaises:\nValueError: if input is not a Dataset or a numpy array (when we use\nMirroredStrategy).", "source": "github-repos"} {"code": "def _get_environment_updates(self, display_all_distributions=False):\n updates = []\n for distribution in self.pip.get_installed_distributions():\n versions = self.get_available_versions(distribution.project_name)\n max_version = (max(versions.keys()) if versions else UNKNOW_NUM)\n update = None\n distribution_version = self._parse_version(distribution.version)\n if (versions and (max_version > distribution_version)):\n update = Update(distribution.project_name, distribution.version, versions[max_version], prelease=max_version[(- 1)])\n elif (display_all_distributions and (max_version == distribution_version)):\n update = Update(distribution.project_name, distribution.version, versions[max_version])\n elif display_all_distributions:\n update = Update(distribution.project_name, distribution.version, UNKNOWN)\n if update:\n updates.append(update)\n return sorted(updates, key=(lambda x: x.name))", "docstring": "Check all packages installed in the environment to see if there are\nany updates available.\n\nArgs:\ndisplay_all_distributions (bool): Return distribution even if it is\nup-to-date. Defaults to ``False``.\n\nReturns:\nlist: A list of Update objects ordered based on ``instance.name``.", "source": "codesearchnet"} {"code": "def experimental_make_numpy_dataset(self, numpy_input, session=None):\n _require_cross_replica_or_default_context_extended(self)\n return self._experimental_make_numpy_dataset(numpy_input, session=session)", "docstring": "Makes a dataset for input provided via a numpy array.\n\nThis avoids adding `numpy_input` as a large constant in the graph,\nand copies the data to the machine or machines that will be processing\nthe input.\n\nArgs:\nnumpy_input: A nest of NumPy input arrays that will be distributed evenly\nacross all replicas. 
Note that lists of Numpy arrays are stacked, as\nthat is normal `tf.data.Dataset` behavior.\nsession: (TensorFlow v1.x graph execution only) A session used for\ninitialization.\n\nReturns:\nA `tf.data.Dataset` representing `numpy_input`.", "source": "github-repos"} {"code": "def default_pass_manager(basis_gates, coupling_map, initial_layout, seed_transpiler):\n pass_manager = PassManager()\n pass_manager.property_set['layout'] = initial_layout\n pass_manager.append(Unroller(basis_gates))\n pass_manager.append(TrivialLayout(coupling_map), condition=(lambda property_set: (not property_set['layout'])))\n pass_manager.append(CheckMap(coupling_map))\n pass_manager.append(DenseLayout(coupling_map), condition=(lambda property_set: (not property_set['is_swap_mapped'])))\n pass_manager.append(FullAncillaAllocation(coupling_map))\n pass_manager.append(EnlargeWithAncilla())\n pass_manager.append(Unroll3qOrMore())\n pass_manager.append(LegacySwap(coupling_map, trials=20, seed=seed_transpiler))\n pass_manager.append(Decompose(SwapGate))\n pass_manager.append(CXDirection(coupling_map))\n pass_manager.append(Unroller(['u1', 'u2', 'u3', 'id', 'cx']))\n simplification_passes = [Optimize1qGates(), CXCancellation(), RemoveResetInZeroState()]\n pass_manager.append((simplification_passes + [Depth(), FixedPoint('depth')]), do_while=(lambda property_set: (not property_set['depth_fixed_point'])))\n return pass_manager", "docstring": "The default pass manager that maps to the coupling map.\n\nArgs:\nbasis_gates (list[str]): list of basis gate names supported by the target.\ncoupling_map (CouplingMap): coupling map to target in mapping.\ninitial_layout (Layout or None): initial layout of virtual qubits on physical qubits\nseed_transpiler (int or None): random seed for stochastic passes.\n\nReturns:\nPassManager: A pass manager to map and optimize.", "source": "codesearchnet"} {"code": "def _get_match(self, key):\n \n\n return self._get_string_match(key=key) or \\\n self._get_non_string_match(key=key)", "docstring": "Gets a MatchObject for the given key.\n\nArgs:\nkey (str): Key of the property to look-up.\n\nReturn:\nMatchObject: The discovered match.", "source": "juraj-google-style"} {"code": "def _ParseDataStreamWithParser(\n self, parser_mediator, parser, file_entry, data_stream_name):\n \n file_object = file_entry.GetFileObject(data_stream_name=data_stream_name)\n if not file_object:\n raise RuntimeError(\n 'Unable to retrieve file-like object from file entry.')\n\n try:\n self._ParseFileEntryWithParser(\n parser_mediator, parser, file_entry, file_object=file_object)\n\n finally:\n file_object.close()", "docstring": "Parses a data stream of a file entry with a specific parser.\n\nArgs:\nparser_mediator (ParserMediator): parser mediator.\nparser (BaseParser): parser.\nfile_entry (dfvfs.FileEntry): file entry.\ndata_stream_name (str): data stream name.\n\nRaises:\nRuntimeError: if the file-like object is missing.", "source": "juraj-google-style"} {"code": "def Process(self, parser_mediator, root_item=None, **kwargs):\n \n \n super(DocumentSummaryInformationOLECFPlugin, self).Process(\n parser_mediator, **kwargs)\n\n if not root_item:\n raise ValueError('Root item not set.')\n\n root_creation_time, root_modification_time = self._GetTimestamps(root_item)\n\n for item_name in self.REQUIRED_ITEMS:\n item = root_item.get_sub_item_by_name(item_name)\n if not item:\n continue\n\n summary_information = OLECFDocumentSummaryInformation(item)\n event_data = summary_information.GetEventData(\n 
data_type='olecf:document_summary_info')\n event_data.name = 'Document Summary Information'\n\n if root_creation_time:\n date_time = dfdatetime_filetime.Filetime(\n timestamp=root_creation_time)\n event = OLECFDocumentSummaryInformationEvent(\n date_time, definitions.TIME_DESCRIPTION_CREATION)\n parser_mediator.ProduceEventWithEventData(event, event_data)\n\n if root_modification_time:\n date_time = dfdatetime_filetime.Filetime(\n timestamp=root_modification_time)\n event = OLECFDocumentSummaryInformationEvent(\n date_time, definitions.TIME_DESCRIPTION_MODIFICATION)\n parser_mediator.ProduceEventWithEventData(event, event_data)", "docstring": "Parses a document summary information OLECF item.\n\nArgs:\nparser_mediator (ParserMediator): mediates interactions between parsers\nand other components, such as storage and dfvfs.\nroot_item (Optional[pyolecf.item]): root item of the OLECF file.\n\nRaises:\nValueError: If the root item is not set.", "source": "juraj-google-style"} {"code": "async def send_message(self, name, level, message):\n \n\n if name not in self.services:\n raise ArgumentError(\"Unknown service name\", short_name=name)\n\n msg = self.services[name]['state'].post_message(level, message)\n await self._notify_update(name, 'new_message', msg.to_dict())", "docstring": "Post a message for a service.\n\nArgs:\nname (string): The short name of the service to query\nlevel (int): The level of the message (info, warning, error)\nmessage (string): The message contents", "source": "juraj-google-style"} {"code": "def compiled_sub_dn(self, prepend):\n \n prepend = prepend.strip()\n if prepend == '':\n return self.config.get('LDAP_BASE_DN')\n return '{prepend},{base}'.format(\n prepend=prepend,\n base=self.config.get('LDAP_BASE_DN')\n )", "docstring": "Returns:\nstr: A DN with the DN Base appended to the end.\n\nArgs:\nprepend (str): The dn to prepend to the base.", "source": "juraj-google-style"} {"code": "def removeUserGroups(self, users=None):\n \n admin = None\n userCommunity = None\n portal = None\n groupAdmin = None\n user = None\n userCommData = None\n group = None\n try:\n\n admin = arcrest.manageorg.Administration(securityHandler=self._securityHandler)\n if users is None:\n print (\"You have selected to remove all users groups, you must modify the code to do this\")\n usersObj = []\n commUsers = admin.portals.portalSelf.users(start=1, num=100)\n usersObj = commUsers['users']\n\n return\n else:\n usersObj = []\n userStr = users.split(',')\n for user in userStr:\n try:\n user = admin.community.users.user(str(user).strip())\n usersObj.append(user)\n except:\n print (\"%s does not exist\" % str(user).strip())\n if usersObj:\n for userCommData in usersObj:\n print (\"Loading groups for user: %s\" % userCommData.username)\n\n if userCommData.groups:\n for group in userCommData.groups:\n groupObj = admin.community.groups.group(groupId=group['id'])\n if groupObj.owner == userCommData.username:\n print (groupObj.delete())\n else:\n print (\"No Groups Found\")\n except:\n line, filename, synerror = trace()\n raise common.ArcRestHelperError({\n \"function\": \"removeUserGroups\",\n \"line\": line,\n \"filename\": filename,\n \"synerror\": synerror,\n }\n )\n finally:\n admin = None\n userCommunity = None\n portal = None\n groupAdmin = None\n user = None\n userCommData = None\n group = None\n\n del admin\n del userCommunity\n del portal\n del groupAdmin\n del user\n del userCommData\n del group\n\n gc.collect()", "docstring": "Removes users' groups.\n\nArgs:\nusers (str): A comma delimited list 
of user names.\nDefaults to ``None``.\n\nWarning:\nWhen ``users`` is not provided (``None``), all users\nin the organization will have their groups deleted!", "source": "juraj-google-style"} {"code": "def find_in_mailbox(cls, session, mailbox_or_id):\n if hasattr(mailbox_or_id, 'id'):\n mailbox_or_id = mailbox_or_id.id\n return cls(('/mailboxes/%d/users.json' % mailbox_or_id), session=session)", "docstring": "Get the users that are associated to a Mailbox.\n\nArgs:\nsession (requests.sessions.Session): Authenticated session.\nmailbox_or_id (MailboxRef or int): Mailbox of the ID of the\nmailbox to get the folders for.\n\nReturns:\nRequestPaginator(output_type=helpscout.models.User): Users\niterator.", "source": "codesearchnet"} {"code": "def decode_array(bytestring: bytes) -> np.ndarray:\n return tf.make_ndarray(_CLS.FromString(bytestring))", "docstring": "Decodes a bytestring into a numpy array.\n\nThe bytestring should be a serialized `TensorProto` instance. For more details\nsee `tf.make_tensor_proto`.\n\nArgs:\nbytestring: The serialized `TensorProto`.\n\nReturns:\nA numpy array.", "source": "github-repos"} {"code": "def decode_field(self, field, value):\n \n if isinstance(field, messages.EnumField):\n try:\n return field.type(value)\n except TypeError:\n raise messages.DecodeError(\n 'Invalid enum value \"%s\"' % (value or ''))\n\n elif isinstance(field, messages.BytesField):\n try:\n return base64.b64decode(value)\n except (binascii.Error, TypeError) as err:\n raise messages.DecodeError('Base64 decoding error: %s' % err)\n\n elif isinstance(field, message_types.DateTimeField):\n try:\n return util.decode_datetime(value)\n except ValueError as err:\n raise messages.DecodeError(err)\n\n elif (isinstance(field, messages.MessageField) and\n issubclass(field.type, messages.Message)):\n return self.__decode_dictionary(field.type, value)\n\n elif (isinstance(field, messages.FloatField) and\n isinstance(value, (six.integer_types, six.string_types))):\n try:\n return float(value)\n except: \n pass\n\n elif (isinstance(field, messages.IntegerField) and\n isinstance(value, six.string_types)):\n try:\n return int(value)\n except: \n pass\n\n return value", "docstring": "Decode a JSON value to a python value.\n\nArgs:\nfield: A ProtoRPC field instance.\nvalue: A serialized JSON value.\n\nReturn:\nA Python value compatible with field.", "source": "juraj-google-style"} {"code": "def pnum_to_group(mesh_shape, group_dims, pnum):\n coord = pnum_to_processor_coordinates(mesh_shape, pnum)\n remaining_shape = Shape([d for (i, d) in enumerate(mesh_shape) if (i not in group_dims)])\n remaining_coord = [d for (i, d) in enumerate(coord) if (i not in group_dims)]\n return processor_coordinates_to_pnum(remaining_shape, remaining_coord)", "docstring": "Group number for grouped allreduce.\n\nArgs:\nmesh_shape: a Shape\ngroup_dims: a list of integers (the dimensions reduced over)\npnum: an integer\n\nReturns:\nan integer", "source": "codesearchnet"} {"code": "def exit_handler(signum, frame):\n \n\n LOGGER.debug('signal {} was caught'.format(signum))\n sys.exit(128 + signum)", "docstring": "Catch SIGTERM and SIGHUP and call \"sys.exit\" which raises\n\"SystemExit\" exception.\nThis will trigger all the cleanup code defined in ContextManagers\nand \"finally\" statements.\n\nFor more details about the arguments see \"signal\" documentation.\n\nArgs:\nsignum(int): The signal's number\nframe(frame): The current stack frame, can be None", "source": "juraj-google-style"} {"code": "def GetSubClasses():\n return 
utils.invert_dict(GetSuperClasses())", "docstring": "Get a reverse Python type hierarchy mapping.\n\nThis generates a dictionary that can be used to look up the (known)\nsubclasses of a type in the abstract base class hierarchy.\n\nReturns:\nA dictionary mapping a type, as string, to a list of direct\nsubclasses (also as strings).\nE.g. \"Sized\" -> [\"Set\", \"Mapping\", \"MappingView\", \"Sequence\"].", "source": "github-repos"} {"code": "def findall_operations(self, predicate: Callable[([ops.Operation], bool)]) -> Iterable[Tuple[(int, ops.Operation)]]:\n for (index, moment) in enumerate(self._moments):\n for op in moment.operations:\n if predicate(op):\n (yield (index, op))", "docstring": "Find the locations of all operations that satisfy a given condition.\n\nThis returns an iterator of (index, operation) tuples where each\noperation satisfies op_cond(operation) is truthy. The indices are\nin order of the moments and then order of the ops within that moment.\n\nArgs:\npredicate: A method that takes an Operation and returns a Truthy\nvalue indicating the operation meets the find condition.\n\nReturns:\nAn iterator (index, operation)'s that satisfy the op_condition.", "source": "codesearchnet"} {"code": "def identify_link_type(filename):\n mime_type = mimetypes.guess_type(filename)[0]\n if (not mime_type):\n return\n if (mime_type == 'text/css'):\n return LinkType.css\n elif (mime_type == 'application/javascript'):\n return LinkType.javascript\n elif ((mime_type == 'text/html') or mime_type.endswith('xml')):\n return LinkType.html\n elif (mime_type.startswith('video') or mime_type.startswith('image') or mime_type.startswith('audio') or mime_type.endswith('shockwave-flash')):\n return LinkType.media", "docstring": "Return link type guessed by filename extension.\n\nReturns:\nstr: A value from :class:`.item.LinkType`.", "source": "codesearchnet"} {"code": "def _convert_to_compatible_tensor(value, target, error_prefix):\n try:\n tensor = tf_v1.convert_to_tensor_or_indexed_slices(value, target.dtype)\n except TypeError as e:\n raise TypeError(('%s: %s' % (error_prefix, e)))\n if (_is_sparse(tensor) != _is_sparse(target)):\n if _is_sparse(tensor):\n raise TypeError(('%s: Is sparse. Expected dense.' % error_prefix))\n else:\n raise TypeError(('%s: Is dense. Expected sparse.' 
% error_prefix))\n if (not tensor.get_shape().is_compatible_with(target.get_shape())):\n raise TypeError(('%s: Shape %r is incompatible with %r' % (error_prefix, tensor.get_shape(), target.get_shape())))\n return tensor", "docstring": "Converts `value` into a tensor that can be fed into `tensor_info`.\n\nArgs:\nvalue: A value to convert into Tensor or SparseTensor.\ntarget: An object returned by `parse_tensor_info_map`.\nerror_prefix: A string to prefix on raised TypeErrors.\n\nRaises:\nTypeError: If it fails to convert.\n\nReturns:\nA Tensor or SparseTensor compatible with tensor_info.", "source": "codesearchnet"} {"code": "def forward(self, x, prev_bin, prev_bin_embedding=None, interpolate=True):\n if prev_bin_embedding is not None:\n if interpolate:\n prev_bin_embedding = nn.functional.interpolate(prev_bin_embedding, x.shape[-2:], mode='bilinear', align_corners=True)\n x = x + prev_bin_embedding\n x = self.conv1(x)\n x = self.act1(x)\n x = self.conv2(x)\n attractors = self.act2(x)\n height, width = attractors.shape[-2:]\n bin_centers = nn.functional.interpolate(prev_bin, (height, width), mode='bilinear', align_corners=True)\n if not self.memory_efficient:\n func = {'mean': torch.mean, 'sum': torch.sum}[self.kind]\n delta_c = func(inv_attractor(attractors.unsqueeze(2) - bin_centers.unsqueeze(1)), dim=1)\n else:\n delta_c = torch.zeros_like(bin_centers, device=bin_centers.device)\n for i in range(self.n_attractors):\n delta_c += inv_attractor(attractors[:, i, ...].unsqueeze(1) - bin_centers)\n if self.kind == 'mean':\n delta_c = delta_c / self.n_attractors\n bin_new_centers = bin_centers + delta_c\n bin_centers = bin_new_centers\n return (bin_new_centers, bin_centers)", "docstring": "The forward pass of the attractor layer. This layer predicts the new bin centers based on the previous bin centers\nand the attractor points (the latter are predicted by the MLP).\n\nArgs:\nx (`torch.Tensor` of shape (batch_size, num_channels, height, width)`):\nFeature block.\nprev_bin (`torch.Tensor` of shape (batch_size, prev_num_bins, height, width)`):\nPrevious bin centers normed.\nprev_bin_embedding (`torch.Tensor`, *optional*):\nOptional previous bin embeddings.\ninterpolate (`bool`, *optional*, defaults to `True`):\nWhether to interpolate the previous bin embeddings to the size of the input features.\n\nReturns:\n`Tuple[`torch.Tensor`, `torch.Tensor`]:\nNew bin centers unbounded. Two outputs just to keep the API consistent with the normed version.", "source": "github-repos"} {"code": "def _ensure_tf_install():\n try:\n import tensorflow.compat.v2 as tf\n except ImportError:\n print('\\n\\nFailed to import TensorFlow. Please note that TensorFlow is not installed by default when you install TF Quant Finance library. This is so that users can decide whether to install the GPU-enabled TensorFlow package. To use TF Quant Finance library, please install the most recent version of TensorFlow, by following instructions at https:')\n raise\n import distutils.version\n if distutils.version.LooseVersion(tf.__version__) < distutils.version.LooseVersion(_REQUIRED_TENSORFLOW_VERSION):\n raise ImportError('This version of TF Quant Finance library requires TensorFlow version >= {required}; Detected an installation of version {present}. 
Please upgrade TensorFlow to proceed.'.format(required=_REQUIRED_TENSORFLOW_VERSION, present=tf.__version__))", "docstring": "Attempt to import tensorflow, and ensure its version is sufficient.\n\nRaises:\nImportError: if either tensorflow is not importable or its version is\ninadequate.", "source": "github-repos"} {"code": "def locked_put(self, credentials):\n filters = {self.key_name: self.key_value}\n query = self.session.query(self.model_class).filter_by(**filters)\n entity = query.first()\n if (not entity):\n entity = self.model_class(**filters)\n setattr(entity, self.property_name, credentials)\n self.session.add(entity)", "docstring": "Write a credentials to the SQLAlchemy datastore.\n\nArgs:\ncredentials: :class:`oauth2client.Credentials`", "source": "codesearchnet"} {"code": "def set_row_count(self, count):\n current_row_count = self.row_count()\n current_column_count = self.column_count()\n if (count > current_row_count):\n cl = (TableEditableItem if self._editable else TableItem)\n for i in range(current_row_count, count):\n tr = TableRow()\n for c in range(0, current_column_count):\n tr.append(cl(), str(c))\n if self._editable:\n tr.children[str(c)].onchange.connect(self.on_item_changed, int(i), int(c))\n self.append(tr, str(i))\n self._update_first_row()\n elif (count < current_row_count):\n for i in range(count, current_row_count):\n self.remove_child(self.children[str(i)])", "docstring": "Sets the table row count.\n\nArgs:\ncount (int): number of rows", "source": "codesearchnet"} {"code": "def union(self, other):\n if (not hasattr(other, '__iter__')):\n other = [other]\n bounds = self.bounds[:]\n for range in other:\n bounds += range.bounds\n bounds = self._union(bounds)\n range = VersionRange(None)\n range.bounds = bounds\n return range", "docstring": "OR together version ranges.\n\nCalculates the union of this range with one or more other ranges.\n\nArgs:\nother: VersionRange object (or list of) to OR with.\n\nReturns:\nNew VersionRange object representing the union.", "source": "codesearchnet"} {"code": "def create_string_array(self, key, value):\n \n data = None\n if key is not None and value is not None:\n if isinstance(value, (list)):\n data = self.db.create(key.strip(), json.dumps(value))\n else:\n \n data = self.db.create(key.strip(), value)\n else:\n self.tcex.log.warning(u'The key or value field was None.')\n return data", "docstring": "Create method of CRUD operation for string array data.\n\nArgs:\nkey (string): The variable to write to the DB.\nvalue (any): The data to write to the DB.\n\nReturns:\n(string): Result of DB write.", "source": "juraj-google-style"} {"code": "def multithread_predict_dataflow(dataflows, model_funcs):\n num_worker = len(model_funcs)\n assert (len(dataflows) == num_worker)\n if (num_worker == 1):\n return predict_dataflow(dataflows[0], model_funcs[0])\n kwargs = ({'thread_name_prefix': 'EvalWorker'} if (sys.version_info.minor >= 6) else {})\n with ThreadPoolExecutor(max_workers=num_worker, **kwargs) as executor, tqdm.tqdm(total=sum([df.size() for df in dataflows])) as pbar:\n futures = []\n for (dataflow, pred) in zip(dataflows, model_funcs):\n futures.append(executor.submit(predict_dataflow, dataflow, pred, pbar))\n all_results = list(itertools.chain(*[fut.result() for fut in futures]))\n return all_results", "docstring": "Running multiple `predict_dataflow` in multiple threads, and aggregate the results.\n\nArgs:\ndataflows: a list of DataFlow to be used in :func:`predict_dataflow`\nmodel_funcs: a list of callable to be used in 
:func:`predict_dataflow`\n\nReturns:\nlist of dict, in the format used by\n`DetectionDataset.eval_or_save_inference_results`", "source": "codesearchnet"} {"code": "def _grappler_config(self, optimizers=None):\n if not optimizers:\n optimizers = []\n if not self.experimental_new_converter:\n optimizers.append('constfold')\n is_only_flex_enabled = set([OpsSet.SELECT_TF_OPS]) == set(self.target_spec.supported_ops)\n if is_only_flex_enabled:\n optimizers.append('layout')\n return _get_grappler_config(optimizers)", "docstring": "Creates a tf.compat.v1.ConfigProto for configuring Grappler.\n\nArgs:\noptimizers: List of strings that represents the list of optimizers.\n\nReturns:\ntf.ConfigProto.", "source": "github-repos"} {"code": "def scatter_min(self, sparse_delta, use_locking=False, name=None):\n if not isinstance(sparse_delta, indexed_slices.IndexedSlices):\n raise TypeError(f'Argument `sparse_delta` must be a `tf.IndexedSlices`. Received arg: {sparse_delta}')\n return self._lazy_read(gen_resource_variable_ops.resource_scatter_min(self.handle, sparse_delta.indices, ops.convert_to_tensor(sparse_delta.values, self.dtype), name=name))", "docstring": "Updates this variable with the min of `tf.IndexedSlices` and itself.\n\nArgs:\nsparse_delta: `tf.IndexedSlices` to use as an argument of min with this\nvariable.\nuse_locking: If `True`, use locking during the operation.\nname: the name of the operation.\n\nReturns:\nThe updated variable.\n\nRaises:\nTypeError: if `sparse_delta` is not an `IndexedSlices`.", "source": "github-repos"} {"code": "def GetProgressTrackerSymbols(self):\n return self._progress_tracker_symbols", "docstring": "Returns the progress tracker characters object.\n\nReturns:\nA ProgressTrackerSymbols object for the console output device.", "source": "github-repos"} {"code": "def _remove_double_brackets(text):\n \n def replacement_fn(s):\n if u\":\" in s:\n \n return \"\"\n \n bar_pos = s.find(u\"|\")\n if bar_pos == -1:\n return s\n return s[bar_pos + 1:]\n return _find_and_replace(text, u\"[[\", u\"]]\", replacement_fn)", "docstring": "Remove double brackets (internal links) but leave the viewable text.\n\nArgs:\ntext: a unicode string\nReturns:\na unicode string", "source": "juraj-google-style"} {"code": "def forward(self, seq_length=None, position=None):\n if position is None and seq_length is None:\n raise ValueError('Either position or seq_length must be provided')\n if position is None:\n position = torch.arange(seq_length, dtype=torch.float32, device=self.inv_timescales.device).unsqueeze(0)\n elif position.ndim != 2:\n raise ValueError(f'position must be 2-dimensional, got shape {position.shape}')\n scaled_time = position.view(*position.shape, 1) * self.inv_timescales.view(1, 1, -1)\n signal = torch.cat([torch.sin(scaled_time), torch.cos(scaled_time)], dim=2)\n signal = F.pad(signal, (0, 0, 0, self.embedding_dims % 2))\n return signal", "docstring": "Generates a Tensor of sinusoids with different frequencies.\n\nArgs:\nseq_length: an optional Python int defining the output sequence length.\nif the `position` argument is specified.\nposition: [B, seq_length], optional position for each token in the\nsequence, only required when the sequence is packed.\n\nReturns:\n[B, seqlen, D] if `position` is specified, else [1, seqlen, D]", "source": "github-repos"} {"code": "def equals(self, rhs):\n try:\n return (round((rhs - self._float_value), self._places) == 0)\n except TypeError:\n return False", "docstring": "Check to see if RHS is almost equal to float_value\n\nArgs:\nrhs: the 
value to compare to float_value\n\nReturns:\nbool", "source": "codesearchnet"} {"code": "def add_timeout_arg(a_func, timeout, **kwargs):\n\n def inner(*args):\n 'Updates args with the timeout.'\n updated_args = (args + (timeout,))\n return a_func(*updated_args, **kwargs)\n return inner", "docstring": "Updates a_func so that it gets called with the timeout as its final arg.\n\nThis converts a callable, a_func, into another callable with an additional\npositional arg.\n\nArgs:\na_func (callable): a callable to be updated\ntimeout (int): to be added to the original callable as its final positional\narg.\nkwargs: Additional arguments passed through to the callable.\n\nReturns:\ncallable: the original callable updated to the timeout arg", "source": "codesearchnet"} {"code": "def get_relative_name(prefix: str, absolute_name: str) -> str:\n if absolute_name.startswith('.'):\n return absolute_name\n prefix_path: list[str] = prefix.split('.') if prefix else []\n name_path: list[str] = absolute_name.split('.') if absolute_name else []\n num_match = 0\n for prefix_seg, name_seg in zip(prefix_path, name_path):\n if prefix_seg != name_seg:\n break\n num_match += 1\n if not num_match:\n return absolute_name\n name = '.'.join(name_path[num_match:])\n ndots = len(prefix_path) - num_match\n if ndots > 0:\n name = '.' * (ndots + 1) + name\n return name", "docstring": "Transforms an absolute name to a relative one based on the given prefix.\n\nArgs:\nprefix: A dotted name, e.g. foo.bar.baz\nabsolute_name: A fully-qualified name, e.g. foo.bar.baz.x\n\nReturns:\nThe absolute name with the prefix removed, with a leading dot added\nfor each segment of the prefix not present in the absolute name.\ne.g. foo.bar.baz + foo.bar.hello.world -> ..hello.world\nIf the prefix is disjoint from the absolute name, the absolute name is\nreturned verbatim.\ne.g. foo.bar.baz + hello.world -> hello.world\nIf the given absolute name has one or more leading dots, it is returned\nverbatim.\ne.g. 
foo.bar + ..hello.world -> ..hello.world", "source": "github-repos"} {"code": "def sort_models(self):\n model_names = [table.name for table in self.Base.metadata.sorted_tables if (table.name in self.models)]\n logger.debug('Unsorted models: %s', model_names)\n model_count = len(model_names)\n swapped = True\n sort_round = 0\n while swapped:\n sort_round += 1\n logger.debug('Sorting round: %d (%s)', sort_round, model_names)\n sorted_models = []\n for i in range(model_count):\n model = self.models[model_names[i]]\n for foreign_model_name in model.foreign_models:\n if (foreign_model_name not in sorted_models):\n sorted_models.append(foreign_model_name)\n if (model.name not in sorted_models):\n sorted_models.append(model.name)\n if (model_names == sorted_models):\n swapped = False\n model_names = sorted_models\n logger.debug('Sorted models: %s (%d rounds)', model_names, sort_round)\n return model_names", "docstring": "Sorts the database models appropriately based on their relationships so that we load our data\nin the appropriate order.\n\nReturns:\nA sorted list containing the names of the models.", "source": "codesearchnet"} {"code": "def KillOldFlows(self):\n if (not self.IsRunning()):\n return False\n start_time = self.Get(self.Schema.LAST_RUN_TIME)\n lifetime = self.Get(self.Schema.CRON_ARGS).lifetime\n elapsed = (rdfvalue.RDFDatetime.Now() - start_time)\n if (lifetime and (elapsed > lifetime)):\n self.StopCurrentRun()\n stats_collector_instance.Get().IncrementCounter('cron_job_timeout', fields=[self.urn.Basename()])\n stats_collector_instance.Get().RecordEvent('cron_job_latency', elapsed.seconds, fields=[self.urn.Basename()])\n return True\n return False", "docstring": "Disable cron flow if it has exceeded CRON_ARGS.lifetime.\n\nReturns:\nbool: True if the flow is was killed.", "source": "codesearchnet"} {"code": "def dimension_name(dimension):\n \n if isinstance(dimension, Dimension):\n return dimension.name\n elif isinstance(dimension, basestring):\n return dimension\n elif isinstance(dimension, tuple):\n return dimension[0]\n elif isinstance(dimension, dict):\n return dimension['name']\n elif dimension is None:\n return None\n else:\n raise ValueError('%s type could not be interpreted as Dimension. 
'\n 'Dimensions must be declared as a string, tuple, '\n 'dictionary or Dimension type.'\n % type(dimension).__name__)", "docstring": "Return the Dimension.name for a dimension-like object.\n\nArgs:\ndimension: Dimension or dimension string, tuple or dict\n\nReturns:\nThe name of the Dimension or what would be the name if the\ninput was converted to a Dimension.", "source": "juraj-google-style"} {"code": "def get_heat_capacity(self, temperature, structure, n, u, cutoff=100.0):\n k = 1.38065e-23\n kt = (k * temperature)\n hbar_w = (1.05457e-34 * self.omega(structure, n, u))\n if (hbar_w > (kt * cutoff)):\n return 0.0\n c = (k * ((hbar_w / kt) ** 2))\n c *= (np.exp((hbar_w / kt)) / ((np.exp((hbar_w / kt)) - 1) ** 2))\n return (c * 6.022e+23)", "docstring": "Gets the directional heat capacity for a higher order tensor\nexpansion as a function of direction and polarization.\n\nArgs:\ntemperature (float): Temperature in kelvin\nstructure (float): Structure to be used in directional heat\ncapacity determination\nn (3x1 array-like): direction for Cv determination\nu (3x1 array-like): polarization direction, note that\nno attempt for verification of eigenvectors is made\ncutoff (float): cutoff for scale of kt / (hbar * omega)\nif lower than this value, returns 0", "source": "codesearchnet"} {"code": "def path_fraction_id_offset(points, fraction, relative_offset=False):\n if (not (0.0 <= fraction <= 1.0)):\n raise ValueError(('Invalid fraction: %.3f' % fraction))\n pts = np.array(points)[:, COLS.XYZ]\n lengths = np.linalg.norm(np.diff(pts, axis=0), axis=1)\n cum_lengths = np.cumsum(lengths)\n offset = (cum_lengths[(- 1)] * fraction)\n seg_id = np.argmin((cum_lengths < offset))\n if (seg_id > 0):\n offset -= cum_lengths[(seg_id - 1)]\n if relative_offset:\n offset /= lengths[seg_id]\n return (seg_id, offset)", "docstring": "Find the segment which corresponds to the fraction\nof the path length along the piecewise linear curve which\nis constructed from the set of points.\n\nArgs:\npoints: an iterable of indexable objects with indices\n0, 1, 2 corresponding to 3D cartesian coordinates\nfraction: path length fraction (0.0 <= fraction <= 1.0)\nrelative_offset: return absolute or relative segment distance\n\nReturns:\n(segment ID, segment offset) pair.", "source": "codesearchnet"} {"code": "def remove_backup(name):\n \n if name not in list_backups():\n log.debug('Backup already removed: %s', name)\n return True\n\n ps_cmd = ['Remove-WebConfigurationBackup',\n '-Name', \"'{0}'\".format(name)]\n\n cmd_ret = _srvmgr(ps_cmd)\n\n if cmd_ret['retcode'] != 0:\n msg = 'Unable to remove web configuration: {0}\\nError: {1}' \\\n ''.format(name, cmd_ret['stderr'])\n raise CommandExecutionError(msg)\n\n return name not in list_backups()", "docstring": "Remove an IIS Configuration backup from the System.\n\n.. versionadded:: 2017.7.0\n\nArgs:\nname (str): The name of the backup to remove\n\nReturns:\nbool: True if successful, otherwise False\n\nCLI Example:\n\n.. code-block:: bash\n\nsalt '*' win_iis.remove_backup backup_20170209", "source": "juraj-google-style"} {"code": "def delete(self, branch, commit_message, **kwargs):\n \n file_path = self.get_id().replace('/', '%2F')\n self.manager.delete(file_path, branch, commit_message, **kwargs)", "docstring": "Delete the file from the server.\n\nArgs:\nbranch (str): Branch from which the file will be removed\ncommit_message (str): Commit message for the deletion\n**kwargs: Extra options to send to the server (e.g. 
sudo)\n\nRaises:\nGitlabAuthenticationError: If authentication is not correct\nGitlabDeleteError: If the server cannot perform the request", "source": "juraj-google-style"} {"code": "def FromLdapToTimestamp(self, ldap_ts_string):\n if isinstance(ldap_ts_string, bytes):\n ldap_ts_string = ldap_ts_string.decode('utf-8')\n try:\n if self.conf.get('ad'):\n t = time.strptime(ldap_ts_string, '%Y%m%d%H%M%S.0Z')\n else:\n t = time.strptime(ldap_ts_string, '%Y%m%d%H%M%SZ')\n except ValueError:\n m = re.match('([0-9]*)(\\\\.[0-9]*)?(Z)', ldap_ts_string)\n if m:\n ldap_ts_string = m.group(1) + m.group(3)\n if self.conf.get('ad'):\n t = time.strptime(ldap_ts_string, '%Y%m%d%H%M%S.0Z')\n else:\n t = time.strptime(ldap_ts_string, '%Y%m%d%H%M%SZ')\n return int(calendar.timegm(t))", "docstring": "Transforms a LDAP timestamp into the nss_cache internal timestamp.\n\nArgs:\nldap_ts_string: An LDAP timestamp string in the format %Y%m%d%H%M%SZ\n\nReturns:\nnumber of seconds since epoch.", "source": "github-repos"} {"code": "def AddToBalance(self, assetId, fixed8_val):\n found = False\n for (key, balance) in self.Balances.items():\n if (key == assetId):\n self.Balances[assetId] = (self.Balances[assetId] + fixed8_val)\n found = True\n if (not found):\n self.Balances[assetId] = fixed8_val", "docstring": "Add amount to the specified balance.\n\nArgs:\nassetId (UInt256):\nfixed8_val (Fixed8): amount to add.", "source": "codesearchnet"} {"code": "def _validate_isvalid_composition(self, isvalid_composition, field, value):\n sum_amount = 0.0\n if (value['kind'] in ['mass fraction', 'mole fraction']):\n low_lim = 0.0\n up_lim = 1.0\n total_amount = 1.0\n elif (value['kind'] in ['mole percent']):\n low_lim = 0.0\n up_lim = 100.0\n total_amount = 100.0\n else:\n self._error(field, 'composition kind must be \"mole percent\", \"mass fraction\", or \"mole fraction\"')\n return False\n for sp in value['species']:\n amount = sp['amount'][0]\n sum_amount += amount\n if (amount < low_lim):\n self._error(field, (((('Species ' + sp['species-name']) + ' ') + value['kind']) + ' must be greater than {:.1f}'.format(low_lim)))\n elif (amount > up_lim):\n self._error(field, (((('Species ' + sp['species-name']) + ' ') + value['kind']) + ' must be less than {:.1f}'.format(up_lim)))\n if (not np.isclose(total_amount, sum_amount)):\n self._error(field, ((('Species ' + value['kind']) + 's do not sum to {:.1f}: '.format(total_amount)) + '{:f}'.format(sum_amount)))", "docstring": "Checks for valid specification of composition.\n\nArgs:\nisvalid_composition (bool): flag from schema indicating\ncomposition to be checked.\nfield (str): 'composition'\nvalue (dict): dictionary of composition\n\nThe rule's arguments are validated against this schema:\n{'isvalid_composition': {'type': 'bool'}, 'field': {'type': 'str'},\n'value': {'type': 'dict'}}", "source": "codesearchnet"} {"code": "def keras_serializable(cls):\n initializer = cls.__init__\n config_class = getattr(cls, 'config_class', None)\n if config_class is None:\n raise AttributeError('Must set `config_class` to use @keras_serializable')\n\n @functools.wraps(initializer)\n def wrapped_init(self, *args, **kwargs):\n config = args[0] if args and isinstance(args[0], PretrainedConfig) else kwargs.pop('config', None)\n if isinstance(config, dict):\n config = config_class.from_dict(config)\n initializer(self, config, *args, **kwargs)\n elif isinstance(config, PretrainedConfig):\n if len(args) > 0:\n initializer(self, *args, **kwargs)\n else:\n initializer(self, config, *args, **kwargs)\n else:\n 
raise ValueError('Must pass either `config` (PretrainedConfig) or `config` (dict)')\n self._config = config\n self._kwargs = kwargs\n cls.__init__ = wrapped_init\n if not hasattr(cls, 'get_config'):\n raise TypeError('Only use @keras_serializable on keras.layers.Layer subclasses')\n if hasattr(cls.get_config, '_is_default'):\n\n def get_config(self):\n cfg = super(cls, self).get_config()\n cfg['config'] = self._config.to_dict()\n cfg.update(self._kwargs)\n return cfg\n cls.get_config = get_config\n cls._keras_serializable = True\n if hasattr(keras.utils, 'register_keras_serializable'):\n cls = keras.utils.register_keras_serializable()(cls)\n return cls", "docstring": "Decorate a Keras Layer class to support Keras serialization.\n\nThis is done by:\n\n1. Adding a `transformers_config` dict to the Keras config dictionary in `get_config` (called by Keras at\nserialization time).\n2. Wrapping `__init__` to accept that `transformers_config` dict (passed by Keras at deserialization time) and\nconvert it to a config object for the actual layer initializer.\n3. Registering the class as a custom object in Keras (if the Tensorflow version supports this), so that it does not\nneed to be supplied in `custom_objects` in the call to `keras.models.load_model`.\n\nArgs:\ncls (a `keras.layers.Layers subclass`):\nTypically a `TF.MainLayer` class in this project, in general must accept a `config` argument to its\ninitializer.\n\nReturns:\nThe same class object, with modifications for Keras deserialization.", "source": "github-repos"} {"code": "def outputZip(self, figtype='png'):\n from zipfile import ZipFile\n with ZipFile((self.outfile + '.zip'), 'w') as zipcontainer:\n zipcontainer.writestr('summary.txt', '')\n c = count(1)\n for section in self.sections:\n section.sectionOutZip(zipcontainer, 's{}_{}/'.format(next(c), section.title.replace(' ', '_')), figtype=figtype)", "docstring": "Outputs the report in a zip container.\nFigs and tabs as pngs and Excel files.\n\nArgs:\nfigtype (str): Figure type of images in the zip folder.", "source": "codesearchnet"} {"code": "def exp(array, ty):\n weld_obj = WeldObject(encoder_, decoder_)\n array_var = weld_obj.update(array)\n if isinstance(array, WeldObject):\n array_var = array.obj_id\n weld_obj.dependencies[array_var] = array\n weld_template = '\\n map(\\n %(array)s,\\n |ele: %(ty)s| exp(ele)\\n )\\n '\n weld_obj.weld_code = (weld_template % {'array': array_var, 'ty': ty})\n return weld_obj", "docstring": "Computes the per-element exponent of the passed-in array.\n\nArgs:\narray (WeldObject / Numpy.ndarray): Input array\nty (WeldType): Type of each element in the input array\n\nReturns:\nA WeldObject representing this computation", "source": "codesearchnet"} {"code": "def describe(self, model_name):\n \n model_yaml = yaml.safe_dump(self.get_model_details(model_name), default_flow_style=False)\n print(model_yaml)", "docstring": "Print information of a specified model.\n\nArgs:\nmodel_name: the name of the model to print details on.", "source": "juraj-google-style"} {"code": "def getSubjectInfo(self, subject, vendorSpecific=None):\n \n response = self.getSubjectInfoResponse(subject, vendorSpecific)\n return self._read_dataone_type_response(response, 'SubjectInfo')", "docstring": "See Also: getSubjectInfoResponse()\n\nArgs:\nsubject:\nvendorSpecific:\n\nReturns:", "source": "juraj-google-style"} {"code": "def get_available_references(self, datas):\n names = []\n for (k, v) in datas.items():\n if k.startswith(RULE_REFERENCE):\n names.append(k[(len(RULE_REFERENCE) + 1):])\n 
return names", "docstring": "Get available manifest reference names.\n\nEvery rules starting with prefix from ``nomenclature.RULE_REFERENCE``\nare available references.\n\nOnly name validation is performed on these references.\n\nArguments:\ndatas (dict): Data where to search for reference declarations.\n\nReturns:\nlist: List of every available reference names. This is the real\nname unprefixed.", "source": "codesearchnet"} {"code": "def crcMeterRead(self, raw_read, def_buf):\n \n try:\n if len(raw_read) == 0:\n ekm_log(\"(\" + self.m_context + \") Empty return read.\")\n return False\n sent_crc = self.calc_crc16(raw_read[1:-2])\n logstr = \"(\" + self.m_context + \")CRC sent = \" + str(def_buf[\"crc16\"][MeterData.StringValue])\n logstr += \" CRC calc = \" + sent_crc\n ekm_log(logstr)\n if int(def_buf[\"crc16\"][MeterData.StringValue], 16) == int(sent_crc, 16):\n return True\n\n \n \n \n \n \n \n except struct.error:\n ekm_log(str(sys.exc_info()))\n for frame in traceback.extract_tb(sys.exc_info()[2]):\n fname, lineno, fn, text = frame\n ekm_log(\"Error in %s on line %d\" % (fname, lineno))\n return False\n\n except TypeError:\n ekm_log(str(sys.exc_info()))\n for frame in traceback.extract_tb(sys.exc_info()[2]):\n fname, lineno, fn, text = frame\n ekm_log(\"Error in %s on line %d\" % (fname, lineno))\n return False\n\n except ValueError:\n ekm_log(str(sys.exc_info()))\n for frame in traceback.extract_tb(sys.exc_info()[2]):\n fname, lineno, fn, text = frame\n ekm_log(\"Error in %s on line %d\" % (fname, lineno))\n return False\n\n return False", "docstring": "Internal read CRC wrapper.\n\nArgs:\nraw_read (str): Bytes with implicit string cast from serial read\ndef_buf (SerialBlock): Populated read buffer.\n\nReturns:\nbool: True if passed CRC equals calculated CRC.", "source": "juraj-google-style"} {"code": "def __init__(self, input_size: int, num_experts: int, top_k: int):\n super().__init__()\n self.num_experts = num_experts\n self.input_size = input_size\n self.top_k = top_k\n self.layer = nn.Linear(input_size, num_experts, bias=False)", "docstring": "Initialize the top-k gating mechanism.\nArgs:\ninput_size (`int`):\nSize of the input.\nnum_experts (`int`):\nNumber of experts.\ntop_k (`int`):\nNumber of top experts to select.", "source": "github-repos"} {"code": "def _parse_symbol(self, sym):\n \n \n \n special = {\"Hw\": \"H\", \"Ow\": \"O\", \"Wat\": \"O\",\n \"wat\": \"O\", \"OH\": \"\", \"OH2\": \"\", \"NO3\": \"N\"}\n\n parsed_sym = None\n \n \n \n m_sp = re.match(\"|\".join(special.keys()), sym)\n if m_sp:\n parsed_sym = special[m_sp.group()]\n elif Element.is_valid_symbol(sym[:2].title()):\n parsed_sym = sym[:2].title()\n elif Element.is_valid_symbol(sym[0].upper()):\n parsed_sym = sym[0].upper()\n else:\n m = re.match(r\"w?[A-Z][a-z]*\", sym)\n if m:\n parsed_sym = m.group()\n\n if parsed_sym is not None and (m_sp or not re.match(r\"{}\\d*\".format(parsed_sym), sym)):\n msg = \"{} parsed as {}\".format(sym, parsed_sym)\n warnings.warn(msg)\n self.errors.append(msg)\n\n return parsed_sym", "docstring": "Parse a string with a symbol to extract a string representing an element.\n\nArgs:\nsym (str): A symbol to be parsed.\n\nReturns:\nA string with the parsed symbol. 
None if no parsing was possible.", "source": "juraj-google-style"} {"code": "def _collect_layer_statistics(self) -> Dict[str, Dict[str, float]]:\n layer_statistics = collections.defaultdict(lambda: collections.defaultdict(list))\n initialize = True\n for tensor_data in self._data_gen():\n self._set_input_tensors(self._quant_interpreter, tensor_data, initialize)\n initialize = False\n self._quant_interpreter.invoke()\n for tensor_detail in self._get_numeric_verify_tensor_details():\n tensor_name = tensor_detail['name']\n diffs = self._quant_interpreter.get_tensor(tensor_detail['index'])\n for metric_name, metric_fn in self._layer_debug_metrics.items():\n layer_statistics[tensor_name][metric_name].append(metric_fn(diffs))\n if self._debug_options.layer_direct_compare_metrics is not None:\n for tensor_detail in self._get_numeric_verify_tensor_details():\n tensor_name = tensor_detail['name']\n op_idx = self._defining_op[tensor_detail['index']]\n op_detail = self._quant_interpreter._get_op_details(op_idx)\n q_idx, f_idx = op_detail['inputs']\n quant_input_detail = self._quant_interpreter._get_tensor_details(q_idx, subgraph_index=0)\n for metric_name, metric_fn in self._debug_options.layer_direct_compare_metrics.items():\n layer_statistics[tensor_name][metric_name].append(metric_fn(self._quant_interpreter.get_tensor(f_idx), self._quant_interpreter.get_tensor(q_idx), quant_input_detail['quantization_parameters']['scales'][0], quant_input_detail['quantization_parameters']['zero_points'][0]))\n for metrics in layer_statistics.values():\n for metric_name in metrics:\n metrics[metric_name] = np.nanmean(metrics[metric_name])\n return layer_statistics", "docstring": "Collects layer statistics by applying layer debug metrics.\n\nFor all data from the given RepresentativeDataset, collect statistics per\nexample by getting the NumericVerify op results in _quant_interpreter\nand calculating layer debug metrics on the results.\n\nReturns:\naggregated per-layer statistics of NumericVerify results.\n{layer_name: {metric_name: metric}}", "source": "github-repos"} {"code": "def register_actor(name, actor_handle):\n \n if not isinstance(name, str):\n raise TypeError(\"The name argument must be a string.\")\n if not isinstance(actor_handle, ray.actor.ActorHandle):\n raise TypeError(\"The actor_handle argument must be an ActorHandle \"\n \"object.\")\n actor_name = _calculate_key(name)\n pickled_state = pickle.dumps(actor_handle)\n\n \n already_exists = _internal_kv_put(actor_name, pickled_state)\n if already_exists:\n \n \n actor_handle._ray_new_actor_handles.pop()\n raise ValueError(\n \"Error: the actor with name={} already exists\".format(name))", "docstring": "Register a named actor under a string key.\n\nArgs:\nname: The name of the named actor.\nactor_handle: The actor object to be associated with this name", "source": "juraj-google-style"} {"code": "def parent(self):\n if (len(self._path) == 1):\n return None\n else:\n parent_path = self._path[:(- 1)]\n return self._client.document(*parent_path)", "docstring": "Document that owns the current collection.\n\nReturns:\nOptional[~.firestore_v1beta1.document.DocumentReference]: The\nparent document, if the current collection is not a\ntop-level collection.", "source": "codesearchnet"} {"code": "def view(molecule, viewer=settings['defaults']['viewer'], use_curr_dir=False):\n try:\n molecule.view(viewer=viewer, use_curr_dir=use_curr_dir)\n except AttributeError:\n if pd.api.types.is_list_like(molecule):\n cartesian_list = molecule\n else:\n raise 
ValueError('Argument is neither list nor Cartesian.')\n if use_curr_dir:\n TEMP_DIR = os.path.curdir\n else:\n TEMP_DIR = tempfile.gettempdir()\n\n def give_filename(i):\n filename = (('ChemCoord_list_' + str(i)) + '.molden')\n return os.path.join(TEMP_DIR, filename)\n i = 1\n while os.path.exists(give_filename(i)):\n i = (i + 1)\n to_molden(cartesian_list, buf=give_filename(i))\n\n def open_file(i):\n 'Open file and close after being finished.'\n try:\n subprocess.check_call([viewer, give_filename(i)])\n except (subprocess.CalledProcessError, FileNotFoundError):\n raise\n finally:\n if use_curr_dir:\n pass\n else:\n os.remove(give_filename(i))\n Thread(target=open_file, args=(i,)).start()", "docstring": "View your molecule or list of molecules.\n\n.. note:: This function writes a temporary file and opens it with\nan external viewer.\nIf you modify your molecule afterwards you have to recall view\nin order to see the changes.\n\nArgs:\nmolecule: Can be a cartesian, or a list of cartesians.\nviewer (str): The external viewer to use. The default is\nspecified in settings.viewer\nuse_curr_dir (bool): If True, the temporary file is written to\nthe current directory. Otherwise it gets written to the\nOS dependent temporary directory.\n\nReturns:\nNone:", "source": "codesearchnet"} {"code": "def visit(self, visitor):\n visited = set()\n self._root_transform().visit(visitor, self, visited)", "docstring": "Visits depth-first every node of a pipeline's DAG.\n\nRunner-internal implementation detail; no backwards-compatibility guarantees\n\nArgs:\nvisitor (~apache_beam.pipeline.PipelineVisitor):\n:class:`~apache_beam.pipeline.PipelineVisitor` object whose callbacks\nwill be called for each node visited. See\n:class:`~apache_beam.pipeline.PipelineVisitor` comments.\n\nRaises:\nTypeError: if node is specified and is not a\n:class:`~apache_beam.pvalue.PValue`.\n~apache_beam.error.PipelineError: if node is specified and does not\nbelong to this pipeline instance.", "source": "github-repos"} {"code": "def _iceberg_io_read_test_preprocessor(test_spec: dict, expected: List[str], env: TestEnvironment):\n if (pipeline := test_spec.get('pipeline', None)):\n for transform in pipeline.get('transforms', []):\n if transform.get('type', '') == 'ReadFromIceberg':\n config = transform['config']\n db_name, table_name, field_value_dynamic_destinations = config['table'].split('.')\n transform['type'] = 'Create'\n transform['config'] = {k: v for k, v in config.items() if k.startswith('__')}\n transform['config']['elements'] = INPUT_TABLES[str(db_name), str(table_name), str(field_value_dynamic_destinations)]\n return test_spec", "docstring": "Preprocessor for tests that involve reading from Iceberg.\n\nThis preprocessor replaces any ReadFromIceberg transform with a Create\ntransform that reads from a predefined in-memory dictionary.
This allows\nthe test to verify the pipeline's correctness without relying on Iceberg\ntables stored externally.\n\nArgs:\ntest_spec: The dictionary representation of the YAML pipeline specification.\nexpected: A list of strings representing the expected output of the\npipeline.\nenv: The TestEnvironment object providing utilities for creating temporary\nfiles.\n\nReturns:\nThe modified test_spec dictionary with ReadFromIceberg transforms replaced.", "source": "github-repos"} {"code": "def get_path(url):\n \n\n if url not in URLHelper.__cache:\n URLHelper.__cache[url] = urlparse(url)\n\n return URLHelper.__cache[url].path", "docstring": "Get the path (e.g /page/23) of the given URL.\n\nArgs:\nurl (str): The URL to get the path from.\n\nReturns:\nstr: The path", "source": "juraj-google-style"} {"code": "def update(self, friendly_name=None, description=None, expiry=None, schema=None):\n \n self._load_info()\n if friendly_name is not None:\n self._info['friendlyName'] = friendly_name\n if description is not None:\n self._info['description'] = description\n if expiry is not None:\n if isinstance(expiry, datetime.datetime):\n expiry = calendar.timegm(expiry.utctimetuple()) * 1000\n self._info['expirationTime'] = expiry\n if schema is not None:\n if isinstance(schema, _schema.Schema):\n schema = schema._bq_schema\n self._info['schema'] = {'fields': schema}\n try:\n self._api.table_update(self._name_parts, self._info)\n except datalab.utils.RequestException:\n \n self._info = None\n except Exception as e:\n raise e", "docstring": "Selectively updates Table information.\n\nAny parameters that are omitted or None are not updated.\n\nArgs:\nfriendly_name: if not None, the new friendly name.\ndescription: if not None, the new description.\nexpiry: if not None, the new expiry time, either as a DateTime or milliseconds since epoch.\nschema: if not None, the new schema: either a list of dictionaries or a Schema.", "source": "juraj-google-style"} {"code": "def execute_no_wait(self, cmd, walltime=2, envs={}):\n \n\n \n stdin, stdout, stderr = self.ssh_client.exec_command(\n self.prepend_envs(cmd, envs), bufsize=-1, timeout=walltime\n )\n return None, stdout, stderr", "docstring": "Execute asynchronously without waiting for exitcode\n\nArgs:\n- cmd (string): Commandline string to be executed on the remote side\n- walltime (int): timeout to exec_command\n\nKWargs:\n- envs (dict): A dictionary of env variables\n\nReturns:\n- None, stdout (readable stream), stderr (readable stream)\n\nRaises:\n- ChannelExecFailed (reason)", "source": "juraj-google-style"} {"code": "def trace_tensor(tensor, tracepoint_name=None):\n if tracepoint_name is None:\n tracepoint_name = tensor.name\n tensor.graph.get_collection(_TENSOR_TRACER_COLLECTION)\n tensor.graph.add_to_collection(_TENSOR_TRACER_COLLECTION, (tensor, tracepoint_name))\n return tensor", "docstring": "Programmatic interface to trace a tensor with Tensor Tracer.\n\nTensor Tracer, by default, traces all tensors in the execution. This function\ncan be used to limit traced tensors. If this function is called for a subset\nof the tensors, only those will be traced.\n\nFor example, Tensor Tracer will only trace c below.\nc = tf.MatMul(a, b)\ntensor_tracer.trace_tensor(c)\nd = tf.add(c, 1)\nArgs:\ntensor: the tensor object for which the tracing is requested.\ntracepoint_name: an optional tensor tracepoint name string. A tracepoint\nname is a Tensor Tracer internal name for the tensor.
It is useful when\ncomparing equivalent traces from different models that have different\ntensor namings. Equivalent tensors (with different names) can be mapped\nto each other by assigning a common tracepoint_name.\n\nReturns:\nThe provided tensor.", "source": "github-repos"} {"code": "def wrap_text(text, width=80):\n \n text = re.sub(r\"\\s+\", \" \", text).strip()\n wrapper = TextWrapper(\n width=width, break_long_words=False, replace_whitespace=True\n )\n return wrapper.fill(text)", "docstring": "Wrap text lines to maximum *width* characters.\n\nWrapped text is aligned against the left text border.\n\nArgs:\ntext (str): Text to wrap.\nwidth (int): Maximum number of characters per line.\n\nReturns:\nstr: Wrapped text.", "source": "juraj-google-style"} {"code": "def insert_at_frontier(self,\n operations: ops.OP_TREE,\n start: int,\n frontier: Dict[ops.Qid, int] = None\n ) -> Dict[ops.Qid, int]:\n \n if frontier is None:\n frontier = defaultdict(lambda: 0)\n operations = tuple(ops.flatten_op_tree(operations))\n if not operations:\n return frontier\n qubits = set(q for op in operations for q in op.qubits)\n if any(frontier[q] > start for q in qubits):\n raise ValueError('The frontier for qubits on which the operations'\n 'to insert act cannot be after start.')\n\n next_moments = self.next_moments_operating_on(qubits, start)\n\n insertion_indices, _ = self._pick_inserted_ops_moment_indices(\n operations, start, frontier)\n\n self._push_frontier(frontier, next_moments)\n\n self._insert_operations(operations, insertion_indices)\n\n return frontier", "docstring": "Inserts operations inline at frontier.\n\nArgs:\noperations: the operations to insert\nstart: the moment at which to start inserting the operations\nfrontier: frontier[q] is the earliest moment in which an operation\nacting on qubit q can be placed.", "source": "juraj-google-style"} {"code": "def datetimeobj_d_b_Y_H_M_S(value):\n \n d, b, Y, t, Z = value.split()\n H, M, S = t.split(\":\")\n return datetime.datetime(\n int(Y), _months[b.lower()], int(d), int(H), int(M), int(S), tzinfo=TZ_GMT\n )", "docstring": "Convert timestamp string to a datetime object.\n\nTimestamps strings like '18 Jun 2013 12:00:00 GMT' are able to be converted\nby this function.\n\nArgs:\nvalue: A timestamp string in the format '%d %b %Y %H:%M:%S GMT'.\n\nReturns:\nA datetime object.\n\nRaises:\nValueError: If timestamp is invalid.\nKeyError: If the abbrieviated month is invalid.\n\nNote: The timezone is ignored it is simply assumed to be UTC/GMT.", "source": "juraj-google-style"} {"code": "def HandleClockSync(self, response):\n self.logger.info('Clock drift token has changed: %s.', response)\n self.distro_utils.HandleClockSync(self.logger)", "docstring": "Called when clock drift token changes.\n\nArgs:\nresponse: string, the metadata response with the new drift token value.", "source": "codesearchnet"} {"code": "def download_models(self, uniprot_acc, outdir='', force_rerun=False):\n \n downloaded = []\n subset = self.get_models(uniprot_acc)\n\n for entry in subset:\n ident = '{}_{}_{}_{}'.format(uniprot_acc, entry['template'], entry['from'], entry['to'])\n outfile = op.join(outdir, ident + '.pdb')\n\n if ssbio.utils.force_rerun(flag=force_rerun, outfile=outfile):\n response = requests.get(entry['url'])\n\n if response.status_code == 404:\n log.error('{}: 404 returned, no model available.'.format(ident))\n\n else:\n with open(outfile, 'w') as f:\n f.write(response.text)\n\n log.debug('{}: downloaded homology model'.format(ident))\n downloaded.append(outfile)\n 
else:\n downloaded.append(outfile)\n\n return downloaded", "docstring": "Download all models available for a UniProt accession number.\n\nArgs:\nuniprot_acc (str): UniProt ACC/ID\noutdir (str): Path to output directory, uses working directory if not set\nforce_rerun (bool): Force a redownload the models if they already exist\n\nReturns:\nlist: Paths to the downloaded models", "source": "juraj-google-style"} {"code": "def save_state(self, out_path):\n \n\n state = self.dump_state()\n\n \n \n state = _clean_intenum(state)\n\n with open(out_path, \"w\") as outfile:\n json.dump(state, outfile, indent=4)", "docstring": "Save the current state of this emulated object to a file.\n\nArgs:\nout_path (str): The path to save the dumped state of this emulated\nobject.", "source": "juraj-google-style"} {"code": "def unsubscribe(self, future):\n \n assert future not in self._pending_unsubscribes, \\\n \"%r has already been unsubscribed from\" % \\\n self._pending_unsubscribes[future]\n subscribe = self._requests[future]\n self._pending_unsubscribes[future] = subscribe\n \n self._subscriptions.pop(subscribe.id)\n request = Unsubscribe(subscribe.id)\n request.set_callback(self._q.put)\n try:\n controller = self.get_controller(subscribe.path[0])\n except ValueError:\n \n pass\n else:\n self.handle_request(controller, request)", "docstring": "Terminates the subscription given by a future\n\nArgs:\nfuture (Future): The future of the original subscription", "source": "juraj-google-style"} {"code": "def sample(self, fields=None, count=5, sampling=None, use_cache=True, dialect=None, billing_tier=None):\n from . import _query\n sql = self._repr_sql_()\n return _query.Query.sampling_query(sql, context=self._context, count=count, fields=fields, sampling=sampling).results(use_cache=use_cache, dialect=dialect, billing_tier=billing_tier)", "docstring": "Retrieves a sampling of data from the table.\n\nArgs:\nfields: an optional list of field names to retrieve.\ncount: an optional count of rows to retrieve which is used if a specific\nsampling is not specified.\nsampling: an optional sampling strategy to apply to the table.\nuse_cache: whether to use cached results or not.\ndialect : {'legacy', 'standard'}, default 'legacy'\n'legacy' : Use BigQuery's legacy SQL dialect.\n'standard' : Use BigQuery's standard SQL (beta), which is\ncompliant with the SQL 2011 standard.\nbilling_tier: Limits the billing tier for this job. Queries that have resource\nusage beyond this tier will fail (without incurring a charge). If unspecified, this\nwill be set to your project default. 
This can also be used to override your\nproject-wide default billing tier on a per-query basis.\nReturns:\nA QueryResultsTable object containing the resulting data.\nRaises:\nException if the sample query could not be executed or query response was malformed.", "source": "codesearchnet"} {"code": "def project_hidden(x, projection_tensors, hidden_size, num_blocks):\n (batch_size, latent_dim, _) = common_layers.shape_list(x)\n x = tf.reshape(x, shape=[1, (- 1), hidden_size])\n x_tiled = tf.reshape(tf.tile(x, multiples=[num_blocks, 1, 1]), shape=[num_blocks, (- 1), hidden_size])\n x_projected = tf.matmul(x_tiled, projection_tensors)\n x_projected = tf.transpose(x_projected, perm=[1, 0, 2])\n x_4d = tf.reshape(x_projected, [batch_size, latent_dim, num_blocks, (- 1)])\n return x_4d", "docstring": "Project encoder hidden state under num_blocks using projection tensors.\n\nArgs:\nx: Encoder hidden state of shape [batch_size, latent_dim, hidden_size].\nprojection_tensors: Projection tensors used to project the hidden state.\nhidden_size: Dimension of the latent space.\nnum_blocks: Number of blocks in DVQ.\n\nReturns:\nx_projected: Projected states of shape [batch_size, latent_dim, num_blocks,\nhidden_size / num_blocks].", "source": "codesearchnet"} {"code": "async def wait(self, timeout=None):\n \n\n await asyncio.wait_for(self._future, timeout)\n\n if self._exception is not None:\n self._raise_exception()\n\n return self._result", "docstring": "Wait for this operation to finish.\n\nYou can specify an optional timeout that defaults to no timeout if\nNone is passed. The result of the operation is returned from this\nmethod. If the operation raised an exception, it is reraised from this\nmethod.\n\nArgs:\ntimeout (float): The maximum number of seconds to wait before timing\nout.", "source": "juraj-google-style"} {"code": "def remove(self):\n removes = 0\n for (path, info) in self._make_iter(search='depth'):\n if info.is_dir:\n self.fs.removetree(path)\n else:\n self.fs.remove(path)\n removes += 1\n return removes", "docstring": "Removed all matched paths.\n\nReturns:\nint: Number of file and directories removed.\n\nExample:\n>>> import fs\n>>> fs.open_fs('~/projects/my_project').glob('**/*.pyc').remove()\n29", "source": "codesearchnet"} {"code": "def _CreateMethod(self, method_name):\n \n soap_service_method = self.zeep_client.service[method_name]\n\n def MakeSoapRequest(*args):\n AddToUtilityRegistry('zeep')\n soap_headers = self._GetZeepFormattedSOAPHeaders()\n packed_args = self._PackArguments(method_name, args)\n try:\n return soap_service_method(\n *packed_args, _soapheaders=soap_headers)['body']['rval']\n except zeep.exceptions.Fault as e:\n error_list = ()\n if e.detail is not None:\n underlying_exception = e.detail.find(\n '{%s}ApiExceptionFault' % self._GetBindingNamespace())\n fault_type = self.zeep_client.get_element(\n '{%s}ApiExceptionFault' % self._GetBindingNamespace())\n fault = fault_type.parse(\n underlying_exception, self.zeep_client.wsdl.types)\n error_list = fault.errors or error_list\n raise googleads.errors.GoogleAdsServerFault(\n e.detail, errors=error_list, message=e.message)\n return MakeSoapRequest", "docstring": "Create a method wrapping an invocation to the SOAP service.\n\nArgs:\nmethod_name: A string identifying the name of the SOAP method to call.\n\nReturns:\nA callable that can be used to make the desired SOAP request.", "source": "juraj-google-style"} {"code": "def get_grouping_from_attentions(attentions: Tuple[tf.Tensor], hw_shape: Tuple[int]) -> tf.Tensor:\n 
attn_maps = []\n prev_attn_masks = None\n for attn_masks in attentions:\n attn_masks = tf.transpose(attn_masks, perm=(0, 2, 1))\n if prev_attn_masks is None:\n prev_attn_masks = attn_masks\n else:\n prev_attn_masks = tf.matmul(prev_attn_masks, attn_masks)\n cur_attn_map = resize_attention_map(tf.transpose(prev_attn_masks, perm=(0, 2, 1)), *hw_shape)\n attn_maps.append(cur_attn_map)\n final_grouping = attn_maps[-1]\n return tf.stop_gradient(final_grouping)", "docstring": "Args:\nattentions (`tuple(tf.Tensor)`): tuple of attention maps returned by `TFGroupViTVisionTransformer`\nhw_shape (`tuple(int)`): height and width of the output attention map\nReturns:\n`tf.Tensor`: the attention map of shape [batch_size, groups, height, width]", "source": "github-repos"} {"code": "def retry(transport: 'UDPTransport', messagedata: bytes, message_id: UDPMessageID, recipient: Address, stop_event: Event, timeout_backoff: Iterable[int]) -> bool:\n async_result = transport.maybe_sendraw_with_result(recipient, messagedata, message_id)\n event_quit = event_first_of(async_result, stop_event)\n for timeout in timeout_backoff:\n if (event_quit.wait(timeout=timeout) is True):\n break\n log.debug('retrying message', node=pex(transport.raiden.address), recipient=pex(recipient), msgid=message_id)\n transport.maybe_sendraw_with_result(recipient, messagedata, message_id)\n return async_result.ready()", "docstring": "Send messagedata until it's acknowledged.\n\nExit when:\n\n- The message is delivered.\n- Event_stop is set.\n- The iterator timeout_backoff runs out.\n\nReturns:\nbool: True if the message was acknowledged, False otherwise.", "source": "codesearchnet"} {"code": "def register_defs(self, def_list, **kwargs):\n \n for item in def_list:\n if isinstance(item, tuple):\n self.register_rml_def(*item, **kwargs)\n elif isinstance(item, dict):\n cp_kwargs = kwargs.copy()\n item.update(kwargs)\n self.register_rml_def(**item)", "docstring": "Registers a list of Rml definitions objects\n\nArgs:\n-----\ndef_list: list of objects defining the rml definitions", "source": "juraj-google-style"} {"code": "def _parse_expiry(response_data):\n \n expires_in = response_data.get('expires_in', None)\n\n if expires_in is not None:\n return _helpers.utcnow() + datetime.timedelta(\n seconds=expires_in)\n else:\n return None", "docstring": "Parses the expiry field from a response into a datetime.\n\nArgs:\nresponse_data (Mapping): The JSON-parsed response data.\n\nReturns:\nOptional[datetime]: The expiration or ``None`` if no expiration was\nspecified.", "source": "juraj-google-style"} {"code": "def get_all_for_resource(identifier, configuration=None):\n resourceview = ResourceView(configuration=configuration)\n (success, result) = resourceview._read_from_hdx('resource view', identifier, 'id', ResourceView.actions()['list'])\n resourceviews = list()\n if success:\n for resourceviewdict in result:\n resourceview = ResourceView(resourceviewdict, configuration=configuration)\n resourceviews.append(resourceview)\n return resourceviews", "docstring": "Read all resource views for a resource given by identifier from HDX and returns list of ResourceView objects\n\nArgs:\nidentifier (str): Identifier of resource\nconfiguration (Optional[Configuration]): HDX configuration.
Defaults to global configuration.\n\nReturns:\nList[ResourceView]: List of ResourceView objects", "source": "codesearchnet"} {"code": "def get_pipe_series_output(commands: Sequence[str], stdinput: BinaryIO=None) -> bytes:\n processes = []\n for i in range(len(commands)):\n if (i == 0):\n processes.append(subprocess.Popen(shlex.split(commands[i]), stdin=subprocess.PIPE, stdout=subprocess.PIPE))\n else:\n processes.append(subprocess.Popen(shlex.split(commands[i]), stdin=processes[(i - 1)].stdout, stdout=subprocess.PIPE))\n return processes[(len(processes) - 1)].communicate(stdinput)[0]", "docstring": "Get the output from a piped series of commands.\n\nArgs:\ncommands: sequence of command strings\nstdinput: optional ``stdin`` data to feed into the start of the pipe\n\nReturns:\n``stdout`` from the end of the pipe", "source": "codesearchnet"} {"code": "def copyfile(src, dst, follow_symlinks=True):\n (src, src_is_storage) = format_and_is_storage(src)\n (dst, dst_is_storage) = format_and_is_storage(dst)\n if ((not src_is_storage) and (not dst_is_storage)):\n return shutil_copyfile(src, dst, follow_symlinks=follow_symlinks)\n with handle_os_exceptions():\n try:\n if ((not hasattr(dst, 'read')) and (not isdir(dirname(dst)))):\n raise IOError((\"No such file or directory: '%s'\" % dst))\n except ObjectPermissionError:\n pass\n _copy(src, dst, src_is_storage, dst_is_storage)", "docstring": "Copies a source file to a destination file.\n\nEquivalent to \"shutil.copyfile\".\n\nSource and destination can also be binary opened file-like objects.\n\nArgs:\nsrc (path-like object or file-like object): Source file.\ndst (path-like object or file-like object): Destination file.\nfollow_symlinks (bool): Follow symlinks.\nNot supported on cloud storage objects.\n\nRaises:\nIOError: Destination directory not found.", "source": "codesearchnet"} {"code": "async def __anit__(self, core, node):\n \n await s_base.Base.__anit__(self)\n\n self.core = core\n\n self.node = node\n self.iden = node.name()\n\n self.borked = None\n\n self.info = await node.dict()\n self.info.setdefault('owner', 'root')\n self.info.setdefault('layers', ())\n\n self.layers = []\n\n for iden in self.info.get('layers'):\n\n layr = core.layers.get(iden)\n\n if layr is None:\n self.borked = iden\n logger.warning('view %r has missing layer %r' % (self.iden, iden))\n continue\n\n if not self.layers and layr.readonly:\n self.borked = iden\n raise s_exc.ReadOnlyLayer(mesg=f'First layer {iden} must not be read-only')\n\n self.layers.append(layr)", "docstring": "Async init the view.\n\nArgs:\ncore (Cortex): The cortex that owns the view.\nnode (HiveNode): The hive node containing the view info.", "source": "juraj-google-style"} {"code": "def add_callback(self, name, func):\n \n\n if name not in self.callbacks:\n raise ValueError(\"Unknown callback name: %s\" % name)\n\n self.callbacks[name].add(func)", "docstring": "Add a callback when Device events happen\n\nArgs:\nname (str): currently support 'on_scan' and 'on_disconnect'\nfunc (callable): the function that should be called", "source": "juraj-google-style"} {"code": "def lsdirs(root=\".\", **kwargs):\n \n paths = ls(root=root, **kwargs)\n if isfile(root):\n return []\n return [_path for _path in paths if isdir(path(root, _path))]", "docstring": "Return only subdirectories from a directory listing.\n\nArguments:\n\nroot (str): Path to directory. 
Can be relative or absolute.\n**kwargs: Any additional arguments to be passed to ls().\n\nReturns:\n\nlist of str: A list of directory paths.\n\nRaises:\n\nOSError: If root directory does not exist.", "source": "juraj-google-style"} {"code": "def CredibleInterval(pmf, percentage=90):\n cdf = pmf.MakeCdf()\n prob = ((1 - (percentage / 100.0)) / 2)\n interval = (cdf.Value(prob), cdf.Value((1 - prob)))\n return interval", "docstring": "Computes a credible interval for a given distribution.\n\nIf percentage=90, computes the 90% CI.\n\nArgs:\npmf: Pmf object representing a posterior distribution\npercentage: float between 0 and 100\n\nReturns:\nsequence of two floats, low and high", "source": "codesearchnet"} {"code": "def __call__(self, shape, dtype=None):\n dtype = standardize_dtype(dtype)\n frame_length, input_channels, fft_length = shape\n win = None\n scaling = 1\n if self.window is not None:\n win = self.window\n if isinstance(win, str):\n win = scipy.signal.get_window(win, frame_length, self.periodic)\n win = ops.convert_to_tensor(win, dtype=dtype)\n if len(win.shape) != 1 or win.shape[-1] != frame_length:\n raise ValueError(f'The shape of `window` must be equal to [frame_length].Received: window shape={win.shape}')\n win = ops.reshape(win, [frame_length, 1, 1])\n if self.scaling == 'density':\n scaling = ops.sqrt(ops.sum(ops.square(win)))\n elif self.scaling == 'spectrum':\n scaling = ops.sum(ops.abs(win))\n _fft_length = (fft_length - 1) * 2\n freq = ops.reshape(ops.arange(fft_length, dtype=dtype), (1, 1, fft_length)) / _fft_length\n time = ops.reshape(ops.arange(frame_length, dtype=dtype), (frame_length, 1, 1))\n args = -2 * time * freq * ops.arccos(ops.cast(-1, dtype))\n if self.side == 'real':\n kernel = ops.cast(ops.cos(args), dtype)\n else:\n kernel = ops.cast(ops.sin(args), dtype)\n if win is not None:\n kernel = kernel * win / scaling\n return kernel", "docstring": "Returns a tensor object initialized as specified by the initializer.\n\nThe shape is assumed to be `(T, 1, F // 2 + 1)`, where `T` is the size\nof the given window, and `F` is the number of frequency bands. Only half\nthe frequency bands are used, which is a common practice in STFT,\nbecause the second half are the conjugates of the first half in\na reversed order.\n\nArgs:\nshape: Shape of the tensor.\ndtype: Optional dtype of the tensor. Only numeric or boolean dtypes\nare supported. 
If not specified, `keras.backend.floatx()`\nis used, which defaults to `float32` unless you configured it\notherwise (via `keras.backend.set_floatx(float_dtype)`).", "source": "github-repos"} {"code": "def _CheckStatusAnalysisProcess(self, pid):\n self._RaiseIfNotRegistered(pid)\n if (pid in self._completed_analysis_processes):\n status_indicator = definitions.STATUS_INDICATOR_COMPLETED\n process_status = {'processing_status': status_indicator}\n used_memory = 0\n else:\n process = self._processes_per_pid[pid]\n process_status = self._QueryProcessStatus(process)\n if (process_status is None):\n process_is_alive = False\n else:\n process_is_alive = True\n process_information = self._process_information_per_pid[pid]\n used_memory = (process_information.GetUsedMemory() or 0)\n if (self._worker_memory_limit and (used_memory > self._worker_memory_limit)):\n logger.warning('Process: {0:s} (PID: {1:d}) killed because it exceeded the memory limit: {2:d}.'.format(process.name, pid, self._worker_memory_limit))\n self._KillProcess(pid)\n if isinstance(process_status, dict):\n self._rpc_errors_per_pid[pid] = 0\n status_indicator = process_status.get('processing_status', None)\n if (status_indicator == definitions.STATUS_INDICATOR_COMPLETED):\n self._completed_analysis_processes.add(pid)\n else:\n rpc_errors = (self._rpc_errors_per_pid.get(pid, 0) + 1)\n self._rpc_errors_per_pid[pid] = rpc_errors\n if (rpc_errors > self._MAXIMUM_RPC_ERRORS):\n process_is_alive = False\n if process_is_alive:\n rpc_port = process.rpc_port.value\n logger.warning('Unable to retrieve process: {0:s} (PID: {1:d}) status via RPC socket: http://localhost:{2:d}'.format(process.name, pid, rpc_port))\n processing_status_string = 'RPC error'\n status_indicator = definitions.STATUS_INDICATOR_RUNNING\n else:\n processing_status_string = 'killed'\n status_indicator = definitions.STATUS_INDICATOR_KILLED\n process_status = {'processing_status': processing_status_string}\n self._UpdateProcessingStatus(pid, process_status, used_memory)\n if (status_indicator in definitions.ERROR_STATUS_INDICATORS):\n logger.error('Process {0:s} (PID: {1:d}) is not functioning correctly.
Status code: {2!s}.'.format(process.name, pid, status_indicator))\n self._TerminateProcessByPid(pid)", "docstring": "Checks the status of an analysis process.\n\nArgs:\npid (int): process ID (PID) of a registered analysis process.\n\nRaises:\nKeyError: if the process is not registered with the engine.", "source": "codesearchnet"} {"code": "def __init__(\n self, data_stream=None, inode=None, location=None, parent=None, **kwargs):\n \n \n \n if (inode is None and not location) or not parent:\n raise ValueError('Missing inode and location, or parent value.')\n\n super(TSKPathSpec, self).__init__(parent=parent, **kwargs)\n self.data_stream = data_stream\n self.inode = inode\n self.location = location", "docstring": "Initializes a path specification.\n\nNote that the TSK path specification must have a parent.\n\nArgs:\ndata_stream (Optional[str]): data stream name, where None indicates\nthe default data stream.\ninode (Optional[int]): inode.\nlocation (Optional[str]): location.\nparent (Optional[PathSpec]): parent path specification.\n\nRaises:\nValueError: when inode and location, or parent are not set.", "source": "juraj-google-style"} {"code": "def recordbatch(self, auth, resource, entries, defer=False):\n \n return self._call('recordbatch', auth, [resource, entries], defer)", "docstring": "Records a list of historical entries to the resource specified.\n\nCalls a function that bulids a request that writes a list of historical entries to the\nspecified resource.\n\nArgs:\nauth: Takes the device cik\nresource: Takes the dataport alias or rid.\nentries: A list of entries to write to the resource.", "source": "juraj-google-style"} {"code": "def observations_np(self, boundary=20):\n \n list_observations_np_ts = [t.observations_np for t in self.trajectories]\n \n OBS = list_observations_np_ts[0].shape[1:] \n\n num_time_steps = [t.num_time_steps for t in self.trajectories]\n t_max = max(num_time_steps)\n \n boundary = int(boundary)\n bucket_length = boundary * int(np.ceil(float(t_max) / boundary))\n\n def padding_config(obs):\n \n num_to_pad = bucket_length + 1 - obs.shape[0]\n return [(0, num_to_pad)] + [(0, 0)] * len(OBS)\n\n return np.stack([\n np.pad(obs, padding_config(obs), \"constant\")\n for obs in list_observations_np_ts]), num_time_steps", "docstring": "Pads the observations in all the trajectories and returns them.\n\nArgs:\nboundary: integer, Observations will be padded to (n * boundary) + 1 where\nn is an integer.\n\nReturns:\na tuple(padded_observations, time_steps), with shapes:\npadded_observations: (self.batch_size, n * boundary + 1) + OBS\ntime_steps: integer list of length = self.batch_size", "source": "juraj-google-style"} {"code": "def __init__(self, criterion, description=None, font_attr=DEFAULT_TENSOR_ELEMENT_HIGHLIGHT_FONT_ATTR):\n self.criterion = criterion\n self.description = description\n self.font_attr = font_attr", "docstring": "Constructor of HighlightOptions.\n\nArgs:\ncriterion: (callable) A callable of the following signature:\ndef to_highlight(X):\n# Args:\n# X: The tensor to highlight elements in.\n#\n# Returns:\n# (boolean ndarray) A boolean ndarray of the same shape as X\n# indicating which elements are to be highlighted (iff True).\nThis callable will be used as the argument of np.argwhere() to\ndetermine which elements of the tensor are to be highlighted.\ndescription: (str) Description of the highlight criterion embodied by\ncriterion.\nfont_attr: (str) Font attribute to be applied to the\nhighlighted elements.", "source": "github-repos"} {"code": "def 
__intervals_from_tops(self, tops, values, basis, components, field=None, ignore_nan=True):\n length = float(basis.size)\n (start, stop) = (basis[0], basis[(- 1)])\n tops = [(start + ((p / (length - 1)) * (stop - start))) for p in tops]\n bases = (tops[1:] + [stop])\n list_of_Intervals = []\n for (i, t) in enumerate(tops):\n (v, c, d) = (values[i], [], {})\n if (ignore_nan and np.isnan(v)):\n continue\n if (field is not None):\n d = {field: v}\n if (components is not None):\n try:\n c = [deepcopy(components[int(v)])]\n except IndexError:\n c = []\n if (c and (c[0] is None)):\n c = []\n interval = Interval(t, bases[i], data=d, components=c)\n list_of_Intervals.append(interval)\n return list_of_Intervals", "docstring": "Private method. Take a sequence of tops in an arbitrary dimension,\nand provide a list of intervals from which a striplog can be made.\n\nThis is only intended to be used by ``from_image()``.\n\nArgs:\ntops (iterable). A list of floats.\nvalues (iterable). A list of values to look up.\nbasis (iterable). A list of components.\ncomponents (iterable). A list of Components.\n\nReturns:\nList. A list of Intervals.", "source": "codesearchnet"} {"code": "def cw_ssim_value(self, target, width=30):\n \n if not isinstance(target, SSIMImage):\n target = SSIMImage(target, size=self.img.size)\n\n \n widths = np.arange(1, width+1)\n\n \n sig1 = np.asarray(self.img.img_gray.getdata())\n sig2 = np.asarray(target.img_gray.getdata())\n\n \n cwtmatr1 = signal.cwt(sig1, signal.ricker, widths)\n cwtmatr2 = signal.cwt(sig2, signal.ricker, widths)\n\n \n c1c2 = np.multiply(abs(cwtmatr1), abs(cwtmatr2))\n c1_2 = np.square(abs(cwtmatr1))\n c2_2 = np.square(abs(cwtmatr2))\n num_ssim_1 = 2 * np.sum(c1c2, axis=0) + self.k\n den_ssim_1 = np.sum(c1_2, axis=0) + np.sum(c2_2, axis=0) + self.k\n\n \n c1c2_conj = np.multiply(cwtmatr1, np.conjugate(cwtmatr2))\n num_ssim_2 = 2 * np.abs(np.sum(c1c2_conj, axis=0)) + self.k\n den_ssim_2 = 2 * np.sum(np.abs(c1c2_conj), axis=0) + self.k\n\n \n ssim_map = (num_ssim_1 / den_ssim_1) * (num_ssim_2 / den_ssim_2)\n\n \n index = np.average(ssim_map)\n return index", "docstring": "Compute the complex wavelet SSIM (CW-SSIM) value from the reference\nimage to the target image.\n\nArgs:\ntarget (str or PIL.Image): Input image to compare the reference image\nto. This may be a PIL Image object or, to save time, an SSIMImage\nobject (e.g. 
the img member of another SSIM object).\nwidth: width for the wavelet convolution (default: 30)\n\nReturns:\nComputed CW-SSIM float value.", "source": "juraj-google-style"} {"code": "def __init__(self, station_code, DST=False):\n \n filename = env.WEATHER_DATA_PATH + '/' + _basename(station_code)\n self.csvfile = None\n try:\n self.csvfile = open(filename)\n except IOError:\n logger.info(\"File not found\")\n download_extract(_eere_url(station_code))\n self.csvfile = open(filename)\n logging.debug('opened %s', self.csvfile.name)\n fieldnames = [\"Year\", \"Month\", \"Day\", \"Hour\", \"Minute\", \"DS\",\n \"Dry-bulb (C)\", \"Dewpoint (C)\", \"Relative Humidity\",\n \"Pressure (Pa)\", \"ETR (W/m^2)\", \"ETRN (W/m^2)\",\n \"HIR (W/m^2)\", \"GHI (W/m^2)\", \"DNI (W/m^2)\",\n \"DHI (W/m^2)\", \"GHIL (lux)\", \"DNIL (lux)\", \"DFIL (lux)\",\n \"Zlum (Cd/m2)\", \"Wdir (degrees)\", \"Wspd (m/s)\",\n \"Ts cover\", \"O sky cover\", \"CeilHgt (m)\",\n \"Present Weather\", \"Pw codes\", \"Pwat (cm)\",\n \"AOD (unitless)\", \"Snow Depth (cm)\",\n \"Days since snowfall\"]\n station_meta = self.csvfile.readline().split(',')\n self.station_name = station_meta[1]\n self.CC = station_meta[3]\n self.station_fmt = station_meta[4]\n self.station_code = station_meta[5]\n self.lat = station_meta[6]\n self.lon = station_meta[7]\n self.TZ = float(station_meta[8])\n self.ELEV = station_meta[9]\n self.DST = DST\n\n if self.DST:\n geocoder = geocoders.GoogleV3()\n self.local_tz = pytz.timezone(geocoder.timezone((self.lat,\n self.lon)).zone)\n dummy = \"\"\n for _ in range(7):\n dummy += self.csvfile.readline()\n self.epw_data = csv.DictReader(self.csvfile, fieldnames=fieldnames)", "docstring": "Data for a weather station.\n\nArgs:\nstation_code (str): Station code of weather station\nDST (bool): Weather timestands in daylight savings. Default False", "source": "juraj-google-style"} {"code": "def _to_enos_networks(networks):\n \n nets = []\n for roles, network in networks:\n nets.append(network.to_enos(roles))\n logger.debug(nets)\n return nets", "docstring": "Transform the networks returned by deploy5k.\n\nArgs:\nnetworks (dict): networks returned by\n:py:func:`enoslib.infra.provider.Provider.init`", "source": "juraj-google-style"} {"code": "def get_escalatee(self, main_type, sub_type, unique_id, escalatee_id, params=None):\n \n params = params or {}\n\n return self.escalatee(main_type, sub_type, unique_id, escalatee_id, params=params)", "docstring": "Args:\nmain_type:\nsub_type:\nunique_id:\nescalatee_id:\nparams:\n\nReturn:", "source": "juraj-google-style"} {"code": "def merge_caches_on_tpu(self, local_tpu_cache_tensor):\n x = array_ops.broadcast_to(local_tpu_cache_tensor, shape=[self._tt_config.num_replicas] + local_tpu_cache_tensor.shape.as_list())\n if tensor_tracer_flags.TT_SINGLE_CORE_SUMMARIES.value:\n return x\n return tpu_ops.all_to_all(x, concat_dimension=0, split_dimension=0, split_count=self._tt_config.num_replicas, group_assignment=[list(range(self._tt_config.num_replicas))])", "docstring": "Merges the given caches on tpu.\n\nArgs:\nlocal_tpu_cache_tensor: A local tensor that needs to be merged\nby concanting data from other tpu cores.\nReturns:\nA merged tf.Tensor.", "source": "github-repos"} {"code": "class GaussianDropout(layers.Layer):\n\n def __init__(self, rate, seed=None, **kwargs):\n super().__init__(**kwargs)\n if not 0 <= rate <= 1:\n raise ValueError(f'Invalid value received for argument `rate`. Expected a float value between 0 and 1. 
Received: rate={rate}')\n self.rate = rate\n self.seed = seed\n if rate > 0:\n self.seed_generator = backend.random.SeedGenerator(seed)\n self.supports_masking = True\n self._build_at_init()\n\n def call(self, inputs, training=False):\n if training and self.rate > 0:\n stddev = math.sqrt(self.rate / (1.0 - self.rate))\n return inputs * backend.random.normal(shape=ops.shape(inputs), mean=1.0, stddev=stddev, dtype=self.compute_dtype, seed=self.seed_generator)\n return inputs\n\n def compute_output_shape(self, input_shape):\n return input_shape\n\n def get_config(self):\n base_config = super().get_config()\n config = {'rate': self.rate, 'seed': self.seed}\n return {**base_config, **config}", "docstring": "Apply multiplicative 1-centered Gaussian noise.\n\nAs it is a regularization layer, it is only active at training time.\n\nArgs:\nrate: Float, drop probability (as with `Dropout`).\nThe multiplicative noise will have\nstandard deviation `sqrt(rate / (1 - rate))`.\nseed: Integer, optional random seed to enable deterministic behavior.\n\nCall arguments:\ninputs: Input tensor (of any rank).\ntraining: Python boolean indicating whether the layer should behave in\ntraining mode (adding dropout) or in inference mode (doing nothing).", "source": "github-repos"} {"code": "def _DownloadUrl(self, url, dest_dir):\n dest_file = tempfile.NamedTemporaryFile(dir=dest_dir, delete=False)\n dest_file.close()\n dest = dest_file.name\n self.logger.info('Downloading url from %s to %s.', url, dest)\n try:\n urlretrieve.urlretrieve(url, dest)\n return dest\n except (httpclient.HTTPException, socket.error, urlerror.URLError) as e:\n self.logger.warning('Could not download %s. %s.', url, str(e))\n except Exception as e:\n self.logger.warning('Exception downloading %s. %s.', url, str(e))\n return None", "docstring": "Download a script from a given URL.\n\nArgs:\nurl: string, the URL to download.\ndest_dir: string, the path to a directory for storing metadata scripts.\n\nReturns:\nstring, the path to the file storing the metadata script.", "source": "codesearchnet"} {"code": "def get_metrics_namespace(self) -> str:\n return 'BeamML_HuggingFacePipelineModelHandler'", "docstring": "Returns:\nA namespace for metrics collected by the RunInference transform.", "source": "github-repos"} {"code": "def apply_operation_back(self, op, qargs=None, cargs=None, condition=None):\n qargs = (qargs or [])\n cargs = (cargs or [])\n all_cbits = self._bits_in_condition(condition)\n all_cbits.extend(cargs)\n self._check_condition(op.name, condition)\n self._check_bits(qargs, self.output_map)\n self._check_bits(all_cbits, self.output_map)\n self._add_op_node(op, qargs, cargs, condition)\n al = [qargs, all_cbits]\n for q in itertools.chain(*al):\n ie = list(self._multi_graph.predecessors(self.output_map[q]))\n if (len(ie) != 1):\n raise DAGCircuitError('output node has multiple in-edges')\n self._multi_graph.add_edge(ie[0], self._id_to_node[self._max_node_id], name=('%s[%s]' % (q[0].name, q[1])), wire=q)\n self._multi_graph.remove_edge(ie[0], self.output_map[q])\n self._multi_graph.add_edge(self._id_to_node[self._max_node_id], self.output_map[q], name=('%s[%s]' % (q[0].name, q[1])), wire=q)\n return self._id_to_node[self._max_node_id]", "docstring": "Apply an operation to the output of the circuit.\n\nArgs:\nop (Instruction): the operation associated with the DAG node\nqargs (list[tuple]): qubits that op will be applied to\ncargs (list[tuple]): cbits that op will be applied to\ncondition (tuple or None): optional condition 
(ClassicalRegister, int)\n\nReturns:\nDAGNode: the current max node\n\nRaises:\nDAGCircuitError: if a leaf node is connected to multiple outputs", "source": "codesearchnet"} {"code": "def markers(self, values):\n \n if not isinstance(values, list):\n raise TypeError(\"Markers must be a list of objects\")\n\n self.options[\"markers\"] = values", "docstring": "Set the markers.\n\nArgs:\nvalues (list): list of marker objects.\n\nRaises:\nValueError: Markers must be a list of objects.", "source": "juraj-google-style"} {"code": "def read_profile(name):\n \n config = configparser.ConfigParser()\n config.read(CONFIG_FILE)\n profile = config[name]\n repo = profile[\"repo\"]\n token = profile[\"token\"]\n return {\"repo\": repo, \"token\": token}", "docstring": "Get a named profile from the CONFIG_FILE.\n\nArgs:\n\nname\nThe name of the profile to load.\n\nReturns:\nA dictionary with the profile's ``repo`` and ``token`` values.", "source": "juraj-google-style"} {"code": "def _init_net_specs(conf):\n for (net_name, net_spec) in conf.get('nets', {}).items():\n net_spec['name'] = net_name\n net_spec['mapping'] = {}\n net_spec.setdefault('type', 'nat')\n return conf", "docstring": "Given a configuration specification, initializes all the net\ndefinitions in it so they can be used comfortably\n\nArgs:\nconf (dict): Configuration specification\n\nReturns:\ndict: the adapted new conf", "source": "codesearchnet"} {"code": "def open(self, filepath):\n \n with io.open(filepath, 'r', encoding='utf-8') as fp:\n content = fp.read()\n return content", "docstring": "Open settings backend to return its content\n\nArgs:\nfilepath (str): Settings object, depends from backend\n\nReturns:\nstring: File content.", "source": "juraj-google-style"} {"code": "def generate_code(max_length, max_nest, ops):\n stack = []\n\n def fetch_one():\n if stack:\n return stack.pop()\n else:\n value = random.randint((10 ** (max_length - 1)), ((10 ** max_length) - 1))\n code = str(value)\n return (value, code)\n\n def fetch(num_operands):\n (values, codes) = zip(*[fetch_one() for _ in six.moves.range(num_operands)])\n return (values, codes)\n for _ in six.moves.range(max_nest):\n op = random.choice(ops)\n (values, codes) = fetch(op.num_operands)\n new_value = op.eval(values)\n new_code = op.get_code(codes)\n stack.append((new_value, (('(' + new_code) + ')')))\n (final_value, final_code) = stack.pop()\n final_code = final_code[1:(- 1)]\n final_code.strip('()')\n if (not op.is_memory):\n final_value = (int(final_value) % (10 ** (max_length + 1)))\n return (str(final_value), final_code)", "docstring": "Generates code samples.\n\nArgs:\nmax_length: int. max literal length.\nmax_nest: int. max nesting level.\nops: CodeOp. set of allowable operations.\n\nReturns:\n1. (str) output value.\n2. 
(str) Code operation.", "source": "codesearchnet"} {"code": "def events(self, institute, case=None, variant_id=None, level=None, comments=False, panel=None):\n query = {}\n if variant_id:\n if comments:\n LOG.debug('Fetching all comments for institute {0} case {1} variant {2}'.format(institute['_id'], case['_id'], variant_id))\n query = {'$or': [{'category': 'variant', 'variant_id': variant_id, 'verb': 'comment', 'level': 'global'}, {'category': 'variant', 'variant_id': variant_id, 'institute': institute['_id'], 'case': case['_id'], 'verb': 'comment', 'level': 'specific'}]}\n else:\n query['institute'] = institute['_id']\n query['category'] = 'variant'\n query['variant_id'] = variant_id\n query['case'] = case['_id']\n else:\n query['institute'] = institute['_id']\n if panel:\n query['panel'] = panel\n else:\n query['category'] = 'case'\n if case:\n query['case'] = case['_id']\n if comments:\n query['verb'] = 'comment'\n return self.event_collection.find(query).sort('created_at', pymongo.DESCENDING)", "docstring": "Fetch events from the database.\n\nArgs:\ninstitute (dict): A institute\ncase (dict): A case\nvariant_id (str, optional): global variant id\nlevel (str, optional): restrict comments to 'specific' or 'global'\ncomments (bool, optional): restrict events to include only comments\npanel (str): A panel name\n\nReturns:\npymongo.Cursor: Query result", "source": "codesearchnet"} {"code": "def _assert_validators(self, validators):\n for validator in sorted(validators, key=(lambda validator: validator.insertion_index)):\n try:\n validator.verify(self)\n except _exceptions.ValidationError as e:\n message = validator.print_flags_with_values(self)\n raise _exceptions.IllegalFlagValueError(('%s: %s' % (message, str(e))))", "docstring": "Asserts if all validators in the list are satisfied.\n\nIt asserts validators in the order they were created.\n\nArgs:\nvalidators: Iterable(validators.Validator), validators to be\nverified.\nRaises:\nAttributeError: Raised if validators work with a non-existing flag.\nIllegalFlagValueError: Raised if validation fails for at least one\nvalidator.", "source": "codesearchnet"} {"code": "def _freeze_keras_model(self):\n input_signature = None\n if not isinstance(self._keras_model.call, _def_function.Function):\n input_signature = _model_input_signature(self._keras_model, keep_original_batch_size=True)\n func = _trace_model_call(self._keras_model, input_signature)\n concrete_func = func.get_concrete_function()\n self._funcs = [concrete_func]\n frozen_func, graph_def = _convert_to_constants.convert_variables_to_constants_v2_as_graph(self._funcs[0], lower_control_flow=False)\n input_tensors = [tensor for tensor in frozen_func.inputs if tensor.dtype != _dtypes.resource]\n output_tensors = frozen_func.outputs\n return (graph_def, input_tensors, output_tensors, frozen_func)", "docstring": "Freeze Keras model to frozen graph.\n\nReturns:\ngraph_def: The frozen GraphDef.\ninput_tensors: List of input tensors.\noutput_tensors: List of output tensors.\nfrozen_func: The frozen ConcreteFunction.", "source": "github-repos"} {"code": "def _testSimpleHelper(self, dtype, test_cases):\n current_test_case = []\n dataset = dataset_ops.Dataset.from_generator(lambda: current_test_case, dtype).unique()\n for test_case, expected in test_cases:\n current_test_case = test_case\n self.assertDatasetProduces(dataset, [compat.as_bytes(element) if dtype == dtypes.string else element for element in expected])", "docstring": "Test the `unique()` transformation on a list of test 
cases.\n\nArgs:\ndtype: The `dtype` of the elements in each test case.\ntest_cases: A list of pairs of lists. The first component is the test\ninput that will be passed to the transformation; the second component is\nthe expected sequence of outputs from the transformation.", "source": "github-repos"} {"code": "def tv(self, **kwargs):\n path = self._get_path('tv')\n response = self._GET(path, kwargs)\n self._set_attrs_to_values(response)\n return response", "docstring": "Search for TV shows by title.\n\nArgs:\nquery: CGI escaped string.\npage: (optional) Minimum value of 1. Expected value is an integer.\nlanguage: (optional) ISO 639-1 code.\nfirst_air_date_year: (optional) Filter the results to only match\nshows that have an air date with this value.\nsearch_type: (optional) By default, the search type is 'phrase'.\nThis is almost guaranteed the option you will want.\nIt's a great all purpose search type and by far the\nmost tuned for every day querying. For those wanting\nmore of an \"autocomplete\" type search, set this\noption to 'ngram'.\n\nReturns:\nA dict representation of the JSON returned from the API.", "source": "codesearchnet"} {"code": "def remove_accent_marks(text, excluded=None):\n \n if excluded is None:\n excluded = set()\n\n return unicodedata.normalize(\n 'NFKC', ''.join(\n c for c in unicodedata.normalize(\n 'NFKD', text) if unicodedata.category(c) != 'Mn' or c in excluded))", "docstring": "Remove accent marks from input text.\n\nThis function removes accent marks in the text, but leaves\nunicode characters defined in the 'excluded' parameter.\n\nArgs:\ntext: The text to be processed.\nexcluded: Set of unicode characters to exclude.\n\nReturns:\nThe text without accent marks.", "source": "juraj-google-style"} {"code": "def _get_extras(self):\n extra_parts = ['']\n for value in self._unknown_keys.values():\n extra_parts.append(value)\n extra_parts.append(self._known_keys[_InstrumentationKnownStatusKeys.STREAM])\n extra_parts.append(self._known_keys[_InstrumentationKnownResultKeys.SHORTMSG])\n extra_parts.append(self._known_keys[_InstrumentationKnownResultKeys.LONGMSG])\n extra_parts.append(self._known_keys[_InstrumentationKnownStatusKeys.ERROR])\n if self._known_keys[_InstrumentationKnownStatusKeys.STACK] not in self._known_keys[_InstrumentationKnownStatusKeys.STREAM]:\n extra_parts.append(self._known_keys[_InstrumentationKnownStatusKeys.STACK])\n return '\\n'.join(filter(None, extra_parts))", "docstring": "Gets the output for the extras section of the TestResultRecord.\n\nReturns:\nA string to set for a TestResultRecord's extras.", "source": "github-repos"} {"code": "def resolve_lookups(variable, context, provider):\n resolved_lookups = {}\n for lookup in variable.lookups:\n try:\n handler = LOOKUP_HANDLERS[lookup.type]\n except KeyError:\n raise UnknownLookupType(lookup)\n try:\n resolved_lookups[lookup] = handler(value=lookup.input, context=context, provider=provider)\n except Exception as e:\n raise FailedVariableLookup(variable.name, lookup, e)\n return resolved_lookups", "docstring": "Resolve a set of lookups.\n\nArgs:\nvariable (:class:`stacker.variables.Variable`): The variable resolving\nits lookups.\ncontext (:class:`stacker.context.Context`): stacker context\nprovider (:class:`stacker.provider.base.BaseProvider`): subclass of the\nbase provider\n\nReturns:\ndict: dict of Lookup -> resolved value", "source": "codesearchnet"} {"code": "def sequence_beam_search(symbols_to_logits_fn, initial_ids, initial_cache, vocab_size, beam_size, alpha, max_decode_length,
eos_id):\n batch_size = tf.shape(initial_ids)[0]\n sbs = SequenceBeamSearch(symbols_to_logits_fn, vocab_size, batch_size, beam_size, alpha, max_decode_length, eos_id)\n return sbs.search(initial_ids, initial_cache)", "docstring": "Search for sequence of subtoken ids with the largest probability.\n\nArgs:\nsymbols_to_logits_fn: A function that takes in ids, index, and cache as\narguments. The passed in arguments will have shape:\nids -> [batch_size * beam_size, index]\nindex -> [] (scalar)\ncache -> nested dictionary of tensors [batch_size * beam_size, ...]\nThe function must return logits and new cache.\nlogits -> [batch * beam_size, vocab_size]\nnew cache -> same shape/structure as inputted cache\ninitial_ids: Starting ids for each batch item.\nint32 tensor with shape [batch_size]\ninitial_cache: dict containing starting decoder variables information\nvocab_size: int size of tokens\nbeam_size: int number of beams\nalpha: float defining the strength of length normalization\nmax_decode_length: maximum length to decoded sequence\neos_id: int id of eos token, used to determine when a sequence has finished\n\nReturns:\nTop decoded sequences [batch_size, beam_size, max_decode_length]\nsequence scores [batch_size, beam_size]", "source": "codesearchnet"} {"code": "def send(self, cumulative_counters=None, gauges=None, counters=None):\n if ((not gauges) and (not cumulative_counters) and (not counters)):\n return\n data = {'cumulative_counter': cumulative_counters, 'gauge': gauges, 'counter': counters}\n _logger.debug('Sending datapoints to SignalFx: %s', data)\n for (metric_type, datapoints) in data.items():\n if (not datapoints):\n continue\n if (not isinstance(datapoints, list)):\n raise TypeError('Datapoints not of type list %s', datapoints)\n for datapoint in datapoints:\n self._add_extra_dimensions(datapoint)\n self._add_to_queue(metric_type, datapoint)\n self._start_thread()", "docstring": "Send the given metrics to SignalFx.\n\nArgs:\ncumulative_counters (list): a list of dictionaries representing the\ncumulative counters to report.\ngauges (list): a list of dictionaries representing the gauges to\nreport.\ncounters (list): a list of dictionaries representing the counters\nto report.", "source": "codesearchnet"} {"code": "def short(self, url):\n \n url = self.clean_url(url)\n shorten_url = f'{self.api_url}v1/shorten'\n payload = {\n 'domain': getattr(self, 'domain', 'adf.ly'),\n 'advert_type': getattr(self, 'type', 'int'),\n 'group_id': getattr(self, 'group_id', None),\n 'key': self.api_key,\n 'user_id': self.user_id,\n 'url': url,\n }\n response = self._post(shorten_url, data=payload)\n if not response.ok:\n raise BadAPIResponseException(response.content)\n\n try:\n data = response.json()\n except json.decoder.JSONDecodeError:\n raise BadAPIResponseException('API response could not be decoded')\n\n if data.get('errors'):\n errors = ','.join(i['msg'] for i in data['errors'])\n raise ShorteningErrorException(errors)\n\n if not data.get('data'):\n raise BadAPIResponseException(response.content)\n\n return data['data'][0]['short_url']", "docstring": "Short implementation for Adf.ly\nArgs:\nurl: the URL you want to shorten\n\nReturns:\nA string containing the shortened URL\n\nRaises:\nBadAPIResponseException: If the data is malformed or we got a bad\nstatus code on API response\nShorteningErrorException: If the API Returns an error as response", "source": "juraj-google-style"} {"code": "def attention_lm_moe_prepare_decoder(targets, hparams):\n \n targets_pad_mask = 
common_attention.embedding_to_padding(targets)\n with tf.name_scope(\"pad_remover\"):\n \n \n \n pad_remover = expert_utils.PadRemover(targets_pad_mask)\n\n if hparams.prepend_mode == \"prepend_inputs_full_attention\":\n decoder_self_attention_bias = (\n common_attention.attention_bias_prepend_inputs_full_attention(\n targets_pad_mask))\n else:\n decoder_self_attention_bias = (\n common_attention.attention_bias_lower_triangle(tf.shape(targets)[1]))\n decoder_input = common_layers.shift_right_3d(targets)\n if hparams.pos == \"timing\":\n decoder_input = common_attention.add_timing_signal_1d(decoder_input)\n return (decoder_input, decoder_self_attention_bias, pad_remover)", "docstring": "Prepare one shard of the model for the decoder.\n\nArgs:\ntargets: a Tensor.\nhparams: run hyperparameters\n\nReturns:\ndecoder_input: a Tensor, bottom of decoder stack\ndecoder_self_attention_bias: a Tensor, containing large negative values\nto implement masked attention and possibly biases for diagonal alignments\npad_remover (expert_utils.PadRemover): an util object to remove padding", "source": "juraj-google-style"} {"code": "def add_error(self, error, critical=False):\n self.errors.append((error, critical))", "docstring": "Adds an error to the state.\n\nArgs:\nerror: The text that will be added to the error list.\ncritical: If set to True and the error is checked with check_errors, will\ndfTimewolf will abort.", "source": "codesearchnet"} {"code": "def ascii_tree(self, no_types: bool=False, val_count: bool=False) -> str:\n return self.schema._ascii_tree('', no_types, val_count)", "docstring": "Generate ASCII art representation of the schema tree.\n\nArgs:\nno_types: Suppress output of data type info.\nval_count: Show accumulated validation counts.\n\nReturns:\nString with the ASCII tree.", "source": "codesearchnet"} {"code": "def _ConvertToTimestamp(self, date, time, timezone):\n if ((not date) and (not time)):\n raise errors.TimestampError('Unable to extract timestamp from McAfee AV logline.')\n try:\n time_string = '{0:s} {1:s}'.format(date, time)\n except UnicodeDecodeError:\n raise errors.TimestampError('Unable to form a timestamp string.')\n return timelib.Timestamp.FromTimeString(time_string, timezone=timezone)", "docstring": "Converts date and time values into a timestamp.\n\nThe date and time are made up of two strings, the date and the time,\nseparated by a tab. The time is in local time. 
The month and day can\nbe either 1 or 2 characters long, e.g.: 7/30/2013\\\\t10:22:48 AM\n\nArgs:\ndate (str): date.\ntime (str): time.\ntimezone (pytz.timezone): timezone of the date and time.\n\nReturns:\nint: a timestamp integer containing the number of micro seconds since\nJanuary 1, 1970, 00:00:00 UTC.\n\nRaises:\nTimestampError: if the timestamp is badly formed or unable to transfer\nthe supplied date and time into a timestamp.", "source": "codesearchnet"} {"code": "def compute_ngrams(word_list, S=3, T=3):\n \n _ngrams = []\n if isinstance(word_list, str):\n word_list = [word_list]\n for word in word_list:\n for n in range(S, T+1):\n _ngrams += zip(*(word[i:] for i in range(n)))\n return [''.join(_ngram) for _ngram in _ngrams]", "docstring": "Compute NGrams in the word_list from [S-T)\nArgs:\nword_list (list): A list of words to compute ngram set from\nS (int): The smallest NGram (default=3)\nT (int): The biggest NGram (default=3)", "source": "juraj-google-style"} {"code": "def get_enterprise_user_id(self, obj):\n \n \n \n enterprise_learner = EnterpriseCustomerUser.objects.filter(user_id=obj.id).first()\n\n return enterprise_learner and enterprise_learner.id", "docstring": "Get enterprise user id from user object.\n\nArguments:\nobj (User): Django User object\n\nReturns:\n(int): Primary Key identifier for enterprise user object.", "source": "juraj-google-style"} {"code": "def euler_angles_1q(unitary_matrix):\n \n if unitary_matrix.shape != (2, 2):\n raise QiskitError(\"euler_angles_1q: expected 2x2 matrix\")\n phase = la.det(unitary_matrix)**(-1.0/2.0)\n U = phase * unitary_matrix \n \n \n \n \n \n \n if abs(U[0, 0]) > _CUTOFF_PRECISION:\n theta = 2 * math.acos(abs(U[0, 0]))\n else:\n theta = 2 * math.asin(abs(U[1, 0]))\n \n phase11 = 0.0\n phase10 = 0.0\n if abs(math.cos(theta/2.0)) > _CUTOFF_PRECISION:\n phase11 = U[1, 1] / math.cos(theta/2.0)\n if abs(math.sin(theta/2.0)) > _CUTOFF_PRECISION:\n phase10 = U[1, 0] / math.sin(theta/2.0)\n phiplambda = 2 * math.atan2(np.imag(phase11), np.real(phase11))\n phimlambda = 2 * math.atan2(np.imag(phase10), np.real(phase10))\n phi = 0.0\n if abs(U[0, 0]) > _CUTOFF_PRECISION and abs(U[1, 0]) > _CUTOFF_PRECISION:\n phi = (phiplambda + phimlambda) / 2.0\n lamb = (phiplambda - phimlambda) / 2.0\n else:\n if abs(U[0, 0]) < _CUTOFF_PRECISION:\n lamb = -phimlambda\n else:\n lamb = phiplambda\n \n Rzphi = np.array([[np.exp(-1j*phi/2.0), 0],\n [0, np.exp(1j*phi/2.0)]], dtype=complex)\n Rytheta = np.array([[np.cos(theta/2.0), -np.sin(theta/2.0)],\n [np.sin(theta/2.0), np.cos(theta/2.0)]], dtype=complex)\n Rzlambda = np.array([[np.exp(-1j*lamb/2.0), 0],\n [0, np.exp(1j*lamb/2.0)]], dtype=complex)\n V = np.dot(Rzphi, np.dot(Rytheta, Rzlambda))\n if la.norm(V - U) > _CUTOFF_PRECISION:\n raise QiskitError(\"euler_angles_1q: incorrect result\")\n return theta, phi, lamb", "docstring": "Compute Euler angles for a single-qubit gate.\n\nFind angles (theta, phi, lambda) such that\nunitary_matrix = phase * Rz(phi) * Ry(theta) * Rz(lambda)\n\nArgs:\nunitary_matrix (ndarray): 2x2 unitary matrix\n\nReturns:\ntuple: (theta, phi, lambda) Euler angles of SU(2)\n\nRaises:\nQiskitError: if unitary_matrix not 2x2, or failure", "source": "juraj-google-style"} {"code": "def collective_manager_ids_from_op(op):\n if op.type == 'CollectiveReduce':\n try:\n return [op.get_attr('_collective_manager_id')]\n except ValueError:\n pass\n elif op.type == 'StatefulPartitionedCall':\n try:\n return op.get_attr(utils.COLLECTIVE_MANAGER_IDS)\n except ValueError:\n pass\n return []", 
"docstring": "Returns CollectiveManager ID from the op if one exists, else None.\n\nCollectiveManager adds collective and no_op operations tagged with an ID,\nunique to the manager object. This function extracts that ID, or None, if the\nnode was not generated by a CollectiveManager.\n\nArgs:\nop: `Operation` to get the collective manager ID from.\n\nReturns:\nList of CollectiveManager IDs used by the op.", "source": "github-repos"} {"code": "def start(authkey, queues, mode='local'):\n \n global mgr, qdict, kdict\n qdict.clear()\n kdict.clear()\n for q in queues:\n qdict[q] = JoinableQueue()\n\n TFManager.register('get_queue', callable=lambda qname: _get_queue(qname))\n TFManager.register('get', callable=lambda key: _get(key))\n TFManager.register('set', callable=lambda key, value: _set(key, value))\n if mode == 'remote':\n mgr = TFManager(address=('', 0), authkey=authkey)\n else:\n mgr = TFManager(authkey=authkey)\n mgr.start()\n return mgr", "docstring": "Create a new multiprocess.Manager (or return existing one).\n\nArgs:\n:authkey: string authorization key\n:queues: *INTERNAL_USE*\n:mode: 'local' indicates that the manager will only be accessible from the same host, otherwise remotely accessible.\n\nReturns:\nA TFManager instance, which is also cached in local memory of the Python worker process.", "source": "juraj-google-style"} {"code": "def _constant_to_value(self, pyval, subst, get_node):\n if isinstance(pyval, str):\n return self.build_concrete_value(pyval, str)\n elif isinstance(pyval, bytes):\n return self.build_concrete_value(pyval, bytes)\n elif isinstance(pyval, bool):\n return self.true if pyval else self.false\n elif isinstance(pyval, int) and -1 <= pyval <= _MAX_IMPORT_DEPTH:\n return self.build_concrete_value(pyval, int)\n elif pyval.__class__ in self.primitive_classes:\n return self.primitive_instances[pyval.__class__]\n elif pyval.__class__ is frozenset:\n return self._frozenset_literal_to_value(pyval)\n elif isinstance(pyval, (pycnite.types.CodeTypeBase, blocks.OrderedCode)):\n return abstract.ConcreteValue(pyval, self.primitive_classes[types.CodeType], self.ctx)\n elif pyval is super:\n return special_builtins.Super.make(self.ctx)\n elif pyval is object:\n return special_builtins.Object.make(self.ctx)\n elif pyval.__class__ is type:\n try:\n return self.lookup_value(*self._type_to_name(pyval), subst)\n except (KeyError, AttributeError):\n log.debug('Failed to find pytd', exc_info=True)\n raise\n elif isinstance(pyval, abstract_utils.AsInstance):\n cls = pyval.cls\n if isinstance(cls, pytd.LateType):\n actual = self._load_late_type(cls)\n if not isinstance(actual, pytd.ClassType):\n return self.unsolvable\n cls = actual.cls\n if isinstance(cls, pytd.ClassType):\n cls = cls.cls\n if isinstance(cls, pytd.GenericType) and cls.name == 'typing.ClassVar':\n param, = cls.parameters\n return self.constant_to_value(abstract_utils.AsInstance(param), subst)\n elif isinstance(cls, pytd.GenericType) or (isinstance(cls, pytd.Class) and cls.template):\n if isinstance(cls, pytd.Class):\n params = tuple((t.type_param.upper_value for t in cls.template))\n cls = pytd.GenericType(base_type=pytd.ClassType(cls.name, cls), parameters=params)\n return self._pytd_generic_type_to_instance_value(cls, subst, get_node)\n elif isinstance(cls, pytd.Class):\n return self._pytd_class_to_instance_value(cls, subst)\n elif isinstance(cls, pytd.Literal):\n return self._get_literal_value(cls.value, subst)\n else:\n return self.constant_to_value(cls, subst)\n elif isinstance(pyval, pytd.Node):\n return 
self._pytd_constant_to_value(pyval, subst, get_node)\n elif pyval.__class__ is tuple:\n return self._tuple_literal_to_value(pyval)\n else:\n raise NotImplementedError(f\"Can't convert constant {type(pyval)} {pyval!r}\")", "docstring": "Create a BaseValue that represents a python constant.\n\nThis supports both constant from code constant pools and PyTD constants such\nas classes. This also supports builtin python objects such as int and float.\n\nArgs:\npyval: The python or PyTD value to convert.\nsubst: The current type parameters.\nget_node: A getter function for the current node.\n\nReturns:\nA Value that represents the constant, or None if we couldn't convert.\nRaises:\nNotImplementedError: If we don't know how to convert a value.\nTypeParameterError: If we can't find a substitution for a type parameter.", "source": "github-repos"} {"code": "def _get_cached_response_from_django_cache(key):\n \n if TieredCache._should_force_django_cache_miss():\n return CachedResponse(is_found=False, key=key, value=None)\n\n cached_value = django_cache.get(key, _CACHE_MISS)\n is_found = cached_value is not _CACHE_MISS\n return CachedResponse(is_found, key, cached_value)", "docstring": "Retrieves a CachedResponse for the given key from the django cache.\n\nIf the request was set to force cache misses, then this will always\nreturn a cache miss response.\n\nArgs:\nkey (string)\n\nReturns:\nA CachedResponse with is_found status and value.", "source": "juraj-google-style"} {"code": "def equivalent_to(std_function):\n\n def decorate(cos_function):\n 'Decorator argument handler'\n\n @wraps(cos_function)\n def decorated(path, *args, **kwargs):\n 'Decorated function'\n path = fsdecode(path).replace('\\\\', '/')\n if is_storage(path):\n with handle_os_exceptions():\n return cos_function(path, *args, **kwargs)\n return std_function(path, *args, **kwargs)\n return decorated\n return decorate", "docstring": "Decorates a cloud object compatible function\nto provides fall back to standard function if\nused on local files.\n\nArgs:\nstd_function (function): standard function to\nused with local files.\n\nReturns:\nfunction: new function", "source": "codesearchnet"} {"code": "def StartsWithIgnoreCase(self, value):\n \n self._awql = self._CreateSingleValueCondition(value,\n 'STARTS_WITH_IGNORE_CASE')\n return self._query_builder", "docstring": "Sets the type of the WHERE clause as \"starts with ignore case\".\n\nArgs:\nvalue: The value to be used in the WHERE condition.\n\nReturns:\nThe query builder that this WHERE builder links to.", "source": "juraj-google-style"} {"code": "def get_geometry(self):\n return (self.thet0, self.thet, self.phi0, self.phi, self.alpha, self.beta)", "docstring": "A convenience function to get the geometry variables.\n\nReturns:\nA tuple containing (thet0, thet, phi0, phi, alpha, beta).\nSee the Scatterer class documentation for a description of these\nangles.", "source": "codesearchnet"} {"code": "def predict_proba(self, L):\n \n self._set_constants(L)\n\n L_aug = self._get_augmented_label_matrix(L)\n mu = np.clip(self.mu.detach().clone().numpy(), 0.01, 0.99)\n\n \n if len(self.deps) > 0:\n jtm = np.zeros(L_aug.shape[1])\n\n \n for i in self.c_tree.nodes():\n node = self.c_tree.node[i]\n jtm[node[\"start_index\"] : node[\"end_index\"]] = 1\n\n \n for i, j in self.c_tree.edges():\n edge = self.c_tree[i][j]\n jtm[edge[\"start_index\"] : edge[\"end_index\"]] = 1\n else:\n jtm = np.ones(L_aug.shape[1])\n\n \n X = np.exp(L_aug @ np.diag(jtm) @ np.log(mu) + np.log(self.p))\n Z = 
np.tile(X.sum(axis=1).reshape(-1, 1), self.k)\n return X / Z", "docstring": "Returns the [n,k] matrix of label probabilities P(Y | \\lambda)\n\nArgs:\nL: An [n,m] scipy.sparse label matrix with values in {0,1,...,k}", "source": "juraj-google-style"} {"code": "def get_points_and_weights(w_func=(lambda x: np.ones(x.shape)), left=(- 1.0), right=1.0, num_points=5, n=4096):\n dx = ((float(right) - left) / n)\n z = np.hstack(np.linspace((left + (0.5 * dx)), (right - (0.5 * dx)), n))\n w = (dx * w_func(z))\n (a, b) = discrete_gautschi(z, w, num_points)\n alpha = a\n beta = np.sqrt(b)\n J = np.diag(alpha)\n J += np.diag(beta, k=(- 1))\n J += np.diag(beta, k=1)\n (points, v) = np.linalg.eigh(J)\n ind = points.argsort()\n points = points[ind]\n weights = ((v[(0, :)] ** 2) * w.sum())\n weights = weights[ind]\n return (points, weights)", "docstring": "Quadratude points and weights for a weighting function.\n\nPoints and weights for approximating the integral\nI = \\int_left^right f(x) w(x) dx\ngiven the weighting function w(x) using the approximation\nI ~ w_i f(x_i)\n\nArgs:\nw_func: The weighting function w(x). Must be a function that takes\none argument and is valid over the open interval (left, right).\nleft: The left boundary of the interval\nright: The left boundary of the interval\nnum_points: number of integration points to return\nn: the number of points to evaluate w_func at.\n\nReturns:\nA tuple (points, weights) where points is a sorted array of the\npoints x_i and weights gives the corresponding weights w_i.", "source": "codesearchnet"} {"code": "def _matches_version(actual_version, required_version):\n if actual_version is None:\n return False\n actual_version = actual_version.strip()\n required_version = required_version.strip()\n return actual_version.startswith(required_version)", "docstring": "Checks whether some version meets the requirements.\n\nAll elements of the required_version need to be present in the\nactual_version.\n\nrequired_version actual_version result\n-----------------------------------------\n1 1.1 True\n1.2 1 False\n1.2 1.3 False\n1 True\n\nArgs:\nrequired_version: The version specified by the user.\nactual_version: The version detected from the CUDA installation.\nReturns: Whether the actual version matches the required one.", "source": "github-repos"} {"code": "def normalize(self, mode='max', value=1):\n if (mode.lower() == 'sum'):\n factor = np.sum(self.y, axis=0)\n elif (mode.lower() == 'max'):\n factor = np.max(self.y, axis=0)\n else:\n raise ValueError(('Unsupported normalization mode %s!' % mode))\n self.y /= (factor / value)", "docstring": "Normalize the spectrum with respect to the sum of intensity\n\nArgs:\nmode (str): Normalization mode. Supported modes are \"max\" (set the\nmax y value to value, e.g., in XRD patterns), \"sum\" (set the\nsum of y to a value, i.e., like a probability density).\nvalue (float): Value to normalize to. 
Defaults to 1.", "source": "codesearchnet"} {"code": "def __init__(self, identifier=None):\n \n super(SessionCompletion, self).__init__()\n self.aborted = False\n self.analysis_reports_counter = None\n self.event_labels_counter = None\n self.identifier = identifier\n self.parsers_counter = None\n self.timestamp = None", "docstring": "Initializes a session completion attribute container.\n\nArgs:\nidentifier (Optional[str]): unique identifier of the session.\nThe identifier should match that of the corresponding\nsession start information.", "source": "juraj-google-style"} {"code": "def spawn(self, function, *args, **kwargs):\n assert (self.state != STOPPED), \"Can't spawn when process stopped\"\n spawned = Spawned(function, args, kwargs)\n self._spawned.append(spawned)\n self._spawn_count += 1\n if (self._spawn_count > SPAWN_CLEAR_COUNT):\n self._clear_spawn_list()\n return spawned", "docstring": "Runs the function in a worker thread, returning a Result object\n\nArgs:\nfunction: Function to run\nargs: Positional arguments to run the function with\nkwargs: Keyword arguments to run the function with\n\nReturns:\nSpawned: Something you can call wait(timeout) on to see when it's\nfinished executing", "source": "codesearchnet"} {"code": "def from_prev_calc(cls, prev_calc_dir, copy_wavecar=True, mode='DIAG', nbands_factor=5, ncores=16, **kwargs):\n (vasprun, outcar) = get_vasprun_outcar(prev_calc_dir)\n prev_incar = vasprun.incar\n structure = vasprun.final_structure\n nbands = int(vasprun.parameters['NBANDS'])\n if (mode.upper() == 'DIAG'):\n nbands = int((np.ceil(((nbands * nbands_factor) / ncores)) * ncores))\n files_to_transfer = {}\n if copy_wavecar:\n for fname in ('WAVECAR', 'WAVEDER', 'WFULL'):\n w = sorted(glob.glob(str((Path(prev_calc_dir) / (fname + '*')))))\n if w:\n if (fname == 'WFULL'):\n for f in w:\n fname = Path(f).name\n fname = fname.split('.')[0]\n files_to_transfer[fname] = f\n else:\n files_to_transfer[fname] = str(w[(- 1)])\n return cls(structure=structure, prev_incar=prev_incar, nbands=nbands, mode=mode, files_to_transfer=files_to_transfer, **kwargs)", "docstring": "Generate a set of Vasp input files for GW or BSE calculations from a\ndirectory of previous Exact Diag Vasp run.\n\nArgs:\nprev_calc_dir (str): The directory contains the outputs(\nvasprun.xml of previous vasp run.\ncopy_wavecar: Whether to copy the old WAVECAR, WAVEDER and\nassociated files. Defaults to True.\nmode (str): Supported modes are \"STATIC\", \"DIAG\" (default), \"GW\",\nand \"BSE\".\nnbands_factor (int): Multiplicative factor for NBANDS. Only applies\nif mode==\"DIAG\". Need to be tested for convergence.\nncores (int): numbers of cores you do calculations. VASP will alter\nNBANDS if it was not dividable by ncores. 
Only applies\nif mode==\"DIAG\".\n\\\\*\\\\*kwargs: All kwargs supported by MVLGWSet,\nother than structure, prev_incar and mode, which\nare determined from the prev_calc_dir.", "source": "codesearchnet"} {"code": "def lease(self, items):\n self._manager.leaser.add(items)\n self._manager.maybe_pause_consumer()", "docstring": "Add the given messages to lease management.\n\nArgs:\nitems(Sequence[LeaseRequest]): The items to lease.", "source": "codesearchnet"} {"code": "def create_unique_autosave_filename(self, filename, autosave_dir):\n \n basename = osp.basename(filename)\n autosave_filename = osp.join(autosave_dir, basename)\n if autosave_filename in self.name_mapping.values():\n counter = 0\n root, ext = osp.splitext(basename)\n while autosave_filename in self.name_mapping.values():\n counter += 1\n autosave_basename = '{}-{}{}'.format(root, counter, ext)\n autosave_filename = osp.join(autosave_dir, autosave_basename)\n return autosave_filename", "docstring": "Create unique autosave file name for specified file name.\n\nArgs:\nfilename (str): original file name\nautosave_dir (str): directory in which autosave files are stored", "source": "juraj-google-style"} {"code": "def select_action(self, state_key, next_action_list):\n \n if self.q_df is None or self.q_df.shape[0] == 0:\n return random.choice(next_action_list)\n\n next_action_b_df = self.__calculate_boltzmann_factor(state_key, next_action_list)\n\n if next_action_b_df.shape[0] == 1:\n return next_action_b_df[\"action_key\"].values[0]\n\n prob = np.random.random()\n next_action_b_df = next_action_b_df.sort_values(by=[\"boltzmann_factor\"])\n\n i = 0\n while prob > next_action_b_df.iloc[i, :][\"boltzmann_factor\"] + next_action_b_df.iloc[i + 1, :][\"boltzmann_factor\"]:\n i += 1\n if i + 1 >= next_action_b_df.shape[0]:\n break\n\n max_b_action_key = next_action_b_df.iloc[i, :][\"action_key\"]\n return max_b_action_key", "docstring": "Select action by Q(state, action).\n\nConcreat method for boltzmann distribution.\n\nArgs:\nstate_key: The key of state.\nnext_action_list: The possible action in `self.t+1`.\nIf the length of this list is 0, all action should be possible.\n\nReturns:\nThe key of action.", "source": "juraj-google-style"} {"code": "def DoesNotContain(self, value):\n \n self._awql = self._CreateSingleValueCondition(value, 'DOES_NOT_CONTAIN')\n return self._query_builder", "docstring": "Sets the type of the WHERE clause as \"does not contain\".\n\nArgs:\nvalue: The value to be used in the WHERE condition.\n\nReturns:\nThe query builder that this WHERE builder links to.", "source": "juraj-google-style"} {"code": "def date_to_integer(date):\n if (pd and isinstance(date, pd.Timestamp)):\n try:\n date = date.to_datetime64()\n except:\n date = date.to_datetime()\n if isinstance(date, np.datetime64):\n return date.astype('datetime64[ms]').astype(float)\n elif isinstance(date, cftime_types):\n return cftime_to_timestamp(date, 'ms')\n if hasattr(date, 'timetuple'):\n dt_int = (calendar.timegm(date.timetuple()) * 1000)\n else:\n raise ValueError('Datetime type not recognized')\n return dt_int", "docstring": "Converts support date types to milliseconds since epoch\n\nAttempts highest precision conversion of different datetime\nformats to milliseconds since the epoch (1970-01-01 00:00:00).\nIf datetime is a cftime with a non-standard calendar the\ncaveats described in hv.core.util.cftime_to_timestamp apply.\n\nArgs:\ndate: Date- or datetime-like object\n\nReturns:\nMilliseconds since 1970-01-01 00:00:00", "source": "codesearchnet"} 
{"code": "def poisson_source(rate, iterable, target):\n if (rate <= 0.0):\n raise ValueError('poisson_source rate {} is not positive'.format(rate))\n it = iter(iterable)\n for item in it:\n duration = random.expovariate(rate)\n sleep(duration)\n try:\n target.send(item)\n except StopIteration:\n return prepend(item, it)\n return empty_iter()", "docstring": "Send events at random times with uniform probability.\n\nArgs:\nrate: The average number of events to send per second.\niterable: A series of items which will be sent to the target one by one.\ntarget: The target coroutine or sink.\n\nReturns:\nAn iterator over any remaining items.", "source": "codesearchnet"} {"code": "async def _handle_conversation_delta(self, conversation):\n conv_id = conversation.conversation_id.id\n conv = self._conv_dict.get(conv_id, None)\n if (conv is None):\n (await self._get_or_fetch_conversation(conv_id))\n else:\n conv.update_conversation(conversation)", "docstring": "Receive Conversation delta and create or update the conversation.\n\nArgs:\nconversation: hangouts_pb2.Conversation instance\n\nRaises:\nNetworkError: A request to fetch the complete conversation failed.", "source": "codesearchnet"} {"code": "def open_street_map_geoloc_link(data):\n \n if isinstance(data, str):\n lat_lon = ip_geoloc(data)\n if lat_lon is None:\n return ''\n lat, lon = lat_lon\n else:\n lat, lon = data\n return 'https:\n '?query=%s%%2C%s", "docstring": "Get a link to open street map pointing on this IP's geolocation.\n\nArgs:\ndata (str/tuple): IP address or (latitude, longitude).\n\nReturns:\nstr: a link to open street map pointing on this IP's geolocation.", "source": "juraj-google-style"} {"code": "def extract_list_from_list_of_dict(list_of_dict, key):\n \n \n result = list()\n for dictionary in list_of_dict:\n result.append(dictionary[key])\n return result", "docstring": "Extract a list by looking up key in each member of a list of dictionaries\n\nArgs:\nlist_of_dict (List[DictUpperBound]): List of dictionaries\nkey (Any): Key to find in each dictionary\n\nReturns:\nList: List containing values returned from each dictionary", "source": "juraj-google-style"} {"code": "def DeleteRecords(cls, ids, token):\n \n with data_store.DB.GetMutationPool() as mutation_pool:\n mutation_pool.QueueDeleteRecords(ids)", "docstring": "Delete records identified by ids.\n\nArgs:\nids: A list of ids provided by ClaimRecords.\ntoken: The database access token to delete with.\n\nRaises:\nLockError: If the queue is not locked.", "source": "juraj-google-style"} {"code": "def days(start, end=None):\n \n return iterate.between(start, datetime.timedelta(days=1), end)", "docstring": "Iterate over the days between the given datetime_tzs.\n\nArgs:\nstart: datetime_tz to start from.\nend: (Optional) Date to end at, if not given the iterator will never\nterminate.\n\nReturns:\nAn iterator which generates datetime_tz objects a day apart.", "source": "juraj-google-style"} {"code": "def parse_genetic_models(models_info, case_id):\n \n genetic_models = []\n if models_info:\n for family_info in models_info.split(','):\n splitted_info = family_info.split(':')\n if splitted_info[0] == case_id:\n genetic_models = splitted_info[1].split('|')\n\n return genetic_models", "docstring": "Parse the genetic models entry of a vcf\n\nArgs:\nmodels_info(str): The raw vcf information\ncase_id(str)\n\nReturns:\ngenetic_models(list)", "source": "juraj-google-style"} {"code": "def unwrap(data_type):\n unwrapped_nullable = False\n unwrapped_alias = False\n while 
(is_alias(data_type) or is_nullable_type(data_type))):\n        if is_nullable_type(data_type):\n            unwrapped_nullable = True\n        if is_alias(data_type):\n            unwrapped_alias = True\n        data_type = data_type.data_type\n    return (data_type, unwrapped_nullable, unwrapped_alias)", "docstring": "Convenience method to unwrap all Aliases and Nullables from around a\nDataType. This checks for nullable wrapping aliases, as well as aliases\nwrapping nullables.\n\nArgs:\ndata_type (DataType): The target to unwrap.\n\nReturn:\nTuple[DataType, bool, bool]: The underlying data type; a bool that is\nset if a nullable was present; a bool that is set if an alias was\npresent.", "source": "codesearchnet"} {"code": "def line(self, x0, y0, x1, y1, char):\n        if (x0 > x1):\n            (x1, x0) = (x0, x1)\n            (y1, y0) = (y0, y1)\n        dx = (x1 - x0)\n        dy = (y1 - y0)\n        if ((dx == 0) and (dy == 0)):\n            self.point(x0, y0, char)\n        elif (abs(dx) >= abs(dy)):\n            for x in range(x0, (x1 + 1)):\n                if (dx == 0):\n                    y = y0\n                else:\n                    y = (y0 + int(round((((x - x0) * dy) / float(dx)))))\n                self.point(x, y, char)\n        elif (y0 < y1):\n            for y in range(y0, (y1 + 1)):\n                if (dy == 0):\n                    x = x0\n                else:\n                    x = (x0 + int(round((((y - y0) * dx) / float(dy)))))\n                self.point(x, y, char)\n        else:\n            for y in range(y1, (y0 + 1)):\n                if (dy == 0):\n                    x = x0\n                else:\n                    x = (x1 + int(round((((y - y1) * dx) / float(dy)))))\n                self.point(x, y, char)", "docstring": "Create a line on ASCII canvas.\n\nArgs:\nx0 (int): x coordinate where the line should start.\ny0 (int): y coordinate where the line should start.\nx1 (int): x coordinate where the line should end.\ny1 (int): y coordinate where the line should end.\nchar (str): character to draw the line with.", "source": "codesearchnet"} {"code": "def is_empty(self):\n    return self._empty", "docstring": "Determines whether or not anything has been parsed with this\ninstrumentation block.\n\nReturns:\nA boolean indicating whether or not this instrumentation block\nhas parsed and contains any output.", "source": "github-repos"} {"code": "def FromEvent(cls, service_event):\n    (_, _, name) = service_event.key_path.rpartition(WindowsService._REGISTRY_KEY_PATH_SEPARATOR)\n    service_type = service_event.regvalue.get('Type', '')\n    image_path = service_event.regvalue.get('ImagePath', '')\n    start_type = service_event.regvalue.get('Start', '')\n    service_dll = service_event.regvalue.get('ServiceDll', '')\n    object_name = service_event.regvalue.get('ObjectName', '')\n    if service_event.pathspec:\n      source = (service_event.pathspec.location, service_event.key_path)\n    else:\n      source = ('Unknown', 'Unknown')\n    return cls(name=name, service_type=service_type, image_path=image_path, start_type=start_type, object_name=object_name, source=source, service_dll=service_dll)", "docstring": "Creates a service object from an event.\n\nArgs:\nservice_event (EventObject): event to create a new service object from.\n\nReturns:\nWindowsService: service.", "source": "codesearchnet"} {"code": "def ptb_producer(raw_data, batch_size, num_steps, name=None):\n    with tf.name_scope(name, 'PTBProducer', [raw_data, batch_size, num_steps]):\n        raw_data = tf.convert_to_tensor(raw_data, name='raw_data', dtype=tf.int32)\n        data_len = tf.size(raw_data)\n        batch_len = (data_len // batch_size)\n        data = tf.reshape(raw_data[0:(batch_size * batch_len)], [batch_size, batch_len])\n        epoch_size = ((batch_len - 1) // num_steps)\n        assertion = tf.assert_positive(epoch_size, message='epoch_size == 0, decrease batch_size or num_steps')\n        with tf.control_dependencies([assertion]):\n            epoch_size = tf.identity(epoch_size, 
name='epoch_size')\n i = tf.train.range_input_producer(epoch_size, shuffle=False).dequeue()\n x = tf.strided_slice(data, [0, (i * num_steps)], [batch_size, ((i + 1) * num_steps)])\n x.set_shape([batch_size, num_steps])\n y = tf.strided_slice(data, [0, ((i * num_steps) + 1)], [batch_size, (((i + 1) * num_steps) + 1)])\n y.set_shape([batch_size, num_steps])\n return (x, y)", "docstring": "Iterate on the raw PTB data.\n\nThis chunks up raw_data into batches of examples and returns Tensors that\nare drawn from these batches.\n\nArgs:\nraw_data: one of the raw data outputs from ptb_raw_data.\nbatch_size: int, the batch size.\nnum_steps: int, the number of unrolls.\nname: the name of this operation (optional).\n\nReturns:\nA pair of Tensors, each shaped [batch_size, num_steps]. The second element\nof the tuple is the same data time-shifted to the right by one.\n\nRaises:\ntf.errors.InvalidArgumentError: if batch_size or num_steps are too high.", "source": "codesearchnet"} {"code": "def preconnect(self, size=-1):\n \n if size == -1 and self.max_size == -1:\n raise ClientError(\"size=-1 not allowed with pool max_size=-1\")\n limit = min(size, self.max_size) if size != -1 else self.max_size\n clients = yield [self.get_connected_client() for _ in range(0, limit)]\n for client in clients:\n self.release_client(client)", "docstring": "(pre)Connects some or all redis clients inside the pool.\n\nArgs:\nsize (int): number of redis clients to build and to connect\n(-1 means all clients if pool max_size > -1)\n\nRaises:\nClientError: when size == -1 and pool max_size == -1", "source": "juraj-google-style"} {"code": "def raster_reclassify(srcfile, v_dict, dstfile, gdaltype=GDT_Float32):\n src_r = RasterUtilClass.read_raster(srcfile)\n src_data = src_r.data\n dst_data = numpy.copy(src_data)\n if ((gdaltype == GDT_Float32) and (src_r.dataType != GDT_Float32)):\n gdaltype = src_r.dataType\n no_data = src_r.noDataValue\n new_no_data = DEFAULT_NODATA\n if (gdaltype in [GDT_Unknown, GDT_Byte, GDT_UInt16, GDT_UInt32]):\n new_no_data = 0\n if (not MathClass.floatequal(new_no_data, src_r.noDataValue)):\n if (src_r.noDataValue not in v_dict):\n v_dict[src_r.noDataValue] = new_no_data\n no_data = new_no_data\n for (k, v) in iteritems(v_dict):\n dst_data[(src_data == k)] = v\n RasterUtilClass.write_gtiff_file(dstfile, src_r.nRows, src_r.nCols, dst_data, src_r.geotrans, src_r.srs, no_data, gdaltype)", "docstring": "Reclassify raster by given classifier dict.\n\nArgs:\nsrcfile: source raster file.\nv_dict: classifier dict.\ndstfile: destination file path.\ngdaltype (:obj:`pygeoc.raster.GDALDataType`): GDT_Float32 as default.", "source": "codesearchnet"} {"code": "def get_embedded_object(self, signature_id):\n request = self._get_request()\n return request.get((self.EMBEDDED_OBJECT_GET_URL + signature_id))", "docstring": "Retrieves a embedded signing object\n\nRetrieves an embedded object containing a signature url that can be opened in an iFrame.\n\nArgs:\n\nsignature_id (str): The id of the signature to get a signature url for\n\nReturns:\nAn Embedded object", "source": "codesearchnet"} {"code": "def fastcc_consistent_subset(model, epsilon, solver):\n reaction_set = set(model.reactions)\n return reaction_set.difference(fastcc(model, epsilon, solver))", "docstring": "Return consistent subset of model.\n\nThe largest consistent subset is returned as\na set of reaction names.\n\nArgs:\nmodel: :class:`MetabolicModel` to solve.\nepsilon: Flux threshold value.\nsolver: LP solver instance to use.\n\nReturns:\nSet of reaction 
IDs in the consistent reaction subset.", "source": "codesearchnet"} {"code": "def clone(self, *args, **overrides):\n        clone = super(NdLayout, self).clone(*args, **overrides)\n        clone._max_cols = self._max_cols\n        clone.id = self.id\n        return clone", "docstring": "Clones the NdLayout, overriding data and parameters.\n\nArgs:\ndata: New data replacing the existing data\nshared_data (bool, optional): Whether to use existing data\nnew_type (optional): Type to cast object to\n*args: Additional arguments to pass to constructor\n**overrides: New keyword arguments to pass to constructor\n\nReturns:\nCloned NdLayout object", "source": "codesearchnet"} {"code": "def notify(self, subsystem, recipient, subject, body_html, body_text):\n        if (not re.match(self.validation, recipient, re.I)):\n            raise ValueError('Invalid recipient provided')\n        if recipient.startswith('#'):\n            target_type = 'channel'\n        elif (recipient.find('@') != (- 1)):\n            target_type = 'user'\n        else:\n            self.log.error('Unknown contact type for Slack: {}'.format(recipient))\n            return\n        try:\n            self._send_message(target_type=target_type, target=recipient, message=body_text, title=subject)\n        except SlackError as ex:\n            self.log.error('Failed sending message to {}: {}'.format(recipient, ex))", "docstring": "You can send messages either to channels and private groups by using the following formats\n\n#channel-name\n@username-direct-message\n\nArgs:\nsubsystem (`str`): Name of the subsystem originating the notification\nrecipient (`str`): Recipient\nsubject (`str`): Subject / title of the notification, not used for this notifier\nbody_html (`str`): HTML formatted version of the message, not used for this notifier\nbody_text (`str`): Text formatted version of the message\n\nReturns:\n`None`", "source": "codesearchnet"} {"code": "def cancel(**kwargs):\n    task_list = _query(**kwargs)\n    for task in task_list:\n        task.status = WorkQueue.CANCELED\n        task.finished = datetime.datetime.utcnow()\n        db.session.add(task)\n    return len(task_list)", "docstring": "Cancels work items based on their criteria.\n\nArgs:\n**kwargs: Same parameters as the query() method.\n\nReturns:\nThe number of tasks that were canceled.", "source": "codesearchnet"} {"code": "def __eq__(self, other: Any) -> bool:\n    if isinstance(other, str):\n      return self.path == other\n    return isinstance(other, KeyPath) and self.keys == other.keys", "docstring": "Equality check.\n\nArgs:\nother: A string or a KeyPath.\n\nReturns:\nWhether JSON-path representation (either absolute or relative form)\nof current path equals to other.", "source": "github-repos"} {"code": "def device_name(self):\n    return self._device_name", "docstring": "Name of the device that the tensor belongs to.\n\nReturns:\n(`str`) device name.", "source": "github-repos"} {"code": "def remove_unused_links(self, used):\n        unused = []\n        self._execute('SELECT * FROM {}'.format(self.LINK_STATE_TABLE))\n        for row in self.cursor:\n            (relpath, inode, mtime) = row\n            inode = self._from_sqlite(inode)\n            path = os.path.join(self.root_dir, relpath)\n            if (path in used):\n                continue\n            if (not os.path.exists(path)):\n                continue\n            actual_inode = get_inode(path)\n            (actual_mtime, _) = get_mtime_and_size(path)\n            if ((inode == actual_inode) and (mtime == actual_mtime)):\n                logger.debug(\"Removing '{}' as unused link.\".format(path))\n                remove(path)\n                unused.append(relpath)\n        for relpath in unused:\n            cmd = 'DELETE FROM {} WHERE path = \"{}\"'\n            self._execute(cmd.format(self.LINK_STATE_TABLE, relpath))", "docstring": "Removes all saved links except the ones that are 
used.\n\nArgs:\nused (list): list of used links that should not be removed.", "source": "codesearchnet"} {"code": "def allow_partial(allow: Optional[bool]=True) -> ContextManager[None]:\n return thread_local.thread_local_value_scope(_TLS_ALLOW_PARTIAL, allow, None)", "docstring": "Returns a context manager that allows partial values in scope.\n\nThis function is thread-safe and can be nested. In the nested use case, the\nallow flag of immediate parent context is effective.\n\nExample::\n\n@pg.members([\n('x', pg.typing.Int()),\n('y', pg.typing.Int())\n])\nclass A(pg.Object):\npass\n\nwith pg.allow_partial(True):\na = A(x=1) # Missing `y`, but OK\nwith pg.allow_partial(False):\na.rebind(x=pg.MISSING_VALUE) # NOT OK\na.rebind(x=pg.MISSING_VALUE) # OK\n\nArgs:\nallow: If True, allow partial symbolic values in scope.\nIf False, do not allow partial symbolic values in scope even if\nindividual objects allow so. If None, honor object-level\n`allow_partial` property.\n\nReturns:\nA context manager that allows/disallow partial symbolic values in scope.\nAfter leaving the scope, the `allow_partial` state of individual objects\nwill remain intact.", "source": "github-repos"} {"code": "def update_estimator_from_task(estimator, task_id, task_type):\n if (task_type is None):\n return\n if (task_type.lower() == 'training'):\n training_job = (\"{{ ti.xcom_pull(task_ids='%s')['Training']['TrainingJobName'] }}\" % task_id)\n job_name = training_job\n elif (task_type.lower() == 'tuning'):\n training_job = (\"{{ ti.xcom_pull(task_ids='%s')['Tuning']['BestTrainingJob']['TrainingJobName'] }}\" % task_id)\n job_name = (\"{{ ti.xcom_pull(task_ids='%s')['Tuning']['TrainingJobDefinition']['StaticHyperParameters']['sagemaker_job_name'].strip('%s') }}\" % (task_id, '\"'))\n else:\n raise ValueError(\"task_type must be either 'training', 'tuning' or None.\")\n estimator._current_job_name = training_job\n if isinstance(estimator, sagemaker.estimator.Framework):\n update_submit_s3_uri(estimator, job_name)", "docstring": "Update training job of the estimator from a task in the DAG\n\nArgs:\nestimator (sagemaker.estimator.EstimatorBase): The estimator to update\ntask_id (str): The task id of any airflow.contrib.operators.SageMakerTrainingOperator or\nairflow.contrib.operators.SageMakerTuningOperator that generates training jobs in the DAG.\ntask_type (str): Whether the task is from SageMakerTrainingOperator or SageMakerTuningOperator. 
Values can be\n'training', 'tuning' or None (which means training job is not from any task).", "source": "codesearchnet"} {"code": "def scale_translation(self, trans_scale_factor: float) -> Rigid:\n return self.apply_trans_fn(lambda t: t * trans_scale_factor)", "docstring": "Scales the translation by a constant factor.\n\nArgs:\ntrans_scale_factor:\nThe constant factor\nReturns:\nA transformation object with a scaled translation.", "source": "github-repos"} {"code": "def mac_app_exists(app):\n\t\n\n\tAPP_CHECK_APPLESCRIPT = \n\n\twith open('/tmp/app_check.AppleScript', 'w') as f:\n\t\tf.write(APP_CHECK_APPLESCRIPT % app)\n\n\tapp_check_proc = sp.Popen(\n\t\t['osascript', '-e', '/tmp/app_check.AppleScript'])\n\n\tif app_check_proc.wait() != 0:\n\t\treturn False\n\n\telse:\n\t\treturn True", "docstring": "Check if 'app' is installed (OS X).\n\nCheck if the given applications is installed on this OS X system.\n\nArgs:\napp (str): The application name.\n\nReturns:\nbool: Is the app installed or not?", "source": "juraj-google-style"} {"code": "def key_for_namespace(cls, namespace):\n if namespace:\n return model.Key(cls.KIND_NAME, namespace)\n else:\n return model.Key(cls.KIND_NAME, cls.EMPTY_NAMESPACE_ID)", "docstring": "Return the Key for a namespace.\n\nArgs:\nnamespace: A string giving the namespace whose key is requested.\n\nReturns:\nThe Key for the namespace.", "source": "codesearchnet"} {"code": "def RemoveKeywordsForName(self, name, keywords):\n data_store.DB.IndexRemoveKeywordsForName(self.urn, name, keywords)", "docstring": "Removes keywords for a name.\n\nArgs:\nname: A name which should not be associated with some keywords anymore.\nkeywords: A collection of keywords.", "source": "codesearchnet"} {"code": "def _verify_static_batch_size_equality(tensors, columns):\n expected_batch_size = None\n for i in range(0, len(tensors)):\n batch_size = tensor_shape.Dimension(tensor_shape.dimension_value(tensors[i].shape[0]))\n if batch_size.value is not None:\n if expected_batch_size is None:\n bath_size_column_index = i\n expected_batch_size = batch_size\n elif not expected_batch_size.is_compatible_with(batch_size):\n raise ValueError('Batch size (first dimension) of each feature must be same. Batch size of columns ({}, {}): ({}, {})'.format(columns[bath_size_column_index].name, columns[i].name, expected_batch_size, batch_size))", "docstring": "Verify equality between static batch sizes.\n\nArgs:\ntensors: iterable of input tensors.\ncolumns: Corresponding feature columns.\n\nRaises:\nValueError: in case of mismatched batch sizes.", "source": "github-repos"} {"code": "def usergroups_create(self, *, name: str, **kwargs) -> SlackResponse:\n \n self._validate_xoxp_token()\n kwargs.update({\"name\": name})\n return self.api_call(\"usergroups.create\", json=kwargs)", "docstring": "Create a User Group\n\nArgs:\nname (str): A name for the User Group. Must be unique among User Groups.\ne.g. 
'My Test Team'", "source": "juraj-google-style"} {"code": "def update_port_monitor(self, resource, timeout=(- 1)):\n data = resource.copy()\n if ('type' not in data):\n data['type'] = 'port-monitor'\n uri = '{}{}'.format(self.data['uri'], self.PORT_MONITOR_PATH)\n return self._helper.update(data, uri=uri, timeout=timeout)", "docstring": "Updates the port monitor configuration of a logical interconnect.\n\nArgs:\nresource: Port monitor configuration.\n\nReturns:\ndict: Port monitor configuration.", "source": "codesearchnet"} {"code": "def replaceext(filepath, new_ext):\n \n if new_ext and new_ext[0] != '.':\n new_ext = '.' + new_ext\n\n root, ext = os.path.splitext(safepath(filepath))\n return root + new_ext", "docstring": "Replace any existing file extension with a new one\n\nExample::\n\n>>> replaceext('/foo/bar.txt', 'py')\n'/foo/bar.py'\n>>> replaceext('/foo/bar.txt', '.doc')\n'/foo/bar.doc'\n\nArgs:\nfilepath (str, path): file path\nnew_ext (str): new file extension; if a leading dot is not included,\nit will be added.\n\nReturns:\nTuple[str]", "source": "juraj-google-style"} {"code": "def update_nsval(self, *, nsval: str=None, ns: str=None, val: str=None) -> None:\n if ((not (ns and val)) and nsval):\n (ns, val) = nsval.split(':', 1)\n elif ((not (ns and val)) and (not nsval)):\n log.error('Did not update NSArg - no ns:val or nsval provided')\n self.namespace = ns\n self.value = val", "docstring": "Update Namespace and valueast.\n\nArgs:\nnsval: e.g. HGNC:AKT1\nns: namespace\nval: value of entity", "source": "codesearchnet"} {"code": "def calc_copulas(self,\n output_file,\n model_names=(\"start-time\", \"translation-x\", \"translation-y\"),\n label_columns=(\"Start_Time_Error\", \"Translation_Error_X\", \"Translation_Error_Y\")):\n \n if len(self.data['train']) == 0:\n self.load_data()\n groups = self.data[\"train\"][\"member\"][self.group_col].unique()\n copulas = {}\n label_columns = list(label_columns)\n for group in groups:\n print(group)\n group_data = self.data[\"train\"][\"total_group\"].loc[\n self.data[\"train\"][\"total_group\"][self.group_col] == group]\n group_data = group_data.dropna()\n group_data.reset_index(drop=True, inplace=True)\n copulas[group] = {}\n copulas[group][\"mean\"] = group_data[label_columns].mean(axis=0).values\n copulas[group][\"cov\"] = np.cov(group_data[label_columns].values.T)\n copulas[group][\"model_names\"] = list(model_names)\n del group_data\n pickle.dump(copulas, open(output_file, \"w\"), pickle.HIGHEST_PROTOCOL)", "docstring": "Calculate a copula multivariate normal distribution from the training data for each group of ensemble members.\nDistributions are written to a pickle file for later use.\nArgs:\noutput_file: Pickle file\nmodel_names: Names of the tracking models\nlabel_columns: Names of the data columns used for labeling\nReturns:", "source": "juraj-google-style"} {"code": "def visibility(self, value=9999.0):\n \n if value is not None:\n try:\n value = float(value)\n except ValueError:\n raise ValueError('value {} need to be of type float '\n 'for field `visibility`'.format(value))\n\n self._visibility = value", "docstring": "Corresponds to IDD Field `visibility` This is the value for\nvisibility in km. 
(Horizontal visibility at the time indicated.)\n\nArgs:\nvalue (float): value for IDD Field `visibility`\nUnit: km\nMissing value: 9999.0\nif `value` is None it will not be checked against the\nspecification and is assumed to be a missing value\n\nRaises:\nValueError: if `value` is not a valid value", "source": "juraj-google-style"} {"code": "def _resize_for_patching(self, image: np.array, target_resolution: tuple, resample, input_data_format: ChannelDimension) -> np.array:\n new_height, new_width = get_patch_output_size(image, target_resolution, input_data_format)\n resized_image = resize(image, (new_height, new_width), resample=resample, input_data_format=input_data_format)\n return resized_image", "docstring": "Resizes an image to a target resolution while maintaining aspect ratio.\n\nArgs:\nimage (np.array):\nThe input image.\ntarget_resolution (tuple):\nThe target resolution (height, width) of the image.\nresample (`PILImageResampling`):\nResampling filter to use if resizing the image.\ninput_data_format (`ChannelDimension` or `str`):\nThe channel dimension format of the input image.\n\nReturns:\nnp.array: The resized and padded image.", "source": "github-repos"} {"code": "def get_template_edit_url(self, template_id):\n request = self._get_request()\n return request.get((self.EMBEDDED_TEMPLATE_EDIT_URL + template_id))", "docstring": "Retrieves a embedded template for editing\n\nRetrieves an embedded object containing a template url that can be opened in an iFrame.\n\nArgs:\n\ntemplate_id (str): The id of the template to get a signature url for\n\nReturns:\nAn Embedded object", "source": "codesearchnet"} {"code": "def find_next(self, *strings, **kwargs):\n \n start = kwargs.pop(\"start\", None)\n keys_only = kwargs.pop(\"keys_only\", False)\n staht = start if start is not None else self.cursor\n for start, stop in [(staht, len(self)), (0, staht)]:\n for i in range(start, stop):\n for string in strings:\n if string in self[i]:\n tup = (i, self[i])\n self.cursor = i + 1\n if keys_only: return i\n return tup", "docstring": "From the editor's current cursor position find the next instance of the\ngiven string.\n\nArgs:\nstrings (iterable): String or strings to search for\n\nReturns:\ntup (tuple): Tuple of cursor position and line or None if not found\n\nNote:\nThis function cycles the entire editor (i.e. cursor to length of\neditor to zero and back to cursor position).", "source": "juraj-google-style"} {"code": "def __init__(self, value: T, proxy: Optional[T]=None):\n if proxy is None:\n proxy = value\n super().__init__('constant', proxy)\n self._value = value", "docstring": "Initialize a constant expression.\n\nArgs:\nvalue: The constant value to be produced by this expression.\nproxy: (Optional) a proxy object with same type as `value` to use for\nrapid type checking at pipeline construction time. 
If not provided,\n`value` will be used directly.", "source": "github-repos"} {"code": "def decode_image_tokens(self, image_tokens: torch.Tensor):\n decoded_image = self.model.vqmodel.decode(image_tokens)\n decoded_image = decoded_image.permute(0, 2, 3, 1)\n return decoded_image", "docstring": "Decodes generated image tokens from language model to continuous pixel values\nwith VQGAN module via upsampling.\nArgs:\nimage_tokens (`torch.LongTensor` of shape `(batch_size, num_of_tokens)`):\nThe tensors corresponding to the input images.", "source": "github-repos"} {"code": "def key_vals_dict_to_tuple_list(key_vals_dict, fill=float('nan')):\n tuple_list = []\n if (not key_vals_dict):\n return tuple_list\n vlen = max([len(vs) for vs in itertools.chain(*key_vals_dict.values())])\n for (k, vs) in key_vals_dict.items():\n try:\n tuple_list.extend([((k + tuple(v)) + ((fill,) * (vlen - len(v)))) for v in vs])\n except TypeError:\n tuple_list.extend([(((k,) + tuple(v)) + ((fill,) * (vlen - len(v)))) for v in vs])\n return tuple_list", "docstring": "Convert ``key_vals_dict`` to `tuple_list``.\n\nArgs:\nkey_vals_dict (dict): The first parameter.\nfill: a value to fill missing data\n\nReturns:\nA list of tuples", "source": "codesearchnet"} {"code": "def save_publication(pub):\n _assert_obj_type(pub)\n _get_handler().store_object(pub)\n return pub.to_comm(light_request=True)", "docstring": "Save `pub` into database and into proper indexes.\n\nAttr:\npub (obj): Instance of the :class:`.DBPublication`.\n\nReturns:\nobj: :class:`.DBPublication` without data.\n\nRaises:\nInvalidType: When the `pub` is not instance of :class:`.DBPublication`.\nUnindexablePublication: When there is no index (property) which can be\nused to index `pub` in database.", "source": "codesearchnet"} {"code": "def __init__(self, watch_paths, on_changed=None, interval=1.0, recursive=True):\n \n if isinstance(watch_paths, basestring):\n watch_paths = [watch_paths]\n\n watch_paths = [os.path.abspath(path) for path in watch_paths]\n for path in watch_paths:\n if not os.path.exists(path) or not os.path.isdir(path):\n raise MissingFolderError(path)\n\n self.watch_paths = watch_paths\n self.interval = interval * 1000.0\n self.recursive = recursive\n self.periodic_callback = PeriodicCallback(self.check_fs_events, self.interval)\n self.on_changed = on_changed\n self.observer = Observer()\n for path in self.watch_paths:\n self.observer.schedule(\n WatcherEventHandler(self),\n path,\n self.recursive\n )\n self.started = False\n self.fs_event_queue = Queue()", "docstring": "Constructor.\n\nArgs:\nwatch_paths: A list of filesystem paths to watch for changes.\non_changed: Callback to call when one or more changes to the watch path are detected.\ninterval: The minimum interval at which to notify about changes (in seconds).\nrecursive: Should the watch path be monitored recursively for changes?", "source": "juraj-google-style"} {"code": "def hardware_status(self):\n stat = structs.JLinkHardwareStatus()\n res = self._dll.JLINKARM_GetHWStatus(ctypes.byref(stat))\n if (res == 1):\n raise errors.JLinkException('Error in reading hardware status.')\n return stat", "docstring": "Retrieves and returns the hardware status.\n\nArgs:\nself (JLink): the ``JLink`` instance\n\nReturns:\nA ``JLinkHardwareStatus`` describing the J-Link hardware.", "source": "codesearchnet"} {"code": "def set_acl(self, role, users):\n acl_updates = [{'user': user, 'role': role} for user in users]\n r = fapi.update_repository_method_acl(self.namespace, self.name, self.snapshot_id, 
acl_updates, self.api_url)\n fapi._check_response_code(r, 200)", "docstring": "Set permissions for this method.\n\nArgs:\nrole (str): Access level\none of {one of \"OWNER\", \"READER\", \"WRITER\", \"NO ACCESS\"}\nusers (list(str)): List of users to give role to", "source": "codesearchnet"} {"code": "def _entry_allocated_bitmap(self, entry_number):\n \n index, offset = divmod(entry_number, 8)\n return bool(self._bitmap[index] & (1 << offset))", "docstring": "Checks if a particular index is allocated.\n\nArgs:\nentry_number (int): Index to verify\n\nReturns:\nbool: True if it is allocated, False otherwise.", "source": "juraj-google-style"} {"code": "def expand_dims(a, axis):\n \n if hasattr(a, 'expand_dims') and hasattr(type(a), '__array_interface__'):\n return a.expand_dims(axis)\n else:\n return np.expand_dims(a, axis)", "docstring": "Insert a new axis, corresponding to a given position in the array shape\n\nArgs:\na (array_like): Input array.\naxis (int): Position (amongst axes) where new axis is to be inserted.", "source": "juraj-google-style"} {"code": "def fill(self, background_shape, img):\n background_shape = tuple(background_shape)\n return self._fill(background_shape, img)", "docstring": "Return a proper background image of background_shape, given img.\n\nArgs:\nbackground_shape (tuple): a shape (h, w)\nimg: an image\nReturns:\na background image", "source": "codesearchnet"} {"code": "def get_display_name(self, room=None):\n if room:\n try:\n return room.members_displaynames[self.user_id]\n except KeyError:\n return self.user_id\n if (not self.displayname):\n self.displayname = self.api.get_display_name(self.user_id)\n return (self.displayname or self.user_id)", "docstring": "Get this user's display name.\n\nArgs:\nroom (Room): Optional. When specified, return the display name of the user\nin this room.\n\nReturns:\nThe display name. Defaults to the user ID if not set.", "source": "codesearchnet"} {"code": "def resize(self, image: np.ndarray, size: Dict[str, int], resample: PILImageResampling=PILImageResampling.BICUBIC, data_format: Optional[Union[str, ChannelDimension]]=None, input_data_format: Optional[Union[str, ChannelDimension]]=None, **kwargs) -> np.ndarray:\n default_to_square = True\n if 'shortest_edge' in size:\n size = size['shortest_edge']\n default_to_square = False\n elif 'height' in size and 'width' in size:\n size = (size['height'], size['width'])\n else:\n raise ValueError(\"Size must contain either 'shortest_edge' or 'height' and 'width'.\")\n output_size = get_resize_output_image_size(image, size=size, default_to_square=default_to_square, input_data_format=input_data_format)\n return resize(image, size=output_size, resample=resample, data_format=data_format, input_data_format=input_data_format, **kwargs)", "docstring": "Resize an image. The shortest edge of the image is resized to size[\"shortest_edge\"], with the longest edge\nresized to keep the input aspect ratio.\n\nArgs:\nimage (`np.ndarray`):\nImage to resize.\nsize (`Dict[str, int]`):\nSize of the output image.\nresample (`PILImageResampling`, *optional*, defaults to `PILImageResampling.BICUBIC`):\nResampling filter to use when resiizing the image.\ndata_format (`str` or `ChannelDimension`, *optional*):\nThe channel dimension format of the image. If not provided, it will be the same as the input image.\ninput_data_format (`ChannelDimension` or `str`, *optional*):\nThe channel dimension format of the input image. 
If not provided, it will be inferred.", "source": "github-repos"} {"code": "def join(self):\n self._cluster.join()", "docstring": "Blocks until all the scheduled functions have finished execution.\n\nIf any previously scheduled function raises an error, `join` will fail by\nraising any one of those errors, and clear the errors collected so far. If\nthis happens, some of the previously scheduled functions may have not been\nexecuted. Users can call `fetch` on the returned\n`tf.distribute.experimental.coordinator.RemoteValue` to inspect if they have\nexecuted, failed, or cancelled. If some that have been cancelled need to be\nrescheduled, users should call `schedule` with the function again.\n\nWhen `join` returns or raises, it guarantees that there is no function that\nis still being executed.\n\nRaises:\nException: one of the exceptions caught by the coordinator by any\npreviously scheduled function since the last time an error was thrown or\nsince the beginning of the program.", "source": "github-repos"} {"code": "def set_tuple_shapes(self, tuple_shapes):\n if len(tuple_shapes) != self.number_of_tuple_elements:\n raise ValueError(f'tuple_shapes is {str(tuple_shapes)}, but must be a list of length {self.number_of_tuple_elements}')\n try:\n tuple_shapes = [tensor_shape.as_shape(shape) for shape in tuple_shapes]\n except (ValueError, TypeError) as e:\n raise TypeError(f'tuple_shapes is {str(tuple_shapes)}, but must be a list of elements each convertible to TensorShape: got error {str(e)}') from e\n if self._frozen:\n for frozen, updated in zip(self._tuple_shapes, tuple_shapes):\n if frozen != updated:\n raise ValueError(f'Trying to update InfeedQueue with frozen configuration with an incompatible shape. Frozen shapes are {str(self._tuple_shapes)}, updated shapes are {str(tuple_shapes)}')\n else:\n self._tuple_shapes = tuple_shapes\n self._validate()", "docstring": "Sets the shape of each element of the queue.\n\ntuple_shapes must be a list of length\nself.number_of_tuple_elements, and each element must be\nconvertible to a TensorShape.\n\nArgs:\ntuple_shapes: the shapes of each queue element.\n\nRaises:\nValueError: if tuple_shapes is not of length\nself.number_of_tuple_elements.\nTypeError: if an element of tuple_shapes cannot be converted to\na TensorShape.", "source": "github-repos"} {"code": "def selection_error_control(self, form_info):\n \n keys, names = self.return_selected_form_items(form_info['ChannelList'])\n chosen_channels_number = len(keys)\n\n if form_info['new_channel'] and chosen_channels_number < 2:\n return False, _(\n u\"You should choose at least two channel to merge operation at a new channel.\")\n elif form_info['existing_channel'] and chosen_channels_number == 0:\n return False, _(\n u\"You should choose at least one channel to merge operation with existing channel.\")\n elif form_info['find_chosen_channel'] and chosen_channels_number != 1:\n return False, _(u\"You should choose one channel for split operation.\")\n\n return True, None", "docstring": "It controls the selection from the form according\nto the operations, and returns an error message\nif it does not comply with the rules.\n\nArgs:\nform_info: Channel or subscriber form from the user\n\nReturns: True or False\nerror message", "source": "juraj-google-style"} {"code": "def unparse_range(obj):\n if isinstance(obj, (int, long)):\n return str(obj)\n if isinstance(obj, tuple):\n arg = (str(obj[0]) + '-')\n if (len(obj) > 1):\n arg += str(obj[1])\n return arg\n raise ValueError('Must be an integer or tuple')", 
"docstring": "Unparse a range argument.\n\nArgs:\nobj: An article range. There are a number of valid formats; an integer\nspecifying a single article or a tuple specifying an article range.\nIf the range doesn't give a start article then all articles up to\nthe specified last article are included. If the range doesn't\nspecify a last article then all articles from the first specified\narticle up to the current last article for the group are included.\n\nReturns:\nThe range as a string that can be used by an NNTP command.\n\nNote: Sample valid formats.\n4678\n(,5234)\n(4245,)\n(4245, 5234)", "source": "codesearchnet"} {"code": "def _scalar_operations(self, axis, scalar, func):\n if isinstance(scalar, (list, np.ndarray, pandas.Series)):\n new_index = (self.index if (axis == 0) else self.columns)\n\n def list_like_op(df):\n if (axis == 0):\n df.index = new_index\n else:\n df.columns = new_index\n return func(df)\n new_data = self._map_across_full_axis(axis, self._prepare_method(list_like_op))\n return self.__constructor__(new_data, self.index, self.columns)\n else:\n return self._map_partitions(self._prepare_method(func))", "docstring": "Handler for mapping scalar operations across a Manager.\n\nArgs:\naxis: The axis index object to execute the function on.\nscalar: The scalar value to map.\nfunc: The function to use on the Manager with the scalar.\n\nReturns:\nA new QueryCompiler with updated data and new index.", "source": "codesearchnet"} {"code": "def get_examples_per_second_hook(every_n_steps=100, batch_size=128, warm_steps=5, **kwargs):\n return hooks.ExamplesPerSecondHook(every_n_steps=every_n_steps, batch_size=batch_size, warm_steps=warm_steps)", "docstring": "Function to get ExamplesPerSecondHook.\n\nArgs:\nevery_n_steps: `int`, print current and average examples per second every\nN steps.\nbatch_size: `int`, total batch size used to calculate examples/second from\nglobal time.\nwarm_steps: skip this number of steps before logging and running average.\n**kwargs: a dictionary of arguments to ExamplesPerSecondHook.\n\nReturns:\nReturns a ProfilerHook that writes out timelines that can be loaded into\nprofiling tools like chrome://tracing.", "source": "codesearchnet"} {"code": "def add_test_class(self, clazz, config=None, tests=None, name_suffix=None):\n if self._test_selector:\n cls_name = clazz.__name__\n if (cls_name, name_suffix) in self._test_selector:\n tests = self._test_selector[cls_name, name_suffix]\n elif cls_name in self._test_selector:\n tests = self._test_selector[cls_name]\n else:\n logging.info('Skipping test class %s due to CLI argument `tests`.', cls_name)\n return\n if not config:\n config = self._config\n self._runner.add_test_class(config, clazz, tests, name_suffix)", "docstring": "Adds a test class to the suite.\n\nArgs:\nclazz: class, a Mobly test class.\nconfig: config_parser.TestRunConfig, the config to run the class with. If\nnot specified, the default config passed from google3 infra is used.\ntests: list of strings, names of the tests to run in this test class, in\nthe execution order. 
Or a string with prefix `re:` for full regex match\nof test cases; all matched test cases will be executed; an error is\nraised if no match is found.\nIf not specified, all tests in the class are executed.\nCLI argument `tests` takes precedence over this argument.\nname_suffix: string, suffix to append to the class name for reporting.\nThis is used for differentiating the same class executed with different\nparameters in a suite.", "source": "github-repos"} {"code": "def read(self, input_stream, kmip_version=enums.KMIPVersion.KMIP_1_0):\n super(ArchiveResponsePayload, self).read(input_stream, kmip_version=kmip_version)\n local_stream = utils.BytearrayStream(input_stream.read(self.length))\n if self.is_tag_next(enums.Tags.UNIQUE_IDENTIFIER, local_stream):\n self._unique_identifier = primitives.TextString(tag=enums.Tags.UNIQUE_IDENTIFIER)\n self._unique_identifier.read(local_stream, kmip_version=kmip_version)\n self.is_oversized(local_stream)", "docstring": "Read the data encoding the Archive response payload and decode it\ninto its constituent parts.\n\nArgs:\ninput_stream (stream): A data stream containing encoded object\ndata, supporting a read method; usually a BytearrayStream\nobject.\nkmip_version (KMIPVersion): An enumeration defining the KMIP\nversion with which the object will be decoded. Optional,\ndefaults to KMIP 1.0.\n\nRaises:\nValueError: Raised if the data attribute is missing from the\nencoded payload.", "source": "codesearchnet"} {"code": "def italic(self, action):\n \n if action =='on':\n action = '4'\n elif action=='off':\n action = '5'\n else:\n raise RuntimeError('Invalid action for function italic. Options are on and off')\n self.send(chr(27)+action)", "docstring": "Enable/cancel italic printing\n\nArgs:\naction: Enable or disable italic printing. 
Options are 'on' and 'off'\nReturns:\nNone\nRaises:\nRuntimeError: Invalid action.", "source": "juraj-google-style"} {"code": "def __init__(\n self,\n expr,\n grouping_column_names,\n column_names,\n grouping_column_types,\n column_types):\n \n self.expr = expr\n self.grouping_column_name = grouping_column_names\n self.column_names = column_names\n self.grouping_column_types = grouping_column_types\n self.column_types = column_types\n\n if isinstance(self.column_types, list):\n if len(self.column_types) == 1:\n column_types = self.column_types[0]\n else:\n column_types = WeldStruct(self.column_types)\n\n if len(self.grouping_column_types) == 1:\n grouping_column_types = self.grouping_column_types[0]\n else:\n grouping_column_types = WeldStruct(self.grouping_column_types)\n self.weld_type = WeldStruct([grouping_column_types, column_types])", "docstring": "Summary\n\nArgs:\nexpr (TYPE): Description\ngrouping_column_name (TYPE): Description\ncolumn_names (TYPE): Description\ngrouping_column_type (TYPE): Description\ncolumn_types (TYPE): Description", "source": "juraj-google-style"} {"code": "def __init__(self, tcex, main_type, api_type, sub_type, api_entity, owner):\n \n self._tcex = tcex\n self._data = {}\n\n self._owner = owner\n self._type = main_type\n self._api_sub_type = sub_type\n self._api_type = api_type\n self._unique_id = None\n self._api_entity = api_entity\n\n self._utils = TcExUtils()\n self._tc_requests = TiTcRequest(self._tcex)", "docstring": "Initialize Class Properties.\n\nArgs:\ntcex:\nmain_type:\napi_type:\nsub_type:\napi_entity:", "source": "juraj-google-style"} {"code": "def get(self, key):\n match = self._get_match(key=key)\n if (not match):\n return None\n return self._get_value_from_match(key=key, match=match)", "docstring": "Gets the value of the property of the given key.\n\nArgs:\nkey (str): Key of the property to look-up.", "source": "codesearchnet"} {"code": "def print_projects(projects=None):\n grouped_by = {}\n if (not projects):\n print(\"Your selection didn't include any projects for this experiment.\")\n return\n for name in projects:\n prj = projects[name]\n if (prj.GROUP not in grouped_by):\n grouped_by[prj.GROUP] = []\n grouped_by[prj.GROUP].append('{name}/{group}'.format(name=prj.NAME, group=prj.GROUP))\n for name in grouped_by:\n print('group: {0}'.format(name))\n group_projects = sorted(grouped_by[name])\n for prj in group_projects:\n prj_cls = projects[prj]\n version_str = None\n if hasattr(prj_cls, 'versions'):\n version_str = ', '.join(prj_cls.versions())\n project_id = '{0}/{1}'.format(prj_cls.NAME, prj_cls.GROUP)\n project_str = ' name: {id:<32} version: {version:<24} source: {src}'.format(id=str(project_id), version=str(prj_cls.VERSION), src=str(prj_cls.SRC_FILE))\n print(project_str)\n if prj_cls.__doc__:\n docstr = prj_cls.__doc__.strip('\\n ')\n print(' description: {desc}'.format(desc=docstr))\n if version_str:\n print(' versions: {versions}'.format(versions=version_str))\n print()", "docstring": "Print a list of projects registered for that experiment.\n\nArgs:\nexp: The experiment to print all projects for.", "source": "codesearchnet"} {"code": "def _CalculateElementsDataSize(self, context):\n \n elements_data_size = None\n\n if self._HasElementsDataSize():\n elements_data_size = self._EvaluateElementsDataSize(context)\n\n elif self._HasNumberOfElements():\n element_byte_size = self._element_data_type_definition.GetByteSize()\n if element_byte_size is not None:\n number_of_elements = self._EvaluateNumberOfElements(context)\n 
elements_data_size = number_of_elements * element_byte_size\n\n return elements_data_size", "docstring": "Calculates the elements data size.\n\nArgs:\ncontext (Optional[DataTypeMapContext]): data type map context, used to\ndetermine the size hint.\n\nReturns:\nint: the elements data size or None if not available.", "source": "juraj-google-style"} {"code": "def normalize_input(data: str) -> typing.Tuple[str, typing.Set[int]]:\n chunks = data.replace('\\n', utils.SEP).strip().split(utils.SEP)\n chunk_lengths = [len(chunk) for chunk in chunks]\n sep_indices = set(itertools.accumulate(chunk_lengths, lambda x, y: x + y))\n sentence = ''.join(chunks)\n return (sentence, sep_indices)", "docstring": "Normalizes the input to one line with separators.\n\nArgs:\ndata(str): Source input\n\nReturns:\ntyping.Tuple[str, typing.Set[int]]: A tuple of the sentence and the\nseparator indices.", "source": "github-repos"} {"code": "def delete(self, addon_id, data={}, **kwargs):\n return super(Addon, self).delete(addon_id, data, **kwargs)", "docstring": "Delete addon for given id\n\nArgs:\naddon_id : Id for which addon object has to be deleted", "source": "codesearchnet"} {"code": "def kill_all_processes(self, check_alive=True, allow_graceful=False):\n \n \n \n \n \n \n if ray_constants.PROCESS_TYPE_RAYLET in self.all_processes:\n self._kill_process_type(\n ray_constants.PROCESS_TYPE_RAYLET,\n check_alive=check_alive,\n allow_graceful=allow_graceful)\n\n \n \n for process_type in list(self.all_processes.keys()):\n self._kill_process_type(\n process_type,\n check_alive=check_alive,\n allow_graceful=allow_graceful)", "docstring": "Kill all of the processes.\n\nNote that This is slower than necessary because it calls kill, wait,\nkill, wait, ... instead of kill, kill, ..., wait, wait, ...\n\nArgs:\ncheck_alive (bool): Raise an exception if any of the processes were\nalready dead.", "source": "juraj-google-style"} {"code": "def sample(self, nmr_samples, burnin=0, thinning=1):\n if ((not thinning) or (thinning < 1)):\n thinning = 1\n if ((not burnin) or (burnin < 0)):\n burnin = 0\n max_samples_per_batch = max((1000 \n with self._logging(nmr_samples, burnin, thinning):\n if (burnin > 0):\n for (batch_start, batch_end) in split_in_batches(burnin, max_samples_per_batch):\n self._sample((batch_end - batch_start), return_output=False)\n if (nmr_samples > 0):\n outputs = []\n for (batch_start, batch_end) in split_in_batches(nmr_samples, max_samples_per_batch):\n outputs.append(self._sample((batch_end - batch_start), thinning=thinning))\n return SimpleSampleOutput(*[np.concatenate([o[ind] for o in outputs], axis=(- 1)) for ind in range(3)])", "docstring": "Take additional samples from the given likelihood and prior, using this sampler.\n\nThis method can be called multiple times in which the sample state is stored in between.\n\nArgs:\nnmr_samples (int): the number of samples to return\nburnin (int): the number of samples to discard before returning samples\nthinning (int): how many sample we wait before storing a new one. This will draw extra samples such that\nthe total number of samples generated is ``nmr_samples * (thinning)`` and the number of samples\nstored is ``nmr_samples``. 
If set to one or lower we store every sample after the burn in.\n\nReturns:\nSamplingOutput: the sample output object", "source": "codesearchnet"} {"code": "def _select_helper(args, kwargs):\n if (len(args) > 1):\n raise TypeError('select accepts at most ONE positional argument.')\n if ((len(args) > 0) and (len(kwargs) > 0)):\n raise TypeError('select accepts EITHER a positional argument, OR keyword arguments (not both).')\n if ((len(args) == 0) and (len(kwargs) == 0)):\n raise TypeError('select requires EITHER a positional argument, OR keyword arguments.')\n if args:\n arg = args[0]\n if isinstance(arg, dict):\n selector = arg\n elif isinstance(arg, string_types):\n selector = dict(name=arg)\n elif (isinstance(arg, type) and issubclass(arg, Model)):\n selector = {'type': arg}\n else:\n raise TypeError('selector must be a dictionary, string or plot object.')\n elif ('selector' in kwargs):\n if (len(kwargs) == 1):\n selector = kwargs['selector']\n else:\n raise TypeError(\"when passing 'selector' keyword arg, not other keyword args may be present\")\n else:\n selector = kwargs\n return selector", "docstring": "Allow flexible selector syntax.\n\nReturns:\ndict", "source": "codesearchnet"} {"code": "def find_sorted_task_dependencies(task, task_name, task_id):\n log.info('find_sorted_task_dependencies {} {}'.format(task_name, task_id))\n cot_input_dependencies = [_craft_dependency_tuple(task_name, task_type, task_id) for (task_type, task_id) in task['extra'].get('chainOfTrust', {}).get('inputs', {}).items()]\n upstream_artifacts_dependencies = [_craft_dependency_tuple(task_name, artifact_dict['taskType'], artifact_dict['taskId']) for artifact_dict in task.get('payload', {}).get('upstreamArtifacts', [])]\n dependencies = [*cot_input_dependencies, *upstream_artifacts_dependencies]\n dependencies = _sort_dependencies_by_name_then_task_id(dependencies)\n parent_task_id = (get_parent_task_id(task) or get_decision_task_id(task))\n parent_task_type = 'parent'\n parent_tuple = _craft_dependency_tuple(task_name, parent_task_type, parent_task_id)\n dependencies.insert(0, parent_tuple)\n log.info('found dependencies: {}'.format(dependencies))\n return dependencies", "docstring": "Find the taskIds of the chain of trust dependencies of a given task.\n\nArgs:\ntask (dict): the task definition to inspect.\ntask_name (str): the name of the task, for logging and naming children.\ntask_id (str): the taskId of the task.\n\nReturns:\nlist: tuples associating dependent task ``name`` to dependent task ``taskId``.", "source": "codesearchnet"} {"code": "def rabi_oscillations(sampler: sim.Sampler, qubit: devices.GridQubit, max_angle: float=(2 * np.pi), *, repetitions: int=1000, num_points: int=200) -> RabiResult:\n theta = sympy.Symbol('theta')\n circuit = circuits.Circuit.from_ops((ops.X(qubit) ** theta))\n circuit.append(ops.measure(qubit, key='z'))\n sweep = study.Linspace(key='theta', start=0.0, stop=(max_angle / np.pi), length=num_points)\n results = sampler.run_sweep(circuit, params=sweep, repetitions=repetitions)\n angles = np.linspace(0.0, max_angle, num_points)\n excited_state_probs = np.zeros(num_points)\n for i in range(num_points):\n excited_state_probs[i] = np.mean(results[i].measurements['z'])\n return RabiResult(angles, excited_state_probs)", "docstring": "Runs a Rabi oscillation experiment.\n\nRotates a qubit around the x-axis of the Bloch sphere by a sequence of Rabi\nangles evenly spaced between 0 and max_angle. 
For each rotation, repeat\nthe circuit a number of times and measure the average probability of the\nqubit being in the |1> state.\n\nArgs:\nsampler: The quantum engine or simulator to run the circuits.\nqubit: The qubit under test.\nmax_angle: The final Rabi angle in radians.\nrepetitions: The number of repetitions of the circuit for each Rabi\nangle.\nnum_points: The number of Rabi angles.\n\nReturns:\nA RabiResult object that stores and plots the result.", "source": "codesearchnet"} {"code": "def method_exists(cls, method):\n methods = cls.API_METHODS\n for key in method.split('.'):\n methods = methods.get(key)\n if (methods is None):\n break\n if isinstance(methods, str):\n logger.debug('%r: %r', method, methods)\n return True\n return False", "docstring": "Whether a given method exists in the known API.\n\nArguments:\nmethod (:py:class:`str`): The name of the method.\n\nReturns:\n:py:class:`bool`: Whether the method is in the known API.", "source": "codesearchnet"} {"code": "def _definition_from_example(example):\n \n assert isinstance(example, dict)\n\n def _has_simple_type(value):\n accepted = (str, int, float, bool)\n return isinstance(value, accepted)\n\n definition = {\n 'type': 'object',\n 'properties': {},\n }\n for key, value in example.items():\n if not _has_simple_type(value):\n raise Exception(\"Not implemented yet\")\n ret_value = None\n if isinstance(value, str):\n ret_value = {'type': 'string'}\n elif isinstance(value, int):\n ret_value = {'type': 'integer', 'format': 'int64'}\n elif isinstance(value, float):\n ret_value = {'type': 'number', 'format': 'double'}\n elif isinstance(value, bool):\n ret_value = {'type': 'boolean'}\n else:\n raise Exception(\"Not implemented yet\")\n definition['properties'][key] = ret_value\n\n return definition", "docstring": "Generates a swagger definition json from a given example\nWorks only for simple types in the dict\n\nArgs:\nexample: The example for which we want a definition\nType is DICT\n\nReturns:\nA dict that is the swagger definition json", "source": "juraj-google-style"} {"code": "def get_integrated_diff(self, ind, radius, nbins=1):\n if (not self.is_spin_polarized):\n radii = [((radius / nbins) * (i + 1)) for i in range(nbins)]\n data = np.zeros((nbins, 2))\n data[(:, 0)] = radii\n return data\n struct = self.structure\n a = self.dim\n if ((ind not in self._distance_matrix) or (self._distance_matrix[ind]['max_radius'] < radius)):\n coords = []\n for (x, y, z) in itertools.product(*[list(range(i)) for i in a]):\n coords.append([(x / a[0]), (y / a[1]), (z / a[2])])\n sites_dist = struct.lattice.get_points_in_sphere(coords, struct[ind].coords, radius)\n self._distance_matrix[ind] = {'max_radius': radius, 'data': np.array(sites_dist)}\n data = self._distance_matrix[ind]['data']\n inds = (data[(:, 1)] <= radius)\n dists = data[(inds, 1)]\n data_inds = np.rint((np.mod(list(data[(inds, 0)]), 1) * np.tile(a, (len(dists), 1)))).astype(int)\n vals = [self.data['diff'][(x, y, z)] for (x, y, z) in data_inds]\n (hist, edges) = np.histogram(dists, bins=nbins, range=[0, radius], weights=vals)\n data = np.zeros((nbins, 2))\n data[(:, 0)] = edges[1:]\n data[(:, 1)] = [(sum(hist[0:(i + 1)]) / self.ngridpts) for i in range(nbins)]\n return data", "docstring": "Get integrated difference of atom index ind up to radius. This can be\nan extremely computationally intensive process, depending on how many\ngrid points are in the VolumetricData.\n\nArgs:\nind (int): Index of atom.\nradius (float): Radius of integration.\nnbins (int): Number of bins. 
Defaults to 1. This allows one to\nobtain the charge integration up to a list of the cumulative\ncharge integration values for radii for [radius/nbins,\n2 * radius/nbins, ....].\n\nReturns:\nDifferential integrated charge as a np array of [[radius, value],\n...]. Format is for ease of plotting. E.g., plt.plot(data[:,0],\ndata[:,1])", "source": "codesearchnet"} {"code": "def get_cpu_isa_version():\n key = 'cpu_isa'\n out, err = run_shell_cmd(cmds_all[PLATFORM.lower()][key])\n if err and FLAGS.debug:\n print('Error in detecting supported ISA:\\n %s' % str(err))\n ret_val = out\n required_isa = ['avx', 'avx2', 'avx512f', 'sse4', 'sse4_1']\n found = []\n missing = []\n for isa in required_isa:\n for sys_isa in ret_val.split(b' '):\n if isa == sys_isa:\n if isa not in found:\n found.append(isa)\n missing = list(set(required_isa) - set(found))\n return (found, missing)", "docstring": "Retrieves all Instruction Set Architecture(ISA) available.\n\nRequired ISA(s): 'avx', 'avx2', 'avx512f', 'sse4', 'sse4_1'\n\nReturns:\nTuple\n(list of available ISA, list of missing ISA)", "source": "github-repos"} {"code": "def __init__(self, action):\n _check_type(action, str)\n self.action = action", "docstring": "Constructor.\n\nArgs:\naction: (`OnSessionInitAction`) Debugger action to take on session init.", "source": "github-repos"} {"code": "def price(self, market: pmd.ProcessedMarketData, name: Optional[str]=None) -> types.FloatTensor:\n name = name or self._name + '_price'\n with tf.name_scope(name):\n discount_curve = get_discount_curve(self._discount_curve_type, market, self._mask)\n discount_factors = discount_curve.discount_factor(self._coupon_end_dates)\n _, cashflows = self.cashflows(market, past_fixing=self._past_fixing)\n cashflow_pvs = cashflows * discount_factors\n return tf.math.reduce_sum(cashflow_pvs, axis=1)", "docstring": "Returns the present value of the stream on the valuation date.\n\nArgs:\nmarket: An instance of `ProcessedMarketData`.\nname: Python str. The name to give to the ops created by this function.\nDefault value: `None` which maps to 'price'.\n\nReturns:\nA `Tensor` of shape `batch_shape` containing the modeled price of each\nstream based on the input market data.", "source": "github-repos"} {"code": "def volumes(self):\n if (not self.__volumes):\n self.__volumes = Volumes(self.__connection)\n return self.__volumes", "docstring": "Gets the Volumes API client.\n\nReturns:\nVolumes:", "source": "codesearchnet"} {"code": "def parse_config_for_selected_keys(content, keys):\n config_items = {key: None for key in keys}\n if (not content):\n return (config_items, content)\n stripped = content.strip()\n if (len(stripped) == 0):\n return ({}, None)\n elif (stripped[0] == '{'):\n config = json.loads(content)\n else:\n config = yaml.load(content)\n if (not isinstance(config, dict)):\n raise ValueError('Invalid config.')\n for key in keys:\n config_items[key] = config.pop(key, None)\n if (not config):\n return (config_items, None)\n if (stripped[0] == '{'):\n content_out = json.dumps(config, indent=4)\n else:\n content_out = yaml.dump(config, default_flow_style=False)\n return (config_items, content_out)", "docstring": "Parse a config from a magic cell body for selected config keys.\n\nFor example, if 'content' is:\nconfig_item1: value1\nconfig_item2: value2\nconfig_item3: value3\nand 'keys' are: [config_item1, config_item3]\n\nThe results will be a tuple of\n1. The parsed config items (dict): {config_item1: value1, config_item3: value3}\n2. 
The remaining content (string): config_item2: value2\n\nArgs:\ncontent: the input content. A string. It has to be a yaml or JSON string.\nkeys: a list of keys to retrieve from content. Note that it only checks top level keys\nin the dict.\n\nReturns:\nA tuple. First is the parsed config including only selected keys. Second is\nthe remaining content.\n\nRaises:\nException if the content is not a valid yaml or JSON string.", "source": "codesearchnet"} {"code": "def _is_txn_to_replay(self, txn_id, possible_successor, already_seen):\n is_successor = self._is_predecessor_of_possible_successor(txn_id, possible_successor)\n in_different_batch = (not self._is_in_same_batch(txn_id, possible_successor))\n has_not_been_seen = (possible_successor not in already_seen)\n return (is_successor and in_different_batch and has_not_been_seen)", "docstring": "Decide if possible_successor should be replayed.\n\nArgs:\ntxn_id (str): Id of txn in failed batch.\npossible_successor (str): Id of txn to possibly replay.\nalready_seen (list): A list of possible_successors that have\nbeen replayed.\n\nReturns:\n(bool): If the possible_successor should be replayed.", "source": "codesearchnet"} {"code": "def __init__(self, resolver, mets_url, src_dir=None, skip=None, download=False, page_strictness='strict'):\n \n self.report = ValidationReport()\n self.skip = skip if skip else []\n log.debug('resolver=%s mets_url=%s src_dir=%s', resolver, mets_url, src_dir)\n self.resolver = resolver\n self.mets_url = mets_url\n self.download = download\n self.page_strictness = page_strictness\n\n self.src_dir = src_dir\n if mets_url is None and src_dir is not None:\n mets_url = '%s/mets.xml' % src_dir\n self.workspace = None\n self.mets = None", "docstring": "Construct a new WorkspaceValidator.\n\nArgs:\nresolver (Resolver):\nmets_url (string):\nsrc_dir (string):\nskip (list):\ndownload (boolean):\npage_strictness (\"strict\"|\"lax\"|\"fix\"|\"off\"):", "source": "juraj-google-style"} {"code": "def _create_conversion_trie(strict):\n t = pygtrie.CharTrie()\n for (beta, uni) in _map.BETACODE_MAP.items():\n if strict:\n t[beta] = uni\n else:\n diacritics = beta[1:]\n perms = itertools.permutations(diacritics)\n for perm in perms:\n perm_str = (beta[0] + ''.join(perm))\n t[perm_str.lower()] = uni\n t[perm_str.upper()] = uni\n return t", "docstring": "Create the trie for betacode conversion.\n\nArgs:\ntext: The beta code text to convert. All of this text must be betacode.\nstrict: Flag to allow for flexible diacritic order on input.\n\nReturns:\nThe trie for conversion.", "source": "codesearchnet"} {"code": "def __init__(self, item_type=None, min_length=None, max_length=None, empty=True):\n \n super(TupleTypeChecker, self).__init__(\n iter_type=tuple, item_type=item_type, min_length=min_length, max_length=max_length, empty=empty\n )", "docstring": "Initialization method.\n\nArgs:\nitem_type (type): the type of the items inside the tuple.\nmin_length (int): minimum length of the tuple (included).\nmax_length (int): maximum length of the tuple (included).\nempty (bool): whether empty tuple is allowed.", "source": "juraj-google-style"} {"code": "def _get_attributes(self):\n return map((lambda i, c: (i[1], c[1])), self._get_instance_attributes(), self.get_class_attributes())", "docstring": "Return a generator for instance and class attribute.\n\n.. 
code-block:: python3\n\nfor instance_attribute, class_attribute in self._get_attributes():\nprint(\"Instance Attribute: {}\".format(instance_attribute))\nprint(\"Class Attribute: {}\".format(class_attribute))\n\nReturns:\ngenerator: Tuples with instance attribute and class attribute", "source": "codesearchnet"} {"code": "def _get_var_info(var, prev_tensor_name=None):\n if checkpoint_utils._is_variable(var):\n current_var_name = _infer_var_name([var])\n elif isinstance(var, list) and all((checkpoint_utils._is_variable(v) for v in var)):\n current_var_name = _infer_var_name(var)\n elif isinstance(var, variables_lib.PartitionedVariable):\n current_var_name = _infer_var_name([var])\n var = var._get_variable_list()\n else:\n raise TypeError('var MUST be one of the following: a Variable, list of Variable or PartitionedVariable, but is {}'.format(type(var)))\n if not prev_tensor_name:\n prev_tensor_name = current_var_name\n return (prev_tensor_name, var)", "docstring": "Helper method for standarizing Variable and naming.\n\nArgs:\nvar: Current graph's variable that needs to be warm-started (initialized).\nCan be either of the following: (i) `Variable` (ii) `ResourceVariable`\n(iii) list of `Variable`: The list must contain slices of the same larger\nvariable. (iv) `PartitionedVariable`\nprev_tensor_name: Name of the tensor to lookup in provided `prev_ckpt`. If\nNone, we lookup tensor with same name as given `var`.\n\nReturns:\nA tuple of the Tensor name and var.", "source": "github-repos"} {"code": "class ConvNextStage(nn.Module):\n\n def __init__(self, config, in_channels, out_channels, kernel_size=2, stride=2, depth=2, drop_path_rates=None):\n super().__init__()\n if in_channels != out_channels or stride > 1:\n self.downsampling_layer = nn.Sequential(ConvNextLayerNorm(in_channels, eps=1e-06, data_format='channels_first'), nn.Conv2d(in_channels, out_channels, kernel_size=kernel_size, stride=stride))\n else:\n self.downsampling_layer = nn.Identity()\n drop_path_rates = drop_path_rates or [0.0] * depth\n self.layers = nn.Sequential(*[ConvNextLayer(config, dim=out_channels, drop_path=drop_path_rates[j]) for j in range(depth)])\n\n def forward(self, hidden_states: torch.FloatTensor) -> torch.Tensor:\n hidden_states = self.downsampling_layer(hidden_states)\n hidden_states = self.layers(hidden_states)\n return hidden_states", "docstring": "ConvNeXT stage, consisting of an optional downsampling layer + multiple residual blocks.\n\nArgs:\nconfig ([`ConvNextConfig`]): Model configuration class.\nin_channels (`int`): Number of input channels.\nout_channels (`int`): Number of output channels.\ndepth (`int`): Number of residual blocks.\ndrop_path_rates(`List[float]`): Stochastic depth rates for each layer.", "source": "github-repos"} {"code": "def allclose_up_to_global_phase(\n a: np.ndarray,\n b: np.ndarray,\n *,\n rtol: float = 1.e-5,\n atol: float = 1.e-8,\n equal_nan: bool = False\n) -> bool:\n \n\n a, b = transformations.match_global_phase(a, b)\n\n \n return np.allclose(a=a, b=b, rtol=rtol, atol=atol, equal_nan=equal_nan)", "docstring": "Determines if a ~= b * exp(i t) for some t.\n\nArgs:\na: A numpy array.\nb: Another numpy array.\nrtol: Relative error tolerance.\natol: Absolute error tolerance.\nequal_nan: Whether or not NaN entries should be considered equal to\nother NaN entries.", "source": "juraj-google-style"} {"code": "def do_IDENT(self, service_name: str, source: list, *args, **kwargs) -> None:\n \n self.logger.info(' IDENT %s as %s', service_name, source)\n 
self.messaging._address_map[service_name] = source", "docstring": "Perform identification of a service to a binary representation.\n\nArgs:\nservice_name: human readable name for service\nsource: zmq representation for the socket source", "source": "juraj-google-style"} {"code": "def add_mount_point(self, path, total_size=None):\n path = self.absnormpath(path)\n if (path in self.mount_points):\n self.raise_os_error(errno.EEXIST, path)\n self._last_dev += 1\n self.mount_points[path] = {'idev': self._last_dev, 'total_size': total_size, 'used_size': 0}\n root_dir = (self.root if (path == self.root.name) else self.create_dir(path))\n root_dir.st_dev = self._last_dev\n return self.mount_points[path]", "docstring": "Add a new mount point for a filesystem device.\nThe mount point gets a new unique device number.\n\nArgs:\npath: The root path for the new mount path.\n\ntotal_size: The new total size of the added filesystem device\nin bytes. Defaults to infinite size.\n\nReturns:\nThe newly created mount point dict.\n\nRaises:\nOSError: if trying to mount an existing mount point again.", "source": "codesearchnet"} {"code": "def embedding_lookup(self, features: Any, weights: Optional[Any]=None) -> Any:\n return cpu_embedding_lookup(features, weights, self.embedding_tables, self._feature_config)", "docstring": "Apply standard lookup ops on CPU.\n\nArgs:\nfeatures: A nested structure of `tf.Tensor`s, `tf.SparseTensor`s or\n`tf.RaggedTensor`s, with the same structure as `feature_config`. Inputs\nwill be downcast to `tf.int32`. Only one type out of `tf.SparseTensor`\nor `tf.RaggedTensor` is supported per call.\nweights: If not `None`, a nested structure of `tf.Tensor`s,\n`tf.SparseTensor`s or `tf.RaggedTensor`s, matching the above, except\nthat the tensors should be of float type (and they will be downcast to\n`tf.float32`). For `tf.SparseTensor`s we assume the `indices` are the\nsame for the parallel entries from `features` and similarly for\n`tf.RaggedTensor`s we assume the row_splits are the same.\n\nReturns:\nA nested structure of Tensors with the same structure as input features.", "source": "github-repos"} {"code": "def get_event_from_name(self, event_name):\n \n return next((e for e in self.events if e.name == event_name), None)", "docstring": "Return an event from a name\nArgs:\nevent_name (str): name of the event\nReturns:\nEvent", "source": "juraj-google-style"} {"code": "def _default_tolerance(dtype):\n if dtype == np.float16:\n return 0.005\n elif dtype in (np.float32, np.complex64):\n return 0.001\n elif dtype in (np.float64, np.complex128):\n return 1e-05\n else:\n return None", "docstring": "Returns a sensible default tolerance for comparing results of a given type.\n\nArgs:\ndtype: A datatype.", "source": "github-repos"} {"code": "def set_property(self, name, value, update_session=True):\n \n if type(value) == datetime:\n value = value.isoformat()\n else:\n value = value\n\n try:\n prop = self.get_property(name)\n if prop.value == value:\n return False\n\n prop.value = value\n\n except AttributeError:\n prop = ResourceProperty()\n prop.resource_id = self.id\n prop.name = name\n prop.value = value\n\n if update_session:\n db.session.add(prop)\n\n return True", "docstring": "Create or set the value of a property. Returns `True` if the property was created or updated, or `False` if\nthere were no changes to the value of the property.\n\nArgs:\nname (str): Name of the property to create or update\nvalue (any): Value of the property. 
This can be any type of JSON serializable data\nupdate_session (bool): Automatically add the change to the SQLAlchemy session. Default: True\n\nReturns:\n`bool`", "source": "juraj-google-style"} {"code": "def in_coord_list(coord_list, coord, atol=1e-08):\n return (len(find_in_coord_list(coord_list, coord, atol=atol)) > 0)", "docstring": "Tests if a particular coord is within a coord_list.\n\nArgs:\ncoord_list: List of coords to test\ncoord: Specific coordinates\natol: Absolute tolerance. Defaults to 1e-8. Accepts both scalar and\narray.\n\nReturns:\nTrue if coord is in the coord list.", "source": "codesearchnet"} {"code": "def load_flag_values(self, flags=None):\n if (flags is None):\n flags = self._flags\n for keyval in flags.config_value:\n (k, v) = keyval.split('=', 1)\n v = (self._modules['yaml'].load(v) if isinstance(v, str) else v)\n k = (k.decode() if isinstance(k, bytes) else k)\n v = (v.decode() if isinstance(v, bytes) else v)\n self._flag_values.setdefault(k, v)", "docstring": "Load flag values given from command line flags.\n\nArgs:\nflags: An argparse Namespace containing the command line flags.", "source": "codesearchnet"} {"code": "def intersect_curves(nodes1, nodes2):\n nodes1 = _curve_helpers.full_reduce(nodes1)\n nodes2 = _curve_helpers.full_reduce(nodes2)\n (_, num_nodes1) = nodes1.shape\n (_, num_nodes2) = nodes2.shape\n swapped = False\n if (num_nodes1 > num_nodes2):\n (nodes1, nodes2) = (nodes2, nodes1)\n swapped = True\n coeffs = normalize_polynomial(to_power_basis(nodes1, nodes2))\n if np.all((coeffs == 0.0)):\n raise NotImplementedError(_COINCIDENT_ERR)\n _check_non_simple(coeffs)\n t_vals = roots_in_unit_interval(coeffs)\n final_s = []\n final_t = []\n for t_val in t_vals:\n ((x_val,), (y_val,)) = _curve_helpers.evaluate_multi(nodes2, np.asfortranarray([t_val]))\n s_val = locate_point(nodes1, x_val, y_val)\n if (s_val is not None):\n _resolve_and_add(nodes1, s_val, final_s, nodes2, t_val, final_t)\n result = np.zeros((2, len(final_s)), order='F')\n if swapped:\n (final_s, final_t) = (final_t, final_s)\n result[(0, :)] = final_s\n result[(1, :)] = final_t\n return result", "docstring": "r\"\"\"Intersect two parametric B |eacute| zier curves.\n\nArgs:\nnodes1 (numpy.ndarray): The nodes in the first curve.\nnodes2 (numpy.ndarray): The nodes in the second curve.\n\nReturns:\nnumpy.ndarray: ``2 x N`` array of intersection parameters.\nEach row contains a pair of values :math:`s` and :math:`t`\n(each in :math:`\\left[0, 1\\right]`) such that the curves\nintersect: :math:`B_1(s) = B_2(t)`.\n\nRaises:\nNotImplementedError: If the \"intersection polynomial\" is\nall zeros -- which indicates coincident curves.", "source": "codesearchnet"} {"code": "def time_to_jump( self ):\n \n k_tot = rate_prefactor * np.sum( self.p )\n return -( 1.0 / k_tot ) * math.log( random.random() )", "docstring": "The timestep until the next jump.\n\nArgs:\nNone\n\nReturns:\n(Float): The timestep until the next jump.", "source": "juraj-google-style"} {"code": "def find(self, package, **kwargs):\n \n if not exists(package):\n return None\n name, path = None, None\n enforce_init = kwargs.pop('enforce_init', True)\n if isdir(package):\n if isfile(join(package, '__init__.py')) or not enforce_init:\n name, path = basename(package), package\n elif isfile(package) and package.endswith('.py'):\n name, path = splitext(basename(package))[0], package\n if name and path:\n return PackageSpec(name, path)\n return None", "docstring": "Find method.\n\nArgs:\npackage (str): package to find.\n**kwargs (): 
additional keyword arguments.\n\nReturns:\nPackageSpec: the PackageSpec corresponding to the package, or None.", "source": "juraj-google-style"} {"code": "def server_hardware(self):\n if (not self.__server_hardware):\n self.__server_hardware = ServerHardware(self.__connection)\n return self.__server_hardware", "docstring": "Gets the ServerHardware API client.\n\nReturns:\nServerHardware:", "source": "codesearchnet"} {"code": "def metamodel_from_str(lang_desc, metamodel=None, **kwargs):\n \n\n if not metamodel:\n metamodel = TextXMetaModel(**kwargs)\n\n language_from_str(lang_desc, metamodel)\n\n return metamodel", "docstring": "Creates a new metamodel from the textX description given as a string.\n\nArgs:\nlang_desc(str): A textX language description.\nmetamodel(TextXMetaModel): A metamodel that should be used.\nother params: See TextXMetaModel.", "source": "juraj-google-style"} {"code": "def _handle_deferred_dependencies(self, name, trackable):\n self._maybe_initialize_trackable()\n trackable._maybe_initialize_trackable()\n deferred_dependencies_list = self._deferred_dependencies.pop(name, ())\n for checkpoint_position in sorted(deferred_dependencies_list, key=lambda restore: restore.checkpoint.restore_uid, reverse=True):\n checkpoint_position.restore(trackable)\n for name_based_restore in sorted(self._self_name_based_restores, key=lambda checkpoint: checkpoint.restore_uid, reverse=True):\n trackable._name_based_attribute_restore(name_based_restore)", "docstring": "Pop and load any deferred checkpoint restores into `trackable`.\n\nThis method does not add a new dependency on `trackable`, but it does\ncheck if any outstanding/deferred dependencies have been queued waiting for\nthis dependency to be added (matched based on `name`). If so,\n`trackable` and its dependencies are restored. The restorations are\nconsidered fulfilled and so are deleted.\n\n`_track_trackable` is more appropriate for adding a\nnormal/unconditional dependency, and includes handling for deferred\nrestorations. This method allows objects such as `Optimizer` to use the same\nrestoration logic while managing conditional dependencies themselves, by\noverriding `_checkpoint_dependencies` and `_lookup_dependency` to change the\nobject's dependencies based on the context it is saved/restored in (a single\noptimizer instance can have state associated with multiple graphs).\n\nArgs:\nname: The name of the dependency within this object (`self`), used to\nmatch `trackable` with values saved in a checkpoint.\ntrackable: The Trackable object to restore (inheriting from `Trackable`).", "source": "github-repos"} {"code": "def seq(self, value):\n \n if value == self._defaults['seq'] and 'seq' in self._values:\n del self._values['seq']\n else:\n self._values['seq'] = value", "docstring": "The seq property.\n\nArgs:\nvalue (string). 
the property value.", "source": "juraj-google-style"} {"code": "def sample_point(input_features: torch.Tensor, point_coordinates: torch.Tensor, add_dim=False, **kwargs) -> torch.Tensor:\n if point_coordinates.dim() == 3:\n add_dim = True\n point_coordinates = point_coordinates.unsqueeze(2)\n point_features = torch.nn.functional.grid_sample(input_features, 2.0 * point_coordinates - 1.0, **kwargs)\n if add_dim:\n point_features = point_features.squeeze(3)\n return point_features", "docstring": "A wrapper around `torch.nn.functional.grid_sample` to support 3D point_coordinates tensors.\n\nArgs:\ninput_features (`torch.Tensor` of shape (batch_size, channels, height, width)):\nA tensor that contains features map on a height * width grid\npoint_coordinates (`torch.Tensor` of shape (batch_size, num_points, 2) or (batch_size, grid_height, grid_width,:\n2)):\nA tensor that contains [0, 1] * [0, 1] normalized point coordinates\nadd_dim (`bool`):\nboolean value to keep track of added dimension\n\nReturns:\npoint_features (`torch.Tensor` of shape (batch_size, channels, num_points) or (batch_size, channels,\nheight_grid, width_grid):\nA tensor that contains features for points in `point_coordinates`.", "source": "github-repos"} {"code": "def receive_datagram(self, data, address):\n \n\n \n if not self.app:\n logger.debug(\"Packet received\", address, data)\n return False\n\n \n \n try:\n response = self.app.handle_message(data, address)\n except Exception as err:\n logger.error(\"Error processing message from \" + str(address) +\n \":\" + str(data))\n logger.error(traceback.format_exc())\n return False\n\n \n \n if response:\n self.send_datagram(response, address)", "docstring": "Executes when UDP data has been received and sends the packet data\nto our app to process the request.\n\nArgs:\ndata (str): The raw serialized packet data received.\naddress (tuple): The address and port of the origin of the received\npacket. E.g. (address, port).\n\nReturns:\nNone", "source": "juraj-google-style"} {"code": "def ensure_image_is_hex(input_path):\n family = utilities.get_family('module_settings.json')\n target = family.platform_independent_target()\n build_dir = target.build_dirs()['build']\n if (platform.system() == 'Windows'):\n env = Environment(tools=['mingw'], ENV=os.environ)\n else:\n env = Environment(tools=['default'], ENV=os.environ)\n input_path = str(input_path)\n image_name = os.path.basename(input_path)\n (root, ext) = os.path.splitext(image_name)\n if (len(ext) == 0):\n raise BuildError('Unknown file format or missing file extension in ensure_image_is_hex', file_name=input_path)\n file_format = ext[1:]\n if (file_format == 'hex'):\n return input_path\n if (file_format == 'elf'):\n new_file = os.path.join(build_dir, (root + '.hex'))\n if (new_file not in CONVERTED_HEX_FILES):\n env.Command(new_file, input_path, action=Action('arm-none-eabi-objcopy -O ihex $SOURCE $TARGET', 'Creating intel hex file from: $SOURCE'))\n CONVERTED_HEX_FILES.add(new_file)\n return new_file\n raise BuildError('Unknown file format extension in ensure_image_is_hex', file_name=input_path, extension=file_format)", "docstring": "Return a path to a hex version of a firmware image.\n\nIf the input file is already in hex format then input_path\nis returned and nothing is done. 
If it is not in hex format\nthen an SCons action is added to convert it to hex and the\ntarget output file path is returned.\n\nA cache is kept so that each file is only converted once.\n\nArgs:\ninput_path (str): A path to a firmware image.\n\nReturns:\nstr: The path to a hex version of input_path, this may\nbe equal to input_path if it is already in hex format.", "source": "codesearchnet"} {"code": "def __init__(self, recipe=None, project=None, user=None, service=None, client=None, filepath=None, key=None, verbose=False, trace_print=False, trace_file=False):\n starthinker_trace_start(trace_print, trace_file)\n self.recipe = recipe or {}\n self.verbose = verbose\n self.filepath = filepath\n if 'setup' not in self.recipe:\n self.recipe['setup'] = {}\n if 'auth' not in self.recipe['setup']:\n self.recipe['setup']['auth'] = {}\n if service:\n self.recipe['setup']['auth']['service'] = service\n if client:\n self.recipe['setup']['auth']['client'] = client\n if user:\n self.recipe['setup']['auth']['user'] = user\n if project:\n self.recipe['setup']['id'] = project\n if key:\n self.recipe['setup']['key'] = key\n self.project = self.recipe['setup'].get('project', self.recipe['setup'].get('id'))\n self.key = self.recipe['setup'].get('key')\n self.timezone = ZoneInfo(self.recipe['setup'].get('timezone', 'America/Los_Angeles'))\n self.now = datetime.now(self.timezone)\n self.date = self.now.date()\n self.hour = self.now.hour\n if self.verbose:\n print('DATE:', self.now.date())\n print('HOUR:', self.now.hour)", "docstring": "Used in StarThinker scripts as programmatic entry point.\n\nArgs:\n* recipe: (dict) JSON object representing the recipe\n* project: (string) See module description.\n* user: (string) See module description.\n* service: (string) See module description.\n* client: (string) See module description.\n* key: (string) See module description.\n* verbose: (boolean) See module description.\n* trace_print: (boolean) True if writing execution trace to stdout.\n* trace_file: (boolean) True if writing execution trace to file.\n* args: (dict) dictionary of arguments (used with argParse).\n\nReturns:\nNothing.", "source": "github-repos"} {"code": "def get_selector(self, name):\n try:\n return self.matcher.by_name[name]\n except (AttributeError, KeyError):\n if (self.base is not None):\n return self.base.get_selector(name)\n else:\n raise KeyError(\"No selector found for style '{}'\".format(name))", "docstring": "Find a selector mapped to a style in this or a base style sheet.\n\nArgs:\nname (str): a style name\n\nReturns:\n:class:`.Selector`: the selector mapped to the style `name`\n\nRaises:\nKeyError: if the style `name` was not found in this or a base\nstyle sheet", "source": "codesearchnet"} {"code": "def read_samples(self, sr=None, offset=0, duration=None):\n read_duration = self.duration\n if ((offset > 0) and (read_duration is not None)):\n read_duration -= offset\n if (duration is not None):\n if (read_duration is None):\n read_duration = duration\n else:\n read_duration = min(duration, read_duration)\n return self.track.read_samples(sr=sr, offset=(self.start + offset), duration=read_duration)", "docstring": "Read the samples of the utterance.\n\nArgs:\nsr (int): If None uses the sampling rate given by the track,\notherwise resamples to the given sampling rate.\noffset (float): Offset in seconds to read samples from.\nduration (float): If not ``None`` read only this\nnumber of seconds in maximum.\n\nReturns:\nnp.ndarray: A numpy array containing the samples\nas a floating point 
(numpy.float32) time series.", "source": "codesearchnet"} {"code": "def ip_network(address, strict=True):\n \n try:\n return IPv4Network(address, strict)\n except (AddressValueError, NetmaskValueError):\n pass\n\n try:\n return IPv6Network(address, strict)\n except (AddressValueError, NetmaskValueError):\n pass\n\n raise ValueError('%r does not appear to be an IPv4 or IPv6 network' %\n address)", "docstring": "Take an IP string/int and return an object of the correct type.\n\nArgs:\naddress: A string or integer, the IP network. Either IPv4 or\nIPv6 networks may be supplied; integers less than 2**32 will\nbe considered to be IPv4 by default.\n\nReturns:\nAn IPv4Network or IPv6Network object.\n\nRaises:\nValueError: if the string passed isn't either a v4 or a v6\naddress. Or if the network has host bits set.", "source": "juraj-google-style"} {"code": "def __init__(self, field=None):\n \n super().__init__(action_type=ActionType.OFPAT_SET_FIELD)\n self.field = OxmTLV() if field is None else field", "docstring": "Create a ActionSetField with the optional parameters below.\n\nArgs:\nlength (int): length padded to 64 bits, followed by exactly\noxm_len bytes containing a single OXM TLV, then\nexactly ((oxm_len + 4) + 7)/8*8 - (oxm_len + 4)\n(between 0 and 7) bytes of all-zero bytes\nfield (:class:`OxmTLV`): OXM field and value.", "source": "juraj-google-style"} {"code": "def sample(self, n):\n \n total = bq.Query('select count(*) from %s' %\n self._get_source()).execute().result()[0].values()[0]\n if n > total:\n raise ValueError('sample larger than population')\n sampling = bq.Sampling.random(percent=n * 100.0 / float(total))\n if self._query is not None:\n source = self._query\n else:\n source = 'SELECT * FROM `%s`' % self._table\n sample = bq.Query(source).execute(sampling=sampling).result()\n df = sample.to_dataframe()\n return df", "docstring": "Samples data into a Pandas DataFrame. Note that it calls BigQuery so it will\nincur cost.\n\nArgs:\nn: number of sampled counts. 
Note that the number of counts returned is approximated.\nReturns:\nA dataframe containing sampled data.\nRaises:\nException if n is larger than number of rows.", "source": "juraj-google-style"} {"code": "def UpdateBudget(self, client_customer_id, budget_id, micro_amount,\n delivery_method):\n \n self.client.SetClientCustomerId(client_customer_id)\n operations = [{\n 'operator': 'SET',\n 'operand': {\n 'budgetId': budget_id,\n 'amount': {\n 'microAmount': micro_amount\n },\n 'deliveryMethod': delivery_method\n }\n }]\n self.client.GetService('BudgetService').mutate(operations)", "docstring": "Update a Budget with the given budgetId.\n\nArgs:\nclient_customer_id: str Client Customer Id used to update Budget.\nbudget_id: str Id of the budget to be updated.\nmicro_amount: str New value for the microAmount field.\ndelivery_method: str New value for the deliveryMethod field.", "source": "juraj-google-style"} {"code": "def _convert_row_partition(cls, partition, name, dtype=None, dtype_hint=None):\n if dtype_hint is None:\n dtype_hint = dtypes.int64\n if isinstance(partition, np.ndarray) and partition.dtype == np.int32 and (dtype is None):\n partition = ops.convert_to_tensor(partition, name=name)\n else:\n partition = tensor_conversion.convert_to_tensor_v2(partition, dtype_hint=dtype_hint, dtype=dtype, name=name)\n if partition.dtype not in (dtypes.int32, dtypes.int64):\n raise ValueError('%s must have dtype int32 or int64' % name)\n return partition", "docstring": "Converts `partition` to Tensors.\n\nArgs:\npartition: A row-partitioning tensor for the `RowPartition` being\nconstructed. I.e., one of: row_splits, row_lengths, row_starts,\nrow_limits, value_rowids, uniform_row_length.\nname: The name of the row-partitioning tensor.\ndtype: Optional dtype for the RowPartition. If missing, the type\nis inferred from the type of `uniform_row_length`, dtype_hint,\nor tf.int64.\ndtype_hint: Optional dtype for the RowPartition, used when dtype\nis None. 
In some cases, a caller may not have a dtype in mind when\nconverting to a tensor, so dtype_hint can be used as a soft preference.\nIf the conversion to `dtype_hint` is not possible, this argument has no\neffect.\n\nReturns:\nA tensor equivalent to partition.\n\nRaises:\nValueError: if dtype is not int32 or int64.", "source": "github-repos"} {"code": "def get_value_by_row_col(self, row, col):\n \n if row < 0 or row >= self.nRows or col < 0 or col >= self.nCols:\n raise ValueError(\"The row or col must be >=0 and less than \"\n \"nRows (%d) or nCols (%d)!\" % (self.nRows, self.nCols))\n else:\n value = self.data[int(round(row))][int(round(col))]\n if value == self.noDataValue:\n return None\n else:\n return value", "docstring": "Get raster value by (row, col).\n\nArgs:\nrow: row number.\ncol: col number.\n\nReturns:\nraster value, None if the input are invalid.", "source": "juraj-google-style"} {"code": "def create_local_scope_from_def_args(self, call_args, def_args, line_number, saved_function_call_index):\n for i in range(len(call_args)):\n def_arg_local_name = def_args[i]\n def_arg_temp_name = ((('temp_' + str(saved_function_call_index)) + '_') + def_args[i])\n local_scope_node = RestoreNode(((def_arg_local_name + ' = ') + def_arg_temp_name), def_arg_local_name, [def_arg_temp_name], line_number=line_number, path=self.filenames[(- 1)])\n self.nodes[(- 1)].connect(local_scope_node)\n self.nodes.append(local_scope_node)", "docstring": "Create the local scope before entering the body of a function call.\n\nArgs:\ncall_args(list[ast.Name]): Of the call being made.\ndef_args(ast_helper.Arguments): Of the definition being called.\nline_number(int): Of the def of the function call about to be entered into.\nsaved_function_call_index(int): Unique number for each call.\n\nNote: We do not need a connect_if_allowed because of the\npreceding call to save_def_args_in_temp.", "source": "codesearchnet"} {"code": "def _ScanFileSystem(self, scan_node, base_path_specs):\n \n if not scan_node or not scan_node.path_spec:\n raise errors.ScannerError('Invalid or missing file system scan node.')\n\n file_system = resolver.Resolver.OpenFileSystem(scan_node.path_spec)\n if not file_system:\n return\n\n try:\n path_resolver = windows_path_resolver.WindowsPathResolver(\n file_system, scan_node.path_spec.parent)\n\n if self._ScanFileSystemForWindowsDirectory(path_resolver):\n base_path_specs.append(scan_node.path_spec)\n\n finally:\n file_system.Close()", "docstring": "Scans a file system scan node for file systems.\n\nThis method checks if the file system contains a known Windows directory.\n\nArgs:\nscan_node (SourceScanNode): file system scan node.\nbase_path_specs (list[PathSpec]): file system base path specifications.\n\nRaises:\nScannerError: if the scan node is invalid.", "source": "juraj-google-style"} {"code": "def _WriteIfcfg(self, interfaces, logger):\n for interface in interfaces:\n interface_config = os.path.join(self.network_path, ('ifcfg-%s' % interface))\n interface_content = ['\n with open(interface_config, 'w') as interface_file:\n interface_file.write('\\n'.join(interface_content))\n logger.info('Created ifcfg file for interface %s.', interface)", "docstring": "Write ifcfg files for multi-NIC support.\n\nOverwrites the files. 
This allows us to update ifcfg-* in the future.\nDisable the network setup to override this behavior and customize the\nconfigurations.\n\nArgs:\ninterfaces: list of string, the output device names to enable.\nlogger: logger object, used to write to SysLog and serial port.", "source": "codesearchnet"} {"code": "def run_command(self, command, arg=None, is_eval=False, member_id=None):\n        logger.debug('run_command({command}, {arg}, {is_eval}, {member_id})'.format(**locals()))\n        mode = ((is_eval and 'eval') or 'command')\n        hostname = None\n        if isinstance(member_id, int):\n            hostname = self.member_id_to_host(member_id)\n        result = getattr(self.connection(hostname=hostname).admin, mode)(command, arg)\n        logger.debug('command result: {result}'.format(result=result))\n        return result", "docstring": "run command on replica set\nif member_id is specified, the command will be executed on this server\nif member_id is not specified, the command will be executed on the primary\n\nArgs:\ncommand - command string\narg - command argument\nis_eval - if True execute command as eval\nmember_id - member id\n\nreturn command's result", "source": "codesearchnet"} {"code": "def _GetDaysPerMonth(self, year, month):\n    if (month not in range(1, 13)):\n      raise ValueError('Month value out of bounds.')\n    days_per_month = self._DAYS_PER_MONTH[(month - 1)]\n    if ((month == 2) and self._IsLeapYear(year)):\n      days_per_month += 1\n    return days_per_month", "docstring": "Retrieves the number of days in a month of a specific year.\n\nArgs:\nyear (int): year e.g. 1970.\nmonth (int): month, where 1 represents January.\n\nReturns:\nint: number of days in the month.\n\nRaises:\nValueError: if the month value is out of bounds.", "source": "codesearchnet"} {"code": "def _hexvalue_to_hsv(hexvalue):\n    \n    h = int(hexvalue[7:10], 16) / 360\n    s = int(hexvalue[10:12], 16) / 255\n    v = int(hexvalue[12:14], 16) / 255\n\n    return (h, s, v)", "docstring": "Converts the hexvalue used by tuya for colour representation into\nan HSV value.\n\nArgs:\nhexvalue(string): The hex representation generated by BulbDevice._rgb_to_hexvalue()", "source": "juraj-google-style"} {"code": "def zoom_blur(x, severity=1):\n  c = [np.arange(1, 1.11, 0.01), np.arange(1, 1.16, 0.01), np.arange(1, 1.21, 0.02), np.arange(1, 1.26, 0.02), np.arange(1, 1.31, 0.03)][(severity - 1)]\n  x = (np.array(x) / 255.0).astype(np.float32)\n  out = np.zeros_like(x)\n  for zoom_factor in c:\n    out += clipped_zoom(x, zoom_factor)\n  x = ((x + out) / (len(c) + 1))\n  x_clip = (np.clip(x, 0, 1) * 255)\n  return around_and_astype(x_clip)", "docstring": "Zoom blurring to images.\n\nApplying zoom blurring to images by zooming the central part of the images.\n\nArgs:\nx: numpy array, uncorrupted image, assumed to have uint8 pixel in [0,255].\nseverity: integer, severity of corruption.\n\nReturns:\nnumpy array, image with uint8 pixels in [0,255]. 
Applied zoom blur.", "source": "codesearchnet"} {"code": "def get_shreds(self, feature_extractors, sheet_name):\n if (self._shreds is None):\n shreds = []\n (_, contours, _) = cv2.findContours(self._foreground_mask, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)\n for (i, contour) in enumerate(contours):\n shred = self._make_shred(contour, i, feature_extractors, sheet_name)\n if (shred is not None):\n shreds.append(shred)\n self._shreds = shreds\n return self._shreds", "docstring": "Detects shreds in the current sheet and constructs Shred instances.\n\nCaches the results for further invocations.\n\nArgs:\nfeature_extractors: iterable of AbstractShredFeature instances to\nuse for shreds feature assignment.\nsheet_name: string, included in shred attributes.\n\nReturns:\nlist of Shred instances.", "source": "codesearchnet"} {"code": "def get_wells(self, uwis=None):\n if (uwis is None):\n return Project(self.__list)\n return Project([w for w in self if (w.uwi in uwis)])", "docstring": "Returns a new Project with only the wells named by UWI.\n\nArgs:\nuwis (list): list or tuple of UWI strings.\n\nReturns:\nproject.", "source": "codesearchnet"} {"code": "def get_collection(self, name, scope=None) -> list[Any]:\n with self._lock:\n collection = self._collections.get(name, None)\n if collection is None:\n return []\n if scope is None:\n return list(collection)\n else:\n c = []\n regex = re.compile(scope)\n for item in collection:\n try:\n if regex.match(item.name):\n c.append(item)\n except AttributeError:\n pass\n return c", "docstring": "Returns a list of values in the collection with the given `name`.\n\nThis is different from `get_collection_ref()` which always returns the\nactual collection list if it exists in that it returns a new list each time\nit is called.\n\nArgs:\nname: The key for the collection. For example, the `GraphKeys` class\ncontains many standard names for collections.\nscope: (Optional.) A string. If supplied, the resulting list is filtered\nto include only items whose `name` attribute matches `scope` using\n`re.match`. Items without a `name` attribute are never returned if a\nscope is supplied. The choice of `re.match` means that a `scope` without\nspecial tokens filters by prefix.\n\nReturns:\nThe list of values in the collection with the given `name`, or\nan empty list if no value has been added to that collection. 
The\nlist contains the values in the order under which they were\ncollected.", "source": "github-repos"} {"code": "def add_input(self, input_):\n \n if not isinstance(input_, Input):\n raise TypeError('`input_` must be a Input instance')\n self.inputs.append(input_)", "docstring": "Adds an input to a Transaction's list of inputs.\n\nArgs:\ninput_ (:class:`~bigchaindb.common.transaction.\nInput`): An Input to be added to the Transaction.", "source": "juraj-google-style"} {"code": "def swo_flush(self, num_bytes=None):\n \n if num_bytes is None:\n num_bytes = self.swo_num_bytes()\n\n buf = ctypes.c_uint32(num_bytes)\n res = self._dll.JLINKARM_SWO_Control(enums.JLinkSWOCommands.FLUSH,\n ctypes.byref(buf))\n if res < 0:\n raise errors.JLinkException(res)\n\n return None", "docstring": "Flushes data from the SWO buffer.\n\nAfter this method is called, the flushed part of the SWO buffer is\nempty.\n\nIf ``num_bytes`` is not present, flushes all data currently in the SWO\nbuffer.\n\nArgs:\nself (JLink): the ``JLink`` instance\nnum_bytes (int): the number of bytes to flush\n\nReturns:\n``None``\n\nRaises:\nJLinkException: on error", "source": "juraj-google-style"} {"code": "def resolve_theme(self, name):\n \n if name not in settings.CODEMIRROR_THEMES:\n msg = (\"Given theme name '{}' does not exists in \"\n \"'settings.CODEMIRROR_THEMES'.\")\n raise UnknowThemeError(msg.format(name))\n\n return settings.CODEMIRROR_THEMES.get(name)", "docstring": "From given theme name, return theme file path from\n``settings.CODEMIRROR_THEMES`` map.\n\nArguments:\nname (string): Theme name.\n\nRaises:\nKeyError: When given name does not exist in\n``settings.CODEMIRROR_THEMES``.\n\nReturns:\nstring: Theme file path.", "source": "juraj-google-style"} {"code": "def _GetNextPath(self):\n paths = sorted((path for path in io_wrapper.ListDirectoryAbsolute(self._directory) if self._path_filter(path)))\n if (not paths):\n return None\n if (self._path is None):\n return paths[0]\n if ((not io_wrapper.IsCloudPath(paths[0])) and (not self._ooo_writes_detected)):\n current_path_index = bisect.bisect_left(paths, self._path)\n ooo_check_start = max(0, (current_path_index - self._OOO_WRITE_CHECK_COUNT))\n for path in paths[ooo_check_start:current_path_index]:\n if self._HasOOOWrite(path):\n self._ooo_writes_detected = True\n break\n next_paths = list((path for path in paths if ((self._path is None) or (path > self._path))))\n if next_paths:\n return min(next_paths)\n else:\n return None", "docstring": "Gets the next path to load from.\n\nThis function also does the checking for out-of-order writes as it iterates\nthrough the paths.\n\nReturns:\nThe next path to load events from, or None if there are no more paths.", "source": "codesearchnet"} {"code": "def __init__(self, sess, bad_init_action=None, bad_run_start_action=None, bad_debug_urls=None):\n self._bad_init_action = bad_init_action\n self._bad_run_start_action = bad_run_start_action\n self._bad_debug_urls = bad_debug_urls\n framework.BaseDebugWrapperSession.__init__(self, sess)", "docstring": "Constructor.\n\nArgs:\nsess: The TensorFlow Session object to be wrapped.\nbad_init_action: (str) bad action value to be returned during the\non-session-init callback.\nbad_run_start_action: (str) bad action value to be returned during the\nthe on-run-start callback.\nbad_debug_urls: Bad URL values to be returned during the on-run-start\ncallback.", "source": "github-repos"} {"code": "def assert_eventual(self, func, required, allowed, timeout_secs=300.0):\n required = set(required)\n 
assert required\n seen = set()\n start_time = time.time()\n while timeout_secs is None or time.time() - start_time < timeout_secs:\n if seen == required:\n return\n value = func()\n if value not in allowed:\n self.fail(msg=f'Disallowed value: {value}.')\n if value in required:\n seen.add(value)\n missing = [v for v in required if v not in seen]\n self.fail(msg=f'Timed out. Missing values: {str([str(v) for v in missing])}.')", "docstring": "Tests that calls to the given function meet required and allowed values.\n\nArgs:\nfunc: function to test.\nrequired: iterable of required values. Must be hashable and non-empty.\nallowed: iterable of allowed values. Must be hashable and non-empty.\ntimeout_secs: fails if more than this time is required.", "source": "github-repos"} {"code": "def _translate_to_fulltype_for_flat_tensors(spec: type_spec.TypeSpec) -> List[full_type_pb2.FullTypeDef]:\n if isinstance(spec, RaggedTensorSpec):\n dt = spec.dtype\n elem_t = _DT_TO_FT.get(dt)\n if elem_t is None:\n logging.vlog(1, 'dtype %s that has no conversion to fulltype.', dt)\n elif elem_t == full_type_pb2.TFT_LEGACY_VARIANT:\n logging.vlog(1, 'Ragged tensors containing variants are not supported.', dt)\n else:\n assert len(spec._flat_tensor_specs) == 1\n return [full_type_pb2.FullTypeDef(type_id=full_type_pb2.TFT_RAGGED, args=[full_type_pb2.FullTypeDef(type_id=elem_t)])]\n return [full_type_pb2.FullTypeDef(type_id=full_type_pb2.TFT_UNSET) for t in spec._flat_tensor_specs]", "docstring": "Convert a TypeSec to a list of FullTypeDef.\n\nThe FullTypeDef created corresponds to the encoding used with datasets\n(and map_fn) that uses variants (and not FullTypeDef corresponding to the\ndefault \"component\" encoding).\n\nCurrently, the only use of this is for information about the contents of\nragged tensors, so only ragged tensors return useful full type information\nand other types return TFT_UNSET. While this could be improved in the future,\nthis function is intended for temporary use and expected to be removed\nwhen type inference support is sufficient.\n\nArgs:\nspec: A TypeSpec for one element of a dataset or map_fn.\n\nReturns:\nA list of FullTypeDef corresponding to SPEC. 
The length of this list\nis always the same as the length of spec._flat_tensor_specs.", "source": "github-repos"} {"code": "def draw_layer(ax, layer):\n \n ax.set_aspect('equal', 'datalim')\n ax.plot(*layer)\n ax.axis('off')", "docstring": "Draws a layer on the given matplotlib axis.\n\nArgs:\nax (axis): the matplotlib axis to draw on\nlayer (layer): the layers to plot", "source": "juraj-google-style"} {"code": "def send_rpc_request(self, request):\n self._client_send(request)\n response = self._client_receive()\n if not response:\n raise errors.ProtocolError(self._device, errors.ProtocolError.NO_RESPONSE_FROM_SERVER)\n return self._decode_socket_response_bytes(response)", "docstring": "Sends an RPC request to the server and receives a response.\n\nArgs:\nrequest: str, the request to send the server.\n\nReturns:\nThe string of the RPC response.\n\nRaises:\nerrors.Error: if failed to send the request or receive a response.\nerrors.ProtocolError: if received an empty response from the server.\nUnicodeError: if failed to decode the received response.", "source": "github-repos"} {"code": "def expression_filter(self, name, **kwargs):\n \n\n def decorator(func):\n self.filters[name] = ExpressionFilter(name, func, **kwargs)\n\n return decorator", "docstring": "Returns a decorator function for adding an expression filter.\n\nArgs:\nname (str): The name of the filter.\n**kwargs: Variable keyword arguments for the filter.\n\nReturns:\nCallable[[Callable[[AbstractExpression, Any], AbstractExpression]]]: A decorator\nfunction for adding an expression filter.", "source": "juraj-google-style"} {"code": "def set_all_xlims(self, xlim, dx, xscale, fontsize=None):\n self._set_all_lims('x', xlim, dx, xscale, fontsize)\n return", "docstring": "Set limits and ticks for x axis for whole figure.\n\nThis will set x axis limits and tick marks for the entire figure.\nIt can be overridden in the SinglePlot class.\n\nArgs:\nxlim (len-2 list of floats): The limits for the axis.\ndx (float): Amount to increment by between the limits.\nxscale (str): Scale of the axis. Either `log` or `lin`.\nfontsize (int, optional): Set fontsize for x axis tick marks.\nDefault is None.", "source": "codesearchnet"} {"code": "def match(self, part: ProcessorPart) -> bool:\n ...", "docstring": "Returns True if `part` should be processed by this part processor.\n\nReturns False if it sure that the part processor will not process the input\npart and that the part processor should pass the part as is.\n\nNOTE: the part processor `__call__` implementation should always skip the\npart (i.e. return the part as is) when `match` returns False.\n\nA typical example are part processors that are type-dependent, e.g. a part\nprocessor that parses a specific proto from the part or that only parses\ntext.\n\nArgs:\npart: the part to check.\n\nReturns:\nFalse if the part has no chance of being processed by this part\nprocessor. 
True otherwise.", "source": "github-repos"} {"code": "def remove_snippet_client(self, name):\n if name not in self._snippet_clients:\n raise Error(self._device, MISSING_SNIPPET_CLIENT_MSG % name)\n client = self._snippet_clients.pop(name)\n client.stop()", "docstring": "Removes a snippet client from management.\n\nArgs:\nname: string, the name of the snippet client to remove.\n\nRaises:\nError: if no snippet client is managed under the specified name.", "source": "github-repos"} {"code": "def _get_num_multimodal_tokens(self, image_sizes=None, **kwargs):\n vision_data = {}\n if image_sizes is not None:\n images_kwargs = InternVLProcessorKwargs._defaults.get('images_kwargs', {})\n images_kwargs.update(kwargs)\n num_image_patches = [self.image_processor.get_number_of_image_tokens(*image_size, images_kwargs) for image_size in image_sizes]\n num_image_tokens = [2 + self.image_seq_length * num_patches for num_patches in num_image_patches]\n vision_data.update({'num_image_tokens': num_image_tokens, 'num_image_patches': num_image_patches})\n return MultiModalData(**vision_data)", "docstring": "Computes the number of placeholder tokens needed for multimodal inputs with the given sizes.\n\nArgs:\nimage_sizes (`List[List[int]]`, *optional*):\nThe input sizes formatted as (height, width) per each image.\n\nReturns:\n`MultiModalData`: A `MultiModalData` object holding number of tokens per each of the provided\ninput modalities, along with other useful data.", "source": "github-repos"} {"code": "def plot_dendrogram(ax, obj, show_diameters=True):\n \n \n dnd = Dendrogram(obj, show_diameters=show_diameters)\n dnd.generate()\n\n \n \n \n\n _render_dendrogram(dnd, ax, 0.)\n\n ax.set_title('Morphology Dendrogram')\n ax.set_xlabel('micrometers (um)')\n ax.set_ylabel('micrometers (um)')\n\n ax.set_aspect('auto')\n ax.legend()", "docstring": "Dendrogram of `obj`\n\nArgs:\nobj: Neuron or tree \\\nneurom.Neuron, neurom.Tree\nshow_diameters : boolean \\\nDetermines if node diameters will \\\nbe show or not.", "source": "juraj-google-style"} {"code": "def _price_lognormal_rate(self, valuation_date, market, pricing_context):\n discount_curve = market.discount_curve\n discount_factors = tf.where(self._payment_dates > valuation_date, discount_curve.get_discount_factor(self._payment_dates), 0.0)\n forward_rates = self._get_forward_rate(valuation_date, market)\n if pricing_context is None:\n volatility_surface = market.volatility_curve\n black_vols = volatility_surface.interpolate(self._reset_dates, self._strike, self._term)\n else:\n black_vols = tf.convert_to_tensor(pricing_context, dtype=self._dtype)\n expiry_times = dates.daycount_actual_365_fixed(start_date=valuation_date, end_date=self._reset_dates, dtype=self._dtype)\n caplet_prices = black_scholes.option_price(forwards=forward_rates, strikes=self._strike, volatilities=black_vols, expiries=expiry_times, is_call_options=self._is_cap)\n intrinsic_value = tf.where(self._is_cap, tf.math.maximum(forward_rates - self._strike, 0.0), tf.math.maximum(self._strike - forward_rates, 0))\n caplet_prices = tf.where(self._payment_dates < valuation_date, tf.constant(0.0, dtype=self._dtype), tf.where(self._accrual_start_dates < valuation_date, intrinsic_value, caplet_prices))\n caplet_prices = self._notional * self._daycount_fractions * caplet_prices\n return discount_factors * caplet_prices", "docstring": "Computes caplet/floorlet prices using lognormal model for forward rates.\n\nThe function computes individual caplet prices for the batch of caps/floors\nusing the lognormal 
model for the forward rates. If the volatilities are\nsupplied (through the input `pricing_context`) then they are used as\nforward rate volatilities. Otherwise, volatilities are extracted using the\nvolatility surface for `market`.\n\nArgs:\nvaluation_date: A scalar `DateTensor` specifying the date on which\nvaluation is being desired.\nmarket: A namedtuple of type `InterestRateMarket` which contains the\nnecessary information for pricing the Cap/Floor.\npricing_context: An optional input containing the black volatility for\nthe forward rates.\n\nReturns:\nA Rank 1 `Tensor` of real type containing the price of each caplet\n(or floorlet) based on the lognormal model for forward rates.", "source": "github-repos"} {"code": "def _put_into_indexes(self, obj):\n        no_of_used_indexes = 0\n        for (field_name, db_index) in list(self._get_db_fields(obj)):\n            attr_value = getattr(obj, field_name)\n            if (attr_value is None):\n                continue\n            container = db_index.get(attr_value, None)\n            if (container is None):\n                container = OOTreeSet()\n                db_index[attr_value] = container\n            container.insert(obj)\n            no_of_used_indexes += 1\n        if (no_of_used_indexes <= 0):\n            raise UnindexableObject('You have to use at least one of the identifiers!')", "docstring": "Put publication into all indexes.\n\nArgs:\nobj (obj): Indexable object.\n\nRaises:\nUnindexableObject: When there is no index (property) which can be\nused to index `obj` in database.", "source": "codesearchnet"} {"code": "def create_resource(self, parent_id=\"\"):\n        \n        resource_name = self.trigger_settings.get('resource', '')\n        resource_name = resource_name.replace('/', '')\n        if not self.resource_id:\n            created_resource = self.client.create_resource(\n                restApiId=self.api_id, parentId=parent_id, pathPart=resource_name)\n            self.resource_id = created_resource['id']\n            self.log.info(\"Successfully created resource\")\n        else:\n            self.log.info(\"Resource already exists. To update resource please delete existing resource: %s\",\n                          resource_name)", "docstring": "Create the specified resource.\n\nArgs:\nparent_id (str): The resource ID of the parent resource in API Gateway", "source": "juraj-google-style"} {"code": "def FlowAccumFromProps(props, weights=None, in_place=False):\n    if (type(props) is not rd3array):\n        raise Exception('A richdem.rd3array or numpy.ndarray is required!')\n    if ((weights is not None) and in_place):\n        accum = rdarray(weights, no_data=(- 1))\n    elif ((weights is not None) and (not in_place)):\n        accum = rdarray(weights, copy=True, meta_obj=props, no_data=(- 1))\n    elif (weights is None):\n        accum = rdarray(np.ones(shape=props.shape[0:2], dtype='float64'), meta_obj=props, no_data=(- 1))\n    else:\n        raise Exception('Execution should never reach this point!')\n    if (accum.dtype != 'float64'):\n        raise Exception(\"Accumulation array must be of type 'float64'!\")\n    accumw = accum.wrap()\n    _AddAnalysis(accum, 'FlowAccumFromProps(dem, weights={weights}, in_place={in_place})'.format(weights=('None' if (weights is None) else 'weights'), in_place=in_place))\n    _richdem.FlowAccumulation(props.wrap(), accumw)\n    accum.copyFromWrapped(accumw)\n    return accum", "docstring": "Calculates flow accumulation from flow proportions.\n\nArgs:\nprops (rdarray): An elevation model\nweights (rdarray): Flow accumulation weights to use. This is the\namount of flow generated by each cell. If this is\nnot provided, each cell will generate 1 unit of\nflow.\nin_place (bool): If True, then `weights` is modified in place. 
An\naccumulation matrix is always returned, but it will\njust be a view of the modified data if `in_place`\nis True.\n\nReturns:\nA flow accumulation array. If `weights` was provided and `in_place` was\nTrue, then this matrix is a view of the modified data.", "source": "codesearchnet"} {"code": "def parse(file_contents, file_name):\n    env = Environment()\n    result = ''\n    try:\n        env.parse(file_contents)\n    except Exception:\n        (_, exc_value, _) = sys.exc_info()\n        result += 'ERROR: Jinja2 Template File: {0}'.format(file_name)\n        result += (repr(exc_value) + '\\n')\n    return result", "docstring": "Takes the contents of a file which is assumed to be a jinja2 template and\ntries to parse it.\n\nArgs:\nfile_contents (str): File contents of a jinja file\nfile_name (str): Name of the file, used in the error message\n\nReturns:\nstr: An error message if the contents cannot be parsed, otherwise an\nempty string.", "source": "codesearchnet"} {"code": "def _get_mpr_view(self, connection, table):\n        logger.debug('Looking for view of the table.\\n    table: {}'.format(table.vid))\n        view = self.get_view_name(table)\n        view_exists = self._relation_exists(connection, view)\n        if view_exists:\n            logger.debug('View of the table exists.\\n    table: {}, view: {}'.format(table.vid, view))\n            return view\n        raise MissingViewError('sqlite database does not have view for {} table.'.format(table.vid))", "docstring": "Finds and returns view name in the sqlite db represented by given connection.\n\nArgs:\nconnection: connection to sqlite db where to look for partition table.\ntable (orm.Table):\n\nRaises:\nMissingViewError: if database does not have partition table.\n\nReturns:\nstr: database table storing partition data.", "source": "codesearchnet"} {"code": "def bitwise_not(x):\n    if any_symbolic_tensors((x,)):\n        return BitwiseNot().symbolic_call(x)\n    return backend.numpy.bitwise_not(x)", "docstring": "Compute bit-wise inversion, or bit-wise NOT, element-wise.\n\nComputes the bit-wise NOT of the underlying binary representation of the\nintegers in the input arrays. This ufunc implements the C/Python operator\n`~`.\n\nArgs:\nx: Input integer tensor.\n\nReturns:\nResult tensor.", "source": "github-repos"} {"code": "def branch(self):\n        cmd = ['git', 'symbolic-ref', '--short', 'HEAD']\n        try:\n            output = self.sh(cmd, shell=False, ignore_error=True).rstrip()\n        except subprocess.CalledProcessError as e:\n            log.exception(e)\n            return ('\n        if output.startswith('fatal: ref HEAD is not a symbolic ref'):\n            output = ('\n        return output", "docstring": "Determine the branch name of the working directory of this Repository\n\nReturns:\nstr: branch name (``git symbolic-ref --short HEAD``)", "source": "codesearchnet"} {"code": "def get_submissions_for_student_item(request, course_id, student_id, item_id):\n    student_item_dict = dict(course_id=course_id, student_id=student_id, item_id=item_id)\n    context = dict(**student_item_dict)\n    try:\n        submissions = get_submissions(student_item_dict)\n        context['submissions'] = submissions\n    except SubmissionRequestError:\n        context['error'] = 'The specified student item was not found.'\n    return render_to_response('submissions.html', context)", "docstring": "Retrieve all submissions associated with the given student item.\n\nDeveloper utility for accessing all the submissions associated with a\nstudent item. 
The student item is specified by the unique combination of\ncourse, student, and item.\n\nArgs:\nrequest (dict): The request.\ncourse_id (str): The course id for this student item.\nstudent_id (str): The student id for this student item.\nitem_id (str): The item id for this student item.\n\nReturns:\nHttpResponse: The response object for this request. Renders a simple\ndevelopment page with all the submissions related to the specified\nstudent item.", "source": "codesearchnet"} {"code": "def _prepare_init_params_from_job_description(cls, job_details, model_channel_name=None):\n \n init_params = dict()\n\n init_params['role'] = job_details['RoleArn']\n init_params['train_instance_count'] = job_details['ResourceConfig']['InstanceCount']\n init_params['train_instance_type'] = job_details['ResourceConfig']['InstanceType']\n init_params['train_volume_size'] = job_details['ResourceConfig']['VolumeSizeInGB']\n init_params['train_max_run'] = job_details['StoppingCondition']['MaxRuntimeInSeconds']\n init_params['input_mode'] = job_details['AlgorithmSpecification']['TrainingInputMode']\n init_params['base_job_name'] = job_details['TrainingJobName']\n init_params['output_path'] = job_details['OutputDataConfig']['S3OutputPath']\n init_params['output_kms_key'] = job_details['OutputDataConfig']['KmsKeyId']\n\n has_hps = 'HyperParameters' in job_details\n init_params['hyperparameters'] = job_details['HyperParameters'] if has_hps else {}\n\n if 'TrainingImage' in job_details['AlgorithmSpecification']:\n init_params['image'] = job_details['AlgorithmSpecification']['TrainingImage']\n elif 'AlgorithmName' in job_details['AlgorithmSpecification']:\n init_params['algorithm_arn'] = job_details['AlgorithmSpecification']['AlgorithmName']\n else:\n raise RuntimeError('Invalid AlgorithmSpecification. Either TrainingImage or '\n 'AlgorithmName is expected. 
None was found.')\n\n if 'MetricDefinitons' in job_details['AlgorithmSpecification']:\n init_params['metric_definitions'] = job_details['AlgorithmSpecification']['MetricsDefinition']\n\n if 'EnableInterContainerTrafficEncryption' in job_details:\n init_params['encrypt_inter_container_traffic'] = \\\n job_details['EnableInterContainerTrafficEncryption']\n\n subnets, security_group_ids = vpc_utils.from_dict(job_details.get(vpc_utils.VPC_CONFIG_KEY))\n if subnets:\n init_params['subnets'] = subnets\n if security_group_ids:\n init_params['security_group_ids'] = security_group_ids\n\n if 'InputDataConfig' in job_details and model_channel_name:\n for channel in job_details['InputDataConfig']:\n if channel['ChannelName'] == model_channel_name:\n init_params['model_channel_name'] = model_channel_name\n init_params['model_uri'] = channel['DataSource']['S3DataSource']['S3Uri']\n break\n\n return init_params", "docstring": "Convert the job description to init params that can be handled by the class constructor\n\nArgs:\njob_details: the returned job details from a describe_training_job API call.\nmodel_channel_name (str): Name of the channel where pre-trained model data will be downloaded.\n\nReturns:\ndictionary: The transformed init_params", "source": "juraj-google-style"} {"code": "def sync_l(self, option: str = 'all') -> None:\n \n if option in ['system', 'vendor', 'oem', 'data', 'all']:\n self._execute('-s', self.device_sn, 'sync', '-l', option)\n else:\n raise ValueError('There is no option named: {!r}.'.format(option))", "docstring": "List but don't copy.\n\nArgs:\noption: 'system', 'vendor', 'oem', 'data', 'all'", "source": "juraj-google-style"} {"code": "def parse_geometry(ml_log, log=None, ml_version='2016.12', print_output=False):\n \n \n aabb = {}\n geometry = {'aabb':aabb}\n with open(ml_log) as fread:\n for line in fread:\n if 'Mesh Bounding Box min' in line: \n geometry['aabb']['min'] = (line.split()[4:7])\n geometry['aabb']['min'] = [util.to_float(val) for val in geometry['aabb']['min']]\n if 'Mesh Bounding Box max' in line: \n geometry['aabb']['max'] = (line.split()[4:7])\n geometry['aabb']['max'] = [util.to_float(val) for val in geometry['aabb']['max']]\n if 'Mesh Bounding Box Size' in line: \n geometry['aabb']['size'] = (line.split()[4:7])\n geometry['aabb']['size'] = [util.to_float(val) for val in geometry['aabb']['size']]\n if 'Mesh Bounding Box Diag' in line: \n geometry['aabb']['diagonal'] = util.to_float(line.split()[4])\n if 'Mesh Volume' in line:\n geometry['volume_mm3'] = util.to_float(line.split()[3])\n geometry['volume_cm3'] = geometry['volume_mm3'] * 0.001\n if 'Mesh Surface' in line:\n if ml_version == '1.3.4BETA':\n geometry['area_mm2'] = util.to_float(line.split()[3])\n else:\n geometry['area_mm2'] = util.to_float(line.split()[4])\n geometry['area_cm2'] = geometry['area_mm2'] * 0.01\n if 'Mesh Total Len of' in line:\n if 'including faux edges' in line:\n geometry['total_edge_length_incl_faux'] = util.to_float(\n line.split()[7])\n else:\n geometry['total_edge_length'] = util.to_float(\n line.split()[7])\n if 'Thin shell barycenter' in line:\n geometry['barycenter'] = (line.split()[3:6])\n geometry['barycenter'] = [util.to_float(val) for val in geometry['barycenter']]\n if 'Thin shell (faces) barycenter' in line: \n geometry['barycenter'] = (line.split()[4:7])\n geometry['barycenter'] = [util.to_float(val) for val in geometry['barycenter']]\n if 'Vertices barycenter' in line: \n geometry['vert_barycenter'] = (line.split()[2:5])\n geometry['vert_barycenter'] = 
[util.to_float(val) for val in geometry['vert_barycenter']]\n if 'Center of Mass' in line:\n geometry['center_of_mass'] = (line.split()[4:7])\n geometry['center_of_mass'] = [util.to_float(val) for val in geometry['center_of_mass']]\n if 'Inertia Tensor' in line:\n geometry['inertia_tensor'] = []\n for val in range(3):\n row = (next(fread, val).split()[1:4])\n row = [util.to_float(b) for b in row]\n geometry['inertia_tensor'].append(row)\n if 'Principal axes' in line:\n geometry['principal_axes'] = []\n for val in range(3):\n row = (next(fread, val).split()[1:4])\n row = [util.to_float(b) for b in row]\n geometry['principal_axes'].append(row)\n if 'axis momenta' in line:\n geometry['axis_momenta'] = (next(fread).split()[1:4])\n geometry['axis_momenta'] = [util.to_float(val) for val in geometry['axis_momenta']]\n break \n for key, value in geometry.items():\n if log is not None:\n log_file = open(log, 'a')\n log_file.write('{:27} = {}\\n'.format(key, value))\n log_file.close()\n elif print_output:\n print('{:27} = {}'.format(key, value))\n return geometry", "docstring": "Parse the ml_log file generated by the measure_geometry function.\n\nWarnings: Not all keys may exist if mesh is not watertight or manifold\n\nArgs:\nml_log (str): MeshLab log file to parse\nlog (str): filename to log output", "source": "juraj-google-style"} {"code": "def DeregisterAttributeContainer(cls, attribute_container_class):\n \n container_type = attribute_container_class.CONTAINER_TYPE.lower()\n if container_type not in cls._attribute_container_classes:\n raise KeyError(\n 'Attribute container class not set for container type: '\n '{0:s}.'.format(attribute_container_class.CONTAINER_TYPE))\n\n del cls._attribute_container_classes[container_type]", "docstring": "Deregisters an attribute container class.\n\nThe attribute container classes are identified based on their lower case\ncontainer type.\n\nArgs:\nattribute_container_class (type): attribute container class.\n\nRaises:\nKeyError: if attribute container class is not set for\nthe corresponding container type.", "source": "juraj-google-style"} {"code": "def convert_tokens_to_ids(self, tokens: Union[str, list[str]]) -> Union[int, list[int]]:\n if tokens is None:\n return None\n if isinstance(tokens, str):\n return self._convert_token_to_id_with_added_voc(tokens)\n ids = []\n for token in tokens:\n ids.append(self._convert_token_to_id_with_added_voc(token))\n return ids", "docstring": "Converts a token string (or a sequence of tokens) in a single integer id (or a sequence of ids), using the\nvocabulary.\n\nArgs:\ntokens (`str` or `List[str]`): One or several token(s) to convert to token id(s).\n\nReturns:\n`int` or `List[int]`: The token id or list of token ids.", "source": "github-repos"} {"code": "def _prefix_from_prefix_string(cls, prefixlen_str):\n \n \n \n if not _BaseV4._DECIMAL_DIGITS.issuperset(prefixlen_str):\n cls._report_invalid_netmask(prefixlen_str)\n try:\n prefixlen = int(prefixlen_str)\n except ValueError:\n cls._report_invalid_netmask(prefixlen_str)\n if not (0 <= prefixlen <= cls._max_prefixlen):\n cls._report_invalid_netmask(prefixlen_str)\n return prefixlen", "docstring": "Return prefix length from a numeric string\n\nArgs:\nprefixlen_str: The string to be converted\n\nReturns:\nAn integer, the prefix length.\n\nRaises:\nNetmaskValueError: If the input is not a valid netmask", "source": "juraj-google-style"} {"code": "def pretty_str(something, indent=0):\n if isinstance(something, CodeEntity):\n return something.pretty_str(indent=indent)\n else:\n 
return ((' ' * indent) + repr(something))", "docstring": "Return a human-readable string representation of an object.\n\nUses `pretty_str` if the given value is an instance of\n`CodeEntity` and `repr` otherwise.\n\nArgs:\nsomething: Some value to convert.\n\nKwargs:\nindent (int): The amount of spaces to use as indentation.", "source": "codesearchnet"} {"code": "def match_validator(expression):\n    \n    if isinstance(expression, str):\n        compiled = re.compile(expression)\n    elif hasattr(expression, 'match'):\n        \n        compiled = expression\n    else:\n        raise TypeError(\n            'Provided match is neither a string nor has a match method '\n            '(like re expressions)'\n        )\n\n    def validator(value):\n        if not compiled.match(value):\n            \n            raise ValidationError(\n                \"{} does not match pattern: {}\".format(\n                    value,\n                    compiled.pattern\n                    if hasattr(compiled, 'pattern')\n                    else compiled\n                )\n            )\n\n    return validator", "docstring": "Return validator function that will check if a value matches the given expression.\n\nArgs:\nexpression: if string then this will be converted to a regular expression\nusing ``re.compile``. Can also be any object that has a ``match()``\nmethod, like an already compiled regular expression or a custom\nmatching object/class.", "source": "juraj-google-style"} {"code": "def scored_to_phenotype(self, phenotypes):\n\n        def _apply_score(scored_calls, phenotypes):\n            present = sorted(list((set(phenotypes) & set(scored_calls.keys()))))\n            total = sum([scored_calls[x] for x in present])\n            if (total > 1):\n                raise ValueError('You cant extract phenotypes from scores if they are not mutually exclusive')\n            if (total == 0):\n                return np.nan\n            for label in present:\n                if (scored_calls[label] == 1):\n                    return label\n            raise ValueError('Should have hit an exit criteria already')\n        output = self.copy()\n        output['phenotype_label'] = output.apply((lambda x: _apply_score(x['scored_calls'], phenotypes)), 1)\n        output['phenotype_calls'] = output.apply((lambda x: dict([(y, (1 if (x['phenotype_label'] == y) else 0)) for y in phenotypes])), 1)\n        return output", "docstring": "Convert binary phenotypes to mutually exclusive phenotypes.\nIf none of the phenotypes are set, then phenotype_label becomes nan\nIf any of the phenotypes are multiply set then it throws a fatal error.\n\nArgs:\nphenotypes (list): a list of scored_names to convert to phenotypes\n\nReturns:\nCellDataFrame", "source": "codesearchnet"} {"code": "def resolve_mode(self, name):\n        if (name not in settings.CODEMIRROR_MODES):\n            msg = \"Given config name '{}' does not exist in 'settings.CODEMIRROR_MODES'.\"\n            raise UnknowModeError(msg.format(name))\n        return settings.CODEMIRROR_MODES.get(name)", "docstring": "From given mode name, return mode file path from\n``settings.CODEMIRROR_MODES`` map.\n\nArguments:\nname (string): Mode name.\n\nRaises:\nKeyError: When given name does not exist in\n``settings.CODEMIRROR_MODES``.\n\nReturns:\nstring: Mode file path.", "source": "codesearchnet"} {"code": "def aggregate(self, index):\n        if isinstance(index, string_types):\n            col_df_grouped = self.col_df.groupby(self.df[index])\n        else:\n            self.col_df.index = pd.MultiIndex.from_arrays([self.df[i] for i in index])\n            col_df_grouped = self.col_df.groupby(level=index)\n            self.col_df.index = self.df.index\n        self.reduced_df = pd.DataFrame({colred: col_df_grouped[colred.column].agg(colred.agg_func) for colred in self.column_reductions})\n        reduced_dfs = []\n        for cf in self.column_functions:\n            reduced_dfs.append(cf.apply_and_name(self))\n        return pd.concat(reduced_dfs, axis=1)", "docstring": "Performs a groupby of the unique 
Columns by index, as constructed from self.df.\n\nArgs:\nindex (str, or pd.Index): Index or column name of self.df.\n\nReturns:\npd.DataFrame: A dataframe, aggregated by index, that contains the result\nof the various ColumnFunctions, and named accordingly.", "source": "codesearchnet"} {"code": "def set_uid(self, uid, schema=None):\n        \n        try:\n            uid, schema = author_id_normalize_and_schema(uid, schema)\n        except UnknownUIDSchema:\n            \n            \n            \n            pass\n\n        self._ensure_field('ids', [])\n        self.obj['ids'] = [id_ for id_ in self.obj['ids'] if id_.get('schema') != schema]\n        self._add_uid(uid, schema)", "docstring": "Set a unique ID.\n\nIf a UID of a given schema already exists in a record it will\nbe overwritten, otherwise it will be appended to the record.\n\nArgs:\nuid (string): unique identifier.\nschema (Optional[string]): schema of the unique identifier. If\n``None``, the schema will be guessed based on the shape of\n``uid``.\n\nRaises:\nSchemaUIDConflict: if UID and schema are not matching", "source": "juraj-google-style"} {"code": "def text_filepaths_for_task(self, tmp_dir, task_id):\n        assert (task_id >= 0)\n        assert (task_id < (self.num_train_shards + self.num_dev_shards))\n        if (task_id < self.num_train_shards):\n            return [f for (i, f) in enumerate(self.train_text_filepaths(tmp_dir)) if ((i % self.num_train_shards) == task_id)]\n        else:\n            return [f for (i, f) in enumerate(self.dev_text_filepaths(tmp_dir)) if ((i % self.num_dev_shards) == (task_id - self.num_train_shards))]", "docstring": "List of input filepaths for a particular training or dev shard.\n\nArgs:\ntmp_dir: a string\ntask_id: an integer less than self.num_shards\nReturns:\na list of tuples (filepath, start_pos, num_bytes)", "source": "codesearchnet"} {"code": "def __response_message_descriptor(self, message_type, method_id):\n        descriptor = {'200': {'description': 'A successful response'}}\n        if (message_type != message_types.VoidMessage()):\n            self.__parser.add_message(message_type.__class__)\n            self.__response_schema[method_id] = self.__parser.ref_for_message_type(message_type.__class__)\n            descriptor['200']['schema'] = {'$ref': '\n        return dict(descriptor)", "docstring": "Describes the response.\n\nArgs:\nmessage_type: messages.Message class, The message to describe.\nmethod_id: string, Unique method identifier (e.g. 
'myapi.items.method')\n\nReturns:\nDictionary describing the response.", "source": "codesearchnet"} {"code": "def __init__(self, feature_dict, length=None, **kwargs):\n    \n    \n    \n    \n    self._length = length\n    super(SequenceDict, self).__init__(feature_dict, **kwargs)", "docstring": "Construct a sequence dict.\n\nArgs:\nfeature_dict: `dict`, the features to wrap\nlength: `int`, length of the sequence if static and known in advance\n**kwargs: `dict`, constructor kwargs of `tfds.features.FeaturesDict`", "source": "juraj-google-style"} {"code": "def to_json_string(self, use_diff: bool=True) -> str:\n        if use_diff is True:\n            config_dict = self.to_diff_dict()\n        else:\n            config_dict = self.to_dict()\n        return json.dumps(config_dict, indent=2, sort_keys=True) + '\\n'", "docstring": "Serializes this instance to a JSON string.\n\nArgs:\nuse_diff (`bool`, *optional*, defaults to `True`):\nIf set to `True`, only the difference between the config instance and the default `PretrainedConfig()`\nis serialized to JSON string.\n\nReturns:\n`str`: String containing all the attributes that make up this configuration instance in JSON format.", "source": "github-repos"} {"code": "def save_pickle(obj, outfile, protocol=2):\n    \n    with open(outfile, 'wb') as f:\n        pickle.dump(obj, f, protocol=protocol)\n\n    return outfile", "docstring": "Save the object as a pickle file\n\nArgs:\noutfile (str): Filename\nprotocol (int): Pickle protocol to use. Default is 2 to remain compatible with Python 2\n\nReturns:\nstr: Path to pickle file", "source": "juraj-google-style"} {"code": "def create_s3_event(app_name, env, region, bucket, triggers):\n    session = boto3.Session(profile_name=env, region_name=region)\n    s3_client = session.client('s3')\n    lambda_alias_arn = get_lambda_alias_arn(app_name, env, region)\n    LOG.debug('Lambda ARN for lambda function %s is %s.', app_name, lambda_alias_arn)\n    LOG.debug('Creating S3 events for bucket %s', bucket)\n    principal = 's3.amazonaws.com'\n    statement_id = '{}_s3_{}'.format(app_name, bucket).replace('.', '')\n    source_arn = 'arn:aws:s3:::{}'.format(bucket)\n    add_lambda_permissions(function=lambda_alias_arn, env=env, region=region, principal=principal, statement_id=statement_id, source_arn=source_arn)\n    template_kwargs = {'lambda_arn': lambda_alias_arn, 'triggers': triggers}\n    config = get_template(template_file='infrastructure/lambda/s3_event.json.j2', **template_kwargs)\n    s3_client.put_bucket_notification_configuration(Bucket=bucket, NotificationConfiguration=json.loads(config))\n    LOG.info('Created lambda %s S3 event on bucket %s', app_name, bucket)", "docstring": "Create S3 lambda events from triggers\n\nArgs:\napp_name (str): name of the lambda function\nenv (str): Environment/Account for lambda function\nregion (str): AWS region of the lambda function\ntriggers (list): List of triggers from the settings", "source": "codesearchnet"} {"code": "def wait(self, duration=None, count=0):\n        \n        start = time.time()\n        total = 0\n        while True:\n            type, result = self._recv(MSG, PING, OK)\n            if type is MSG:\n                total += 1\n                if self._handle_msg(result) is False:\n                    break\n\n                if count and total >= count:\n                    break\n\n            elif type is PING:\n                self._handle_ping()\n\n            if duration and time.time() - start > duration:\n                break", "docstring": "Waits for incoming messages on the connection, handling server pings,\nuntil the given duration elapses or the given number of messages is received.\n\nArgs:\nduration (float): will wait for the given number of seconds\ncount (int): stop waiting after n messages from any subject", "source": "juraj-google-style"} {"code": "def operate_multi(self, points):\n        \n        points = 
np.array(points)\n affine_points = np.concatenate(\n [points, np.ones(points.shape[:-1] + (1,))], axis=-1)\n return np.inner(affine_points, self.affine_matrix)[..., :-1]", "docstring": "Apply the operation on a list of points.\n\nArgs:\npoints: List of Cartesian coordinates\n\nReturns:\nNumpy array of coordinates after operation", "source": "juraj-google-style"} {"code": "def ip(self, value):\n \n if value == self._defaults['ai.location.ip'] and 'ai.location.ip' in self._values:\n del self._values['ai.location.ip']\n else:\n self._values['ai.location.ip'] = value", "docstring": "The ip property.\n\nArgs:\nvalue (string). the property value.", "source": "juraj-google-style"} {"code": "def _make_parser_func(sep):\n \n\n def parser_func(\n filepath_or_buffer,\n sep=sep,\n delimiter=None,\n header=\"infer\",\n names=None,\n index_col=None,\n usecols=None,\n squeeze=False,\n prefix=None,\n mangle_dupe_cols=True,\n dtype=None,\n engine=None,\n converters=None,\n true_values=None,\n false_values=None,\n skipinitialspace=False,\n skiprows=None,\n nrows=None,\n na_values=None,\n keep_default_na=True,\n na_filter=True,\n verbose=False,\n skip_blank_lines=True,\n parse_dates=False,\n infer_datetime_format=False,\n keep_date_col=False,\n date_parser=None,\n dayfirst=False,\n iterator=False,\n chunksize=None,\n compression=\"infer\",\n thousands=None,\n decimal=b\".\",\n lineterminator=None,\n quotechar='\"',\n quoting=0,\n escapechar=None,\n comment=None,\n encoding=None,\n dialect=None,\n tupleize_cols=None,\n error_bad_lines=True,\n warn_bad_lines=True,\n skipfooter=0,\n doublequote=True,\n delim_whitespace=False,\n low_memory=True,\n memory_map=False,\n float_precision=None,\n ):\n _, _, _, kwargs = inspect.getargvalues(inspect.currentframe())\n if not kwargs.get(\"sep\", sep):\n kwargs[\"sep\"] = \"\\t\"\n return _read(**kwargs)\n\n return parser_func", "docstring": "Creates a parser function from the given sep.\n\nArgs:\nsep: The separator default to use for the parser.\n\nReturns:\nA function object.", "source": "juraj-google-style"} {"code": "def from_raw(self, robj: RawObject) -> RootNode:\n cooked = self.schema.from_raw(robj)\n return RootNode(cooked, self.schema, cooked.timestamp)", "docstring": "Create an instance node from a raw data tree.\n\nArgs:\nrobj: Dictionary representing a raw data tree.\n\nReturns:\nRoot instance node.", "source": "codesearchnet"} {"code": "def _generate(cls, strategy, params):\n if cls._meta.abstract:\n raise errors.FactoryError(('Cannot generate instances of abstract factory %(f)s; Ensure %(f)s.Meta.model is set and %(f)s.Meta.abstract is either not set or False.' 
% dict(f=cls.__name__)))\n step = builder.StepBuilder(cls._meta, params, strategy)\n return step.build()", "docstring": "generate the object.\n\nArgs:\nparams (dict): attributes to use for generating the object\nstrategy: the strategy to use", "source": "codesearchnet"} {"code": "def device_ids(self):\n if self._device_ids is None:\n with ops.init_scope():\n device_ids_list = []\n for index, device in enumerate(self.components):\n with ops.device(device):\n device_ids_list.append(array_ops.identity(constant_op.constant(index)))\n self._device_ids = self.pack(device_ids_list)\n return self._device_ids", "docstring": "A parallel tensor with scalar integers numbering component devices.\n\nEach device ID is placed on its corresponding device, in the same order as\nthe `components` constructor argument.\n\nReturns:\nA parallel tensor containing 0 on the first device, 1 on the second, etc.", "source": "github-repos"} {"code": "def dft_task(cls, mol, xc=\"b3lyp\", **kwargs):\n \n t = NwTask.from_molecule(mol, theory=\"dft\", **kwargs)\n t.theory_directives.update({\"xc\": xc,\n \"mult\": t.spin_multiplicity})\n return t", "docstring": "A class method for quickly creating DFT tasks with optional\ncosmo parameter .\n\nArgs:\nmol: Input molecule\nxc: Exchange correlation to use.\n\\\\*\\\\*kwargs: Any of the other kwargs supported by NwTask. Note the\ntheory is always \"dft\" for a dft task.", "source": "juraj-google-style"} {"code": "def from_class(cls, target_class):\n \n module_name = target_class.__module__\n class_name = target_class.__name__\n return cls(module_name, \"__init__\", class_name)", "docstring": "Create a FunctionDescriptor from a class.\n\nArgs:\ncls: Current class which is required argument for classmethod.\ntarget_class: the python class used to create the function\ndescriptor.\n\nReturns:\nThe FunctionDescriptor instance created according to the class.", "source": "juraj-google-style"} {"code": "def __init__(self, app, env, region, prop_path, artifact_path, artifact_version, primary_region='us-east-1'):\n \n self.app_name = app\n self.env = env\n self.region = region\n self.artifact_path = artifact_path\n self.version = artifact_version\n self.properties = get_properties(prop_path, env=self.env, region=self.region)\n self.s3props = self.properties['s3']\n generated = get_details(app=app, env=env, region=region)\n\n include_region = True\n if self.region == primary_region:\n include_region = False\n if self.s3props.get('shared_bucket_master'):\n self.bucket = generated.shared_s3_app_bucket(include_region=include_region)\n self.s3path = app\n elif self.s3props.get('shared_bucket_target'):\n shared_app = self.s3props['shared_bucket_target']\n newgenerated = get_details(app=shared_app, env=env, region=region)\n self.bucket = newgenerated.shared_s3_app_bucket(include_region=include_region)\n self.s3path = app\n else:\n self.bucket = generated.s3_app_bucket(include_region=include_region)\n self.s3path = self.s3props['path'].lstrip('/')\n\n self.s3_version_uri = ''\n self.s3_latest_uri = ''\n self.setup_pathing()", "docstring": "S3 deployment object.\n\nArgs:\napp (str): Application name\nenv (str): Environment/Account\nregion (str): AWS Region\nprop_path (str): Path of environment property file\nartifact_path (str): Path to tar.gz artifact\nprimary_region (str): The primary region for the application.", "source": "juraj-google-style"} {"code": "def _get_css_files(cls, extra_files):\n packager = Packager()\n css_packages = getattr(cls, 'css_packages', {})\n return dict(((media_target, 
cls._get_media_files(packager=packager, media_packages=media_packages, media_type='css', extra_files=extra_files.get(media_target, []))) for (media_target, media_packages) in six.iteritems(css_packages)))", "docstring": "Return all CSS files from the Media class.\n\nArgs:\nextra_files (dict):\nThe contents of the Media class's original :py:attr:`css`\nattribute, if one was provided.\n\nReturns:\ndict:\nThe CSS media types and files to return for the :py:attr:`css`\nattribute.", "source": "codesearchnet"} {"code": "def populate_development(version):\n with open(DEVELOPMENT_TEMPLATE, 'r') as file_obj:\n template = file_obj.read()\n contents = template.format(revision=version, rtd_version=version)\n with open(DEVELOPMENT_FILE, 'w') as file_obj:\n file_obj.write(contents)", "docstring": "Populates ``DEVELOPMENT.rst`` with release-specific data.\n\nThis is because ``DEVELOPMENT.rst`` is used in the Sphinx documentation.\n\nArgs:\nversion (str): The current version.", "source": "codesearchnet"} {"code": "def parse_cartouche_text(lines):\n indent_lines = unindent(lines)\n indent_lines = pad_blank_lines(indent_lines)\n indent_lines = first_paragraph_indent(indent_lines)\n indent_paragraphs = gather_lines(indent_lines)\n parse_tree = group_paragraphs(indent_paragraphs)\n syntax_tree = extract_structure(parse_tree)\n result = syntax_tree.render_rst()\n ensure_terminal_blank(result)\n return result", "docstring": "Parse text in cartouche format and return a reStructuredText equivalent\n\nArgs:\nlines: A sequence of strings representing the lines of a single\ndocstring as read from the source by Sphinx. This string should be\nin a format that can be parsed by cartouche.\n\nReturns:\nA list of lines containing the transformed docstring as\nreStructuredText as produced by cartouche.\n\nRaises:\nRuntimeError: If the docstring cannot be parsed.", "source": "codesearchnet"} {"code": "def sampler(sample_function: Callable) -> Callable:\n\n def generate_sampler(continuous_pulse: Callable) -> Callable:\n 'Return a decorated sampler function.'\n\n @functools.wraps(continuous_pulse)\n def call_sampler(duration: int, *args, **kwargs) -> commands.SamplePulse:\n 'Replace the call to the continuous function with a call to the sampler applied\\n to the anlytic pulse function.'\n sampled_pulse = sample_function(continuous_pulse, duration, *args, **kwargs)\n return np.asarray(sampled_pulse, dtype=np.complex_)\n call_sampler = _update_annotations(call_sampler)\n call_sampler = _update_docstring(call_sampler, sample_function)\n call_sampler.__dict__.pop('__wrapped__')\n return commands.functional_pulse(call_sampler)\n return generate_sampler", "docstring": "Sampler decorator base method.\n\nSamplers are used for converting an continuous function to a discretized pulse.\n\nThey operate on a function with the signature:\n`def f(times: np.ndarray, *args, **kwargs) -> np.ndarray`\nWhere `times` is a numpy array of floats with length n_times and the output array\nis a complex numpy array with length n_times. 
The output of the decorator is an\ninstance of `FunctionalPulse` with signature:\n`def g(duration: int, *args, **kwargs) -> SamplePulse`\n\nNote if your continuous pulse function outputs a `complex` scalar rather than a\n`np.ndarray`, you should first vectorize it before applying a sampler.\n\n\nThis class implements the sampler boilerplate for the sampler.\n\nArgs:\nsample_function: A sampler function to be decorated.", "source": "codesearchnet"} {"code": "def get_object(self, file_path):\n \n file_path = make_string_path(file_path)\n file_path = self.absnormpath(self._original_path(file_path))\n return self.get_object_from_normpath(file_path)", "docstring": "Search for the specified filesystem object within the fake\nfilesystem.\n\nArgs:\nfile_path: Specifies the target FakeFile object to retrieve.\n\nReturns:\nThe FakeFile object corresponding to `file_path`.\n\nRaises:\nIOError: if the object is not found.", "source": "juraj-google-style"} {"code": "def concat(self, array_like):\n arr = list(array_like)\n if (len(set([x.microns_per_pixel for x in arr])) != 1):\n raise ValueError('Multiple microns per pixel set')\n cdf = CellDataFrame(pd.concat([pd.DataFrame(x) for x in arr]))\n cdf.microns_per_pixel = arr[0].microns_per_pixel\n return cdf", "docstring": "Concatonate multiple CellDataFrames\n\nthrows an error if the microns_per_pixel is not uniform across the frames\n\nArgs:\narray_like (list): a list of CellDataFrames with 1 or more CellDataFrames\n\nReturns:\nCellDataFrame", "source": "codesearchnet"} {"code": "def s_url(self, path, method=None, type_cast=None):\n if (not type_cast):\n type_cast = {}\n\n def decorator(function):\n self.s_add(path, function, method, type_cast)\n return function\n return decorator", "docstring": "Decorator for registering a simple path.\n\nArgs:\npath (str): Path to be matched.\nmethod (str, optional): Usually used to define one of GET, POST,\nPUT, DELETE. You may use whatever fits your situation though.\nDefaults to None.\ntype_cast (dict, optional): Mapping between the param name and\none of `int`, `float` or `bool`. The value reflected by the\nprovided param name will than be casted to the given type.\nDefaults to None.", "source": "codesearchnet"} {"code": "def mode_group(self):\n hmodegroup = self._libinput.libinput_event_tablet_pad_get_mode_group(self._handle)\n return TabletPadModeGroup(hmodegroup, self._libinput)", "docstring": "The mode group that the button, ring, or strip that\ntriggered this event is considered in.\n\nThe mode is a virtual grouping of functionality, usually based on some\nvisual feedback like LEDs on the pad. 
See `Tablet pad modes`_\nfor details.\n\nReturns:\n~libinput.define.TabletPadModeGroup: The mode group of the button,\nring or strip that caused this event.", "source": "codesearchnet"} {"code": "def trees_by_subpath(self, sub_path):\n \n matches = (\n self.path_db[tree_path].keys()\n for tree_path in self.path_db.iterkeys()\n if tree_path.startswith(sub_path)\n )\n\n return set(sum(matches, []))", "docstring": "Search trees by `sub_path` using ``Tree.path.startswith(sub_path)``\ncomparison.\n\nArgs:\nsub_path (str): Part of the :attr:`.Tree.path` property of\n:class:`.Tree`.\n\nReturns:\nset: Set of matching :class:`Tree` instances.", "source": "juraj-google-style"} {"code": "def random_get_instance() -> tcod.random.Random:\n return tcod.random.Random._new_from_cdata(ffi.cast('mersenne_data_t*', lib.TCOD_random_get_instance()))", "docstring": "Return the default Random instance.\n\nReturns:\nRandom: A Random instance using the default random number generator.", "source": "codesearchnet"} {"code": "def crop(self, extent, copy=False):\n \n try:\n if extent[0] is None:\n extent = (self.start.z, extent[1])\n if extent[1] is None:\n extent = (extent[0], self.stop.z)\n except:\n m = \"You must provide a 2-tuple for the new extents. Use None for\"\n m += \" the existing start or stop.\"\n raise StriplogError(m)\n\n first_ix = self.read_at(extent[0], index=True)\n last_ix = self.read_at(extent[1], index=True)\n\n first = self[first_ix].split_at(extent[0])[1]\n last = self[last_ix].split_at(extent[1])[0]\n\n new_list = self.__list[first_ix:last_ix+1].copy()\n new_list[0] = first\n new_list[-1] = last\n\n if copy:\n return Striplog(new_list)\n else:\n self.__list = new_list\n return", "docstring": "Crop to a new depth range.\n\nArgs:\nextent (tuple): The new start and stop depth. 
Must be 'inside'\nexisting striplog.\ncopy (bool): Whether to operate in place or make a copy.\n\nReturns:\nOperates in place by deault; if copy is True, returns a striplog.", "source": "juraj-google-style"} {"code": "def delete(self, remove_tombstone=True):\n\n\t\t\n\n\t\tresponse = self.repo.api.http_request('DELETE', self.uri)\n\n\t\t\n\t\tif response.status_code == 204:\n\t\t\t\n\t\t\tself._empty_resource_attributes()\n\n\t\tif remove_tombstone:\n\t\t\tself.repo.api.http_request('DELETE', '%s/fcr:tombstone' % self.uri)\n\n\t\treturn True", "docstring": "Method to delete resources.\n\nArgs:\nremove_tombstone (bool): If True, will remove tombstone at uri/fcr:tombstone when removing resource.\n\nReturns:\n(bool)", "source": "juraj-google-style"} {"code": "def _convert_single_op_hint_to_stub(call, graph_def, function_def_nodes=None, is_last_run=True):\n if function_def_nodes is None:\n function_def_nodes = set()\n name_to_input_name, name_to_node, name_to_seq_num = _extract_graph_summary(graph_def)\n input_names, output_names = call.flattened_inputs_and_outputs()\n reachable_by_input = _bfs_for_reachable_nodes(input_names, name_to_input_name)\n reachable_by_output = _bfs_for_reachable_nodes(output_names, name_to_input_name)\n output_nodes_set = set(output_names)\n nodes_after_fuse = []\n nodes_deleted_by_fuse = set()\n for node in graph_def.node:\n n = _tensor_name_base(node.name)\n if n in reachable_by_output:\n if n not in reachable_by_input and n not in output_nodes_set:\n nodes_deleted_by_fuse.add(n)\n elif n not in reachable_by_input and n not in function_def_nodes:\n nodes_after_fuse.append(n)\n elif not is_last_run:\n nodes_after_fuse.append(n)\n out = _graph_pb2.GraphDef()\n reachable_by_input_sorted = sorted(list(reachable_by_input), key=lambda n: name_to_seq_num[n])\n for node in reachable_by_input_sorted:\n out.node.extend([_copy.deepcopy(name_to_node[node])])\n sorted_input_indices = list(call.inputs.keys())\n sorted_input_indices.sort()\n sorted_output_indices = list(call.outputs.keys())\n sorted_output_indices.sort()\n new_node = _node_def_pb2.NodeDef()\n optional_input_node = _node_def_pb2.NodeDef()\n optional_input_node.name = 'Const' + str(_uuid.uuid1().hex)\n optional_input_node.op = 'Const'\n optional_input_node.attr['dtype'].CopyFrom(_attr_value_pb2.AttrValue(type=_dtypes.float32.as_datatype_enum))\n optional_input_node.attr['value'].CopyFrom(_attr_value_pb2.AttrValue(tensor=_tensor_util.make_tensor_proto([-1], _dtypes.float32, [1])))\n out.node.extend([optional_input_node])\n max_index = max(sorted_input_indices) + 1\n for cur_index in range(max_index):\n if cur_index in sorted_input_indices:\n inputs = call.inputs[cur_index]\n input_name = inputs.aggregate_and_return_name_for_input(out)\n new_node.input.append(input_name)\n else:\n new_node.input.append(optional_input_node.name)\n new_node.attr[OpHint.TFLITE_INPUT_INDICES].list.i.extend(sorted_input_indices)\n new_node.op = call.function_name\n new_node.name = call.uuid\n out.node.extend([new_node])\n output_dtypes = []\n max_output_index = max(sorted_output_indices) + 1\n for cur_index in range(max_output_index):\n if cur_index in sorted_output_indices:\n output = call.outputs[cur_index]\n output_dtype = output.aggregate_and_return_name_for_output(new_node.name, cur_index, out)\n else:\n output_dtype = optional_input_node.attr['type'].i\n output_dtypes.append(output_dtype)\n new_node.attr['_output_types'].list.type[:] = output_dtypes\n new_node.attr['_output_quantized'].b = False\n for n in nodes_after_fuse:\n 
should_keep = True\n for input_name in name_to_input_name[n]:\n if input_name in nodes_deleted_by_fuse:\n should_keep = False\n if should_keep:\n out.node.extend([_copy.deepcopy(name_to_node[n])])\n out.library.CopyFrom(graph_def.library)\n out.versions.CopyFrom(graph_def.versions)\n return out", "docstring": "Given a graph_def, converts `call` into a stub and returns a new graph_def.\n\nArgs:\ncall: A single function call to be converted.\ngraph_def: A graph_def to use as input (that has call obviously).\nfunction_def_nodes: Nodes inside the function def those are not connected to\nthe graph.\nis_last_run: Whether it is the last run for a given pass (for OpHint has\nchildren).\n\nReturns:\nA new transformed graph-def that has call as a stub (single op).\n\nNote: after this process, the graph_def can no longer be loaded into\nthe tensorflow runtime, so all future manipulations are done in graph_def\nlevel.", "source": "github-repos"} {"code": "def _usage_id_from_node(self, node, parent_id, id_generator=None):\n \n if id_generator is not None:\n warnings.warn(\n \"Passing an id_generator directly is deprecated \"\n \"in favor of constructing the Runtime with the id_generator\",\n DeprecationWarning,\n stacklevel=3,\n )\n\n id_generator = id_generator or self.id_generator\n\n block_type = node.tag\n \n node.attrib.pop('xblock-family', None)\n \n def_id = id_generator.create_definition(block_type)\n usage_id = id_generator.create_usage(def_id)\n keys = ScopeIds(None, block_type, def_id, usage_id)\n block_class = self.mixologist.mix(self.load_block_type(block_type))\n \n aside_children = []\n for child in node.iterchildren():\n \n xblock_family = child.attrib.pop('xblock-family', None)\n if xblock_family:\n xblock_family = self._family_id_to_superclass(xblock_family)\n if issubclass(xblock_family, XBlockAside):\n aside_children.append(child)\n \n for child in aside_children:\n self._aside_from_xml(child, def_id, usage_id, id_generator)\n node.remove(child)\n block = block_class.parse_xml(node, self, keys, id_generator)\n block.parent = parent_id\n block.save()\n return usage_id", "docstring": "Create a new usage id from an XML dom node.\n\nArgs:\nnode (lxml.etree.Element): The DOM node to interpret.\nparent_id: The usage ID of the parent block\nid_generator (IdGenerator): The :class:`.IdGenerator` to use\nfor creating ids", "source": "juraj-google-style"} {"code": "def _clone_helper(op_to_clone, variant_tensor_ops):\n remap_dict = {}\n for input_tensor in op_to_clone.inputs:\n input_tensor_op = input_tensor.op\n if input_tensor_op in variant_tensor_ops:\n recursive_map = _clone_helper(input_tensor_op, variant_tensor_ops)\n remap_dict.update(recursive_map)\n inputs_list = []\n for input_tensor in op_to_clone.inputs:\n input_tensor_op = input_tensor.op\n if input_tensor_op in remap_dict:\n remapped_input = remap_dict[input_tensor_op].outputs[0]\n inputs_list.append(remapped_input)\n else:\n inputs_list.append(input_tensor_op.outputs[input_tensor.value_index])\n g = ops.get_default_graph()\n new_op = g.create_op(op_to_clone.type, inputs_list, [o.dtype for o in op_to_clone.outputs], name=op_to_clone.name, attrs=op_to_clone.node_def.attr, op_def=_get_op_def(op_to_clone))\n remap_dict[op_to_clone] = new_op\n return remap_dict", "docstring": "Helper method that recursively clones `op_to_clone`.\n\nArgs:\nop_to_clone: The op we want to clone.\nvariant_tensor_ops: A list of ops that we have to clone along the way.\n\nReturns:\nA dictionary mapping old_ops to new_ops created. 
Includes op_to_clone\nas a key.", "source": "github-repos"} {"code": "def create_snapshot(self, volume_id_or_uri, snapshot, timeout=-1):\n \n uri = self.__build_volume_snapshot_uri(volume_id_or_uri)\n\n return self._client.create(snapshot, uri=uri, timeout=timeout, default_values=self.DEFAULT_VALUES_SNAPSHOT)", "docstring": "Creates a snapshot for the specified volume.\n\nArgs:\nvolume_id_or_uri:\nCan be either the volume ID or the volume URI.\nsnapshot (dict):\nObject to create.\ntimeout:\nTimeout in seconds. Wait for task completion by default. The timeout does not abort the operation in\nOneView, just stops waiting for its completion.\n\nReturns:\ndict: Storage volume.", "source": "juraj-google-style"} {"code": "def get_all_profiles(store='local'):\n \n return {\n 'Domain Profile': get_all_settings(profile='domain', store=store),\n 'Private Profile': get_all_settings(profile='private', store=store),\n 'Public Profile': get_all_settings(profile='public', store=store)\n }", "docstring": "Gets all properties for all profiles in the specified store\n\nArgs:\n\nstore (str):\nThe store to use. This is either the local firewall policy or the\npolicy defined by local group policy. Valid options are:\n\n- lgpo\n- local\n\nDefault is ``local``\n\nReturns:\ndict: A dictionary containing the specified settings for each profile", "source": "juraj-google-style"} {"code": "def _plot(self, axes_list):\n \n\n axes = axes_list[0]\n\n if self.plot_settings:\n axes.imshow(self.data['image_data'], cmap=self.plot_settings['cmap'], interpolation=self.plot_settings['interpol'], extent=self.data['extent'])\n axes.set_xlabel(self.plot_settings['xlabel'])\n axes.set_ylabel(self.plot_settings['ylabel'])\n axes.set_title(self.plot_settings['title'])\n\n self._update(axes_list)", "docstring": "Plots a dot on top of each selected NV, with a corresponding number denoting the order in which the NVs are\nlisted.\nPrecondition: must have an existing image in figure_list[0] to plot over\nArgs:\nfigure_list:", "source": "juraj-google-style"} {"code": "def is_enrolled(self, username, course_run_id):\n enrollment = self.get_course_enrollment(username, course_run_id)\n return ((enrollment is not None) and enrollment.get('is_active', False))", "docstring": "Query the enrollment API and determine if a learner is enrolled in a course run.\n\nArgs:\nusername (str): The username by which the user goes on the OpenEdX platform\ncourse_run_id (str): The string value of the course's unique identifier\n\nReturns:\nbool: Indicating whether the user is enrolled in the course run. 
Returns False under any errors.", "source": "codesearchnet"} {"code": "def run(self, data):\n result_type = namedtuple('Result', 'code messages')\n if (self.passes is True):\n result = result_type(Checker.Code.PASSED, '')\n elif (self.passes is False):\n if self.allow_failure:\n result = result_type(Checker.Code.IGNORED, '')\n else:\n result = result_type(Checker.Code.FAILED, '')\n else:\n try:\n result = self.check(data, **self.arguments)\n messages = ''\n if isinstance(result, tuple):\n (result, messages) = result\n if (result not in Checker.Code):\n result = (Checker.Code.PASSED if bool(result) else Checker.Code.FAILED)\n if ((result == Checker.Code.FAILED) and self.allow_failure):\n result = Checker.Code.IGNORED\n result = result_type(result, messages)\n except NotImplementedError:\n result = result_type(Checker.Code.NOT_IMPLEMENTED, '')\n self.result = result", "docstring": "Run the check method and format the result for analysis.\n\nArgs:\ndata (DSM/DMM/MDM): DSM/DMM/MDM instance to check.\n\nReturns:\ntuple (int, str): status constant from Checker class and messages.", "source": "codesearchnet"} {"code": "def from_celery(cls, worker_name, job_dict, celery_app):\n \n if not isinstance(job_dict, dict) or 'id' not in job_dict:\n raise JobStatInvalid('The job description is missing important fields.')\n\n async_result = AsyncResult(id=job_dict['id'], app=celery_app)\n a_info = async_result.info if isinstance(async_result.info, dict) else None\n\n return JobStats(\n name=a_info.get('name', '') if a_info is not None else '',\n job_id=job_dict['id'],\n job_type=a_info.get('type', '') if a_info is not None else '',\n workflow_id=a_info.get('workflow_id', '') if a_info is not None else '',\n queue=a_info.get('queue', '') if a_info is not None else '',\n start_time=a_info.get('start_time', None) if a_info is not None else None,\n arguments=a_info.get('arguments', {}) if a_info is not None else {},\n acknowledged=job_dict['acknowledged'],\n func_name=job_dict['type'],\n hostname=job_dict['hostname'],\n worker_name=worker_name,\n worker_pid=job_dict['worker_pid'],\n routing_key=job_dict['delivery_info']['routing_key']\n )", "docstring": "Create a JobStats object from the dictionary returned by celery.\n\nArgs:\nworker_name (str): The name of the worker this jobs runs on.\njob_dict (dict): The dictionary as returned by celery.\ncelery_app: Reference to a celery application object.\n\nReturns:\nJobStats: A fully initialized JobStats object.", "source": "juraj-google-style"} {"code": "def _input_valid(input_, operation, message, output_condition_uri=None):\n ccffill = input_.fulfillment\n try:\n parsed_ffill = Fulfillment.from_uri(ccffill.serialize_uri())\n except (TypeError, ValueError, ParsingError, ASN1DecodeError, ASN1EncodeError):\n return False\n if (operation == Transaction.CREATE):\n output_valid = True\n else:\n output_valid = (output_condition_uri == ccffill.condition_uri)\n message = sha3_256(message.encode())\n if input_.fulfills:\n message.update('{}{}'.format(input_.fulfills.txid, input_.fulfills.output).encode())\n ffill_valid = parsed_ffill.validate(message=message.digest())\n return (output_valid and ffill_valid)", "docstring": "Validates a single Input against a single Output.\n\nNote:\nIn case of a `CREATE` Transaction, this method\ndoes not validate against `output_condition_uri`.\n\nArgs:\ninput_ (:class:`~bigchaindb.common.transaction.\nInput`) The Input to be signed.\noperation (str): The type of Transaction.\nmessage (str): The fulfillment message.\noutput_condition_uri 
(str, optional): An Output to check the\nInput against.\n\nReturns:\nbool: If the Input is valid.", "source": "codesearchnet"} {"code": "def submit(self, cmd_string, blocksize, tasks_per_node, job_name=\"parsl\"):\n \n if not self.resources:\n cur_timestamp = str(time.time() * 1000).split(\".\")[0]\n job_name = \"{0}-{1}\".format(job_name, cur_timestamp)\n\n if not self.deployment_name:\n deployment_name = '{}-deployment'.format(job_name)\n else:\n deployment_name = '{}-{}-deployment'.format(self.deployment_name,\n cur_timestamp)\n\n formatted_cmd = template_string.format(command=cmd_string,\n worker_init=self.worker_init)\n\n self.deployment_obj = self._create_deployment_object(job_name,\n self.image,\n deployment_name,\n cmd_string=formatted_cmd,\n replicas=self.init_blocks,\n volumes=self.persistent_volumes)\n logger.debug(\"Deployment name :{}\".format(deployment_name))\n self._create_deployment(self.deployment_obj)\n self.resources[deployment_name] = {'status': 'RUNNING',\n 'pods': self.init_blocks}\n\n return deployment_name", "docstring": "Submit a job\nArgs:\n- cmd_string :(String) - Name of the container to initiate\n- blocksize :(float) - Number of replicas\n- tasks_per_node (int) : command invocations to be launched per node\n\nKwargs:\n- job_name (String): Name for job, must be unique\nReturns:\n- None: At capacity, cannot provision more\n- job_id: (string) Identifier for the job", "source": "juraj-google-style"} {"code": "def memory_usage(self, string=False):\n \n if string:\n n = getsizeof(self)\n return ' '.join((str(s) for s in convert_bytes(n)))\n return self.info()['size']", "docstring": "Get the memory usage estimate of the container.\n\nArgs:\nstring (bool): Human readable string (default false)\n\nSee Also:\n:func:`~exa.core.container.Container.info`", "source": "juraj-google-style"} {"code": "def get_body(name):\n try:\n (body, propag) = _bodies[name.lower()]\n body.propagate = propag.propagate\n except KeyError as e:\n raise UnknownBodyError(e.args[0])\n return body", "docstring": "Retrieve a given body orbits and parameters\n\nArgs:\nname (str): Object name\nReturn:\nBody:", "source": "codesearchnet"} {"code": "def truncate(text, max_len=350, end='...'):\n if (len(text) <= max_len):\n return text\n return (text[:max_len].rsplit(' ', maxsplit=1)[0] + end)", "docstring": "Truncate the supplied text for display.\n\nArguments:\ntext (:py:class:`str`): The text to truncate.\nmax_len (:py:class:`int`, optional): The maximum length of the\ntext before truncation (defaults to 350 characters).\nend (:py:class:`str`, optional): The ending to use to show that\nthe text was truncated (defaults to ``'...'``).\n\nReturns:\n:py:class:`str`: The truncated text.", "source": "codesearchnet"} {"code": "def validate_labels(known_classes, passed_labels, argument_name):\n known_classes = np.array(known_classes)\n passed_labels = np.array(passed_labels)\n (unique_labels, unique_indexes) = np.unique(passed_labels, return_index=True)\n if (len(passed_labels) != len(unique_labels)):\n indexes = np.arange(0, len(passed_labels))\n duplicate_indexes = indexes[(~ np.in1d(indexes, unique_indexes))]\n duplicate_labels = [str(x) for x in passed_labels[duplicate_indexes]]\n msg = 'The following duplicate labels were passed into {0}: {1}'.format(argument_name, ', '.join(duplicate_labels))\n raise ValueError(msg)\n passed_labels_absent = (~ np.in1d(passed_labels, known_classes))\n if np.any(passed_labels_absent):\n absent_labels = [str(x) for x in passed_labels[passed_labels_absent]]\n msg = 'The 
following labels were passed into {0}, but were not found in labels: {1}'.format(argument_name, ', '.join(absent_labels))\n raise ValueError(msg)\n return", "docstring": "Validates the labels passed into the true_labels or pred_labels\narguments in the plot_confusion_matrix function.\n\nRaises a ValueError exception if any of the passed labels are not in the\nset of known classes or if there are duplicate labels. Otherwise returns\nNone.\n\nArgs:\nknown_classes (array-like):\nThe classes that are known to appear in the data.\npassed_labels (array-like):\nThe labels that were passed in through the argument.\nargument_name (str):\nThe name of the argument being validated.\n\nExample:\n>>> known_classes = [\"A\", \"B\", \"C\"]\n>>> passed_labels = [\"A\", \"B\"]\n>>> validate_labels(known_classes, passed_labels, \"true_labels\")", "source": "codesearchnet"} {"code": "def WriteOutput(title, locations, limit, f):\n \n output_prefix = % locals()\n\n output_suffix = % locals()\n\n f.write(transitfeed.EncodeUnicode(output_prefix))\n for source, destination in zip(locations[0:limit], locations[1:limit + 1]):\n f.write(transitfeed.EncodeUnicode(\"
  • %s\\n\" %\n LatLngsToGoogleLink(source, destination)))\n f.write(transitfeed.EncodeUnicode(output_suffix))", "docstring": "Write html to f for up to limit trips between locations.\n\nArgs:\ntitle: String used in html title\nlocations: list of (lat, lng) tuples\nlimit: maximum number of queries in the html\nf: a file object", "source": "juraj-google-style"} {"code": "def _create_candidates(self, n=1000):\n if self.grid:\n return self._candidates_from_grid(n)\n else:\n return self._random_candidates(n)", "docstring": "Generate random hyperparameter vectors\n\nArgs:\nn (int, optional): number of candidates to generate. Defaults to 1000.\n\nReturns:\ncandidates (np.array): Array of candidate hyperparameter vectors with shape\n(n_samples, len(tunables))", "source": "codesearchnet"} {"code": "def _SkipFieldValue(tokenizer):\n if tokenizer.TryConsumeByteString():\n while tokenizer.TryConsumeByteString():\n pass\n return\n if ((not tokenizer.TryConsumeIdentifier()) and (not _TryConsumeInt64(tokenizer)) and (not _TryConsumeUint64(tokenizer)) and (not tokenizer.TryConsumeFloat())):\n raise ParseError(('Invalid field value: ' + tokenizer.token))", "docstring": "Skips over a field value.\n\nArgs:\ntokenizer: A tokenizer to parse the field name and values.\n\nRaises:\nParseError: In case an invalid field value is found.", "source": "codesearchnet"} {"code": "def get_contingency_tables(self):\n return np.array([ContingencyTable(*ct) for ct in self.contingency_tables.values])", "docstring": "Create an Array of ContingencyTable objects for each probability threshold.\n\nReturns:\nArray of ContingencyTable objects", "source": "codesearchnet"} {"code": "def vec_to_surface(vec):\n \n miller = [None] * 3\n index = []\n for i, value in enumerate(vec):\n if abs(value) < 1.e-8:\n miller[i] = 0\n else:\n index.append(i)\n if len(index) == 1:\n miller[index[0]] = 1\n else:\n min_index = np.argmin([i for i in vec if i != 0])\n true_index = index[min_index]\n index.pop(min_index)\n frac = []\n for i, value in enumerate(index):\n frac.append(Fraction(vec[value] / vec[true_index]).limit_denominator(100))\n if len(index) == 1:\n miller[true_index] = frac[0].denominator\n miller[index[0]] = frac[0].numerator\n else:\n com_lcm = lcm(frac[0].denominator, frac[1].denominator)\n miller[true_index] = com_lcm\n miller[index[0]] = frac[0].numerator * int(round((com_lcm / frac[0].denominator)))\n miller[index[1]] = frac[1].numerator * int(round((com_lcm / frac[1].denominator)))\n return miller", "docstring": "Transform a float vector to a surface miller index with integers.\n\nArgs:\nvec (1 by 3 array float vector): input float vector\nReturn:\nthe surface miller index of the input vector.", "source": "juraj-google-style"} {"code": "def _extract_image_batch(images, num_images, batch_size):\n if len(ops.shape(images)) != 4:\n raise ValueError('`plot_images_gallery()` requires you to batch your `np.array` samples together.')\n num_samples = min(num_images, batch_size)\n sample = images[:num_samples, ...]\n return sample", "docstring": "Extracts a batch of images for plotting.\n\nArgs:\nimages: The 4D tensor or NumPy array of images.\nnum_images: The number of images to extract.\nbatch_size: The original batch size of the images.\n\nReturns:\nA 4D tensor or NumPy array containing the extracted images.\n\nRaises:\nValueError: If `images` is not a 4D tensor/array.", "source": "github-repos"} {"code": "def uid(uid):\n \n if uid is None:\n raise ValueError('UID cannot be None.')\n\n def decorate(test_func):\n 
@functools.wraps(test_func)\n def wrapper(*args, **kwargs):\n return test_func(*args, **kwargs)\n\n setattr(wrapper, 'uid', uid)\n return wrapper\n\n return decorate", "docstring": "Decorator specifying the unique identifier (UID) of a test case.\n\nThe UID will be recorded in the test's record when executed by Mobly.\n\nIf you use any other decorator for the test method, you may want to use\nthis as the outer-most one.\n\nNote a common UID system is the Universal Unique Identifier (UUID), but\nwe are not limiting people to use UUID, hence the more generic name `UID`.\n\nArgs:\nuid: string, the uid for the decorated test function.", "source": "juraj-google-style"} {"code": "def aggregate(self, reducer, seed=default, result_selector=identity):\n if self.closed():\n raise ValueError('Attempt to call aggregate() on a closed Queryable.')\n if (not is_callable(reducer)):\n raise TypeError('aggregate() parameter reducer={0} is not callable'.format(repr(reducer)))\n if (not is_callable(result_selector)):\n raise TypeError('aggregate() parameter result_selector={0} is not callable'.format(repr(result_selector)))\n if (seed is default):\n try:\n return result_selector(fold(reducer, self))\n except TypeError as e:\n if ('empty sequence' in str(e)):\n raise ValueError('Cannot aggregate() empty sequence with no seed value')\n return result_selector(fold(reducer, self, seed))", "docstring": "Apply a function over a sequence to produce a single result.\n\nApply a binary function cumulatively to the elements of the source\nsequence so as to reduce the iterable to a single value.\n\nNote: This method uses immediate execution.\n\nArgs:\nreducer: A binary function the first positional argument of which\nis an accumulated value and the second is the update value from\nthe source sequence. The return value should be the new\naccumulated value after the update value has been incorporated.\n\nseed: An optional value used to initialise the accumulator before\niteration over the source sequence. If seed is omitted\nand the source sequence contains only one item, then that item\nis returned.\n\nresult_selector: An optional unary function applied to the final\naccumulator value to produce the result. If omitted, defaults\nto the identity function.\n\nRaises:\nValueError: If called on an empty sequence with no seed value.\nTypeError: If reducer is not callable.\nTypeError: If result_selector is not callable.", "source": "codesearchnet"} {"code": "def get_video_formats(self):\n if not self._video_formats:\n self._video_formats = list(self._api(iterate=True).list(profileId=self.profile_id).execute())\n return self._video_formats", "docstring": "Fetches video formats from CM.\n\nReturns:\nThe lists of video formats from CM.", "source": "github-repos"} {"code": "def send_rpc_response(self, rpc_tag, result, response):\n \n\n if rpc_tag not in self.in_flight_rpcs:\n raise ArgumentError(\"In flight RPC could not be found, it may have timed out\", rpc_tag=rpc_tag)\n\n del self.in_flight_rpcs[rpc_tag]\n\n response_message = {\n 'response': response,\n 'result': result\n }\n\n try:\n self.rpc_results.set(rpc_tag, response_message)\n except KeyError:\n self._logger.warning(\"RPC response came but no one was waiting: response=%s\", response)", "docstring": "Send a response to an RPC.\n\nArgs:\nrpc_tag (str): The exact string given in a previous call to send_rpc_command\nresult (str): The result of the operation. 
The possible values of response are:\nservice_not_found, rpc_not_found, timeout, success, invalid_response,\ninvalid_arguments, execution_exception\nresponse (bytes): The raw bytes that we should send back as a response.", "source": "juraj-google-style"} {"code": "def set_aws_clients(self, clients):\n \n if type(clients) is not dict:\n raise TypeError(\"clients must be a dict\")\n self._aws_clients = clients", "docstring": "Stash a dictionary of AWS clients in the context object\nArgs:\nclients: dictionary of clients", "source": "juraj-google-style"} {"code": "def CopyNoFail(src, root=None):\n \n if root is None:\n root = str(CFG[\"tmp_dir\"])\n src_path = local.path(root) / src\n\n if src_path.exists():\n Copy(src_path, '.')\n return True\n return False", "docstring": "Just copy fName into the current working directory, if it exists.\n\nNo action is executed, if fName does not exist. No Hash is checked.\n\nArgs:\nsrc: The filename we want to copy to '.'.\nroot: The optional source dir we should pull fName from. Defaults\nto benchbuild.settings.CFG[\"tmpdir\"].\n\nReturns:\nTrue, if we copied something.", "source": "juraj-google-style"} {"code": "def get_knowledge_base(project_id, knowledge_base_id):\n import dialogflow_v2beta1 as dialogflow\n client = dialogflow.KnowledgeBasesClient()\n knowledge_base_path = client.knowledge_base_path(project_id, knowledge_base_id)\n response = client.get_knowledge_base(knowledge_base_path)\n print('Got Knowledge Base:')\n print(' - Display Name: {}'.format(response.display_name))\n print(' - Knowledge ID: {}'.format(response.name))", "docstring": "Gets a specific Knowledge base.\n\nArgs:\nproject_id: The GCP project linked with the agent.\nknowledge_base_id: Id of the Knowledge base.", "source": "codesearchnet"} {"code": "def get_pipeline_options(project: str, job_name: str, mode: str, num_workers: int=cfg.NUM_WORKERS, streaming: bool=True) -> PipelineOptions:\n job_name = f'{job_name}-{datetime.now().strftime('%Y%m%d%H%M%S')}'\n staging_bucket = f'gs:\n dataflow_options = {'runner': 'DirectRunner' if mode == 'local' else 'DataflowRunner', 'job_name': job_name, 'project': project, 'region': cfg.REGION, 'staging_location': f'{staging_bucket}/dflow-staging', 'temp_location': f'{staging_bucket}/dflow-temp', 'setup_file': './setup.py', 'streaming': streaming}\n if num_workers:\n dataflow_options.update({'num_workers': num_workers})\n return PipelineOptions(flags=[], **dataflow_options)", "docstring": "Function to retrieve the pipeline options.\nArgs:\nproject: GCP project to run on\nmode: Indicator to run local, cloud or template\nnum_workers: Number of workers for running the job in parallel\nmax_num_workers: Maximum number of workers running the job in parallel\nReturns:\nDataflow pipeline options", "source": "github-repos"} {"code": "def delete_group_member(self, grp_name, user):\n \n self.project_service.set_auth(self._token_project)\n self.project_service.delete_group_member(grp_name, user)", "docstring": "Delete the given user from the named group.\n\nBoth group and user must already exist for this to succeed.\n\nArgs:\nname (string): Name of group.\nuser_name (string): User to delete from the group.\n\nRaises:\nrequests.HTTPError on failure.", "source": "juraj-google-style"} {"code": "def save_saved_model(file_name, sess, input_tensor, output_tensor):\n builder = tf.compat.v1.saved_model.builder.SavedModelBuilder(file_name)\n tensor_info_inputs = {'input': tf.compat.v1.saved_model.utils.build_tensor_info(input_tensor)}\n tensor_info_outputs = {'output': 
tf.compat.v1.saved_model.utils.build_tensor_info(output_tensor)}\n signature = tf.compat.v1.saved_model.signature_def_utils.build_signature_def(inputs=tensor_info_inputs, outputs=tensor_info_outputs, method_name=tf.compat.v1.saved_model.signature_constants.PREDICT_METHOD_NAME)\n builder.add_meta_graph_and_variables(sess, [tf.compat.v1.saved_model.tag_constants.SERVING], signature_def_map={tf.compat.v1.saved_model.signature_constants.DEFAULT_SERVING_SIGNATURE_DEF_KEY: signature})\n builder.save()", "docstring": "Writes a SavedModel out to disk.\n\nArgs:\nfile_name: Where to save the file.\nsess: TensorFlow session containing the graph.\ninput_tensor: Tensor object defining the input's properties.\noutput_tensor: Tensor object defining the output's properties.", "source": "github-repos"} {"code": "def _CheckKeyPath(self, registry_key, search_depth):\n \n if self._key_path_segments is None:\n return False\n\n if search_depth < 0 or search_depth > self._number_of_key_path_segments:\n return False\n\n \n \n if search_depth == 0:\n segment_name = ''\n else:\n segment_name = self._key_path_segments[search_depth - 1]\n\n if self._is_regex:\n if isinstance(segment_name, py2to3.STRING_TYPES):\n \n \n flags = re.DOTALL | re.IGNORECASE | re.UNICODE\n\n try:\n segment_name = r'^{0:s}$'.format(segment_name)\n segment_name = re.compile(segment_name, flags=flags)\n except sre_constants.error:\n \n return False\n\n self._key_path_segments[search_depth - 1] = segment_name\n\n else:\n segment_name = segment_name.lower()\n self._key_path_segments[search_depth - 1] = segment_name\n\n if search_depth > 0:\n if self._is_regex:\n \n if not segment_name.match(registry_key.name):\n return False\n\n elif segment_name != registry_key.name.lower():\n return False\n\n return True", "docstring": "Checks the key path find specification.\n\nArgs:\nregistry_key (WinRegistryKey): Windows Registry key.\nsearch_depth (int): number of key path segments to compare.\n\nReturns:\nbool: True if the Windows Registry key matches the find specification,\nFalse if not.", "source": "juraj-google-style"} {"code": "def get_unit(self, name):\n return Unit(client=self, data=self._single_request('Units.Get', unitName=name))", "docstring": "Retrieve a specific unit from the fleet cluster by name\n\nArgs:\nname (str): If specified, only this unit name is returned\n\nReturns:\nUnit: The unit identified by ``name`` in the fleet cluster\n\nRaises:\nfleet.v1.errors.APIError: Fleet returned a response code >= 400", "source": "codesearchnet"} {"code": "def get_pending_component_servicing():\n key = 'SOFTWARE\\\\Microsoft\\\\Windows\\\\CurrentVersion\\\\Component Based Servicing\\\\RebootPending'\n if __utils__['reg.key_exists']('HKLM', key):\n log.debug('Key exists: %s', key)\n return True\n else:\n log.debug('Key does not exist: %s', key)\n return False", "docstring": "Determine whether there are pending Component Based Servicing tasks that\nrequire a reboot.\n\n.. versionadded:: 2016.11.0\n\nReturns:\nbool: ``True`` if there are pending Component Based Servicing tasks,\notherwise ``False``\n\nCLI Example:\n\n.. 
code-block:: bash\n\nsalt '*' system.get_pending_component_servicing", "source": "codesearchnet"} {"code": "def get_video_features(self, pixel_values_videos: torch.FloatTensor, video_grid_thw: Optional[torch.LongTensor]=None):\n pixel_values_videos = pixel_values_videos.type(self.visual.dtype)\n video_embeds = self.visual(pixel_values_videos, grid_thw=video_grid_thw)\n split_sizes = (video_grid_thw.prod(-1) \n video_embeds = torch.split(video_embeds, split_sizes)\n return video_embeds", "docstring": "Encodes videos into continuous embeddings that can be forwarded to the language model.\n\nArgs:\npixel_values_videos (`torch.FloatTensor` of shape `(batch_size, num_channels, image_size, image_size)`):\nThe tensors corresponding to the input videos.\nvideo_grid_thw (`torch.LongTensor` of shape `(num_videos, 3)`, *optional*):\nThe temporal, height and width of feature shape of each video in LLM.", "source": "github-repos"} {"code": "def _batch_transpose(mat):\n n = distribution_util.prefer_static_rank(mat)\n perm = tf.range(n)\n perm = tf.concat([perm[:(- 2)], [perm[(- 1)], perm[(- 2)]]], axis=0)\n return tf.transpose(a=mat, perm=perm)", "docstring": "Transpose a possibly batched matrix.\n\nArgs:\nmat: A `tf.Tensor` of shape `[..., n, m]`.\n\nReturns:\nA tensor of shape `[..., m, n]` with matching batch dimensions.", "source": "codesearchnet"} {"code": "class BaseModelOutputWithIntermediateActivations(ModelOutput):\n last_hidden_states: Optional[torch.FloatTensor] = None\n intermediate_activations: Optional[Tuple[torch.FloatTensor, ...]] = None", "docstring": "Base class for model's outputs that also contains intermediate activations that can be used at later stages. Useful\nin the context of Vision models.:\n\nArgs:\nlast_hidden_state (`torch.FloatTensor` of shape `(batch_size, sequence_length, hidden_size)`):\nSequence of hidden-states at the output of the last layer of the model.\nintermediate_activations (`tuple(torch.FloatTensor)`, *optional*):\nIntermediate activations that can be used to compute hidden states of the model at various layers.", "source": "github-repos"} {"code": "def image_feature_engineering(features, feature_tensors_dict):\n \n engineered_features = {}\n for name, feature_tensor in six.iteritems(feature_tensors_dict):\n if name in features and features[name]['transform'] == IMAGE_TRANSFORM:\n with tf.name_scope(name, 'Wx_plus_b'):\n hidden = tf.contrib.layers.fully_connected(\n feature_tensor,\n IMAGE_HIDDEN_TENSOR_SIZE)\n engineered_features[name] = hidden\n else:\n engineered_features[name] = feature_tensor\n return engineered_features", "docstring": "Add a hidden layer on image features.\n\nArgs:\nfeatures: features dict\nfeature_tensors_dict: dict of feature-name: tensor", "source": "juraj-google-style"} {"code": "def AddArguments(cls, argument_group):\n \n argument_group.add_argument(\n '--analysis', metavar='PLUGIN_LIST', dest='analysis_plugins',\n default='', action='store', type=str, help=(\n 'A comma separated list of analysis plugin names to be loaded '\n 'or \"--analysis list\" to see a list of available plugins.'))\n\n arguments = sys.argv[1:]\n argument_index = 0\n\n if '--analysis' in arguments:\n argument_index = arguments.index('--analysis') + 1\n\n if 0 < argument_index < len(arguments):\n names = [name.strip() for name in arguments[argument_index].split(',')]\n else:\n names = None\n\n if names and names != ['list']:\n manager.ArgumentHelperManager.AddCommandLineArguments(\n argument_group, category='analysis', names=names)", "docstring": "Adds command 
line arguments to an argument group.\n\nThis function takes an argument parser or an argument group object and adds\nto it all the command line arguments this helper supports.\n\nArgs:\nargument_group (argparse._ArgumentGroup|argparse.ArgumentParser):\nargparse group.", "source": "juraj-google-style"} {"code": "def from_concrete_functions(cls, funcs, trackable_obj=None):\n TFLiteConverterBase._set_original_model_type(conversion_metadata_fb.ModelType.TF_CONCRETE_FUNCTIONS)\n if trackable_obj is None:\n logging.warning('Please consider providing the trackable_obj argument in the from_concrete_functions. Providing without the trackable_obj argument is deprecated and it will use the deprecated conversion path.')\n for func in funcs:\n if not isinstance(func, _function.ConcreteFunction):\n message = 'This function takes in a list of ConcreteFunction.'\n if isinstance(func, _def_function.Function):\n message += ' To get the ConcreteFunction from a Function, call get_concrete_function.'\n raise ValueError(message)\n return cls(funcs, trackable_obj)", "docstring": "Creates a TFLiteConverter object from ConcreteFunctions.\n\nArgs:\nfuncs: List of TensorFlow ConcreteFunctions. The list should not contain\nduplicate elements. Currently converter can only convert a single\nConcreteFunction. Converting multiple functions is under development.\ntrackable_obj: An `AutoTrackable` object (typically `tf.module`)\nassociated with `funcs`. A reference to this object needs to be\nmaintained so that Variables do not get garbage collected since\nfunctions have a weak reference to Variables.\n\nReturns:\nTFLiteConverter object.\n\nRaises:\nInvalid input type.", "source": "github-repos"} {"code": "def bot(self, id):\n json = self.skype.conn('GET', '{0}/agents'.format(SkypeConnection.API_BOT), params={'agentId': id}, auth=SkypeConnection.Auth.SkypeToken).json().get('agentDescriptions', [])\n return (self.merge(SkypeBotUser.fromRaw(self.skype, json[0])) if json else None)", "docstring": "Retrieve a single bot.\n\nArgs:\nid (str): UUID or username of the bot\n\nReturns:\nSkypeBotUser: resulting bot user object", "source": "codesearchnet"} {"code": "def disable_inheritance(path, objectType, copy=True):\n dc = daclConstants()\n objectType = dc.getObjectTypeBit(objectType)\n path = dc.processPath(path, objectType)\n return _set_dacl_inheritance(path, objectType, False, copy, None)", "docstring": "Disable inheritance on an object\n\nArgs:\npath: The path to the object\nobjectType: The type of object (FILE, DIRECTORY, REGISTRY)\ncopy: True will copy the Inherited ACEs to the DACL before disabling inheritance\n\nReturns (dict): A dictionary containing the results\n\nCLI Example:\n\n.. 
code-block:: bash\n\nsalt 'minion-id' win_dacl.disable_inheritance c:\\temp directory", "source": "codesearchnet"} {"code": "def update_clinvar_submission_status(self, user_id, submission_id, status):\n LOG.info('closing clinvar submission \"%s\"', submission_id)\n if (status == 'open'):\n self.clinvar_submission_collection.update_many({'user_id': user_id}, {'$set': {'status': 'closed', 'updated_at': datetime.now()}})\n updated_submission = self.clinvar_submission_collection.find_one_and_update({'_id': ObjectId(submission_id)}, {'$set': {'status': status, 'updated_at': datetime.now()}}, return_document=pymongo.ReturnDocument.AFTER)\n return updated_submission", "docstring": "Set a clinvar submission ID to 'closed'\n\nArgs:\nsubmission_id(str): the ID of the clinvar submission to close\n\nReturn\nupdated_submission(obj): the submission object with a 'closed' status", "source": "codesearchnet"} {"code": "def list_permissions(self, group_name=None, resource=None):\n \n return self.service.list_permissions(group_name, resource,\n self.url_prefix, self.auth, self.session, self.session_send_opts)", "docstring": "List permission sets associated filtering by group and/or resource.\n\nArgs:\ngroup_name (string): Name of group.\nresource (intern.resource.boss.Resource): Identifies which data model object to operate on.\n\nReturns:\n(list): List of permissions.\n\nRaises:\nrequests.HTTPError on failure.", "source": "juraj-google-style"} {"code": "def StartTiming(self, profile_name):\n if (profile_name not in self._profile_measurements):\n self._profile_measurements[profile_name] = CPUTimeMeasurement()\n self._profile_measurements[profile_name].SampleStart()", "docstring": "Starts timing CPU time.\n\nArgs:\nprofile_name (str): name of the profile to sample.", "source": "codesearchnet"} {"code": "def get_completed_task(self, task, timeout=(- 1)):\n self.__wait_task_completion(task, timeout)\n return self.get(task)", "docstring": "Waits until the task is completed and returns the task resource.\n\nArgs:\ntask: TaskResource\ntimeout: Timeout in seconds\n\nReturns:\ndict: TaskResource", "source": "codesearchnet"} {"code": "def set_max_epochs(max_epochs):\n global _MAX_EPOCHS\n _MAX_EPOCHS = max_epochs", "docstring": "Limit the maximum number of epochs for any call to fit.\n\nThis will cap the number of epochs for any training run using `model.fit()`.\nThis is purely for debugging, and can also be set via the `KERAS_MAX_EPOCHS`\nenvironment variable to quickly run a script without modifying its source.\n\nArgs:\nmax_epochs: The integer limit on the number of epochs or `None`. 
If\n`None`, no limit is applied.", "source": "github-repos"} {"code": "def get_tld(url):\n if (url not in URLHelper.__cache):\n URLHelper.__cache[url] = urlparse(url)\n parts = URLHelper.__cache[url].netloc.split('.')\n if (len(parts) == 1):\n return ''\n else:\n return parts[(- 1)]", "docstring": "Get the tld of the given URL.\n\nArgs:\nurl (str): The URL to get the tld from.\n\nReturns:\nstr: The tld", "source": "codesearchnet"} {"code": "def smart_cond(pred, true_fn=None, false_fn=None, name=None):\n if isinstance(pred, variables.Variable):\n return cond.cond(pred, true_fn=true_fn, false_fn=false_fn, name=name)\n return smart_module.smart_cond(pred, true_fn=true_fn, false_fn=false_fn, name=name)", "docstring": "Return either `true_fn()` if predicate `pred` is true else `false_fn()`.\n\nIf `pred` is a bool or has a constant value, we return either `true_fn()`\nor `false_fn()`, otherwise we use `tf.cond` to dynamically route to both.\n\nArgs:\npred: A scalar determining whether to return the result of `true_fn` or\n`false_fn`.\ntrue_fn: The callable to be performed if pred is true.\nfalse_fn: The callable to be performed if pred is false.\nname: Optional name prefix when using `tf.cond`.\n\nReturns:\nTensors returned by the call to either `true_fn` or `false_fn`.\n\nRaises:\nTypeError: If `true_fn` or `false_fn` is not callable.", "source": "github-repos"} {"code": "def post(self, path, params=None, timeout=None, event_timeout=None):\n future = self.post_async(path, params)\n self.wait_all_futures(future, timeout=timeout, event_timeout=event_timeout)\n return future.result()", "docstring": "Synchronously calls a method\n\nArgs:\npath (list): The path to post to\nparams (dict): parameters for the call\ntimeout (float): time in seconds to wait for responses, wait\nforever if None\nevent_timeout: maximum time in seconds to wait between each response\nevent, wait forever if None\n\nReturns:\nthe result from 'method'", "source": "codesearchnet"} {"code": "def parse_log(file_path):\n \n if not os.path.isfile(file_path):\n return elements.error(\"Output Log\", \"Could not open file: \" + file_path.split(os.sep)[-1])\n\n headers = [\"Converged Iterations\",\n \"Avg. Iterations to Converge\",\n \"Processor Count\",\n \"Dycore Type\"]\n\n with open(file_path, 'r') as f:\n dycore_types = {\"0\": \"Glide\",\n \"1\": \"Glam\",\n \"2\": \"Glissade\",\n \"3\": \"Albany_felix\",\n \"4\": \"BISICLES\"}\n curr_step = 0\n proc_count = 0\n iter_number = 0\n converged_iters = []\n iters_to_converge = []\n for line in f:\n split = line.split()\n if ('CISM dycore type' in line):\n if line.split()[-1] == '=':\n dycore_type = dycore_types[next(f).strip()]\n else:\n dycore_type = dycore_types[line.split()[-1]]\n elif ('total procs' in line):\n proc_count += int(line.split()[-1])\n elif ('Nonlinear Solver Step' in line):\n curr_step = int(line.split()[4])\n elif ('Compute ice velocities, time = ' in line):\n converged_iters.append(curr_step)\n curr_step = float(line.split()[-1])\n elif ('\"SOLVE_STATUS_CONVERGED\"' in line):\n split = line.split()\n iters_to_converge.append(int(split[split.index('\"SOLVE_STATUS_CONVERGED\"') + 2]))\n elif (\"Compute dH/dt\" in line):\n iters_to_converge.append(int(iter_number))\n elif len(split) > 0 and split[0].isdigit():\n iter_number = split[0]\n if iters_to_converge == []:\n iters_to_converge.append(int(iter_number))\n data = {\n \"Dycore Type\": dycore_type,\n \"Processor Count\": proc_count,\n \"Converged Iterations\": len(converged_iters),\n \"Avg. 
Iterations to Converge\": np.mean(iters_to_converge)\n }\n return elements.table(\"Output Log\", headers, data)", "docstring": "Parse a CISM output log and extract some information.\n\nArgs:\nfile_path: absolute path to the log file\n\nReturn:\nA dictionary created by the elements object corresponding to\nthe results of the bit for bit testing", "source": "juraj-google-style"} {"code": "def __init__(self, url, urlSchemes=None):\n \n self._urlApi = url\n self._urlSchemes = {}\n self._initRequestHeaders()\n self._urllib = urllib2\n\n if urlSchemes is not None:\n for urlScheme in urlSchemes:\n self.addUrlScheme(urlScheme)\n\n self._implicitFormat = self._urlApi.find('{format}') != -1", "docstring": "Create a new OEmbedEndpoint object.\n\nArgs:\nurl: The url of a provider API (API endpoint).\nurlSchemes: A list of URL schemes for this endpoint.", "source": "juraj-google-style"} {"code": "def find_write(driver, elem_path, write_str, clear_first=True, send_enter=False, by=CSS, timeout=TIMEOUT, poll_frequency=0.5):\n elem = find_element(driver, elem_path=elem_path, by=by, timeout=timeout, poll_frequency=poll_frequency)\n if clear_first:\n elem.clear()\n elem.send_keys(write_str)\n if send_enter:\n elem.send_keys(Keys.ENTER)\n return elem", "docstring": "Find a writable element and write to it\n\nfind_write locates a writable element on the page, waiting\nfor up to timeout seconds. Once found, it writes the string\nto it.\n\nArgs:\ndriver (selenium webdriver or element): A driver or element\nelem_path (str): String used to located the element\nwrite_str (str): String to write\nclear_first (bool): Clear the contents before writing (default True)\nsend_enter (bool): Send a keyboard ENTER after writing string\nby (selenium By): Selenium By reference\ntimeout (int): Selenium Wait timeout, in seconds\npoll_frequency (float): Selenium Wait polling frequency, in seconds\n\nReturns:\nelement: Selenium element\n\nRaises:\nTimeoutException: Raised when target element isn't located", "source": "codesearchnet"} {"code": "def raster_statistics(raster_file):\n \n ds = gdal_Open(raster_file)\n band = ds.GetRasterBand(1)\n minv, maxv, meanv, std = band.ComputeStatistics(False)\n return minv, maxv, meanv, std", "docstring": "Get basic statistics of raster data.\n\nArgs:\nraster_file: raster file path.\n\nReturns:\nmin, max, mean, std.", "source": "juraj-google-style"} {"code": "def _expand_variables(input_str, cmake_vars):\n\n def replace(match):\n if match.group(1) in cmake_vars:\n return cmake_vars[match.group(1)]\n return ''\n return _CMAKE_ATVAR_REGEX.sub(replace, _CMAKE_VAR_REGEX.sub(replace, input_str))", "docstring": "Expands ${VARIABLE}s and @VARIABLE@s in 'input_str', using dictionary 'cmake_vars'.\n\nArgs:\ninput_str: the string containing ${VARIABLE} or @VARIABLE@ expressions to expand.\ncmake_vars: a dictionary mapping variable names to their values.\n\nReturns:\nThe expanded string.", "source": "github-repos"} {"code": "def assemble_data(data_dfs, concat_direction):\n if (concat_direction == 'horiz'):\n all_data_df = pd.concat(data_dfs, axis=1)\n n_cols = all_data_df.shape[1]\n logger.debug('all_data_df.shape[1]: {}'.format(n_cols))\n n_cols_cumulative = sum([df.shape[1] for df in data_dfs])\n assert (n_cols == n_cols_cumulative)\n elif (concat_direction == 'vert'):\n all_data_df = pd.concat(data_dfs, axis=0)\n n_rows = all_data_df.shape[0]\n logger.debug('all_data_df.shape[0]: {}'.format(n_rows))\n n_rows_cumulative = sum([df.shape[0] for df in data_dfs])\n assert (n_rows == n_rows_cumulative)\n 
all_data_df_sorted = all_data_df.sort_index(axis=0).sort_index(axis=1)\n return all_data_df_sorted", "docstring": "Assemble the data dfs together. Both indices are sorted.\n\nArgs:\ndata_dfs (list of pandas dfs)\nconcat_direction (string): 'horiz' or 'vert'\n\nReturns:\nall_data_df_sorted (pandas df)", "source": "codesearchnet"} {"code": "def MakeCACert(private_key, common_name=u'grr', issuer_cn=u'grr_test', issuer_c=u'US'):\n public_key = private_key.GetPublicKey()\n builder = x509.CertificateBuilder()\n issuer = x509.Name([x509.NameAttribute(oid.NameOID.COMMON_NAME, issuer_cn), x509.NameAttribute(oid.NameOID.COUNTRY_NAME, issuer_c)])\n subject = x509.Name([x509.NameAttribute(oid.NameOID.COMMON_NAME, common_name)])\n builder = builder.subject_name(subject)\n builder = builder.issuer_name(issuer)\n valid_from = (rdfvalue.RDFDatetime.Now() - rdfvalue.Duration('1d'))\n valid_until = (rdfvalue.RDFDatetime.Now() + rdfvalue.Duration('3650d'))\n builder = builder.not_valid_before(valid_from.AsDatetime())\n builder = builder.not_valid_after(valid_until.AsDatetime())\n builder = builder.serial_number(1)\n builder = builder.public_key(public_key.GetRawPublicKey())\n builder = builder.add_extension(x509.BasicConstraints(ca=True, path_length=None), critical=True)\n builder = builder.add_extension(x509.SubjectKeyIdentifier.from_public_key(public_key.GetRawPublicKey()), critical=False)\n certificate = builder.sign(private_key=private_key.GetRawPrivateKey(), algorithm=hashes.SHA256(), backend=openssl.backend)\n return rdf_crypto.RDFX509Cert(certificate)", "docstring": "Generate a CA certificate.\n\nArgs:\nprivate_key: The private key to use.\ncommon_name: Name for cert.\nissuer_cn: Name for issuer.\nissuer_c: Country for issuer.\n\nReturns:\nThe certificate.", "source": "codesearchnet"} {"code": "def load_repo_addons(_globals):\n \n repos_dir = os.path.expanduser('~/.fabsetup-addon-repos')\n if os.path.isdir(repos_dir):\n basedir, repos, _ = next(os.walk(repos_dir))\n for repo_dir in [os.path.join(basedir, repo)\n for repo in repos\n \n \n if '.' 
not in repo]:\n sys.path.append(repo_dir)\n package_name, username = package_username(repo_dir.split('/')[-1])\n load_addon(username, package_name, _globals)", "docstring": "Load all fabsetup addons which are stored under ~/.fabsetup-addon-repos\nas git repositories.\n\nArgs:\n_globals(dict): the globals() namespace of the fabric script.\n\nReturn: None", "source": "juraj-google-style"} {"code": "def inner(*args):\n haspoly = sum([isinstance(arg, Poly) for arg in args])\n if (not haspoly):\n return numpy.sum(numpy.prod(args, 0), 0)\n out = args[0]\n for arg in args[1:]:\n out = (out * arg)\n return sum(out)", "docstring": "Inner product of a polynomial set.\n\nArgs:\nargs (chaospy.poly.base.Poly):\nThe polynomials to perform inner product on.\n\nReturns:\n(chaospy.poly.base.Poly):\nResulting polynomial.\n\nExamples:\n>>> x,y = cp.variable(2)\n>>> P = cp.Poly([x-1, y])\n>>> Q = cp.Poly([x+1, x*y])\n>>> print(cp.inner(P, Q))\nq0^2+q0q1^2-1\n>>> x = numpy.arange(4)\n>>> print(cp.inner(x, x))\n14", "source": "codesearchnet"} {"code": "def create_cloudtrail(self, region):\n \n ct = self.session.client('cloudtrail', region_name=region)\n\n \n self.create_sns_topic(region)\n\n ct.create_trail(\n Name=self.trail_name,\n S3BucketName=self.bucket_name,\n S3KeyPrefix=self.account.account_name,\n IsMultiRegionTrail=True,\n IncludeGlobalServiceEvents=True,\n SnsTopicName=self.topic_name\n )\n self.subscribe_sns_topic_to_sqs(region)\n\n auditlog(\n event='cloudtrail.create_cloudtrail',\n actor=self.ns,\n data={\n 'account': self.account.account_name,\n 'region': region\n }\n )\n self.log.info('Created CloudTrail for {} in {} ({})'.format(self.account, region, self.bucket_name))", "docstring": "Creates a new CloudTrail Trail\n\nArgs:\nregion (str): Name of the AWS region\n\nReturns:\n`None`", "source": "juraj-google-style"} {"code": "def expect_equal(first, second, msg=None, extras=None):\n try:\n asserts.assert_equal(first, second, msg, extras)\n except signals.TestSignal as e:\n logging.exception('Expected %s equals to %s, but they are not.', first, second)\n recorder.add_error(e)", "docstring": "Expects the equality of objects, otherwise fail the test.\n\nIf the expectation is not met, the test is marked as fail after its\nexecution finishes.\n\nError message is \"first != second\" by default. 
Additional explanation can\nbe supplied in the message.\n\nArgs:\nfirst: The first object to compare.\nsecond: The second object to compare.\nmsg: A string that adds additional info about the failure.\nextras: An optional field for extra information to be included in test\nresult.", "source": "codesearchnet"} {"code": "def __init__(self, start, end, source_bundles):\n super().__init__()\n self._start = start\n self._end = end\n self._source_bundles = source_bundles\n self._lock = threading.RLock()\n self._range_trackers = [None] * len(source_bundles)\n self._claimed_source_ix = self._start[0]\n last = end[0] if end[1] is None else end[0] + 1\n self._cumulative_weights = [0] * start[0] + self._compute_cumulative_weights(source_bundles[start[0]:last]) + [1] * (len(source_bundles) - last - start[0])", "docstring": "Initializes ``ConcatRangeTracker``\n\nArgs:\nstart: start position, a tuple of (source_index, source_position)\nend: end position, a tuple of (source_index, source_position)\nsource_bundles: the list of source bundles in the ConcatSource", "source": "github-repos"} {"code": "def write_supercells_with_displacements(supercell, cells_with_disps, filename=\"geo.gen\"):\n \n\n \n write_dftbp(filename + \"S\", supercell)\n\n \n for ii in range(len(cells_with_disps)):\n write_dftbp(filename + \"S-{:03d}\".format(ii+1), cells_with_disps[ii])", "docstring": "Writes perfect supercell and supercells with displacements\n\nArgs:\nsupercell: perfect supercell\ncells_with_disps: supercells with displaced atoms\nfilename: root-filename", "source": "juraj-google-style"} {"code": "def __init__(self, log_dir):\n \n \n if not gfile.isdir(log_dir):\n gfile.makedirs(log_dir)\n\n self._event_writer = EventFileWriter(log_dir, 10, 120, None)\n self._step = 0\n self._closed = False", "docstring": "Create a new SummaryWriter.\n\nArgs:\nlog_dir: path to record tfevents files in.", "source": "juraj-google-style"} {"code": "def __init__(self, input_energy: energy.BitstringEnergy, num_expectation_samples: int, initial_seed: Union[None, tf.Tensor]=None, name: Union[None, str]=None):\n super().__init__(input_energy, num_expectation_samples, initial_seed, name)\n self._all_bitstrings = tf.constant(list(itertools.product([0, 1], repeat=input_energy.num_bits)), dtype=tf.int8)\n self._logits_variable = tf.Variable(-input_energy(self.all_bitstrings), trainable=False)\n self._distribution = tfd.Categorical(logits=self._logits_variable)", "docstring": "Initializes an AnalyticEnergyInference.\n\nInternally, this class saves all possible bitstrings as a tensor, whose\nenergies are calculated relative to an input energy function for sampling\nand other inference tasks.\n\nArgs:\ninput_energy: The parameterized energy function which defines this\ndistribution via the equations of an energy based model. This class\nassumes that all parameters of `energy` are `tf.Variable`s and that\nthey are all returned by `energy.variables`.\nnum_expectation_samples: Number of samples to draw and use for estimating\nthe expectation value.\ninitial_seed: PRNG seed; see tfp.random.sanitize_seed for details. This\nseed will be used in the `sample` method. If None, the seed is updated\nafter every inference call. 
Otherwise, the seed is fixed.\nname: Optional name for the model.", "source": "github-repos"} {"code": "def _build_toy_dataset(n: int, data_prefix='', data2_is_categorical_integer=False, num_indexes: int=10) -> tp.EventSet:\n np.random.seed(0)\n index_values = list(range(int(num_indexes)))\n timestamps = np.sort(np.random.randn(n) * n)\n index_1 = np.random.choice(index_values, n)\n index_2 = np.random.choice(index_values, n)\n data_1 = np.random.randn(n)\n if data2_is_categorical_integer:\n data_2 = np.random.choice(list(range(int(10))), n)\n else:\n data_2 = np.random.randn(n)\n return tp.from_pandas(pd.DataFrame({'timestamp': timestamps, 'index_1': index_1, 'index_2': index_2, data_prefix + 'data_1': data_1, data_prefix + 'data_2': data_2}), indexes=['index_1', 'index_2'])", "docstring": "Builds a toy dataset with two features.\n\nArgs:\nn: Number of timestamps.\ndata_prefix: Optional prefix in the feature names.\ndata2_is_categorical_integer: If true, the second feature is\ncategorical. If false (default), the second feature is numerical.\n\nReturns:\nAn EventSet containing the toy dataset.", "source": "github-repos"} {"code": "def _refine_candidate(self, width, height):\n packer = newPacker(PackingMode.Offline, PackingBin.BFF, pack_algo=self._pack_algo, sort_algo=SORT_LSIDE, rotation=self._rotation)\n packer.add_bin(width, height)\n for r in self._rectangles:\n packer.add_rect(*r)\n packer.pack()\n if (len(packer[0]) != len(self._rectangles)):\n return None\n new_height = max(packer[0], key=(lambda x: x.top)).top\n return (width, new_height, packer)", "docstring": "Use bottom-left packing algorithm to find a lower height for the\ncontainer.\n\nArguments:\nwidth\nheight\n\nReturns:\ntuple (width, height, PackingAlgorithm):", "source": "codesearchnet"} {"code": "def variables(self):\n return self.weights", "docstring": "Returns the list of all layer variables/weights.\n\nAlias of `self.weights`.\n\nNote: This will not track the weights of nested `tf.Modules` that are not\nthemselves Keras layers.\n\nReturns:\nA list of variables.", "source": "github-repos"} {"code": "def market_close(self, session, mins) -> Session:\n \n if session not in self.exch: return SessNA\n end_time = self.exch[session][-1]\n return Session(shift_time(end_time, -int(mins) + 1), end_time)", "docstring": "Time intervals for market close\n\nArgs:\nsession: [allday, day, am, pm, night]\nmins: minutes before close\n\nReturns:\nSession of start_time and end_time", "source": "juraj-google-style"} {"code": "def Verify(self, completely=False):\n \n res = super(Block, self).Verify()\n if not res:\n return False\n\n from neo.Blockchain import GetBlockchain, GetConsensusAddress\n\n \n if self.Transactions[0].Type != TransactionType.MinerTransaction:\n return False\n for tx in self.Transactions[1:]:\n if tx.Type == TransactionType.MinerTransaction:\n return False\n\n if completely:\n bc = GetBlockchain()\n\n if self.NextConsensus != GetConsensusAddress(bc.GetValidators(self.Transactions).ToArray()):\n return False\n\n for tx in self.Transactions:\n if not tx.Verify():\n pass\n logger.error(\"Blocks cannot be fully validated at this moment. please pass completely=False\")\n raise NotImplementedError()\n \n \n \n \n \n\n return True", "docstring": "Verify the integrity of the block.\n\nArgs:\ncompletely: (Not functional at this time).\n\nReturns:\nbool: True if valid. 
False otherwise.", "source": "juraj-google-style"} {"code": "def DecodeMessages(self, response_comms):\n \n \n cipher_verified = False\n try:\n cipher = self.encrypted_cipher_cache.Get(response_comms.encrypted_cipher)\n stats_collector_instance.Get().IncrementCounter(\n \"grr_encrypted_cipher_cache\", fields=[\"hits\"])\n\n \n \n cipher.VerifyReceivedHMAC(response_comms)\n cipher_verified = True\n\n \n \n source = cipher.GetSource()\n remote_public_key = self._GetRemotePublicKey(source)\n except KeyError:\n stats_collector_instance.Get().IncrementCounter(\n \"grr_encrypted_cipher_cache\", fields=[\"misses\"])\n cipher = ReceivedCipher(response_comms, self.private_key)\n\n source = cipher.GetSource()\n try:\n remote_public_key = self._GetRemotePublicKey(source)\n if cipher.VerifyCipherSignature(remote_public_key):\n \n self.encrypted_cipher_cache.Put(response_comms.encrypted_cipher,\n cipher)\n cipher_verified = True\n\n except UnknownClientCertError:\n \n remote_public_key = None\n\n \n plain = cipher.Decrypt(response_comms.encrypted, response_comms.packet_iv)\n try:\n packed_message_list = rdf_flows.PackedMessageList.FromSerializedString(\n plain)\n except rdfvalue.DecodeError as e:\n raise DecryptionError(e)\n\n message_list = self.DecompressMessageList(packed_message_list)\n\n \n \n auth_state = self.VerifyMessageSignature(\n response_comms,\n packed_message_list,\n cipher,\n cipher_verified,\n response_comms.api_version,\n remote_public_key)\n \n\n \n for msg in message_list.job:\n msg.auth_state = auth_state\n msg.source = cipher.cipher_metadata.source\n\n return (message_list.job, cipher.cipher_metadata.source,\n packed_message_list.timestamp)", "docstring": "Extract and verify server message.\n\nArgs:\nresponse_comms: A ClientCommunication rdfvalue\n\nReturns:\nlist of messages and the CN where they came from.\n\nRaises:\nDecryptionError: If the message failed to decrypt properly.", "source": "juraj-google-style"} {"code": "def check_origin(self, origin):\n from ..util import check_whitelist\n parsed_origin = urlparse(origin)\n origin_host = parsed_origin.netloc.lower()\n allowed_hosts = self.application.websocket_origins\n if settings.allowed_ws_origin():\n allowed_hosts = set(settings.allowed_ws_origin())\n allowed = check_whitelist(origin_host, allowed_hosts)\n if allowed:\n return True\n else:\n log.error(\"Refusing websocket connection from Origin '%s'; use --allow-websocket-origin=%s or set BOKEH_ALLOW_WS_ORIGIN=%s to permit this; currently we allow origins %r\", origin, origin_host, origin_host, allowed_hosts)\n return False", "docstring": "Implement a check_origin policy for Tornado to call.\n\nThe supplied origin will be compared to the Bokeh server whitelist. 
If the\norigin is not allowed, an error will be logged and ``False`` will be returned.\n\nArgs:\norigin (str) :\nThe URL of the connection origin\n\nReturns:\nbool, True if the connection is allowed, False otherwise", "source": "codesearchnet"} {"code": "def replace_first(pcoll, regex, replacement):\n regex = Regex._regex_compile(regex)\n return pcoll | Map(lambda elem: regex.sub(replacement, elem, 1))", "docstring": "Returns the matches if a portion of the line matches the regex and replaces\nthe first match with the replacement string.\n\nArgs:\nregex: the regular expression string or (re.compile) pattern.\nreplacement: the string to be substituted for each match.", "source": "github-repos"} {"code": "def begin_run_group(project):\n from benchbuild.utils.db import create_run_group\n from datetime import datetime\n (group, session) = create_run_group(project)\n group.begin = datetime.now()\n group.status = 'running'\n session.commit()\n return (group, session)", "docstring": "Begin a run_group in the database.\n\nA run_group groups a set of runs for a given project. This models a series\nof runs that form a complete binary runtime test.\n\nArgs:\nproject: The project we begin a new run_group for.\n\nReturns:\n``(group, session)`` where group is the created group in the\ndatabase and session is the database session this group lives in.", "source": "codesearchnet"} {"code": "def send(self, stream=False):\n \n \n \n \n try:\n response = self.session.request(\n self._http_method,\n self._url,\n auth=self._basic_auth,\n data=self._body,\n files=self._files,\n headers=self._headers,\n params=self._payload,\n stream=stream,\n timeout=self._timeout,\n )\n except Exception as e:\n err = 'Failed making HTTP request ({}).'.format(e)\n raise RuntimeError(err)\n\n \n self.tcex.log.info(u'Status Code: {}'.format(response.status_code))\n return response", "docstring": "Send the HTTP request via Python Requests modules.\n\nThis method will send the request to the remote endpoint. It will try to handle\ntemporary communications issues by retrying the request automatically.\n\nArgs:\nstream (bool): Boolean to enable stream download.\n\nReturns:\nRequests.Response: The Request response", "source": "juraj-google-style"} {"code": "def on_predict_end(self, logs=None):\n logs = self._process_logs(logs)\n for callback in self.callbacks:\n callback.on_predict_end(logs)", "docstring": "Calls the `on_predict_end` methods of its callbacks.\n\nArgs:\nlogs: Dict. 
Currently no data is passed to this argument for this method\nbut that may change in the future.", "source": "github-repos"} {"code": "def parse_parameters(parameters, variables_mapping=None, functions_mapping=None):\n variables_mapping = (variables_mapping or {})\n functions_mapping = (functions_mapping or {})\n parsed_parameters_list = []\n parameters = utils.ensure_mapping_format(parameters)\n for (parameter_name, parameter_content) in parameters.items():\n parameter_name_list = parameter_name.split('-')\n if isinstance(parameter_content, list):\n parameter_content_list = []\n for parameter_item in parameter_content:\n if (not isinstance(parameter_item, (list, tuple))):\n parameter_item = [parameter_item]\n parameter_content_dict = dict(zip(parameter_name_list, parameter_item))\n parameter_content_list.append(parameter_content_dict)\n else:\n parsed_variables_mapping = parse_variables_mapping(variables_mapping)\n parsed_parameter_content = eval_lazy_data(parameter_content, parsed_variables_mapping, functions_mapping)\n if (not isinstance(parsed_parameter_content, list)):\n raise exceptions.ParamsError('parameters syntax error!')\n parameter_content_list = []\n for parameter_item in parsed_parameter_content:\n if isinstance(parameter_item, dict):\n parameter_dict = {key: parameter_item[key] for key in parameter_name_list}\n elif isinstance(parameter_item, (list, tuple)):\n parameter_dict = dict(zip(parameter_name_list, parameter_item))\n elif (len(parameter_name_list) == 1):\n parameter_dict = {parameter_name_list[0]: parameter_item}\n parameter_content_list.append(parameter_dict)\n parsed_parameters_list.append(parameter_content_list)\n return utils.gen_cartesian_product(*parsed_parameters_list)", "docstring": "parse parameters and generate cartesian product.\n\nArgs:\nparameters (list) parameters: parameter name and value in list\nparameter value may be in three types:\n(1) data list, e.g. 
[\"iOS/10.1\", \"iOS/10.2\", \"iOS/10.3\"]\n(2) call built-in parameterize function, \"${parameterize(account.csv)}\"\n(3) call custom function in debugtalk.py, \"${gen_app_version()}\"\n\nvariables_mapping (dict): variables mapping loaded from testcase config\nfunctions_mapping (dict): functions mapping loaded from debugtalk.py\n\nReturns:\nlist: cartesian product list\n\nExamples:\n>>> parameters = [\n{\"user_agent\": [\"iOS/10.1\", \"iOS/10.2\", \"iOS/10.3\"]},\n{\"username-password\": \"${parameterize(account.csv)}\"},\n{\"app_version\": \"${gen_app_version()}\"}\n]\n>>> parse_parameters(parameters)", "source": "codesearchnet"} {"code": "def __init__(self, json, Api):\n \n self.json = json\n self._index = 0\n self.Api = Api", "docstring": "Instantiates EventStream instance.\n\nArgs:\njson(list): List from deserializing txn from homeserver.\nApi(func): Generates http api when passed identity=mxid.", "source": "juraj-google-style"} {"code": "def bipartition_indices(N):\n \n result = []\n if N <= 0:\n return result\n\n for i in range(2**(N - 1)):\n part = [[], []]\n for n in range(N):\n bit = (i >> n) & 1\n part[bit].append(n)\n result.append((tuple(part[1]), tuple(part[0])))\n return result", "docstring": "Return indices for undirected bipartitions of a sequence.\n\nArgs:\nN (int): The length of the sequence.\n\nReturns:\nlist: A list of tuples containing the indices for each of the two\nparts.\n\nExample:\n>>> N = 3\n>>> bipartition_indices(N)\n[((), (0, 1, 2)), ((0,), (1, 2)), ((1,), (0, 2)), ((0, 1), (2,))]", "source": "juraj-google-style"} {"code": "def sas_logical_jbod_attachments(self):\n if (not self.__sas_logical_jbod_attachments):\n self.__sas_logical_jbod_attachments = SasLogicalJbodAttachments(self.__connection)\n return self.__sas_logical_jbod_attachments", "docstring": "Gets the SAS Logical JBOD Attachments client.\n\nReturns:\nSasLogicalJbodAttachments:", "source": "codesearchnet"} {"code": "def update(self, rid, data, raise_on_error=True):\n cache_data = {'cache-date': self._dt_to_epoch(datetime.now()), 'cache-data': data}\n return self.ds.put(rid, cache_data, raise_on_error)", "docstring": "Write updated cache data to the DataStore.\n\nArgs:\nrid (str): The record identifier.\ndata (dict): The record data.\nraise_on_error (bool): If True and not r.ok this method will raise a RunTimeError.\n\nReturns:\nobject : Python request response.", "source": "codesearchnet"} {"code": "def convert_bbox_yolo_to_pascal(boxes: torch.Tensor, image_size: tuple[int, int]) -> torch.Tensor:\n boxes = center_to_corners_format(boxes)\n height, width = image_size\n boxes = boxes * torch.tensor([[width, height, width, height]])\n return boxes", "docstring": "Convert bounding boxes from YOLO format (x_center, y_center, width, height) in range [0, 1]\nto Pascal VOC format (x_min, y_min, x_max, y_max) in absolute coordinates.\n\nArgs:\nboxes (torch.Tensor): Bounding boxes in YOLO format\nimage_size (Tuple[int, int]): Image size in format (height, width)\n\nReturns:\ntorch.Tensor: Bounding boxes in Pascal VOC format (x_min, y_min, x_max, y_max)", "source": "github-repos"} {"code": "def _ParseToken(self, file_object, file_offset):\n token_type = self._ParseTokenType(file_object, file_offset)\n token_data = None\n token_data_map_name = self._DATA_TYPE_MAP_PER_TOKEN_TYPE.get(token_type, None)\n if token_data_map_name:\n token_data_map = self._GetDataTypeMap(token_data_map_name)\n (token_data, _) = self._ReadStructureFromFileObject(file_object, (file_offset + 1), token_data_map)\n return (token_type, 
token_data)", "docstring": "Parses a token.\n\nArgs:\nfile_object (dfvfs.FileIO): file-like object.\nfile_offset (int): offset of the token relative to the start of\nthe file-like object.\n\nReturns:\ntuple: containing:\nint: token type\nobject: token data or None if the token type is not supported.", "source": "codesearchnet"} {"code": "def StreamMedia(self, callback=None, finish_callback=None, additional_headers=None, use_chunks=True):\n callback = (callback or self.progress_callback)\n finish_callback = (finish_callback or self.finish_callback)\n self.EnsureInitialized()\n while True:\n if (self.__initial_response is not None):\n response = self.__initial_response\n self.__initial_response = None\n else:\n end_byte = self.__ComputeEndByte(self.progress, use_chunks=use_chunks)\n response = self.__GetChunk(self.progress, end_byte, additional_headers=additional_headers)\n if (self.total_size is None):\n self.__SetTotal(response.info)\n response = self.__ProcessResponse(response)\n self._ExecuteCallback(callback, response)\n if ((response.status_code == http_client.OK) or (self.progress >= self.total_size)):\n break\n self._ExecuteCallback(finish_callback, response)", "docstring": "Stream the entire download.\n\nArgs:\ncallback: (default: None) Callback to call as each chunk is\ncompleted.\nfinish_callback: (default: None) Callback to call when the\ndownload is complete.\nadditional_headers: (default: None) Additional headers to\ninclude in fetching bytes.\nuse_chunks: (bool, default: True) If False, ignore self.chunksize\nand stream this download in a single request.\n\nReturns:\nNone. Streams bytes into self.stream.", "source": "codesearchnet"} {"code": "def __init__(self,\n nlp,\n rules: Dict,\n extractor_name: str) -> None:\n \n\n Extractor.__init__(self,\n input_type=InputType.TEXT,\n category=\"spacy_rule_extractor\",\n name=extractor_name)\n self._rules = rules[\"rules\"]\n self._nlp = copy.deepcopy(nlp)\n self._tokenizer = Tokenizer(self._nlp)\n self._matcher = Matcher(self._nlp.vocab)\n self._field_name = rules[\"field_name\"] if \"field_name\" in rules else extractor_name\n self._rule_lst = {}\n self._hash_map = {}\n for idx, a_rule in enumerate(self._rules):\n this_rule = Rule(a_rule, self._nlp)\n self._rule_lst[this_rule.identifier + \"rule_id", "docstring": "Initialize the extractor, storing the rule information and construct spacy rules\nArgs:\nnlp\nrules (Dict): spacy rules\nextractor_name: str\n\nReturns:", "source": "juraj-google-style"} {"code": "def get_absolute_positions(self, abs_pos_embeddings, has_cls_token, height, width):\n if has_cls_token:\n abs_pos_embeddings = abs_pos_embeddings[:, 1:]\n num_position = abs_pos_embeddings.shape[1]\n size = int(math.sqrt(num_position))\n if size * size != num_position:\n raise ValueError('Absolute position embeddings must be a square number.')\n if torch.jit.is_tracing() or (size != height or size != width):\n new_abs_pos_embeddings = nn.functional.interpolate(abs_pos_embeddings.reshape(1, size, size, -1).permute(0, 3, 1, 2), size=(height, width), mode='bicubic', align_corners=False)\n return new_abs_pos_embeddings.permute(0, 2, 3, 1)\n else:\n return abs_pos_embeddings.reshape(1, height, width, -1)", "docstring": "Calculate absolute positional embeddings. 
If needed, resize embeddings and remove cls_token dimension for the\noriginal embeddings.\n\nArgs:\nabs_pos_embeddings (`torch.Tensor`):\nAbsolute positional embeddings with (1, num_position, num_channels).\nhas_cls_token (`bool`):\nIf true, has 1 embedding in abs_pos_embeddings for cls token.\nheight (`int`):\nHeight of input image tokens.\nwidth (`int`):\nWidth of input image tokens.\n\nReturns:\nAbsolute positional embeddings after processing with shape (1, height, width, num_channels)", "source": "github-repos"} {"code": "class BrosProcessor(ProcessorMixin):\n attributes = ['tokenizer']\n tokenizer_class = ('BertTokenizer', 'BertTokenizerFast')\n\n def __init__(self, tokenizer=None, **kwargs):\n if tokenizer is None:\n raise ValueError('You need to specify a `tokenizer`.')\n super().__init__(tokenizer)\n\n def __call__(self, text: Union[TextInput, PreTokenizedInput, List[TextInput], List[PreTokenizedInput]]=None, add_special_tokens: bool=True, padding: Union[bool, str, PaddingStrategy]=False, truncation: Union[bool, str, TruncationStrategy]=None, max_length: Optional[int]=None, stride: int=0, pad_to_multiple_of: Optional[int]=None, return_token_type_ids: Optional[bool]=None, return_attention_mask: Optional[bool]=None, return_overflowing_tokens: bool=False, return_special_tokens_mask: bool=False, return_offsets_mapping: bool=False, return_length: bool=False, verbose: bool=True, return_tensors: Optional[Union[str, TensorType]]=None, **kwargs) -> BatchEncoding:\n \n encoding = self.tokenizer(text=text, add_special_tokens=add_special_tokens, padding=padding, truncation=truncation, max_length=max_length, stride=stride, pad_to_multiple_of=pad_to_multiple_of, return_token_type_ids=return_token_type_ids, return_attention_mask=return_attention_mask, return_overflowing_tokens=return_overflowing_tokens, return_special_tokens_mask=return_special_tokens_mask, return_offsets_mapping=return_offsets_mapping, return_length=return_length, verbose=verbose, return_tensors=return_tensors, **kwargs)\n return encoding\n\n def batch_decode(self, *args, **kwargs):\n \n return self.tokenizer.batch_decode(*args, **kwargs)\n\n def decode(self, *args, **kwargs):\n \n return self.tokenizer.decode(*args, **kwargs)\n\n @property\n def model_input_names(self):\n tokenizer_input_names = self.tokenizer.model_input_names\n return list(dict.fromkeys(tokenizer_input_names))", "docstring": "Constructs a Bros processor which wraps a BERT tokenizer.\n\n[`BrosProcessor`] offers all the functionalities of [`BertTokenizerFast`]. See the docstring of\n[`~BrosProcessor.__call__`] and [`~BrosProcessor.decode`] for more information.\n\nArgs:\ntokenizer (`BertTokenizerFast`, *optional*):\nAn instance of ['BertTokenizerFast`]. 
The tokenizer is a required input.", "source": "github-repos"} {"code": "def _CreateUserIdentifier(identifier_type=None, value=None):\n if (identifier_type in _HASHED_IDENTIFIER_TYPES):\n value = hashlib.sha256(value.strip().lower()).hexdigest()\n user_identifier = {'userIdentifierType': identifier_type, 'value': value}\n return user_identifier", "docstring": "Creates a user identifier from the specified type and value.\n\nArgs:\nidentifier_type: a str specifying the type of user identifier.\nvalue: a str value of the identifier; to be hashed using SHA-256 if needed.\n\nReturns:\nA dict specifying a user identifier, with a value hashed using SHA-256 if\nneeded.", "source": "codesearchnet"} {"code": "def graph_argument(*arg_names, **options):\n if (not arg_names):\n arg_names = ['G']\n allow_None = options.pop('allow_None', False)\n if options:\n (key, _) = options.popitem()\n msg = \"graph_argument() for an unexpected keyword argument '{}'\".format(key)\n raise TypeError(msg)\n\n def _graph_arg(f):\n argspec = getargspec(f)\n\n def _enforce_single_arg(name, args, kwargs):\n try:\n G = kwargs[name]\n except KeyError:\n raise TypeError('Graph argument missing')\n if (hasattr(G, 'edges') and hasattr(G, 'nodes')):\n kwargs[name] = (list(G.nodes), list(G.edges))\n elif _is_integer(G):\n kwargs[name] = (list(range(G)), list(itertools.combinations(range(G), 2)))\n elif (isinstance(G, abc.Sequence) and (len(G) == 2)):\n if isinstance(G[0], integer_types):\n kwargs[name] = (list(range(G[0])), G[1])\n elif (allow_None and (G is None)):\n return G\n else:\n raise ValueError('Unexpected graph input form')\n return\n\n @wraps(f)\n def new_f(*args, **kwargs):\n bound_args = inspect.getcallargs(f, *args, **kwargs)\n final_args = list(bound_args.pop(argspec.varargs, ()))\n final_kwargs = bound_args.pop(argspec.keywords, {})\n final_kwargs.update(bound_args)\n for name in arg_names:\n _enforce_single_arg(name, final_args, final_kwargs)\n return f(*final_args, **final_kwargs)\n return new_f\n return _graph_arg", "docstring": "Decorator to coerce given graph arguments into a consistent form.\n\nThe wrapped function will accept either an integer n, interpreted as a\ncomplete graph of size n, or a nodes/edges pair, or a NetworkX graph. 
The\nargument will then be converted into a nodes/edges 2-tuple.\n\nArgs:\n*arg_names (optional, default='G'):\nThe names of the arguments for input graphs.\n\nallow_None (bool, optional, default=False):\nAllow None as an input graph in which case it is passed through as\nNone.", "source": "codesearchnet"} {"code": "def ReceiveMessagesRelationalFlows(self, client_id, messages):\n \n now = time.time()\n unprocessed_msgs = []\n message_handler_requests = []\n dropped_count = 0\n for session_id, msgs in iteritems(\n collection.Group(messages, operator.attrgetter(\"session_id\"))):\n\n \n leftover_msgs = self.HandleWellKnownFlows(msgs)\n\n for msg in leftover_msgs:\n if (msg.auth_state != msg.AuthorizationState.AUTHENTICATED and\n msg.session_id != self.unauth_allowed_session_id):\n dropped_count += 1\n continue\n\n if session_id in queue_manager.session_id_map:\n message_handler_requests.append(\n rdf_objects.MessageHandlerRequest(\n client_id=msg.source.Basename(),\n handler_name=queue_manager.session_id_map[session_id],\n request_id=msg.response_id,\n request=msg.payload))\n else:\n unprocessed_msgs.append(msg)\n\n if dropped_count:\n logging.info(\"Dropped %d unauthenticated messages for %s\", dropped_count,\n client_id)\n\n if unprocessed_msgs:\n flow_responses = []\n for message in unprocessed_msgs:\n flow_responses.append(\n rdf_flow_objects.FlowResponseForLegacyResponse(message))\n\n data_store.REL_DB.WriteFlowResponses(flow_responses)\n\n for msg in unprocessed_msgs:\n if msg.type == rdf_flows.GrrMessage.Type.STATUS:\n stat = rdf_flows.GrrStatus(msg.payload)\n if stat.status == rdf_flows.GrrStatus.ReturnedStatus.CLIENT_KILLED:\n \n crash_details = rdf_client.ClientCrash(\n client_id=client_id,\n session_id=msg.session_id,\n backtrace=stat.backtrace,\n crash_message=stat.error_message,\n nanny_status=stat.nanny_status,\n timestamp=rdfvalue.RDFDatetime.Now())\n events.Events.PublishEvent(\n \"ClientCrash\", crash_details, token=self.token)\n\n if message_handler_requests:\n data_store.REL_DB.WriteMessageHandlerRequests(message_handler_requests)\n\n logging.debug(\"Received %s messages from %s in %s sec\", len(messages),\n client_id,\n time.time() - now)", "docstring": "Receives and processes messages for flows stored in the relational db.\n\nArgs:\nclient_id: The client which sent the messages.\nmessages: A list of GrrMessage RDFValues.", "source": "juraj-google-style"} {"code": "def _funm_svd(a, func):\n (U, s, Vh) = la.svd(a, lapack_driver='gesvd')\n S = np.diag(func(s))\n return U.dot(S).dot(Vh)", "docstring": "Apply real scalar function to singular values of a matrix.\n\nArgs:\na (array_like): (N, N) Matrix at which to evaluate the function.\nfunc (callable): Callable object that evaluates a scalar function f.\n\nReturns:\nndarray: funm (N, N) Value of the matrix function specified by func\nevaluated at `A`.", "source": "codesearchnet"} {"code": "def find_field(item_list, cond, comparator, target_field):\n for item in item_list:\n if (comparator(item, cond) and (target_field in item)):\n return item[target_field]\n return None", "docstring": "Finds the value of a field in a dict object that satisfies certain\nconditions.\n\nArgs:\nitem_list: A list of dict objects.\ncond: A param that defines the condition.\ncomparator: A function that checks if an dict satisfies the condition.\ntarget_field: Name of the field whose value to be returned if an item\nsatisfies the condition.\n\nReturns:\nTarget value or None if no item satisfies the condition.", "source": "codesearchnet"} {"code": 
"def gaussian_square(duration: int, amp: complex, sigma: float, risefall: int, name: str=None) -> SamplePulse:\n center = (duration / 2)\n width = (duration - (2 * risefall))\n zeroed_width = (duration + 2)\n return _sampled_gaussian_square_pulse(duration, amp, center, width, sigma, zeroed_width=zeroed_width, name=name)", "docstring": "Generates gaussian square `SamplePulse`.\n\nCentered at `duration/2` and zeroed at `t=-1` and `t=duration+1` to prevent\nlarge initial/final discontinuities.\n\nApplies `left` sampling strategy to generate discrete pulse from continuous function.\n\nArgs:\nduration: Duration of pulse. Must be greater than zero.\namp: Pulse amplitude.\nsigma: Width (standard deviation) of gaussian rise/fall portion of the pulse.\nrisefall: Number of samples over which pulse rise and fall happen. Width of\nsquare portion of pulse will be `duration-2*risefall`.\nname: Name of pulse.", "source": "codesearchnet"} {"code": "def set_fig_title(self, title, **kwargs):\n prop_default = {'fontsize': 20}\n for (prop, default) in prop_default.items():\n kwargs[prop] = kwargs.get(prop, default)\n self.figure.fig_title = title\n self.figure.fig_title_kwargs = kwargs\n return", "docstring": "Set overall figure title.\n\nSet title for overall figure. This is not for a specific plot.\nIt will place the title at the top of the figure with a call to ``fig.suptitle``.\n\nArgs:\ntitle (str): Figure title.\n\nKeywork Arguments:\nx/y (float, optional): The x/y location of the text in figure coordinates.\nDefaults are 0.5 for x and 0.98 for y.\nhorizontalalignment/ha (str, optional): The horizontal alignment of\nthe text relative to (x, y). Optionas are 'center', 'left', or 'right'.\nDefault is 'center'.\nverticalalignment/va (str, optional): The vertical alignment of the text\nrelative to (x, y). Optionas are 'top', 'center', 'bottom',\nor 'baseline'. Default is 'top'.\nfontsize/size (int, optional): The font size of the text. Default is 20.", "source": "codesearchnet"} {"code": "def _line_and_col_to_offset(lines, line, col):\n offset = 0\n for (index, contents) in enumerate(lines, 1):\n if (index == line):\n return (offset + col)\n offset += len(contents)\n raise ValueError('Offset {}:{} not found'.format(line, col))", "docstring": "Figure out the offset into a file for a particular line and col.\n\nThis can return offsets that don't actually exist in the file. If you\nspecify a line that exists and a col that is past the end of that line, this\nwill return a \"fake\" offset. 
This is to account for the fact that a\nWorkItem's end_pos is one-past the end of a mutation, and hence potentially\none-past the end of a file.\n\nArgs:\nlines: A sequence of the lines in a file.\nline: A one-based index indicating the line in the file.\ncol: A zero-based index indicating the column on `line`.\n\nRaises: ValueError: If the specified line found in the file.", "source": "codesearchnet"} {"code": "def connect_direct(self, device, calibration=True):\n if (not isinstance(device, ScanResult)):\n if isinstance(device, str):\n device = ScanResult(device, fmt_addr_raw(device))\n elif isinstance(device, unicode):\n device = device.encode('ascii')\n device = ScanResult(device, fmt_addr_raw(device))\n else:\n logger.warn('Expected ScanResult, found type {} instead!'.format(type(device)))\n return (False, None)\n logger.debug('Connecting directly to device address'.format(device.addr))\n self._set_state(self._STATE_CONNECTING)\n self.api.ble_cmd_gap_connect_direct(device.raw_addr, 0, 6, 14, 100, 50)\n self._wait_for_state(self._STATE_CONNECTING, 5)\n if (self.state != self._STATE_CONNECTED):\n logger.warn('Connection failed!')\n self._set_state(self._STATE_GAP_END)\n self.api.ble_cmd_gap_end_procedure()\n self._wait_for_state(self._STATE_GAP_END)\n return (False, None)\n conn_handle = self.conn_handles[(- 1)]\n logger.info('Connection OK, handle is 0x{:02X}'.format(conn_handle))\n sk8 = SK8(self, conn_handle, device, calibration)\n self._add_device(sk8)\n sk8._discover_services()\n return (True, sk8)", "docstring": "Establish a connection to a single SK8.\n\nArgs:\ndevice: either a :class:`ScanResult` or a plain hardware address string\nin xx:xx:xx:xx:xx:xx format.\ncalibration (bool): True to attempt to load calibration data for this\ndevice after connection, False otherwise. See :meth:`SK8.load_calibration`.\n\nReturns:\ntuple (`result`, `device`), where `result` is a bool indicating if a\nconnection was created successfully. If `result` is True, `device` will\nbe set to a new :class:`SK8` instance. Otherwise it will be None.", "source": "codesearchnet"} {"code": "def list(self):\n response = self._swimlane.request('get', 'app')\n return [App(self._swimlane, item) for item in response.json()]", "docstring": "Retrieve list of all apps\n\nReturns:\n:class:`list` of :class:`~swimlane.core.resources.app.App`: List of all retrieved apps", "source": "codesearchnet"} {"code": "def _policy_loss(self, old_policy, policy, action, advantage, length):\n with tf.name_scope('policy_loss'):\n kl = tf.contrib.distributions.kl_divergence(old_policy, policy)\n kl = tf.check_numerics(kl, 'kl')\n kl = tf.reduce_mean(self._mask(kl, length), 1)\n policy_gradient = tf.exp((policy.log_prob(action) - old_policy.log_prob(action)))\n surrogate_loss = (- tf.reduce_mean(self._mask((policy_gradient * tf.stop_gradient(advantage)), length), 1))\n surrogate_loss = tf.check_numerics(surrogate_loss, 'surrogate_loss')\n kl_penalty = (self._penalty * kl)\n cutoff_threshold = (self._config.kl_target * self._config.kl_cutoff_factor)\n cutoff_count = tf.reduce_sum(tf.cast((kl > cutoff_threshold), tf.int32))\n with tf.control_dependencies([tf.cond((cutoff_count > 0), (lambda : tf.Print(0, [cutoff_count], 'kl cutoff! 
')), int)]):\n kl_cutoff = ((self._config.kl_cutoff_coef * tf.cast((kl > cutoff_threshold), tf.float32)) * ((kl - cutoff_threshold) ** 2))\n policy_loss = ((surrogate_loss + kl_penalty) + kl_cutoff)\n entropy = tf.reduce_mean(policy.entropy(), axis=1)\n if self._config.entropy_regularization:\n policy_loss -= (self._config.entropy_regularization * entropy)\n summary = tf.summary.merge([tf.summary.histogram('entropy', entropy), tf.summary.histogram('kl', kl), tf.summary.histogram('surrogate_loss', surrogate_loss), tf.summary.histogram('kl_penalty', kl_penalty), tf.summary.histogram('kl_cutoff', kl_cutoff), tf.summary.histogram('kl_penalty_combined', (kl_penalty + kl_cutoff)), tf.summary.histogram('policy_loss', policy_loss), tf.summary.scalar('avg_surr_loss', tf.reduce_mean(surrogate_loss)), tf.summary.scalar('avg_kl_penalty', tf.reduce_mean(kl_penalty)), tf.summary.scalar('avg_policy_loss', tf.reduce_mean(policy_loss))])\n policy_loss = tf.reduce_mean(policy_loss, 0)\n return (tf.check_numerics(policy_loss, 'policy_loss'), summary)", "docstring": "Compute the policy loss composed of multiple components.\n\n1. The policy gradient loss is importance sampled from the data-collecting\npolicy at the beginning of training.\n2. The second term is a KL penalty between the policy at the beginning of\ntraining and the current policy.\n3. Additionally, if this KL already changed more than twice the target\namount, we activate a strong penalty discouraging further divergence.\n\nArgs:\nold_policy: Action distribution of the behavioral policy.\npolicy: Sequences of distribution params of the current policy.\naction: Sequences of actions.\nadvantage: Sequences of advantages.\nlength: Batch of sequence lengths.\n\nReturns:\nTuple of loss tensor and summary tensor.", "source": "codesearchnet"} {"code": "def _parse_metadata(self, message):\n \n\n \n metadata = Metadata(source=self.actor_urn).__dict__\n metadata['thread_ts'] = message.get('thread_ts')\n if 'presence' in message:\n metadata['presence'] = message['presence']\n\n if 'text' in message:\n metadata['text'] = message['text']\n elif 'previous_message' in message:\n \n if 'text' in message['previous_message']:\n metadata['text'] = message['previous_message']['text']\n else:\n metadata['text'] = None\n else:\n metadata['text'] = None\n\n if 'user' in message:\n metadata['source_user'] = message['user']\n elif 'bot_id' in message:\n metadata['source_user'] = self.get_userid_from_botid(\n message['bot_id'])\n elif 'message' in message and 'user' in message['message']:\n metadata['source_user'] = message['message']['user']\n else:\n metadata['source_user'] = None\n\n metadata['user_id'] = metadata['source_user']\n metadata['display_name'] = self.get_username(metadata['source_user'])\n\n if 'channel' in message:\n metadata['source_channel'] = message['channel']\n \n if message['channel'].startswith('D'):\n metadata['is_private_message'] = True\n else:\n metadata['is_private_message'] = False\n\n metadata['source_connector'] = 'slack'\n\n return metadata", "docstring": "Parse incoming messages to build metadata dict\nLots of 'if' statements. 
It sucks, I know.\n\nArgs:\nmessage (dict): JSON dump of message sent from Slack\n\nReturns:\nLegobot.Metadata", "source": "juraj-google-style"} {"code": "def process_event(event):\n if (event.type == EventType.ON_CONVERSATION_TURN_STARTED):\n print()\n print(event)\n if ((event.type == EventType.ON_CONVERSATION_TURN_FINISHED) and event.args and (not event.args['with_follow_on_turn'])):\n print()\n if (event.type == EventType.ON_DEVICE_ACTION):\n for (command, params) in event.actions:\n print('Do command', command, 'with params', str(params))", "docstring": "Pretty prints events.\n\nPrints all events that occur with two spaces between each new\nconversation and a single space between turns of a conversation.\n\nArgs:\nevent(event.Event): The current event to process.", "source": "codesearchnet"} {"code": "def add_mapped_chain_ids(self, mapped_chains):\n mapped_chains = ssbio.utils.force_list(mapped_chains)\n for c in mapped_chains:\n if (c not in self.mapped_chains):\n self.mapped_chains.append(c)\n log.debug('{}: added to list of mapped chains'.format(c))\n else:\n log.debug('{}: chain already in list of mapped chains, not adding'.format(c))", "docstring": "Add chains by ID into the mapped_chains attribute\n\nArgs:\nmapped_chains (str, list): Chain ID or list of IDs", "source": "codesearchnet"} {"code": "def synthesize(self, duration, freqs_in_hz=[440.]):\n \n freqs = np.array(freqs_in_hz)\n scaling = 1 / len(freqs)\n sr = int(self.samplerate)\n cps = freqs / sr\n ts = (duration / Seconds(1)) * sr\n ranges = np.array([np.arange(0, ts * c, c) for c in cps])\n raw = (np.sin(ranges * (2 * np.pi)) * scaling).sum(axis=0)\n return AudioSamples(raw, self.samplerate)", "docstring": "Synthesize one or more sine waves\n\nArgs:\nduration (numpy.timdelta64): The duration of the sound to be\nsynthesized\nfreqs_in_hz (list of float): Numbers representing the frequencies\nin hz that should be synthesized", "source": "juraj-google-style"} {"code": "def variants_export_header(case_obj):\n \n header = []\n header = header + EXPORT_HEADER\n \n for individual in case_obj['individuals']:\n display_name = str(individual['display_name'])\n header.append('AD_reference_'+display_name) \n header.append('AD_alternate_'+display_name) \n header.append('GT_quality_'+display_name) \n return header", "docstring": "Returns a header for the CSV file with the filtered variants to be exported.\n\nArgs:\ncase_obj(scout.models.Case)\n\nReturns:\nheader: includes the fields defined in scout.constants.variants_export EXPORT_HEADER\n+ AD_reference, AD_alternate, GT_quality for each sample analysed for a case", "source": "juraj-google-style"} {"code": "def build_evaluation(variant_specific, variant_id, user_id, user_name,\n institute_id, case_id, classification, criteria):\n \n criteria = criteria or []\n evaluation_obj = dict(\n variant_specific = variant_specific,\n variant_id = variant_id,\n institute_id = institute_id,\n case_id = case_id,\n classification = classification,\n user_id = user_id,\n user_name = user_name,\n created_at = datetime.datetime.now(),\n )\n criteria_objs = []\n for info in criteria:\n criteria_obj = {}\n \n \n criteria_obj['term'] = info['term']\n if 'comment' in info:\n criteria_obj['comment'] = info['comment']\n if 'links' in info:\n criteria_obj['links'] = info['links']\n criteria_objs.append(criteria_obj)\n\n evaluation_obj['criteria'] = criteria_objs\n\n return evaluation_obj", "docstring": "Build a evaluation object ready to be inserted to database\n\nArgs:\nvariant_specific(str): md5 string for 
the specific variant\nvariant_id(str): md5 string for the common variant\nuser_id(str)\nuser_name(str)\ninstitute_id(str)\ncase_id(str)\nclassification(str): The ACMG classification\ncriteria(list(dict)): A list of dictionaries with ACMG criterias\n\nReturns:\nevaluation_obj(dict): Correctly formatted evaluation object", "source": "juraj-google-style"} {"code": "def _ParseMRUListExKey(\n self, parser_mediator, registry_key, codepage='cp1252'):\n \n try:\n mrulistex = self._ParseMRUListExValue(registry_key)\n except (ValueError, errors.ParseError) as exception:\n parser_mediator.ProduceExtractionWarning(\n 'unable to parse MRUListEx value with error: {0!s}'.format(exception))\n return\n\n if not mrulistex:\n return\n\n values_dict = {}\n found_terminator = False\n for entry_index, entry_number in enumerate(mrulistex):\n \n if entry_number == -1:\n break\n\n if found_terminator:\n parser_mediator.ProduceExtractionWarning((\n 'found additional MRUListEx entries after terminator in key: '\n '{0:s}.').format(registry_key.path))\n\n \n found_terminator = False\n\n value_string = self._ParseMRUListExEntryValue(\n parser_mediator, registry_key, entry_index, entry_number,\n codepage=codepage)\n\n value_text = 'Index: {0:d} [MRU Value {1:d}]'.format(\n entry_index + 1, entry_number)\n\n values_dict[value_text] = value_string\n\n event_data = windows_events.WindowsRegistryEventData()\n event_data.key_path = registry_key.path\n event_data.offset = registry_key.offset\n event_data.regvalue = values_dict\n event_data.source_append = self._SOURCE_APPEND\n\n event = time_events.DateTimeValuesEvent(\n registry_key.last_written_time, definitions.TIME_DESCRIPTION_WRITTEN)\n parser_mediator.ProduceEventWithEventData(event, event_data)", "docstring": "Extract event objects from a MRUListEx Registry key.\n\nArgs:\nparser_mediator (ParserMediator): mediates interactions between parsers\nand other components, such as storage and dfvfs.\nregistry_key (dfwinreg.WinRegistryKey): Windows Registry key.\ncodepage (Optional[str]): extended ASCII string codepage.", "source": "juraj-google-style"} {"code": "def should_use_network(self, request):\n \n return (self.networking and\n all((fn(request) for fn in self.network_filters)))", "docstring": "Verifies if real networking mode should be used for the given\nrequest, passing it to the registered network filters.\n\nArguments:\nrequest (pook.Request): outgoing HTTP request to test.\n\nReturns:\nbool", "source": "juraj-google-style"} {"code": "def _flatten_obs(self, obs_dict, verbose=False):\n \n ob_lst = []\n for key in obs_dict:\n if key in self.keys:\n if verbose:\n print(\"adding key: {}\".format(key))\n ob_lst.append(obs_dict[key])\n return np.concatenate(ob_lst)", "docstring": "Filters keys of interest out and concatenate the information.\n\nArgs:\nobs_dict: ordered dictionary of observations", "source": "juraj-google-style"} {"code": "def _format_variants(self, variant, index, case_obj, add_all_info=False):\n \n header_line = self.head.header\n \n vcf_individuals = set([ind_id for ind_id in self.head.individuals])\n\n \n info_dict = dict(variant.INFO)\n\n chrom = variant.CHROM\n if chrom.startswith('chr') or chrom.startswith('CHR'):\n chrom = chrom[3:]\n\n variant_obj = Variant(\n CHROM=chrom,\n POS=variant.POS,\n ID=variant.ID,\n REF=variant.REF,\n ALT=variant.ALT[0],\n QUAL=variant.QUAL,\n FILTER=variant.FILTER,\n )\n variant_obj._set_variant_id()\n\n logger.debug(\"Creating a variant object of variant {0}\".format(\n variant_obj.variant_id))\n\n variant_obj.index = 
index\n logger.debug(\"Updating index to: {0}\".format(\n index))\n\n \n variant_obj.start = variant.start\n variant_obj.stop = variant.end\n\n \n \n if self.variant_type == 'sv':\n variant_obj.stop = int(info_dict.get('END', variant_obj.POS))\n self._add_sv_coordinates(variant_obj)\n variant_obj.sv_type = info_dict.get('SVTYPE')\n\n \n \n occurances = info_dict.get('OCC')\n if occurances:\n logger.debug(\"Updating occurances to: {0}\".format(\n occurances))\n variant_obj['occurances'] = float(occurances)\n variant_obj.add_frequency('OCC', occurances)\n\n else:\n self._add_thousand_g(variant_obj, info_dict)\n self._add_cadd_score(variant_obj, info_dict)\n self._add_genetic_models(variant_obj, info_dict)\n self._add_transcripts(variant_obj, info_dict)\n self._add_exac(variant_obj, info_dict)\n \n self._add_hgnc_symbols(variant_obj)\n\n if add_all_info:\n self._add_genotype_calls(variant_obj, str(variant), case_obj)\n self._add_compounds(variant_obj, info_dict)\n self._add_gmaf(variant_obj, info_dict)\n self._add_genes(variant_obj)\n\n\n \n self._add_consequences(variant_obj, str(variant))\n self._add_most_severe_consequence(variant_obj)\n self._add_impact_severity(variant_obj)\n self._add_rank_score(variant_obj, info_dict)\n variant_obj.set_max_freq()\n return variant_obj", "docstring": "Return a Variant object\n\nFormat variant make a variant that includes enough information for\nthe variant view.\nIf add_all_info then all transcripts will be parsed\n\nArgs:\nvariant (cython2.Variant): A variant object\nindex (int): The index of the variant\ncase_obj (puzzle.models.Case): A case object", "source": "juraj-google-style"} {"code": "def abort(cls, mapreduce_id, **kwargs):\n \n cls(key_name=\"%s:%s\" % (mapreduce_id, cls._KEY_NAME),\n command=cls.ABORT).put(**kwargs)", "docstring": "Causes a job to abort.\n\nArgs:\nmapreduce_id: The job to abort. Not verified as a valid job.", "source": "juraj-google-style"} {"code": "def needkwargs(*argnames):\n required = set(argnames)\n\n def decorator(func):\n\n def inner(*args, **kwargs):\n missing = (required - set(kwargs))\n if missing:\n err = ('%s kwargs are missing.' 
% list(missing))\n raise ValueError(err)\n return func(*args, **kwargs)\n return inner\n return decorator", "docstring": "Function decorator which checks that the decorated function is called\nwith a set of required kwargs.\n\nArgs:\n*argnames: String keyword argument names.\n\nRaises:\nValueError: If a required kwarg is missing in the decorated function\ncall.", "source": "codesearchnet"} {"code": "def path(self, value):\n if (not value.endswith('/')):\n self._path = '{v}/'.format(v=value)\n else:\n self._path = value", "docstring": "Setter for 'path' property\n\nArgs:\nvalue (str): Absolute path to scan", "source": "codesearchnet"} {"code": "def SetOption(self, section, option, value, overwrite=True):\n if ((not overwrite) and self.config.has_option(section, option)):\n return\n if (not self.config.has_section(section)):\n self.config.add_section(section)\n self.config.set(section, option, str(value))", "docstring": "Set the value of an option in the config file.\n\nArgs:\nsection: string, the section of the config file to check.\noption: string, the option to set the value of.\nvalue: string, the value to set the option.\noverwrite: bool, True to overwrite an existing value in the config file.", "source": "codesearchnet"} {"code": "def visit_arithmetic(self, arithmetic: _evaluation.ArithmeticNode) -> _sql_data_types.Select:\n lhs_result = self.visit(arithmetic.left)\n rhs_result = self.visit(arithmetic.right)\n sql_data_type = _sql_data_types.coerce(lhs_result.sql_data_type, rhs_result.sql_data_type)\n lhs_subquery = lhs_result.as_operand()\n rhs_subquery = rhs_result.as_operand()\n if sql_data_type == _sql_data_types.String:\n sql_value = f'CONCAT({lhs_subquery}, {rhs_subquery})'\n elif arithmetic.op == _ast.Arithmetic.Op.MODULO:\n sql_value = f'MOD({lhs_subquery}, {rhs_subquery})'\n elif arithmetic.op == _ast.Arithmetic.Op.TRUNCATED_DIVISION:\n sql_value = f'DIV({lhs_subquery}, {rhs_subquery})'\n else:\n sql_value = f'({lhs_subquery} {arithmetic.op} {rhs_subquery})'\n sql_alias = 'arith_'\n return _sql_data_types.Select(select_part=_sql_data_types.RawExpression(sql_value, _sql_data_type=sql_data_type, _sql_alias=sql_alias), from_part=None)", "docstring": "Translates a FHIRPath arithmetic expression to Standard SQL.\n\nEach operand is expected to be a collection of a single element. 
Both\noperands must be of the same type, or of compatible types according to the\nrules of implicit conversion.\n\nArgs:\narithmetic: The `_Arithmetic` Expression node.\n\nReturns:\nA compiled Standard SQL expression.", "source": "github-repos"} {"code": "def _get_client_by_hostname(self, hostname):\n \n \n print('Searching for client: {0:s}'.format(hostname))\n try:\n search_result = self.grr_api.SearchClients(hostname)\n except grr_errors.UnknownError as exception:\n self.state.add_error('Could not search for host {0:s}: {1!s}'.format(\n hostname, exception\n ), critical=True)\n return None\n\n result = []\n for client in search_result:\n if hostname.lower() in client.data.os_info.fqdn.lower():\n result.append((client.data.last_seen_at, client))\n\n if not result:\n self.state.add_error(\n 'Could not get client_id for {0:s}'.format(hostname), critical=True)\n return None\n\n last_seen, client = sorted(result, key=lambda x: x[0], reverse=True)[0]\n \n last_seen_datetime = datetime.datetime.utcfromtimestamp(\n last_seen / 1000000)\n \n \n last_seen_seconds = (\n datetime.datetime.utcnow() - last_seen_datetime).total_seconds()\n last_seen_minutes = int(round(last_seen_seconds / 60))\n\n print('{0:s}: Found active client'.format(client.client_id))\n print('Found active client: {0:s}'.format(client.client_id))\n print('Client last seen: {0:s} ({1:d} minutes ago)'.format(\n last_seen_datetime.strftime('%Y-%m-%dT%H:%M:%S+0000'),\n last_seen_minutes))\n\n return client", "docstring": "Search GRR by hostname and get the latest active client.\n\nArgs:\nhostname: hostname to search for.\n\nReturns:\nGRR API Client object\n\nRaises:\nDFTimewolfError: if no client ID found for hostname.", "source": "juraj-google-style"} {"code": "def do_tortoisehg_report(repos, output):\n \n import operator\n import xml.etree.ElementTree as ET\n\n root = ET.Element('reporegistry')\n item = ET.SubElement(root, 'treeitem')\n\n group = ET.SubElement(item, 'group', attrib=Dict(name='groupname'))\n\n def fullname_to_shortname(fullname):\n \n shortname = fullname.replace(os.environ['HOME'], '~')\n shortname = shortname.lstrip('./')\n return shortname\n\n for repo in sorted(repos, key=operator.attrgetter('fpath')):\n fullname = os.path.join(\n os.path.dirname(repo.fpath),\n os.path.basename(repo.fpath))\n shortname = fullname_to_shortname(fullname)\n if repo.prefix != '.hg':\n shortname = \"%s%s\" % (shortname, repo.prefix)\n _ = ET.SubElement(group, 'repo',\n attrib=Dict(\n root=repo.fpath,\n shortname=shortname,\n basenode='0'*40))\n _\n print('', file=output)\n print(\"\" % \"TODO\", file=output)\n print(ET.dump(root), file=output)", "docstring": "Generate a thg-reporegistry.xml file from a list of repos and print\nto output\n\nArgs:\nrepos (iterable): iterable of Repository subclass instances\noutput (writeable): output stream to which THG XML will be printed", "source": "juraj-google-style"} {"code": "def set(self, option, value=None):\n option = self._container.optionxform(option)\n if (option in self.options()):\n self.__getitem__(option).value = value\n else:\n self.__setitem__(option, value)\n return self", "docstring": "Set an option for chaining.\n\nArgs:\noption (str): option name\nvalue (str): value, default None", "source": "codesearchnet"} {"code": "def get_output_shape_at(self, node_index):\n return self._get_node_attribute_at_index(node_index, 'output_shapes', 'output shape')", "docstring": "Retrieves the output shape(s) of a layer at a given node.\n\nArgs:\nnode_index: Integer, index of the node\nfrom 
which to retrieve the attribute.\nE.g. `node_index=0` will correspond to the\nfirst time the layer was called.\n\nReturns:\nA shape tuple\n(or list of shape tuples if the layer has multiple outputs).\n\nRaises:\nRuntimeError: If called in Eager mode.", "source": "github-repos"} {"code": "def tile_and_concat(image, latent, concat_latent=True):\n if (not concat_latent):\n return image\n image_shape = common_layers.shape_list(image)\n latent_shape = common_layers.shape_list(latent)\n (height, width) = (image_shape[1], image_shape[2])\n latent_dims = latent_shape[1]\n height_multiples = (height \n pad = (height - (height_multiples * latent_dims))\n latent = tf.reshape(latent, ((- 1), latent_dims, 1, 1))\n latent = tf.tile(latent, (1, height_multiples, width, 1))\n latent = tf.pad(latent, [[0, 0], [(pad \n return tf.concat([image, latent], axis=(- 1))", "docstring": "Tile latent and concatenate to image across depth.\n\nArgs:\nimage: 4-D Tensor, (batch_size X height X width X channels)\nlatent: 2-D Tensor, (batch_size X latent_dims)\nconcat_latent: If set to False, the image is returned as is.\n\nReturns:\nconcat_latent: 4-D Tensor, (batch_size X height X width X channels+1)\nlatent tiled and concatenated to the image across the channels.", "source": "codesearchnet"} {"code": "def contains_reference_without_get_key(node: AbstractSyntaxTree) -> bool:\n if isinstance(node, Identifier) and isinstance(node.data_type, _fhir_path_data_types.StructureDataType) and (node.data_type.element_type == 'Reference'):\n if not node.has_parent or not isinstance(node.parent, Invocation):\n return True\n if node.parent.lhs == node:\n parent_invocation = node.parent\n else:\n if not node.parent.has_parent or not isinstance(node.parent.parent, Invocation):\n return True\n parent_invocation = node.parent.parent\n if not isinstance(parent_invocation.rhs, Function) or parent_invocation.rhs.identifier.value != 'getReferenceKey':\n return True\n for child in node.children or ():\n if contains_reference_without_get_key(child):\n return True\n return False", "docstring": "Checks if the AST contains a reference without a getReferenceKey call.\n\nArgs:\nnode: The root node of the abstract syntax tree to search.\n\nReturns:\nTrue if the abstract syntax tree contains an identifier to a reference\nwithout a getReferenceKey call against it. False otherwise.", "source": "github-repos"} {"code": "def symmetric_difference(self, other):\n other = self._as_multiset(other)\n result = self.__class__()\n _total = 0\n _elements = result._elements\n self_elements = self._elements\n other_elements = other._elements\n dist_elements = (set(self_elements.keys()) | set(other_elements.keys()))\n for element in dist_elements:\n multiplicity = self_elements.get(element, 0)\n other_multiplicity = other_elements.get(element, 0)\n new_multiplicity = ((multiplicity - other_multiplicity) if (multiplicity > other_multiplicity) else (other_multiplicity - multiplicity))\n _total += new_multiplicity\n if (new_multiplicity > 0):\n _elements[element] = new_multiplicity\n result._total = _total\n return result", "docstring": "r\"\"\"Return a new set with elements in either the set or other but not both.\n\n>>> ms = Multiset('aab')\n>>> sorted(ms.symmetric_difference('abc'))\n['a', 'c']\n\nYou can also use the ``^`` operator for the same effect. 
However, the operator version\nwill only accept a set as other operator, not any iterable, to avoid errors.\n\n>>> ms = Multiset('aab')\n>>> sorted(ms ^ Multiset('aaac'))\n['a', 'b', 'c']\n\nFor a variant of the operation which modifies the multiset in place see\n:meth:`symmetric_difference_update`.\n\nArgs:\nother: The other set to take the symmetric difference with. Can also be any :class:`~typing.Iterable`\\[~T]\nor :class:`~typing.Mapping`\\[~T, :class:`int`] which are then converted to :class:`Multiset`\\[~T].\n\nReturns:\nThe resulting symmetric difference multiset.", "source": "codesearchnet"} {"code": "def MakeSuiteFromList(t, name=''):\n hist = MakeHistFromList(t)\n d = hist.GetDict()\n return MakeSuiteFromDict(d)", "docstring": "Makes a suite from an unsorted sequence of values.\n\nArgs:\nt: sequence of numbers\nname: string name for this suite\n\nReturns:\nSuite object", "source": "codesearchnet"} {"code": "def _lease_valid(self, lease):\n \n if not lease.exist:\n return None\n\n if lease.has_env:\n return lease.uuid_path\n else:\n self._release(lease)\n return None", "docstring": "Check if the given lease exist and still has a prefix that owns it.\nIf the lease exist but its prefix isn't, remove the lease from this\nstore.\n\nArgs:\nlease (lago.subnet_lease.Lease): Object representation of the\nlease\n\nReturns:\nstr or None: If the lease and its prefix exists, return the path\nto the uuid of the prefix, else return None.", "source": "juraj-google-style"} {"code": "async def _get_or_fetch_conversation(self, conv_id):\n \n conv = self._conv_dict.get(conv_id, None)\n if conv is None:\n logger.info('Fetching unknown conversation %s', conv_id)\n res = await self._client.get_conversation(\n hangouts_pb2.GetConversationRequest(\n request_header=self._client.get_request_header(),\n conversation_spec=hangouts_pb2.ConversationSpec(\n conversation_id=hangouts_pb2.ConversationId(\n id=conv_id\n )\n ), include_event=False\n )\n )\n conv_state = res.conversation_state\n event_cont_token = None\n if conv_state.HasField('event_continuation_token'):\n event_cont_token = conv_state.event_continuation_token\n return self._add_conversation(conv_state.conversation,\n event_cont_token=event_cont_token)\n else:\n return conv", "docstring": "Get a cached conversation or fetch a missing conversation.\n\nArgs:\nconv_id: string, conversation identifier\n\nRaises:\nNetworkError: If the request to fetch the conversation fails.\n\nReturns:\n:class:`.Conversation` with matching ID.", "source": "juraj-google-style"} {"code": "def weighting_function(max_num_bins: int, up: torch.Tensor, reg_scale: int) -> torch.Tensor:\n upper_bound1 = abs(up[0]) * abs(reg_scale)\n upper_bound2 = abs(up[0]) * abs(reg_scale) * 2\n step = (upper_bound1 + 1) ** (2 / (max_num_bins - 2))\n left_values = [-step ** i + 1 for i in range(max_num_bins \n right_values = [step ** i - 1 for i in range(1, max_num_bins \n values = [-upper_bound2] + left_values + [torch.zeros_like(up[0][None])] + right_values + [upper_bound2]\n values = torch.cat(values, 0)\n return values", "docstring": "Generates the non-uniform Weighting Function W(n) for bounding box regression.\n\nArgs:\nmax_num_bins (int): Max number of the discrete bins.\nup (Tensor): Controls upper bounds of the sequence,\nwhere maximum offset is ±up * H / W.\nreg_scale (float): Controls the curvature of the Weighting Function.\nLarger values result in flatter weights near the central axis W(max_num_bins/2)=0\nand steeper weights at both ends.\nReturns:\nTensor: Sequence of Weighting 
Function.", "source": "github-repos"} {"code": "def delete_plan(self, plan_code):\n \n return self.client._delete(self.url + 'plans/{}'.format(plan_code), headers=self.get_headers())", "docstring": "Delete an entire subscription plan associated with the merchant.\n\nArgs:\nplan_code: Plan’s identification code for the merchant.\n\nReturns:", "source": "juraj-google-style"} {"code": "def closest_distance(item_a, time_a, item_b, time_b, max_value):\n \n return np.minimum(item_a.closest_distance(time_a, item_b, time_b), max_value) / float(max_value)", "docstring": "Euclidean distance between the pixels in item_a and item_b closest to each other.\n\nArgs:\nitem_a: STObject from the first set in ObjectMatcher\ntime_a: Time integer being evaluated\nitem_b: STObject from the second set in ObjectMatcher\ntime_b: Time integer being evaluated\nmax_value: Maximum distance value used as scaling value and upper constraint.\n\nReturns:\nDistance value between 0 and 1.", "source": "juraj-google-style"} {"code": "class XLNetPoolerAnswerClass(nn.Module):\n\n def __init__(self, config: XLNetConfig):\n super().__init__()\n self.dense_0 = nn.Linear(config.hidden_size * 2, config.hidden_size)\n self.activation = nn.Tanh()\n self.dense_1 = nn.Linear(config.hidden_size, 1, bias=False)\n\n def forward(self, hidden_states: torch.FloatTensor, start_states: Optional[torch.FloatTensor]=None, start_positions: Optional[torch.LongTensor]=None, cls_index: Optional[torch.LongTensor]=None) -> torch.FloatTensor:\n \n hsz = hidden_states.shape[-1]\n assert start_states is not None or start_positions is not None, 'One of start_states, start_positions should be not None'\n if start_positions is not None:\n start_positions = start_positions[:, None, None].expand(-1, -1, hsz)\n start_states = hidden_states.gather(-2, start_positions).squeeze(-2)\n if cls_index is not None:\n cls_index = cls_index[:, None, None].expand(-1, -1, hsz)\n cls_token_state = hidden_states.gather(-2, cls_index).squeeze(-2)\n else:\n cls_token_state = hidden_states[:, -1, :]\n x = self.dense_0(torch.cat([start_states, cls_token_state], dim=-1))\n x = self.activation(x)\n x = self.dense_1(x).squeeze(-1)\n return x", "docstring": "Compute SQuAD 2.0 answer class from classification and start tokens hidden states.\n\nArgs:\nconfig ([`XLNetConfig`]):\nThe config used by the model, will be used to grab the `hidden_size` of the model.", "source": "github-repos"} {"code": "def log_correction(self, event, action):\n action = str(action)\n self.history.info(action)\n self._corrections.append(dict(event=event.as_dict(), action=action))", "docstring": "This method should be called once we have fixed the problem associated to this event.\nIt adds a new entry in the correction history of the node.\n\nArgs:\nevent: :class:`AbinitEvent` that triggered the correction.\naction (str): Human-readable string with info on the action perfomed to solve the problem.", "source": "codesearchnet"} {"code": "def _create_and_save_file_init_hash_table_model_tf1(self, output_path: str, tags: Collection[str], signature_def_key: str) -> Tuple[Mapping[str, core.Tensor], Mapping[str, core.Tensor]]:\n with session.Session(graph=ops.Graph()) as sess:\n input_vocabs_placeholder, lookup_tensor, output_tensor = self._create_table_init_from_file_model_tf1(sess)\n inputs = {'input_vocabs': input_vocabs_placeholder}\n outputs = {'lookup': lookup_tensor, 'output': output_tensor}\n self._save_tf1_model(sess, output_path, signature_def_key, tags, inputs=inputs, outputs=outputs, 
init_op=lookup_ops.tables_initializer(), assets_collection=ops.get_collection(ops.GraphKeys.ASSET_FILEPATHS))\n return (inputs, outputs)", "docstring": "Creates and saves a model that uses a file-initialized table.\n\nThe asset file \"vocab_file.txt\" is used to initialize a hash table.\n\nArgs:\noutput_path: Path to the directory to save the created model.\ntags: Set of strings that identifies the saved meta graph.\nsignature_def_key: Name of the SignatureDef. Used to identify the\nSignatureDef within the meta graph.\n\nReturns:\ninputs: A mapping of input_key -> input_tensor (placeholder). The input\nkey is \"input_vocabs\".\noutputs: A mapping of output_key -> output_tensor. The output keys are\n\"lookup\" and \"output\".", "source": "github-repos"} {"code": "def generate(self, id_or_uri):\n uri = (self._client.build_uri(id_or_uri) + '/generate')\n return self._client.get(uri)", "docstring": "Generates and returns a random range.\n\nArgs:\nid_or_uri:\nID or URI of range.\n\nReturns:\ndict: A dict containing a list with IDs.", "source": "codesearchnet"} {"code": "def _get_definitions(source):\n max_len = 0\n descs = collections.OrderedDict()\n lines = (s.strip() for s in source.splitlines())\n non_empty_lines = (s for s in lines if s)\n for line in non_empty_lines:\n if line:\n (arg, desc) = re.split('\\\\s\\\\s+', line.strip())\n arg_len = len(arg)\n if (arg_len > max_len):\n max_len = arg_len\n descs[arg] = desc\n return (descs, max_len)", "docstring": "Extract a dictionary of arguments and definitions.\n\nArgs:\nsource: The source for a section of a usage string that contains\ndefinitions.\n\nReturns:\nA two-tuple containing a dictionary of all arguments and definitions as\nwell as the length of the longest argument.", "source": "codesearchnet"} {"code": "def has_same_sumformula(self, other):\n \n same_atoms = True\n for atom in set(self['atom']):\n own_atom_number = len(self[self['atom'] == atom])\n other_atom_number = len(other[other['atom'] == atom])\n same_atoms = (own_atom_number == other_atom_number)\n if not same_atoms:\n break\n return same_atoms", "docstring": "Determines if ``other`` has the same sumformula\n\nArgs:\nother (molecule):\n\nReturns:\nbool:", "source": "juraj-google-style"} {"code": "def unzip(x, split_dim, current_length, num_splits=2, name=None):\n \n with tf.name_scope(name, 'unzip', [x]) as scope:\n x = tf.convert_to_tensor(x, name='x')\n \n all_splits = tf.split(\n value=x, num_or_size_splits=current_length, axis=split_dim, name=scope)\n splits = [[] for _ in xrange(num_splits)]\n for i in xrange(current_length):\n splits[i % num_splits].append(all_splits[i])\n return [tf.concat(s, split_dim) for s in splits]", "docstring": "Splits a tensor by unzipping along the split_dim.\n\nFor example the following array split into 2 would be:\n[1, 2, 3, 4, 5, 6] -> [1, 3, 5], [2, 4, 6]\nand by 3:\n[1, 2, 3, 4] -> [1, 4], [2], [3]\n\nArgs:\nx: The tensor to split.\nsplit_dim: The dimension to split along.\ncurrent_length: Current length along the split_dim.\nnum_splits: The number of splits.\nname: Optional name for this op.\nReturns:\nA length num_splits sequence.", "source": "juraj-google-style"} {"code": "def set_size(self, w, h):\n \n self.attributes['width'] = str(w)\n self.attributes['height'] = str(h)", "docstring": "Sets the rectangle size.\n\nArgs:\nw (int): width of the rectangle\nh (int): height of the rectangle", "source": "juraj-google-style"} {"code": "def map_exp_ids(self, exp):\n \n names = self.exp_feature_names\n if self.discretized_feature_names is 
not None:\n names = self.discretized_feature_names\n return [(names[x[0]], x[1]) for x in exp]", "docstring": "Maps ids to feature names.\n\nArgs:\nexp: list of tuples [(id, weight), (id,weight)]\n\nReturns:\nlist of tuples (feature_name, weight)", "source": "juraj-google-style"} {"code": "def download_url(self, url, **kwargs):\n \n if self.baseurl and ':\n url = join(self.baseurl, url)\n return self.resolver.download_to_directory(self.directory, url, **kwargs)", "docstring": "Download a URL to the workspace.\n\nArgs:\nurl (string): URL to download to directory\n**kwargs : See :py:mod:`ocrd.resolver.Resolver`\n\nReturns:\nThe local filename of the downloaded file", "source": "juraj-google-style"} {"code": "def _file_size(self, field):\n \n size = 0\n try:\n handle = open(self._files[field], \"r\")\n size = os.fstat(handle.fileno()).st_size\n handle.close()\n except:\n size = 0\n self._file_lengths[field] = size\n return self._file_lengths[field]", "docstring": "Returns the file size for given file field.\n\nArgs:\nfield (str): File field\n\nReturns:\nint. File size", "source": "juraj-google-style"} {"code": "def from_row_and_group(row: int, group: int):\n \n for sym in _pt_data.keys():\n el = Element(sym)\n if el.row == row and el.group == group:\n return el\n raise ValueError(\"No element with this row and group!\")", "docstring": "Returns an element from a row and group number.\n\nArgs:\nrow (int): Row number\ngroup (int): Group number\n\n.. note::\nThe 18 group number system is used, i.e., Noble gases are group 18.", "source": "juraj-google-style"} {"code": "def fit_to_structure(self, structure, symprec=0.1):\n sga = SpacegroupAnalyzer(structure, symprec)\n symm_ops = sga.get_symmetry_operations(cartesian=True)\n return (sum([self.transform(symm_op) for symm_op in symm_ops]) / len(symm_ops))", "docstring": "Returns a tensor that is invariant with respect to symmetry\noperations corresponding to a structure\n\nArgs:\nstructure (Structure): structure from which to generate\nsymmetry operations\nsymprec (float): symmetry tolerance for the Spacegroup Analyzer\nused to generate the symmetry operations", "source": "codesearchnet"} {"code": "def hgnc_id(self, hgnc_symbol, build='37'):\n \n \n query = {'hgnc_symbol':hgnc_symbol, 'build':build}\n projection = {'hgnc_id':1, '_id':0}\n res = self.hgnc_collection.find(query, projection)\n\n if res.count() > 0:\n return res[0]['hgnc_id']\n else:\n return None", "docstring": "Query the genes with a hgnc symbol and return the hgnc id\n\nArgs:\nhgnc_symbol(str)\nbuild(str)\n\nReturns:\nhgnc_id(int)", "source": "juraj-google-style"} {"code": "def _has_old_request_ended(self, shard_state):\n assert (shard_state.slice_start_time is not None)\n assert (shard_state.slice_request_id is not None)\n request_ids = [shard_state.slice_request_id]\n logs = None\n try:\n logs = list(logservice.fetch(request_ids=request_ids))\n except (apiproxy_errors.FeatureNotEnabledError, apiproxy_errors.CapabilityDisabledError) as e:\n logging.warning('Ignoring exception: %s', e)\n if ((not logs) or (not logs[0].finished)):\n return False\n return True", "docstring": "Whether previous slice retry has ended according to Logs API.\n\nArgs:\nshard_state: shard state.\n\nReturns:\nTrue if the request of previous slice retry has ended. 
False if it has\nnot or unknown.", "source": "codesearchnet"} {"code": "def __init__(self,\n tablename: str = None,\n table: Table = None,\n metadata: MetaData = None) -> None:\n \n assert table is not None or tablename, \"No table information provided\"\n assert not (tablename and table is not None), (\n \"Specify either table or tablename, not both\")\n self._table = table\n self._tablename = tablename\n self._metadata = metadata", "docstring": "Initialize with either ``tablename`` or ``table``, not both.\n\nArgs:\ntablename: string name of the table\ntable: SQLAlchemy :class:`Table` object\nmetadata: optional :class:`MetaData` object", "source": "juraj-google-style"} {"code": "def StopTiming(self, profile_name):\n \n measurements = self._profile_measurements.get(profile_name)\n if measurements:\n measurements.SampleStop()\n\n sample = '{0:f}\\t{1:s}\\t{2:f}\\n'.format(\n measurements.start_sample_time, profile_name,\n measurements.total_cpu_time)\n self._WritesString(sample)", "docstring": "Stops timing CPU time.\n\nArgs:\nprofile_name (str): name of the profile to sample.", "source": "juraj-google-style"} {"code": "class FlaxForcedEOSTokenLogitsProcessor(FlaxLogitsProcessor):\n\n def __init__(self, max_length: int, eos_token_id: int):\n self.max_length = max_length\n self.eos_token_id = eos_token_id\n\n def __call__(self, input_ids: jnp.ndarray, scores: jnp.ndarray, cur_len: int) -> jnp.ndarray:\n new_scores = jnp.full(scores.shape, -float('inf'))\n apply_penalty = 1 - jnp.bool_(cur_len - self.max_length + 1)\n scores = jnp.where(apply_penalty, new_scores.at[:, self.eos_token_id].set(0), scores)\n return scores", "docstring": "[`FlaxLogitsProcessor`] that enforces the specified token as the last generated token when `max_length` is reached.\n\nArgs:\nmax_length (`int`):\nThe maximum length of the sequence to be generated.\neos_token_id (`int`):\nThe id of the token to force as the last generated token when `max_length` is reached.", "source": "github-repos"} {"code": "def package_releases(self, project_name):\n \n try:\n return self._connection.package_releases(project_name)\n except Exception as err:\n raise PyPIClientError(err)", "docstring": "Retrieve the versions from PyPI by ``project_name``.\n\nArgs:\nproject_name (str): The name of the project we wish to retrieve\nthe versions of.\n\nReturns:\nlist: Of string versions.", "source": "juraj-google-style"} {"code": "async def leave_conversation(self, conv_id):\n logger.info('Leaving conversation: {}'.format(conv_id))\n (await self._conv_dict[conv_id].leave())\n del self._conv_dict[conv_id]", "docstring": "Leave a conversation.\n\nArgs:\nconv_id (str): ID of conversation to leave.", "source": "codesearchnet"} {"code": "def reduce_to_2d(arr):\n if (not isinstance(arr, np.ndarray)):\n raise ValueError('reduce_to_2d requires a numpy.ndarray')\n ndims = len(arr.shape)\n if (ndims < 2):\n raise ValueError('reduce_to_2d requires an array of dimensionality >=2')\n slices = (([0] * (ndims - 2)) + [slice(None), slice(None)])\n return arr[slices]", "docstring": "Given a np.npdarray with nDims > 2, reduce it to 2d.\n\nIt does this by selecting the zeroth coordinate for every dimension greater\nthan two.\n\nArgs:\narr: a numpy ndarray of dimension at least 2.\n\nReturns:\nA two-dimensional subarray from the input array.\n\nRaises:\nValueError: If the argument is not a numpy ndarray, or the dimensionality\nis too low.", "source": "codesearchnet"} {"code": "def _MakeCacheInvariant(self, urn, age):\n \n precondition.AssertType(urn, Text)\n return 
\"%s:%s\" % (urn, self.ParseAgeSpecification(age))", "docstring": "Returns an invariant key for an AFF4 object.\n\nThe object will be cached based on this key. This function is specifically\nextracted to ensure that we encapsulate all security critical aspects of the\nAFF4 object so that objects do not leak across security boundaries.\n\nArgs:\nurn: The urn of the object.\nage: The age policy used to build this object. Should be one of\nALL_TIMES, NEWEST_TIME or a range.\n\nReturns:\nA key into the cache.", "source": "juraj-google-style"} {"code": "def config(self, name=\"skype\"):\n \n self.conn(\"PUT\", \"{0}/users/ME/endpoints/{1}/presenceDocs/messagingService\"\n .format(self.conn.msgsHost, self.id),\n auth=SkypeConnection.Auth.RegToken,\n json={\"id\": \"messagingService\",\n \"type\": \"EndpointPresenceDoc\",\n \"selfLink\": \"uri\",\n \"privateInfo\": {\"epname\": name},\n \"publicInfo\": {\"capabilities\": \"\",\n \"type\": 1,\n \"skypeNameVersion\": \"skype.com\",\n \"nodeInfo\": \"xx\",\n \"version\": \"908/1.30.0.128\"}})", "docstring": "Configure this endpoint to allow setting presence.\n\nArgs:\nname (str): display name for this endpoint", "source": "juraj-google-style"} {"code": "def _process_query(self, query, prepared=False):\n \n\n \n if prepared is True:\n files = {'query': str(query)}\n\n logger.debug('About to submit the following query {}'.format(query))\n\n res, status = self.post(\n self.disambiguate_service,\n files=files,\n headers={'Accept': 'application/json'},\n )\n\n if status == 200:\n return self.decode(res), status\n else:\n logger.debug('Disambiguation failed.')\n return None, status\n\n text = query['text']\n\n sentence_coordinates = [\n {\n \"offsetStart\": 0,\n \"offsetEnd\": len(text)\n }\n ]\n\n total_nb_sentences = len(sentence_coordinates) \n sentences_groups = []\n\n if len(text) > self.max_text_length:\n res, status_code = self.segment(text)\n\n if status_code == 200:\n sentence_coordinates = res['sentences']\n total_nb_sentences = len(sentence_coordinates)\n else:\n logger.error('Error during the segmentation of the text.')\n\n logger.debug(\n 'Text too long, split in {} sentences; building groups of {} '\n 'sentences.'.format(\n total_nb_sentences, self.sentences_per_group\n )\n )\n sentences_groups = self._group_sentences(\n total_nb_sentences,\n self.sentences_per_group\n )\n else:\n query['sentence'] = \"true\"\n\n if total_nb_sentences > 1:\n query['sentences'] = sentence_coordinates\n\n if len(sentences_groups) > 0:\n for group in sentences_groups:\n query['processSentence'] = group\n\n res, status_code = self._process_query(query, prepared=True)\n\n if status_code == 200:\n if 'entities' in res:\n query['entities'] = res[u'entities']\n query['language'] = res[u'language']\n else:\n logger.error(\n \"Error when processing the query {}\".format(query)\n )\n return None, status_code\n\n else:\n res, status_code = self._process_query(query, prepared=True)\n\n if status_code == 200:\n query['language'] = res[u'language']\n if 'entities' in res:\n query['entities'] = res[u'entities']\n else:\n logger.error(\"Error when processing the query {}\".format(query))\n return None, status_code\n\n return query, status_code", "docstring": "Process query recursively, if the text is too long,\nit is split and processed bit a bit.\n\nArgs:\nquery (sdict): Text to be processed.\nprepared (bool): True when the query is ready to be submitted via\nPOST request.\nReturns:\nstr: Body ready to be submitted to the API.", "source": "juraj-google-style"} {"code": "def 
Execute(cmd, args, time_limit=(- 1), bypass_whitelist=False, daemon=False, use_client_context=False, cwd=None):\n if ((not bypass_whitelist) and (not IsExecutionWhitelisted(cmd, args))):\n logging.info('Execution disallowed by whitelist: %s %s.', cmd, ' '.join(args))\n return (b'', b'Execution disallowed by whitelist.', (- 1), (- 1))\n if daemon:\n pid = os.fork()\n if (pid == 0):\n try:\n os.setsid()\n except OSError:\n pass\n _Execute(cmd, args, time_limit, use_client_context=use_client_context, cwd=cwd)\n os._exit(0)\n else:\n return _Execute(cmd, args, time_limit, use_client_context=use_client_context, cwd=cwd)", "docstring": "Executes commands on the client.\n\nThis function is the only place where commands will be executed\nby the GRR client. This makes sure that all issued commands are compared to a\nwhite list and no malicious commands are issued on the client machine.\n\nArgs:\ncmd: The command to be executed.\nargs: List of arguments.\ntime_limit: Time in seconds the process is allowed to run.\nbypass_whitelist: Allow execution of things that are not in the whitelist.\nNote that this should only ever be called on a binary that passes the\nVerifySignedBlob check.\ndaemon: Start the new process in the background.\nuse_client_context: Run this script in the client's context. Defaults to\nsystem context.\ncwd: Current working directory for the command.\n\nReturns:\nA tuple of stdout, stderr, return value and time taken.", "source": "codesearchnet"} {"code": "def _parse_volumes(volume_values: dict) -> str:\n for v_values in volume_values:\n for (v_key, v_value) in v_values.items():\n if (v_key == 'source'):\n if (v_value == '.'):\n source = os.path.dirname(os.path.abspath(__file__))\n else:\n source = v_value\n if (v_key == 'target'):\n target = v_value\n volume_spec = [((source + ':') + target)]\n return volume_spec", "docstring": "Parse volumes key.\n\nArgs:\nvolume_values (dict): volume configuration values\n\nReturns:\nstring, volume specification with mount source and container path", "source": "codesearchnet"} {"code": "def set_tag(self, key, value, update_session=True):\n existing_tags = {x.key: x for x in self.tags}\n if (key in existing_tags):\n tag = existing_tags[key]\n if (tag.value == value):\n return False\n tag.value = value\n else:\n tag = Tag()\n tag.resource_id = self.id\n tag.key = key\n tag.value = value\n self.tags.append(tag)\n if update_session:\n db.session.add(tag)\n return True", "docstring": "Create or set the value of the tag with `key` to `value`. Returns `True` if the tag was created or updated or\n`False` if there were no changes to be made.\n\nArgs:\nkey (str): Key of the tag\nvalue (str): Value of the tag\nupdate_session (bool): Automatically add the change to the SQLAlchemy session. 
Default: True\n\nReturns:\n`bool`", "source": "codesearchnet"} {"code": "def WriteToPath(obj, filepath):\n with io.open(filepath, mode='w', encoding='utf-8') as filedesc:\n WriteToFile(obj, filedesc)", "docstring": "Serializes and writes given Python object to the specified YAML file.\n\nArgs:\nobj: A Python object to serialize.\nfilepath: A path to the file into which the object is to be written.", "source": "codesearchnet"} {"code": "def run_cgmlst(blast_runner, full=False):\n \n from sistr.src.serovar_prediction.constants import genomes_to_serovar\n\n df_cgmlst_profiles = ref_cgmlst_profiles()\n\n logging.debug('{} distinct cgMLST330 profiles'.format(df_cgmlst_profiles.shape[0]))\n\n logging.info('Running BLAST on serovar predictive cgMLST330 alleles')\n cgmlst_fasta_path = CGMLST_CENTROID_FASTA_PATH if not full else CGMLST_FULL_FASTA_PATH\n blast_outfile = blast_runner.blast_against_query(cgmlst_fasta_path)\n logging.info('Reading BLAST output file \"{}\"'.format(blast_outfile))\n blast_reader = BlastReader(blast_outfile)\n if blast_reader.df is None:\n logging.error('No cgMLST330 alleles found!')\n return ({'distance': 1.0,\n 'genome_match': None,\n 'serovar': None,\n 'matching_alleles': 0,\n 'subspecies': None,\n 'cgmlst330_ST': None,},\n {}, )\n logging.info('Found {} cgMLST330 allele BLAST results'.format(blast_reader.df.shape[0]))\n\n\n df_cgmlst_blastn = process_cgmlst_results(blast_reader.df)\n\n marker_match_results = matches_to_marker_results(df_cgmlst_blastn[df_cgmlst_blastn.is_match])\n contig_blastn_records = alleles_to_retrieve(df_cgmlst_blastn)\n retrieved_marker_alleles = get_allele_sequences(blast_runner.fasta_path,\n contig_blastn_records,\n full=full)\n logging.info('Type retrieved_marker_alleles %s', type(retrieved_marker_alleles))\n all_marker_results = marker_match_results.copy()\n for marker, res in retrieved_marker_alleles.items():\n all_marker_results[marker] = res\n for marker in df_cgmlst_profiles.columns:\n if marker not in all_marker_results:\n all_marker_results[marker] = {'blast_result': None,\n 'name': None,\n 'seq': None,}\n cgmlst_results = {}\n for marker, res in all_marker_results.items():\n try:\n cgmlst_results[marker] = int(res['name'])\n except:\n logging.error('Missing cgmlst_results for %s', marker)\n logging.debug(res)\n\n logging.info('Calculating number of matching alleles to serovar predictive cgMLST330 profiles')\n df_relatives = find_closest_related_genome(cgmlst_results, df_cgmlst_profiles)\n genome_serovar_dict = genomes_to_serovar()\n df_relatives['serovar'] = [genome_serovar_dict[genome] for genome in df_relatives.index]\n logging.debug('Top 5 serovar predictive cgMLST profiles:\\n{}'.format(df_relatives.head()))\n spp = None\n subspeciation_tuple = cgmlst_subspecies_call(df_relatives)\n if subspeciation_tuple is not None:\n spp, distance, spp_counter = subspeciation_tuple\n logging.info('Top subspecies by cgMLST is \"{}\" (min dist={}, Counter={})'.format(spp, distance, spp_counter))\n else:\n logging.warning('Subspeciation by cgMLST was not possible!')\n\n cgmlst_serovar = None\n cgmlst_matching_genome = None\n cgmlst_matching_alleles = 0\n cgmlst_distance = 1.0\n for idx, row in df_relatives.iterrows():\n cgmlst_distance = row['distance']\n cgmlst_matching_alleles = row['matching']\n cgmlst_serovar = row['serovar'] if cgmlst_distance <= 1.0 else None\n cgmlst_matching_genome = idx if cgmlst_distance <= 1.0 else None\n logging.info('Top serovar by cgMLST profile matching: \"{}\" with {} matching alleles, distance={:.1%}'.format(\n 
cgmlst_serovar,\n cgmlst_matching_alleles,\n cgmlst_distance\n ))\n break\n\n cgmlst_st = None\n cgmlst_markers_sorted = sorted(all_marker_results.keys())\n cgmlst_allele_names = []\n marker = None\n for marker in cgmlst_markers_sorted:\n try:\n aname = all_marker_results[marker]['name']\n if aname:\n cgmlst_allele_names.append(str(aname))\n else:\n break\n except:\n break\n if len(cgmlst_allele_names) == len(cgmlst_markers_sorted):\n cgmlst_st = allele_name('-'.join(cgmlst_allele_names))\n logging.info('cgMLST330 Sequence Type=%s', cgmlst_st)\n else:\n logging.warning('Could not compute cgMLST330 Sequence Type due to missing data (marker %s)', marker)\n return ({'distance': cgmlst_distance,\n 'genome_match': cgmlst_matching_genome,\n 'serovar': cgmlst_serovar,\n 'matching_alleles': cgmlst_matching_alleles,\n 'subspecies': spp,\n 'cgmlst330_ST': cgmlst_st,},\n all_marker_results, )", "docstring": "Perform in silico cgMLST on an input genome\n\nArgs:\nblast_runner (sistr.src.blast_wrapper.BlastRunner): blastn runner object with genome fasta initialized\n\nReturns:\ndict: cgMLST ref genome match, distance to closest ref genome, subspecies and serovar predictions\ndict: marker allele match results (seq, allele name, blastn results)", "source": "juraj-google-style"} {"code": "def save_model(self, fname, pretty=False):\n \n with open(fname, \"w\") as f:\n xml_str = ET.tostring(self.root, encoding=\"unicode\")\n if pretty:\n \n parsed_xml = xml.dom.minidom.parseString(xml_str)\n xml_str = parsed_xml.toprettyxml(newl=\"\")\n f.write(xml_str)", "docstring": "Saves the xml to file.\n\nArgs:\nfname: output file location\npretty: attempts!! to pretty print the output", "source": "juraj-google-style"} {"code": "def get_restore_path(filename):\n \n path = os.path.join(RESTORE_DIRECTORY, FILE_STORE_LOCATION)\n if not os.path.exists(path):\n os.makedirs(path)\n return os.path.join(path, filename + '.pickle')", "docstring": "get_restore_path: returns path to directory for restoration points\nArgs:\nfilename (str): Name of file to store\nReturns: string path to file", "source": "juraj-google-style"} {"code": "def assign_sub(self, variable, value):\n variable.assign_sub(value)", "docstring": "Subtract a value from a variable.\n\nThis should be used in optimizers instead of\n`variable.assign_sub(value)` to support backend specific optimizations.\nNote that the variable can be a model variable or an optimizer variable;\nit can be a backend native variable or a Keras variable.\n\nArgs:\nvariable: The variable to update.\nvalue: The value to add to the variable.", "source": "github-repos"} {"code": "def _time_to_datetime(value):\n if (not isinstance(value, datetime.time)):\n raise TypeError(('Cannot convert to datetime expected time value; received %s' % value))\n return datetime.datetime(1970, 1, 1, value.hour, value.minute, value.second, value.microsecond)", "docstring": "Convert a time to a datetime for Cloud Datastore storage.\n\nArgs:\nvalue: A datetime.time object.\n\nReturns:\nA datetime object with date set to 1970-01-01.", "source": "codesearchnet"} {"code": "def _write_custom_summaries(self, step, logs=None):\n logs = logs or {}\n if context.executing_eagerly():\n with self.writer.as_default(), summary_ops_v2.record_if(True):\n for name, value in logs.items():\n if isinstance(value, np.ndarray):\n value = value.item()\n summary_ops_v2.scalar(name, value, step=step)\n else:\n for name, value in logs.items():\n if isinstance(value, np.ndarray):\n value = value.item()\n summary = tf_summary.Summary()\n 
summary_value = summary.value.add()\n summary_value.simple_value = value\n summary_value.tag = name\n self.writer.add_summary(summary, step)\n self.writer.flush()", "docstring": "Writes metrics out as custom scalar summaries.\n\nArgs:\nstep: the global step to use for TensorBoard.\nlogs: dict. Keys are scalar summary names, values are\nNumPy scalars.", "source": "github-repos"} {"code": "def __init__(self, primitive_handler_: primitive_handler.PrimitiveHandler, generator: _JsonTextGenerator, json_format: _FhirJsonFormat) -> None:\n self.primitive_handler = primitive_handler_\n self.generator = generator\n self.json_format = json_format", "docstring": "Creates a new instance of JsonPrinter.\n\nNote that this is for *internal-use* only. External clients should leverage\none of the available class constructors such as:\n`JsonPrinter.pretty_printer(...)`, `JsonPrinter.compact_printer()`, etc.\n\nArgs:\nprimitive_handler_: Responsible for returning PrimitiveWrappers.\ngenerator: The type of _JsonTextGenerator used to handle whitespace and\nnewline additions.\njson_format: The style of FHIR JSON to output.", "source": "github-repos"} {"code": "def ground_temperature_depth(self, value=None):\n \n if value is not None:\n try:\n value = float(value)\n except ValueError:\n raise ValueError(\n 'value {} need to be of type float '\n 'for field `ground_temperature_depth`'.format(value))\n\n self._ground_temperature_depth = value", "docstring": "Corresponds to IDD Field `ground_temperature_depth`\n\nArgs:\nvalue (float): value for IDD Field `ground_temperature_depth`\nUnit: m\nif `value` is None it will not be checked against the\nspecification and is assumed to be a missing value\n\nRaises:\nValueError: if `value` is not a valid value", "source": "juraj-google-style"} {"code": "def get_shell_code(self, shell=None, parent_environ=None, style=OutputStyle.file):\n executor = self._create_executor(interpreter=create_shell(shell), parent_environ=parent_environ)\n if (self.load_path and os.path.isfile(self.load_path)):\n executor.env.REZ_RXT_FILE = self.load_path\n self._execute(executor)\n return executor.get_output(style)", "docstring": "Get the shell code resulting from intepreting this context.\n\nArgs:\nshell (str): Shell type, for eg 'bash'. 
If None, the current shell\ntype is used.\nparent_environ (dict): Environment to interpret the context within,\ndefaults to os.environ if None.\nstyle (): Style to format shell code in.", "source": "codesearchnet"} {"code": "def remove_segments(self, segments_to_remove):\n v_ind = self.vertex_indices_in_segments(segments_to_remove)\n self.segm = {name: faces for (name, faces) in self.segm.iteritems() if (name not in segments_to_remove)}\n self.remove_vertices(v_ind)", "docstring": "Remove the faces and vertices for given segments, keeping all others.\n\nArgs:\nsegments_to_remove: a list of segnments whose vertices will be removed", "source": "codesearchnet"} {"code": "def minimum(self):\n return min([(x, energy) for (_, x, energy, _, _) in self.get_kinks()], key=(lambda i: i[1]))", "docstring": "Finds the minimum reaction energy E_min and corresponding\nmixing ratio x_min.\n\nReturns:\nTuple (x_min, E_min).", "source": "codesearchnet"} {"code": "class Identity(Initializer):\n\n def __init__(self, gain=1.0):\n self.gain = gain\n\n def __call__(self, shape, dtype=None, **kwargs):\n \n _validate_kwargs(self.__class__.__name__, kwargs, support_partition=False)\n dtype = _assert_float_dtype(_get_dtype(dtype))\n if len(shape) != 2:\n raise ValueError('Identity matrix initializer can only be used for 2D matrices.')\n initializer = linalg_ops.eye(*shape, dtype=dtype)\n return self.gain * initializer\n\n def get_config(self):\n return {'gain': self.gain}", "docstring": "Initializer that generates the identity matrix.\n\nAlso available via the shortcut function `tf.keras.initializers.identity`.\n\nOnly usable for generating 2D matrices.\n\nExamples:\n\n>>> # Standalone usage:\n>>> initializer = tf.keras.initializers.Identity()\n>>> values = initializer(shape=(2, 2))\n\n>>> # Usage in a Keras layer:\n>>> initializer = tf.keras.initializers.Identity()\n>>> layer = tf.keras.layers.Dense(3, kernel_initializer=initializer)\n\nArgs:\ngain: Multiplicative factor to apply to the identity matrix.", "source": "github-repos"} {"code": "def connect(self, **kwargs):\n self.app = self._app.connect(**kwargs)\n try:\n self._top_window = self.app.top_window().wrapper_object()\n self.set_foreground()\n except RuntimeError:\n self._top_window = None", "docstring": "Connect to window and set it foreground\n\nArgs:\n**kwargs: optional arguments\n\nReturns:\nNone", "source": "codesearchnet"} {"code": "def fromFile(cls, filename):\n\t\t\n\n\t\t\n\t\toFile = open(filename)\n\n\t\t\n\t\tdDetails = JSON.decodef(oFile)\n\n\t\t\n\t\treturn cls(dDetails)", "docstring": "From File\n\nLoads a JSON file and creates a Node instance from it\n\nArgs:\nfilename (str): The filename to load\n\nReturns:\n_NodeInterface", "source": "juraj-google-style"} {"code": "def parse(file_contents, file_name):\n try:\n yaml.load(file_contents)\n except Exception:\n (_, exc_value, _) = sys.exc_info()\n return 'Cannot Parse: {file_name}: \\n {exc_value}'.format(file_name=file_name, exc_value=exc_value)", "docstring": "This takes a list of filenames and their paths of expected yaml files and\ntried to parse them, erroring if there are any parsing issues.\n\nArgs:\nfile_contents (str): Contents of a yml file\n\nRaises:\nyaml.parser.ParserError: Raises an error if the file contents cannot be\nparsed and interpreted as yaml", "source": "codesearchnet"} {"code": "def show_in_external_file_explorer(fnames=None):\n \n if not isinstance(fnames, (tuple, list)):\n fnames = [fnames]\n for fname in fnames:\n open_file_in_external_explorer(fname)", "docstring": 
"Show files in external file explorer\n\nArgs:\nfnames (list): Names of files to show.", "source": "juraj-google-style"} {"code": "def solid_angle(center, coords):\n \n\n \n r = [np.subtract(c, center) for c in coords]\n\n \n r_norm = [np.linalg.norm(i) for i in r]\n\n \n \n angle = 0\n for i in range(1, len(r) - 1):\n j = i + 1\n tp = np.abs(np.dot(r[0], np.cross(r[i], r[j])))\n de = r_norm[0] * r_norm[i] * r_norm[j] + \\\n r_norm[j] * np.dot(r[0], r[i]) + \\\n r_norm[i] * np.dot(r[0], r[j]) + \\\n r_norm[0] * np.dot(r[i], r[j])\n if de == 0:\n my_angle = 0.5 * pi if tp > 0 else -0.5 * pi\n else:\n my_angle = np.arctan(tp / de)\n angle += (my_angle if my_angle > 0 else my_angle + np.pi) * 2\n\n return angle", "docstring": "Helper method to calculate the solid angle of a set of coords from the\ncenter.\n\nArgs:\ncenter (3x1 array): Center to measure solid angle from.\ncoords (Nx3 array): List of coords to determine solid angle.\n\nReturns:\nThe solid angle.", "source": "juraj-google-style"} {"code": "def add_dimension(self, dimension, dim_pos, dim_val, vdim=False, **kwargs):\n dimension = asdim(dimension)\n if (dimension in self.dimensions()):\n raise Exception('{dim} dimension already defined'.format(dim=dimension.name))\n if (vdim and self._deep_indexable):\n raise Exception('Cannot add value dimension to object that is deep indexable')\n if vdim:\n dims = self.vdims[:]\n dims.insert(dim_pos, dimension)\n dimensions = dict(vdims=dims)\n dim_pos += self.ndims\n else:\n dims = self.kdims[:]\n dims.insert(dim_pos, dimension)\n dimensions = dict(kdims=dims)\n if (isinstance(dim_val, basestring) or (not hasattr(dim_val, '__iter__'))):\n dim_val = cycle([dim_val])\n elif (not (len(dim_val) == len(self))):\n raise ValueError('Added dimension values must be same lengthas existing keys.')\n items = OrderedDict()\n for (dval, (key, val)) in zip(dim_val, self.data.items()):\n if vdim:\n new_val = list(val)\n new_val.insert(dim_pos, dval)\n items[key] = tuple(new_val)\n else:\n new_key = list(key)\n new_key.insert(dim_pos, dval)\n items[tuple(new_key)] = val\n return self.clone(items, **dict(dimensions, **kwargs))", "docstring": "Adds a dimension and its values to the object\n\nRequires the dimension name or object, the desired position in\nthe key dimensions and a key value scalar or sequence of the\nsame length as the existing keys.\n\nArgs:\ndimension: Dimension or dimension spec to add\ndim_pos (int) Integer index to insert dimension at\ndim_val (scalar or ndarray): Dimension value(s) to add\nvdim: Disabled, this type does not have value dimensions\n**kwargs: Keyword arguments passed to the cloned element\n\nReturns:\nCloned object containing the new dimension", "source": "codesearchnet"} {"code": "def csv_to_transactions(handle, source_encoding='latin1', date_format='%d-%m-%Y', thousand_sep='.', decimal_sep=','):\n trans = Transactions()\n rows = csv.reader(handle, delimiter=';', quotechar='\"')\n for (index, row) in enumerate(rows):\n trans.append(Parse.csv_row_to_transaction(index, row))\n return trans", "docstring": "Parses CSV data from stream and returns ``Transactions``.\n\nArgs:\nindex: The index of this row in the original CSV file. 
Used for\nsorting ``Transaction``s by their order of appearance.\n\nrow: The row containing strings for [transfer_date, posted_date,\nmessage, money_amount, money_total].\n\nsource_encoding: The encoding that will be used to decode strings\nto UTF-8.\n\ndate_format: The format of dates in this row.\n\nthousand_sep: The thousand separator in money amounts.\n\ndecimal_sep: The decimal separator in money amounts.\n\nReturns:\nA ``Transactions`` object.", "source": "codesearchnet"} {"code": "def _cancel_grpc(operations_stub, operation_name):\n \n request_pb = operations_pb2.CancelOperationRequest(name=operation_name)\n operations_stub.CancelOperation(request_pb)", "docstring": "Cancel an operation using a gRPC client.\n\nArgs:\noperations_stub (google.longrunning.operations_pb2.OperationsStub):\nThe gRPC operations stub.\noperation_name (str): The name of the operation.", "source": "juraj-google-style"} {"code": "def disconnect(signal, receiver):\n inputkey = __make_id(receiver)\n with __lock:\n __purge()\n receivers = __receivers.get(signal)\n for idx in six.moves.range(len(receivers)):\n connected = receivers[idx]()\n if (inputkey != __make_id(connected)):\n continue\n del receivers[idx]\n return True\n return False", "docstring": "Disconnect the receiver `func` from the signal, identified by\n`signal_id`.\n\nArgs:\nsignal: The signal identifier.\nreceiver: The callable receiver to disconnect.\n\nReturns:\nTrue if the receiver was successfully disconnected. False otherwise.", "source": "codesearchnet"} {"code": "def reset_spent_time(self, **kwargs):\n path = ('%s/%s/reset_spent_time' % (self.manager.path, self.get_id()))\n return self.manager.gitlab.http_post(path, **kwargs)", "docstring": "Resets the time spent working on the object.\n\nArgs:\n**kwargs: Extra options to send to the server (e.g. sudo)\n\nRaises:\nGitlabAuthenticationError: If authentication is not correct\nGitlabTimeTrackingError: If the time tracking update cannot be done", "source": "codesearchnet"} {"code": "def add_all_transport_reactions(model, boundaries, allow_duplicates=False):\n all_reactions = {}\n if (not allow_duplicates):\n for rxnid in model.database.reactions:\n rx = model.database.get_reaction(rxnid)\n all_reactions[rx] = rxnid\n boundary_pairs = set()\n for (source, dest) in boundaries:\n if (source != dest):\n boundary_pairs.add(tuple(sorted((source, dest))))\n added = set()\n added_pairs = set()\n initial_compounds = set(model.compounds)\n reactions = set(model.database.reactions)\n for compound in initial_compounds:\n for (c1, c2) in boundary_pairs:\n compound1 = compound.in_compartment(c1)\n compound2 = compound.in_compartment(c2)\n pair = (compound1, compound2)\n if (pair in added_pairs):\n continue\n rxnid_tp = create_transport_id(reactions, compound1, compound2)\n reaction_tp = Reaction(Direction.Both, {compound1: (- 1), compound2: 1})\n if (reaction_tp not in all_reactions):\n model.database.set_reaction(rxnid_tp, reaction_tp)\n reactions.add(rxnid_tp)\n else:\n rxnid_tp = all_reactions[reaction_tp]\n if (not model.has_reaction(rxnid_tp)):\n added.add(rxnid_tp)\n model.add_reaction(rxnid_tp)\n added_pairs.add(pair)\n return added", "docstring": "Add all transport reactions to database and to model.\n\nAdd transport reactions for all boundaries. Boundaries are defined\nby pairs (2-tuples) of compartment IDs. 
Transport reactions are\nadded for all compounds in the model, not just for compounds in the\ntwo boundary compartments.\n\nArgs:\nmodel: :class:`psamm.metabolicmodel.MetabolicModel`.\nboundaries: Set of compartment boundary pairs.\n\nReturns:\nSet of IDs of reactions that were added.", "source": "codesearchnet"} {"code": "def get_help_commands(server_prefix):\n \n\n datapacks = []\n\n _dir = os.path.realpath(\n os.path.join(os.getcwd(), os.path.dirname(__file__)))\n for module_name in os.listdir(\"{}/../\".format(_dir)):\n if not module_name.startswith(\"_\") and not module_name.startswith(\"!\"):\n help_command = \"`{}help {}`\".format(server_prefix, module_name)\n datapacks.append((module_name, help_command, True))\n\n return datapacks", "docstring": "Get the help commands for all modules\n\nArgs:\nserver_prefix: The server command prefix\n\nReturns:\ndatapacks (list): A list of datapacks for the help commands for all the modules", "source": "juraj-google-style"} {"code": "def pipelines(self, **kwargs):\n path = ('%s/%s/pipelines' % (self.manager.path, self.get_id()))\n return self.manager.gitlab.http_get(path, **kwargs)", "docstring": "List the merge request pipelines.\n\nArgs:\n**kwargs: Extra options to send to the server (e.g. sudo)\n\nRaises:\nGitlabAuthenticationError: If authentication is not correct\nGitlabListError: If the list could not be retrieved\n\nReturns:\nRESTObjectList: List of changes", "source": "codesearchnet"} {"code": "def AnsiText(text, command_list=None, reset=True):\n \n command_list = command_list or ['reset']\n if reset:\n return '%s%s%s' % (_AnsiCmd(command_list), text, _AnsiCmd(['reset']))\n else:\n return '%s%s' % (_AnsiCmd(command_list), text)", "docstring": "Wrap text in ANSI/SGR escape codes.\n\nArgs:\ntext: String to encase in sgr escape sequence.\ncommand_list: List of strings, each string represents an sgr value.\ne.g. 'fg_blue', 'bg_yellow'\nreset: Boolean, if to add a reset sequence to the suffix of the text.\n\nReturns:\nString with sgr characters added.", "source": "juraj-google-style"} {"code": "def forward(self, hidden_states: torch.Tensor, attention_mask: torch.Tensor, output_attentions: Optional[bool]=False) -> Tuple[torch.FloatTensor]:\n residual = hidden_states\n hidden_states = self.layer_norm1(hidden_states)\n hidden_states, attn_weights = self.self_attn(hidden_states=hidden_states, head_mask=attention_mask, output_attentions=output_attentions)\n hidden_states = hidden_states + residual\n residual = hidden_states\n hidden_states = self.layer_norm2(hidden_states)\n hidden_states = self.mlp(hidden_states)\n hidden_states = hidden_states + residual\n outputs = (hidden_states,)\n if output_attentions:\n outputs += (attn_weights,)\n return outputs", "docstring": "Args:\nhidden_states (`torch.FloatTensor`): input to the layer of shape `(batch, seq_len, embed_dim)`\nattention_mask (`torch.FloatTensor`): attention mask of size\n`(batch, 1, tgt_len, src_len)` where padding elements are indicated by very large negative values.\n`(config.encoder_attention_heads,)`.\noutput_attentions (`bool`, *optional*):\nWhether or not to return the attentions tensors of all attention layers. 
See `attentions` under\nreturned tensors for more detail.", "source": "github-repos"} {"code": "def get_and_check_single_upstream_artifact_full_path(context, task_id, path):\n \n abs_path = get_single_upstream_artifact_full_path(context, task_id, path)\n if not os.path.exists(abs_path):\n raise ScriptWorkerTaskException(\n 'upstream artifact with path: {}, does not exist'.format(abs_path)\n )\n\n return abs_path", "docstring": "Return the full path where an upstream artifact is located on disk.\n\nArgs:\ncontext (scriptworker.context.Context): the scriptworker context.\ntask_id (str): the task id of the task that published the artifact\npath (str): the relative path of the artifact\n\nReturns:\nstr: absolute path to the artifact\n\nRaises:\nscriptworker.exceptions.ScriptWorkerTaskException: when an artifact doesn't exist.", "source": "juraj-google-style"} {"code": "def _get_num_multimodal_tokens(self, image_sizes=None, **kwargs):\n vision_data = {}\n if image_sizes is not None:\n num_image_tokens = [self.image_seq_length] * len(image_sizes)\n num_image_patches = [1] * len(image_sizes)\n vision_data.update({'num_image_tokens': num_image_tokens, 'num_image_patches': num_image_patches})\n return MultiModalData(**vision_data)", "docstring": "Computes the number of placeholder tokens needed for multimodal inputs with the given sizes.\n\nArgs:\nimage_sizes (List[List[str]], *optional*):\nThe input sizes formatted as (height, width) per each image.\nReturns:\nDict[str, List[int]]: A dictionary mapping each modality (\"image\", \"video\", \"audio\")\nto a list containing the number of placeholder tokens required. If the model doesn't accept\na certain modality or no input sizes are provided, the dict value is set to an empty list.", "source": "github-repos"} {"code": "def _matches(field, params):\n fieldattrs = six.iteritems(params)\n return all(((getattr(field, attr) == val) for (attr, val) in fieldattrs))", "docstring": "Return True if the input TypedField `field` contains instance attributes\nthat match the input parameters.\n\nArgs:\nfield: A TypedField instance.\nparams: A dictionary of TypedField instance attribute-to-value mappings.\n\nReturns:\nTrue if the input TypedField matches the input parameters.", "source": "codesearchnet"} {"code": "def match_file(self, f, update_file=False):\n \n if self.map_func is not None:\n val = self.map_func(f)\n else:\n m = self.regex.search(f.path)\n val = m.group(1) if m is not None else None\n\n return self._astype(val)", "docstring": "Determine whether the passed file matches the Entity.\n\nArgs:\nf (File): The File instance to match against.\n\nReturns: the matched value if a match was found, otherwise None.", "source": "juraj-google-style"} {"code": "def StatEntryFromStat(stat, pathspec, ext_attrs=True):\n result = rdf_client_fs.StatEntry(pathspec=pathspec)\n for attr in _STAT_ATTRS:\n value = getattr(stat.GetRaw(), attr, None)\n if (value is None):\n continue\n value = int(value)\n if (value < 0):\n value &= 4294967295\n setattr(result, attr, value)\n result.st_flags_linux = stat.GetLinuxFlags()\n result.st_flags_osx = stat.GetOsxFlags()\n if ext_attrs:\n result.ext_attrs = list(GetExtAttrs(stat.GetPath()))\n return result", "docstring": "Build a stat entry object from a given stat object.\n\nArgs:\nstat: A `Stat` object.\npathspec: A `PathSpec` from which `stat` was obtained.\next_attrs: Whether to include extended file attributes in the result.\n\nReturns:\n`StatEntry` object.", "source": "codesearchnet"} {"code": "def get_poi(self, **kwargs):\n 
params = {'coordinateX': kwargs.get('longitude'), 'coordinateY': kwargs.get('latitude'), 'tipos': util.ints_to_string(kwargs.get('types')), 'Radius': kwargs.get('radius'), 'cultureInfo': util.language_code(kwargs.get('lang'))}\n result = self.make_request('geo', 'get_poi', **params)\n if (not util.check_result(result, 'poiList')):\n return (False, 'UNKNOWN ERROR')\n values = util.response_list(result, 'poiList')\n return (True, [emtype.Poi(**a) for a in values])", "docstring": "Obtain a list of POI in the given radius.\n\nArgs:\nlatitude (double): Latitude in decimal degrees.\nlongitude (double): Longitude in decimal degrees.\ntypes (list[int] | int): POI IDs (or empty list to get all).\nradius (int): Radius (in meters) of the search.\nlang (str): Language code (*es* or *en*).\n\nReturns:\nStatus boolean and parsed response (list[Poi]), or message string\nin case of error.", "source": "codesearchnet"} {"code": "def new_vertex(self):\n vertex = Vertex(len(self.vertices))\n self.vertices.append(vertex)\n return vertex", "docstring": "Creates and returns a new vertex.\n\nReturns:\nA new Vertex instance with a unique index.", "source": "codesearchnet"} {"code": "def index(self, nurest_object):\n \n for index, obj in enumerate(self):\n if obj.equals(nurest_object):\n return index\n\n raise ValueError(\"%s is not in %s\" % (nurest_object, self))", "docstring": "Get index of the given item\nArgs:\nnurest_object (bambou.NURESTObject): the NURESTObject object to verify\n\nReturns:\nReturns the position of the object.\n\nRaises:\nRaise a ValueError exception if object is not present", "source": "juraj-google-style"} {"code": "def from_json(cls, json, _reader=blobstore.BlobReader):\n \n return cls(json[cls.BLOB_KEY_PARAM],\n json[cls.START_FILE_INDEX_PARAM],\n json[cls.END_FILE_INDEX_PARAM],\n json[cls.OFFSET_PARAM],\n _reader)", "docstring": "Creates an instance of the InputReader for the given input shard state.\n\nArgs:\njson: The InputReader state as a dict-like object.\n_reader: For dependency injection.\n\nReturns:\nAn instance of the InputReader configured using the values of json.", "source": "juraj-google-style"} {"code": "def list_metric_defs_for_resource(access_token, subscription_id, resource_group, resource_provider, resource_type, resource_name):\n endpoint = ''.join([get_rm_endpoint(), '/subscriptions/', subscription_id, '/resourceGroups/', resource_group, '/providers/', resource_provider, '/', resource_type, '/', resource_name, '/providers/microsoft.insights', '/metricdefinitions?api-version=', INSIGHTS_METRICS_API])\n return do_get(endpoint, access_token)", "docstring": "List the monitoring metric definitions for a resource.\n\nArgs:\naccess_token (str): A valid Azure authentication token.\nsubscription_id (str): Azure subscription id.\nresource_group (str): Azure resource group name.\nresource_provider (str): Type of resource provider.\nresource_type (str): Type of resource.\nresource_name (str): Name of resource.\n\nReturns:\nHTTP response. 
JSON body of metric definitions.", "source": "codesearchnet"} {"code": "def plot_generated_images(images, fname):\n fig = plt.figure(figsize=(4, 4))\n canvas = backend_agg.FigureCanvasAgg(fig)\n for (i, image) in enumerate(images):\n ax = fig.add_subplot(4, 4, (i + 1))\n plt.axis('off')\n ax.set_xticklabels([])\n ax.set_yticklabels([])\n ax.imshow(image.reshape(IMAGE_SHAPE[:(- 1)]), cmap='Greys_r')\n fig.tight_layout()\n plt.subplots_adjust(wspace=0.05, hspace=0.05)\n canvas.print_figure(fname, format='png')", "docstring": "Save a synthetic image as a PNG file.\n\nArgs:\nimages: samples of synthetic images generated by the generative network.\nfname: Python `str`, filename to save the plot to.", "source": "codesearchnet"} {"code": "def _calculate_aggregation_loss(logits_aggregation, aggregate_mask, aggregation_labels, use_answer_as_supervision, num_aggregation_labels, aggregation_loss_weight):\n per_example_aggregation_loss = _calculate_aggregation_loss_known(logits_aggregation, aggregate_mask, aggregation_labels, use_answer_as_supervision, num_aggregation_labels)\n if use_answer_as_supervision:\n per_example_aggregation_loss += _calculate_aggregation_loss_unknown(logits_aggregation, aggregate_mask)\n return aggregation_loss_weight * per_example_aggregation_loss", "docstring": "Calculates the aggregation loss per example.\n\nArgs:\nlogits_aggregation (`tf.Tensor` of shape `(batch_size, num_aggregation_labels)`):\nLogits per aggregation operation.\naggregate_mask (`tf.Tensor` of shape `(batch_size, )`):\nA mask set to 1 for examples that should use aggregation functions.\naggregation_labels (`tf.Tensor` of shape `(batch_size, )`):\nAggregation function id for every example in the batch.\nuse_answer_as_supervision (`bool`, *optional*):\nWhether to use the answer as the only supervision for aggregation examples.\nnum_aggregation_labels (`int`, *optional*, defaults to 0):\nThe number of aggregation operators to predict.\naggregation_loss_weight (`float`, *optional*, defaults to 1.0):\nImportance weight for the aggregation loss.\n\nReturns:\naggregation_loss (`tf.Tensor` of shape `(batch_size,)`): Aggregation loss per example.", "source": "github-repos"} {"code": "def _MakeEnumValueDescriptor(self, value_proto, index):\n \n\n return descriptor.EnumValueDescriptor(\n name=value_proto.name,\n index=index,\n number=value_proto.number,\n options=_OptionsOrNone(value_proto),\n type=None)", "docstring": "Creates a enum value descriptor object from a enum value proto.\n\nArgs:\nvalue_proto: The proto describing the enum value.\nindex: The index of the enum value.\n\nReturns:\nAn initialized EnumValueDescriptor object.", "source": "juraj-google-style"} {"code": "def __init__(self, length=None, group_type=None, group_id=None,\n buckets=None):\n \n super().__init__()\n self.length = length\n self.group_type = group_type\n self.group_id = group_id\n self.buckets = buckets", "docstring": "Create a GroupDescStats with the optional parameters below.\n\nArgs:\nlength (int): Length of this entry.\ngroup_type (|GroupType_v0x04|): One of OFPGT_*.\ngroup_id (int): Group identifier.\nbuckets (|ListOfBuckets_v0x04|): List of buckets in group.", "source": "juraj-google-style"} {"code": "def convert_args_to_laid_out_tensors(xs):\n ret = []\n for x in xs:\n if hasattr(x, 'to_laid_out_tensor'):\n ret.append(x.to_laid_out_tensor())\n else:\n ret.append(x)\n return ret", "docstring": "Convert list elements to laid-out-tensors when possible.\n\nArgs:\nxs: a list\nReturns:\na list", "source": "codesearchnet"} {"code": "def 
__init__(self, latitude, longitude, name):\n \n super(Waypoint, self).__init__(latitude, longitude)\n self.name = name.upper()", "docstring": "Initialise a new ``Waypoint`` object.\n\nArgs:\nlatitude (float): Waypoint's latitude\nlongitude (float): Waypoint's longitude\nname (str): Comment for waypoint", "source": "juraj-google-style"} {"code": "def pool2d(x, pool_size, strides=(1, 1), padding='valid', data_format=None, pool_mode='max'):\n if data_format is None:\n data_format = image_data_format()\n if data_format not in {'channels_first', 'channels_last'}:\n raise ValueError('Unknown data_format: ' + str(data_format))\n if len(pool_size) != 2:\n raise ValueError('`pool_size` must be a tuple of 2 integers.')\n if len(strides) != 2:\n raise ValueError('`strides` must be a tuple of 2 integers.')\n x, tf_data_format = _preprocess_conv2d_input(x, data_format)\n padding = _preprocess_padding(padding)\n if tf_data_format == 'NHWC':\n strides = (1,) + strides + (1,)\n pool_size = (1,) + pool_size + (1,)\n else:\n strides = (1, 1) + strides\n pool_size = (1, 1) + pool_size\n if pool_mode == 'max':\n x = nn.max_pool(x, pool_size, strides, padding=padding, data_format=tf_data_format)\n elif pool_mode == 'avg':\n x = nn.avg_pool(x, pool_size, strides, padding=padding, data_format=tf_data_format)\n else:\n raise ValueError('Invalid pooling mode: ' + str(pool_mode))\n if data_format == 'channels_first' and tf_data_format == 'NHWC':\n x = array_ops.transpose(x, (0, 3, 1, 2))\n return x", "docstring": "2D Pooling.\n\nArgs:\nx: Tensor or variable.\npool_size: tuple of 2 integers.\nstrides: tuple of 2 integers.\npadding: string, `\"same\"` or `\"valid\"`.\ndata_format: string, `\"channels_last\"` or `\"channels_first\"`.\npool_mode: string, `\"max\"` or `\"avg\"`.\n\nReturns:\nA tensor, result of 2D pooling.\n\nRaises:\nValueError: if `data_format` is neither `\"channels_last\"` or\n`\"channels_first\"`.\nValueError: if `pool_size` is not a tuple of 2 integers.\nValueError: if `strides` is not a tuple of 2 integers.\nValueError: if `pool_mode` is neither `\"max\"` or `\"avg\"`.", "source": "github-repos"} {"code": "def import_user_module():\n return importlib.import_module(FLAGS.wrapped_tpu_test_module_relative, calculate_parent_python_path(sys.argv[0]))", "docstring": "Imports the flag-specified user test code.\n\nThis runs all top-level statements in the user module, specifically flag\ndefinitions.\n\nReturns:\nThe user test module.", "source": "github-repos"} {"code": "def _new_convolution(self, use_bias):\n\n def clean_dict(input_dict):\n if (input_dict and (not use_bias)):\n cleaned_dict = input_dict.copy()\n cleaned_dict.pop('b', None)\n return cleaned_dict\n return input_dict\n return self._conv_class(output_channels=(4 * self._output_channels), kernel_shape=self._kernel_shape, stride=self._stride, rate=self._rate, padding=self._padding, use_bias=use_bias, initializers=clean_dict(self._initializers), partitioners=clean_dict(self._partitioners), regularizers=clean_dict(self._regularizers), name='conv')", "docstring": "Returns new convolution.\n\nArgs:\nuse_bias: Use bias in convolutions. 
If False, clean_dict removes bias\nentries from initializers, partitioners and regularizers passed to\nthe constructor of the convolution.", "source": "codesearchnet"} {"code": "def simplify_U(theta, phi, lam):\n gate = U3Gate(theta, phi, lam)\n if (abs((gate.params[0] % (2.0 * math.pi))) < _CUTOFF_PRECISION):\n gate = U1Gate(((gate.params[0] + gate.params[1]) + gate.params[2]))\n if isinstance(gate, U3Gate):\n if (abs(((gate.params[0] - (math.pi / 2)) % (2.0 * math.pi))) < _CUTOFF_PRECISION):\n gate = U2Gate(gate.params[1], (gate.params[2] + (gate.params[0] - (math.pi / 2))))\n if (abs(((gate.params[0] + (math.pi / 2)) % (2.0 * math.pi))) < _CUTOFF_PRECISION):\n gate = U2Gate((gate.params[1] + math.pi), ((gate.params[2] - math.pi) + (gate.params[0] + (math.pi / 2))))\n if (isinstance(gate, U1Gate) and (abs((gate.params[0] % (4.0 * math.pi))) < _CUTOFF_PRECISION)):\n gate = IdGate()\n return gate", "docstring": "Return the gate u1, u2, or u3 implementing U with the fewest pulses.\n\nThe returned gate implements U exactly, not up to a global phase.\n\nArgs:\ntheta, phi, lam: input Euler rotation angles for a general U gate\n\nReturns:\nGate: one of IdGate, U1Gate, U2Gate, U3Gate.", "source": "codesearchnet"} {"code": "def extraterrestrial_direct_normal_radiation(self, value=9999.0):\n if (value is not None):\n try:\n value = float(value)\n except ValueError:\n raise ValueError('value {} need to be of type float for field `extraterrestrial_direct_normal_radiation`'.format(value))\n if (value < 0.0):\n raise ValueError('value need to be greater or equal 0.0 for field `extraterrestrial_direct_normal_radiation`')\n self._extraterrestrial_direct_normal_radiation = value", "docstring": "Corresponds to IDD Field `extraterrestrial_direct_normal_radiation`\n\nArgs:\nvalue (float): value for IDD Field `extraterrestrial_direct_normal_radiation`\nUnit: Wh/m2\nvalue >= 0.0\nMissing value: 9999.0\nif `value` is None it will not be checked against the\nspecification and is assumed to be a missing value\n\nRaises:\nValueError: if `value` is not a valid value", "source": "codesearchnet"} {"code": "def get_answers_for_student(student_item):\n \n submissions = sub_api.get_submissions(student_item)\n if not submissions:\n return Answers()\n\n latest_submission = submissions[0]\n latest_answer_item = latest_submission.get('answer', {})\n return Answers(latest_answer_item.get(ANSWER_LIST_KEY, []))", "docstring": "Retrieve answers from backend for a student and question\n\nArgs:\nstudent_item (dict): The location of the problem this submission is\nassociated with, as defined by a course, student, and item.\n\nReturns:\nAnswers: answers for the student", "source": "juraj-google-style"} {"code": "def populate_audit_fields(self, event):\n event.updated = self._data\n event.original = self.get_original()._data", "docstring": "Populates the the audit JSON fields with raw data from the model, so\nall changes can be tracked and diffed.\n\nArgs:\nevent (Event): The Event instance to attach the data to\ninstance (fleaker.db.Model): The newly created/updated model", "source": "codesearchnet"} {"code": "def createNote(self, title=None, text=None):\n \n node = _node.Note()\n if title is not None:\n node.title = title\n if text is not None:\n node.text = text\n self.add(node)\n return node", "docstring": "Create a new managed note. 
Any changes to the note will be uploaded when :py:meth:`sync` is called.\n\nArgs:\ntitle (str): The title of the note.\ntext (str): The text of the note.\n\nReturns:\ngkeepapi.node.List: The new note.", "source": "juraj-google-style"} {"code": "def skip_magic(code_line, magic_list):\n for magic in magic_list:\n if code_line.startswith(magic):\n return True\n return False", "docstring": "Checks if the cell has magic, that is not Python-based.\n\nArgs:\ncode_line: A line of Python code\nmagic_list: A list of jupyter \"magic\" exceptions\n\nReturns:\nIf the line jupyter \"magic\" line, not Python line\n\n>>> skip_magic('!ls -laF', ['%', '!', '?'])\nTrue", "source": "github-repos"} {"code": "def _validate_type(self, properties_spec, value):\n \n if 'type' not in properties_spec.keys():\n \n def_name = self.get_definition_name_from_ref(properties_spec['$ref'])\n return self.validate_definition(def_name, value)\n\n \n elif properties_spec['type'] == 'array':\n if not isinstance(value, list):\n return False\n\n \n if ('type' in properties_spec['items'].keys() and\n any(not self.check_type(item, properties_spec['items']['type']) for item in value)):\n return False\n \n elif ('$ref' in properties_spec['items'].keys()):\n def_name = self.get_definition_name_from_ref(properties_spec['items']['$ref'])\n if any(not self.validate_definition(def_name, item) for item in value):\n return False\n\n else: \n if not self.check_type(value, properties_spec['type']):\n return False\n\n return True", "docstring": "Validate the given value with the given property spec.\n\nArgs:\nproperties_dict: specification of the property to check (From definition not route).\nvalue: value to check.\n\nReturns:\nTrue if the value is valid for the given spec.", "source": "juraj-google-style"} {"code": "def exists(self, file_path, check_link=False):\n if (check_link and self.islink(file_path)):\n return True\n file_path = make_string_path(file_path)\n if (file_path is None):\n raise TypeError\n if (not file_path):\n return False\n if (file_path == self.dev_null.name):\n return (not self.is_windows_fs)\n try:\n if self.is_filepath_ending_with_separator(file_path):\n return False\n file_path = self.resolve_path(file_path)\n except (IOError, OSError):\n return False\n if (file_path == self.root.name):\n return True\n path_components = self._path_components(file_path)\n current_dir = self.root\n for component in path_components:\n current_dir = self._directory_content(current_dir, component)[1]\n if (not current_dir):\n return False\n return True", "docstring": "Return true if a path points to an existing file system object.\n\nArgs:\nfile_path: The path to examine.\n\nReturns:\n(bool) True if the corresponding object exists.\n\nRaises:\nTypeError: if file_path is None.", "source": "codesearchnet"} {"code": "def make_worksheet(self, sheet_name=None):\n if (sheet_name is None):\n sheet_name = self.table_name\n if (not sheet_name):\n sheet_name = ''\n self._stream = self.workbook.add_worksheet(sheet_name)\n self._current_data_row = self._first_data_row", "docstring": "Make a worksheet to the current workbook.\n\nArgs:\nsheet_name (str):\nName of the worksheet to create. 
The name will be automatically generated\n(like ``\"Sheet1\"``) if the ``sheet_name`` is empty.", "source": "codesearchnet"} {"code": "def maybe_load_initial_epoch_from_ckpt(self, initial_epoch, mode):\n epoch = backend.eval(self._ckpt_saved_epoch)\n if mode == mode_keys.ModeKeys.TRAIN and epoch >= 0:\n return epoch + 1\n return initial_epoch", "docstring": "Maybe load initial epoch from ckpt considering possible worker recovery.\n\nWhen `_ckpt_saved_epoch` attribute exists and is not\n`CKPT_SAVED_EPOCH_UNUSED_VALUE`, this is under multi-worker training setting\nand indicates the worker is recovering from previous failure. In this case,\ninfer `initial_epoch` from `self._ckpt_saved_epoch` to continue previous\nunfinished training from certain epoch.\n\nArgs:\ninitial_epoch: The original initial_epoch user passes in in `fit()`.\nmode: The mode for running `model.fit()`.\n\nReturns:\nIf the training is recovering from previous failure under multi-worker\ntraining setting, return the epoch the training is supposed to continue\nat. Otherwise, return the `initial_epoch` the user passes in.", "source": "github-repos"} {"code": "def remove(self, future):\n \n if self._loop.get_debug():\n logger.debug(\"Removing %s from the linked list.\", future)\n if future.prev is None:\n assert future is self.head\n self.head = future.next\n if self.head is None:\n self.tail = None\n if not self.cancelled():\n self.set_result(None)\n else:\n self.head.prev = None\n elif future.next is None:\n assert future is self.tail\n self.tail = future.prev\n if self.tail is None:\n self.head = None\n if not self.cancelled():\n self.set_result(None)\n else:\n self.tail.prev = None", "docstring": "Remove an object from the linked list.\n\nArgs:\nfuture (PlasmaObjectFuture): A PlasmaObjectFuture instance.", "source": "juraj-google-style"} {"code": "def filter_spent_outputs(self, outputs):\n links = [o.to_dict() for o in outputs]\n txs = list(query.get_spending_transactions(self.connection, links))\n spends = {TransactionLink.from_dict(input_['fulfills']) for tx in txs for input_ in tx['inputs']}\n return [ff for ff in outputs if (ff not in spends)]", "docstring": "Remove outputs that have been spent\n\nArgs:\noutputs: list of TransactionLink", "source": "codesearchnet"} {"code": "class AriaTextDecoderLayer(GradientCheckpointingLayer):\n\n def __init__(self, config: AriaTextConfig, layer_idx: int):\n super().__init__()\n self.hidden_size = config.hidden_size\n self.self_attn = AriaTextAttention(config=config, layer_idx=layer_idx)\n self.mlp = AriaTextMoELayer(config)\n self.input_layernorm = AriaTextRMSNorm(config.hidden_size, eps=config.rms_norm_eps)\n self.post_attention_layernorm = AriaTextRMSNorm(config.hidden_size, eps=config.rms_norm_eps)\n\n def forward(self, hidden_states: torch.Tensor, attention_mask: Optional[torch.Tensor]=None, position_ids: Optional[torch.LongTensor]=None, past_key_value: Optional[Cache]=None, output_attentions: Optional[bool]=False, use_cache: Optional[bool]=False, cache_position: Optional[torch.LongTensor]=None, position_embeddings: Optional[Tuple[torch.Tensor, torch.Tensor]]=None, **kwargs: Unpack[FlashAttentionKwargs]) -> Tuple[torch.FloatTensor, Optional[Tuple[torch.FloatTensor, torch.FloatTensor]]]:\n residual = hidden_states\n hidden_states = self.input_layernorm(hidden_states)\n hidden_states, self_attn_weights = self.self_attn(hidden_states=hidden_states, attention_mask=attention_mask, position_ids=position_ids, past_key_value=past_key_value, output_attentions=output_attentions, 
use_cache=use_cache, cache_position=cache_position, position_embeddings=position_embeddings, **kwargs)\n hidden_states = residual + hidden_states\n residual = hidden_states\n hidden_states = self.post_attention_layernorm(hidden_states)\n hidden_states = self.mlp(hidden_states)\n hidden_states = residual + hidden_states\n outputs = (hidden_states,)\n if output_attentions:\n outputs += (self_attn_weights,)\n return outputs", "docstring": "Aria Text Decoder Layer.\n\nThis class defines a single decoder layer in the language model, incorporating self-attention and Mixture of Experts (MoE) feed-forward network.\n\nArgs:\nconfig (`AriaTextConfig`):\nConfiguration object for the text component of the model.\nlayer_idx (`int`):\nIndex of the layer.", "source": "github-repos"} {"code": "def get_gradient_components(self, value):\n raise NotImplementedError(f'{type(self).__name__}.get_gradient_components()')", "docstring": "Returns the components of `value` that should be included in gradients.\n\nThis method may not call TensorFlow ops, since any new ops added to the\ngraph would not be properly tracked by the gradient mechanisms.\n\nArgs:\nvalue: A `CompositeTensor` value.\n\nReturns:\nA nested structure of `Tensor` or `IndexedSlices`.", "source": "github-repos"} {"code": "def print_map(map_source, x, y, zoom=14, width=297, height=210, dpi=300, format='pdf'):\n bbox = get_print_bbox(x, y, zoom, width, height, dpi)\n tiles = [get_tiles(tile_layer, bbox) for tile_layer in map_source.layers if (tile_layer.min_zoom <= zoom <= tile_layer.max_zoom)]\n img = stitch_map(tiles, width, height, bbox, dpi)\n outfile = NamedTemporaryFile(delete=False)\n img.save(outfile, format, quality=100, dpi=(dpi, dpi))\n outfile.close()\n return outfile.name", "docstring": "Download map tiles and stitch them together in a single image, ready for printing.\n\nArgs:\nmap_source (MapSource): Map to download\nx (float): map center x-coordinate in Mercator projection (EPSG:4326)\ny (float): map center y-coordinate in Mercator projection (EPSG:4326)\nzoom (int): tile zoom level to use for printing\nwidth (float): page width in mm\nheight (float): page height in mm\ndpi (int): resolution in dots per inch\nformat (str): output format. Anything supported by ``Pillow.Image.save``. E.g. 
\"pdf\", \"jpeg\", \"png\".\n\nReturns:\nstr: path of temporary output file.", "source": "codesearchnet"} {"code": "def GetSshkeyMap(self, since=None):\n return SshkeyUpdateGetter().GetUpdates(self._GetClient(), self.conf['bucket'], self.conf['sshkey_object'], since)", "docstring": "Return the ssh map from this source.\n\nArgs:\nsince: Get data only changed since this timestamp (inclusive) or None\nfor all data.\n\nReturns:\ninstance of shadow.SSHMap", "source": "github-repos"} {"code": "def case_report_content(store, institute_obj, case_obj):\n variant_types = {'causatives_detailed': 'causatives', 'suspects_detailed': 'suspects', 'classified_detailed': 'acmg_classification', 'tagged_detailed': 'manual_rank', 'dismissed_detailed': 'dismiss_variant', 'commented_detailed': 'is_commented'}\n data = case_obj\n for individual in data['individuals']:\n try:\n sex = int(individual.get('sex', 0))\n except ValueError as err:\n sex = 0\n individual['sex_human'] = SEX_MAP[sex]\n individual['phenotype_human'] = PHENOTYPE_MAP.get(individual['phenotype'])\n data['comments'] = store.events(institute_obj, case=case_obj, comments=True)\n data['manual_rank_options'] = MANUAL_RANK_OPTIONS\n data['dismissed_options'] = DISMISS_VARIANT_OPTIONS\n data['genetic_models'] = dict(GENETIC_MODELS)\n data['report_created_at'] = datetime.datetime.now().strftime('%Y-%m-%d %H:%M')\n evaluated_variants = {}\n for vt in variant_types:\n evaluated_variants[vt] = []\n for var_type in ['causatives', 'suspects']:\n vt = '_'.join([var_type, 'detailed'])\n for var_id in case_obj.get(var_type, []):\n variant_obj = store.variant(var_id)\n if (not variant_obj):\n continue\n evaluated_variants[vt].append(variant_obj)\n for var_obj in store.evaluated_variants(case_id=case_obj['_id']):\n for vt in variant_types:\n keyword = variant_types[vt]\n if (keyword in var_obj):\n evaluated_variants[vt].append(var_obj)\n for var_type in evaluated_variants:\n decorated_variants = []\n for var_obj in evaluated_variants[var_type]:\n if (var_obj['category'] == 'snv'):\n decorated_info = variant_decorator(store=store, institute_obj=institute_obj, case_obj=case_obj, variant_id=None, variant_obj=var_obj, add_case=False, add_other=False, get_overlapping=False)\n else:\n decorated_info = sv_variant(store=store, institute_id=institute_obj['_id'], case_name=case_obj['display_name'], variant_obj=var_obj, add_case=False, get_overlapping=False)\n decorated_variants.append(decorated_info['variant'])\n data[var_type] = decorated_variants\n return data", "docstring": "Gather contents to be visualized in a case report\n\nArgs:\nstore(adapter.MongoAdapter)\ninstitute_obj(models.Institute)\ncase_obj(models.Case)\n\nReturns:\ndata(dict)", "source": "codesearchnet"} {"code": "def AddArguments(cls, argument_group):\n \n argument_group.add_argument(\n '--disable_zeromq', '--disable-zeromq', action='store_false',\n dest='use_zeromq', default=True, help=(\n 'Disable queueing using ZeroMQ. 
A Multiprocessing queue will be '\n 'used instead.'))", "docstring": "Adds command line arguments to an argument group.\n\nThis function takes an argument parser or an argument group object and adds\nto it all the command line arguments this helper supports.\n\nArgs:\nargument_group (argparse._ArgumentGroup|argparse.ArgumentParser):\nargparse group.", "source": "juraj-google-style"} {"code": "def sticky_attribute_assignment(trackable, name, value):\n if isinstance(value, (tracking.TrackedList, tracking.TrackedDict, tracking.TrackedSet)) and hasattr(trackable, '_tracked'):\n trackable._tracked.append(name)\n if not tracking.is_tracking_enabled():\n return value\n if isinstance(value, tf.__internal__.tracking.Trackable):\n trackable._track_trackable(value, name=name, overwrite=True)\n return value", "docstring": "Adds dependencies, called from __setattr__.\n\nArgs:\ntrackable: The object to add dependencies to (generally the one having\nan attribute assigned).\nname: The attribute name being assigned.\nvalue: The value being assigned. Not necessarily a trackable object.\n\nReturns:\nThe value which should be stored in the attribute.", "source": "github-repos"} {"code": "def get_values_and_permissible(values: Iterable[Tuple[(Any, str)]], add_none: bool=False, none_description: str='[None]') -> Tuple[(List[Tuple[(Any, str)]], List[Any])]:\n permissible_values = list((x[0] for x in values))\n if add_none:\n none_tuple = (SERIALIZED_NONE, none_description)\n values = ([none_tuple] + list(values))\n return (values, permissible_values)", "docstring": "Used when building Colander nodes.\n\nArgs:\nvalues: an iterable of tuples like ``(value, description)`` used in\nHTML forms\n\nadd_none: add a tuple ``(None, none_description)`` at the start of\n``values`` in the result?\n\nnone_description: the description used for ``None`` if ``add_none``\nis set\n\nReturns:\na tuple ``(values, permissible_values)``, where\n\n- ``values`` is what was passed in (perhaps with the addition of the\n\"None\" tuple at the start)\n- ``permissible_values`` is a list of all the ``value`` elements of\nthe original ``values``", "source": "codesearchnet"} {"code": "def convert_upsample(params, w_name, scope_name, inputs, layers, weights, names):\n print('Converting upsample...')\n if (params['mode'] != 'nearest'):\n raise AssertionError('Cannot convert non-nearest upsampling')\n if (names == 'short'):\n tf_name = ('UPSL' + random_string(4))\n elif (names == 'keep'):\n tf_name = w_name\n else:\n tf_name = (w_name + str(random.random()))\n if ('height_scale' in params):\n scale = (params['height_scale'], params['width_scale'])\n elif (len(inputs) == 2):\n scale = layers[(inputs[(- 1)] + '_np')][(- 2):]\n upsampling = keras.layers.UpSampling2D(size=scale, name=tf_name)\n layers[scope_name] = upsampling(layers[inputs[0]])", "docstring": "Convert nearest upsampling layer.\n\nArgs:\nparams: dictionary with layer parameters\nw_name: name prefix in state_dict\nscope_name: pytorch scope name\ninputs: pytorch node inputs\nlayers: dictionary with keras tensors\nweights: pytorch state_dict\nnames: use short names for keras layers", "source": "codesearchnet"} {"code": "def parse_outputtrans(path_dir):\n run_type = None\n warning = None\n efermi = None\n gap = None\n doping_levels = []\n with open(os.path.join(path_dir, 'boltztrap.outputtrans'), 'r') as f:\n for line in f:\n if ('WARNING' in line):\n warning = line\n elif ('Calc type:' in line):\n run_type = line.split()[(- 1)]\n elif line.startswith('VBM'):\n efermi = 
Energy(line.split()[1], 'Ry').to('eV')\n elif line.startswith('Egap:'):\n gap = Energy(float(line.split()[1]), 'Ry').to('eV')\n elif line.startswith('Doping level number'):\n doping_levels.append(float(line.split()[6]))\n return (run_type, warning, efermi, gap, doping_levels)", "docstring": "Parses .outputtrans file\n\nArgs:\npath_dir: dir containing boltztrap.outputtrans\n\nReturns:\ntuple - (run_type, warning, efermi, gap, doping_levels)", "source": "codesearchnet"} {"code": "def _Open(self, path_spec=None, mode='rb'):\n \n if not path_spec:\n raise ValueError('Missing path specification.')\n\n store_index = vshadow.VShadowPathSpecGetStoreIndex(path_spec)\n if store_index is None:\n raise errors.PathSpecError(\n 'Unable to retrieve store index from path specification.')\n\n self._file_system = resolver.Resolver.OpenFileSystem(\n path_spec, resolver_context=self._resolver_context)\n vshadow_volume = self._file_system.GetVShadowVolume()\n\n if (store_index < 0 or\n store_index >= vshadow_volume.number_of_stores):\n raise errors.PathSpecError((\n 'Unable to retrieve VSS store: {0:d} from path '\n 'specification.').format(store_index))\n\n vshadow_store = vshadow_volume.get_store(store_index)\n if not vshadow_store.has_in_volume_data():\n raise IOError((\n 'Unable to open VSS store: {0:d} without in-volume stored '\n 'data.').format(store_index))\n\n self._vshadow_store = vshadow_store", "docstring": "Opens the file-like object defined by path specification.\n\nArgs:\npath_spec (PathSpec): path specification.\nmode (Optional[str]): file access mode.\n\nRaises:\nAccessError: if the access to open the file was denied.\nIOError: if the file-like object could not be opened.\nOSError: if the file-like object could not be opened.\nPathSpecError: if the path specification is incorrect.\nValueError: if the path specification is invalid.", "source": "juraj-google-style"} {"code": "def unflatten(guide, falttened_input):\n return [(unflatten(sub_list, falttened_input) if isinstance(sub_list, list) else next(falttened_input)) for sub_list in guide]", "docstring": "Unflatten a falttened generator.\n\nArgs:\nguide: A guide list to follow the structure\nfalttened_input: A flattened iterator object\n\nUsage:\n\nguide = [[\"a\"], [\"b\",\"c\",\"d\"], [[\"e\"]], [\"f\"]]\ninput_list = [0, 1, 2, 3, 4, 5, 6, 7]\nunflatten(guide, iter(input_list))\n>> [[0], [1, 2, 3], [[4]], [5]]", "source": "codesearchnet"} {"code": "def _patch_expand_path(self, settings, name, value):\n \n if os.path.isabs(value):\n return os.path.normpath(value)\n\n \n value = os.path.expanduser(value)\n\n \n \n if not os.path.isabs(value) and self.projectdir:\n value = os.path.join(self.projectdir, value)\n\n return os.path.normpath(value)", "docstring": "Patch a path to expand home directory and make absolute path.\n\nArgs:\nsettings (dict): Current settings.\nname (str): Setting name.\nvalue (str): Path to patch.\n\nReturns:\nstr: Patched path to an absolute path.", "source": "juraj-google-style"} {"code": "def __init__(self, map_name, timestamp_dir, cache_options, automount_mountpoint=None):\n updater.Updater.__init__(self, map_name, timestamp_dir, cache_options, automount_mountpoint)\n self.local_master = False\n if self.OPT_LOCAL_MASTER in cache_options:\n if cache_options[self.OPT_LOCAL_MASTER] == 'yes':\n self.local_master = True", "docstring": "Initialize automount-specific updater options.\n\nArgs:\nmap_name: A string representing the type of the map we are an Updater for.\ntimestamp_dir: A string with the directory containing our 
timestamp files.\ncache_options: A dict containing the options for any caches we create.\nautomount_mountpoint: An optional string containing automount path info.", "source": "github-repos"} {"code": "def _fact_to_tuple(self, fact):\n if fact.category:\n category = fact.category.name\n else:\n category = ''\n description = (fact.description or '')\n return FactTuple(start=fact.start.strftime(self.datetime_format), end=fact.end.strftime(self.datetime_format), activity=text_type(fact.activity.name), duration=fact.get_string_delta(format='%M'), category=text_type(category), description=text_type(description))", "docstring": "Convert a ``Fact`` to its normalized tuple.\n\nThis is where all type conversion for ``Fact`` attributes to strings as\nwell as any normalization happens.\n\nNote:\nBecause different writers may require different types, we need to\ndo this individually.\n\nArgs:\nfact (hamster_lib.Fact): Fact to be converted.\n\nReturns:\nFactTuple: Tuple representing the original ``Fact``.", "source": "codesearchnet"} {"code": "def zip_ll(data, means, M):\n (genes, cells) = data.shape\n clusters = means.shape[1]\n ll = np.zeros((cells, clusters))\n d0 = (data == 0)\n d1 = (data > 0)\n for i in range(clusters):\n means_i = np.tile(means[(:, i)], (cells, 1))\n means_i = means_i.transpose()\n L_i = np.tile(M[(:, i)], (cells, 1))\n L_i = L_i.transpose()\n ll_0 = np.log((L_i + ((1 - L_i) * np.exp((- means_i)))))\n ll_0 = np.where(((L_i == 0) & (means_i == 0)), (- means_i), ll_0)\n ll_1 = ((np.log((1 - L_i)) + xlogy(data, means_i)) - means_i)\n ll_0 = np.where(d0, ll_0, 0.0)\n ll_1 = np.where(d1, ll_1, 0.0)\n ll[(:, i)] = np.sum((ll_0 + ll_1), 0)\n return ll", "docstring": "Calculates the zero-inflated Poisson log-likelihood.\n\nArgs:\ndata (array): genes x cells\nmeans (array): genes x k\nM (array): genes x k - this is the zero-inflation parameter.\n\nReturns:\ncells x k array of log-likelihood for each cell/cluster pair.", "source": "codesearchnet"} {"code": "def remove_item(self, val):\n \n return cache.lrem(self.key, json.dumps(val))", "docstring": "Removes given item from the list.\n\nArgs:\nval: Item\n\nReturns:\nCache backend response.", "source": "juraj-google-style"} {"code": "def watch(self, key, pipeline=False):\n \n if pipeline:\n self._pipeline.watch(key)\n else:\n self._db.watch(key)", "docstring": "Watch the given key.\n\nMarks the given key to be watch for conditional execution\nof a transaction.\n\nArgs:\nkey (str): Key that needs to be watched\npipeline (bool): True, start a transaction block. Default false.", "source": "juraj-google-style"} {"code": "async def teardown_client(self, client_id):\n \n\n client_info = self._client_info(client_id)\n\n self.adapter.remove_monitor(client_info['monitor'])\n conns = client_info['connections']\n\n for conn_string, conn_id in conns.items():\n try:\n self._logger.debug(\"Disconnecting client %s from conn %s at teardown\", client_id, conn_string)\n await self.adapter.disconnect(conn_id)\n except: \n self._logger.exception(\"Error disconnecting device during teardown_client: conn_string=%s\", conn_string)\n\n del self._clients[client_id]", "docstring": "Release all resources held by a client.\n\nThis method must be called and awaited whenever a client is\ndisconnected. 
It ensures that all of the client's resources are\nproperly released and any devices they have connected to are\ndisconnected cleanly.\n\nArgs:\nclient_id (str): The client that we should tear down.\n\nRaises:\nArgumentError: The client_id is unknown.", "source": "juraj-google-style"} {"code": "def call(func, args):\n assert hasattr(func, '__call__'), 'Cannot call func: {}'.format(func.__name__)\n raw_func = (func if isinstance(func, FunctionType) else func.__class__.__call__)\n hints = collections.defaultdict((lambda : Any), get_type_hints(raw_func))\n argspec = _getargspec(raw_func)\n named_args = {}\n varargs = ()\n for (k, nk, v) in _normalize(args):\n if (nk == argspec.varargs):\n hints[nk] = Tuple[(hints[nk], ...)]\n elif ((nk not in argspec.args) and (argspec.varkw in hints)):\n hints[nk] = hints[argspec.varkw]\n try:\n value = cast(hints[nk], v)\n except TypeError as e:\n _LOGGER.exception(e)\n six.raise_from(exc.InvalidCliValueError(k, v), e)\n if (nk == argspec.varargs):\n varargs = value\n elif (((nk in argspec.args) or argspec.varkw) and ((nk not in named_args) or (named_args[nk] is None))):\n named_args[nk] = value\n return func(*varargs, **named_args)", "docstring": "Call the function with args normalized and cast to the correct types.\n\nArgs:\nfunc: The function to call.\nargs: The arguments parsed by docopt.\n\nReturns:\nThe return value of func.", "source": "codesearchnet"} {"code": "def setup_colorbars(self, plot_call_sign):\n self.fig.colorbar(plot_call_sign, cax=self.cbar_ax, ticks=self.cbar_ticks, orientation=self.cbar_orientation)\n getattr(self.cbar_ax, (('set_' + self.cbar_var) + 'ticklabels'))(self.cbar_tick_labels, fontsize=self.cbar_ticks_fontsize)\n getattr(self.cbar_ax, (('set_' + self.cbar_var) + 'label'))(self.cbar_label, fontsize=self.cbar_label_fontsize, labelpad=self.cbar_label_pad)\n return", "docstring": "Setup colorbars for each type of plot.\n\nTake all of the optional performed during ``__init__`` method and makes the colorbar.\n\nArgs:\nplot_call_sign (obj): Plot instance of ax.contourf with colormapping to\nadd as a colorbar.", "source": "codesearchnet"} {"code": "def convert_response(check_response, project_id):\n \n if not check_response or not check_response.checkErrors:\n return _IS_OK\n\n \n theError = check_response.checkErrors[0]\n error_tuple = _CHECK_ERROR_CONVERSION.get(theError.code, _IS_UNKNOWN)\n if error_tuple[1].find(u'{') == -1: \n return error_tuple\n\n updated_msg = error_tuple[1].format(project_id=project_id, detail=theError.detail or u'')\n return error_tuple[0], updated_msg, error_tuple[2]", "docstring": "Computes a http status code and message `CheckResponse`\n\nThe return value a tuple (code, message, api_key_is_bad) where\n\ncode: is the http status code\nmessage: is the message to return\napi_key_is_bad: indicates that a given api_key is bad\n\nArgs:\ncheck_response (:class:`endpoints_management.gen.servicecontrol_v1_messages.CheckResponse`):\nthe response from calling an api\n\nReturns:\ntuple(code, message, bool)", "source": "juraj-google-style"} {"code": "def xmlrpc_notify(self, app_id, token_or_token_list, aps_dict_or_list):\n \n d = self.apns_service(app_id).write(\n encode_notifications(\n [t.replace(' ', '') for t in token_or_token_list] \n if (type(token_or_token_list) is list)\n else token_or_token_list.replace(' ', ''),\n aps_dict_or_list))\n if d:\n def _finish_err(r):\n \n \n \n \n raise xmlrpc.Fault(500, 'Connection to the APNS server could not be made.')\n return d.addCallbacks(lambda r: None, _finish_err)", 
"docstring": "Sends push notifications to the Apple APNS server. Multiple\nnotifications can be sent by sending pairing the token/notification\narguments in lists [token1, token2], [notification1, notification2].\n\nArguments:\napp_id provisioned app_id to send to\ntoken_or_token_list token to send the notification or a list of tokens\naps_dict_or_list notification dicts or a list of notifications\nReturns:\nNone", "source": "juraj-google-style"} {"code": "def set_internal_tacking_values(self,\n min_non_zero_index,\n max_index,\n total_added):\n \n if max_index >= 0:\n self.max_value = self.get_highest_equivalent_value(self.get_value_from_index(max_index))\n if min_non_zero_index >= 0:\n self.min_value = self.get_value_from_index(min_non_zero_index)\n self.total_count = total_added", "docstring": "Called during decoding and add to adjust the new min/max value and\ntotal count\n\nArgs:\nmin_non_zero_index min nonzero index of all added counts (-1 if none)\nmax_index max index of all added counts (-1 if none)", "source": "juraj-google-style"} {"code": "def _are_scopes_sufficient(authorized_scopes, sufficient_scopes):\n \n for sufficient_scope_set in sufficient_scopes:\n if sufficient_scope_set.issubset(authorized_scopes):\n return True\n return False", "docstring": "Check if a list of authorized scopes satisfies any set of sufficient scopes.\n\nArgs:\nauthorized_scopes: a list of strings, return value from oauth.get_authorized_scopes\nsufficient_scopes: a set of sets of strings, return value from _process_scopes", "source": "juraj-google-style"} {"code": "def get(self, key):\n lock.acquire()\n try:\n if (key not in self):\n return None\n current_time = time.time()\n if (self[key].expire > current_time):\n return self[key].value\n deletes = []\n for (k, val) in self.items():\n if (val.expire <= current_time):\n deletes.append(k)\n for k in deletes:\n del self[k]\n return None\n finally:\n lock.release()", "docstring": "Get an object from the cache\n\nArguments:\nkey (str): Cache key\n\nReturns:\nCached object", "source": "codesearchnet"} {"code": "def post_process_object_detection(self, outputs, threshold: float=0.5, target_sizes: Union[TensorType, List[Tuple]]=None, nms_threshold: float=0.7):\n out_logits, out_bbox = (outputs.logits, outputs.pred_boxes)\n batch_size, num_queries, num_labels = out_logits.shape\n if target_sizes is not None:\n if len(out_logits) != len(target_sizes):\n raise ValueError('Make sure that you pass in as many target sizes as the batch dimension of the logits')\n prob = out_logits.sigmoid()\n all_scores = prob.view(batch_size, num_queries * num_labels).to(out_logits.device)\n all_indexes = torch.arange(num_queries * num_labels)[None].repeat(batch_size, 1).to(out_logits.device)\n all_boxes = torch.div(all_indexes, out_logits.shape[2], rounding_mode='floor')\n all_labels = all_indexes % out_logits.shape[2]\n boxes = center_to_corners_format(out_bbox)\n boxes = torch.gather(boxes, 1, all_boxes.unsqueeze(-1).repeat(1, 1, 4))\n if target_sizes is not None:\n if isinstance(target_sizes, List):\n img_h = torch.Tensor([i[0] for i in target_sizes])\n img_w = torch.Tensor([i[1] for i in target_sizes])\n else:\n img_h, img_w = target_sizes.unbind(1)\n scale_fct = torch.stack([img_w, img_h, img_w, img_h], dim=1).to(boxes.device)\n boxes = boxes * scale_fct[:, None, :]\n results = []\n for b in range(batch_size):\n box = boxes[b]\n score = all_scores[b]\n lbls = all_labels[b]\n pre_topk = score.topk(min(10000, num_queries * num_labels)).indices\n box = box[pre_topk]\n score = 
score[pre_topk]\n lbls = lbls[pre_topk]\n keep_inds = batched_nms(box, score, lbls, nms_threshold)[:100]\n score = score[keep_inds]\n lbls = lbls[keep_inds]\n box = box[keep_inds]\n results.append({'scores': score[score > threshold], 'labels': lbls[score > threshold], 'boxes': box[score > threshold]})\n return results", "docstring": "Converts the output of [`DetaForObjectDetection`] into final bounding boxes in (top_left_x, top_left_y,\nbottom_right_x, bottom_right_y) format. Only supports PyTorch.\n\nArgs:\noutputs ([`DetrObjectDetectionOutput`]):\nRaw outputs of the model.\nthreshold (`float`, *optional*, defaults to 0.5):\nScore threshold to keep object detection predictions.\ntarget_sizes (`torch.Tensor` or `List[Tuple[int, int]]`, *optional*):\nTensor of shape `(batch_size, 2)` or list of tuples (`Tuple[int, int]`) containing the target size\n(height, width) of each image in the batch. If left to None, predictions will not be resized.\nnms_threshold (`float`, *optional*, defaults to 0.7):\nNMS threshold.\n\nReturns:\n`List[Dict]`: A list of dictionaries, each dictionary containing the scores, labels and boxes for an image\nin the batch as predicted by the model.", "source": "github-repos"} {"code": "def setup(config_root=''):\n config = _load_config(root=config_root)\n logging_config = config.get('core', {}).get('logging', {})\n log_level = logging_config.get('level', 'INFO').upper()\n log_handlers = (logging_config.get('handlers') or ['syslog'])\n ulogger.setup_logging(progname='gordon-janitor', level=log_level, handlers=log_handlers)\n return config", "docstring": "Service configuration and logging setup.\n\nConfiguration defined in ``gordon-janitor-user.toml`` will overwrite\n``gordon-janitor.toml``.\n\nArgs:\nconfig_root (str): where configuration should load from,\ndefaults to current working directory.\nReturns:\nA dict for Gordon service configuration", "source": "codesearchnet"} {"code": "def from_dict(cls, data):\n args = []\n if (('id' in data) and ('data' in data)):\n measurement_class = CanMessage\n args.append(('Bus %s: 0x%x' % (data.get('bus', '?'), data['id'])))\n args.append(data['data'])\n else:\n measurement_class = cls._class_from_name(data['name'])\n if (measurement_class == Measurement):\n args.append(data['name'])\n args.append(data['value'])\n return measurement_class(*args, event=data.get('event', None), override_unit=True)", "docstring": "Create a new Measurement subclass instance using the given dict.\n\nIf Measurement.name_from_class was previously called with this data's\nassociated Measurement sub-class in Python, the returned object will be\nan instance of that sub-class. 
If the measurement name in ``data`` is\nunrecognized, the returned object will be of the generic ``Measurement``\ntype.\n\nArgs:\ndata (dict): the data for the new measurement, including at least a\nname and value.", "source": "codesearchnet"} {"code": "def ParseDict(js_dict, message, ignore_unknown_fields=False):\n parser = _Parser(ignore_unknown_fields)\n parser.ConvertMessage(js_dict, message)\n return message", "docstring": "Parses a JSON dictionary representation into a message.\n\nArgs:\njs_dict: Dict representation of a JSON message.\nmessage: A protocol buffer message to merge into.\nignore_unknown_fields: If True, do not raise errors for unknown fields.\n\nReturns:\nThe same message passed as argument.", "source": "codesearchnet"} {"code": "def _GenDiscoveryDoc(service_class_names, output_path, hostname=None, application_path=None):\n output_files = []\n service_configs = GenApiConfig(service_class_names, hostname=hostname, config_string_generator=discovery_generator.DiscoveryGenerator(), application_path=application_path)\n for (api_name_version, config) in service_configs.iteritems():\n discovery_name = (api_name_version + '.discovery')\n output_files.append(_WriteFile(output_path, discovery_name, config))\n return output_files", "docstring": "Write discovery documents generated from the service classes to file.\n\nArgs:\nservice_class_names: A list of fully qualified ProtoRPC service names.\noutput_path: The directory to output the discovery docs to.\nhostname: A string hostname which will be used as the default version\nhostname. If no hostname is specificied in the @endpoints.api decorator,\nthis value is the fallback. Defaults to None.\napplication_path: A string containing the path to the AppEngine app.\n\nReturns:\nA list of discovery doc filenames.", "source": "codesearchnet"} {"code": "def _joined_array(num_dims, reduce_dim):\n formatter = '{:0%db}' % (num_dims - 1)\n result = np.zeros(shape=[2] * (num_dims - 1), dtype='S%d' % (2 * num_dims))\n flat = result.ravel()\n for i in range(2 ** (num_dims - 1)):\n dims = formatter.format(i)\n flat[i] = ''.join([(dims[:reduce_dim] + '%d' + dims[reduce_dim:]) % j for j in range(2)])\n return result", "docstring": "Creates an ndarray with the result from reduce_join on input_array.\n\nArgs:\nnum_dims: The number of dimensions of the original input array.\nreduce_dim: The dimension to reduce.\n\nReturns:\nAn ndarray of shape [2] * (num_dims - 1).", "source": "github-repos"} {"code": "def fetch_result(self, trial):\n trial_future = self._find_item(self._running, trial)\n if (not trial_future):\n raise ValueError('Trial was not running.')\n self._running.pop(trial_future[0])\n with warn_if_slow('fetch_result'):\n result = ray.get(trial_future[0])\n if isinstance(result, _LocalWrapper):\n result = result.unwrap()\n return result", "docstring": "Fetches one result of the running trials.\n\nReturns:\nResult of the most recent trial training run.", "source": "codesearchnet"} {"code": "def __init__(self, submission_id, submissions, storage_bucket):\n \n super(AttackSubmission, self).__init__(submission_id, submissions,\n storage_bucket)\n if (self.type != TYPE_TARGETED) and (self.type != TYPE_NONTARGETED):\n raise WorkerError('Incorrect attack type for submission \"{0}\"'.format(\n submission_id))", "docstring": "Initializes AttackSubmission.\n\nArgs:\nsubmission_id: ID of the submission\nsubmissions: instance of CompetitionSubmissions with all submissions\nstorage_bucket: storage bucket where all submissions are 
stored\n\nRaises:\nWorkerError: if submission has incorrect type", "source": "juraj-google-style"} {"code": "def watch(self, path, recursive=False):\n self._logger.info('Initializing watcher for path \"%s\"', path)\n handler = FileHandler(self)\n self._observer = Observer()\n self._observer.schedule(handler, path, recursive)\n self._logger.info('Starting watcher')\n self._observer.start()\n self._watch = True\n try:\n self._logger.info('Waiting for file events')\n while self._watch:\n time.sleep(1)\n except KeyboardInterrupt:\n self.stop_watching()\n self._observer.join()", "docstring": "Watch for files in a directory and apply normalizations.\n\nWatch for new or changed files in a directory and apply\nnormalizations over them.\n\nArgs:\npath: Path to the directory.\nrecursive: Whether to find files recursively or not.", "source": "codesearchnet"} {"code": "def _GetVSSStoreIdentifiers(self, scan_node):\n \n if not scan_node or not scan_node.path_spec:\n raise errors.SourceScannerError('Invalid scan node.')\n\n volume_system = vshadow_volume_system.VShadowVolumeSystem()\n volume_system.Open(scan_node.path_spec)\n\n volume_identifiers = self._source_scanner.GetVolumeIdentifiers(\n volume_system)\n if not volume_identifiers:\n return []\n\n \n if self._vss_stores:\n if self._vss_stores == 'all':\n vss_stores = range(1, volume_system.number_of_volumes + 1)\n else:\n vss_stores = self._vss_stores\n\n selected_volume_identifiers = self._NormalizedVolumeIdentifiers(\n volume_system, vss_stores, prefix='vss')\n\n if not set(selected_volume_identifiers).difference(volume_identifiers):\n return selected_volume_identifiers\n\n try:\n volume_identifiers = self._PromptUserForVSSStoreIdentifiers(\n volume_system, volume_identifiers)\n\n except KeyboardInterrupt:\n raise errors.UserAbort('File system scan aborted.')\n\n return self._NormalizedVolumeIdentifiers(\n volume_system, volume_identifiers, prefix='vss')", "docstring": "Determines the VSS store identifiers.\n\nArgs:\nscan_node (dfvfs.SourceScanNode): scan node.\n\nReturns:\nlist[str]: VSS store identifiers.\n\nRaises:\nSourceScannerError: if the format of or within the source is not\nsupported or the scan node is invalid.\nUserAbort: if the user requested to abort.", "source": "juraj-google-style"} {"code": "def __init__(self, index, height):\n \n self.index = index\n self.height = height", "docstring": "Create an instance.\n\nArgs:\nindex (int):\nheight (int):", "source": "juraj-google-style"} {"code": "def ParseRow(self, parser_mediator, row_offset, row):\n \n filename = row.get('name', None)\n md5_hash = row.get('md5', None)\n mode = row.get('mode_as_string', None)\n\n inode_number = row.get('inode', None)\n if '-' in inode_number:\n inode_number, _, _ = inode_number.partition('-')\n\n try:\n inode_number = int(inode_number, 10)\n except (TypeError, ValueError):\n inode_number = None\n\n data_size = self._GetIntegerValue(row, 'size')\n user_uid = self._GetIntegerValue(row, 'uid')\n user_gid = self._GetIntegerValue(row, 'gid')\n\n event_data = MactimeEventData()\n event_data.filename = filename\n event_data.inode = inode_number\n event_data.md5 = md5_hash\n event_data.mode_as_string = mode\n event_data.offset = row_offset\n event_data.size = data_size\n event_data.user_gid = user_gid\n\n if user_uid is None:\n event_data.user_sid = None\n else:\n \n event_data.user_sid = '{0:d}'.format(user_uid)\n\n for value_name, timestamp_description in iter(\n self._TIMESTAMP_DESC_MAP.items()):\n posix_time = self._GetIntegerValue(row, value_name)\n \n if 
not posix_time:\n continue\n\n date_time = dfdatetime_posix_time.PosixTime(timestamp=posix_time)\n event = time_events.DateTimeValuesEvent(date_time, timestamp_description)\n parser_mediator.ProduceEventWithEventData(event, event_data)", "docstring": "Parses a line of the log file and produces events.\n\nArgs:\nparser_mediator (ParserMediator): mediates interactions between parsers\nand other components, such as storage and dfvfs.\nrow_offset (int): number of the corresponding line.\nrow (dict[str, str]): fields of a single row, as specified in COLUMNS.", "source": "juraj-google-style"} {"code": "def _align_hydrogen_atoms(mol1, mol2, heavy_indices1,\n heavy_indices2):\n \n num_atoms = mol2.NumAtoms()\n all_atom = set(range(1, num_atoms+1))\n hydrogen_atoms1 = all_atom - set(heavy_indices1)\n hydrogen_atoms2 = all_atom - set(heavy_indices2)\n label1 = heavy_indices1 + tuple(hydrogen_atoms1)\n label2 = heavy_indices2 + tuple(hydrogen_atoms2)\n\n cmol1 = ob.OBMol()\n for i in label1:\n oa1 = mol1.GetAtom(i)\n a1 = cmol1.NewAtom()\n a1.SetAtomicNum(oa1.GetAtomicNum())\n a1.SetVector(oa1.GetVector())\n cmol2 = ob.OBMol()\n for i in label2:\n oa2 = mol2.GetAtom(i)\n a2 = cmol2.NewAtom()\n a2.SetAtomicNum(oa2.GetAtomicNum())\n a2.SetVector(oa2.GetVector())\n\n aligner = ob.OBAlign(False, False)\n aligner.SetRefMol(cmol1)\n aligner.SetTargetMol(cmol2)\n aligner.Align()\n aligner.UpdateCoords(cmol2)\n\n hydrogen_label2 = []\n hydrogen_label1 = list(range(len(heavy_indices1) + 1, num_atoms + 1))\n for h2 in range(len(heavy_indices2) + 1, num_atoms + 1):\n distance = 99999.0\n idx = hydrogen_label1[0]\n a2 = cmol2.GetAtom(h2)\n for h1 in hydrogen_label1:\n a1 = cmol1.GetAtom(h1)\n d = a1.GetDistance(a2)\n if d < distance:\n distance = d\n idx = h1\n hydrogen_label2.append(idx)\n hydrogen_label1.remove(idx)\n\n hydrogen_orig_idx2 = label2[len(heavy_indices2):]\n hydrogen_canon_orig_map2 = [(canon, orig) for canon, orig\n in zip(hydrogen_label2,\n hydrogen_orig_idx2)]\n hydrogen_canon_orig_map2.sort(key=lambda m: m[0])\n hydrogen_canon_indices2 = [x[1] for x in hydrogen_canon_orig_map2]\n\n canon_label1 = label1\n canon_label2 = heavy_indices2 + tuple(hydrogen_canon_indices2)\n\n return canon_label1, canon_label2", "docstring": "Align the label of topologically identical atoms of second molecule\ntowards first molecule\n\nArgs:\nmol1: First molecule. OpenBabel OBMol object\nmol2: Second molecule. 
OpenBabel OBMol object\nheavy_indices1: inchi label map of the first molecule\nheavy_indices2: label map of the second molecule\n\nReturn:\ncorrected label map of all atoms of the second molecule", "source": "juraj-google-style"} {"code": "async def set_headline(self, name, level, message):\n \n\n if name not in self.services:\n raise ArgumentError(\"Unknown service name\", short_name=name)\n\n self.services[name]['state'].set_headline(level, message)\n\n headline = self.services[name]['state'].headline.to_dict()\n await self._notify_update(name, 'new_headline', headline)", "docstring": "Set the sticky headline for a service.\n\nArgs:\nname (string): The short name of the service to query\nlevel (int): The level of the message (info, warning, error)\nmessage (string): The message contents", "source": "juraj-google-style"} {"code": "def read(self, size=None):\n \n if not self._is_open:\n raise IOError('Not opened.')\n\n if self._range_offset < 0 or self._range_size < 0:\n raise IOError('Invalid data range.')\n\n if self._current_offset < 0:\n raise IOError(\n 'Invalid current offset: {0:d} value less than zero.'.format(\n self._current_offset))\n\n if self._current_offset >= self._range_size:\n return b''\n\n if size is None:\n size = self._range_size\n if self._current_offset + size > self._range_size:\n size = self._range_size - self._current_offset\n\n self._file_object.seek(\n self._range_offset + self._current_offset, os.SEEK_SET)\n\n data = self._file_object.read(size)\n\n self._current_offset += len(data)\n\n return data", "docstring": "Reads a byte string from the file-like object at the current offset.\n\nThe function will read a byte string of the specified size or\nall of the remaining data if no size was specified.\n\nArgs:\nsize (Optional[int]): number of bytes to read, where None is all\nremaining data.\n\nReturns:\nbytes: data read.\n\nRaises:\nIOError: if the read failed.\nOSError: if the read failed.", "source": "juraj-google-style"} {"code": "def SendEmail(self, to_addresses, from_address, subject, message, attachments=None, is_html=True, cc_addresses=None, message_id=None, headers=None):\n headers = (headers or {})\n msg = MIMEMultipart('alternative')\n if is_html:\n text = self.RemoveHtmlTags(message)\n part1 = MIMEText(text, 'plain')\n msg.attach(part1)\n part2 = MIMEText(message, 'html')\n msg.attach(part2)\n else:\n part1 = MIMEText(message, 'plain')\n msg.attach(part1)\n if attachments:\n for (file_name, file_data) in iteritems(attachments):\n part = MIMEBase('application', 'octet-stream')\n part.set_payload(file_data)\n encoders.encode_base64(part)\n part.add_header('Content-Disposition', ('attachment; filename=\"%s\"' % file_name))\n msg.attach(part)\n msg['Subject'] = subject\n from_address = self.AddEmailDomain(from_address)\n to_addresses = self.SplitEmailsAndAppendEmailDomain(to_addresses)\n cc_addresses = self.SplitEmailsAndAppendEmailDomain((cc_addresses or ''))\n msg['From'] = from_address\n msg['To'] = ','.join(to_addresses)\n if cc_addresses:\n msg['CC'] = ','.join(cc_addresses)\n if message_id:\n msg.add_header('Message-ID', message_id)\n for (header, value) in iteritems(headers):\n msg.add_header(header, value)\n try:\n s = smtplib.SMTP(config.CONFIG['Worker.smtp_server'], int(config.CONFIG['Worker.smtp_port']))\n s.ehlo()\n if config.CONFIG['Worker.smtp_starttls']:\n s.starttls()\n s.ehlo()\n if (config.CONFIG['Worker.smtp_user'] and config.CONFIG['Worker.smtp_password']):\n s.login(config.CONFIG['Worker.smtp_user'], 
config.CONFIG['Worker.smtp_password'])\n s.sendmail(from_address, (to_addresses + cc_addresses), msg.as_string())\n s.quit()\n except (socket.error, smtplib.SMTPException) as e:\n raise RuntimeError(('Could not connect to SMTP server to send email. Please check config option Worker.smtp_server. Currently set to %s. Error: %s' % (config.CONFIG['Worker.smtp_server'], e)))", "docstring": "This method sends an email notification.\n\nArgs:\nto_addresses: blah@mycompany.com string, list of addresses as csv string,\nor rdf_standard.DomainEmailAddress\nfrom_address: blah@mycompany.com string\nsubject: email subject string\nmessage: message contents string, as HTML or plain text\nattachments: iterable of filename string and file data tuples,\ne.g. {\"/file/name/string\": filedata}\nis_html: true if message is in HTML format\ncc_addresses: blah@mycompany.com string, or list of addresses as\ncsv string\nmessage_id: smtp message_id. Used to enable conversation threading\nheaders: dict of str-> str, headers to set\nRaises:\nRuntimeError: for problems connecting to smtp server.", "source": "codesearchnet"} {"code": "def raise_for_status(status: int, headers: MutableMapping, data: MutableMapping) -> None:\n if (status != 200):\n if (status == 429):\n if isinstance(data, str):\n error = data\n else:\n error = data.get('error', 'ratelimited')\n try:\n retry_after = int(headers.get('Retry-After', 1))\n except ValueError:\n retry_after = 1\n raise exceptions.RateLimited(retry_after, error, status, headers, data)\n else:\n raise exceptions.HTTPException(status, headers, data)", "docstring": "Check request response status\n\nArgs:\nstatus: Response status\nheaders: Response headers\ndata: Response data\n\nRaises:\n:class:`slack.exceptions.RateLimited`: For 429 status code\n:class:`slack.exceptions:HTTPException`:", "source": "codesearchnet"} {"code": "def copydb(self, sourcedb, destslab, destdbname=None, progresscb=None):\n destdb = destslab.initdb(destdbname, sourcedb.dupsort)\n statdict = destslab.stat(db=destdb)\n if (statdict['entries'] > 0):\n raise s_exc.DataAlreadyExists()\n rowcount = 0\n for chunk in s_common.chunks(self.scanByFull(db=sourcedb), COPY_CHUNKSIZE):\n (ccount, acount) = destslab.putmulti(chunk, dupdata=True, append=True, db=destdb)\n if ((ccount != len(chunk)) or (acount != len(chunk))):\n raise s_exc.BadCoreStore(mesg='Unexpected number of values written')\n rowcount += len(chunk)\n if ((progresscb is not None) and (0 == (rowcount % PROGRESS_PERIOD))):\n progresscb(rowcount)\n return rowcount", "docstring": "Copy an entire database in this slab to a new database in potentially another slab.\n\nArgs:\nsourcedb (LmdbDatabase): which database in this slab to copy rows from\ndestslab (LmdbSlab): which slab to copy rows to\ndestdbname (str): the name of the database to copy rows to in destslab\nprogresscb (Callable[int]): if not None, this function will be periodically called with the number of rows\ncompleted\n\nReturns:\n(int): the number of rows copied\n\nNote:\nIf any rows already exist in the target database, this method returns an error. 
This means that one cannot\nuse destdbname=None unless there are no explicit databases in the destination slab.", "source": "codesearchnet"} {"code": "def get_num_code_systems_per_value_set(engine: sqlalchemy.engine.base.Engine, table: sqlalchemy.sql.expression.TableClause) -> Dict[str, int]:\n query = sqlalchemy.select([table.c.valueseturi, table.c.valuesetversion, sqlalchemy.func.array_agg(sqlalchemy.distinct(table.c.system)).label('systems')]).group_by(table.c.valueseturi, table.c.valuesetversion)\n with engine.connect() as connection:\n systems_per_value_set = connection.execute(query)\n return _query_results_to_code_system_counts(systems_per_value_set.fetchall())", "docstring": "Queries `table` for the code systems referenced by each value set.\n\nLooks up the code systems referenced by each value set decribed in the\nvalueset_codes `table`. Returns counts for the number of code systems\nreferenced by each value set.\n\nIf the value sets' URL contains a \"|version\" suffix, reports the number of\ncode systems referenced by that value set and version.\nIf the url does not contain a \"|version\" suffix, the number of code systems\nacross all versions of the value set will be reported.\n\nArgs:\nengine: The SqlAlachemy engine to use when performing queries.\ntable: The SqlAlchemy table to query.\n\nReturns:\nA CodeSystemCounts object for accessing code systems information.", "source": "github-repos"} {"code": "def unload(self, keepables=None):\n to_del = [ds_id for (ds_id, projectable) in self.datasets.items() if ((ds_id not in self.wishlist) and ((not keepables) or (ds_id not in keepables)))]\n for ds_id in to_del:\n LOG.debug('Unloading dataset: %r', ds_id)\n del self.datasets[ds_id]", "docstring": "Unload all unneeded datasets.\n\nDatasets are considered unneeded if they weren't directly requested\nor added to the Scene by the user or they are no longer needed to\ngenerate composites that have yet to be generated.\n\nArgs:\nkeepables (iterable): DatasetIDs to keep whether they are needed\nor not.", "source": "codesearchnet"} {"code": "def get_synth_input_fn(height, width, num_channels, num_classes):\n\n def input_fn(is_training, data_dir, batch_size, *args, **kwargs):\n images = tf.zeros((batch_size, height, width, num_channels), tf.float32)\n labels = tf.zeros((batch_size, num_classes), tf.int32)\n return tf.data.Dataset.from_tensors((images, labels)).repeat()\n return input_fn", "docstring": "Returns an input function that returns a dataset with zeroes.\n\nThis is useful in debugging input pipeline performance, as it removes all\nelements of file reading and image preprocessing.\n\nArgs:\nheight: Integer height that will be used to create a fake image tensor.\nwidth: Integer width that will be used to create a fake image tensor.\nnum_channels: Integer depth that will be used to create a fake image tensor.\nnum_classes: Number of classes that should be represented in the fake labels\ntensor\n\nReturns:\nAn input_fn that can be used in place of a real one to return a dataset\nthat can be used for iteration.", "source": "codesearchnet"} {"code": "def getSet(self, name):\n return lock_and_call((lambda : Set(self._impl.getSet(name))), self._lock)", "docstring": "Get the set with the corresponding name.\n\nArgs:\nname: Name of the set to be found.\n\nRaises:\nTypeError: if the specified set does not exist.", "source": "codesearchnet"} {"code": "def update_vlan(self, name, vid, vni):\n \n cmd = 'vxlan vlan %s vni %s' % (vid, vni)\n return self.configure_interface(name, cmd)", "docstring": 
"Adds a new vlan to vni mapping for the interface\n\nEosVersion:\n4.13.7M\n\nArgs:\nvlan (str, int): The vlan id to map to the vni\nvni (str, int): The vni value to use\n\nReturns:\nTrue if the command completes successfully", "source": "juraj-google-style"} {"code": "def _add_asset_to_collection(asset_filename, asset_tensor):\n asset_proto = meta_graph_pb2.AssetFileDef()\n asset_proto.filename = asset_filename\n asset_proto.tensor_info.name = asset_tensor.name\n asset_any_proto = Any()\n asset_any_proto.Pack(asset_proto)\n ops.add_to_collection(constants.ASSETS_KEY, asset_any_proto)", "docstring": "Builds an asset proto and adds it to the asset collection of the graph.\n\nArgs:\nasset_filename: The filename of the asset to be added.\nasset_tensor: The asset tensor used to populate the tensor info of the\nasset proto.", "source": "github-repos"} {"code": "def __init__(self, path):\n \n self.path = path\n self.on_create = None\n self.on_modify = None\n self.on_delete = None\n self.jobs = None", "docstring": "Initialize the Directory Watcher\nArgs:\npath: path of the directory to watch", "source": "juraj-google-style"} {"code": "def find(self, *strings, **kwargs):\n start = kwargs.pop('start', 0)\n stop = kwargs.pop('stop', None)\n keys_only = kwargs.pop('keys_only', False)\n results = {string: [] for string in strings}\n stop = (len(self) if (stop is None) else stop)\n for (i, line) in enumerate(self[start:stop]):\n for string in strings:\n if (string in line):\n if keys_only:\n results[string].append(i)\n else:\n results[string].append((i, line))\n if (len(strings) == 1):\n return results[strings[0]]\n return results", "docstring": "Search the entire editor for lines that match the string.\n\n.. code-block:: Python\n\nstring = '''word one\nword two\nthree'''\ned = Editor(string)\ned.find('word') # [(0, \"word one\"), (1, \"word two\")]\ned.find('word', 'three') # {'word': [...], 'three': [(2, \"three\")]}\n\nArgs:\nstrings (str): Any number of strings to search for\nkeys_only (bool): Only return keys\nstart (int): Optional line to start searching on\nstop (int): Optional line to stop searching on\n\nReturns:\nresults: If multiple strings searched a dictionary of string key, (line number, line) values (else just values)", "source": "codesearchnet"} {"code": "def _parse_authors(authors):\n \n link = authors.find(\"a\")\n link = link[0].params.get(\"href\") if link else None\n\n author_list = _strip_content(authors)\n\n if \"(\" in author_list:\n author_list = author_list.split(\"(\")[0]\n\n if not author_list.strip():\n return []\n\n return map(\n lambda author: Author(author.strip(), link),\n author_list.strip().split(\",\")\n )", "docstring": "Parse informations about authors of the book.\n\nArgs:\ndom (obj): HTMLElement containing slice of the page with details.\n\nReturns:\nlist: List of :class:`.Author` objects. 
Blank if no author \\\nfound.", "source": "juraj-google-style"} {"code": "def set_result(self, result, from_tree=False):\n \n\n if self._read_only:\n if not from_tree:\n LOGGER.warning(\"Tried to set address %s on a\"\n \" read-only context.\",\n self.address)\n return\n\n with self._condition:\n if self._read_only:\n if not from_tree:\n LOGGER.warning(\"Tried to set address %s on a\"\n \" read-only context.\",\n self.address)\n return\n if from_tree:\n \n \n \n if not self._result_set_in_context:\n self._result = result\n self._tree_has_set = True\n else:\n self._result = result\n self._result_set_in_context = True\n self._deleted = False\n\n self._condition.notify_all()", "docstring": "Set the addresses's value unless the future has been declared\nread only.\n\nArgs:\nresult (bytes): The value at an address.\nfrom_tree (bool): Whether the value is being set by a read from\nthe merkle tree.\n\nReturns:\nNone", "source": "juraj-google-style"} {"code": "def fetch_json(self, uri_path, http_method='GET', query_params=None, body=None, headers=None):\n query_params = (query_params or {})\n headers = (headers or {})\n query_params = self.add_authorisation(query_params)\n uri = self.build_uri(uri_path, query_params)\n allowed_methods = ('POST', 'PUT', 'DELETE')\n if ((http_method in allowed_methods) and ('Content-Type' not in headers)):\n headers['Content-Type'] = 'application/json'\n headers['Accept'] = 'application/json'\n (response, content) = self.client.request(uri=uri, method=http_method, body=body, headers=headers)\n self.check_errors(uri, response)\n return json.loads(content.decode('utf-8'))", "docstring": "Make a call to Trello API and capture JSON response. Raises an error\nwhen it fails.\n\nReturns:\ndict: Dictionary with the JSON data", "source": "codesearchnet"} {"code": "def update_ethernet_settings(self, configuration, force=False, timeout=(- 1)):\n uri = '{}/ethernetSettings'.format(self.data['uri'])\n return self._helper.update(configuration, uri=uri, force=force, timeout=timeout)", "docstring": "Updates the Ethernet interconnect settings for the logical interconnect.\n\nArgs:\nconfiguration: Ethernet interconnect settings.\nforce: If set to true, the operation completes despite any problems with network connectivity or errors\non the resource itself. The default is false.\ntimeout: Timeout in seconds. Wait for task completion by default. 
The timeout does not abort the operation\nin OneView; it just stops waiting for its completion.\n\nReturns:\ndict: Logical Interconnect.", "source": "codesearchnet"} {"code": "def __init__(self, sync_optimizer, is_chief, num_tokens):\n self._sync_optimizer = sync_optimizer\n self._is_chief = is_chief\n self._num_tokens = num_tokens", "docstring": "Creates hook to handle SyncReplicasOptimizer initialization ops.\n\nArgs:\nsync_optimizer: `SyncReplicasOptimizer` which this hook will initialize.\nis_chief: `Bool`, whether is this a chief replica or not.\nnum_tokens: Number of tokens to add to the queue.", "source": "github-repos"} {"code": "def distance_between(self, u, v):\n if (not isinstance(u, Node)):\n raise TypeError('u must be a Node')\n if (not isinstance(v, Node)):\n raise TypeError('v must be a Node')\n if (u == v):\n return 0.0\n u_dists = {u: 0.0}\n v_dists = {v: 0.0}\n c = u\n p = u.parent\n while (p is not None):\n u_dists[p] = u_dists[c]\n if (c.edge_length is not None):\n u_dists[p] += c.edge_length\n c = p\n p = p.parent\n c = v\n p = v.parent\n while (p is not None):\n v_dists[p] = v_dists[c]\n if (c.edge_length is not None):\n v_dists[p] += c.edge_length\n if (p in u_dists):\n return (u_dists[p] + v_dists[p])\n c = p\n p = p.parent\n raise RuntimeError('u and v are not in the same Tree')", "docstring": "Return the distance between nodes ``u`` and ``v`` in this ``Tree``\n\nArgs:\n``u`` (``Node``): Node ``u``\n\n``v`` (``Node``): Node ``v``\n\nReturns:\n``float``: The distance between nodes ``u`` and ``v``", "source": "codesearchnet"} {"code": "def track_progress(func, tasks, bar_width=50, **kwargs):\n \n if isinstance(tasks, tuple):\n assert len(tasks) == 2\n assert isinstance(tasks[0], collections_abc.Iterable)\n assert isinstance(tasks[1], int)\n task_num = tasks[1]\n tasks = tasks[0]\n elif isinstance(tasks, collections_abc.Iterable):\n task_num = len(tasks)\n else:\n raise TypeError(\n '\"tasks\" must be an iterable object or a (iterator, int) tuple')\n prog_bar = ProgressBar(task_num, bar_width)\n results = []\n for task in tasks:\n results.append(func(task, **kwargs))\n prog_bar.update()\n sys.stdout.write('\\n')\n return results", "docstring": "Track the progress of tasks execution with a progress bar.\n\nTasks are done with a simple for-loop.\n\nArgs:\nfunc (callable): The function to be applied to each task.\ntasks (list or tuple[Iterable, int]): A list of tasks or\n(tasks, total num).\nbar_width (int): Width of progress bar.\n\nReturns:\nlist: The task results.", "source": "juraj-google-style"} {"code": "def add_capture(self, tensor, placeholder):\n self._function_captures.add_or_replace(key=id(tensor), external=tensor, internal=placeholder, is_by_ref=False)\n self.inputs.append(placeholder)", "docstring": "Capture a specific tensor and utilize the provided placeholder.\n\nArgs:\ntensor: Tensor to captures.\nplaceholder: Provided placeholder for the tensor.", "source": "github-repos"} {"code": "def to_voxels(array):\n if (type(array) is not numpy.ndarray):\n raise ValueError('array argument must be of type numpy.ndarray')\n return numpy.argwhere(array)", "docstring": "Converts an array to its voxel list.\n\nArguments:\narray (numpy.ndarray): A numpy nd array. 
This must be boolean!\n\nReturns:\nA list of n-tuples", "source": "codesearchnet"} {"code": "def sort_response(response: Dict[(str, Any)]) -> OrderedDict:\n root_order = ['jsonrpc', 'result', 'error', 'id']\n error_order = ['code', 'message', 'data']\n req = OrderedDict(sorted(response.items(), key=(lambda k: root_order.index(k[0]))))\n if ('error' in response):\n req['error'] = OrderedDict(sorted(response['error'].items(), key=(lambda k: error_order.index(k[0]))))\n return req", "docstring": "Sort the keys in a JSON-RPC response object.\n\nThis has no effect other than making it nicer to read. Useful in Python 3.5 only,\ndictionaries are already sorted in newer Python versions.\n\nExample::\n\n>>> json.dumps(sort_response({'id': 2, 'result': 5, 'jsonrpc': '2.0'}))\n{\"jsonrpc\": \"2.0\", \"result\": 5, \"id\": 1}\n\nArgs:\nresponse: Deserialized JSON-RPC response.\n\nReturns:\nThe same response, sorted in an OrderedDict.", "source": "codesearchnet"} {"code": "def __init__(self, value: Union[None, int, float, str, List[Any], Tuple[Any]]=None, children: Optional[List['DNA']]=None, spec: Optional[DNASpec]=None, metadata: Optional[Dict[str, Any]]=None, *, allow_partial: bool=False, **kwargs):\n value, children, metadata = self._parse_value_and_children(value, children, metadata, spec)\n super().__init__(value=value, children=children, metadata=metadata or symbolic.Dict(), allow_partial=allow_partial, **kwargs)\n self._decision_by_id_cache = None\n self._named_decisions = None\n self._userdata = AttributeDict()\n self._cloneable_metadata_keys = set()\n self._cloneable_userdata_keys = set()\n self._spec = None\n if spec:\n self.use_spec(spec)", "docstring": "Constructor.\n\nArgs:\nvalue: Value for current node.\nchildren: Child DNA(s).\nspec: DNA spec that constraint current node.\nmetadata: Optional dict as controller metadata for the DNA.\nallow_partial: If True, allow the object to be partial.\n**kwargs: keyword arguments that will be passed through to\nsymbolic.Object.", "source": "github-repos"} {"code": "def p45(msg):\n \n d = hex2bin(data(msg))\n if d[26] == '0':\n return None\n p = bin2int(d[27:38]) \n return p", "docstring": "Average static pressure.\n\nArgs:\nmsg (String): 28 bytes hexadecimal message string\n\nReturns:\nint: static pressure in hPa", "source": "juraj-google-style"} {"code": "def _get_prob_original_static(initial_dist_t, target_dist_t):\n init_static = tensor_util.constant_value(initial_dist_t)\n target_static = tensor_util.constant_value(target_dist_t)\n if init_static is None or target_static is None:\n return None\n else:\n return np.min(target_static / init_static)", "docstring": "Returns the static probability of sampling from the original.\n\n`tensor_util.constant_value(prob_of_original)` returns `None` if it encounters\nan Op that it isn't defined for. 
We have some custom logic to avoid this.\n\nArgs:\ninitial_dist_t: A tensor of the initial distribution.\ntarget_dist_t: A tensor of the target distribution.\n\nReturns:\nThe probability of sampling from the original distribution as a constant,\nif it is a constant, or `None`.", "source": "github-repos"} {"code": "def _get_default_configurable_parameter_values(fn, whitelist, blacklist):\n arg_vals = _ARG_DEFAULTS_CACHE.get(fn)\n if (arg_vals is not None):\n return arg_vals.copy()\n arg_spec = _get_cached_arg_spec(fn)\n if arg_spec.defaults:\n default_kwarg_names = arg_spec.args[(- len(arg_spec.defaults)):]\n arg_vals = dict(zip(default_kwarg_names, arg_spec.defaults))\n else:\n arg_vals = {}\n if (six.PY3 and arg_spec.kwonlydefaults):\n arg_vals.update(arg_spec.kwonlydefaults)\n for k in list(six.iterkeys(arg_vals)):\n whitelist_fail = (whitelist and (k not in whitelist))\n blacklist_fail = (blacklist and (k in blacklist))\n representable = _is_literally_representable(arg_vals[k])\n if (whitelist_fail or blacklist_fail or (not representable)):\n del arg_vals[k]\n _ARG_DEFAULTS_CACHE[fn] = arg_vals\n return arg_vals.copy()", "docstring": "Retrieve all default values for configurable parameters of a function.\n\nAny parameters included in the supplied blacklist, or not included in the\nsupplied whitelist, are excluded.\n\nArgs:\nfn: The function whose parameter values should be retrieved.\nwhitelist: The whitelist (or `None`) associated with the function.\nblacklist: The blacklist (or `None`) associated with the function.\n\nReturns:\nA dictionary mapping configurable parameter names to their default values.", "source": "codesearchnet"} {"code": "def _sample_tag_values(cls, sample_names, rformat, sample_fields):\n sample_tag_values = OrderedDict()\n tag_names = VcfRecord._format_list(rformat)\n for (i, sample_field) in enumerate(sample_fields):\n tag_values = (sample_field.split(':') if sample_field else '.')\n sample_tag_values[sample_names[i]] = OrderedDict(zip(tag_names, tag_values))\n return sample_tag_values", "docstring": "Creates a sample dict of tag-value dicts for a single variant record.\n\nArgs:\nsample_names: list of sample name strings.\nrformat: record format string (from VCF record).\nsample_fields: list of strings where each string is the ';'\nseparated format values for an individual sample.\n\nReturns:\nA dict of samples, where each key is a sample and each value\nis a dict of format-values. See attribute below for example.\nWill return '.' 
if no values for sampe field.", "source": "codesearchnet"} {"code": "def poisson_cluster(data, k, init=None, max_iters=100):\n (genes, cells) = data.shape\n if (sparse.issparse(data) and (not sparse.isspmatrix_csc(data))):\n data = sparse.csc_matrix(data)\n (init, assignments) = kmeans_pp(data, k, centers=init)\n centers = np.copy(init)\n assignments = np.zeros(cells)\n for it in range(max_iters):\n lls = poisson_ll(data, centers)\n new_assignments = np.argmax(lls, 1)\n if np.equal(assignments, new_assignments).all():\n return (new_assignments, centers)\n for c in range(k):\n if sparse.issparse(data):\n if (data[(:, (new_assignments == c))].shape[0] == 0):\n (new_c, _) = kmeans_pp(data, k, centers[(:, :c)])\n centers[(:, c)] = new_c[(:, c)]\n else:\n centers[(:, c)] = np.asarray(data[(:, (new_assignments == c))].mean(1)).flatten()\n elif (len(data[(:, (new_assignments == c))]) == 0):\n (new_c, _) = kmeans_pp(data, k, centers[(:, :c)])\n centers[(:, c)] = new_c[(:, c)]\n else:\n centers[(:, c)] = np.mean(data[(:, (new_assignments == c))], 1)\n assignments = new_assignments\n return (assignments, centers)", "docstring": "Performs Poisson hard EM on the given data.\n\nArgs:\ndata (array): A 2d array- genes x cells. Can be dense or sparse; for best performance, sparse matrices should be in CSC format.\nk (int): Number of clusters\ninit (array, optional): Initial centers - genes x k array. Default: None, use kmeans++\nmax_iters (int, optional): Maximum number of iterations. Default: 100\n\nReturns:\na tuple of two arrays: a cells x 1 vector of cluster assignments,\nand a genes x k array of cluster means.", "source": "codesearchnet"} {"code": "def kmeans_pp(data, k, centers=None):\n (genes, cells) = data.shape\n if (sparse.issparse(data) and (not sparse.isspmatrix_csc(data))):\n data = sparse.csc_matrix(data)\n num_known_centers = 0\n if (centers is None):\n centers = np.zeros((genes, k))\n else:\n num_known_centers = centers.shape[1]\n centers = np.concatenate((centers, np.zeros((genes, (k - num_known_centers)))), 1)\n distances = np.zeros((cells, k))\n distances[:] = np.inf\n if (num_known_centers == 0):\n init = np.random.randint(0, cells)\n if sparse.issparse(data):\n centers[(:, 0)] = data[(:, init)].toarray().flatten()\n else:\n centers[(:, 0)] = data[(:, init)]\n num_known_centers += 1\n available_cells = list(range(cells))\n for c in range(num_known_centers, k):\n c2 = (c - 1)\n if sparse.issparse(data):\n lls = poisson_ll(data, centers[(:, c2:(c2 + 1))]).flatten()\n distances[(:, c2)] = ((1 + lls.max()) - lls)\n distances[(:, c2)] /= distances[(:, c2)].max()\n else:\n for cell in range(cells):\n distances[(cell, c2)] = poisson_dist(data[(:, cell)], centers[(:, c2)])\n min_distances = np.min(distances, 1)\n min_distances = (min_distances ** 2)\n min_distances = min_distances[available_cells]\n min_dist = np.random.choice(available_cells, p=(min_distances / min_distances.sum()))\n available_cells.pop(available_cells.index(min_dist))\n if sparse.issparse(data):\n centers[(:, c)] = data[(:, min_dist)].toarray().flatten()\n else:\n centers[(:, c)] = data[(:, min_dist)]\n lls = poisson_ll(data, centers)\n new_assignments = np.argmax(lls, 1)\n centers[(centers == 0.0)] = eps\n return (centers, new_assignments)", "docstring": "Generates kmeans++ initial centers.\n\nArgs:\ndata (array): A 2d array- genes x cells\nk (int): Number of clusters\ncenters (array, optional): if provided, these are one or more known cluster centers. 
2d array of genes x number of centers (<=k).\n\nReturns:\ncenters - a genes x k array of cluster means.\nassignments - a cells x 1 array of cluster assignments", "source": "codesearchnet"} {"code": "def check_interactive_docker_worker(link):\n \n errors = []\n log.info(\"Checking for {} {} interactive docker-worker\".format(link.name, link.task_id))\n try:\n if link.task['payload']['features'].get('interactive'):\n errors.append(\"{} is interactive: task.payload.features.interactive!\".format(link.name))\n if link.task['payload']['env'].get('TASKCLUSTER_INTERACTIVE'):\n errors.append(\"{} is interactive: task.payload.env.TASKCLUSTER_INTERACTIVE!\".format(link.name))\n except KeyError:\n errors.append(\"check_interactive_docker_worker: {} task definition is malformed!\".format(link.name))\n return errors", "docstring": "Given a task, make sure the task was not defined as interactive.\n\n* ``task.payload.features.interactive`` must be absent or False.\n* ``task.payload.env.TASKCLUSTER_INTERACTIVE`` must be absent or False.\n\nArgs:\nlink (LinkOfTrust): the task link we're checking.\n\nReturns:\nlist: the list of errors. Success is an empty list.", "source": "juraj-google-style"} {"code": "def ported_string(raw_data, encoding='utf-8', errors='ignore'):\n if (not raw_data):\n return six.text_type()\n if isinstance(raw_data, six.text_type):\n return raw_data.strip()\n if six.PY2:\n try:\n return six.text_type(raw_data, encoding, errors).strip()\n except LookupError:\n return six.text_type(raw_data, 'utf-8', errors).strip()\n if six.PY3:\n try:\n return six.text_type(raw_data, encoding).strip()\n except (LookupError, UnicodeDecodeError):\n return six.text_type(raw_data, 'utf-8', errors).strip()", "docstring": "Give as input raw data and output a str in Python 3\nand unicode in Python 2.\n\nArgs:\nraw_data: Python 2 str, Python 3 bytes or str to port\nencoding: string giving the name of an encoding\nerrors: this specifies the treatment of characters\nwhich are invalid in the input encoding\n\nReturns:\nstr (Python 3) or unicode (Python 2)", "source": "codesearchnet"} {"code": "def __init__(self, queue_id=None, port=None, length=None, properties=None):\n \n super().__init__()\n self.queue_id = queue_id\n self.port = port\n self.length = length\n self.properties = [] if properties is None else properties", "docstring": "Create a PacketQueue with the optional parameters below.\n\nArgs:\nqueue_id (int): ID of the specific queue.\nport (int): Port this queue is attached to.\nlength (int): Length in bytes of this queue desc.\nproperties(~pyof.v0x04.common.queue.ListOfProperties):\nQueue's list of properties. 
Default is an empty list.", "source": "juraj-google-style"} {"code": "def import_family(self, rfa_file):\n \n self._add_entry(templates.IMPORT_FAMILY\n .format(family_file=rfa_file))", "docstring": "Append a import family entry to the journal.\n\nThis instructs Revit to import a family into the opened model.\n\nArgs:\nrfa_file (str): full path of the family file", "source": "juraj-google-style"} {"code": "def parse_source(info):\n \n\n if \"extractor_key\" in info:\n source = info[\"extractor_key\"]\n lower_source = source.lower()\n\n for key in SOURCE_TO_NAME:\n lower_key = key.lower()\n if lower_source == lower_key:\n source = SOURCE_TO_NAME[lower_key]\n\n if source != \"Generic\":\n return source\n\n if \"url\" in info and info[\"url\"] is not None:\n p = urlparse(info[\"url\"])\n if p and p.netloc:\n return p.netloc\n\n return \"Unknown\"", "docstring": "Parses the source info from an info dict generated by youtube-dl\n\nArgs:\ninfo (dict): The info dict to parse\n\nReturns:\nsource (str): The source of this song", "source": "juraj-google-style"} {"code": "def add_cookie(self, cookie_dict):\n \n if not isinstance(cookie_dict, dict):\n raise TypeError('Type of the cookie must be a dict.')\n if not cookie_dict.get(\n 'name', None\n ) or not cookie_dict.get(\n 'value', None):\n raise KeyError('Missing required keys, \\'name\\' and \\'value\\' must be provided.')\n self._execute(Command.ADD_COOKIE, {'cookie': cookie_dict})", "docstring": "Set a cookie.\n\nSupport:\nWeb(WebView)\n\nArgs:\ncookie_dict: A dictionary contain keys: \"name\", \"value\",\n[\"path\"], [\"domain\"], [\"secure\"], [\"httpOnly\"], [\"expiry\"].\n\nReturns:\nWebElement Object.", "source": "juraj-google-style"} {"code": "def b_fit_score(self, x, y):\n \n x = np.reshape(minmax_scale(x), (-1, 1))\n y = np.reshape(minmax_scale(y), (-1, 1))\n poly = PolynomialFeatures(degree=self.degree)\n poly_x = poly.fit_transform(x)\n\n poly_x[:,1] = 0\n poly_x[:,2] = 0\n\n regressor = LinearRegression()\n regressor.fit(poly_x, y)\n\n y_predict = regressor.predict(poly_x)\n error = mean_squared_error(y_predict, y)\n\n return error", "docstring": "Compute the RECI fit score\n\nArgs:\nx (numpy.ndarray): Variable 1\ny (numpy.ndarray): Variable 2\n\nReturns:\nfloat: RECI fit score", "source": "juraj-google-style"} {"code": "def intersect(self, other):\n \n if not isinstance(other, self.__class__):\n m = \"You can only intersect striplogs with each other.\"\n raise StriplogError(m)\n\n result = []\n for iv in self:\n for jv in other:\n try:\n result.append(iv.intersect(jv))\n except IntervalError:\n \n pass\n return Striplog(result)", "docstring": "Makes a striplog of all intersections.\n\nArgs:\nStriplog. The striplog instance to intersect with.\n\nReturns:\nStriplog. 
The result of the intersection.", "source": "juraj-google-style"} {"code": "def http(self, *args, **kwargs):\n return self.credentials.authorize(transport.get_http_object(*args, **kwargs))", "docstring": "Returns an authorized http instance.\n\nMust only be called from within an @oauth_required decorated method, or\nfrom within an @oauth_aware decorated method where has_credentials()\nreturns True.\n\nArgs:\n*args: Positional arguments passed to httplib2.Http constructor.\n**kwargs: Positional arguments passed to httplib2.Http constructor.", "source": "codesearchnet"} {"code": "def check(self, dsm, **kwargs):\n \n logger.debug('Entities = %s' % dsm.entities)\n messages = []\n code_clean = True\n threshold = kwargs.pop('threshold', 1)\n rows, _ = dsm.size\n for i in range(0, rows):\n if dsm.data[i][0] > threshold:\n messages.append(\n 'Number of issues (%d) in module %s '\n '> threshold (%d)' % (\n dsm.data[i][0], dsm.entities[i], threshold))\n code_clean = False\n\n return code_clean, '\\n'.join(messages)", "docstring": "Check code clean.\n\nArgs:\ndsm (:class:`DesignStructureMatrix`): the DSM to check.\n\nReturns:\nbool, str: True if code clean else False, messages", "source": "juraj-google-style"} {"code": "def make_one_shot_iterator(self):\n return self._make_one_shot_iterator()", "docstring": "Get a one time use iterator for DistributedDatasetV1.\n\nNote: This API is deprecated. Please use `for ... in dataset:` to iterate\nover the dataset or `iter` to create an iterator.\n\nReturns:\nA DistributedIteratorV1 instance.", "source": "github-repos"} {"code": "def json(cls, status_code, process):\n\n def func(response):\n ret = None\n if cls.boolean(status_code)(response):\n ret = (response.json() or {})\n return process(ret)\n return func", "docstring": "Callback to validate and extract a JSON object.\n\nThe returned callback checks a given response for the given\nstatus_code using :function:`response_boolean`. On success the\nresponse JSON is parsed and returned.\n\nArgs:\n\nstatus_code(int): The http status code to consider a success\n\nReturns:\n\nA function that given a response returns the JSON object\nin the given response. 
Raises a :class:`HeliumError` if\nthe response code does not match.", "source": "codesearchnet"} {"code": "def _GetConsoleEncoding(self):\n console_encoding = getattr(sys.stdout, 'encoding', None)\n if not console_encoding:\n return None\n console_encoding = console_encoding.lower()\n if 'utf-8' in console_encoding:\n return 'utf8'\n elif 'cp437' in console_encoding:\n return 'cp437'\n return None", "docstring": "Gets the encoding as declared by the stdout stream.\n\nReturns:\nstr, The encoding name or None if it could not be determined.", "source": "github-repos"} {"code": "def list_items(system_wide=False):\n\n\t\n\n\tdesktop_env = system.get_name()\n\n\tresult = []\n\n\tif desktop_env == 'windows':\n\t\tsys_startup_dir = os.path.join(winreg.ExpandEnvironmentStrings('%PROGRAMDATA%'), 'Microsoft\\\\Windows\\\\Start Menu\\\\Programs\\\\Startup')\n\t\tuser_startup_dir = os.path.join(get_config_dir()[0], 'Roaming\\\\Microsoft\\\\Windows\\\\Start Menu\\\\Programs\\\\Startup')\n\n\t\tstartup_dir = sys_startup_dir if system_wide else user_startup_dir\n\n\t\tfor file in os.listdir(startup_dir):\n\t\t\tfile_path = os.path.join(startup_dir, file)\n\n\t\t\tresult.append({ 'name': file, 'command': os.path.join(startup_dir, file) })\n\n\telif desktop_env == 'mac':\n\t\titems_list = system.get_cmd_out('launchtl list | awk \\'{print $3}\\'')\n\t\tfor item in items_list.split('\\n'):\n\t\t\t\n\t\t\tlaunchd_plist_paths = ['~/Library/LaunchAgents',\n\t\t\t\t\t\t\t\t\t'/Library/LaunchAgents',\n\t\t\t\t\t\t\t\t\t'/Library/LaunchDaemons',\n\t\t\t\t\t\t\t\t\t'/System/Library/LaunchAgents',\n\t\t\t\t\t\t\t\t\t'/System/Library/LaunchDaemons']\n\n\t\t\tfor path in launchd_plist_paths:\n\t\t\t\tif item + '.plist' in os.listdir(path):\n\t\t\t\t\tplist_file = os.path.join(path, item + '.plist')\n\n\t\t\t\n\t\t\tif sys.version_info.major == 2:\n\t\t\t\tplist_parsed = plistlib.readPlist(plist_file)\n\t\t\telse:\n\t\t\t\twith open(plist_file) as f:\n\t\t\t\t\tplist_parsed = plistlib.load(f)\n\n\t\t\tif 'Program' in plist_parsed:\n\t\t\t\tcmd = plist_parsed['Program']\n\n\t\t\t\tif 'ProgramArguments' in plist_parsed:\n\t\t\t\t\tcmd += ' '.join(plist_parsed['ProgramArguments'])\n\n\t\t\telif 'ProgramArguments' in plist_parsed:\n\t\t\t\tcmd = ' '.join(plist_parsed['ProgramArguments'])\n\n\t\t\telse:\n\t\t\t\tcmd = ''\n\n\t\t\tresult.append({ 'name': item, 'command': cmd })\n\n\t\t\n\t\t\n\n\telse:\n\t\t\n\n\t\t\n\t\tprofile = os.path.expanduser('~/.profile')\n\n\t\tif os.path.isfile(profile):\n\t\t\twith open(profile) as f:\n\t\t\t\tfor line in f:\n\t\t\t\t\tif system.is_in_path(line.lstrip().split(' ')[0]):\n\t\t\t\t\t\tcmd_name = line.lstrip().split(' ')[0]\n\n\t\t\t\t\t\tresult.append({ 'name': cmd_name, 'command': line.strip() })\n\n\t\t\n\t\tif system_wide:\n\t\t\tif os.path.isdir('/etc/profile.d'):\n\t\t\t\tfor file in os.listdir('/etc/profile.d'):\n\t\t\t\t\tfile_path = os.path.join('/etc/profile.d', file)\n\t\t\t\t\tresult.append({ 'name': file, 'command': 'sh %s' % file_path })\n\n\t\t\n\n\t\ttry:\n\t\t\tstartup_dir = directories.get_config_dir('autostart', system_wide=system_wide)[0]\n\n\t\t\tfor file in os.listdir(startup_dir):\n\t\t\t\tfile_parsed = desktopfile.parse(os.path.join(startup_dir, file))\n\n\t\t\t\tif 'Name' in file_parsed:\n\t\t\t\t\tname = file_parsed['Name']\n\n\t\t\t\telse:\n\t\t\t\t\tname = file.replace('.desktop', '')\n\n\t\t\t\tif 'Exec' in file_parsed:\n\t\t\t\t\tif file_parsed['Terminal']:\n\t\t\t\t\t\tcmd = 
applications.terminal(exec_=file_parsed['Exec'],\n\t\t\t\t\t\t\t\t\t\t\t\t\t\treturn_cmd=True)\n\t\t\t\t\telse:\n\t\t\t\t\t\tcmd = file_parsed['Exec']\n\n\t\t\t\telse:\n\t\t\t\t\tcmd = ''\n\n\t\t\t\tif not file_parsed.get('Hidden', False):\n\t\t\t\t\tresult.append({ 'name': name, 'command': cmd })\n\n\t\texcept IndexError:\n\t\t\tpass\n\n\treturn result", "docstring": "List startup programs.\n\nList the programs set to run at startup.\n\nArgs:\nsystem_wide (bool): Gets the programs that run at system-wide startup.\n\nReturns:\nlist: A list of dictionaries in this format:\n\n.. code-block:: python\n\n{\n'name': 'The name of the entry.',\n'command': 'The command used to run it.'\n}", "source": "juraj-google-style"} {"code": "def pauli_string_half(circuit: circuits.Circuit) -> circuits.Circuit:\n \n return circuits.Circuit.from_ops(\n _pull_non_clifford_before(circuit),\n strategy=circuits.InsertStrategy.EARLIEST)", "docstring": "Return only the non-Clifford part of a circuit. See\nconvert_and_separate_circuit().\n\nArgs:\ncircuit: A Circuit with the gate set {SingleQubitCliffordGate,\nPauliInteractionGate, PauliStringPhasor}.\n\nReturns:\nA Circuit with only PauliStringPhasor operations.", "source": "juraj-google-style"} {"code": "def fetch(args: List[str], env: Dict[(str, str)]=None, encoding: str=sys.getdefaultencoding()) -> str:\n (stdout, _) = run(args, env=env, capture_stdout=True, echo_stdout=False, encoding=encoding)\n log.debug(stdout)\n return stdout", "docstring": "Run a command and returns its stdout.\n\nArgs:\nargs: the command-line arguments\nenv: the operating system environment to use\nencoding: the encoding to use for ``stdout``\n\nReturns:\nthe command's ``stdout`` output", "source": "codesearchnet"} {"code": "def ParseVideoRow(self, parser_mediator, query, row, **unused_kwargs):\n query_hash = hash(query)\n event_data = KodiVideoEventData()\n event_data.filename = self._GetRowValue(query_hash, row, 'strFilename')\n event_data.play_count = self._GetRowValue(query_hash, row, 'playCount')\n event_data.query = query\n timestamp = self._GetRowValue(query_hash, row, 'lastPlayed')\n date_time = dfdatetime_time_elements.TimeElements()\n date_time.CopyFromDateTimeString(timestamp)\n event = time_events.DateTimeValuesEvent(date_time, definitions.TIME_DESCRIPTION_LAST_VISITED)\n parser_mediator.ProduceEventWithEventData(event, event_data)", "docstring": "Parses a Video row.\n\nArgs:\nparser_mediator (ParserMediator): mediates interactions between parsers\nand other components, such as storage and dfvfs.\nquery (str): query that created the row.\nrow (sqlite3.Row): row.", "source": "codesearchnet"} {"code": "class ConvNextV2Layer(nn.Module):\n\n def __init__(self, config, dim, drop_path=0):\n super().__init__()\n self.dwconv = nn.Conv2d(dim, dim, kernel_size=7, padding=3, groups=dim)\n self.layernorm = ConvNextV2LayerNorm(dim, eps=1e-06)\n self.pwconv1 = nn.Linear(dim, 4 * dim)\n self.act = ACT2FN[config.hidden_act]\n self.grn = ConvNextV2GRN(4 * dim)\n self.pwconv2 = nn.Linear(4 * dim, dim)\n self.drop_path = ConvNextV2DropPath(drop_path) if drop_path > 0.0 else nn.Identity()\n\n def forward(self, hidden_states: torch.FloatTensor) -> torch.Tensor:\n input = hidden_states\n x = self.dwconv(hidden_states)\n x = x.permute(0, 2, 3, 1)\n x = self.layernorm(x)\n x = self.pwconv1(x)\n x = self.act(x)\n x = self.grn(x)\n x = self.pwconv2(x)\n x = x.permute(0, 3, 1, 2)\n x = input + self.drop_path(x)\n return x", "docstring": "This corresponds to the `Block` class in the original 
implementation.\n\nThere are two equivalent implementations: [DwConv, LayerNorm (channels_first), Conv, GELU,1x1 Conv]; all in (N, C,\nH, W) (2) [DwConv, Permute to (N, H, W, C), LayerNorm (channels_last), Linear, GELU, Linear]; Permute back\n\nThe authors used (2) as they find it slightly faster in PyTorch.\n\nArgs:\nconfig ([`ConvNextV2Config`]): Model configuration class.\ndim (`int`): Number of input channels.\ndrop_path (`float`): Stochastic depth rate. Default: 0.0.", "source": "github-repos"} {"code": "def deploy_template(access_token, subscription_id, resource_group, deployment_name, template, parameters):\n endpoint = ''.join([get_rm_endpoint(), '/subscriptions/', subscription_id, '/resourcegroups/', resource_group, '/providers/Microsoft.Resources/deployments/', deployment_name, '?api-version=', DEPLOYMENTS_API])\n properties = {'template': template}\n properties['mode'] = 'Incremental'\n properties['parameters'] = parameters\n template_body = {'properties': properties}\n body = json.dumps(template_body)\n return do_put(endpoint, body, access_token)", "docstring": "Deploy a template referenced by a JSON string, with parameters as a JSON string.\n\nArgs:\naccess_token (str): A valid Azure authentication token.\nsubscription_id (str): Azure subscription id.\nresource_group (str): Azure resource group name.\ndeployment_name (str): A name you give to the deployment.\ntemplate (str): String representatipn of a JSON template body.\nparameters (str): String representation of a JSON template parameters body.\n\nReturns:\nHTTP response.", "source": "codesearchnet"} {"code": "def _build_shuffle_gather(input_tensors, gather_devices, red_op, un_op=None):\n num_source_devices = len(input_tensors)\n num_gather_devices = len(gather_devices)\n shape = input_tensors[0].shape\n if len(shape) != 1:\n raise ValueError('input_tensors must be 1D')\n shards_by_source = []\n for d in range(0, num_source_devices):\n with ops.colocate_with(input_tensors[d]):\n shards_by_source.append(_ragged_split(input_tensors[d], num_gather_devices))\n reduced_shards = []\n for d in range(0, num_gather_devices):\n with ops.device(gather_devices[d]):\n values = [s[d] for s in shards_by_source]\n red_shard = red_op(values)\n if un_op:\n red_shard = un_op(red_shard)\n reduced_shards.append(red_shard)\n return reduced_shards", "docstring": "Construct the gather (concentrate and reduce) phase of shuffle all-reduce.\n\nArgs:\ninput_tensors: list of `tf.Tensor` values to be reduced.\ngather_devices: list of names of devices on which reduction shards\nshould be placed.\nred_op: the binary reduction Op\nun_op: optional elementwise unary Op to be applied to fully-reduced values.\n\nReturns:\nlist of `tf.Tensor` which are the fully reduced shards.\n\nRaises:\nValueError: inputs not well-formed.", "source": "github-repos"} {"code": "def __init__(self, definitions_registry):\n \n super(DataTypeMapFactory, self).__init__()\n self._definitions_registry = definitions_registry", "docstring": "Initializes a data type maps factory.\n\nArgs:\ndefinitions_registry (DataTypeDefinitionsRegistry): data type definitions\nregistry.", "source": "juraj-google-style"} {"code": "def create_accumulator(self, *args, **kwargs):\n raise NotImplementedError(str(self))", "docstring": "Return a fresh, empty accumulator for the combine operation.\n\nArgs:\n*args: Additional arguments and side inputs.\n**kwargs: Additional arguments and side inputs.", "source": "github-repos"} {"code": "def automatic_parser(result, dtypes={}, converters={}):\n 
np.seterr(all='raise')\n parsed = {}\n for (filename, contents) in result['output'].items():\n if (dtypes.get(filename) is None):\n dtypes[filename] = None\n if (converters.get(filename) is None):\n converters[filename] = None\n with warnings.catch_warnings():\n warnings.simplefilter('ignore')\n parsed[filename] = np.genfromtxt(io.StringIO(contents), dtype=dtypes[filename], converters=converters[filename]).tolist()\n return parsed", "docstring": "Try and automatically convert strings formatted as tables into nested\nlist structures.\n\nUnder the hood, this function essentially applies the genfromtxt function\nto all files in the output, and passes it the additional kwargs.\n\nArgs:\nresult (dict): the result to parse.\ndtypes (dict): a dictionary containing the dtype specification to perform\nparsing for each available filename. See the numpy genfromtxt\ndocumentation for more details on how to format these.", "source": "codesearchnet"} {"code": "def set_memory(self, total=None, static=None):\n \n if total:\n self.params[\"rem\"][\"mem_total\"] = total\n if static:\n self.params[\"rem\"][\"mem_static\"] = static", "docstring": "Set the maxium allowed memory.\n\nArgs:\ntotal: The total memory. Integer. Unit: MBytes. If set to None,\nthis parameter will be neglected.\nstatic: The static memory. Integer. Unit MBytes. If set to None,\nthis parameterwill be neglected.", "source": "juraj-google-style"} {"code": "def convert_elementwise_add(params, w_name, scope_name, inputs, layers, weights, names):\n print('Converting elementwise_add ...')\n if ('broadcast' in params):\n model0 = layers[inputs[0]]\n model1 = layers[inputs[1]]\n if (names == 'short'):\n tf_name = ('A' + random_string(7))\n elif (names == 'keep'):\n tf_name = w_name\n else:\n tf_name = (w_name + str(random.random()))\n\n def target_layer(x):\n layer = tf.add(x[0], x[1])\n return layer\n lambda_layer = keras.layers.Lambda(target_layer, name=tf_name)\n layers[scope_name] = lambda_layer([layers[inputs[0]], layers[inputs[1]]])\n else:\n model0 = layers[inputs[0]]\n model1 = layers[inputs[1]]\n if (names == 'short'):\n tf_name = ('A' + random_string(7))\n elif (names == 'keep'):\n tf_name = w_name\n else:\n tf_name = (w_name + str(random.random()))\n add = keras.layers.Add(name=tf_name)\n layers[scope_name] = add([model0, model1])", "docstring": "Convert elementwise addition.\n\nArgs:\nparams: dictionary with layer parameters\nw_name: name prefix in state_dict\nscope_name: pytorch scope name\ninputs: pytorch node inputs\nlayers: dictionary with keras tensors\nweights: pytorch state_dict\nnames: use short names for keras layers", "source": "codesearchnet"} {"code": "def tokenize(self, text):\n output_tokens = []\n for token in whitespace_tokenize(text):\n chars = list(token)\n if len(chars) > self.max_input_chars_per_word:\n output_tokens.append(self.unk_token)\n continue\n is_bad = False\n start = 0\n sub_tokens = []\n while start < len(chars):\n end = len(chars)\n cur_substr = None\n while start < end:\n substr = ''.join(chars[start:end])\n if start > 0:\n substr = '\n if substr in self.vocab:\n cur_substr = substr\n break\n end -= 1\n if cur_substr is None:\n is_bad = True\n break\n sub_tokens.append(cur_substr)\n start = end\n if is_bad:\n output_tokens.append(self.unk_token)\n else:\n output_tokens.extend(sub_tokens)\n return output_tokens", "docstring": "Tokenizes a piece of text into its word pieces. 
This uses a greedy longest-match-first algorithm to perform\ntokenization using the given vocabulary.\n\nFor example, `input = \"unaffable\"` will return as output `[\"un\", \"##aff\", \"##able\"]`.\n\nArgs:\ntext: A single token or whitespace separated tokens. This should have\nalready been passed through *BasicTokenizer*.\n\nReturns:\nA list of wordpiece tokens.", "source": "github-repos"} {"code": "async def put(self, cid):\n if settings.SIGNATURE_VERIFICATION:\n super().verify()\n try:\n body = json.loads(self.request.body)\n except:\n self.set_status(400)\n self.write({'error': 400, 'reason': 'Unexpected data format. JSON required'})\n raise tornado.web.Finish\n public_key = body.get('public_key', None)\n if isinstance(body['message'], str):\n message = json.loads(body['message'])\n elif isinstance(body['message'], dict):\n message = body['message']\n price = message.get('price')\n access_type = message.get('access_type')\n coinid = message.get('coinid')\n if (not any([price, access_type, coinid])):\n self.set_status(400)\n self.write({'error': 400, 'reason': 'Missed price and access type for content'})\n if (coinid in settings.bridges.keys()):\n self.account.blockchain.setendpoint(settings.bridges[coinid])\n else:\n self.set_status(400)\n self.write({'error': 400, 'reason': 'Invalid coin ID'})\n raise tornado.web.Finish\n check = self.account.validator[coinid](public_key)\n owneraddr = (await self.account.blockchain.ownerbycid(cid=cid))\n if isinstance(owneraddr, dict):\n if ('error' in owneraddr.keys()):\n self.set_status(404)\n self.write({'error': 404, 'reason': 'Owner not found.'})\n raise tornado.web.Finish\n if (owneraddr != check):\n self.set_status(403)\n self.write({'error': 403, 'reason': 'Owner does not match.'})\n raise tornado.web.Finish\n response = {'cid': cid, 'coinid': coinid}\n if (access_type == 'write_price'):\n result = (await self.account.blockchain.setwriteprice(cid=cid, write_price=price))\n response['write_access'] = result['price']\n elif (access_type == 'read_price'):\n result = (await self.account.blockchain.setreadprice(cid=cid, read_price=price))\n response['read_access'] = result['price']\n fee = (await billing.set_price_fee(cid=cid, price=price, owneraddr=owneraddr))\n if ('error' in fee.keys()):\n self.set_status(fee['error'])\n self.write(fee)\n raise tornado.web.Finish\n self.write(response)", "docstring": "Update price of current content\n\nAccepts:\nQuery string args:\n- \"cid\" - int\nRequest body params:\n- \"access_type\" - str\n- \"price\" - int\n- \"coinid\" - str\n\nReturns:\ndict with following fields:\n- \"confirmed\": None\n- \"txid\" - str\n- \"description\" - str\n- \"content\" - str\n- \"read_access\" - int\n- \"write_access\" - int\n- \"cid\" - int\n- \"txid\" - str\n- \"seller_pubkey\" - str\n- \"seller_access_string\": None or str\n\nVerified: True", "source": "codesearchnet"} {"code": "def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None):\n output = [self.cls_token_id] + token_ids_0 + [self.sep_token_id]\n if token_ids_1 is not None:\n output += token_ids_1 + [self.sep_token_id]\n return output", "docstring": "Build model inputs from a sequence or a pair of sequence for sequence classification tasks by concatenating and\nadding special tokens. 
A MobileBERT sequence has the following format:\n\n- single sequence: `[CLS] X [SEP]`\n- pair of sequences: `[CLS] A [SEP] B [SEP]`\n\nArgs:\ntoken_ids_0 (`List[int]`):\nList of IDs to which the special tokens will be added.\ntoken_ids_1 (`List[int]`, *optional*):\nOptional second list of IDs for sequence pairs.\n\nReturns:\n`List[int]`: List of [input IDs](../glossary#input-ids) with the appropriate special tokens.", "source": "github-repos"} {"code": "def sheets_read(config, auth, sheet_url_or_name, sheet_tab, sheet_range=''):\n if config.verbose:\n print('SHEETS READ', sheet_url_or_name, sheet_tab, sheet_range)\n sheet_id = sheets_id(config, auth, sheet_url_or_name)\n if sheet_id:\n return API_Sheets(config, auth).spreadsheets().values().get(spreadsheetId=sheet_id, range=sheets_tab_range(sheet_tab, sheet_range)).execute().get('values')\n else:\n raise ValueError('Sheet does not exist for %s: %s' % (config, auth, sheet_url_or_name))", "docstring": "Pull sheet id from URL, name, or id itself\n\nArgs:\nconfig - see starthinker/util/configuration.py\nauth - user or service\nsheet_url_or_name - one of: URL, document title, or id\nsheet_tab - name of tab to get id for\nsheet_range - A1 notation or blank if whole sheet\n\nNo Return", "source": "github-repos"} {"code": "def visit_and_get_function_nodes(\n self,\n definition,\n first_node\n ):\n \n len_before_visiting_func = len(self.nodes)\n previous_node = self.nodes[-1]\n entry_node = self.append_node(EntryOrExitNode('Function Entry ' +\n definition.name))\n if not first_node:\n first_node = entry_node\n self.connect_if_allowed(previous_node, entry_node)\n\n function_body_connect_statements = self.stmt_star_handler(definition.node.body)\n entry_node.connect(function_body_connect_statements.first_statement)\n\n exit_node = self.append_node(EntryOrExitNode('Exit ' + definition.name))\n exit_node.connect_predecessors(function_body_connect_statements.last_statements)\n\n the_new_nodes = self.nodes[len_before_visiting_func:]\n return_connection_handler(the_new_nodes, exit_node)\n\n return (the_new_nodes, first_node)", "docstring": "Visits the nodes of a user defined function.\n\nArgs:\ndefinition(LocalModuleDefinition): Definition of the function being added.\nfirst_node(EntryOrExitNode or None or RestoreNode): Used to connect previous statements to this function.\n\nReturns:\nthe_new_nodes(list[Node]): The nodes added while visiting the function.\nfirst_node(EntryOrExitNode or None or RestoreNode): Used to connect previous statements to this function.", "source": "juraj-google-style"} {"code": "def transform_request(self, orig_request, params, method_config):\n method_params = method_config.get('request', {}).get('parameters', {})\n request = self.transform_rest_request(orig_request, params, method_params)\n request.path = method_config.get('rosyMethod', '')\n return request", "docstring": "Transforms orig_request to apiserving request.\n\nThis method uses orig_request to determine the currently-pending request\nand returns a new transformed request ready to send to the backend. This\nmethod accepts a rest-style or RPC-style request.\n\nArgs:\norig_request: An ApiRequest, the original request from the user.\nparams: A dictionary containing path parameters for rest requests, or\nNone for an RPC request.\nmethod_config: A dict, the API config of the method to be called.\n\nReturns:\nAn ApiRequest that's a copy of the current request, modified so it can\nbe sent to the backend. 
The path is updated and parts of the body or\nother properties may also be changed.", "source": "codesearchnet"} {"code": "def __init__(self, intent_name):\n \n self.at_least_one = []\n self.requires = []\n self.optional = []\n self.name = intent_name", "docstring": "Constructor\n\nArgs:\nintent_name(str): the name of the intents that this parser parses/validates", "source": "juraj-google-style"} {"code": "def get_course_enrollment(self, username, course_id):\n endpoint = getattr(self.client.enrollment, '{username},{course_id}'.format(username=username, course_id=course_id))\n try:\n result = endpoint.get()\n except HttpNotFoundError:\n LOGGER.error('Course enrollment details not found for invalid username or course; username=[%s], course=[%s]', username, course_id)\n return None\n if (not result):\n LOGGER.info('Failed to find course enrollment details for user [%s] and course [%s]', username, course_id)\n return None\n return result", "docstring": "Query the enrollment API to get information about a single course enrollment.\n\nArgs:\nusername (str): The username by which the user goes on the OpenEdX platform\ncourse_id (str): The string value of the course's unique identifier\n\nReturns:\ndict: A dictionary containing details of the enrollment, including course details, mode, username, etc.", "source": "codesearchnet"} {"code": "def serialize_private_key_to_pem(private_key, passphrase_bytes=None):\n \n return private_key.private_bytes(\n encoding=cryptography.hazmat.primitives.serialization.Encoding.PEM,\n format=cryptography.hazmat.primitives.serialization.PrivateFormat.TraditionalOpenSSL,\n encryption_algorithm=cryptography.hazmat.primitives.serialization.BestAvailableEncryption(\n passphrase_bytes\n )\n if passphrase_bytes is not None\n else cryptography.hazmat.primitives.serialization.NoEncryption(),\n )", "docstring": "Serialize private key to PEM.\n\nArgs:\nprivate_key:\npassphrase_bytes:\n\nReturns:\nbytes: PEM encoded private key", "source": "juraj-google-style"} {"code": "def _write_to_command_buffer(self, to_write):\n \n \n np.copyto(self._command_bool_ptr, True)\n to_write += '0' \n input_bytes = str.encode(to_write)\n for index, val in enumerate(input_bytes):\n self._command_buffer_ptr[index] = val", "docstring": "Write input to the command buffer. Reformat input string to the correct format.\n\nArgs:\nto_write (str): The string to write to the command buffer.", "source": "juraj-google-style"} {"code": "def Convert(self, metadata, stat_entry, token=None):\n if (stat_entry.pathspec.pathtype != rdf_paths.PathSpec.PathType.REGISTRY):\n return []\n result = ExportedRegistryKey(metadata=metadata, urn=stat_entry.AFF4Path(metadata.client_urn), last_modified=stat_entry.st_mtime)\n if (stat_entry.HasField('registry_type') and stat_entry.HasField('registry_data')):\n result.type = stat_entry.registry_type\n data = stat_entry.registry_data.GetValue()\n if isinstance(data, bytes):\n result.data = data\n else:\n result.data = str(data).encode('utf-8')\n return [result]", "docstring": "Converts StatEntry to ExportedRegistryKey.\n\nDoes nothing if StatEntry corresponds to a file and not a registry entry.\n\nArgs:\nmetadata: ExportedMetadata to be used for conversion.\nstat_entry: StatEntry to be converted.\ntoken: Security token.\n\nReturns:\nList or generator with resulting RDFValues. 
Empty list if StatEntry\ncorresponds to a file and not to a registry entry.", "source": "codesearchnet"} {"code": "def dockprep(self, force_rerun=False):\n \n log.debug('{}: running dock preparation...'.format(self.id))\n\n prep_mol2 = op.join(self.dock_dir, '{}_prep.mol2'.format(self.id))\n prep_py = op.join(self.dock_dir, \"prep.py\")\n\n if ssbio.utils.force_rerun(flag=force_rerun, outfile=prep_mol2):\n with open(prep_py, \"w\") as f:\n f.write('import chimera\\n')\n f.write('from DockPrep import prep\\n')\n f.write('models = chimera.openModels.list(modelTypes=[chimera.Molecule])\\n')\n f.write('prep(models)\\n')\n f.write('from WriteMol2 import writeMol2\\n')\n f.write('writeMol2(models, \"{}\")\\n'.format(prep_mol2))\n\n cmd = 'chimera --nogui {} {}'.format(self.structure_path, prep_py)\n os.system(cmd)\n os.remove(prep_py)\n os.remove('{}c'.format(prep_py))\n\n if ssbio.utils.is_non_zero_file(prep_mol2):\n self.dockprep_path = prep_mol2\n log.debug('{}: successful dockprep execution'.format(self.dockprep_path))\n else:\n log.critical('{}: dockprep failed to run on PDB file'.format(self.structure_path))", "docstring": "Prepare a PDB file for docking by first converting it to mol2 format.\n\nArgs:\nforce_rerun (bool): If method should be rerun even if output file exists", "source": "juraj-google-style"} {"code": "def cpfs(self,\n state: Sequence[tf.Tensor],\n action: Sequence[tf.Tensor],\n noise: Optional[Noise] = None) -> Tuple[List[TensorFluent], List[TensorFluent]]:\n \n scope = self.transition_scope(state, action)\n batch_size = int(state[0].shape[0])\n interm_fluents, next_state_fluents = self.compile_cpfs(scope, batch_size, noise)\n interms = [fluent for _, fluent in interm_fluents]\n next_state = [fluent for _, fluent in next_state_fluents]\n return interms, next_state", "docstring": "Compiles the intermediate and next state fluent CPFs given\nthe current `state` and `action`.\n\nArgs:\nstate (Sequence[tf.Tensor]): A tuple of state tensors.\naction (Sequence[tf.Tensor]): A tuple of action tensors.\n\nReturns:\nTuple[List[TensorFluent], List[TensorFluent]]: A pair of lists of TensorFluent\nrepresenting the intermediate and state CPFs.", "source": "juraj-google-style"} {"code": "def truncate(n):\n \n count = 0\n for digit in n[-1::-1]:\n if digit != 0:\n break\n count += 1\n return n[:-count] if count > 0 else n", "docstring": "Removes trailing zeros.\n\nArgs:\nn: The number to truncate.\nThis number should be in the following form:\n(..., '.', int, int, int, ..., 0)\nReturns:\nn with all trailing zeros removed\n\n>>> truncate((9, 9, 9, '.', 9, 9, 9, 9, 0, 0, 0, 0))\n(9, 9, 9, '.', 9, 9, 9, 9)\n>>> truncate(('.',))\n('.',)", "source": "juraj-google-style"} {"code": "def reconstruct_headers(self, response):\n header_dict = {}\n for key in list(response.headers.keys()):\n key_item_list = []\n key_list = response.headers.getlist(key)\n for item in key_list:\n key_item_list.append(item)\n header_dict[key] = key_item_list\n return header_dict", "docstring": "Purpose of this method is to reconstruct the headers dictionary that\nis normally passed in with a \"response\" object from scrapy.\n\nArgs:\nresponse: A scrapy response object\n\nReturns: A dictionary that mirrors the \"response.headers\" dictionary\nthat is normally within a response object\n\nRaises: None\nReason: Originally, there was a bug where the json.dumps() did not\nproperly serialize the headers. 
This method is a way to circumvent\nthe known issue", "source": "codesearchnet"} {"code": "def create(configs):\n if (not configs):\n raise Error(ANDROID_DEVICE_EMPTY_CONFIG_MSG)\n elif (configs == ANDROID_DEVICE_PICK_ALL_TOKEN):\n ads = get_all_instances()\n elif (not isinstance(configs, list)):\n raise Error(ANDROID_DEVICE_NOT_LIST_CONFIG_MSG)\n elif isinstance(configs[0], dict):\n ads = get_instances_with_configs(configs)\n elif isinstance(configs[0], basestring):\n ads = get_instances(configs)\n else:\n raise Error(('No valid config found in: %s' % configs))\n valid_ad_identifiers = (list_adb_devices() + list_adb_devices_by_usb_id())\n for ad in ads:\n if (ad.serial not in valid_ad_identifiers):\n raise DeviceError(ad, 'Android device is specified in config but is not attached.')\n _start_services_on_ads(ads)\n return ads", "docstring": "Creates AndroidDevice controller objects.\n\nArgs:\nconfigs: A list of dicts, each representing a configuration for an\nAndroid device.\n\nReturns:\nA list of AndroidDevice objects.", "source": "codesearchnet"} {"code": "def mirror_pull(self, **kwargs):\n \n path = '/projects/%s/mirror/pull' % self.get_id()\n self.manager.gitlab.http_post(path, **kwargs)", "docstring": "Start the pull mirroring process for the project.\n\nArgs:\n**kwargs: Extra options to send to the server (e.g. sudo)\n\nRaises:\nGitlabAuthenticationError: If authentication is not correct\nGitlabCreateError: If the server failed to perform the request", "source": "juraj-google-style"} {"code": "def write_command(self, command: Command):\n \n _logger.debug('Write command.')\n data = command.to_bytes()\n yield from self._connection.write(data)\n self._data_event_dispatcher.notify_write(data)", "docstring": "Write a command to the stream.\n\nArgs:\ncommand: The command.\n\nCoroutine.", "source": "juraj-google-style"} {"code": "def __init__(self, resolver_context):\n \n super(TARFile, self).__init__(resolver_context)\n self._current_offset = 0\n self._file_system = None\n self._size = 0\n self._tar_ext_file = None", "docstring": "Initializes a file-like object.\n\nArgs:\nresolver_context (Context): resolver context.", "source": "juraj-google-style"} {"code": "def compute_path(self, start_x, start_y, dest_x, dest_y, diagonal_cost=_math.sqrt(2)):\n return tcod.path.AStar(self, diagonal_cost).get_path(start_x, start_y, dest_x, dest_y)", "docstring": "Get the shortest path between two points.\n\nArgs:\nstart_x (int): Starting x-position.\nstart_y (int): Starting y-position.\ndest_x (int): Destination x-position.\ndest_y (int): Destination y-position.\ndiagonal_cost (float): Multiplier for diagonal movement.\n\nCan be set to zero to disable diagonal movement entirely.\n\nReturns:\nList[Tuple[int, int]]: The shortest list of points to the\ndestination position from the starting position.\n\nThe start point is not included in this list.", "source": "codesearchnet"} {"code": "def Match(self, registry_key):\n \n key_path = registry_key.path.upper()\n \n for ignore_key_path_suffix in self._IGNORE_KEY_PATH_SUFFIXES:\n if key_path.endswith(ignore_key_path_suffix):\n return False\n\n return super(MRUListStringRegistryKeyFilter, self).Match(registry_key)", "docstring": "Determines if a Windows Registry key matches the filter.\n\nArgs:\nregistry_key (dfwinreg.WinRegistryKey): Windows Registry key.\n\nReturns:\nbool: True if the Windows Registry key matches the filter.", "source": "juraj-google-style"} {"code": "def write_float(self, value, little_endian=True):\n if little_endian:\n endian = '<'\n 
else:\n endian = '>'\n return self.pack(('%sf' % endian), value)", "docstring": "Pack the value as a float and write 4 bytes to the stream.\n\nArgs:\nvalue (number): the value to write to the stream.\nlittle_endian (bool): specify the endianness. (Default) Little endian.\n\nReturns:\nint: the number of bytes written.", "source": "codesearchnet"} {"code": "def read(self, input_buffer, kmip_version=enums.KMIPVersion.KMIP_1_0):\n super(DeriveKeyRequestPayload, self).read(input_buffer, kmip_version=kmip_version)\n local_buffer = utils.BytearrayStream(input_buffer.read(self.length))\n if self.is_tag_next(enums.Tags.OBJECT_TYPE, local_buffer):\n self._object_type = primitives.Enumeration(enums.ObjectType, tag=enums.Tags.OBJECT_TYPE)\n self._object_type.read(local_buffer, kmip_version=kmip_version)\n else:\n raise exceptions.InvalidKmipEncoding('The DeriveKey request payload encoding is missing the object type.')\n unique_identifiers = []\n while self.is_tag_next(enums.Tags.UNIQUE_IDENTIFIER, local_buffer):\n unique_identifier = primitives.TextString(tag=enums.Tags.UNIQUE_IDENTIFIER)\n unique_identifier.read(local_buffer, kmip_version=kmip_version)\n unique_identifiers.append(unique_identifier)\n if (not unique_identifiers):\n raise exceptions.InvalidKmipEncoding('The DeriveKey request payload encoding is missing the unique identifiers.')\n else:\n self._unique_identifiers = unique_identifiers\n if self.is_tag_next(enums.Tags.DERIVATION_METHOD, local_buffer):\n self._derivation_method = primitives.Enumeration(enums.DerivationMethod, tag=enums.Tags.DERIVATION_METHOD)\n self._derivation_method.read(local_buffer, kmip_version=kmip_version)\n else:\n raise exceptions.InvalidKmipEncoding('The DeriveKey request payload encoding is missing the derivation method.')\n if self.is_tag_next(enums.Tags.DERIVATION_PARAMETERS, local_buffer):\n self._derivation_parameters = attributes.DerivationParameters()\n self._derivation_parameters.read(local_buffer, kmip_version=kmip_version)\n else:\n raise exceptions.InvalidKmipEncoding('The DeriveKey request payload encoding is missing the derivation parameters.')\n if (kmip_version < enums.KMIPVersion.KMIP_2_0):\n if self.is_tag_next(enums.Tags.TEMPLATE_ATTRIBUTE, local_buffer):\n self._template_attribute = objects.TemplateAttribute()\n self._template_attribute.read(local_buffer, kmip_version=kmip_version)\n else:\n raise exceptions.InvalidKmipEncoding('The DeriveKey request payload encoding is missing the template attribute.')\n elif self.is_tag_next(enums.Tags.ATTRIBUTES, local_buffer):\n attrs = objects.Attributes()\n attrs.read(local_buffer, kmip_version=kmip_version)\n value = objects.convert_attributes_to_template_attribute(attrs)\n self._template_attribute = value\n else:\n raise exceptions.InvalidKmipEncoding('The DeriveKey request payload encoding is missing the attributes structure.')\n self.is_oversized(local_buffer)", "docstring": "Read the data encoding the DeriveKey request payload and decode it\ninto its constituent parts.\n\nArgs:\ninput_buffer (stream): A data stream containing encoded object\ndata, supporting a read method; usually a BytearrayStream\nobject.\nkmip_version (KMIPVersion): An enumeration defining the KMIP\nversion with which the object will be decoded. 
Optional,\ndefaults to KMIP 1.0.\n\nRaises:\nValueError: Raised if the data attribute is missing from the\nencoded payload.", "source": "codesearchnet"} {"code": "def gnuplot_3d_matrix(z_matrix, filename, title='', x_label='', y_label=''):\n (_, ext) = os.path.splitext(filename)\n if (ext != '.png'):\n filename += '.png'\n gnuplot_cmds = '\\n set datafile separator \",\"\\n set term pngcairo size 30cm,25cm\\n set out filename\\n\\n unset key\\n set border lw 1.5\\n set view map\\n\\n set title title\\n set xlabel x_label\\n set ylabel y_label\\n\\n splot filename_data matrix w pm3d\\n '\n scr = _GnuplotScriptTemp(gnuplot_cmds)\n data = _GnuplotDataZMatrixTemp(z_matrix)\n args_dict = {'filename': filename, 'filename_data': data.name, 'title': title, 'x_label': x_label, 'y_label': y_label}\n gnuplot(scr.name, args_dict)", "docstring": "Function to produce a general 3D plot from a 2D matrix.\n\nArgs:\nz_matrix (list): 2D matrix.\nfilename (str): Filename of the output image.\ntitle (str): Title of the plot. Default is '' (no title).\nx_label (str): x-axis label.\ny_label (str): y-axis label.", "source": "codesearchnet"} {"code": "def remove_symbol_add_symbol(string_item, remove_symbol, add_symbol):\n \n string_item = add_symbol.join(string_item.split(remove_symbol))\n return string_item", "docstring": "Remove a symbol from a string, and replace it with a different one\nArgs:\nstring_item: String that you want to replace symbols in\nremove_symbol: Symbol to remove\nadd_symbol: Symbol to add\n\nReturns: returns a string with symbols swapped", "source": "juraj-google-style"} {"code": "def create_sprites_dataset(characters, actions, directions, channels=3, length=8, shuffle=False, fake_data=False):\n if fake_data:\n dummy_image = tf.random.normal([HEIGHT, WIDTH, CHANNELS])\n else:\n basedir = download_sprites()\n action_names = [action.name for action in actions]\n action_metadata = [(action.start_row, action.frames) for action in actions]\n direction_rows = [direction.row_offset for direction in directions]\n chars = tf.data.Dataset.from_tensor_slices(characters)\n act_names = tf.data.Dataset.from_tensor_slices(action_names).repeat()\n acts_metadata = tf.data.Dataset.from_tensor_slices(action_metadata).repeat()\n dir_rows = tf.data.Dataset.from_tensor_slices(direction_rows).repeat()\n if shuffle:\n chars = chars.shuffle(len(characters))\n dataset = tf.data.Dataset.zip((chars, act_names, acts_metadata, dir_rows))\n skin_table = tf.contrib.lookup.index_table_from_tensor(sorted(SKIN_COLORS))\n hair_table = tf.contrib.lookup.index_table_from_tensor(sorted(HAIRSTYLES))\n top_table = tf.contrib.lookup.index_table_from_tensor(sorted(TOPS))\n pants_table = tf.contrib.lookup.index_table_from_tensor(sorted(PANTS))\n action_table = tf.contrib.lookup.index_table_from_tensor(sorted(action_names))\n\n def process_example(attrs, act_name, act_metadata, dir_row_offset):\n 'Processes a dataset row.'\n skin_name = attrs[0]\n hair_name = attrs[1]\n top_name = attrs[2]\n pants_name = attrs[3]\n if fake_data:\n char = dummy_image\n else:\n skin = read_image(((basedir + os.sep) + skin_name))\n hair = read_image(((basedir + os.sep) + hair_name))\n top = read_image(((basedir + os.sep) + top_name))\n pants = read_image(((basedir + os.sep) + pants_name))\n char = create_character(skin, hair, top, pants)\n if shuffle:\n seq = create_random_seq(char, act_metadata, dir_row_offset, length)\n else:\n seq = create_seq(char, act_metadata, dir_row_offset, length)\n seq = seq[(..., :channels)]\n skin_idx = 
skin_table.lookup(skin_name)\n hair_idx = hair_table.lookup(hair_name)\n top_idx = top_table.lookup(top_name)\n pants_idx = pants_table.lookup(pants_name)\n act_idx = action_table.lookup(act_name)\n return (seq, skin_idx, hair_idx, top_idx, pants_idx, act_idx, skin_name, hair_name, top_name, pants_name, act_name)\n dataset = dataset.map(process_example)\n return dataset", "docstring": "Creates a tf.data pipeline for the sprites dataset.\n\nArgs:\ncharacters: A list of (skin, hair, top, pants) tuples containing\nrelative paths to the sprite png image for each attribute.\nactions: A list of Actions.\ndirections: A list of Directions.\nchannels: Number of image channels to yield.\nlength: Desired length of the sequences.\nshuffle: Whether or not to shuffle the characters and sequences\nstart frame.\nfake_data: Boolean for whether or not to yield synthetic data.\n\nReturns:\nA tf.data.Dataset yielding (seq, skin label index, hair label index,\ntop label index, pants label index, action label index, skin label\nname, hair label_name, top label name, pants label name, action\nlabel name) tuples.", "source": "codesearchnet"} {"code": "def ValidateKey(cls, key_path):\n for prefix in cls.VALID_PREFIXES:\n if key_path.startswith(prefix):\n return\n if key_path.startswith('HKEY_CURRENT_USER\\\\'):\n raise errors.FormatError('HKEY_CURRENT_USER\\\\ is not supported instead use: HKEY_USERS\\\\%%users.sid%%\\\\')\n raise errors.FormatError('Unupported Registry key path: {0:s}'.format(key_path))", "docstring": "Validates this key against supported key names.\n\nArgs:\nkey_path (str): path of a Windows Registry key.\n\nRaises:\nFormatError: when key is not supported.", "source": "codesearchnet"} {"code": "def upload(target):\n \n \n log.info(\"Uploading to pypi server <33>{}\".format(target))\n with conf.within_proj_dir():\n shell.run('python setup.py sdist register -r \"{}\"'.format(target))\n shell.run('python setup.py sdist upload -r \"{}\"'.format(target))", "docstring": "Upload the release to a pypi server.\n\nTODO: Make sure the git directory is clean before allowing a release.\n\nArgs:\ntarget (str):\npypi target as defined in ~/.pypirc", "source": "juraj-google-style"} {"code": "def __init__(self, source: SourceBase) -> None:\n super().__init__()\n self.source = source", "docstring": "Initializes a Read transform.\n\nArgs:\nsource: Data source to read from.", "source": "github-repos"} {"code": "def _ParseEntryArrayObject(self, file_object, file_offset):\n \n entry_array_object_map = self._GetDataTypeMap(\n 'systemd_journal_entry_array_object')\n\n try:\n entry_array_object, _ = self._ReadStructureFromFileObject(\n file_object, file_offset, entry_array_object_map)\n except (ValueError, errors.ParseError) as exception:\n raise errors.ParseError((\n 'Unable to parse entry array object at offset: 0x{0:08x} with error: '\n '{1!s}').format(file_offset, exception))\n\n if entry_array_object.object_type != self._OBJECT_TYPE_ENTRY_ARRAY:\n raise errors.ParseError('Unsupported object type: {0:d}.'.format(\n entry_array_object.object_type))\n\n if entry_array_object.object_flags != 0:\n raise errors.ParseError('Unsupported object flags: 0x{0:02x}.'.format(\n entry_array_object.object_flags))\n\n return entry_array_object", "docstring": "Parses an entry array object.\n\nArgs:\nfile_object (dfvfs.FileIO): a file-like object.\nfile_offset (int): offset of the entry array object relative to the start\nof the file-like object.\n\nReturns:\nsystemd_journal_entry_array_object: entry array 
object.\n\nRaises:\nParseError: if the entry array object cannot be parsed.", "source": "juraj-google-style"} {"code": "def get_whois_tags(ip_address):\n \n whois = IPWhois(ip_address).lookup_whois()\n nets = whois.get(\"nets\", None)\n\n if not nets:\n return []\n\n \n cities = [\n net[\"city\"]\n for net in nets\n if net.get(\"city\", None)\n ]\n\n \n address_list = []\n for net in nets:\n address = net.get(\"address\", None)\n if not address:\n continue\n\n \n if \"description\" in net and net[\"description\"]:\n address = address.replace(net[\"description\"], \"\").strip()\n\n if \"\\n\" in address:\n address = \", \".join(address.splitlines())\n\n address_list.append(address)\n\n return [\n SourceString(val, source=\"Whois\")\n for val in set(cities + address_list)\n ]", "docstring": "Get list of tags with `address` for given `ip_address`.\n\nArgs:\nindex_page (str): HTML content of the page you wisht to analyze.\n\nReturns:\nlist: List of :class:`.SourceString` objects.", "source": "juraj-google-style"} {"code": "def fetch(self, transfer_id, data={}, **kwargs):\n return super(Transfer, self).fetch(transfer_id, data, **kwargs)", "docstring": "Fetch Transfer for given Id\n\nArgs:\ntransfer_id : Id for which transfer object has to be retrieved\n\nReturns:\nTransfer dict for given transfer Id", "source": "codesearchnet"} {"code": "def save(thing, url_or_handle, **kwargs):\n is_handle = (hasattr(url_or_handle, 'write') and hasattr(url_or_handle, 'name'))\n if is_handle:\n (_, ext) = os.path.splitext(url_or_handle.name)\n else:\n (_, ext) = os.path.splitext(url_or_handle)\n if (not ext):\n raise RuntimeError(('No extension in URL: ' + url_or_handle))\n if (ext in savers):\n saver = savers[ext]\n if is_handle:\n saver(thing, url_or_handle, **kwargs)\n else:\n with write_handle(url_or_handle) as handle:\n saver(thing, handle, **kwargs)\n else:\n saver_names = [(key, fn.__name__) for (key, fn) in savers.items()]\n message = \"Unknown extension '{}', supports {}.\"\n raise ValueError(message.format(ext, saver_names))", "docstring": "Save object to file on CNS.\n\nFile format is inferred from path. 
Use save_img(), save_npy(), or save_json()\nif you need to force a particular format.\n\nArgs:\nobj: object to save.\npath: CNS path.\n\nRaises:\nRuntimeError: If file extension not supported.", "source": "codesearchnet"} {"code": "def _apply_func_to_list_of_partitions(self, func, partitions, **kwargs):\n preprocessed_func = self.preprocess_func(func)\n return [obj.apply(preprocessed_func, **kwargs) for obj in partitions]", "docstring": "Applies a function to a list of remote partitions.\n\nNote: The main use for this is to preprocess the func.\n\nArgs:\nfunc: The func to apply\npartitions: The list of partitions\n\nReturns:\nA list of BaseFramePartition objects.", "source": "codesearchnet"} {"code": "def disambiguate_query(self, query, language=None, entities=None):\n \n\n body = {\n \"shortText\": query,\n \"entities\": [],\n \"onlyNER\": \"false\",\n \"customisation\": \"generic\"\n }\n\n if language:\n body['language'] = {\"lang\": language}\n\n if entities:\n body['entities'] = entities\n\n files = {'query': str(body)}\n\n logger.debug('About to submit the following query {}'.format(body))\n\n res, status = self.post(\n self.disambiguate_service,\n files=files,\n headers={'Accept': 'application/json'},\n )\n\n if status == 200:\n return self.decode(res), status\n else:\n logger.debug('Disambiguation failed.')\n return None, status", "docstring": "Call the disambiguation service in order to disambiguate a search query.\n\nArgs:\ntext (str): Query to be disambiguated.\nlanguage (str): language of text (if known)\nentities (list): list of entities or mentions to be supplied by\nthe user.\n\nReturns:\ndict, int: API response and API status.", "source": "juraj-google-style"} {"code": "def get_edge_by_name(self, source_name: str, target_name: str) -> Optional[Edge]:\n \n nodes: NodeList = self._graph.nodes\n source: Optional[Node] = nodes.get_node_by_name(source_name)\n if source is None:\n return None\n target: Optional[Node] = nodes.get_node_by_name(target_name)\n if target is None:\n return None\n return self.get_edge_by_index(source.index, target.index)", "docstring": "Returns the edge connecting the nodes with the specified names if such an edge exists.\n\nArguments:\nsource_name (str): The name of one of the endpoints of queried edge.\ntarget_name (str): The name of the other endpoint of the queried edge.\n\nReturns:\nThe edge connecting the nodes with the specified names\nor `None` if no such node exists.", "source": "juraj-google-style"} {"code": "async def _on_progress_notification(self, progress):\n \n\n conn_string = progress.get('connection_string')\n done = progress.get('done_count')\n total = progress.get('total_count')\n operation = progress.get('operation')\n\n await self.notify_progress(conn_string, operation, done, total, wait=True)", "docstring": "Callback function called when a progress notification is received.\n\nArgs:\nprogress (dict): The received notification containing the progress information", "source": "juraj-google-style"} {"code": "def _FormatSource(self, event):\n \n source_short, _ = self._output_mediator.GetFormattedSources(event)\n if source_short is None:\n data_type = getattr(event, 'data_type', 'UNKNOWN')\n raise errors.NoFormatterFound(\n 'Unable to find event formatter for: {0:s}.'.format(data_type))\n\n return self._SanitizeField(source_short)", "docstring": "Formats the source.\n\nArgs:\nevent (EventObject): event.\n\nReturns:\nstr: formatted source field.", "source": "juraj-google-style"} {"code": "def reciprocal_rank(truth, recommend):\n \n for n 
in range(recommend.size):\n if recommend[n] in truth:\n return 1. / (n + 1)\n return 0.", "docstring": "Reciprocal Rank (RR).\n\nArgs:\ntruth (numpy 1d array): Set of truth samples.\nrecommend (numpy 1d array): Ordered set of recommended samples.\n\nReturns:\nfloat: RR.", "source": "juraj-google-style"} {"code": "def average_data(counts, observable):\n if (not isinstance(observable, dict)):\n observable = make_dict_observable(observable)\n temp = 0\n tot = sum(counts.values())\n for key in counts:\n if (key in observable):\n temp += ((counts[key] * observable[key]) / tot)\n return temp", "docstring": "Compute the mean value of an diagonal observable.\n\nTakes in a diagonal observable in dictionary, list or matrix format and then\ncalculates the sum_i value(i) P(i) where value(i) is the value of the\nobservable for state i.\n\nArgs:\ncounts (dict): a dict of outcomes from an experiment\nobservable (dict or matrix or list): The observable to be averaged over.\nAs an example, ZZ on qubits can be given as:\n* dict: {\"00\": 1, \"11\": 1, \"01\": -1, \"10\": -1}\n* matrix: [[1, 0, 0, 0], [0, -1, 0, 0, ], [0, 0, -1, 0], [0, 0, 0, 1]]\n* matrix diagonal (list): [1, -1, -1, 1]\n\nReturns:\nDouble: Average of the observable", "source": "codesearchnet"} {"code": "def tasks_file_to_task_descriptors(tasks, retries, input_file_param_util, output_file_param_util):\n task_descriptors = []\n path = tasks['path']\n task_min = tasks.get('min')\n task_max = tasks.get('max')\n param_file = dsub_util.load_file(path)\n reader = csv.reader(param_file, delimiter='\\t')\n header = six.advance_iterator(reader)\n job_params = parse_tasks_file_header(header, input_file_param_util, output_file_param_util)\n for row in reader:\n task_id = (reader.line_num - 1)\n if (task_min and (task_id < task_min)):\n continue\n if (task_max and (task_id > task_max)):\n continue\n if (len(row) != len(job_params)):\n dsub_util.print_error(('Unexpected number of fields %s vs %s: line %s' % (len(row), len(job_params), reader.line_num)))\n envs = set()\n inputs = set()\n outputs = set()\n labels = set()\n for i in range(0, len(job_params)):\n param = job_params[i]\n name = param.name\n if isinstance(param, job_model.EnvParam):\n envs.add(job_model.EnvParam(name, row[i]))\n elif isinstance(param, job_model.LabelParam):\n labels.add(job_model.LabelParam(name, row[i]))\n elif isinstance(param, job_model.InputFileParam):\n inputs.add(input_file_param_util.make_param(name, row[i], param.recursive))\n elif isinstance(param, job_model.OutputFileParam):\n outputs.add(output_file_param_util.make_param(name, row[i], param.recursive))\n task_descriptors.append(job_model.TaskDescriptor({'task-id': task_id, 'task-attempt': (1 if retries else None)}, {'labels': labels, 'envs': envs, 'inputs': inputs, 'outputs': outputs}, job_model.Resources()))\n if (not task_descriptors):\n raise ValueError(('No tasks added from %s' % path))\n return task_descriptors", "docstring": "Parses task parameters from a TSV.\n\nArgs:\ntasks: Dict containing the path to a TSV file and task numbers to run\nvariables, input, and output parameters as column headings. 
Subsequent\nlines specify parameter values, one row per job.\nretries: Number of retries allowed.\ninput_file_param_util: Utility for producing InputFileParam objects.\noutput_file_param_util: Utility for producing OutputFileParam objects.\n\nReturns:\ntask_descriptors: an array of records, each containing the task-id,\ntask-attempt, 'envs', 'inputs', 'outputs', 'labels' that defines the set of\nparameters for each task of the job.\n\nRaises:\nValueError: If no job records were provided", "source": "codesearchnet"} {"code": "def delete_merged_branches(self, **kwargs):\n \n path = '/projects/%s/repository/merged_branches' % self.get_id()\n self.manager.gitlab.http_delete(path, **kwargs)", "docstring": "Delete merged branches.\n\nArgs:\n**kwargs: Extra options to send to the server (e.g. sudo)\n\nRaises:\nGitlabAuthenticationError: If authentication is not correct\nGitlabDeleteError: If the server failed to perform the request", "source": "juraj-google-style"} {"code": "def _ParseHeader(self, format_type, value_data):\n \n data_type_map_name = self._HEADER_DATA_TYPE_MAP_NAMES.get(format_type, None)\n if not data_type_map_name:\n raise errors.ParseError(\n 'Unsupported format type: {0:d}'.format(format_type))\n\n data_type_map = self._GetDataTypeMap(data_type_map_name)\n\n try:\n header = self._ReadStructureFromByteStream(\n value_data, 0, data_type_map)\n except (ValueError, errors.ParseError) as exception:\n raise errors.ParseError(\n 'Unable to parse header value with error: {0!s}'.format(\n exception))\n\n header_data_size = data_type_map.GetByteSize()\n if format_type == self._FORMAT_TYPE_10:\n header_data_size = header.signature\n\n cache_header = AppCompatCacheHeader()\n cache_header.header_size = header_data_size\n cache_header.number_of_cached_entries = getattr(\n header, 'number_of_cached_entries', 0)\n\n return cache_header", "docstring": "Parses the header.\n\nArgs:\nformat_type (int): format type.\nvalue_data (bytes): value data.\n\nReturns:\nAppCompatCacheHeader: header.\n\nRaises:\nParseError: if the value data could not be parsed.", "source": "juraj-google-style"} {"code": "def get(self, volume_id):\n \n return self.prepare_model(self.client.api.inspect_volume(volume_id))", "docstring": "Get a volume.\n\nArgs:\nvolume_id (str): Volume name.\n\nReturns:\n(:py:class:`Volume`): The volume.\n\nRaises:\n:py:class:`docker.errors.NotFound`\nIf the volume does not exist.\n:py:class:`docker.errors.APIError`\nIf the server returns an error.", "source": "juraj-google-style"} {"code": "def _cosine_function(x, a, b, t_shift):\n \n\n mean_wind, t = x\n return a * mean_wind * np.cos(np.pi * (t - t_shift) / 12) + b * mean_wind", "docstring": "genrates a diurnal course of windspeed accroding to the cosine function\n\nArgs:\nx: series of euqally distributed windspeed values\na: parameter a for the cosine function\nb: parameter b for the cosine function\nt_shift: parameter t_shift for the cosine function\n\nReturns:\nseries including diurnal course of windspeed.", "source": "juraj-google-style"} {"code": "def next_location(self, raw=False):\n \n if self._response:\n location = self._response.fields.get('location')\n\n if not location or raw:\n return location\n\n return wpull.url.urljoin(self._response.request.url_info.url,\n location)", "docstring": "Returns the next location.\n\nArgs:\nraw (bool): If True, the original string contained in the Location\nfield will be returned. Otherwise, the URL will be\nnormalized to a complete URL.\n\nReturns:\nstr, None: If str, the location. 
Otherwise, no next location.", "source": "juraj-google-style"} {"code": "def _GetInode(self, inode_value):\n \n if isinstance(inode_value, py2to3.INTEGER_TYPES):\n return inode_value\n\n if isinstance(inode_value, float):\n return int(inode_value)\n\n if not isinstance(inode_value, py2to3.STRING_TYPES):\n return -1\n\n if b'-' in inode_value:\n inode_value, _, _ = inode_value.partition(b'-')\n\n try:\n return int(inode_value, 10)\n except ValueError:\n return -1", "docstring": "Retrieves the inode from the inode value.\n\nArgs:\ninode_value (int|str): inode, such as 1 or '27-128-1'.\n\nReturns:\nint: inode or -1 if the inode value cannot be converted to an integer.", "source": "juraj-google-style"} {"code": "def set_brightness(self, brightness):\n if (not (25 <= brightness <= 255)):\n raise ValueError('The brightness needs to be between 25 and 255.')\n payload = self.generate_payload(SET, {self.DPS_INDEX_BRIGHTNESS: brightness})\n data = self._send_receive(payload)\n return data", "docstring": "Set the brightness value of an rgb bulb.\n\nArgs:\nbrightness(int): Value for the brightness (25-255).", "source": "codesearchnet"} {"code": "def rpc_connect(self):\n if (self.coin in COINS):\n rpc_url = (COINS[self.coin]['rpc-url'] + ':')\n if self.testnet:\n rpc_url += COINS[self.coin]['rpc-port-testnet']\n else:\n rpc_url += COINS[self.coin]['rpc-port']\n self.rpc = pyjsonrpc.HttpClient(url=rpc_url, username=COINS[self.coin]['rpc-user'], password=COINS[self.coin]['rpc-password'])\n self.logger.debug(self.coin, 'RPC connection ok')\n self.connected = True\n else:\n self.logger.debug(self.coin, 'bridge not found')\n return self.connected", "docstring": "Connect to a coin daemon's JSON RPC interface.\n\nReturns:\nbool: True if successfully connected, False otherwise.", "source": "codesearchnet"} {"code": "def create(self, query):\n \n if isinstance(query, _query.Query):\n query = query.sql\n try:\n response = self._table._api.tables_insert(self._table.name, query=query)\n except Exception as e:\n raise e\n if 'selfLink' in response:\n return self\n raise Exception(\"View %s could not be created as it already exists\" % str(self))", "docstring": "Creates the view with the specified query.\n\nArgs:\nquery: the query to use to for the View; either a string containing a SQL query or\na Query object.\nReturns:\nThe View instance.\nRaises:\nException if the view couldn't be created or already exists and overwrite was False.", "source": "juraj-google-style"} {"code": "def _AddArtifactNodesAndEdges(self, artifact_names):\n for artifact_name in artifact_names:\n self.graph[artifact_name] = self.Node(is_artifact=True)\n rdf_artifact = artifact_registry.REGISTRY.GetArtifact(artifact_name)\n self._AddDependencyEdges(rdf_artifact)\n self._AddProvidesEdges(rdf_artifact)", "docstring": "Add the artifact nodes to the graph.\n\nFor every artifact that has to be collected, add a node to the dependency\ngraph.\n\nThe edges represent the dependencies. An artifact has outgoing edges to the\nattributes it provides and incoming edges from attributes it depends on.\nInitially, only artifacts without incoming edges are reachable. 
An artifact\nbecomes reachable if all of its dependencies are reachable.\n\nArgs:\nartifact_names: List of names of the artifacts to collect.", "source": "codesearchnet"} {"code": "def html_for_modules_method(method_name, *args, **kwargs):\n method = getattr(modules, method_name)\n value = method(*args, **kwargs)\n return KEY_VALUE_TEMPLATE.format(method_name, value)", "docstring": "Returns an HTML snippet for a Modules API method.\n\nArgs:\nmethod_name: A string containing a Modules API method.\nargs: Positional arguments to be passed to the method.\nkwargs: Keyword arguments to be passed to the method.\n\nReturns:\nString HTML representing the Modules API method and value.", "source": "codesearchnet"} {"code": "def add_config_paths(**kwargs):\n for (k, path) in kwargs.items():\n if (not os.path.exists(path)):\n raise ValueError('Configuration file \"{}\" does not exist'.format(k))\n if (k in cf.get_option('config_paths')):\n raise ValueError('Configuration {!r} already exists'.format(k))\n kwargs.update(**cf.get_option('config_paths'))\n cf.set_option('config_paths', kwargs)", "docstring": "Add to the pool of available configuration files for BIDSLayout.\n\nArgs:\nkwargs: dictionary specifying where to find additional config files.\nKeys are names, values are paths to the corresponding .json file.\n\nExample:\n> add_config_paths(my_config='/path/to/config')\n> layout = BIDSLayout('/path/to/bids', config=['bids', 'my_config'])", "source": "codesearchnet"} {"code": "def compile_src_string_to_pyc_string(src, filename, python_version, python_exe: list[str], mode='exec'):\n if can_compile_bytecode_natively(python_version):\n output = io.BytesIO()\n compile_bytecode.compile_src_to_pyc(src, filename or '<>', output, mode)\n bytecode = output.getvalue()\n else:\n tempfile_options = {'mode': 'w', 'suffix': '.py', 'delete': False}\n tempfile_options.update({'encoding': 'utf-8'})\n fi = compatible_tempfile.NamedTemporaryFile(**tempfile_options)\n try:\n fi.write(src)\n fi.close()\n cmd = python_exe + ['-E', '-', fi.name, filename or fi.name, mode]\n compile_script_src = pytype_source_utils.load_binary_file(_COMPILE_SCRIPT)\n with subprocess.Popen(cmd, stdin=subprocess.PIPE, stdout=subprocess.PIPE) as p:\n bytecode, _ = p.communicate(compile_script_src)\n assert p.poll() == 0, 'Child process failed'\n finally:\n os.unlink(fi.name)\n first_byte = bytecode[0]\n if first_byte == 0:\n return bytecode[1:]\n elif first_byte == 1:\n code = bytecode[1:]\n raise CompileError(utils.native_str(code))\n else:\n raise OSError('_compile.py produced invalid result')", "docstring": "Compile Python source code to pyc data.\n\nThis may use py_compile if the src is for the same version as we're running,\nor else it spawns an external process to produce a .pyc file. The generated\nbytecode (.pyc file) is read and both it and any temporary files are deleted.\n\nArgs:\nsrc: Python sourcecode\nfilename: Name of the source file. 
For error messages.\npython_version: Python version, (major, minor).\npython_exe: A path to a Python interpreter.\nmode: Same as builtins.compile: \"exec\" if source consists of a sequence of\nstatements, \"eval\" if it consists of a single expression, or \"single\" if\nit consists of a single interactive statement.\n\nReturns:\nThe compiled pyc file as a binary string.\nRaises:\nCompileError: If we find a syntax error in the file.\nIOError: If our compile script failed.", "source": "github-repos"} {"code": "def arango_id_to_key(_id):\n \n\n key = re.sub(r\"[^a-zA-Z0-9\\_\\-\\:\\.\\@\\(\\)\\+\\,\\=\\;\\$\\!\\*\\%]+\", r\"_\", _id)\n if len(key) > 254:\n log.error(\n f\"Arango _key cannot be longer than 254 chars: Len={len(key)} Key: {key}\"\n )\n elif len(key) < 1:\n log.error(f\"Arango _key cannot be an empty string: Len={len(key)} Key: {key}\")\n\n return key", "docstring": "Remove illegal chars from potential arangodb _key (id)\n\nArgs:\n_id (str): id to be used as arangodb _key\n\nReturns:\n(str): _key value with illegal chars removed", "source": "juraj-google-style"} {"code": "def set_router_id(self, value=None, default=False, disable=False):\n \n cmd = self.command_builder('router-id', value=value,\n default=default, disable=disable)\n return self.configure_ospf(cmd)", "docstring": "Controls the router id property for the OSPF Proccess\n\nArgs:\nvalue (str): The router-id value\ndefault (bool): Controls the use of the default keyword\ndisable (bool): Controls the use of the no keyword\nReturns:\nbool: True if the commands are completed successfully", "source": "juraj-google-style"} {"code": "def doECDHE(statprv_u, statpub_v, ephmprv_u, ephmpub_v, length=64, salt=None, info=None):\n zs = statprv_u.exchange(statpub_v)\n ze = ephmprv_u.exchange(ephmpub_v)\n z = (ze + zs)\n kdf = c_hkdf.HKDF(c_hashes.SHA256(), length=length, salt=salt, info=info, backend=default_backend())\n k = kdf.derive(z)\n return k", "docstring": "Perform one side of an Ecliptic Curve Diffie Hellman Ephemeral key exchange.\n\nArgs:\nstatprv_u (PriKey): Static Private Key for U\nstatpub_v (PubKey: Static Public Key for V\nephmprv_u (PriKey): Ephemeral Private Key for U\nephmpub_v (PubKey): Ephemeral Public Key for V\nlength (int): Number of bytes to return\nsalt (bytes): Salt to use when computing the key.\ninfo (bytes): Additional information to use when computing the key.\n\nNotes:\nThis makes no assumption about the reuse of the Ephemeral keys passed\nto the function. It is the caller's responsibility to destroy the keys\nafter they are used for doing key generation. 
This implementation is\nthe dhHybrid1 scheme described in NIST 800-56A Revision 2.\n\nReturns:\nbytes: The derived key.", "source": "codesearchnet"} {"code": "def get_option(self, opt_name, bool_option=False):\n parser = argparse.ArgumentParser()\n opt_name = opt_name[:2] if opt_name[:2] == '--' else opt_name\n if bool_option:\n parser.add_argument('--' + opt_name, action='store_true')\n else:\n parser.add_argument('--' + opt_name, type=str, action='store')\n known, _ = parser.parse_known_args(self.options_list)\n return getattr(known, opt_name) if hasattr(known, opt_name) else None", "docstring": "Get a pipeline option value by name\n\nArgs:\nopt_name: The name of the pipeline option.\n\nReturns:\nNone if option is not found in existing option list which is generated\nby parsing value of argument `test-pipeline-options`.", "source": "github-repos"} {"code": "def protocol(alias_name, default=None, allow_none=False):\n warnings.warn('Will be removed in v1.0', DeprecationWarning, stacklevel=2)\n try:\n return _split_docker_link(alias_name)[0]\n except KeyError as err:\n if (default or allow_none):\n return default\n else:\n raise err", "docstring": "Get the protocol from the docker link alias or return the default.\n\nArgs:\nalias_name: The docker link alias\ndefault: The default value if the link isn't available\nallow_none: If the return value can be `None` (i.e. optional)\n\nExamples:\nAssuming a Docker link was created with ``docker --link postgres:db``\nand the resulting environment variable is ``DB_PORT=tcp://172.17.0.82:5432``.\n\n>>> envitro.docker.protocol('DB')\ntcp", "source": "codesearchnet"} {"code": "def add_figure(self, key, url, **kwargs):\n figure = self._check_metadata_for_file(key=key, url=url, **kwargs)\n for dict_key in ('caption', 'label', 'material', 'filename', 'url', 'original_url'):\n if (kwargs.get(dict_key) is not None):\n figure[dict_key] = kwargs[dict_key]\n if key_already_there(figure, self.record.get('figures', ())):\n raise ValueError((\"There's already a figure with the key %s.\" % figure['key']))\n self._append_to('figures', figure)\n self.add_document", "docstring": "Add a figure.\n\nArgs:\nkey (string): document key\nurl (string): document url\nKeyword Args:\ncaption (string): simple description\nlabel (string):\nmaterial (string):\noriginal_url (string): original url\nfilename (string): current url\n\nReturns: None", "source": "codesearchnet"} {"code": "def Sign(self, context):\n \n success = False\n\n for hash in context.ScriptHashes:\n\n contract = self.GetContract(hash)\n if contract is None:\n logger.info(\n f\"Cannot find key belonging to script_hash {hash}. 
Make sure the source address you're trying to sign the transaction for is imported in the wallet.\")\n continue\n\n key = self.GetKeyByScriptHash(hash)\n\n if key is None:\n continue\n\n signature = Helper.Sign(context.Verifiable, key)\n\n res = context.AddSignature(contract, key.PublicKey, signature)\n\n success |= res\n\n return success", "docstring": "Sign the verifiable items ( Transaction, Block, etc ) in the context with the Keypairs in this wallet.\n\nArgs:\ncontext (ContractParameterContext): the context to sign.\n\nReturns:\nbool: if signing is successful for all contracts in this wallet.", "source": "juraj-google-style"} {"code": "def UpdateFromSource(self, source, incremental=True, force_write=False):\n cache = cache_factory.Create(self.cache_options, self.map_name)\n return self.UpdateCacheFromSource(cache, source, incremental, force_write, location=None)", "docstring": "Update this map's cache from the source provided.\n\nThe FileMapUpdater expects to fetch as single map from the source\nand write/merge it to disk. We create a cache to write to, and then call\nUpdateCacheFromSource() with that cache.\n\nNote that AutomountUpdater also calls UpdateCacheFromSource() for each\ncache it is writing, hence the distinct seperation.\n\nArgs:\nsource: A nss_cache.sources.Source object.\nincremental: A boolean flag indicating that an incremental update should\nbe performed, defaults to True.\nforce_write: A boolean flag forcing empty map updates, defaults to False.\n\nReturns:\nAn int indicating success of update (0 == good, fail otherwise).", "source": "github-repos"} {"code": "def replace_default_in_arg_description(description: str, default: Any) -> str:\n description = description.replace('`optional`', OPTIONAL_KEYWORD)\n description = description.replace('**optional**', OPTIONAL_KEYWORD)\n if default is inspect._empty:\n idx = description.find(OPTIONAL_KEYWORD)\n if idx != -1:\n description = description[:idx].rstrip()\n if description.endswith(','):\n description = description[:-1].rstrip()\n elif default is None:\n idx = description.find(OPTIONAL_KEYWORD)\n if idx == -1:\n description = f'{description}, {OPTIONAL_KEYWORD}'\n elif re.search('defaults to `?None`?', description) is not None:\n len_optional = len(OPTIONAL_KEYWORD)\n description = description[:idx + len_optional]\n else:\n str_default = None\n if isinstance(default, (int, float)) and re.search('defaults to `?(.*?)(?:`|$)', description) is not None:\n current_default = re.search('defaults to `?(.*?)(?:`|$)', description).groups()[0]\n if default == eval_math_expression(current_default):\n try:\n str_default = str(type(default)(current_default))\n except Exception:\n str_default = f'`{current_default}`'\n elif isinstance(default, enum.Enum) and default.name == current_default.split('.')[-1]:\n str_default = f'`{current_default}`'\n if str_default is None:\n str_default = stringify_default(default)\n if OPTIONAL_KEYWORD not in description:\n description = f'{description}, {OPTIONAL_KEYWORD}, defaults to {str_default}'\n elif _re_parse_description.search(description) is None:\n idx = description.find(OPTIONAL_KEYWORD)\n len_optional = len(OPTIONAL_KEYWORD)\n description = f'{description[:idx + len_optional]}, defaults to {str_default}'\n else:\n description = _re_parse_description.sub(f'*optional*, defaults to {str_default}', description)\n return description", "docstring": "Catches the default value in the description of an argument inside a docstring and replaces it by the value passed.\n\nArgs:\ndescription (`str`): The 
description of an argument in a docstring to process.\ndefault (`Any`): The default value that would be in the docstring of that argument.\n\nReturns:\n`str`: The description updated with the new default value.", "source": "github-repos"} {"code": "def recipe_dbm_to_bigquery(config, auth_read, auth_write, dbm_report_id, dbm_report_name, dbm_dataset, dbm_table, dbm_schema, is_incremental_load):\n dbm(config, {'auth': auth_read, 'report': {'report_id': dbm_report_id, 'name': dbm_report_name}, 'out': {'bigquery': {'auth': auth_write, 'dataset': dbm_dataset, 'table': dbm_table, 'schema': dbm_schema, 'header': True, 'is_incremental_load': is_incremental_load}}})", "docstring": "Move existing DV360 reports into a BigQuery table.\n\nArgs:\nauth_read (authentication) - Credentials used for reading data.\nauth_write (authentication) - Authorization used for writing data.\ndbm_report_id (integer) - DV360 report ID given in UI, not needed if name used.\ndbm_report_name (string) - Name of report, not needed if ID used.\ndbm_dataset (string) - Existing BigQuery dataset.\ndbm_table (string) - Table to create from this report.\ndbm_schema (json) - Schema provided in JSON list format or empty value to auto detect.\nis_incremental_load (boolean) - Clear data in destination table during this report's time period, then append report data to destination table.", "source": "github-repos"} {"code": "def get_config_string_option(parser: ConfigParser, section: str, option: str, default: str=None) -> str:\n if (not parser.has_section(section)):\n raise ValueError(('config missing section: ' + section))\n return parser.get(section, option, fallback=default)", "docstring": "Retrieves a string value from a parser.\n\nArgs:\nparser: instance of :class:`ConfigParser`\nsection: section name within config file\noption: option (variable) name within that section\ndefault: value to return if option is absent\n\nReturns:\nstring value\n\nRaises:\nValueError: if the section is absent", "source": "codesearchnet"} {"code": "def get_reduce_on_plateau_schedule(optimizer: Optimizer, **kwargs):\n return ReduceLROnPlateau(optimizer, **kwargs)", "docstring": "Create a schedule with a constant learning rate that decreases when a metric has stopped improving.\n\nArgs:\noptimizer ([`~torch.optim.Optimizer`]):\nThe optimizer for which to schedule the learning rate.\nkwargs (`dict`, *optional*):\nExtra parameters to be passed to the scheduler. 
See `torch.optim.lr_scheduler.ReduceLROnPlateau`\nfor possible parameters.\n\nReturn:\n`torch.optim.lr_scheduler.ReduceLROnPlateau` with the appropriate schedule.", "source": "github-repos"} {"code": "def Query(self, request, global_params=None):\n config = self.GetMethodConfig('Query')\n return self._RunMethod(config, request, global_params=global_params)", "docstring": "Runs a BigQuery SQL query synchronously and returns query results if the query completes within a specified timeout.\n\nArgs:\nrequest: (BigqueryJobsQueryRequest) input message\nglobal_params: (StandardQueryParameters, default: None) global arguments\nReturns:\n(QueryResponse) The response message.", "source": "github-repos"} {"code": "def Reboot(self, destination=b''):\n \n self.protocol_handler.Open(self._handle, b'reboot:%s' % destination)", "docstring": "Reboot the device.\n\nArgs:\ndestination: Specify 'bootloader' for fastboot.", "source": "juraj-google-style"} {"code": "def __init__(self, checker, message):\n \n self.checker = checker\n self.message = message\n Validator.validators_count += 1\n \n self.insertion_index = Validator.validators_count", "docstring": "Constructor to create all validators.\n\nArgs:\nchecker: function to verify the constraint.\nInput of this method varies, see SingleFlagValidator and\nmulti_flags_validator for a detailed description.\nmessage: str, error message to be shown to the user.", "source": "juraj-google-style"} {"code": "def verify_duplicates(duplicates, uniques):\n for (uniq1, uniq2) in itertools.combinations(uniques, 2):\n if same_intersection(uniq1, uniq2):\n raise ValueError('Non-unique intersection')\n counter = collections.Counter()\n for dupe in duplicates:\n matches = []\n for (index, uniq) in enumerate(uniques):\n if same_intersection(dupe, uniq):\n matches.append(index)\n if (len(matches) != 1):\n raise ValueError('Duplicate not among uniques', dupe)\n matched = matches[0]\n counter[matched] += 1\n for (index, count) in six.iteritems(counter):\n uniq = uniques[index]\n if (count == 1):\n if ((uniq.s, uniq.t).count(0.0) != 1):\n raise ValueError('Count == 1 should be a single corner', uniq)\n elif (count == 3):\n if ((uniq.s, uniq.t) != (0.0, 0.0)):\n raise ValueError('Count == 3 should be a double corner', uniq)\n else:\n raise ValueError('Unexpected duplicate count', count)", "docstring": "Verify that a set of intersections had expected duplicates.\n\n.. note::\n\nThis is a helper used only by :func:`generic_intersect`.\n\nArgs:\nduplicates (List[.Intersection]): List of intersections\ncorresponding to duplicates that were filtered out.\nuniques (List[.Intersection]): List of \"final\" intersections\nwith duplicates filtered out.\n\nRaises:\nValueError: If the ``uniques`` are not actually all unique.\nValueError: If one of the ``duplicates`` does not correspond to\nan intersection in ``uniques``.\nValueError: If a duplicate occurs only once but does not have\nexactly one of ``s`` and ``t`` equal to ``0.0``.\nValueError: If a duplicate occurs three times but does not have\nexactly both ``s == t == 0.0``.\nValueError: If a duplicate occurs a number other than one or three\ntimes.", "source": "codesearchnet"} {"code": "def write_int16(self, value, little_endian=True):\n \n if little_endian:\n endian = \"<\"\n else:\n endian = \">\"\n return self.pack('%sh' % endian, value)", "docstring": "Pack the value as a signed integer and write 2 bytes to the stream.\n\nArgs:\nvalue:\nlittle_endian (bool): specify the endianness. 
(Default) Little endian.\n\nReturns:\nint: the number of bytes written.", "source": "juraj-google-style"} {"code": "def write(self, output_stream, kmip_version=enums.KMIPVersion.KMIP_1_0):\n local_stream = utils.BytearrayStream()\n if self._unique_identifier:\n self._unique_identifier.write(local_stream, kmip_version=kmip_version)\n else:\n raise ValueError('invalid payload missing the unique identifier attribute')\n if self._signature_data:\n self._signature_data.write(local_stream, kmip_version=kmip_version)\n else:\n raise ValueError('invalid payload missing the signature attribute')\n self.length = local_stream.length()\n super(SignResponsePayload, self).write(output_stream, kmip_version=kmip_version)\n output_stream.write(local_stream.buffer)", "docstring": "Write the data encoding the Sign response to a stream.\n\nArgs:\noutput_stream (stream): A data stream in which to encode object\ndata, supporting a write method; usually a BytearrayStream\nobject.\n\nRaises:\nValueError: Raised if the unique_identifier or signature\nattributes are not defined.", "source": "codesearchnet"} {"code": "def __rdiv__(self, other):\n raise TypeError(\"unsupported operand type(s) for /: '{}' and 'Dimension', please use", "docstring": "Use `__floordiv__` via `x // y` instead.\n\nThis function exists only to have a better error message. Instead of:\n`TypeError: unsupported operand type(s) for /: 'int' and 'Dimension'`,\nthis function will explicitly call for usage of `//` instead.\n\nArgs:\nother: Another `Dimension`.\n\nRaises:\nTypeError.", "source": "github-repos"} {"code": "def remove_perm(self, subj_str, perm_str):\n self._assert_valid_permission(perm_str)\n for perm_str in self._equal_or_higher_perm(perm_str):\n self._perm_dict.setdefault(perm_str, set()).discard(subj_str)", "docstring": "Remove permission from a subject.\n\nArgs:\nsubj_str : str\nSubject for which to remove permission(s)\n\nperm_str : str\nPermission to remove. Implicitly removes all higher permissions. E.g., ``write``\nwill also remove ``changePermission`` if previously granted.", "source": "codesearchnet"} {"code": "def _make_intersection(edge_info, all_edge_nodes):\n edges = []\n for (index, start, end) in edge_info:\n nodes = all_edge_nodes[index]\n new_nodes = _curve_helpers.specialize_curve(nodes, start, end)\n degree = (new_nodes.shape[1] - 1)\n edge = _curve_mod.Curve(new_nodes, degree, _copy=False)\n edges.append(edge)\n return curved_polygon.CurvedPolygon(*edges, metadata=edge_info, _verify=False)", "docstring": "Convert a description of edges into a curved polygon.\n\n.. note::\n\nThis is a helper used only by :meth:`.Surface.intersect`.\n\nArgs:\nedge_info (Tuple[Tuple[int, float, float], ...]): Information\ndescribing each edge in the curved polygon by indicating which\nsurface / edge on the surface and then start and end parameters\nalong that edge. 
(See :func:`.ends_to_curve`.)\nall_edge_nodes (Tuple[numpy.ndarray, ...]): The nodes of three edges\nof the first surface being intersected followed by the nodes of\nthe three edges of the second.\n\nReturns:\n.CurvedPolygon: The intersection corresponding to ``edge_info``.", "source": "codesearchnet"} {"code": "def _OpenFileObject(self, path_spec):\n \n if not path_spec.HasParent():\n raise errors.PathSpecError(\n 'Unsupported path specification without parent.')\n\n file_object = resolver.Resolver.OpenFileObject(\n path_spec.parent, resolver_context=self._resolver_context)\n qcow_file = pyqcow.file()\n qcow_file.open_file_object(file_object)\n return qcow_file", "docstring": "Opens the file-like object defined by path specification.\n\nArgs:\npath_spec (PathSpec): path specification.\n\nReturns:\npyqcow.file: a file-like object.\n\nRaises:\nPathSpecError: if the path specification is incorrect.", "source": "juraj-google-style"} {"code": "def _serializeNT(data):\n \n if isinstance(data, list):\n return [_serializeNT(item) for item in data]\n\n elif isinstance(data, tuple) and hasattr(data, \"_fields\"): \n serialized = _serializeNT(dict(data._asdict()))\n serialized[\"__nt_name\"] = data.__class__.__name__\n\n return serialized\n\n elif isinstance(data, tuple):\n return tuple(_serializeNT(item) for item in data)\n\n elif isinstance(data, dict):\n return {\n key: _serializeNT(data[key])\n for key in data\n }\n\n return data", "docstring": "Serialize namedtuples (and other basic python types) to dictionary with\nsome special properties.\n\nArgs:\ndata (namedtuple/other python types): Data which will be serialized to\ndict.\n\nData can be later automatically de-serialized by calling _deserializeNT().", "source": "juraj-google-style"} {"code": "def get_collections(db, collection=None, prefix=None, suffix=None):\n if (collection is not None):\n return [collection]\n collections = db.collection_names(include_system_collections=False)\n if (prefix is not None):\n collections = [c for c in collections if c.startswith(prefix)]\n if (suffix is not None):\n collections = [c for c in collections if c.endswith(suffix)]\n return sorted(collections)", "docstring": "Returns a sorted list of collection names found in ``db``.\n\nArguments:\n\ndb (Database): A pymongo Database object. Can be obtained\nwith ``get_db``.\n\ncollection (str): Name of a collection. If the collection is\npresent in the MongoDB database, a single-element list will\nbe returned with the collecion name. If not, an empty list\nwill be returned. 
This option is primarly included to allow\nfor quick checking to see if a collection name is present.\nDefault is None, which results in this option being ignored.\n\nprefix (str): If supplied, only collections that begin with\n``prefix`` will be returned.\n\nsuffix (str): If supplied, only collections that end with\n``suffix`` will be returned.\n\nReturns:\n\nlist: A sorted list of collection names.", "source": "codesearchnet"} {"code": "def reward_scope(self, state: Sequence[tf.Tensor], action: Sequence[tf.Tensor], next_state: Sequence[tf.Tensor]) -> Dict[(str, TensorFluent)]:\n scope = {}\n scope.update(self.non_fluents_scope())\n scope.update(self.state_scope(state))\n scope.update(self.action_scope(action))\n scope.update(self.next_state_scope(next_state))\n return scope", "docstring": "Returns the complete reward fluent scope for the\ncurrent `state`, `action` fluents, and `next_state` fluents.\n\nArgs:\nstate (Sequence[tf.Tensor]): The current state fluents.\naction (Sequence[tf.Tensor]): The action fluents.\nnext_state (Sequence[tf.Tensor]): The next state fluents.\n\nReturns:\nA mapping from fluent names to :obj:`rddl2tf.fluent.TensorFluent`.", "source": "codesearchnet"} {"code": "def draw_proposal_recall(img, proposals, proposal_scores, gt_boxes):\n \n box_ious = np_iou(gt_boxes, proposals) \n box_ious_argsort = np.argsort(-box_ious, axis=1)\n good_proposals_ind = box_ious_argsort[:, :3] \n good_proposals_ind = np.unique(good_proposals_ind.ravel())\n\n proposals = proposals[good_proposals_ind, :]\n tags = list(map(str, proposal_scores[good_proposals_ind]))\n img = viz.draw_boxes(img, proposals, tags)\n return img, good_proposals_ind", "docstring": "Draw top3 proposals for each gt.\nArgs:\nproposals: NPx4\nproposal_scores: NP\ngt_boxes: NG", "source": "juraj-google-style"} {"code": "def get_superclasses(self, t):\n if isinstance(t, pytd.ClassType):\n return sum((self.get_superclasses(c) for c in t.cls.bases), [t])\n elif isinstance(t, pytd.AnythingType):\n return [pytd.NamedType('builtins.object')]\n elif isinstance(t, pytd.GenericType):\n return self.get_superclasses(t.base_type)\n else:\n log.warning(\"Can't extract superclasses from %s\", type(t))\n return [pytd.NamedType('builtins.object')]", "docstring": "Get all base classes of this type.\n\nArgs:\nt: A pytd.Type\n\nReturns:\nA list of pytd.Type.", "source": "github-repos"} {"code": "def is_compatible_with(self, other):\n other = as_dtype(other)\n return (self._type_enum in (other.as_datatype_enum, other.base_dtype.as_datatype_enum))", "docstring": "Returns True if the `other` DType will be converted to this DType.\n\nThe conversion rules are as follows:\n\n```python\nDType(T) .is_compatible_with(DType(T)) == True\nDType(T) .is_compatible_with(DType(T).as_ref) == True\nDType(T).as_ref.is_compatible_with(DType(T)) == False\nDType(T).as_ref.is_compatible_with(DType(T).as_ref) == True\n```\n\nArgs:\nother: A `DType` (or object that may be converted to a `DType`).\n\nReturns:\nTrue if a Tensor of the `other` `DType` will be implicitly converted to\nthis `DType`.", "source": "codesearchnet"} {"code": "def _composition_must_be_self_adjoint(operators):\n if len(operators) == 1 and operators[0].is_self_adjoint:\n return True\n if linear_operator_util.is_aat_form(operators):\n return True\n return False", "docstring": "Runs some checks to see if composition operators must be SA.\n\nArgs:\noperators: List of LinearOperators.\n\nReturns:\nTrue if the composition must be SA. 
False if it is not SA OR if we did not\ndetermine whether the composition is SA.", "source": "github-repos"} {"code": "def _step(self, actions):\n \n\n \n \n self.assert_common_preconditions()\n assert len(actions) == len(self._envs)\n\n observations = []\n rewards = []\n dones = []\n infos = []\n\n \n for env, action in zip(self._envs, actions):\n observation, reward, done, info = env.step(action)\n\n observations.append(observation)\n rewards.append(reward)\n dones.append(done)\n infos.append(info)\n\n \n \n return tuple(map(np.stack, [observations, rewards, dones, infos]))", "docstring": "Takes a step in all environments, shouldn't pre-process or record.\n\nSubclasses should override this to do the actual step if something other\nthan the default implementation is desired.\n\nArgs:\nactions: (np.ndarray) with first dimension equal to the batch size.\n\nReturns:\na tuple of stacked raw observations, raw rewards, dones and infos.", "source": "juraj-google-style"} {"code": "def brightness(x, severity=1):\n \n c = [.1, .2, .3, .4, .5][severity - 1]\n\n x = np.array(x) / 255.\n x = tfds.core.lazy_imports.skimage.color.rgb2hsv(x)\n x[:, :, 2] = np.clip(x[:, :, 2] + c, 0, 1)\n x = tfds.core.lazy_imports.skimage.color.hsv2rgb(x)\n x_clip = np.clip(x, 0, 1) * 255\n return around_and_astype(x_clip)", "docstring": "Change brightness of images.\n\nArgs:\nx: numpy array, uncorrupted image, assumed to have uint8 pixel in [0,255].\nseverity: integer, severity of corruption.\n\nReturns:\nnumpy array, image with uint8 pixels in [0,255]. Changed brightness.", "source": "juraj-google-style"} {"code": "def _apply_sequence_to_tensor_op(cls, op_fn, tensor_wrappers):\n raise NotImplementedError()", "docstring": "Applies given sequence-to-tensor op.\n\nThis method is used for implementing ops that take a sequence of tensors and\nreturn a new tensor, such as tf.concat and tf.stack. Implementing wrappers\nshould apply `op_fn` to the backing tensor(s) and return an new wrapper\ninstance with the combined backing tensor.\n\nArgs:\nop_fn: Callable that applies sequence-to-tensor op to the given sequence\nof Tensors. E.g. applies tf.concat.\ntensor_wrappers: a sequence of tensor wrappers to be transformed. All\nelements have the type of the implementing TensorWrapper class.\n\nReturns:\nA TensorWrapper instance with combined backing tensor(s).", "source": "github-repos"} {"code": "def set_metadata(self, key: str, value: str):\n \n if not isinstance(key, str) or not isinstance(value, str):\n raise TypeError(\"'key' and 'value' of metadata MUST be strings\")\n self.metadata[key] = value", "docstring": "Add a new metadata to the message\n\nArgs:\nkey (str): name of the metadata\nvalue (str): value of the metadata", "source": "juraj-google-style"} {"code": "def get(path, objectType, user=None):\n ret = {'Path': path, 'ACLs': []}\n sidRet = _getUserSid(user)\n if (path and objectType):\n dc = daclConstants()\n objectTypeBit = dc.getObjectTypeBit(objectType)\n path = dc.processPath(path, objectTypeBit)\n tdacl = _get_dacl(path, objectTypeBit)\n if tdacl:\n for counter in range(0, tdacl.GetAceCount()):\n tAce = tdacl.GetAce(counter)\n if ((not sidRet['sid']) or (tAce[2] == sidRet['sid'])):\n ret['ACLs'].append(_ace_to_text(tAce, objectTypeBit))\n return ret", "docstring": "Get the ACL of an object. 
Will filter by user if one is provided.\n\nArgs:\npath: The path to the object\nobjectType: The type of object (FILE, DIRECTORY, REGISTRY)\nuser: A user name to filter by\n\nReturns (dict): A dictionary containing the ACL\n\nCLI Example:\n\n.. code-block:: bash\n\nsalt 'minion-id' win_dacl.get c:\\temp directory", "source": "codesearchnet"} {"code": "def GetSizeHint(self, context=None, **unused_kwargs):\n \n context_state = getattr(context, 'state', {})\n\n subcontext = context_state.get('context', None)\n if not subcontext:\n mapped_values = context_state.get('mapped_values', None)\n subcontext = DataTypeMapContext(values={\n type(mapped_values).__name__: mapped_values})\n\n size_hint = 0\n for data_type_map in self._data_type_maps:\n data_type_size = data_type_map.GetSizeHint(context=subcontext)\n if data_type_size is None:\n break\n\n size_hint += data_type_size\n\n return size_hint", "docstring": "Retrieves a hint about the size.\n\nArgs:\ncontext (Optional[DataTypeMapContext]): data type map context, used to\ndetermine the size hint.\n\nReturns:\nint: hint of the number of bytes needed from the byte stream or None.", "source": "juraj-google-style"} {"code": "def to_dataframe(self, view: views.View) -> pandas.DataFrame:\n resources = self._fetch_bundle_entries()\n flattened_view = self._flatten_fhir_to_view(view, resources)\n return pandas.json_normalize(flattened_view)", "docstring": "Returns a Pandas dataframe of the results, if Pandas is installed.\n\nArgs:\nview: the view that defines the desired format of the flattened FHIR\noutput.\n\nReturns:\npandas.DataFrame: dataframe of the view contents.", "source": "github-repos"} {"code": "def from_config(cls, path, directory=None):\n if (not exists(path)):\n raise REPPError('REPP config file not found: {}'.format(path))\n confdir = dirname(path)\n conf = io.open(path, encoding='utf-8').read()\n conf = re.sub(';.*', '', conf).replace('\\n', ' ')\n m = re.search('repp-modules\\\\s*:=\\\\s*((?:[-\\\\w]+\\\\s+)*[-\\\\w]+)\\\\s*\\\\.', conf)\n t = re.search('repp-tokenizer\\\\s*:=\\\\s*([-\\\\w]+)\\\\s*\\\\.', conf)\n a = re.search('repp-calls\\\\s*:=\\\\s*((?:[-\\\\w]+\\\\s+)*[-\\\\w]+)\\\\s*\\\\.', conf)\n f = re.search('format\\\\s*:=\\\\s*(\\\\w+)\\\\s*\\\\.', conf)\n d = re.search('repp-directory\\\\s*:=\\\\s*(.*)\\\\.\\\\s*$', conf)\n if (m is None):\n raise REPPError('repp-modules option must be set')\n if (t is None):\n raise REPPError('repp-tokenizer option must be set')\n mods = m.group(1).split()\n tok = t.group(1).strip()\n active = (a.group(1).split() if (a is not None) else None)\n fmt = (f.group(1).strip() if (f is not None) else None)\n if (directory is None):\n if (d is not None):\n directory = d.group(1).strip(' \"')\n elif exists(joinpath(confdir, (tok + '.rpp'))):\n directory = confdir\n elif exists(joinpath(confdir, 'rpp', (tok + '.rpp'))):\n directory = joinpath(confdir, 'rpp')\n elif exists(joinpath(confdir, '../rpp', (tok + '.rpp'))):\n directory = joinpath(confdir, '../rpp')\n else:\n raise REPPError('Could not find a suitable REPP directory.')\n return REPP.from_file(joinpath(directory, (tok + '.rpp')), directory=directory, active=active)", "docstring": "Instantiate a REPP from a PET-style `.set` configuration file.\n\nThe *path* parameter points to the configuration file.\nSubmodules are loaded from *directory*. 
If *directory* is not\ngiven, it is the directory part of *path*.\n\nArgs:\npath (str): the path to the REPP configuration file\ndirectory (str, optional): the directory in which to search\nfor submodules", "source": "codesearchnet"} {"code": "def remove_option(self, section, name, value=None):\n \n \n if self._is_live():\n raise RuntimeError('Submitted units cannot update their options')\n\n removed = 0\n \n for option in list(self._data['options']):\n \n if option['section'] == section:\n \n if option['name'] == name:\n \n if value is None or option['value'] == value:\n \n self._data['options'].remove(option)\n removed += 1\n\n if removed > 0:\n return True\n\n return False", "docstring": "Remove an option from a unit\n\nArgs:\nsection (str): The section to remove from.\nname (str): The item to remove.\nvalue (str, optional): If specified, only the option matching this value will be removed\nIf not specified, all options with ``name`` in ``section`` will be removed\n\nReturns:\nTrue: At least one item was removed\nFalse: The item requested to remove was not found", "source": "juraj-google-style"} {"code": "def get(self, workflow_id):\n \n try:\n db = self._client[self.database]\n fs = GridFSProxy(GridFS(db.unproxied_object))\n return DataStoreDocument(db[WORKFLOW_DATA_COLLECTION_NAME], fs, workflow_id)\n\n except ConnectionFailure:\n raise DataStoreNotConnected()", "docstring": "Returns the document for the given workflow id.\n\nArgs:\nworkflow_id (str): The id of the document that represents a workflow run.\n\nRaises:\nDataStoreNotConnected: If the data store is not connected to the server.\n\nReturns:\nDataStoreDocument: The document for the given workflow id.", "source": "juraj-google-style"} {"code": "def container_def(image, model_data_url=None, env=None):\n if (env is None):\n env = {}\n c_def = {'Image': image, 'Environment': env}\n if model_data_url:\n c_def['ModelDataUrl'] = model_data_url\n return c_def", "docstring": "Create a definition for executing a container as part of a SageMaker model.\n\nArgs:\nimage (str): Docker image to run for this container.\nmodel_data_url (str): S3 URI of data required by this container,\ne.g. SageMaker training job model artifacts (default: None).\nenv (dict[str, str]): Environment variables to set inside the container (default: None).\nReturns:\ndict[str, str]: A complete container definition object usable with the CreateModel API if passed via\n`PrimaryContainers` field.", "source": "codesearchnet"} {"code": "def console_get_height(con: tcod.console.Console) -> int:\n return int(lib.TCOD_console_get_height(_console(con)))", "docstring": "Return the height of a console.\n\nArgs:\ncon (Console): Any Console instance.\n\nReturns:\nint: The height of a Console.\n\n.. deprecated:: 2.0\nUse `Console.height` instead.", "source": "codesearchnet"} {"code": "def register_rml(self, filepath, **kwargs):\n \n name = os.path.split(filepath)[-1]\n if name in self.rml_maps and self.rml_maps[name] != filepath:\n raise Exception(\"RML name already registered. 
Filenames must be \"\n \"unique.\",\n (self.rml_maps[name], filepath))\n self.rml_maps[name] = filepath", "docstring": "Registers the filepath for an rml mapping\n\nArgs:\n-----\nfilepath: the path the rml file", "source": "juraj-google-style"} {"code": "def _batch_gather(params, indices, axis, batch_dims):\n if not params.shape[:batch_dims].is_compatible_with(indices.shape[:batch_dims]):\n raise ValueError('batch shape from indices %s does not match params shape %s' % (indices.shape[:batch_dims], params.shape))\n if batch_dims > 1:\n if not isinstance(params, ragged_tensor.RaggedTensor):\n if indices.uniform_row_length is None:\n raise ValueError('batch shape from indices does not match params shape: ragged indices dimension corresponds to uniform params dimension')\n params = ragged_tensor.RaggedTensor.from_tensor(params, ragged_rank=1, row_splits_dtype=indices.row_splits.dtype)\n if not isinstance(indices, ragged_tensor.RaggedTensor):\n if params.uniform_row_length is None:\n raise ValueError('batch shape from indices does not match params shape: ragged params dimension corresponds to uniform indices dimension')\n indices = ragged_tensor.RaggedTensor.from_tensor(indices, ragged_rank=1, row_splits_dtype=params.row_splits.dtype)\n return params.with_values(_gather(params.values, indices.values, axis - 1, batch_dims - 1))\n if axis > 1:\n if not isinstance(indices, ragged_tensor.RaggedTensor):\n adjusted_indices = params.with_values(array_ops.repeat(indices, params.row_lengths(), 0))\n else:\n if not isinstance(params, ragged_tensor.RaggedTensor):\n params = ragged_tensor.RaggedTensor.from_tensor(params, ragged_rank=1, row_splits_dtype=indices.row_splits.dtype)\n adjusted_indices = _gather(indices, params.with_values(array_ops.repeat(math_ops.range(params.nrows()), params.row_lengths())), 0, 0)\n return _batch_gather(params, adjusted_indices, axis, batch_dims + 1)\n if indices.shape.rank is None:\n raise ValueError('rank(indices) must be known statically')\n assert batch_dims == 1\n flat_params = _flatten_dims_0_and_1(params)\n adjustments = _row_starts(params, indices.dtype)\n adjustments = _increase_rank_to(adjustments, indices.shape.ndims)\n adjusted_indices = indices + adjustments\n return _gather(flat_params, adjusted_indices, axis - 1, 0)", "docstring": "Helper that implements the body for ragged gather() when batch_dims>0.\n\nArgs:\nparams: The tensor from which to gather values.\nindices: The indices of values to gather.\naxis: The axis in `params` to gather `indices` from.\nbatch_dims: The number of batch dimensions.\n\nReturns:\nA potentially ragged tensor.", "source": "github-repos"} {"code": "def extractOne(query, choices, processor=default_processor, scorer=default_scorer, score_cutoff=0):\n best_list = extractWithoutOrder(query, choices, processor, scorer, score_cutoff)\n try:\n return max(best_list, key=(lambda i: i[1]))\n except ValueError:\n return None", "docstring": "Find the single best match above a score in a list of choices.\n\nThis is a convenience method which returns the single best choice.\nSee extract() for the full arguments list.\n\nArgs:\nquery: A string to match against\nchoices: A list or dictionary of choices, suitable for use with\nextract().\nprocessor: Optional function for transforming choices before matching.\nSee extract().\nscorer: Scoring function for extract().\nscore_cutoff: Optional argument for score threshold. If the best\nmatch is found, but it is not greater than this number, then\nreturn None anyway (\"not a good enough match\"). 
Defaults to 0.\n\nReturns:\nA tuple containing a single match and its score, if a match\nwas found that was above score_cutoff. Otherwise, returns None.", "source": "codesearchnet"} {"code": "def to_xyz(self, buf=None, sort_index=True, index=False, header=False, float_format='{:.6f}'.format, overwrite=True):\n if sort_index:\n molecule_string = self.sort_index().to_string(header=header, index=index, float_format=float_format)\n else:\n molecule_string = self.to_string(header=header, index=index, float_format=float_format)\n space = (' ' * (self.loc[(:, 'atom')].str.len().max() - len(self.iloc[(0, 0)])))\n output = '{n}\\n{message}\\n{alignment}{frame_string}'.format(n=len(self), alignment=space, frame_string=molecule_string, message='Created by chemcoord http:\n if (buf is not None):\n if overwrite:\n with open(buf, mode='w') as f:\n f.write(output)\n else:\n with open(buf, mode='x') as f:\n f.write(output)\n else:\n return output", "docstring": "Write xyz-file\n\nArgs:\nbuf (str): StringIO-like, optional buffer to write to\nsort_index (bool): If sort_index is true, the\n:class:`~chemcoord.Cartesian`\nis sorted by the index before writing.\nfloat_format (one-parameter function): Formatter function\nto apply to column’s elements if they are floats.\nThe result of this function must be a unicode string.\noverwrite (bool): May overwrite existing files.\n\nReturns:\nformatted : string (or unicode, depending on data and options)", "source": "codesearchnet"} {"code": "def filter_db_names(paths: List[str]) -> List[str]:\n return [db_path for db_path in paths if VERSION_RE.match(os.path.basename(db_path))]", "docstring": "Returns a filtered list of `paths`, where every name matches our format.\n\nArgs:\npaths: A list of file names.", "source": "codesearchnet"} {"code": "def call(self, inputs):\n \n out = self.dense(inputs) \n out = self.output_layer(out) \n loc = out[..., :self.latent_size]\n scale_diag = tf.nn.softplus(out[..., self.latent_size:]) + 1e-5 \n return tfd.MultivariateNormalDiag(loc=loc, scale_diag=scale_diag)", "docstring": "Runs the model to generate a distribution `q(z_{1:T} | x_{1:T})`.\n\nArgs:\ninputs: A batch of intermediate representations of image frames\nacross all timesteps, of shape [..., batch_size, timesteps,\nhidden_size].\n\nReturns:\nA batch of MultivariateNormalDiag distributions with event shape\n[latent_size], batch shape [..., batch_size, timesteps], and\nsample shape [sample_shape, ..., batch_size, timesteps,\nlatent_size].", "source": "juraj-google-style"} {"code": "def __init__(self, limit=100, history_file_path=None):\n self._commands = []\n self._limit = limit\n self._history_file_path = history_file_path or self._get_default_history_file_path()\n self._load_history_from_file()", "docstring": "CommandHistory constructor.\n\nArgs:\nlimit: Maximum number of the most recent commands that this instance\nkeeps track of, as an int.\nhistory_file_path: (str) Manually specified path to history file. 
Used in\ntesting.", "source": "github-repos"} {"code": "def remove(self, iterable, data=None):\n \n return self.root.remove(iterable, data=data)", "docstring": "Used to remove from the root node\n\nArgs:\niterable(hashable): index or key used to identify\nitem to remove\ndata: data to be paired with the key", "source": "juraj-google-style"} {"code": "def _MakeExecutable(self, metadata_script):\n mode = os.stat(metadata_script).st_mode\n os.chmod(metadata_script, (mode | stat.S_IEXEC))", "docstring": "Add executable permissions to a file.\n\nArgs:\nmetadata_script: string, the path to the executable file.", "source": "codesearchnet"} {"code": "def register_with_password(self, username, password):\n response = self.api.register(auth_body={'type': 'm.login.dummy'}, kind='user', username=username, password=password)\n return self._post_registration(response)", "docstring": "Register for a new account on this HS.\n\nArgs:\nusername (str): Account username\npassword (str): Account password\n\nReturns:\nstr: Access Token\n\nRaises:\nMatrixRequestError", "source": "codesearchnet"} {"code": "def generate_boilerplate(self, import_roots):\n boilerplate_contents = _boilerplate_template % {'runtime_package': _runtime_package, 'import_roots': str(import_roots), 'zip_safe': self.zip_safe}\n return boilerplate_contents.encode('ascii').decode('ascii')", "docstring": "Generate boilerplate to be insert into __main__.py\n\nWe don't know the encoding of the main source file, so\nrequire that the template be pure ascii, which we can safely\ninsert.\n\nReturns:\nA string containing only ascii characters", "source": "github-repos"} {"code": "def run(argv=None, save_main_session=True, test_pipeline=None) -> PipelineResult:\n known_args, pipeline_args = parse_known_args(argv)\n pipeline_options = PipelineOptions(pipeline_args)\n pipeline_options.view_as(SetupOptions).save_main_session = save_main_session\n data_types = {'numpy': (numpy.array, XGBoostModelHandlerNumpy), 'pandas': (pandas.DataFrame, XGBoostModelHandlerPandas), 'scipy': (scipy.sparse.csr_matrix, XGBoostModelHandlerSciPy), 'datatable': (datatable.Frame, XGBoostModelHandlerDatatable)}\n input_data_type, model_handler = data_types[known_args.input_type]\n xgboost_model_handler = KeyedModelHandler(model_handler(model_class=xgboost.XGBClassifier, model_state=known_args.model_state, large_model=known_args.large_model))\n input_data = load_sklearn_iris_test_data(data_type=input_data_type, split=known_args.split)\n pipeline = test_pipeline\n if not test_pipeline:\n pipeline = beam.Pipeline(options=pipeline_options)\n predictions = pipeline | 'ReadInputData' >> beam.Create(input_data) | 'RunInference' >> RunInference(xgboost_model_handler) | 'PostProcessOutputs' >> beam.ParDo(PostProcessor())\n _ = predictions | 'WriteOutput' >> beam.io.WriteToText(known_args.output, shard_name_template='', append_trailing_newlines=True)\n result = pipeline.run()\n result.wait_until_finish()\n return result", "docstring": "Args:\nargv: Command line arguments defined for this example.\nsave_main_session: Used for internal testing.\ntest_pipeline: Used for internal testing.", "source": "github-repos"} {"code": "def depthwise_conv(self, input_tensor: core.Tensor) -> Mapping[str, core.Tensor]:\n scale = [1.0] * self.out_channel_size\n offset = [0.5] * self.out_channel_size\n mean, variance = (scale, offset)\n out = nn_ops.depthwise_conv2d_native(input_tensor, self.filters, strides=[1, 2, 2, 1], dilations=[1, 1, 1, 1], padding='SAME', data_format='NHWC')\n if has_bias:\n out = 
nn_ops.bias_add(out, self.bias)\n if has_batch_norm:\n out, _, _, _, _, _ = nn_ops.fused_batch_norm_v3(out, scale, offset, mean, variance, is_training=False)\n if activation_fn is not None:\n out = activation_fn(out)\n return {'output': out}", "docstring": "Performs a 2D depthwise convolution operation.\n\nArgs:\ninput_tensor: Input tensor to perform convolution on.\n\nReturns:\nA map of: output key -> output result.", "source": "github-repos"} {"code": "def read_classification_results(storage_client, file_path):\n \n if storage_client:\n \n success = False\n retry_count = 0\n while retry_count < 4:\n try:\n blob = storage_client.get_blob(file_path)\n if not blob:\n return {}\n if blob.size > MAX_ALLOWED_CLASSIFICATION_RESULT_SIZE:\n logging.warning('Skipping classification result because it''s too '\n 'big: %d bytes for %s', blob.size, file_path)\n return None\n buf = BytesIO()\n blob.download_to_file(buf)\n buf.seek(0)\n success = True\n break\n except Exception:\n retry_count += 1\n time.sleep(5)\n if not success:\n return None\n else:\n \n try:\n with open(file_path, 'rb') as f:\n buf = BytesIO(f.read())\n except IOError:\n return None\n result = {}\n if PY3:\n buf = StringIO(buf.read().decode('UTF-8'))\n for row in csv.reader(buf):\n try:\n image_filename = row[0]\n if image_filename.endswith('.png') or image_filename.endswith('.jpg'):\n image_filename = image_filename[:image_filename.rfind('.')]\n label = int(row[1])\n except (IndexError, ValueError):\n continue\n result[image_filename] = label\n return result", "docstring": "Reads classification results from the file in Cloud Storage.\n\nThis method reads file with classification results produced by running\ndefense on singe batch of adversarial images.\n\nArgs:\nstorage_client: instance of CompetitionStorageClient or None for local file\nfile_path: path of the file with results\n\nReturns:\ndictionary where keys are image names or IDs and values are classification\nlabels", "source": "juraj-google-style"} {"code": "def decompress(ctype, unc_len, data):\n \n if ctype == UBIFS_COMPR_LZO:\n try:\n return lzo.decompress(b''.join((b'\\xf0', struct.pack('>I', unc_len), data)))\n except Exception as e:\n error(decompress, 'Warn', 'LZO Error: %s' % e)\n elif ctype == UBIFS_COMPR_ZLIB:\n try:\n return zlib.decompress(data, -11)\n except Exception as e:\n error(decompress, 'Warn', 'ZLib Error: %s' % e)\n else:\n return data", "docstring": "Decompress data.\n\nArguments:\nInt:ctype -- Compression type LZO, ZLIB (*currently unused*).\nInt:unc_len -- Uncompressed data lenth.\nStr:data -- Data to be uncompessed.\n\nReturns:\nUncompressed Data.", "source": "juraj-google-style"} {"code": "def fetch(self, virtual_account_id, data={}, **kwargs):\n return super(VirtualAccount, self).fetch(virtual_account_id, data, **kwargs)", "docstring": "Fetch Virtual Account for given Id\n\nArgs:\nvirtual_account_id :\nId for which Virtual Account object has to be retrieved\n\nReturns:\nVirtual Account dict for given Virtual Account Id", "source": "codesearchnet"} {"code": "def build_graph(path, term_depth=1000, skim_depth=10, d_weights=False, **kwargs):\n click.echo('\\nTokenizing text...')\n t = Text.from_file(path)\n click.echo(('Extracted %d tokens' % len(t.tokens)))\n m = Matrix()\n click.echo('\\nIndexing terms:')\n m.index(t, t.most_frequent_terms(term_depth), **kwargs)\n g = Skimmer()\n click.echo('\\nGenerating graph:')\n g.build(t, m, skim_depth, d_weights)\n return g", "docstring": "Tokenize a text, index a term matrix, and build out a 
graph.\n\nArgs:\npath (str): The file path.\nterm_depth (int): Consider the N most frequent terms.\nskim_depth (int): Connect each word to the N closest siblings.\nd_weights (bool): If true, give \"close\" nodes low weights.\n\nReturns:\nSkimmer: The indexed graph.", "source": "codesearchnet"} {"code": "def build_input_pipeline(x, y, batch_size):\n \n training_dataset = tf.data.Dataset.from_tensor_slices((x, y))\n training_batches = training_dataset.repeat().batch(batch_size)\n training_iterator = tf.compat.v1.data.make_one_shot_iterator(training_batches)\n batch_features, batch_labels = training_iterator.get_next()\n return batch_features, batch_labels", "docstring": "Build a Dataset iterator for supervised classification.\n\nArgs:\nx: Numpy `array` of features, indexed by the first dimension.\ny: Numpy `array` of labels, with the same first dimension as `x`.\nbatch_size: Number of elements in each training batch.\n\nReturns:\nbatch_features: `Tensor` feed features, of shape\n`[batch_size] + x.shape[1:]`.\nbatch_labels: `Tensor` feed of labels, of shape\n`[batch_size] + y.shape[1:]`.", "source": "juraj-google-style"} {"code": "def datasets_delete(self, dataset_name, delete_contents):\n url = (Api._ENDPOINT + (Api._DATASETS_PATH % dataset_name))\n args = {}\n if delete_contents:\n args['deleteContents'] = True\n return datalab.utils.Http.request(url, method='DELETE', args=args, credentials=self._credentials, raw_response=True)", "docstring": "Issues a request to delete a dataset.\n\nArgs:\ndataset_name: the name of the dataset to delete.\ndelete_contents: if True, any tables in the dataset will be deleted. If False and the\ndataset is non-empty an exception will be raised.\nReturns:\nA parsed result object.\nRaises:\nException if there is an error performing the operation.", "source": "codesearchnet"} {"code": "def get_aws_regions(*, force=False):\n \n from cloud_inquisitor.config import dbconfig\n global __regions\n\n if force or not __regions:\n logger.debug('Loading list of AWS regions from static data')\n data = requests.get('https:\n rgx = re.compile(dbconfig.get('ignored_aws_regions_regexp', default='(^cn-|GLOBAL|-gov)'), re.I)\n __regions = sorted(list({x['region'] for x in data['prefixes'] if not rgx.search(x['region'])}))\n\n return __regions", "docstring": "Load a list of AWS regions from the AWS static data.\n\nArgs:\nforce (`bool`): Force fetch list of regions even if we already have a cached version\n\nReturns:\n:obj:`list` of `str`", "source": "juraj-google-style"} {"code": "def start_greedy_ensemble_search(automated_run, session, path):\n module = functions.import_string_code_as_module(automated_run.source)\n assert (module.metric_to_optimize in automated_run.base_learner_origin.metric_generators)\n best_ensemble = []\n secondary_learner = automated_run.base_learner_origin.return_estimator()\n secondary_learner.set_params(**module.secondary_learner_hyperparameters)\n for i in range(module.max_num_base_learners):\n best_score = (- float('inf'))\n current_ensemble = best_ensemble[:]\n for base_learner in session.query(models.BaseLearner).filter_by(job_status='finished').all():\n if (base_learner in current_ensemble):\n continue\n current_ensemble.append(base_learner)\n existing_ensemble = session.query(models.StackedEnsemble).filter_by(base_learner_origin_id=automated_run.base_learner_origin.id, secondary_learner_hyperparameters=secondary_learner.get_params(), base_learner_ids=sorted([bl.id for bl in current_ensemble])).first()\n if (existing_ensemble and 
(existing_ensemble.job_status == 'finished')):\n score = existing_ensemble.individual_score[module.metric_to_optimize]\n elif (existing_ensemble and (existing_ensemble.job_status != 'finished')):\n eval_stacked_ensemble(existing_ensemble, session, path)\n score = existing_ensemble.individual_score[module.metric_to_optimize]\n else:\n stacked_ensemble = models.StackedEnsemble(secondary_learner_hyperparameters=secondary_learner.get_params(), base_learners=current_ensemble, base_learner_origin=automated_run.base_learner_origin, job_status='started')\n session.add(stacked_ensemble)\n session.commit()\n eval_stacked_ensemble(stacked_ensemble, session, path)\n score = stacked_ensemble.individual_score[module.metric_to_optimize]\n score = ((- score) if module.invert_metric else score)\n if (best_score < score):\n best_score = score\n best_ensemble = current_ensemble[:]\n current_ensemble.pop()", "docstring": "Starts an automated ensemble search using greedy forward model selection.\n\nThe steps for this search are adapted from \"Ensemble Selection from Libraries of Models\" by\nCaruana.\n\n1. Start with the empty ensemble\n\n2. Add to the ensemble the model in the library that maximizes the ensemmble's\nperformance on the error metric.\n\n3. Repeat step 2 for a fixed number of iterations or until all models have been used.\n\nArgs:\nautomated_run (xcessiv.models.AutomatedRun): Automated run object\n\nsession: Valid SQLAlchemy session\n\npath (str, unicode): Path to project folder", "source": "codesearchnet"} {"code": "def __init__(self, cache_file=settings.CACHE_FILE):\n \n self.cache_file = cache_file\n \n \n self.snip_ctimes = {}\n reset_cache = False\n if os.path.exists(self.cache_file):\n self.cache = yaml_loader.YamlLoader.load_yaml_by_path(cache_file) or {}\n if self.cache.get('version', '0.0.0') != devassistant.__version__:\n reset_cache = True\n else:\n if not os.path.exists(os.path.dirname(cache_file)):\n os.makedirs(os.path.dirname(cache_file))\n reset_cache = True\n\n \n \n if reset_cache:\n f = open(cache_file, 'w')\n self.cache = {'version': devassistant.__version__}\n f.close()", "docstring": "Inits a cache objects with given cache_file. Creates the cache file if\nit doesn't exist. 
If cache_file exists, but was created with different\nDevAssistant version, it gets deleted.\n\nArgs:\ncache_file: cache file to use", "source": "juraj-google-style"} {"code": "def update_expected_keys(self, model, expected_keys: List[str], loaded_keys: List[str]) -> List[str]:\n return expected_keys", "docstring": "Override this method if you want to adjust the `update_expected_keys`.\n\nArgs:\nexpected_keys (`List[str]`, *optional*):\nThe list of the expected keys in the initialized model.\nloaded_keys (`List[str]`, *optional*):\nThe list of the loaded keys in the checkpoint.", "source": "github-repos"} {"code": "def preprocess_for_eval(image, image_size=224, normalize=True):\n \n if normalize: image = tf.to_float(image) / 255.0\n image = _do_scale(image, image_size + 32)\n if normalize: image = _normalize(image)\n image = _center_crop(image, image_size)\n image = tf.reshape(image, [image_size, image_size, 3])\n return image", "docstring": "Preprocesses the given image for evaluation.\n\nArgs:\nimage: `Tensor` representing an image of arbitrary size.\nimage_size: int, how large the output image should be.\nnormalize: bool, if True the image is normalized.\n\nReturns:\nA preprocessed image `Tensor`.", "source": "juraj-google-style"} {"code": "def movies_opening(self, **kwargs):\n \n path = self._get_path('movies_opening')\n\n response = self._GET(path, kwargs)\n self._set_attrs_to_values(response)\n return response", "docstring": "Gets the current opening movies from the API.\n\nArgs:\nlimit (optional): limits the number of movies returned, default=10\ncountry (optional): localized data for selected country, default=\"us\"\n\nReturns:\nA dict respresentation of the JSON returned from the API.", "source": "juraj-google-style"} {"code": "def __init__(self, name, aliases=None, description=None, urls=None):\n \n super(StructureFamilyDefinition, self).__init__(\n name, aliases=aliases, description=description, urls=urls)\n self.members = []\n self.runtime = None", "docstring": "Initializes a structure family data type definition.\n\nArgs:\nname (str): name.\naliases (Optional[list[str]]): aliases.\ndescription (Optional[str]): description.\nurls (Optional[list[str]]): URLs.", "source": "juraj-google-style"} {"code": "def convert_dicts(d, to_class=AttrDictWrapper, from_class=dict):\n d_ = to_class()\n for (key, value) in d.iteritems():\n if isinstance(value, from_class):\n d_[key] = convert_dicts(value, to_class=to_class, from_class=from_class)\n else:\n d_[key] = value\n return d_", "docstring": "Recursively convert dict and UserDict types.\n\nNote that `d` is unchanged.\n\nArgs:\nto_class (type): Dict-like type to convert values to, usually UserDict\nsubclass, or dict.\nfrom_class (type): Dict-like type to convert values from. 
If a tuple,\nmultiple types are converted.\n\nReturns:\nConverted data as `to_class` instance.", "source": "codesearchnet"} {"code": "def __init__(self, schema, uid):\n \n message = 'UID \"{}\" is not of the schema \"{}\".'.format(uid, schema)\n super(SchemaUIDConflict, self).__init__(message)", "docstring": "Exception raised when a UID is not matching provided schema.\n\nArgs:\nschema (string): given schema\nuid (string): UID which conflicts the schema", "source": "juraj-google-style"} {"code": "def __get_merged_api_info(self, services):\n \n merged_api_info = services[0].api_info\n\n \n \n for service in services[1:]:\n if not merged_api_info.is_same_api(service.api_info):\n raise api_exceptions.ApiConfigurationError(\n _MULTICLASS_MISMATCH_ERROR_TEMPLATE % (service.api_info.name,\n service.api_info.api_version))\n\n return merged_api_info", "docstring": "Builds a description of an API.\n\nArgs:\nservices: List of protorpc.remote.Service instances implementing an\napi/version.\n\nReturns:\nThe _ApiInfo object to use for the API that the given services implement.\n\nRaises:\nApiConfigurationError: If there's something wrong with the API\nconfiguration, such as a multiclass API decorated with different API\ndescriptors (see the docstring for api()).", "source": "juraj-google-style"} {"code": "def assert_not_almost_equal(first, second, places=None, msg=None, delta=None, extras=None):\n _call_unittest_assertion(_pyunit_proxy.assertNotAlmostEqual, first, second, places=places, msg=msg, delta=delta, extras=extras)", "docstring": "Asserts that first is not almost equal to second.\n\nArgs:\nfirst: The first value to compare.\nsecond: The second value to compare.\nplaces: How many decimal places to take into account for comparison.\nNote that decimal places (from zero) are usually not the same\nas significant digits (measured from the most significant digit).\nmsg: A string that adds additional info about the failure.\ndelta: Delta to use for comparison instead of decimal places.\nextras: An optional field for extra information to be included in\ntest result.", "source": "github-repos"} {"code": "def read(path):\n \n if fs.exists(path):\n with open(path) as infile:\n components = infile.read().split()\n pid = int(components[0])\n date = datetime.date.fromtimestamp(float(components[1]))\n return pid, date\n else:\n return None, None", "docstring": "Read the contents of a LockFile.\n\nArguments:\npath (str): Path to lockfile.\n\nReturns:\nTuple(int, datetime): The integer PID of the lock owner, and the\ndate the lock was required. 
If the lock is not claimed, both\nvalues are None.", "source": "juraj-google-style"} {"code": "def parent_callback(self, parent_fu):\n \n if parent_fu.done() is True:\n e = parent_fu._exception\n if e:\n super().set_exception(e)\n else:\n super().set_result(self.file_obj)\n return", "docstring": "Callback from executor future to update the parent.\n\nArgs:\n- parent_fu (Future): Future returned by the executor along with callback\n\nReturns:\n- None\n\nUpdates the super() with the result() or exception()", "source": "juraj-google-style"} {"code": "def foo(x: int, *args, y: str, **kwargs) -> float:\n del x, y, args, kwargs", "docstring": "A function.\n\nArgs:\nx: Input 1.\n*args: Variable positional args.\ny: Input 2.\n**kwargs: Variable keyword args.\n\nReturns:\nThe result.", "source": "github-repos"} {"code": "def _lower_if_str(item):\n \n \n try:\n string_type = basestring\n except NameError:\n string_type = str\n\n if isinstance(item, string_type):\n return item.lower()\n\n return item", "docstring": "Try to convert item to lowercase, if it is string.\n\nArgs:\nitem (obj): Str, unicode or any other object.\n\nReturns:\nobj: ``item.lower()`` if `item` is ``str`` or ``unicode``, else just \\\n`item` itself.", "source": "juraj-google-style"} {"code": "def backup_value(self, value, up_to):\n self.N += 1\n self.W += value\n if ((self.parent is None) or (self is up_to)):\n return\n self.parent.backup_value(value, up_to)", "docstring": "Propagates a value estimation up to the root node.\n\nArgs:\nvalue: the value to be propagated (1 = black wins, -1 = white wins)\nup_to: the node to propagate until.", "source": "codesearchnet"} {"code": "def __init__(self, key, value, timeout):\n \n self.key = key\n self.value = value\n self.expiration = time.clock() * 1000 + timeout", "docstring": "Creates instance of the cache entry.\n\nArgs:\nkey: the unique key used to identify and locate the value.\nvalue: the cached value.\ntimeout: time to live for the object in milliseconds", "source": "juraj-google-style"} {"code": "def delete(self, context_id, address_list):\n if (context_id not in self._contexts):\n return False\n context = self._contexts[context_id]\n for add in address_list:\n if (not self.address_is_valid(address=add)):\n raise AuthorizationException(address=add)\n context.delete_direct(address_list)\n return True", "docstring": "Delete the values associated with list of addresses, for a specific\ncontext referenced by context_id.\n\nArgs:\ncontext_id (str): the return value of create_context, referencing\na particular context.\naddress_list (list): a list of address strs\n\nReturns:\n(bool): True if the operation is successful, False if\nthe context_id doesn't reference a known context.\n\nRaises:\nAuthorizationException: Raised when an address in address_list is\nnot authorized either by not being in the inputs for the\ntxn associated with this context, or it is under a namespace\nbut the characters that are under the namespace are not valid\naddress characters.", "source": "codesearchnet"} {"code": "def arg_types(parsed: Parsed, errors: Errors) -> Tuple[(Parsed, Errors)]:\n func_pattern = re.compile('\\\\s*[a-zA-Z]+\\\\(')\n nsarg_pattern = re.compile('^\\\\s*([A-Z]+):(.*?)\\\\s*$')\n for span in parsed:\n if ((parsed[span]['type'] != 'Function') or ('parens_span' not in parsed[span])):\n continue\n for (i, arg) in enumerate(parsed[span]['args']):\n nsarg_matches = nsarg_pattern.match(arg['arg'])\n if func_pattern.match(arg['arg']):\n parsed[span]['args'][i].update({'type': 'Function'})\n 
elif nsarg_matches:\n (start, end) = arg['span']\n ns = nsarg_matches.group(1)\n ns_val = nsarg_matches.group(2)\n ns_span = nsarg_matches.span(1)\n ns_span = ((ns_span[0] + start), ((ns_span[1] + start) - 1))\n ns_val_span = nsarg_matches.span(2)\n ns_val_span = ((ns_val_span[0] + start), ((ns_val_span[1] + start) - 1))\n parsed[span]['args'][i].update({'type': 'NSArg', 'ns': ns, 'ns_span': ns_span, 'ns_val': ns_val, 'ns_val_span': ns_val_span})\n else:\n parsed[span]['args'][i].update({'type': 'StrArg'})\n return (parsed, errors)", "docstring": "Add argument types to parsed function data structure\n\nArgs:\nparsed: function and arg locations in BEL string\nerrors: error messages\n\nReturns:\n(parsed, errors): parsed, arguments with arg types plus error messages", "source": "codesearchnet"} {"code": "def forward(self, inputs_embeds, attention_mask: Optional[torch.Tensor]=None, output_attentions: Optional[bool]=None, output_hidden_states: Optional[bool]=None, return_dict: Optional[bool]=None) -> Union[Tuple, BaseModelOutput]:\n output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions\n output_hidden_states = output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states\n return_dict = return_dict if return_dict is not None else self.config.use_return_dict\n encoder_states = () if output_hidden_states else None\n all_attentions = () if output_attentions else None\n hidden_states = inputs_embeds\n for idx, encoder_layer in enumerate(self.layers):\n if output_hidden_states:\n encoder_states = encoder_states + (hidden_states,)\n layer_outputs = encoder_layer(hidden_states, attention_mask, output_attentions=output_attentions)\n hidden_states = layer_outputs[0]\n if output_attentions:\n all_attentions = all_attentions + (layer_outputs[1],)\n if output_hidden_states:\n encoder_states = encoder_states + (hidden_states,)\n if not return_dict:\n return tuple((v for v in [hidden_states, encoder_states, all_attentions] if v is not None))\n return BaseModelOutput(last_hidden_state=hidden_states, hidden_states=encoder_states, attentions=all_attentions)", "docstring": "Args:\ninputs_embeds (`torch.FloatTensor` of shape `(batch_size, sequence_length, hidden_size)`):\nEmbedded representation of the inputs. Should be float, not int tokens.\nattention_mask (`torch.Tensor` of shape `(batch_size, sequence_length)`, *optional*):\nMask to avoid performing attention on padding token indices. Mask values selected in `[0, 1]`:\n\n- 1 for tokens that are **not masked**,\n- 0 for tokens that are **masked**.\n\n[What are attention masks?](../glossary#attention-mask)\noutput_attentions (`bool`, *optional*):\nWhether or not to return the attentions tensors of all attention layers. See `attentions` under\nreturned tensors for more detail.\noutput_hidden_states (`bool`, *optional*):\nWhether or not to return the hidden states of all layers. 
See `hidden_states` under returned tensors\nfor more detail.\nreturn_dict (`bool`, *optional*):\nWhether or not to return a [`~utils.ModelOutput`] instead of a plain tuple.", "source": "github-repos"} {"code": "def _kl_categorical_categorical(a, b, name=None):\n with ops.name_scope(name, 'kl_categorical_categorical', values=[a.logits, b.logits]):\n delta_log_probs1 = nn_ops.log_softmax(a.logits) - nn_ops.log_softmax(b.logits)\n return math_ops.reduce_sum(nn_ops.softmax(a.logits) * delta_log_probs1, axis=-1)", "docstring": "Calculate the batched KL divergence KL(a || b) with a and b Categorical.\n\nArgs:\na: instance of a Categorical distribution object.\nb: instance of a Categorical distribution object.\nname: (optional) Name to use for created operations.\ndefault is \"kl_categorical_categorical\".\n\nReturns:\nBatchwise KL(a || b)", "source": "github-repos"} {"code": "def format_config(sensor_graph):\n \n\n cmdfile = CommandFile(\"Config Variables\", \"1.0\")\n\n for slot in sorted(sensor_graph.config_database, key=lambda x: x.encode()):\n for conf_var, conf_def in sorted(sensor_graph.config_database[slot].items()):\n conf_type, conf_val = conf_def\n\n if conf_type == 'binary':\n conf_val = 'hex:' + hexlify(conf_val)\n\n cmdfile.add(\"set_variable\", slot, conf_var, conf_type, conf_val)\n\n return cmdfile.dump()", "docstring": "Extract the config variables from this sensor graph in ASCII format.\n\nArgs:\nsensor_graph (SensorGraph): the sensor graph that we want to format\n\nReturns:\nstr: The ascii output lines concatenated as a single string", "source": "juraj-google-style"} {"code": "def easeInOutElastic(n, amplitude=1, period=0.5):\n \n _checkRange(n)\n n *= 2\n if n < 1:\n return easeInElastic(n, amplitude=amplitude, period=period) / 2\n else:\n return easeOutElastic(n-1, amplitude=amplitude, period=period) / 2 + 0.5", "docstring": "An elastic tween function wobbles towards the midpoint.\n\nArgs:\nn (float): The time progress, starting at 0.0 and ending at 1.0.\n\nReturns:\n(float) The line progress, starting at 0.0 and ending at 1.0. Suitable for passing to getPointOnLine().", "source": "juraj-google-style"} {"code": "def gamma_automatic(kpts=(1, 1, 1), shift=(0, 0, 0)):\n \n return Kpoints(\"Automatic kpoint scheme\", 0,\n Kpoints.supported_modes.Gamma, kpts=[kpts],\n kpts_shift=shift)", "docstring": "Convenient static constructor for an automatic Gamma centered Kpoint\ngrid.\n\nArgs:\nkpts: Subdivisions N_1, N_2 and N_3 along reciprocal lattice\nvectors. Defaults to (1,1,1)\nshift: Shift to be applied to the kpoints. 
Defaults to (0,0,0).\n\nReturns:\nKpoints object", "source": "juraj-google-style"} {"code": "def _pad_images(self, images: 'torch.Tensor'):\n height, width = get_image_size(images, channel_dim=ChannelDimension.FIRST)\n pad_height = int(np.sqrt(height / 2) * 3)\n pad_width = int(np.sqrt(width / 2) * 3)\n return F.pad(images, padding=(pad_width, pad_height), padding_mode='reflect')", "docstring": "Args:\nimage (`torch.Tensor`):\nImage to pad.", "source": "github-repos"} {"code": "def bottleneck_block_v1(cnn, depth, depth_bottleneck, stride):\n input_layer = cnn.top_layer\n in_size = cnn.top_size\n name_key = 'resnet_v1'\n name = (name_key + str(cnn.counts[name_key]))\n cnn.counts[name_key] += 1\n with tf.variable_scope(name):\n if (depth == in_size):\n if (stride == 1):\n shortcut = input_layer\n else:\n shortcut = cnn.apool(1, 1, stride, stride, input_layer=input_layer, num_channels_in=in_size)\n else:\n shortcut = cnn.conv(depth, 1, 1, stride, stride, activation=None, use_batch_norm=True, input_layer=input_layer, num_channels_in=in_size, bias=None)\n cnn.conv(depth_bottleneck, 1, 1, stride, stride, input_layer=input_layer, num_channels_in=in_size, use_batch_norm=True, bias=None)\n cnn.conv(depth_bottleneck, 3, 3, 1, 1, mode='SAME_RESNET', use_batch_norm=True, bias=None)\n res = cnn.conv(depth, 1, 1, 1, 1, activation=None, use_batch_norm=True, bias=None)\n output = tf.nn.relu((shortcut + res))\n cnn.top_layer = output\n cnn.top_size = depth", "docstring": "Bottleneck block with identity short-cut for ResNet v1.\n\nArgs:\ncnn: the network to append bottleneck blocks.\ndepth: the number of output filters for this bottleneck block.\ndepth_bottleneck: the number of bottleneck filters for this block.\nstride: Stride used in the first layer of the bottleneck block.", "source": "codesearchnet"} {"code": "def patch_on_value(src: symbolic.Symbolic, old_value: Any, value: Any=None, value_fn: Optional[Callable[[Any], Any]]=None, skip_notification: Optional[bool]=None) -> Any:\n return _conditional_patch(src, lambda k, v, p: v == old_value, value, value_fn, skip_notification)", "docstring": "Recursively patch values on matched values.\n\nExample::\n\nd = pg.Dict(a={'x': 1}, b=1)\nprint(pg.patching.patch_on_value(d, 1, value=3))\n# {a={x=3}, b=3}\n\nArgs:\nsrc: symbolic value to patch.\nold_value: Old value to match.\nvalue: New value for field that satisfy `condition`.\nvalue_fn: Callable object that produces new value based on old value.\nIf not None, `value` must be None.\nskip_notification: If True, `on_change` event will not be triggered for this\noperation. If None, the behavior is decided by `pg.notify_on_rebind`.\nPlease see `symbolic.Symbolic.rebind` for details.\n\nReturns:\n`src` after being patched.", "source": "github-repos"} {"code": "def create_token_type_ids_from_sequences(self, token_ids_0: List[int], token_ids_1: Optional[List[int]]=None) -> List[int]:\n sep = [self.sep_token_id]\n cls = [self.cls_token_id]\n if token_ids_1 is None:\n return len(cls + token_ids_0 + sep) * [0]\n return len(cls + token_ids_0 + sep + token_ids_1 + sep) * [0]", "docstring": "Create a mask from the two sequences passed to be used in a sequence-pair classification task. 
RoBERTa does not\nmake use of token type ids, therefore a list of zeros is returned.\nArgs:\ntoken_ids_0 (`List[int]`):\nList of IDs.\ntoken_ids_1 (`List[int]`, *optional*):\nOptional second list of IDs for sequence pairs.\nReturns:\n`List[int]`: List of zeros.", "source": "github-repos"} {"code": "def _CanSkipDataStream(self, file_entry, data_stream):\n \n if file_entry.IsFile():\n return False\n\n if data_stream.IsDefault():\n return True\n\n return False", "docstring": "Determines if analysis and extraction of a data stream can be skipped.\n\nThis is used to prevent Plaso trying to run analyzers or extract content\nfrom a pipe or socket it encounters while processing a mounted filesystem.\n\nArgs:\nfile_entry (dfvfs.FileEntry): file entry to consider for skipping.\ndata_stream (dfvfs.DataStream): data stream to consider for skipping.\n\nReturns:\nbool: True if the data stream can be skipped.", "source": "juraj-google-style"} {"code": "def grep(self, regex):\n matches = []\n logger.debug(\"Searching student files for '{0}'\".format(regex))\n for fname in self.student_files:\n if os.path.isfile((self.working_dir + fname)):\n for line in open((self.working_dir + fname), 'br'):\n if re.search(regex.encode(), line):\n logger.debug(\"{0} contains '{1}'\".format(fname, regex))\n matches.append(fname)\n return matches", "docstring": "Scans the student files for text patterns.\n\nArgs:\nregex (str): Regular expression used for scanning inside the files.\n\nReturns:\ntuple: Names of the matching files in the working directory.", "source": "codesearchnet"} {"code": "def register_entity(self, entity_value, entity_type, alias_of=None, domain=0):\n if (domain not in self.domains):\n self.register_domain(domain=domain)\n self.domains[domain].register_entity(entity_value=entity_value, entity_type=entity_type, alias_of=alias_of)", "docstring": "Register an entity to be tagged in potential parse results.\n\nArgs:\nentity_value(str): the value/proper name of an entity instance\n(Ex: \"The Big Bang Theory\")\nentity_type(str): the type/tag of an entity instance (Ex: \"Television Show\")\ndomain(str): a string representing the domain you wish to add the entity to", "source": "codesearchnet"} {"code": "def _mask(self, tensor, length, padding_value=0):\n \n with tf.name_scope('mask'):\n range_ = tf.range(tensor.shape[1].value)\n mask = range_[None, :] < length[:, None]\n if tensor.shape.ndims > 2:\n for _ in range(tensor.shape.ndims - 2):\n mask = mask[..., None]\n mask = tf.tile(mask, [1, 1] + tensor.shape[2:].as_list())\n masked = tf.where(mask, tensor, padding_value * tf.ones_like(tensor))\n return tf.check_numerics(masked, 'masked')", "docstring": "Set padding elements of a batch of sequences to a constant.\n\nUseful for setting padding elements to zero before summing along the time\ndimension, or for preventing infinite results in padding elements.\n\nArgs:\ntensor: Tensor of sequences.\nlength: Batch of sequence lengths.\npadding_value: Value to write into padding elements.\n\nReturns:\nMasked sequences.", "source": "juraj-google-style"} {"code": "def value(self, row, col, device_name_filter=None, node_name_filter=None, op_type_filter=None):\n menu_item = None\n if col == 0:\n text = self._profile_datum_list[row].node_exec_stats.node_name\n elif col == 1:\n text = self._profile_datum_list[row].op_type\n elif col == 2:\n text = str(self.formatted_start_time[row])\n elif col == 3:\n text = str(self.formatted_op_time[row])\n elif col == 4:\n text = str(self.formatted_exec_time[row])\n elif col == 5:\n 
command = 'ps'\n if device_name_filter:\n command += ' --%s %s' % (_DEVICE_NAME_FILTER_FLAG, device_name_filter)\n if node_name_filter:\n command += ' --%s %s' % (_NODE_NAME_FILTER_FLAG, node_name_filter)\n if op_type_filter:\n command += ' --%s %s' % (_OP_TYPE_FILTER_FLAG, op_type_filter)\n command += ' %s --init_line %d' % (self._profile_datum_list[row].file_path, self._profile_datum_list[row].line_number)\n menu_item = debugger_cli_common.MenuItem(None, command)\n text = self._profile_datum_list[row].file_line_func\n else:\n raise IndexError('Invalid column index %d.' % col)\n return RL(text, font_attr=menu_item)", "docstring": "Get the content of a cell of the table.\n\nArgs:\nrow: (int) row index.\ncol: (int) column index.\ndevice_name_filter: Regular expression to filter by device name.\nnode_name_filter: Regular expression to filter by node name.\nop_type_filter: Regular expression to filter by op type.\n\nReturns:\nA debuggre_cli_common.RichLine object representing the content of the\ncell, potentially with a clickable MenuItem.\n\nRaises:\nIndexError: if row index is out of range.", "source": "github-repos"} {"code": "def make_es_id(uri):\n try:\n uri = uri.clean_uri\n except AttributeError:\n pass\n return sha1(uri.encode()).hexdigest()", "docstring": "Creates the id based off of the uri value\n\nArgs:\n-----\nuri: the uri to conver to an elasticsearch id", "source": "codesearchnet"} {"code": "def box_to_center_and_scale(box: Union[Tuple, List, np.ndarray], image_width: int, image_height: int, normalize_factor: float=200.0, padding_factor: float=1.25):\n top_left_x, top_left_y, width, height = box[:4]\n aspect_ratio = image_width / image_height\n center = np.array([top_left_x + width * 0.5, top_left_y + height * 0.5], dtype=np.float32)\n if width > aspect_ratio * height:\n height = width * 1.0 / aspect_ratio\n elif width < aspect_ratio * height:\n width = height * aspect_ratio\n scale = np.array([width / normalize_factor, height / normalize_factor], dtype=np.float32)\n scale = scale * padding_factor\n return (center, scale)", "docstring": "Encodes a bounding box in COCO format into (center, scale).\n\nArgs:\nbox (`Tuple`, `List`, or `np.ndarray`):\nBounding box in COCO format (top_left_x, top_left_y, width, height).\nimage_width (`int`):\nImage width.\nimage_height (`int`):\nImage height.\nnormalize_factor (`float`):\nWidth and height scale factor.\npadding_factor (`float`):\nBounding box padding factor.\n\nReturns:\ntuple: A tuple containing center and scale.\n\n- `np.ndarray` [float32](2,): Center of the bbox (x, y).\n- `np.ndarray` [float32](2,): Scale of the bbox width & height.", "source": "github-repos"} {"code": "def _print_results(filename, data):\n \n if filename:\n with open(filename, 'wb') as f:\n f.write(data)\n else:\n print data", "docstring": "Print data to a file or STDOUT.\n\nArgs:\nfilename (str or None): If None, print to STDOUT; otherwise, print\nto the file with this name.\ndata (str): Data to print.", "source": "juraj-google-style"} {"code": "def get_ip_address_country(ip_address, parallel=False):\n\n def download_country_database(location='GeoLite2-Country.mmdb'):\n 'Downloads the MaxMind Geolite2 Country database\\n\\n Args:\\n location (str): Local location for the database file\\n '\n if parallel:\n logging.warning('Cannot download GeoIP database in parallel mode')\n return\n url = 'https:\n headers = {'User-Agent': USER_AGENT}\n original_filename = 'GeoLite2-Country.mmdb'\n try:\n response = requests.get(url, headers=headers)\n 
response.raise_for_status()\n tar_bytes = response.content\n tar_file = tarfile.open(fileobj=BytesIO(tar_bytes), mode='r:gz')\n tar_dir = tar_file.getnames()[0]\n tar_path = '{0}/{1}'.format(tar_dir, original_filename)\n tar_file.extract(tar_path)\n shutil.move(tar_path, location)\n shutil.rmtree(tar_dir)\n except Exception as e:\n logger.warning('Error downloading {0}: {1}'.format(url, e.__str__()))\n system_paths = ['GeoLite2-Country.mmdb', '/usr/local/share/GeoIP/GeoLite2-Country.mmdb', '/usr/share/GeoIP/GeoLite2-Country.mmdb', '/var/lib/GeoIP/GeoLite2-Country.mmdb', '/var/local/lib/GeoIP/GeoLite2-Country.mmdb', 'C:\\\\GeoIP\\\\GeoLite2-Country.mmdb']\n db_path = None\n for system_path in system_paths:\n if os.path.exists(system_path):\n db_path = system_path\n break\n if (db_path is None):\n db_path = os.path.join(tempdir, 'GeoLite2-Country.mmdb')\n if (not os.path.exists(db_path)):\n download_country_database(db_path)\n if (not os.path.exists(db_path)):\n return None\n else:\n db_age = (datetime.now() - datetime.fromtimestamp(os.stat(db_path).st_mtime))\n if (db_age > timedelta(days=7)):\n download_country_database()\n db_path = db_path\n db_reader = geoip2.database.Reader(db_path)\n country = None\n try:\n country = db_reader.country(ip_address).country.iso_code\n except geoip2.errors.AddressNotFoundError:\n pass\n return country", "docstring": "Uses the MaxMind Geolite2 Country database to return the ISO code for the\ncountry associated with the given IPv4 or IPv6 address\n\nArgs:\nip_address (str): The IP address to query for\nparallel (bool): Parallel processing\n\nReturns:\nstr: And ISO country code associated with the given IP address", "source": "codesearchnet"} {"code": "def element_at(self, index):\n \n if self.closed():\n raise ValueError(\"Attempt to call element_at() on a \"\n \"closed Queryable.\")\n\n if index < 0:\n raise OutOfRangeError(\"Attempt to use negative index.\")\n\n \n try:\n return self._iterable[index]\n except IndexError:\n raise OutOfRangeError(\"Index out of range.\")\n except TypeError:\n pass\n\n \n for i, item in enumerate(self):\n if i == index:\n return item\n raise OutOfRangeError(\"element_at(index) out of range.\")", "docstring": "Return the element at ordinal index.\n\nNote: This method uses immediate execution.\n\nArgs:\nindex: The index of the element to be returned.\n\nReturns:\nThe element at ordinal index in the source sequence.\n\nRaises:\nValueError: If the Queryable is closed().\nValueError: If index is out of range.", "source": "juraj-google-style"} {"code": "def _parse_testcase(testcase, project_mapping, session_variables_set=None):\n \n testcase.setdefault(\"config\", {})\n prepared_config = __prepare_config(\n testcase[\"config\"],\n project_mapping,\n session_variables_set\n )\n prepared_testcase_tests = __prepare_testcase_tests(\n testcase[\"teststeps\"],\n prepared_config,\n project_mapping,\n session_variables_set\n )\n return {\n \"config\": prepared_config,\n \"teststeps\": prepared_testcase_tests\n }", "docstring": "parse testcase\n\nArgs:\ntestcase (dict):\n{\n\"config\": {},\n\"teststeps\": []\n}", "source": "juraj-google-style"} {"code": "def _ws_on_open(self, ws: websocket.WebSocketApp):\n \n payload = {\n 'op': WebSocketEvent.IDENTIFY.value,\n 'd': {\n 'token': self.token,\n 'properties': {\n '$os': sys.platform,\n '$browser': 'Pycord',\n '$device': 'Pycord',\n '$referrer': '',\n '$referring_domain': ''\n },\n 'compress': True,\n 'large_threshold': 250\n }\n }\n self.logger.debug('Sending identify payload')\n 
ws.send(json.dumps(payload))\n self.connected = True", "docstring": "Callback for sending the initial authentication data\n\nThis \"payload\" contains the required data to authenticate this websocket\nclient as a suitable bot connection to the Discord websocket.\n\nArgs:\nws: websocket connection", "source": "juraj-google-style"} {"code": "def change_disk_usage(self, usage_change, file_path, st_dev):\n \n mount_point = self._mount_point_for_device(st_dev)\n if mount_point:\n total_size = mount_point['total_size']\n if total_size is not None:\n if total_size - mount_point['used_size'] < usage_change:\n self.raise_io_error(errno.ENOSPC, file_path)\n mount_point['used_size'] += usage_change", "docstring": "Change the used disk space by the given amount.\n\nArgs:\nusage_change: Number of bytes added to the used space.\nIf negative, the used space will be decreased.\n\nfile_path: The path of the object needing the disk space.\n\nst_dev: The device ID for the respective file system.\n\nRaises:\nIOError: if usage_change exceeds the free file system space", "source": "juraj-google-style"} {"code": "def create_context(self, state_hash, base_contexts, inputs, outputs):\n \n\n for address in inputs:\n if not self.namespace_is_valid(address):\n raise CreateContextException(\n \"Address or namespace {} listed in inputs is not \"\n \"valid\".format(address))\n for address in outputs:\n if not self.namespace_is_valid(address):\n raise CreateContextException(\n \"Address or namespace {} listed in outputs is not \"\n \"valid\".format(address))\n\n addresses_to_find = [add for add in inputs if len(add) == 70]\n\n address_values, reads = self._find_address_values_in_chain(\n base_contexts=base_contexts,\n addresses_to_find=addresses_to_find)\n\n context = ExecutionContext(\n state_hash=state_hash,\n read_list=inputs,\n write_list=outputs,\n base_context_ids=base_contexts)\n\n contexts_asked_not_found = [cid for cid in base_contexts\n if cid not in self._contexts]\n if contexts_asked_not_found:\n raise KeyError(\n \"Basing a new context off of context ids {} \"\n \"that are not in context manager\".format(\n contexts_asked_not_found))\n\n context.create_initial(address_values)\n\n self._contexts[context.session_id] = context\n\n if reads:\n context.create_prefetch(reads)\n self._address_queue.put_nowait(\n (context.session_id, state_hash, reads))\n return context.session_id", "docstring": "Create a ExecutionContext to run a transaction against.\n\nArgs:\nstate_hash: (str): Merkle root to base state on.\nbase_contexts (list of str): Context ids of contexts that will\nhave their state applied to make this context.\ninputs (list of str): Addresses that can be read from.\noutputs (list of str): Addresses that can be written to.\nReturns:\ncontext_id (str): the unique context_id of the session", "source": "juraj-google-style"} {"code": "def __init__(self, adgroup_id):\n \n \n \n \n \n \n \n \n \n self.next_id = -1\n \n self.operations = []\n self.adgroup_id = adgroup_id", "docstring": "Initializer.\n\nArgs:\nadgroup_id: The ID of the AdGroup that we wish to attach the partition\ntree to.", "source": "juraj-google-style"} {"code": "def _subscribe_extend(tensor, side_effects):\n assert len(tensor.op.inputs) == 1, 'Op {} must only have one input'.format(tensor.op.name)\n source_tensor = tensor.op.inputs[0]\n outs = []\n name_scope = source_tensor.op.name + '/subscription/'\n with ops.name_scope(name_scope):\n for s in side_effects:\n outs += s(source_tensor)\n out_ops = [out.op if isinstance(out, tensor_lib.Tensor) 
else out for out in outs]\n tensor.op._add_control_inputs(out_ops)\n return tensor", "docstring": "Helper method to extend the list of side_effects for a subscribed tensor.\n\nArgs:\ntensor: A `tf.Tensor` as returned by subscribe().\nside_effects: List of side_effect functions, see subscribe for details.\n\nReturns:\nThe given subscribed tensor (for API consistency).", "source": "github-repos"} {"code": "def _CheckLocation(self, file_entry, search_depth):\n if (self._location_segments is None):\n return False\n if ((search_depth < 0) or (search_depth > self._number_of_location_segments)):\n return False\n if (search_depth == 0):\n segment_name = ''\n else:\n segment_name = self._location_segments[(search_depth - 1)]\n if self._is_regex:\n if isinstance(segment_name, py2to3.STRING_TYPES):\n flags = (re.DOTALL | re.UNICODE)\n if (not self._is_case_sensitive):\n flags |= re.IGNORECASE\n try:\n segment_name = '^{0:s}$'.format(segment_name)\n segment_name = re.compile(segment_name, flags=flags)\n except sre_constants.error:\n return False\n self._location_segments[(search_depth - 1)] = segment_name\n elif (not self._is_case_sensitive):\n segment_name = segment_name.lower()\n self._location_segments[(search_depth - 1)] = segment_name\n if (search_depth > 0):\n if self._is_regex:\n if (not segment_name.match(file_entry.name)):\n return False\n elif self._is_case_sensitive:\n if (segment_name != file_entry.name):\n return False\n elif (segment_name != file_entry.name.lower()):\n return False\n return True", "docstring": "Checks the location find specification.\n\nArgs:\nfile_entry (FileEntry): file entry.\nsearch_depth (int): number of location path segments to compare.\n\nReturns:\nbool: True if the file entry matches the find specification, False if not.", "source": "codesearchnet"} {"code": "def tinsel(to_patch, module_name, decorator=mock_decorator):\n\n def fn_decorator(function):\n\n def wrapper(*args, **kwargs):\n with patch(to_patch, decorator):\n m = importlib.import_module(module_name)\n reload(m)\n function(*args, **kwargs)\n reload(m)\n return wrapper\n return fn_decorator", "docstring": "Decorator for simple in-place decorator mocking for tests\n\nArgs:\nto_patch: the string path of the function to patch\nmodule_name: complete string path of the module to reload\ndecorator (optional): replacement decorator. 
By default a pass-through\nwill be used.\n\nReturns:\nA wrapped test function, during the context of execution the specified\npath is patched.", "source": "codesearchnet"} {"code": "def merge_options(*options_list):\n if len(options_list) < 1:\n raise ValueError('At least one options should be provided')\n result_type = type(options_list[0])\n for options in options_list:\n if not isinstance(options, result_type):\n raise TypeError('Could not merge incompatible options of type {} and {}.'.format(type(options), result_type))\n if not isinstance(options_list[0], OptionsBase):\n raise TypeError('All options to be merged should inherit from `OptionsBase` but found option of type {} which does not.'.format(type(options_list[0])))\n default_options = result_type()\n result = result_type()\n for options in options_list:\n for name in options._options:\n this = getattr(result, name)\n that = getattr(options, name)\n default = getattr(default_options, name)\n if that == default:\n continue\n elif this == default:\n setattr(result, name, that)\n elif isinstance(this, OptionsBase):\n setattr(result, name, merge_options(this, that))\n elif name == 'framework_type':\n setattr(result, name, this + that)\n elif this != that:\n logging.warning('Changing the value of option %s from %r to %r.', name, this, that)\n setattr(result, name, that)\n return result", "docstring": "Merges the given options, returning the result as a new options object.\n\nThe input arguments are expected to have a matching type that derives from\n`tf.data.OptionsBase` (and thus each represent a set of options). The method\noutputs an object of the same type created by merging the sets of options\nrepresented by the input arguments.\n\nIf an option is set to different values by different options objects, the\nresult will match the setting of the options object that appears in the input\nlist last.\n\nIf an option is an instance of `tf.data.OptionsBase` itself, then this method\nis applied recursively to the set of options represented by this option.\n\nArgs:\n*options_list: options to merge\n\nRaises:\nTypeError: if the input arguments are incompatible or not derived from\n`tf.data.OptionsBase`\n\nReturns:\nA new options object which is the result of merging the given options.", "source": "github-repos"} {"code": "def forward(self, input_ids: torch.LongTensor) -> torch.FloatTensor:\n if self.map:\n my_input_ids = self.assistant_overlap_token_ids[input_ids[0, -1]].unsqueeze(0).unsqueeze(0)\n else:\n self.map = True\n my_input_ids = input_ids\n return self.original_embedding(my_input_ids)", "docstring": "Args:\ninput_ids (torch.LongTensor): Tensor of token IDs (batch_size, seq_len).\n\nReturns:\ntorch.FloatTensor: Corresponding input embeddings.", "source": "github-repos"} {"code": "def fori_loop(lower, upper, body_fun, init_val):\n if any_symbolic_tensors((lower, upper, init_val)):\n return ForiLoop(lower, upper, body_fun).symbolic_call(init_val)\n return backend.core.fori_loop(lower, upper, body_fun, init_val)", "docstring": "For loop implementation.\n\nArgs:\nlower: The initial value of the loop variable.\nupper: The upper bound of the loop variable.\nbody_fun: A callable that represents the loop body. Must take two\narguments: the loop variable and the loop state. 
The loop state\nshould be updated and returned by this function.\ninit_val: The initial value of the loop state.\n\nReturns:\nThe final state after the loop.\n\nExample:\n\n>>> lower = 0\n>>> upper = 10\n>>> body_fun = lambda i, s: (i + 1, s + i)\n>>> init_val = 0\n>>> keras.ops.fori_loop(lower, upper, body_fun, init_val)\n45", "source": "github-repos"} {"code": "def get_icohp_dict_by_bondlengths(self, minbondlength=0.0, maxbondlength=8.0):\n \n newicohp_dict = {}\n for value in self._icohplist.values():\n if value._length >= minbondlength and value._length <= maxbondlength:\n newicohp_dict[value._label] = value\n return newicohp_dict", "docstring": "get a dict of IcohpValues corresponding to certaind bond lengths\nArgs:\nminbondlength: defines the minimum of the bond lengths of the bonds\nmaxbondlength: defines the maximum of the bond lengths of the bonds\nReturns:\ndict of IcohpValues, the keys correspond to the values from the initial list_labels", "source": "juraj-google-style"} {"code": "def get_csv(filename):\n \n check_if_this_file_exist(filename)\n\n \n filename = os.path.abspath(filename)\n s = command_line(['exiftool', '-G', '-csv', '-sort', filename])\n if s:\n \n s = s.decode('utf-8')\n return s\n else:\n return 0", "docstring": "Return a csv representation of the exif\n\nget a filename and returns a unicode string with a CSV format\n\nArguments:\nfilename {string} -- your filename\n\nReturns:\n[unicode] -- unicode string", "source": "juraj-google-style"} {"code": "def get_token_accuracy(targets, outputs, ignore_index=None):\n n_correct = 0.0\n n_total = 0.0\n for (target, output) in zip(targets, outputs):\n if ((not torch.is_tensor(target)) or is_scalar(target)):\n target = torch.LongTensor([target])\n if ((not torch.is_tensor(output)) or is_scalar(output)):\n output = torch.LongTensor([[output]])\n if (len(target.size()) != len(output.size())):\n prediction = output.max(dim=0)[0].view((- 1))\n else:\n prediction = output\n if (ignore_index is not None):\n mask = target.ne(ignore_index)\n n_correct += prediction.eq(target).masked_select(mask).sum().item()\n n_total += mask.sum().item()\n else:\n n_total += len(target)\n n_correct += prediction.eq(target).sum().item()\n return ((n_correct / n_total), n_correct, n_total)", "docstring": "Get the accuracy token accuracy between two tensors.\n\nArgs:\ntargets (1 - 2D :class:`torch.Tensor`): Target or true vector against which to measure\nsaccuracy\noutputs (1 - 3D :class:`torch.Tensor`): Prediction or output vector\nignore_index (int, optional): Specifies a target index that is ignored\n\nReturns:\n:class:`tuple` consisting of accuracy (:class:`float`), number correct (:class:`int`) and\ntotal (:class:`int`)\n\nExample:\n\n>>> import torch\n>>> from torchnlp.metrics import get_token_accuracy\n>>> targets = torch.LongTensor([[1, 1], [2, 2], [3, 3]])\n>>> outputs = torch.LongTensor([[1, 1], [2, 3], [4, 4]])\n>>> accuracy, n_correct, n_total = get_token_accuracy(targets, outputs, ignore_index=3)\n>>> accuracy\n0.75\n>>> n_correct\n3.0\n>>> n_total\n4.0", "source": "codesearchnet"} {"code": "def AddArg(self, argument):\n \n self.args.append(argument)\n if len(self.args) > self.number_of_args:\n raise errors.ParseError('Too many arguments for this expression.')\n\n elif len(self.args) == self.number_of_args:\n return True\n\n return False", "docstring": "Adds a new argument to this expression.\n\nArgs:\nargument (str): argument to add.\n\nReturns:\nTrue if the argument is the last argument, False otherwise.\n\nRaises:\nParseError: If 
there are too many arguments.", "source": "juraj-google-style"} {"code": "def get_losses(scope=None, loss_collection=ops.GraphKeys.LOSSES):\n return ops.get_collection(loss_collection, scope)", "docstring": "Gets the list of losses from the loss_collection.\n\nArgs:\nscope: An optional scope name for filtering the losses to return.\nloss_collection: Optional losses collection.\n\nReturns:\na list of loss tensors.", "source": "github-repos"} {"code": "def extract_possible_actions(self, state_key):\n \n if state_key in self.__state_action_list_dict:\n return self.__state_action_list_dict[state_key]\n else:\n action_list = []\n state_key_list = [action_list.extend(self.__state_action_list_dict[k]) for k in self.__state_action_list_dict.keys() if len([s for s in state_key if s in k]) > 0]\n return action_list", "docstring": "Concreat method.\n\nArgs:\nstate_key The key of state. this value is point in map.\n\nReturns:\n[(x, y)]", "source": "juraj-google-style"} {"code": "def create_graph_from_data(self, data, **kwargs):\n self.arguments['{CITEST}'] = self.dir_CI_test[self.CI_test]\n self.arguments['{METHOD_INDEP}'] = self.dir_method_indep[self.method_indep]\n self.arguments['{DIRECTED}'] = 'TRUE'\n self.arguments['{ALPHA}'] = str(self.alpha)\n self.arguments['{NJOBS}'] = str(self.nb_jobs)\n self.arguments['{VERBOSE}'] = str(self.verbose).upper()\n results = self._run_pc(data, verbose=self.verbose)\n return nx.relabel_nodes(nx.DiGraph(results), {idx: i for (idx, i) in enumerate(data.columns)})", "docstring": "Run the PC algorithm.\n\nArgs:\ndata (pandas.DataFrame): DataFrame containing the data\n\nReturns:\nnetworkx.DiGraph: Solution given by PC on the given data.", "source": "codesearchnet"} {"code": "def get_gated_grpc_tensors(self, matching_debug_op=None):\n \n with self._grpc_gated_lock:\n matching_debug_op = matching_debug_op or 'DebugIdentity'\n if matching_debug_op not in self._grpc_gated_tensors:\n \n node_name_to_op_type = dict(\n (node.name, node.op) for node in self._graph_def.node)\n\n \n gated = []\n for node in self._graph_def.node:\n if node.op == matching_debug_op:\n for attr_key in node.attr:\n if attr_key == 'gated_grpc' and node.attr[attr_key].b:\n node_name, output_slot, _, debug_op = (\n debug_graphs.parse_debug_node_name(node.name))\n gated.append(\n (node_name, node_name_to_op_type[node_name], output_slot,\n debug_op))\n break\n self._grpc_gated_tensors[matching_debug_op] = gated\n\n return self._grpc_gated_tensors[matching_debug_op]", "docstring": "Extract all nodes with gated-gRPC debug ops attached.\n\nUses cached values if available.\nThis method is thread-safe.\n\nArgs:\ngraph_def: A tf.GraphDef proto.\nmatching_debug_op: Return tensors and nodes with only matching the\nspecified debug op name (optional). If `None`, will extract only\n`DebugIdentity` debug ops.\n\nReturns:\nA list of (node_name, op_type, output_slot, debug_op) tuples.", "source": "juraj-google-style"} {"code": "def erase(self):\n try:\n if (not self.halted()):\n self.halt()\n except errors.JLinkException:\n pass\n res = self._dll.JLINK_EraseChip()\n if (res < 0):\n raise errors.JLinkEraseException(res)\n return res", "docstring": "Erases the flash contents of the device.\n\nThis erases the flash memory of the target device. 
If this method\nfails, the device may be left in an inoperable state.\n\nArgs:\nself (JLink): the ``JLink`` instance\n\nReturns:\nNumber of bytes erased.", "source": "codesearchnet"} {"code": "def log_abs_determinant(self, name='log_abs_det'):\n if self.is_square is False:\n raise NotImplementedError('Determinant not implemented for an operator that is expected to not be square.')\n with self._name_scope(name):\n return self._log_abs_determinant()", "docstring": "Log absolute value of determinant for every batch member.\n\nArgs:\nname: A name for this `Op`.\n\nReturns:\n`Tensor` with shape `self.batch_shape` and same `dtype` as `self`.\n\nRaises:\nNotImplementedError: If `self.is_square` is `False`.", "source": "github-repos"} {"code": "def install(pkg, target='LocalSystem', store=False, allow_untrusted=False):\n \n if '*.' not in pkg:\n \n pkg = _quote(pkg)\n\n target = _quote(target)\n\n cmd = 'installer -pkg {0} -target {1}'.format(pkg, target)\n if store:\n cmd += ' -store'\n if allow_untrusted:\n cmd += ' -allowUntrusted'\n\n \n \n python_shell = False\n if '*.' in cmd:\n python_shell = True\n\n return __salt__['cmd.run_all'](cmd, python_shell=python_shell)", "docstring": "Install a pkg file\n\nArgs:\npkg (str): The package to install\ntarget (str): The target in which to install the package to\nstore (bool): Should the package be installed as if it was from the\nstore?\nallow_untrusted (bool): Allow the installation of untrusted packages?\n\nReturns:\ndict: A dictionary containing the results of the installation\n\nCLI Example:\n\n.. code-block:: bash\n\nsalt '*' macpackage.install test.pkg", "source": "juraj-google-style"} {"code": "def _configure_from_module(self, item):\n package = None\n if (item[0] == '.'):\n package = self.import_name\n obj = importlib.import_module(item, package=package)\n self.config.from_object(obj)\n return self", "docstring": "Configure from a module by import path.\n\nEffectively, you give this an absolute or relative import path, it will\nimport it, and then pass the resulting object to\n``_configure_from_object``.\n\nArgs:\nitem (str):\nA string pointing to a valid import path.\n\nReturns:\nfleaker.App:\nReturns itself.", "source": "codesearchnet"} {"code": "def __init__(\n self, maximum_number_of_tasks=_MAXIMUM_NUMBER_OF_TASKS, use_zeromq=True):\n \n super(TaskMultiProcessEngine, self).__init__()\n self._enable_sigsegv_handler = False\n self._filter_find_specs = None\n self._last_worker_number = 0\n self._maximum_number_of_tasks = maximum_number_of_tasks\n self._merge_task = None\n self._merge_task_on_hold = None\n self._number_of_consumed_event_tags = 0\n self._number_of_consumed_events = 0\n self._number_of_consumed_reports = 0\n self._number_of_consumed_sources = 0\n self._number_of_consumed_warnings = 0\n self._number_of_produced_event_tags = 0\n self._number_of_produced_events = 0\n self._number_of_produced_reports = 0\n self._number_of_produced_sources = 0\n self._number_of_produced_warnings = 0\n self._number_of_worker_processes = 0\n self._path_spec_extractor = extractors.PathSpecExtractor()\n self._processing_configuration = None\n self._resolver_context = context.Context()\n self._session_identifier = None\n self._status = definitions.STATUS_INDICATOR_IDLE\n self._storage_merge_reader = None\n self._storage_merge_reader_on_hold = None\n self._task_queue = None\n self._task_queue_port = None\n self._task_manager = task_manager.TaskManager()\n self._use_zeromq = use_zeromq", "docstring": "Initializes an engine.\n\nArgs:\nmaximum_number_of_tasks 
(Optional[int]): maximum number of concurrent\ntasks, where 0 represents no limit.\nuse_zeromq (Optional[bool]): True if ZeroMQ should be used for queuing\ninstead of Python's multiprocessing queue.", "source": "juraj-google-style"} {"code": "def __eq__(self, other):\n \n if not isinstance(other, DateTimeValues):\n return False\n\n normalized_timestamp = self._GetNormalizedTimestamp()\n other_normalized_timestamp = other._GetNormalizedTimestamp() \n\n if normalized_timestamp is None and other_normalized_timestamp is not None:\n return False\n\n if normalized_timestamp is not None and other_normalized_timestamp is None:\n return False\n\n return normalized_timestamp == other_normalized_timestamp", "docstring": "Determines if the date time values are equal to other.\n\nArgs:\nother (DateTimeValues): date time values to compare against.\n\nReturns:\nbool: True if the date time values are equal to other.", "source": "juraj-google-style"} {"code": "def check_cuda_lib(path, check_soname=True):\n if not os.path.isfile(path):\n raise ConfigError('No library found under: ' + path)\n objdump = shutil.which('objdump')\n if check_soname and objdump is not None and (not _is_windows()):\n output = subprocess.check_output([objdump, '-p', path]).decode('utf-8')\n output = [line for line in output.splitlines() if 'SONAME' in line]\n sonames = [line.strip().split(' ')[-1] for line in output]\n if not any((soname == os.path.basename(path) for soname in sonames)):\n raise ConfigError('None of the libraries match their SONAME: ' + path)", "docstring": "Tests if a library exists on disk and whether its soname matches the filename.\n\nArgs:\npath: the path to the library.\ncheck_soname: whether to check the soname as well.\n\nRaises:\nConfigError: If the library does not exist or if its soname does not match\nthe filename.", "source": "github-repos"} {"code": "def sanity_check_ir_blocks_from_frontend(ir_blocks, query_metadata_table):\n if (not ir_blocks):\n raise AssertionError(u'Received no ir_blocks: {}'.format(ir_blocks))\n _sanity_check_fold_scope_locations_are_unique(ir_blocks)\n _sanity_check_no_nested_folds(ir_blocks)\n _sanity_check_query_root_block(ir_blocks)\n _sanity_check_output_source_follower_blocks(ir_blocks)\n _sanity_check_block_pairwise_constraints(ir_blocks)\n _sanity_check_mark_location_preceding_optional_traverse(ir_blocks)\n _sanity_check_every_location_is_marked(ir_blocks)\n _sanity_check_coerce_type_outside_of_fold(ir_blocks)\n _sanity_check_all_marked_locations_are_registered(ir_blocks, query_metadata_table)\n _sanity_check_registered_locations_parent_locations(query_metadata_table)", "docstring": "Assert that IR blocks originating from the frontend do not have nonsensical structure.\n\nArgs:\nir_blocks: list of BasicBlocks representing the IR to sanity-check\n\nRaises:\nAssertionError, if the IR has unexpected structure. 
If the IR produced by the front-end\ncannot be successfully and correctly used to generate MATCH or Gremlin due to a bug,\nthis is the method that should catch the problem.", "source": "codesearchnet"} {"code": "def resplit_datasets(dataset, other_dataset, random_seed=None, split=None):\n from torchnlp.datasets import Dataset\n concat = (dataset.rows + other_dataset.rows)\n shuffle(concat, random_seed=random_seed)\n if (split is None):\n return (Dataset(concat[:len(dataset)]), Dataset(concat[len(dataset):]))\n else:\n split = max(min(round((len(concat) * split)), len(concat)), 0)\n return (Dataset(concat[:split]), Dataset(concat[split:]))", "docstring": "Deterministic shuffle and split algorithm.\n\nGiven the same two datasets and the same ``random_seed``, the split happens the same exact way\nevery call.\n\nArgs:\ndataset (lib.datasets.Dataset): First dataset.\nother_dataset (lib.datasets.Dataset): Another dataset.\nrandom_seed (int, optional): Seed to control the shuffle of both datasets.\nsplit (float, optional): If defined it is the percentage of rows that first dataset gets\nafter split otherwise the original proportions are kept.\n\nReturns:\n:class:`lib.datasets.Dataset`, :class:`lib.datasets.Dataset`: Resplit datasets.", "source": "codesearchnet"} {"code": "def search(self, filters):\n records = self.__model__.search(self.__five9__, filters)\n return self.__class__(self.__five9__, self.__model__, records)", "docstring": "Search Five9 given a filter.\n\nArgs:\nfilters (dict): A dictionary of search strings, keyed by the name\nof the field to search.\n\nReturns:\nEnvironment: An environment representing the recordset.", "source": "codesearchnet"} {"code": "def copy_file(self, file_id, dest_folder_id):\n return self.__request('POST', (('/files/' + unicode(file_id)) + '/copy'), data={'parent': {'id': unicode(dest_folder_id)}})", "docstring": "Copy file to new destination\n\nArgs:\nfile_id (int): ID of the folder.\n\ndest_folder_id (int): ID of parent folder you are copying to.\n\nReturns:\ndict. Response from Box.\n\nRaises:\nBoxError: An error response is returned from Box (status_code >= 400).\n\nBoxError: 409 - Item with the same name already exists.\nIn this case you will need download the file and upload a new version to your destination.\n(Box currently doesn't have a method to copy a new verison.)\n\nBoxHttpResponseError: Response from Box is malformed.\n\nrequests.exceptions.*: Any connection related problem.", "source": "codesearchnet"} {"code": "def AddStationDecoration(self, index, color=\"\n \n tmpstr = str()\n num_stations = len(self._stations)\n ind = int(index)\n if self._stations:\n if 0= 300:\n assert loss < 0.015", "docstring": "Trains a simple classification model.\n\nNote that the data has been configured such that after around 300 steps,\nthe model has memorized the dataset (e.g. 
we can expect %100 accuracy).\n\nArgs:\ncheckpoint_dir: The directory where the checkpoint is written to.\nnum_steps: The number of steps to train for.", "source": "github-repos"} {"code": "def Categories(unicode_dir=_UNICODE_DIR):\n \n\n categories = {}\n\n def DoLine(codes, fields):\n \n category = fields[2]\n categories.setdefault(category, []).extend(codes)\n \n if len(category) > 1:\n short = category[0]\n categories.setdefault(short, []).extend(codes)\n\n ReadUnicodeTable(unicode_dir+\"/UnicodeData.txt\", 15, DoLine)\n return categories", "docstring": "Returns dict mapping category names to code lists.\n\nArgs:\nunicode_dir: Unicode data directory\n\nReturns:\ndict mapping category names to code lists", "source": "juraj-google-style"} {"code": "def lookup(self, keys, name=None):\n key_tensor = keys\n if isinstance(keys, (sparse_tensor.SparseTensor, internal.RaggedTensor)):\n key_tensor = keys.values\n if keys.dtype.base_dtype != self._key_dtype:\n raise TypeError(f'Dtype of argument `keys` must be {self._key_dtype}, received: {keys.dtype}')\n with ops.name_scope(name, '%s_Lookup' % self.name, (self.resource_handle, key_tensor, self._default_value)):\n values = gen_lookup_ops.lookup_table_find_v2(self.resource_handle, key_tensor, self._default_value)\n values.set_shape(key_tensor.get_shape())\n if isinstance(keys, sparse_tensor.SparseTensor):\n return sparse_tensor.SparseTensor(keys.indices, values, keys.dense_shape)\n elif isinstance(keys, internal.RaggedTensor):\n return keys.with_values(values)\n else:\n return values", "docstring": "Looks up `keys` in a table, outputs the corresponding values.\n\nThe `default_value` is used for keys not present in the table.\n\nArgs:\nkeys: Keys to look up. May be either a `SparseTensor` or dense `Tensor`.\nname: A name for the operation (optional).\n\nReturns:\nA `SparseTensor` if keys are sparse, a `RaggedTensor` if keys are ragged,\notherwise a dense `Tensor`.\n\nRaises:\nTypeError: when `keys` or `default_value` doesn't match the table data\ntypes.", "source": "github-repos"} {"code": "def construct_field(model_name, field_name, field_type, all_models, **kwargs):\n \n field_type_parts = field_type.split('->')\n _field_type = field_type_parts[0].strip().split('[]')[0].strip()\n back_populates = field_type_parts[1].strip() if len(field_type_parts) > 1 else None\n error_context = kwargs.pop('error_context', StatikErrorContext())\n _kwargs = copy(kwargs)\n _kwargs['back_populates'] = back_populates\n\n if _field_type not in FIELD_TYPES and _field_type not in all_models:\n raise InvalidFieldTypeError(\n model_name,\n field_name,\n context=error_context\n )\n\n if _field_type in FIELD_TYPES:\n return FIELD_TYPES[_field_type](field_name, **_kwargs)\n\n if field_type_parts[0].strip().endswith('[]'):\n return StatikManyToManyField(field_name, _field_type, **_kwargs)\n\n return StatikForeignKeyField(field_name, _field_type, **_kwargs)", "docstring": "Helper function to build a field from the given field name and\ntype.\n\nArgs:\nmodel_name: The name of the model for which we're building this field.\nfield_name: The name of the field to build.\nfield_type: A string indicator as to which field type must be built.\nall_models: A list containing the names of all of the models, which\nwill help us when building foreign key lookups.", "source": "juraj-google-style"} {"code": "def _is_unannotated_contextmanager_exit(self, func: _function_base.Function, args: function.Args) -> bool:\n if not isinstance(func, _function_base.BoundInterpreterFunction):\n return 
False\n if not self.name.endswith('.__exit__'):\n return False\n if self.signature.has_param_annotations:\n return False\n return len(args.posargs) == 4 and (not args.has_namedargs()) and (not args.starargs) and (not args.starstarargs)", "docstring": "Returns whether this is an unannotated contextmanager __exit__ method.\n\nIf this is a bound method named __exit__ that has no type annotations and is\npassed four positional args and nothing else, then we assume that it is a\ncontextmanager's __exit__ method that needs annotations added.\n\nArgs:\nfunc: A method binding for self.\nargs: Passed arguments.", "source": "github-repos"} {"code": "def __init__(self, func=None, max_threads=10, token=None):\n \n self.thread_pool = threadpool.ThreadPool.Factory(self.THREAD_POOL_NAME,\n max_threads)\n self.thread_pool.Start()\n self.token = token\n self.func = func\n self.broken_subjects = [] \n\n self.out_queue = queue.Queue()", "docstring": "Iterate over all clients in a threadpool.\n\nArgs:\nfunc: A function to call with each client urn.\nmax_threads: Number of threads to use.\ntoken: Auth token.\n\nRaises:\nValueError: If function not specified.", "source": "juraj-google-style"} {"code": "def depth_june_average_ground_temperature(self, value=None):\n if (value is not None):\n try:\n value = float(value)\n except ValueError:\n raise ValueError('value {} need to be of type float for field `depth_june_average_ground_temperature`'.format(value))\n self._depth_june_average_ground_temperature = value", "docstring": "Corresponds to IDD Field `depth_june_average_ground_temperature`\n\nArgs:\nvalue (float): value for IDD Field `depth_june_average_ground_temperature`\nUnit: C\nif `value` is None it will not be checked against the\nspecification and is assumed to be a missing value\n\nRaises:\nValueError: if `value` is not a valid value", "source": "codesearchnet"} {"code": "def get_group(self, name, user_name=None):\n self.project_service.set_auth(self._token_project)\n return self.project_service.get_group(name, user_name)", "docstring": "Get information on the given group or whether or not a user is a member\nof the group.\n\nArgs:\nname (string): Name of group to query.\nuser_name (optional[string]): Supply None if not interested in\ndetermining if user is a member of the given group.\n\nReturns:\n(mixed): Dictionary if getting group information or bool if a user\nname is supplied.\n\nRaises:\nrequests.HTTPError on failure.", "source": "codesearchnet"} {"code": "def equals(self, rhs):\n try:\n return isinstance(rhs, self._class_name)\n except TypeError:\n return (type(rhs) == type(self._class_name))", "docstring": "Check to see if the RHS is an instance of class_name.\n\nArgs:\n# rhs: the right hand side of the test\nrhs: object\n\nReturns:\nbool", "source": "codesearchnet"} {"code": "def generate_cot(context, parent_path=None):\n body = generate_cot_body(context)\n schema = load_json_or_yaml(context.config['cot_schema_path'], is_path=True, exception=ScriptWorkerException, message=\"Can't read schema file {}: %(exc)s\".format(context.config['cot_schema_path']))\n validate_json_schema(body, schema, name='chain of trust')\n body = format_json(body)\n parent_path = (parent_path or os.path.join(context.config['artifact_dir'], 'public'))\n unsigned_path = os.path.join(parent_path, 'chain-of-trust.json')\n write_to_file(unsigned_path, body)\n if context.config['sign_chain_of_trust']:\n ed25519_signature_path = '{}.sig'.format(unsigned_path)\n ed25519_private_key = 
ed25519_private_key_from_file(context.config['ed25519_private_key_path'])\n ed25519_signature = ed25519_private_key.sign(body.encode('utf-8'))\n write_to_file(ed25519_signature_path, ed25519_signature, file_type='binary')\n return body", "docstring": "Format and sign the cot body, and write to disk.\n\nArgs:\ncontext (scriptworker.context.Context): the scriptworker context.\nparent_path (str, optional): The directory to write the chain of trust\nartifacts to. If None, this is ``artifact_dir/public/``.\nDefaults to None.\n\nReturns:\nstr: the contents of the chain of trust artifact.\n\nRaises:\nScriptWorkerException: on schema error.", "source": "codesearchnet"} {"code": "def get_nc_attrs(nc):\n meta = {'experiment': nc.experiment_id, 'frequency': nc.frequency, 'institute': nc.institute_id, 'model': nc.model_id, 'modeling_realm': nc.modeling_realm, 'ensemble_member': 'r{}i{}p{}'.format(nc.realization, nc.initialization_method, nc.physics_version)}\n variable_name = get_var_name(nc)\n if variable_name:\n meta.update({'variable_name': variable_name})\n return meta", "docstring": "Gets netCDF file metadata attributes.\n\nArguments:\nnc (netCDF4.Dataset): an open NetCDF4 Dataset to pull attributes from.\n\nReturns:\ndict: Metadata as extracted from the netCDF file.", "source": "codesearchnet"} {"code": "def _classify_segment(self, address, length):\n \n\n end_address = address + length - 1\n\n _, start_seg = self._find_address(address)\n _, end_seg = self._find_address(end_address)\n\n if start_seg is not None or end_seg is not None:\n raise ArgumentError(\"Overlapping segments are not yet supported\", address=address, length=length)\n\n return DisjointSegment()", "docstring": "Determine how a new data segment fits into our existing world\n\nParams:\naddress (int): The address we wish to classify\nlength (int): The length of the segment\n\nReturns:\nint: One of SparseMemoryMap.prepended", "source": "juraj-google-style"} {"code": "def instantiate_interface(virtual_iface, config, loop):\n \n\n \n if virtual_iface == 'null':\n return StandardDeviceServer(None, {}, loop=loop)\n\n conf = {}\n if 'interface' in config:\n conf = config['interface']\n\n try:\n reg = ComponentRegistry()\n if virtual_iface.endswith('.py'):\n _name, iface = reg.load_extension(virtual_iface, class_filter=AbstractDeviceServer, unique=True)\n else:\n _name, iface = reg.load_extensions('iotile.device_server', name_filter=virtual_iface,\n class_filter=AbstractDeviceServer, unique=True)\n\n return iface(None, conf, loop=loop)\n except ArgumentError as err:\n print(\"ERROR: Could not load device_server (%s): %s\" % (virtual_iface, err.msg))\n sys.exit(1)", "docstring": "Find a virtual interface by name and instantiate it\n\nArgs:\nvirtual_iface (string): The name of the pkg_resources entry point corresponding to\nthe interface. It should be in group iotile.virtual_interface\nconfig (dict): A dictionary with a 'interface' key with the config info for configuring\nthis virtual interface. This is optional.\n\nReturns:\nVirtualInterface: The instantiated subclass of VirtualInterface", "source": "juraj-google-style"} {"code": "def stretch_hist_equalize(self, approximate=False):\n \n logger.info(\"Perform a histogram equalized contrast stretch.\")\n\n nwidth = 2048.\n logger.debug(\"Make histogram bins having equal amount of data, \" +\n \"using numpy percentile function:\")\n\n def _band_hist(band_data):\n cdf = da.arange(0., 1., 1. 
/ nwidth, chunks=nwidth)\n if approximate:\n \n flat_data = band_data.ravel()\n \n \n bins = da.percentile(flat_data[da.notnull(flat_data)],\n cdf * 100.)\n else:\n bins = dask.delayed(np.nanpercentile)(band_data, cdf * 100.)\n bins = da.from_delayed(bins, shape=(nwidth,), dtype=cdf.dtype)\n res = dask.delayed(np.interp)(band_data, bins, cdf)\n res = da.from_delayed(res, shape=band_data.shape,\n dtype=band_data.dtype)\n return res\n\n band_results = []\n for band in self.data['bands'].values:\n if band == 'A':\n continue\n band_data = self.data.sel(bands=band)\n res = _band_hist(band_data.data)\n band_results.append(res)\n\n if 'A' in self.data.coords['bands'].values:\n band_results.append(self.data.sel(bands='A'))\n self.data.data = da.stack(band_results,\n axis=self.data.dims.index('bands'))", "docstring": "Stretch the current image's colors through histogram equalization.\n\nArgs:\napproximate (bool): Use a faster less-accurate percentile\ncalculation. At the time of writing the dask\nversion of `percentile` is not as accurate as\nthe numpy version. This will likely change in\nthe future. Current dask version 0.17.", "source": "juraj-google-style"} {"code": "def add_delta_deltas(filterbanks, name=None):\n delta_filter = np.array([2, 1, 0, (- 1), (- 2)])\n delta_delta_filter = scipy.signal.convolve(delta_filter, delta_filter, 'full')\n delta_filter_stack = np.array([((([0] * 4) + [1]) + ([0] * 4)), ((([0] * 2) + list(delta_filter)) + ([0] * 2)), list(delta_delta_filter)], dtype=np.float32).T[(:, None, None, :)]\n delta_filter_stack /= np.sqrt(np.sum((delta_filter_stack ** 2), axis=0, keepdims=True))\n filterbanks = tf.nn.conv2d(filterbanks, delta_filter_stack, [1, 1, 1, 1], 'SAME', data_format='NHWC', name=name)\n return filterbanks", "docstring": "Compute time first and second-order derivative channels.\n\nArgs:\nfilterbanks: float32 tensor with shape [batch_size, len, num_bins, 1]\nname: scope name\n\nReturns:\nfloat32 tensor with shape [batch_size, len, num_bins, 3]", "source": "codesearchnet"} {"code": "def _from_any_pb(pb_type, any_pb):\n msg = pb_type()\n if (not any_pb.Unpack(msg)):\n raise TypeError('Could not convert {} to {}'.format(any_pb.__class__.__name__, pb_type.__name__))\n return msg", "docstring": "Converts an Any protobuf to the specified message type\n\nArgs:\npb_type (type): the type of the message that any_pb stores an instance\nof.\nany_pb (google.protobuf.any_pb2.Any): the object to be converted.\n\nReturns:\npb_type: An instance of the pb_type message.\n\nRaises:\nTypeError: if the message could not be converted.", "source": "codesearchnet"} {"code": "def set_hparam(self, name, value):\n \n param_type, is_list = self._hparam_types[name]\n if isinstance(value, list):\n if not is_list:\n raise ValueError(\n 'Must not pass a list for single-valued parameter: %s' % name)\n setattr(self, name, [\n _cast_to_type_if_compatible(name, param_type, v) for v in value])\n else:\n if is_list:\n raise ValueError(\n 'Must pass a list for multi-valued parameter: %s.' 
% name)\n setattr(self, name, _cast_to_type_if_compatible(name, param_type, value))", "docstring": "Set the value of an existing hyperparameter.\n\nThis function verifies that the type of the value matches the type of the\nexisting hyperparameter.\n\nArgs:\nname: Name of the hyperparameter.\nvalue: New value of the hyperparameter.\n\nRaises:\nKeyError: If the hyperparameter doesn't exist.\nValueError: If there is a type mismatch.", "source": "juraj-google-style"} {"code": "def pretty_print_counters(counters):\n \n totals = collections.defaultdict(int)\n for (name, val) in counters:\n prefixes = [name[:i] for i in xrange(len(name)) if name[i] == \"/\"] + [name]\n for p in prefixes:\n totals[p] += val\n parts = []\n for name, val in sorted(six.iteritems(totals)):\n parts.append(\" \" * name.count(\"/\") + \"%s: %.3g\" % (name, val))\n return \"\\n\".join(parts)", "docstring": "print counters hierarchically.\n\nEach counter is a pair of a string and a number.\nThe string can have slashes, meaning that the number also counts towards\neach prefix. e.g. \"parameters/trainable\" counts towards both \"parameters\"\nand \"parameters/trainable\".\n\nArgs:\ncounters: a list of (string, number) pairs\n\nReturns:\na string", "source": "juraj-google-style"} {"code": "def find_n_data_blocks(self):\n self.file_obj.seek(0)\n (header0, data_idx0) = self.read_header()\n self.file_obj.seek(data_idx0)\n block_size = int(header0['BLOCSIZE'])\n n_bits = int(header0['NBITS'])\n self.file_obj.seek(int(header0['BLOCSIZE']), 1)\n n_blocks = 1\n end_found = False\n while (not end_found):\n try:\n (header, data_idx) = self.read_header()\n self.file_obj.seek(data_idx)\n self.file_obj.seek(header['BLOCSIZE'], 1)\n n_blocks += 1\n except EndOfFileError:\n end_found = True\n break\n self.file_obj.seek(0)\n return n_blocks", "docstring": "Seek through the file to find how many data blocks there are in the file\n\nReturns:\nn_blocks (int): number of data blocks in the file", "source": "codesearchnet"} {"code": "def dump(self, output, close_after_write=True):\n \n\n self.open(output)\n try:\n self.make_worksheet(self.table_name)\n self.write_table()\n finally:\n if close_after_write:\n self.close()", "docstring": "Write a worksheet to the current workbook.\n\nArgs:\noutput (str):\nPath to the workbook file to write.\nclose_after_write (bool, optional):\nClose the workbook after write.\nDefaults to |True|.", "source": "juraj-google-style"} {"code": "def token_to_id(self, token):\n token = self.process_token(token)\n return self._token2id.get(token, (len(self._token2id) - 1))", "docstring": "Get the token_id of given token.\n\nArgs:\ntoken (str): token from vocabulary.\n\nReturns:\nint: int id of token.", "source": "codesearchnet"} {"code": "def _parse_dbpath(dbpath):\n \n if isinstance(dbpath, list):\n \n dbpath = '|'.join(dbpath)\n\n \n if not dbpath.endswith('$'):\n dbpath = '(%s)$' % dbpath\n\n return dbpath", "docstring": "Converts the dbpath to a regexp pattern.\n\nTransforms dbpath from a string or an array of strings to a\nregexp pattern which will be used to match database names.\n\nArgs:\ndbpath: a string or an array containing the databases to be matched\nfrom a cluster.\n\nReturns:\nA regexp pattern that will match any of the desired databases on\non a cluster.", "source": "juraj-google-style"} {"code": "def UploadOperations(self, operations, is_last=False):\n if self._is_last:\n raise googleads.errors.AdWordsBatchJobServiceInvalidOperationError(\"Can't add new operations to a completed incremental upload.\")\n req 
= self._request_builder.BuildUploadRequest(self._upload_url, operations, current_content_length=self._current_content_length, is_last=is_last)\n try:\n _batch_job_logger.debug('Outgoing request: %s %s %s', req.get_full_url(), req.headers, req.data)\n self._url_opener.open(req)\n if _batch_job_logger.isEnabledFor(logging.INFO):\n _batch_job_logger.info('Request summary: %s', self._ExtractRequestSummaryFields(req))\n except urllib2.HTTPError as e:\n if (e.code != 308):\n if _batch_job_logger.isEnabledFor(logging.WARNING):\n _batch_job_logger.warning('Request summary: %s', self._ExtractRequestSummaryFields(req, error=e))\n raise\n self._current_content_length += len(req.data)\n self._is_last = is_last", "docstring": "Uploads operations to the given uploadUrl in incremental steps.\n\nNote: Each list of operations is expected to contain operations of the\nsame type, similar to how one would normally send operations in an\nAdWords API Service request.\n\nArgs:\noperations: one or more lists of operations as would be sent to the\nAdWords API for the associated service.\nis_last: a boolean indicating whether this is the final increment to be\nadded to the batch job.", "source": "codesearchnet"} {"code": "class XLMPoolerStartLogits(nn.Module):\n\n def __init__(self, config: XLMConfig):\n super().__init__()\n self.dense = nn.Linear(config.hidden_size, 1)\n\n def forward(self, hidden_states: torch.FloatTensor, p_mask: Optional[torch.FloatTensor]=None) -> torch.FloatTensor:\n \n x = self.dense(hidden_states).squeeze(-1)\n if p_mask is not None:\n if p_mask.dtype == torch.float16:\n x = x * (1 - p_mask) - 65500 * p_mask\n else:\n x = x * (1 - p_mask) - 1e+30 * p_mask\n return x", "docstring": "Compute SQuAD start logits from sequence hidden states.\n\nArgs:\nconfig ([`XLMConfig`]):\nThe config used by the model, will be used to grab the `hidden_size` of the model.", "source": "github-repos"} {"code": "def transform(node, ctx):\n node = qual_names.resolve(node)\n node = CallTreeTransformer(ctx).visit(node)\n return node", "docstring": "Transform function call to the compiled counterparts.\n\nArgs:\nnode: AST\nctx: EntityContext\nReturns:\nA tuple (node, new_names):\nnode: The transformed AST\nnew_names: set(string), containing any newly-generated names", "source": "github-repos"} {"code": "def _ParseIdentifierMappingRecord(\n self, parser_mediator, table_name, esedb_record):\n \n record_values = self._GetRecordValues(\n parser_mediator, table_name, esedb_record)\n\n identifier = record_values.get('IdIndex', None)\n if identifier is None:\n parser_mediator.ProduceExtractionWarning(\n 'IdIndex value missing from table: SruDbIdMapTable')\n return None, None\n\n identifier_type = record_values.get('IdType', None)\n if identifier_type not in self._SUPPORTED_IDENTIFIER_TYPES:\n parser_mediator.ProduceExtractionWarning(\n 'unsupported IdType value: {0!s} in table: SruDbIdMapTable'.format(\n identifier_type))\n return None, None\n\n mapped_value = record_values.get('IdBlob', None)\n if mapped_value is None:\n parser_mediator.ProduceExtractionWarning(\n 'IdBlob value missing from table: SruDbIdMapTable')\n return None, None\n\n if identifier_type == 3:\n try:\n fwnt_identifier = pyfwnt.security_identifier()\n fwnt_identifier.copy_from_byte_stream(mapped_value)\n mapped_value = fwnt_identifier.get_string()\n except IOError:\n parser_mediator.ProduceExtractionWarning(\n 'unable to decode IdBlob value as Windows NT security identifier')\n return None, None\n\n else:\n try:\n mapped_value = 
mapped_value.decode('utf-16le').rstrip('\\0')\n except UnicodeDecodeError:\n parser_mediator.ProduceExtractionWarning(\n 'unable to decode IdBlob value as UTF-16 little-endian string')\n return None, None\n\n return identifier, mapped_value", "docstring": "Extracts an identifier mapping from a SruDbIdMapTable record.\n\nArgs:\nparser_mediator (ParserMediator): mediates interactions between parsers\nand other components, such as storage and dfvfs.\ntable_name (str): name of the table the record is stored in.\nesedb_record (pyesedb.record): record.\n\nReturns:\ntuple[int, str]: numeric identifier and its string representation or\nNone, None if no identifier mapping can be retrieved from the record.", "source": "juraj-google-style"} {"code": "def from_json(cls, data):\n required_keys = ('name', 'day_type', 'location', 'dry_bulb_condition', 'humidity_condition', 'wind_condition', 'sky_condition')\n for key in required_keys:\n assert (key in data), 'Required key \"{}\" is missing!'.format(key)\n return cls(data['name'], data['day_type'], Location.from_json(data['location']), DryBulbCondition.from_json(data['dry_bulb_condition']), HumidityCondition.from_json(data['humidity_condition']), WindCondition.from_json(data['wind_condition']), SkyCondition.from_json(data['sky_condition']))", "docstring": "Create a Design Day from a dictionary.\n\nArgs:\ndata = {\n\"name\": string,\n\"day_type\": string,\n\"location\": ladybug Location schema,\n\"dry_bulb_condition\": ladybug DryBulbCondition schema,\n\"humidity_condition\": ladybug HumidityCondition schema,\n\"wind_condition\": ladybug WindCondition schema,\n\"sky_condition\": ladybug SkyCondition schema}", "source": "codesearchnet"} {"code": "def add(self, node):\n \n if node.parent_id != _node.Root.ID:\n raise exception.InvalidException('Not a top level node')\n\n self._nodes[node.id] = node\n self._nodes[node.parent_id].append(node, False)", "docstring": "Register a top level node (and its children) for syncing up to the server. 
There's no need to call this for nodes created by\n:py:meth:`createNote` or :py:meth:`createList` as they are automatically added.\n\nLoginException: If :py:meth:`login` has not been called.\nArgs:\nnode (gkeepapi.node.Node): The node to sync.\n\nRaises:\nInvalid: If the parent node is not found.", "source": "juraj-google-style"} {"code": "def _get_params(mcs, bases, namespace):\n params = [(name, namespace.pop(name)) for (name, attribute) in list(namespace.items()) if isinstance(attribute, BaseParam)]\n for base in reversed(bases):\n if hasattr(base, mcs._params_storage_key):\n params = (list(getattr(base, mcs._params_storage_key).items()) + params)\n return OrderedDict(params)", "docstring": "Create params dictionary to be used in resource class namespace.\n\nPop all parameter objects from attributes dict (namespace)\nand store them under _params_storage_key atrribute.\nAlso collect all params from base classes in order that ensures\nparams can be overriden.\n\nArgs:\nbases: all base classes of created resource class\nnamespace (dict): namespace as dictionary of attributes", "source": "codesearchnet"} {"code": "def directed_bipartition_of_one(seq):\n bipartitions = list(bipartition_of_one(seq))\n return chain(bipartitions, reverse_elements(bipartitions))", "docstring": "Generate directed bipartitions where one part is of length 1.\n\nArgs:\nseq (Iterable): The sequence to partition.\n\nReturns:\nlist[tuple[tuple]]: A list of tuples containing each of the two\npartitions.\n\nExample:\n>>> partitions = directed_bipartition_of_one((1, 2, 3))\n>>> list(partitions) # doctest: +NORMALIZE_WHITESPACE\n[((1,), (2, 3)),\n((2,), (1, 3)),\n((3,), (1, 2)),\n((2, 3), (1,)),\n((1, 3), (2,)),\n((1, 2), (3,))]", "source": "codesearchnet"} {"code": "def print_start_trigger(self, type):\n \n types = {'recieved': 1,\n 'filled': 2,\n 'num_recieved': 3}\n \n if type in types:\n self.send('^PT'+chr(types[type]))\n else:\n raise RuntimeError('Invalid type.')", "docstring": "Set print start trigger.\n\nArgs:\ntype: The type of trigger you desire.\nReturns:\nNone\nRaises:\nRuntimeError: Invalid type.", "source": "juraj-google-style"} {"code": "def _AddRestoreOps(self, filename_tensor, saveables, restore_sequentially, reshape, preferred_shard=-1, name='restore_all'):\n all_tensors = self.bulk_restore(filename_tensor, saveables, preferred_shard, restore_sequentially)\n assign_ops = []\n idx = 0\n for saveable in saveables:\n shapes = None\n if reshape:\n shapes = []\n for spec in saveable.specs:\n v = spec.tensor\n shape = v.get_shape()\n if not shape.is_fully_defined():\n shape = array_ops.shape(v)\n shapes.append(shape)\n saveable_tensors = all_tensors[idx:idx + len(saveable.specs)]\n idx += len(saveable.specs)\n assign_ops.append(saveable.restore(saveable_tensors, shapes))\n return control_flow_ops.group(*assign_ops, name=name)", "docstring": "Add operations to restore saveables.\n\nArgs:\nfilename_tensor: Tensor for the path of the file to load.\nsaveables: A list of SaveableObject objects.\nrestore_sequentially: True if we want to restore variables sequentially\nwithin a shard.\nreshape: True if we want to reshape loaded tensors to the shape of the\ncorresponding variable.\npreferred_shard: Shard to open first when loading a sharded file.\nname: Name for the returned op.\n\nReturns:\nAn Operation that restores the variables.", "source": "github-repos"} {"code": "def __chunk(segment, abbr=False):\n \n names = ('north', 'east', 'south', 'west', 'north')\n if not abbr:\n sjoin = '-'\n else:\n names = 
[s[0].upper() for s in names]\n sjoin = ''\n if segment % 2 == 0:\n return (names[segment].capitalize(),\n sjoin.join((names[segment].capitalize(), names[segment],\n names[segment + 1])),\n sjoin.join((names[segment].capitalize(), names[segment + 1])),\n sjoin.join((names[segment + 1].capitalize(), names[segment],\n names[segment + 1])))\n else:\n return (names[segment].capitalize(),\n sjoin.join((names[segment].capitalize(), names[segment + 1],\n names[segment])),\n sjoin.join((names[segment + 1].capitalize(), names[segment])),\n sjoin.join((names[segment + 1].capitalize(),\n names[segment + 1], names[segment])))", "docstring": "Generate a ``tuple`` of compass direction names.\n\nArgs:\nsegment (list): Compass segment to generate names for\nabbr (bool): Names should use single letter abbreviations\n\nReturns:\nbool: Direction names for compass segment", "source": "juraj-google-style"} {"code": "def decode_raw_v1(input_bytes=None, out_type=None, little_endian=True, name=None, bytes=None):\n input_bytes = deprecation.deprecated_argument_lookup('input_bytes', input_bytes, 'bytes', bytes)\n if out_type is None:\n raise ValueError(\"decode_raw_v1() missing 1 positional argument: 'out_type'\")\n return gen_parsing_ops.decode_raw(input_bytes, out_type, little_endian=little_endian, name=name)", "docstring": "Convert raw byte strings into tensors.\n\nArgs:\ninput_bytes:\nEach element of the input Tensor is converted to an array of bytes.\nout_type:\n`DType` of the output. Acceptable types are `half`, `float`, `double`,\n`int32`, `uint16`, `uint8`, `int16`, `int8`, `int64`.\nlittle_endian:\nWhether the `input_bytes` data is in little-endian format. Data will be\nconverted into host byte order if necessary.\nname: A name for the operation (optional).\nbytes: Deprecated parameter. Use `input_bytes` instead.\n\nReturns:\nA `Tensor` object storing the decoded bytes.", "source": "github-repos"} {"code": "def orth_gs(order, dist, normed=False, sort='GR', cross_truncation=1.0, **kws):\n logger = logging.getLogger(__name__)\n dim = len(dist)\n if isinstance(order, int):\n if (order == 0):\n return chaospy.poly.Poly(1, dim=dim)\n basis = chaospy.poly.basis(0, order, dim, sort, cross_truncation=cross_truncation)\n else:\n basis = order\n basis = list(basis)\n polynomials = [basis[0]]\n if normed:\n for idx in range(1, len(basis)):\n for idy in range(idx):\n orth = chaospy.descriptives.E((basis[idx] * polynomials[idy]), dist, **kws)\n basis[idx] = (basis[idx] - (polynomials[idy] * orth))\n norms = chaospy.descriptives.E((polynomials[(- 1)] ** 2), dist, **kws)\n if (norms <= 0):\n logger.warning('Warning: Polynomial cutoff at term %d', idx)\n break\n basis[idx] = (basis[idx] / numpy.sqrt(norms))\n polynomials.append(basis[idx])\n else:\n norms = [1.0]\n for idx in range(1, len(basis)):\n for idy in range(idx):\n orth = chaospy.descriptives.E((basis[idx] * polynomials[idy]), dist, **kws)\n basis[idx] = (basis[idx] - ((polynomials[idy] * orth) / norms[idy]))\n norms.append(chaospy.descriptives.E((polynomials[(- 1)] ** 2), dist, **kws))\n if (norms[(- 1)] <= 0):\n logger.warning('Warning: Polynomial cutoff at term %d', idx)\n break\n polynomials.append(basis[idx])\n return chaospy.poly.Poly(polynomials, dim=dim, shape=(len(polynomials),))", "docstring": "Gram-Schmidt process for generating orthogonal polynomials.\n\nArgs:\norder (int, Poly):\nThe upper polynomial order. 
Alternative a custom polynomial basis\ncan be used.\ndist (Dist):\nWeighting distribution(s) defining orthogonality.\nnormed (bool):\nIf True orthonormal polynomials will be used instead of monic.\nsort (str):\nOrdering argument passed to poly.basis. If custom basis is used,\nargument is ignored.\ncross_truncation (float):\nUse hyperbolic cross truncation scheme to reduce the number of\nterms in expansion.\n\nReturns:\n(Poly):\nThe orthogonal polynomial expansion.\n\nExamples:\n>>> Z = chaospy.J(chaospy.Normal(), chaospy.Normal())\n>>> print(chaospy.around(chaospy.orth_gs(2, Z), 4))\n[1.0, q1, q0, q1^2-1.0, q0q1, q0^2-1.0]", "source": "codesearchnet"} {"code": "def EqualTo(self, value):\n self._awql = self._CreateSingleValueCondition(value, '=')\n return self._query_builder", "docstring": "Sets the type of the WHERE clause as \"equal to\".\n\nArgs:\nvalue: The value to be used in the WHERE condition.\n\nReturns:\nThe query builder that this WHERE builder links to.", "source": "codesearchnet"} {"code": "def get_from_env_or_user_or_default(environ_cp, var_name, ask_for_var, var_default):\n var = environ_cp.get(var_name)\n if var is None:\n var = get_input(ask_for_var)\n print('\\n')\n if not var:\n var = var_default\n return var", "docstring": "Get var_name either from env, or user or default.\n\nIf var_name has been set as environment variable, use the preset value, else\nask for user input. If no input is provided, the default is used.\n\nArgs:\nenviron_cp: copy of the os.environ.\nvar_name: string for name of environment variable, e.g. \"TF_NEED_CUDA\".\nask_for_var: string for how to ask for user input.\nvar_default: default value string.\n\nReturns:\nstring value for var_name", "source": "github-repos"} {"code": "def ensure_mingw_drive(win32_path):\n r\n win32_drive, _path = splitdrive(win32_path)\n mingw_drive = '/' + win32_drive[:-1].lower()\n mingw_path = mingw_drive + _path\n return mingw_path", "docstring": "r\"\"\" replaces windows drives with mingw style drives\n\nArgs:\nwin32_path (str):\n\nCommandLine:\npython -m utool.util_path --test-ensure_mingw_drive\n\nExample:\n>>> # DISABLE_DOCTEST\n>>> from utool.util_path import * # NOQA\n>>> win32_path = r'C:/Program Files/Foobar'\n>>> result = ensure_mingw_drive(win32_path)\n>>> print(result)\n/c/Program Files/Foobar", "source": "juraj-google-style"} {"code": "def __init__(self, clean_stop_exception_types=None):\n if clean_stop_exception_types is None:\n clean_stop_exception_types = (errors.OutOfRangeError,)\n self._clean_stop_exception_types = tuple(clean_stop_exception_types)\n self._lock = threading.Lock()\n self._stop_event = threading.Event()\n self._exc_info_to_raise = None\n self._joined = False\n self._registered_threads = set()", "docstring": "Create a new Coordinator.\n\nArgs:\nclean_stop_exception_types: Optional tuple of Exception types that should\ncause a clean stop of the coordinator. If an exception of one of these\ntypes is reported to `request_stop(ex)` the coordinator will behave as\nif `request_stop(None)` was called. Defaults to\n`(tf.errors.OutOfRangeError,)` which is used by input queues to signal\nthe end of input. 
When feeding training data from a Python iterator it\nis common to add `StopIteration` to this list.", "source": "github-repos"} {"code": "def get_minutes_description(self):\n return self.get_segment_description(self._expression_parts[1], _('every minute'), (lambda s: s), (lambda s: _('every {0} minutes').format(s)), (lambda s: _('minutes {0} through {1} past the hour')), (lambda s: ('' if (s == '0') else _('at {0} minutes past the hour'))))", "docstring": "Generates a description for only the MINUTE portion of the expression\n\nReturns:\nThe MINUTE description", "source": "codesearchnet"} {"code": "def get_height(self, id=None, endpoint=None):\n \n return self._call_endpoint(GET_BLOCK_COUNT, id=id, endpoint=endpoint)", "docstring": "Get the current height of the blockchain\nArgs:\nid: (int, optional) id to use for response tracking\nendpoint: (RPCEndpoint, optional) endpoint to specify to use\n\nReturns:\njson object of the result or the error encountered in the RPC call", "source": "juraj-google-style"} {"code": "def remove_callback(self, callback):\n self.callback_handler.remove_callback(callback)", "docstring": "Remove a callback from the current list of [`~transformers.TrainerCallback`].\n\nArgs:\ncallback (`type` or [`~transformers.TrainerCallback]`):\nA [`~transformers.TrainerCallback`] class or an instance of a [`~transformers.TrainerCallback`]. In the\nfirst case, will remove the first member of that class found in the list of callbacks.", "source": "github-repos"} {"code": "def run_notebook_hook(notebook_type, action, *args, **kw):\n if (notebook_type not in _HOOKS):\n raise RuntimeError(('no display hook installed for notebook type %r' % notebook_type))\n if (_HOOKS[notebook_type][action] is None):\n raise RuntimeError(('notebook hook for %r did not install %r action' % notebook_type), action)\n return _HOOKS[notebook_type][action](*args, **kw)", "docstring": "Run an installed notebook hook with supplied arguments.\n\nArgs:\nnoteboook_type (str) :\nName of an existing installed notebook hook\n\nactions (str) :\nName of the hook action to execute, ``'doc'`` or ``'app'``\n\nAll other arguments and keyword arguments are passed to the hook action\nexactly as supplied.\n\nReturns:\nResult of the hook action, as-is\n\nRaises:\nRuntimeError\nIf the hook or specific action is not installed", "source": "codesearchnet"} {"code": "def remove_showcase(self, showcase):\n \n \n dataset_showcase = self._get_dataset_showcase_dict(showcase)\n showcase = hdx.data.showcase.Showcase({'id': dataset_showcase['showcase_id']}, configuration=self.configuration)\n showcase._write_to_hdx('disassociate', dataset_showcase, 'package_id')", "docstring": "Remove dataset from showcase\n\nArgs:\nshowcase (Union[Showcase,Dict,str]): Either a showcase id string or showcase metadata from a Showcase object or dictionary\n\nReturns:\nNone", "source": "juraj-google-style"} {"code": "def verify(self, token, **kwargs):\n \n path = '/runners/verify'\n post_data = {'token': token}\n self.gitlab.http_post(path, post_data=post_data, **kwargs)", "docstring": "Validates authentication credentials for a registered Runner.\n\nArgs:\ntoken (str): The runner's authentication token\n**kwargs: Extra options to send to the server (e.g. 
sudo)\n\nRaises:\nGitlabAuthenticationError: If authentication is not correct\nGitlabVerifyError: If the server failed to verify the token", "source": "juraj-google-style"} {"code": "def make_directory_writable(dirname):\n \n retval = shell_call(['docker', 'run', '-v',\n '{0}:/output_dir'.format(dirname),\n 'busybox:1.27.2',\n 'chmod', '-R', 'a+rwx', '/output_dir'])\n if not retval:\n logging.error('Failed to change permissions on directory: %s', dirname)\n return retval", "docstring": "Makes directory readable and writable by everybody.\n\nArgs:\ndirname: name of the directory\n\nReturns:\nTrue if operation was successfull\n\nIf you run something inside Docker container and it writes files, then\nthese files will be written as root user with restricted permissions.\nSo to be able to read/modify these files outside of Docker you have to change\npermissions to be world readable and writable.", "source": "juraj-google-style"} {"code": "def __init__(self, variables, name='TPUReplicatedVariable'):\n if not isinstance(variables, abc.Sequence) or not variables or any((not isinstance(v, variables_lib.Variable) for v in variables)):\n raise TypeError(f'Argument `variables` should be a non-empty list of `variables.Variable`s. Received {variables}')\n if any((v.dtype != variables[0].dtype for v in variables)):\n raise ValueError(f'All elements in argument `variables` must have the same dtype. Received dtypes: {[v.dtype for v in variables]}')\n if any((v.shape != variables[0].shape for v in variables)):\n raise ValueError(f'All elements in argument `variables` must have the same shape. Received shapes: {[v.shape for v in variables]}')\n self._vars = variables\n self._name = name\n self._common_name = self._name.split(':')[0]\n self._cached_value = None", "docstring": "Treats `variables` as a replicated list of `tf.Variable`s.\n\nExample:\n\n```\nvariables = [\ntf.Variable(..., shape=(10, 100), dtype=tf.float32),\ntf.Variable(..., shape=(10, 100), dtype=tf.float32),\ntf.Variable(..., shape=(10, 100), dtype=tf.float32),\ntf.Variable(..., shape=(10, 100), dtype=tf.float32),\n]\nreplicated_variable = TPUReplicatedVariable(variables)\nassert replicated_variable.shape.as_list() == [10, 100]\n```\n\nArgs:\nvariables: A list of `ResourceVariable`s that comprise this replicated\nvariable. Variables should not be shared between different\n`TPUReplicatedVariable` objects.\nname: String. Name of this container. 
Defaults to \"TPUReplicatedVariable\".", "source": "github-repos"} {"code": "def compute_q(self, query_antecedent):\n ret = mtf.einsum([query_antecedent, self.wq], reduced_dims=[self.query_input_dim])\n if self.combine_dims:\n ret = mtf.replace_dimensions(ret, ret.shape.dims[(- 1)], self.q_dims)\n return ret", "docstring": "Compute query Tensor q.\n\nArgs:\nquery_antecedent: a Tensor with dimensions\n{query_input_dim} + other_dims\nReturns:\na Tensor with dimensions\nquery_heads_dims + {key_dim} + other_dims", "source": "codesearchnet"} {"code": "def _parse_logline_timestamp(t):\n (date, time) = t.split(' ')\n (month, day) = date.split('-')\n (h, m, s) = time.split(':')\n (s, ms) = s.split('.')\n return (month, day, h, m, s, ms)", "docstring": "Parses a logline timestamp into a tuple.\n\nArgs:\nt: Timestamp in logline format.\n\nReturns:\nAn iterable of date and time elements in the order of month, day, hour,\nminute, second, microsecond.", "source": "codesearchnet"} {"code": "def mutex(self, mutex, **kwargs):\n indicator_obj = Mutex(mutex, **kwargs)\n return self._indicator(indicator_obj)", "docstring": "Add Mutex data to Batch object.\n\nArgs:\nmutex (str): The value for this Indicator.\nconfidence (str, kwargs): The threat confidence for this Indicator.\ndate_added (str, kwargs): The date timestamp the Indicator was created.\nlast_modified (str, kwargs): The date timestamp the Indicator was last modified.\nrating (str, kwargs): The threat rating for this Indicator.\nxid (str, kwargs): The external id for this Indicator.\n\nReturns:\nobj: An instance of Mutex.", "source": "codesearchnet"} {"code": "def to_avro(file_path_or_buffer, df, schema=None, codec='null', append=False):\n if (schema is None):\n schema = __schema_infer(df)\n open_mode = ('wb' if (not append) else 'a+b')\n if isinstance(file_path_or_buffer, six.string_types):\n with open(file_path_or_buffer, open_mode) as f:\n fastavro.writer(f, schema=schema, records=df.to_dict('records'), codec=codec)\n else:\n fastavro.writer(file_path_or_buffer, schema=schema, records=df.to_dict('records'), codec=codec)", "docstring": "Avro file writer.\n\nArgs:\nfile_path_or_buffer:\nOutput file path or file-like object.\ndf: pd.DataFrame.\nschema: Dict of Avro schema.\nIf it's set None, inferring schema.\nappend: Boolean to control if will append to existing file\ncodec: A string indicating the compression codec to use.\nDefault is no compression (\"null\"), other acceptable values are\n\"snappy\" and \"deflate\". 
You must have python-snappy installed to use\nthe snappy codec.", "source": "codesearchnet"} {"code": "def _ConvertFieldValuePair(js, message):\n names = []\n message_descriptor = message.DESCRIPTOR\n for name in js:\n try:\n field = message_descriptor.fields_by_camelcase_name.get(name, None)\n if (not field):\n raise ParseError('Message type \"{0}\" has no field named \"{1}\".'.format(message_descriptor.full_name, name))\n if (name in names):\n raise ParseError('Message type \"{0}\" should not have multiple \"{1}\" fields.'.format(message.DESCRIPTOR.full_name, name))\n names.append(name)\n if (field.containing_oneof is not None):\n oneof_name = field.containing_oneof.name\n if (oneof_name in names):\n raise ParseError('Message type \"{0}\" should not have multiple \"{1}\" oneof fields.'.format(message.DESCRIPTOR.full_name, oneof_name))\n names.append(oneof_name)\n value = js[name]\n if (value is None):\n message.ClearField(field.name)\n continue\n if _IsMapEntry(field):\n message.ClearField(field.name)\n _ConvertMapFieldValue(value, message, field)\n elif (field.label == descriptor.FieldDescriptor.LABEL_REPEATED):\n message.ClearField(field.name)\n if (not isinstance(value, list)):\n raise ParseError('repeated field {0} must be in [] which is {1}.'.format(name, value))\n if (field.cpp_type == descriptor.FieldDescriptor.CPPTYPE_MESSAGE):\n for item in value:\n sub_message = getattr(message, field.name).add()\n if ((item is None) and (sub_message.DESCRIPTOR.full_name != 'google.protobuf.Value')):\n raise ParseError('null is not allowed to be used as an element in a repeated field.')\n _ConvertMessage(item, sub_message)\n else:\n for item in value:\n if (item is None):\n raise ParseError('null is not allowed to be used as an element in a repeated field.')\n getattr(message, field.name).append(_ConvertScalarFieldValue(item, field))\n elif (field.cpp_type == descriptor.FieldDescriptor.CPPTYPE_MESSAGE):\n sub_message = getattr(message, field.name)\n _ConvertMessage(value, sub_message)\n else:\n setattr(message, field.name, _ConvertScalarFieldValue(value, field))\n except ParseError as e:\n if (field and (field.containing_oneof is None)):\n raise ParseError('Failed to parse {0} field: {1}'.format(name, e))\n else:\n raise ParseError(str(e))\n except ValueError as e:\n raise ParseError('Failed to parse {0} field: {1}.'.format(name, e))\n except TypeError as e:\n raise ParseError('Failed to parse {0} field: {1}.'.format(name, e))", "docstring": "Convert field value pairs into regular message.\n\nArgs:\njs: A JSON object to convert the field value pairs.\nmessage: A regular protocol message to record the data.\n\nRaises:\nParseError: In case of problems converting.", "source": "codesearchnet"} {"code": "def calculate_energy(self, energies):\n return sum([(amt * energies[c]) for (amt, c) in zip(self._coeffs, self._all_comp)])", "docstring": "Calculates the energy of the reaction.\n\nArgs:\nenergies ({Composition: float}): Energy for each composition.\nE.g ., {comp1: energy1, comp2: energy2}.\n\nReturns:\nreaction energy as a float.", "source": "codesearchnet"} {"code": "def _compute_theoretical_jacobian(f, y_shape, y_dtype, xs, param):\n x = xs[param]\n x_shape = tuple(x.shape) + (2,) if x.dtype.is_complex else x.shape\n y_factor = 2 if y_dtype.is_complex else 1\n x_size = _product(x_shape)\n x_val_size = _product(x_shape[1:])\n y_size = _product(y_shape) * y_factor\n jacobian = np.zeros((y_size, x_size), dtype=x.dtype.real_dtype.as_numpy_dtype)\n dy_data = np.zeros(y_shape, 
dtype=y_dtype.as_numpy_dtype)\n dy_data_flat = dy_data.ravel().view(y_dtype.real_dtype.as_numpy_dtype)\n grad_fn_unprep = backprop.gradients_function(f, [param])\n grad_fn = _prepare(lambda dy, *xs: grad_fn_unprep(*xs, dy=dy), [y_dtype] + [z.dtype for z in xs], [None] + [z.shape for z in xs])\n for row in range(y_size):\n dy_data_flat[row] = 1\n grad = _to_numpy(grad_fn(dy_data, *xs)[0])\n grad = _eval_indexed_slices(grad)\n if isinstance(grad, indexed_slices.IndexedSlicesValue):\n for i, v in zip(grad.indices, grad.values):\n c_begin = i * x_val_size\n c_end = c_begin + x_val_size\n jacobian[row, c_begin:c_end] += v.flat\n elif grad is not None:\n jacobian[row, :] = grad.ravel().view(jacobian.dtype)\n dy_data_flat[row] = 0\n if y_size == 0:\n grad = _to_numpy(grad_fn(dy_data, *xs)[0])\n if grad.shape != x.shape:\n raise ValueError('Empty gradient has wrong shape: expected %s, got %s' % (x.shape, grad.shape))\n if np.any(grad):\n raise ValueError('Empty tensor with nonzero gradients')\n logging.vlog(1, 'Theoretical Jacobian =\\n%s', jacobian)\n return jacobian", "docstring": "Computes the theoretical Jacobian for f regarding xs[param].\n\nOne can think of the relation among f, xs and y as y = f(xs).\n\nArgs:\nf: the function.\ny_shape: the shape of the result.\ny_dtype: the dtype of the result.\nxs: a list of tensors.\nparam: the index of the target parameter.\n\nReturns:\nA 2-d numpy array representing the Jacobian. It has \"y_size\" rows\nand \"x_size\" columns where \"x_size\" is the number of elements in xs[param]\nand \"y_size\" is the number of elements in the result.\n\nRaises:\nValueError: If result is empty but the gradient is nonzero.", "source": "github-repos"} {"code": "def CallDhclient(\n interfaces, logger, dhclient_script=None):\n \n logger.info('Enabling the Ethernet interfaces %s.', interfaces)\n\n dhclient_command = ['dhclient']\n\n if dhclient_script and os.path.exists(dhclient_script):\n dhclient_command += ['-sf', dhclient_script]\n\n try:\n subprocess.check_call(dhclient_command + ['-x'] + interfaces)\n subprocess.check_call(dhclient_command + interfaces)\n except subprocess.CalledProcessError:\n logger.warning('Could not enable interfaces %s.', interfaces)", "docstring": "Configure the network interfaces using dhclient.\n\nArgs:\ninterfaces: list of string, the output device names to enable.\nlogger: logger object, used to write to SysLog and serial port.\ndhclient_script: string, the path to a dhclient script used by dhclient.", "source": "juraj-google-style"} {"code": "def expand_path_cfg(path_cfg, alias_dict={}, overriding_kargs={}):\n if isinstance(path_cfg, str):\n return _expand_str(path_cfg, alias_dict, overriding_kargs)\n if isinstance(path_cfg, dict):\n return _expand_dict(path_cfg, alias_dict)\n return _expand_tuple(path_cfg, alias_dict, overriding_kargs)", "docstring": "expand a path config\n\nArgs:\npath_cfg (str, tuple, dict): a config for path\nalias_dict (dict): a dict for aliases\noverriding_kargs (dict): to be used for recursive call", "source": "codesearchnet"} {"code": "def __init__(self, **kwargs):\n \n if isinstance(kwargs.get(\"learning_rate\"), float) and isinstance(kwargs.get(\"learning_rate_decay\"), float):\n KerasModel.__init__(self, **kwargs)\n else:\n KerasModel.__init__(self, **kwargs)\n LRScheduledModel.__init__(self, **kwargs)", "docstring": "Initialize model with given parameters\n\nArgs:\n**kwargs: dictionary of parameters", "source": "juraj-google-style"} {"code": "def groups_createChild(self, *, channel: str, **kwargs) -> 
SlackResponse:\n \n self._validate_xoxp_token()\n kwargs.update({\"channel\": channel})\n return self.api_call(\"groups.createChild\", http_verb=\"GET\", params=kwargs)", "docstring": "Clones and archives a private channel.\n\nArgs:\nchannel (str): The group id. e.g. 'G1234567890'", "source": "juraj-google-style"} {"code": "def get_gpus(num_gpu=1, worker_index=(- 1)):\n list_gpus = subprocess.check_output(['nvidia-smi', '--list-gpus']).decode()\n logging.debug('all GPUs:\\n{0}'.format(list_gpus))\n gpus = [x for x in list_gpus.split('\\n') if (len(x) > 0)]\n\n def parse_gpu(gpu_str):\n cols = gpu_str.split(' ')\n return (cols[5].split(')')[0], cols[1].split(':')[0])\n gpu_list = [parse_gpu(gpu) for gpu in gpus]\n free_gpus = []\n retries = 0\n while ((len(free_gpus) < num_gpu) and (retries < MAX_RETRIES)):\n smi_output = subprocess.check_output(['nvidia-smi', '--format=csv,noheader,nounits', '--query-compute-apps=gpu_uuid']).decode()\n logging.debug('busy GPUs:\\n{0}'.format(smi_output))\n busy_uuids = [x for x in smi_output.split('\\n') if (len(x) > 0)]\n for (uuid, index) in gpu_list:\n if (uuid not in busy_uuids):\n free_gpus.append(index)\n if (len(free_gpus) < num_gpu):\n logging.warn('Unable to find available GPUs: requested={0}, available={1}'.format(num_gpu, len(free_gpus)))\n retries += 1\n time.sleep((30 * retries))\n free_gpus = []\n logging.info('Available GPUs: {}'.format(free_gpus))\n if (len(free_gpus) < num_gpu):\n smi_output = subprocess.check_output(['nvidia-smi', '--format=csv', '--query-compute-apps=gpu_uuid,pid,process_name,used_gpu_memory']).decode()\n logging.info(': {0}'.format(smi_output))\n raise Exception('Unable to find {} free GPU(s)\\n{}'.format(num_gpu, smi_output))\n num_available = len(free_gpus)\n if (worker_index == (- 1)):\n random.shuffle(free_gpus)\n proposed_gpus = free_gpus[:num_gpu]\n else:\n if (((worker_index * num_gpu) + num_gpu) > num_available):\n worker_index = ((worker_index * num_gpu) % num_available)\n proposed_gpus = free_gpus[(worker_index * num_gpu):((worker_index * num_gpu) + num_gpu)]\n logging.info('Proposed GPUs: {}'.format(proposed_gpus))\n return ','.join((str(x) for x in proposed_gpus))", "docstring": "Get list of free GPUs according to nvidia-smi.\n\nThis will retry for ``MAX_RETRIES`` times until the requested number of GPUs are available.\n\nArgs:\n:num_gpu: number of GPUs desired.\n:worker_index: index \"hint\" for allocation of available GPUs.\n\nReturns:\nComma-delimited string of GPU ids, or raises an Exception if the requested number of GPUs could not be found.", "source": "codesearchnet"} {"code": "def module_selected(self, module_name, module_ui):\n if (self.current_button == self.module_buttons[module_name]):\n return\n self.module_buttons[module_name].config(bg='\n if (self.current_button is not None):\n self.current_button.config(bg='white')\n self.current_button = self.module_buttons[module_name]\n self.clear_ui()\n try:\n module_ui_frame = ModuleUIBaseFrame(self.module_ui, module_name, module_ui)\n module_ui_frame.grid(column=0, row=0, sticky='W E N S')\n except Exception as e:\n logger.error('Could not load UI for {}'.format(module_name))\n logger.exception(e)\n tk.Label(self.module_ui, text='Could not load UI for {}'.format(module_name)).grid(column=0, row=0, padx=0, pady=0, sticky='W E N S')", "docstring": "Called when a module is selected\n\nArgs:\nmodule_name (str): The name of the module\nmodule_ui: The function to call to create the module's UI", "source": "codesearchnet"} {"code": "def prune(self, 
regex=r\".*\"):\n \n return filetree(self.root, ignore=self.ignore, regex=regex)", "docstring": "Prune leaves of filetree according to specified\nregular expression.\n\nArgs:\nregex (str): Regular expression to use in pruning tree.", "source": "juraj-google-style"} {"code": "def initialize_all_tables(name='init_all_tables'):\n return tables_initializer(name)", "docstring": "Returns an Op that initializes all tables of the default graph.\n\nArgs:\nname: Optional name for the initialization op.\n\nReturns:\nAn Op that initializes all tables. Note that if there are\nnot tables the returned Op is a NoOp.", "source": "github-repos"} {"code": "def call(self, hidden_states: tf.Tensor, attention_mask: tf.Tensor, layer_head_mask: tf.Tensor, training: bool=False):\n residual = hidden_states\n hidden_states = self.self_attn_layer_norm(hidden_states)\n hidden_states, self_attn_weights, _ = self.self_attn(hidden_states=hidden_states, attention_mask=attention_mask, layer_head_mask=layer_head_mask, training=training)\n tf.debugging.assert_equal(shape_list(hidden_states), shape_list(residual), message=f'Self attn modified the shape of query {shape_list(residual)} to {shape_list(hidden_states)}')\n hidden_states = self.dropout(hidden_states, training=training)\n hidden_states = residual + hidden_states\n residual = hidden_states\n hidden_states = self.final_layer_norm(hidden_states)\n hidden_states = self.activation_fn(self.fc1(hidden_states))\n hidden_states = self.activation_dropout(hidden_states, training=training)\n hidden_states = self.fc2(hidden_states)\n hidden_states = self.dropout(hidden_states, training=training)\n hidden_states = residual + hidden_states\n return (hidden_states, self_attn_weights)", "docstring": "Args:\nhidden_states (`tf.Tensor`): input to the layer of shape `(batch, seq_len, embed_dim)`\nattention_mask (`tf.Tensor`): attention mask of size\n`(batch, 1, tgt_len, src_len)` where padding elements are indicated by very large negative values.\nlayer_head_mask (`tf.Tensor`): mask for attention heads in a given layer of size\n`(encoder_attention_heads,)`", "source": "github-repos"} {"code": "def from_versions(cls, versions):\n range = cls(None)\n range.bounds = []\n for version in dedup(sorted(versions)):\n lower = _LowerBound(version, True)\n upper = _UpperBound(version, True)\n bound = _Bound(lower, upper)\n range.bounds.append(bound)\n return range", "docstring": "Create a range from a list of versions.\n\nThis method creates a range that contains only the given versions and\nno other. 
Typically the range looks like (for eg) \"==3|==4|==5.1\".\n\nArgs:\nversions: List of Version objects.\n\nReturns:\n`VersionRange` object.", "source": "codesearchnet"} {"code": "def sum2diag(A, D, out=None):\n A = asarray(A, float)\n D = asarray(D, float)\n if (out is None):\n out = copy(A)\n else:\n copyto(out, A)\n einsum('ii->i', out)[:] += D\n return out", "docstring": "r\"\"\"Add values ``D`` to the diagonal of matrix ``A``.\n\nArgs:\nA (array_like): Left-hand side.\nD (array_like or float): Values to add.\nout (:class:`numpy.ndarray`, optional): copy result to.\n\nReturns:\n:class:`numpy.ndarray`: Resulting matrix.", "source": "codesearchnet"} {"code": "def log_softmax(x, axis=-1):\n if any_symbolic_tensors((x,)):\n return LogSoftmax(axis).symbolic_call(x)\n if isinstance(axis, tuple):\n axis_to_keep = [v for v in range(len(x.shape)) if v not in axis]\n x_transposed = backend.numpy.transpose(x, axes=(*axis_to_keep, *axis))\n x_reshaped = backend.numpy.reshape(x_transposed, (*[x.shape[v] for v in axis_to_keep], -1))\n x = backend.nn.log_softmax(x_reshaped, axis=-1)\n x = backend.numpy.reshape(x, x_transposed.shape)\n x = backend.numpy.transpose(x, axes=list(backend.numpy.argsort([*axis_to_keep, *axis])))\n return x\n else:\n return backend.nn.log_softmax(x, axis=axis)", "docstring": "Log-softmax activation function.\n\nIt is defined as:\n`f(x) = x - max(x) - log(sum(exp(x - max(x))))`\n\nArgs:\nx: Input tensor.\naxis: Integer, axis along which the log-softmax is applied.\nDefaults to `-1`.\n\nReturns:\nA tensor with the same shape as `x`.\n\nExample:\n\n>>> x = np.array([-1., 0., 1.])\n>>> x_log_softmax = keras.ops.log_softmax(x)\n>>> print(x_log_softmax)\narray([-2.40760596, -1.40760596, -0.40760596], shape=(3,), dtype=float64)", "source": "github-repos"} {"code": "def add_state_sensors(self, agent_name, sensors):\n \n if isinstance(sensors, list):\n for sensor in sensors:\n self.add_state_sensors(agent_name, sensor)\n else:\n if agent_name not in self._sensor_map:\n self._sensor_map[agent_name] = dict()\n\n self._sensor_map[agent_name][sensors] = self._client.malloc(agent_name + \"_\" + Sensors.name(sensors),\n Sensors.shape(sensors),\n Sensors.dtype(sensors))", "docstring": "Adds a sensor to a particular agent. This only works if the world you are running also includes\nthat particular sensor on the agent.\n\nArgs:\nagent_name (str): The name of the agent to add the sensor to.\nsensors (:obj:`HolodeckSensor` or list of :obj:`HolodeckSensor`): Sensors to add to the agent.\nShould be objects that inherit from :obj:`HolodeckSensor`.", "source": "juraj-google-style"} {"code": "def _parse_description(self, config):\n value = DESCRIPTION_RE.search(config).group('value')\n return dict(description=value)", "docstring": "_parse_description scans the provided configuration block and\nextracts the vrf description value. 
The return dict is intended to\nbe merged into the response dict.\n\nArgs:\nconfig (str): The vrf configuration block from the nodes\nrunning configuration\n\nReturns:\ndict: resource dict attribute", "source": "codesearchnet"} {"code": "def print_run_bidirectional_blast(reference, other_genome, dbtype, outdir):\n if (dbtype == 'nucl'):\n command = 'blastn'\n elif (dbtype == 'prot'):\n command = 'blastp'\n else:\n raise ValueError('dbtype must be \"nucl\" or \"prot\"')\n (r_folder, r_name, r_ext) = utils.split_folder_and_path(reference)\n (g_folder, g_name, g_ext) = utils.split_folder_and_path(other_genome)\n r_vs_g_name = ((r_name + '_vs_') + g_name)\n r_vs_g = (r_vs_g_name + '_blast.out')\n if (op.exists(op.join(outdir, r_vs_g)) and (os.stat(op.join(outdir, r_vs_g)).st_size != 0)):\n log.debug('{} vs {} BLAST already run'.format(r_name, g_name))\n else:\n cmd = '{} -query {} -db {} -outfmt 6 -out {}'.format(command, reference, g_name, r_vs_g)\n utils.write_torque_script(command=cmd, err=r_vs_g_name, out=r_vs_g_name, name=r_vs_g_name, outfile=(op.join(outdir, r_vs_g_name) + '.sh'), walltime='00:15:00', queue='regular')\n g_vs_r_name = ((g_name + '_vs_') + r_name)\n g_vs_r = (g_vs_r_name + '_blast.out')\n if (op.exists(op.join(outdir, g_vs_r)) and (os.stat(op.join(outdir, g_vs_r)).st_size != 0)):\n log.debug('{} vs {} BLAST already run'.format(g_name, r_name))\n else:\n cmd = '{} -query {} -db {} -outfmt 6 -out {}'.format(command, other_genome, r_name, g_vs_r)\n utils.write_torque_script(command=cmd, err=g_vs_r_name, out=g_vs_r_name, name=g_vs_r_name, outfile=(op.join(outdir, g_vs_r_name) + '.sh'), walltime='00:15:00', queue='regular')", "docstring": "Write torque submission files for running bidirectional blast on a server and print execution command.\n\nArgs:\nreference (str): Path to \"reference\" genome, aka your \"base strain\"\nother_genome (str): Path to other genome which will be BLASTed to the reference\ndbtype (str): \"nucl\" or \"prot\" - what format your genome files are in\noutdir (str): Path to folder where Torque scripts should be placed", "source": "codesearchnet"} {"code": "def log(self, msg):\n \n\n time = self.get_time()\n\n msg = \"{:s}\\t {:s}\".format(time, msg)\n\n self.history.append(msg)\n self.history_model.insertRow(0, QtGui.QStandardItem(msg))", "docstring": "log function\nArgs:\nmsg: the text message to be logged", "source": "juraj-google-style"} {"code": "def new_space(self, name=None, bases=None, formula=None, *, refs=None, source=None, is_derived=False, prefix=''):\n from modelx.core.space import StaticSpaceImpl\n if (name is None):\n name = self.spacenamer.get_next(self.namespace, prefix)\n if (name in self.namespace):\n raise ValueError((\"Name '%s' already exists.\" % name))\n if ((not prefix) and (not is_valid_name(name))):\n raise ValueError((\"Invalid name '%s'.\" % name))\n space = self._new_space(name=name, formula=formula, refs=refs, source=source, is_derived=is_derived)\n self._set_space(space)\n self.model.spacegraph.add_space(space)\n if (bases is not None):\n if isinstance(bases, StaticSpaceImpl):\n bases = [bases]\n space.add_bases(bases)\n return space", "docstring": "Create a new child space.\n\nArgs:\nname (str): Name of the space. 
If omitted, the space is\ncreated automatically.\nbases: If specified, the new space becomes a derived space of\nthe `base` space.\nformula: Function whose parameters used to set space parameters.\nrefs: a mapping of refs to be added.\narguments: ordered dict of space parameter names to their values.\nsource: A source module from which cell definitions are read.\nprefix: Prefix to the autogenerated name when name is None.", "source": "codesearchnet"} {"code": "def get_naive(dt):\n \n if not dt.tzinfo:\n return dt\n if hasattr(dt, \"asdatetime\"):\n return dt.asdatetime()\n return dt.replace(tzinfo=None)", "docstring": "Gets a naive datetime from a datetime.\n\ndatetime_tz objects can't just have tzinfo replaced with None, you need to\ncall asdatetime.\n\nArgs:\ndt: datetime object.\n\nReturns:\ndatetime object without any timezone information.", "source": "juraj-google-style"} {"code": "def create_cosmosdb_account(access_token, subscription_id, rgname, account_name, location, cosmosdb_kind):\n endpoint = ''.join([get_rm_endpoint(), '/subscriptions/', subscription_id, '/resourcegroups/', rgname, '/providers/Microsoft.DocumentDB/databaseAccounts/', account_name, '?api-version=', COSMOSDB_API])\n cosmosdb_body = {'location': location, 'kind': cosmosdb_kind, 'properties': {'databaseAccountOfferType': 'Standard', 'locations': [{'failoverPriority': 0, 'locationName': location}]}}\n body = json.dumps(cosmosdb_body)\n return do_put(endpoint, body, access_token)", "docstring": "Create a new Cosmos DB account in the named resource group, with the named location.\n\nArgs:\naccess_token (str): A valid Azure authentication token.\nsubscription_id (str): Azure subscription id.\nrgname (str): Azure resource group name.\naccount_name (str): Name of the new Cosmos DB account.\nlocation (str): Azure data center location. E.g. westus.\ncosmosdb_kind (str): Database type. E.g. GlobalDocumentDB.\n\nReturns:\nHTTP response. JSON body of storage account properties.", "source": "codesearchnet"} {"code": "def get_attribute_id(self, attribute_key):\n attribute = self.attribute_key_map.get(attribute_key)\n has_reserved_prefix = attribute_key.startswith(RESERVED_ATTRIBUTE_PREFIX)\n if attribute:\n if has_reserved_prefix:\n self.logger.warning(('Attribute %s unexpectedly has reserved prefix %s; using attribute ID instead of reserved attribute name.' % (attribute_key, RESERVED_ATTRIBUTE_PREFIX)))\n return attribute.id\n if has_reserved_prefix:\n return attribute_key\n self.logger.error(('Attribute \"%s\" is not in datafile.' % attribute_key))\n self.error_handler.handle_error(exceptions.InvalidAttributeException(enums.Errors.INVALID_ATTRIBUTE_ERROR))\n return None", "docstring": "Get attribute ID for the provided attribute key.\n\nArgs:\nattribute_key: Attribute key for which attribute is to be fetched.\n\nReturns:\nAttribute ID corresponding to the provided attribute key.", "source": "codesearchnet"} {"code": "def generate_example(config, ext='json'):\n \n template_name = 'example.{0}'.format(ext.lower())\n template = ENV.get_template(template_name)\n return template.render(config=config)", "docstring": "Generate an example file based on the given Configuration object.\n\nArgs:\nconfig (confpy.core.configuration.Configuration): The configuration\nobject on which to base the example.\next (str): The file extension to render. 
Choices: JSON and INI.\n\nReturns:\nstr: The text of the example file.", "source": "juraj-google-style"} {"code": "def maybe_reduce(nodes):\n (_, num_nodes) = nodes.shape\n if (num_nodes < 2):\n return (False, nodes)\n elif (num_nodes == 2):\n projection = _PROJECTION0\n denom = _PROJ_DENOM0\n elif (num_nodes == 3):\n projection = _PROJECTION1\n denom = _PROJ_DENOM1\n elif (num_nodes == 4):\n projection = _PROJECTION2\n denom = _PROJ_DENOM2\n elif (num_nodes == 5):\n projection = _PROJECTION3\n denom = _PROJ_DENOM3\n else:\n raise _helpers.UnsupportedDegree((num_nodes - 1), supported=(0, 1, 2, 3, 4))\n projected = (_helpers.matrix_product(nodes, projection) / denom)\n relative_err = projection_error(nodes, projected)\n if (relative_err < _REDUCE_THRESHOLD):\n return (True, reduce_pseudo_inverse(nodes))\n else:\n return (False, nodes)", "docstring": "r\"\"\"Reduce nodes in a curve if they are degree-elevated.\n\n.. note::\n\nThis is a helper for :func:`_full_reduce`. Hence there is no\ncorresponding Fortran speedup.\n\nWe check if the nodes are degree-elevated by projecting onto the\nspace of degree-elevated curves of the same degree, then comparing\nto the projection. We form the projection by taking the corresponding\n(right) elevation matrix :math:`E` (from one degree lower) and forming\n:math:`E^T \\left(E E^T\\right)^{-1} E`.\n\nArgs:\nnodes (numpy.ndarray): The nodes in the curve.\n\nReturns:\nTuple[bool, numpy.ndarray]: Pair of values. The first indicates\nif the ``nodes`` were reduced. The second is the resulting nodes,\neither the reduced ones or the original passed in.\n\nRaises:\n.UnsupportedDegree: If the curve is degree 5 or higher.", "source": "codesearchnet"} {"code": "def get_course_completions(self, enterprise_customer, days):\n \n return PersistentCourseGrade.objects.filter(\n passed_timestamp__gt=datetime.datetime.now() - datetime.timedelta(days=days)\n ).filter(\n user_id__in=enterprise_customer.enterprise_customer_users.values_list('user_id', flat=True)\n )", "docstring": "Get course completions via PersistentCourseGrade for all the learners of given enterprise customer.\n\nArguments:\nenterprise_customer (EnterpriseCustomer): Include Course enrollments for learners\nof this enterprise customer.\ndays (int): Include course enrollment of this number of days.\n\nReturns:\n(list): A list of PersistentCourseGrade objects.", "source": "juraj-google-style"} {"code": "def speed_clustering(clf, points, min_time):\n changepoints = detect_changepoints(points, min_time)\n cp_info = []\n for i in range(0, (len(changepoints) - 1)):\n from_index = changepoints[i]\n to_index = changepoints[(i + 1)]\n info = classify(clf, points[from_index:to_index], min_time, from_index, to_index)\n if info:\n cp_info.append(info)\n return group_modes(cp_info)", "docstring": "Transportation mode infering, based on changepoint segmentation\n\nArgs:\nclf (:obj:`Classifier`): Classifier to use\npoints (:obj:`list` of :obj:`Point`)\nmin_time (float): Min time, in seconds, before do another segmentation\nReturns:\n:obj:`list` of :obj:`dict`", "source": "codesearchnet"} {"code": "def insert(parent: ScheduleComponent, time: int, child: ScheduleComponent, name: str=None) -> Schedule:\n return union(parent, (time, child), name=name)", "docstring": "Return a new schedule with the `child` schedule inserted into the `parent` at `start_time`.\n\nArgs:\nparent: Schedule to be inserted into\ntime: Time to be inserted defined with respect to `parent`\nchild: Schedule to insert\nname: Name of the new schedule. 
Defaults to name of parent", "source": "codesearchnet"} {"code": "def _get_populate_from_value(instance, field_name: Union[str, Tuple[str]], language: str):\n \n\n if callable(field_name):\n return field_name(instance)\n\n def get_field_value(name):\n value = resolve_object_property(instance, name)\n with translation.override(language):\n return str(value)\n\n if isinstance(field_name, tuple) or isinstance(field_name, list):\n value = '-'.join([\n value\n for value in [get_field_value(name) for name in field_name]\n if value\n ])\n return value\n\n return get_field_value(field_name)", "docstring": "Gets the value to create a slug from in the specified language.\n\nArguments:\ninstance:\nThe model that the field resides on.\n\nfield_name:\nThe name of the field to generate a slug for.\n\nlanguage:\nThe language to generate the slug for.\n\nReturns:\nThe text to generate a slug for.", "source": "juraj-google-style"} {"code": "def _build_update_ops(self, mean, variance, is_training):\n \n\n def build_update_ops():\n \n\n update_mean_op = moving_averages.assign_moving_average(\n variable=self._moving_mean,\n value=tf.reshape(mean, (self._num_channels,)),\n decay=self._decay_rate,\n zero_debias=False,\n name=\"update_moving_mean\").op\n\n update_variance_op = moving_averages.assign_moving_average(\n variable=self._moving_variance,\n value=tf.reshape(variance, (self._num_channels,)),\n decay=self._decay_rate,\n zero_debias=False,\n name=\"update_moving_variance\").op\n\n return update_mean_op, update_variance_op\n\n def build_no_ops():\n return (tf.no_op(), tf.no_op())\n\n \n \n is_training_const = utils.constant_value(is_training)\n if is_training_const is None or is_training_const:\n update_mean_op, update_variance_op = utils.smart_cond(\n is_training,\n build_update_ops,\n build_no_ops,\n )\n return (update_mean_op, update_variance_op)\n else:\n return None", "docstring": "Builds the moving average update ops when using moving variance.\n\nArgs:\nmean: The mean value to update with.\nvariance: The variance value to update with.\nis_training: Boolean Tensor to indicate if we're currently in\ntraining mode.\n\nReturns:\nTuple of `(update_mean_op, update_variance_op)` when `is_training` is or\ncould be `True`. 
Returns `None` when `is_training=False`.", "source": "juraj-google-style"} {"code": "def get_default(__func: Callable, __arg: str) -> str:\n \n return signature(__func).parameters[__arg].default", "docstring": "Fetch default value for a function argument\n\nArgs:\n__func: Function to inspect\n__arg: Argument to extract default value for", "source": "juraj-google-style"} {"code": "def _ConvertRowToUnicode(self, parser_mediator, row):\n \n for key, value in iter(row.items()):\n if isinstance(value, py2to3.UNICODE_TYPE):\n continue\n\n try:\n row[key] = value.decode(self._encoding)\n except UnicodeDecodeError:\n replaced_value = value.decode(self._encoding, errors='replace')\n parser_mediator.ProduceExtractionWarning(\n 'error decoding DSV value: {0:s} as {1:s}, characters have been '\n 'replaced in {2:s}'.format(key, self._encoding, replaced_value))\n row[key] = replaced_value\n\n return row", "docstring": "Converts all strings in a DSV row dict to Unicode.\n\nArgs:\nparser_mediator (ParserMediator): mediates interactions between parsers\nand other components, such as storage and dfvfs.\nrow (dict[str, bytes]): a row from a DSV file, where the dictionary\nkey contains the column name and the value a binary string.\n\nReturns:\ndict[str, str]: a row from the DSV file, where the dictionary key\ncontains the column name and the value a Unicode string.", "source": "juraj-google-style"} {"code": "def update(self, file_path, new_data={}, **kwargs):\n \n\n data = new_data.copy()\n file_path = file_path.replace('/', '%2F')\n data['file_path'] = file_path\n path = '%s/%s' % (self.path, file_path)\n self._check_missing_update_attrs(data)\n return self.gitlab.http_put(path, post_data=data, **kwargs)", "docstring": "Update an object on the server.\n\nArgs:\nid: ID of the object to update (can be None if not required)\nnew_data: the update data for the object\n**kwargs: Extra options to send to the server (e.g. sudo)\n\nReturns:\ndict: The new object data (*not* a RESTObject)\n\nRaises:\nGitlabAuthenticationError: If authentication is not correct\nGitlabUpdateError: If the server cannot perform the request", "source": "juraj-google-style"} {"code": "def points_random_3d(count, range_x=(-10.0, 10.0), range_y=(-10.0, 10.0), range_z=(-10.0, 10.0), seed=None) -> VAO:\n \n random.seed(seed)\n\n def gen():\n for _ in range(count):\n yield random.uniform(*range_x)\n yield random.uniform(*range_y)\n yield random.uniform(*range_z)\n\n data = numpy.fromiter(gen(), count=count * 3, dtype=numpy.float32)\n\n vao = VAO(\"geometry:points_random_3d\", mode=moderngl.POINTS)\n vao.buffer(data, '3f', ['in_position'])\n\n return vao", "docstring": "Generates random positions inside a confied box.\n\nArgs:\ncount (int): Number of points to generate\n\nKeyword Args:\nrange_x (tuple): min-max range for x axis: Example (-10.0. 10.0)\nrange_y (tuple): min-max range for y axis: Example (-10.0. 10.0)\nrange_z (tuple): min-max range for z axis: Example (-10.0. 
10.0)\nseed (int): The random seed\n\nReturns:\nA :py:class:`demosys.opengl.vao.VAO` instance", "source": "juraj-google-style"} {"code": "def register(self, address, retry=True):\n logger.debug(('<%s> Sending REGISTER request to: %s' % (str(self.cuuid), str(address))))\n if (not self.listener.listening):\n logger.warning('Neteria client is not listening.')\n message = {'method': 'REGISTER', 'cuuid': str(self.cuuid)}\n if self.encryption:\n message['encryption'] = [self.encryption.n, self.encryption.e]\n self.listener.send_datagram(serialize_data(message, self.compression, encryption=False), address)\n if retry:\n self.register_retries = 0\n self.listener.call_later(self.timeout, self.retransmit, {'method': 'REGISTER', 'address': address})", "docstring": "This function will send a register packet to the discovered Neteria\nserver.\n\nArgs:\naddress (tuple): A tuple of the (address, port) to send the register\nrequest to.\nretry (boolean): Whether or not we want to reset the current number\nof registration retries to 0.\n\nReturns:\nNone\n\nExamples:\n>>> address\n('192.168.0.20', 40080)", "source": "codesearchnet"} {"code": "def attention_bias_to_padding(attention_bias, cast_fn=tf.to_float):\n \n \n \n return tf.squeeze(cast_fn(tf.less(attention_bias, -1)), axis=[1, 2])", "docstring": "Inverse of attention_bias_ignore_padding().\n\nArgs:\nattention_bias: a `Tensor` with shape [batch, 1, 1, memory_length], as\nreturned by attention_bias_ignore_padding().\ncast_fn: function used to cast to output type.\n\nReturns:\na Tensor with shape [batch, memory_length] with 1.0 in padding positions\nand 0.0 in non-padding positions. Type is determined by cast_fn.", "source": "juraj-google-style"} {"code": "def network_connect(self, value):\n \n if value == self._defaults['networkConnect'] and 'networkConnect' in self._values:\n del self._values['networkConnect']\n else:\n self._values['networkConnect'] = value", "docstring": "The network_connect property.\n\nArgs:\nvalue (string). the property value.", "source": "juraj-google-style"} {"code": "def adjust_saturation(img, saturation_factor):\n \n if not _is_pil_image(img):\n raise TypeError('img should be PIL Image. Got {}'.format(type(img)))\n\n enhancer = ImageEnhance.Color(img)\n img = enhancer.enhance(saturation_factor)\n return img", "docstring": "Adjust color saturation of an image.\n\nArgs:\nimg (PIL Image): PIL Image to be adjusted.\nsaturation_factor (float): How much to adjust the saturation. 
0 will\ngive a black and white image, 1 will give the original image while\n2 will enhance the saturation by a factor of 2.\n\nReturns:\nPIL Image: Saturation adjusted image.", "source": "juraj-google-style"} {"code": "def convert(self):\n assert not self._converted\n if self._input_graph_def:\n self._convert_graph_def()\n else:\n self._convert_saved_model()\n return self._converted_graph_def", "docstring": "Run the TF-TRT conversion.\n\nReturns:\nThe converted GraphDef for TF 1.x.", "source": "github-repos"} {"code": "def put(self, path, value, timeout=None, event_timeout=None):\n \n future = self.put_async(path, value)\n self.wait_all_futures(\n future, timeout=timeout, event_timeout=event_timeout)\n return future.result()", "docstring": "Puts a value to a path and returns when it completes\n\nArgs:\npath (list): The path to put to\nvalue (object): The value to set\ntimeout (float): time in seconds to wait for responses, wait forever\nif None\nevent_timeout: maximum time in seconds to wait between each response\nevent, wait forever if None\n\nReturns:\nThe value after the put completes", "source": "juraj-google-style"} {"code": "def heightmap_get_interpolated_value(hm: np.ndarray, x: float, y: float) -> float:\n return float(lib.TCOD_heightmap_get_interpolated_value(_heightmap_cdata(hm), x, y))", "docstring": "Return the interpolated height at non integer coordinates.\n\nArgs:\nhm (numpy.ndarray): A numpy.ndarray formatted for heightmap functions.\nx (float): A floating point x coordinate.\ny (float): A floating point y coordinate.\n\nReturns:\nfloat: The value at ``x``, ``y``.", "source": "codesearchnet"} {"code": "def lresolve(self, path):\n path = make_string_path(path)\n if (path == self.root.name):\n return self.root\n path = self._path_without_trailing_separators(path)\n path = self._original_path(path)\n (parent_directory, child_name) = self.splitpath(path)\n if (not parent_directory):\n parent_directory = self.cwd\n try:\n parent_obj = self.resolve(parent_directory)\n assert parent_obj\n if (not isinstance(parent_obj, FakeDirectory)):\n if ((not self.is_windows_fs) and isinstance(parent_obj, FakeFile)):\n self.raise_io_error(errno.ENOTDIR, path)\n self.raise_io_error(errno.ENOENT, path)\n return parent_obj.get_entry(child_name)\n except KeyError:\n self.raise_io_error(errno.ENOENT, path)", "docstring": "Search for the specified object, resolving only parent links.\n\nThis is analogous to the stat/lstat difference. 
This resolves links\n*to* the object but not of the final object itself.\n\nArgs:\npath: Specifies target FakeFile object to retrieve.\n\nReturns:\nThe FakeFile object corresponding to path.\n\nRaises:\nIOError: if the object is not found.", "source": "codesearchnet"} {"code": "def append(self, event, category=None):\n \n date = datetime.datetime.now()\n self.store.insert(0, (date, event, category))\n if len(self.store) > self.size:\n del self.store[-1]", "docstring": "Adds a new event to the trace store.\nThe event may hava a category\n\nArgs:\nevent (spade.message.Message): the event to be stored\ncategory (str, optional): a category to classify the event (Default value = None)", "source": "juraj-google-style"} {"code": "def _matmul_2d(a, b, **kwargs):\n ragged_err = 'The matrices in `a` and `b` may not be ragged in their innermost dimension.'\n checks = []\n if isinstance(a, ragged_tensor.RaggedTensor):\n original_size = array_ops.size(a.flat_values)\n a = a.to_tensor()\n checks.append(check_ops.assert_equal(original_size, array_ops.size(a), message=ragged_err))\n if isinstance(b, ragged_tensor.RaggedTensor):\n original_size = array_ops.size(b.flat_values)\n b = b.to_tensor()\n checks.append(check_ops.assert_equal(original_size, array_ops.size(b), message=ragged_err))\n with ops.control_dependencies(checks):\n return math_ops.matmul(a, b, **kwargs)", "docstring": "Multiplies potentially ragged 2D tensors.\n\nArgs:\na: A 2D Tensor or RaggedTensor with `shape=[I, J]`\nb: A 2D Tensor or RaggedTensor with `shape=[J, K]`\n**kwargs: Additional arguments for `tf.matmul` (e.g. transpose_a).\n\nReturns:\nA 2D Tensor with `shape=[I, K]`.", "source": "github-repos"} {"code": "def generate_output_notices(self, source, key, val, line='1', hr=True, show_name=False, colorize=True):\n output = generate_output(line=line, short=(HR_RDAP[source][key]['_short'] if hr else key), name=(HR_RDAP[source][key]['_name'] if (hr and show_name) else None), is_parent=(False if ((val is None) or (len(val) == 0)) else True), value=('None' if ((val is None) or (len(val) == 0)) else None), colorize=colorize)\n if (val is not None):\n count = 0\n for item in val:\n title = item['title']\n description = item['description']\n links = item['links']\n if (count > 0):\n output += generate_output(line=str((int(line) + 1)), is_parent=True, colorize=colorize)\n output += generate_output(line=str((int(line) + 1)), short=(HR_RDAP_COMMON[key]['title']['_short'] if hr else 'title'), name=(HR_RDAP_COMMON[key]['title']['_name'] if (hr and show_name) else None), value=title, colorize=colorize)\n output += generate_output(line=str((int(line) + 1)), short=(HR_RDAP_COMMON[key]['description']['_short'] if hr else 'description'), name=(HR_RDAP_COMMON[key]['description']['_name'] if (hr and show_name) else None), value=description.replace('\\n', '\\n{0}'.format(generate_output(line='3'))), colorize=colorize)\n output += self.generate_output_list(source=source, key='links', val=links, line=str((int(line) + 1)), hr=hr, show_name=show_name, colorize=colorize)\n count += 1\n return output", "docstring": "The function for generating CLI output RDAP notices results.\n\nArgs:\nsource (:obj:`str`): The parent key 'network' or 'objects'\n(required).\nkey (:obj:`str`): The event key 'events' or 'events_actor'\n(required).\nval (:obj:`dict`): The event dictionary (required).\nline (:obj:`str`): The line number (0-4). Determines indentation.\nDefaults to '0'.\nhr (:obj:`bool`): Enable human readable key translations. 
Defaults\nto True.\nshow_name (:obj:`bool`): Show human readable name (default is to\nonly show short). Defaults to False.\ncolorize (:obj:`bool`): Colorize the console output with ANSI\ncolors. Defaults to True.\n\nReturns:\nstr: The generated output.", "source": "codesearchnet"} {"code": "def _prepare_4d_causal_attention_mask_with_cache_position(attention_mask: torch.Tensor, sequence_length: int, target_length: int, dtype: torch.dtype, cache_position: torch.Tensor, batch_size: int, **kwargs):\n if attention_mask is not None and attention_mask.dim() == 4:\n causal_mask = attention_mask\n else:\n min_dtype = torch.finfo(dtype).min\n causal_mask = torch.full((sequence_length, target_length), fill_value=min_dtype, dtype=dtype, device=cache_position.device)\n if sequence_length != 1:\n causal_mask = torch.triu(causal_mask, diagonal=1)\n causal_mask *= torch.arange(target_length, device=cache_position.device) > cache_position.reshape(-1, 1)\n causal_mask = causal_mask[None, None, :, :].expand(batch_size, 1, -1, -1)\n if attention_mask is not None:\n causal_mask = causal_mask.clone()\n mask_length = attention_mask.shape[-1]\n padding_mask = causal_mask[:, :, :, :mask_length] + attention_mask[:, None, None, :].to(causal_mask.device)\n padding_mask = padding_mask == 0\n causal_mask[:, :, :, :mask_length] = causal_mask[:, :, :, :mask_length].masked_fill(padding_mask, min_dtype)\n return causal_mask", "docstring": "Creates a causal 4D mask of shape `(batch_size, 1, query_length, key_value_length)` from a 2D mask of shape\n`(batch_size, key_value_length)`, or if the input `attention_mask` is already 4D, do nothing.\n\nArgs:\nattention_mask (`torch.Tensor`):\nA 2D attention mask of shape `(batch_size, key_value_length)` or a 4D attention mask of shape\n`(batch_size, 1, query_length, key_value_length)`.\nsequence_length (`int`):\nThe sequence length being processed.\ntarget_length (`int`):\nThe target length: when generating with static cache, the mask should be as long as the static cache,\nto account for the 0 padding, the part of the cache that is not filled yet.\ndtype (`torch.dtype`):\nThe dtype to use for the 4D attention mask.\ncache_position (`torch.Tensor`):\nIndices depicting the position of the input sequence tokens in the sequence.\nbatch_size (`torch.Tensor`):\nBatch size.", "source": "github-repos"} {"code": "def check_candidate_exists(self, basepath, candidates):\n checked = []\n for item in candidates:\n abspath = os.path.join(basepath, item)\n if os.path.exists(abspath):\n checked.append(abspath)\n return checked", "docstring": "Check that at least one candidate exist into a directory.\n\nArgs:\nbasepath (str): Directory path where to search for candidate.\ncandidates (list): List of candidate file paths.\n\nReturns:\nlist: List of existing candidates.", "source": "codesearchnet"} {"code": "def layer_postprocess(layer_input, layer_output, hparams):\n \n return layer_prepostprocess(\n layer_input,\n layer_output,\n sequence=hparams.layer_postprocess_sequence,\n dropout_rate=hparams.layer_prepostprocess_dropout,\n norm_type=hparams.norm_type,\n depth=None,\n epsilon=hparams.norm_epsilon,\n dropout_broadcast_dims=comma_separated_string_to_integer_list(\n getattr(hparams, \"layer_prepostprocess_dropout_broadcast_dims\", \"\")),\n default_name=\"layer_postprocess\")", "docstring": "Apply layer postprocessing.\n\nSee layer_prepostprocess() for details.\n\nA hyperparameters object is passed for convenience. 
The hyperparameters\nthat may be used are:\n\nlayer_postprocess_sequence\nlayer_prepostprocess_dropout\nnorm_type\nhidden_size\nnorm_epsilon\n\nArgs:\nlayer_input: a Tensor\nlayer_output: a Tensor\nhparams: a hyperparameters object.\n\nReturns:\na Tensor", "source": "juraj-google-style"} {"code": "def variable_dtype(self):\n return self._variable_dtype", "docstring": "The variable dtype of this policy.\n\nThis is the dtype layers will create their variables in, unless a layer\nexplicitly chooses a different dtype. If this is different than\n`DTypePolicy.compute_dtype`, Layers will cast variables to\nthe compute dtype to avoid type errors.\n\nVariable regularizers are run in the variable dtype, not the compute\ndtype.\n\nReturns:\nThe variable dtype of this policy, as a string.", "source": "github-repos"} {"code": "def write_to(self, content, content_type):\n \n try:\n self._api.object_upload(self._bucket, self._key, content, content_type)\n except Exception as e:\n raise e", "docstring": "Writes text content to this item.\n\nArgs:\ncontent: the text content to be written.\ncontent_type: the type of text content.\nRaises:\nException if there was an error requesting the item's content.", "source": "juraj-google-style"} {"code": "def upload_rows(bq_legacy_client: BigQueryLegacyClient, table_metadata: TableMetadata, rows: List[Dict[str, Any]]) -> List[str]:\n if len(rows) > 0:\n result = bq_legacy_client.insert_rows_json(table_metadata.full_table_id, rows)\n for row in result:\n if 'errors' in row and len(row['errors']) > 0:\n return [str(e) for e in row['errors']]\n return []", "docstring": "Upload a List of Dict rows to a BigQuery table, by appending.\n\nNote: Does NOT support nested columns.\n\nArgs:\n* bq_legacy_client: BigQuery Legacy API client\n* table_metadata: TableMetadata object\n* column: Column name to select\n\nReturns:\n* List of errors, if any", "source": "github-repos"} {"code": "def startProducing(self, consumer):\n self._consumer = consumer\n self._current_deferred = defer.Deferred()\n self._sent = 0\n self._paused = False\n if (not hasattr(self, '_chunk_headers')):\n self._build_chunk_headers()\n if self._data:\n block = ''\n for field in self._data:\n block += self._chunk_headers[field]\n block += self._data[field]\n block += '\\r\\n'\n self._send_to_consumer(block)\n if self._files:\n self._files_iterator = self._files.iterkeys()\n self._files_sent = 0\n self._files_length = len(self._files)\n self._current_file_path = None\n self._current_file_handle = None\n self._current_file_length = None\n self._current_file_sent = 0\n result = self._produce()\n if result:\n return result\n else:\n return defer.succeed(None)\n return self._current_deferred", "docstring": "Start producing.\n\nArgs:\nconsumer: Consumer", "source": "codesearchnet"} {"code": "def ones_comp_sum16(num1: int, num2: int) -> int:\n \n\n carry = 1 << 16\n result = num1 + num2\n return result if result < carry else result + 1 - carry", "docstring": "Calculates the 1's complement sum for 16-bit numbers.\n\nArgs:\nnum1: 16-bit number.\nnum2: 16-bit number.\n\nReturns:\nThe calculated result.", "source": "juraj-google-style"} {"code": "def get_version_details(self, version_name):\n name = ('%s/versions/%s' % (self._full_model_name, version_name))\n return self._api.projects().models().versions().get(name=name).execute()", "docstring": "Get details of a version.\n\nArgs:\nversion: the name of the version in short form, such as \"v1\".\nReturns: a dictionary containing the version details.", "source": 
"codesearchnet"} {"code": "def on_test_batch_begin(self, batch, logs=None):", "docstring": "Called at the beginning of a batch in `evaluate` methods.\n\nAlso called at the beginning of a validation batch in the `fit`\nmethods, if validation data is provided.\n\nSubclasses should override for any actions to run.\n\nNote that if the `steps_per_execution` argument to `compile` in\n`tf.keras.Model` is set to `N`, this method will only be called every `N`\nbatches.\n\nArgs:\nbatch: Integer, index of batch within the current epoch.\nlogs: Dict, contains the return value of `model.test_step`. Typically,\nthe values of the `Model`'s metrics are returned. Example:\n`{'loss': 0.2, 'accuracy': 0.7}`.", "source": "github-repos"} {"code": "def add_child(self, total, prefix=None, width=300):\n self.child_bar = NotebookProgressBar(total, prefix=prefix, parent=self, width=width)\n return self.child_bar", "docstring": "Add a child progress bar displayed under the table of metrics. The child progress bar is returned (so it can be\neasily updated).\n\nArgs:\ntotal (`int`): The number of iterations for the child progress bar.\nprefix (`str`, *optional*): A prefix to write on the left of the progress bar.\nwidth (`int`, *optional*, defaults to 300): The width (in pixels) of the progress bar.", "source": "github-repos"} {"code": "def _create_package_hierarchy(prefix=settings.TEMP_DIR, book_id=None):\n root_dir = _get_package_name(book_id=book_id, prefix=prefix)\n if os.path.exists(root_dir):\n shutil.rmtree(root_dir)\n os.mkdir(root_dir)\n original_dir = os.path.join(root_dir, 'original')\n metadata_dir = os.path.join(root_dir, 'metadata')\n os.mkdir(original_dir)\n os.mkdir(metadata_dir)\n return (root_dir, original_dir, metadata_dir)", "docstring": "Create hierarchy of directories, at it is required in specification.\n\n`root_dir` is root of the package generated using :attr:`settings.TEMP_DIR`\nand :func:`_get_package_name`.\n\n`orig_dir` is path to the directory, where the data files are stored.\n\n`metadata_dir` is path to the directory with MODS metadata.\n\nArgs:\nbook_id (str, default None): UUID of the book.\nprefix (str, default settings.TEMP_DIR): Where the package will be\nstored. Default :attr:`settings.TEMP_DIR`.\n\nWarning:\nIf the `root_dir` exists, it is REMOVED!\n\nReturns:\nlist of str: root_dir, orig_dir, metadata_dir", "source": "codesearchnet"} {"code": "def __getitem___(self, py_type):\n raise NotImplementedError", "docstring": "Given a type creates a TypeConstraint instance parameterized by the type.\n\nThis function serves as a factory function which creates TypeConstraint\ninstances. Additionally, implementations by sub-classes should perform any\nsanity checking of the passed types in this method in order to rule-out\ndisallowed behavior. 
Such as, attempting to create a TypeConstraint whose\nparameterized type is actually an object instance.\n\nArgs:\npy_type: An instance of a Python type or TypeConstraint.\n\nReturns: An instance of a custom TypeConstraint for this CompositeTypeHint.\n\nRaises:\nTypeError: If the passed type violates any contraints for this particular\nTypeHint.", "source": "github-repos"} {"code": "def _BiasAddGrad(op: ops.Operation, received_grad):\n try:\n data_format = op.get_attr('data_format')\n except ValueError:\n data_format = None\n return (received_grad, gen_nn_ops.bias_add_grad(out_backprop=received_grad, data_format=data_format))", "docstring": "Return the gradients for the 2 inputs of bias_op.\n\nThe first input of unused_bias_op is the tensor t, and its gradient is\njust the gradient the unused_bias_op received.\n\nThe second input of unused_bias_op is the bias vector which has one fewer\ndimension than \"received_grad\" (the batch dimension.) Its gradient is the\nreceived gradient Summed on the batch dimension, which is the first dimension.\n\nArgs:\nop: The BiasOp for which we need to generate gradients.\nreceived_grad: Tensor. The gradients passed to the BiasOp.\n\nReturns:\nTwo tensors, the first one for the \"tensor\" input of the BiasOp,\nthe second one for the \"bias\" input of the BiasOp.", "source": "github-repos"} {"code": "def update_reserved_vlan_range(self, id_or_uri, vlan_pool, force=False):\n \n uri = self._client.build_uri(id_or_uri) + \"/reserved-vlan-range\"\n return self._client.update(resource=vlan_pool, uri=uri, force=force, default_values=self.DEFAULT_VALUES)", "docstring": "Updates the reserved vlan ID range for the fabric.\n\nNote:\nThis method is only available on HPE Synergy.\n\nArgs:\nid_or_uri: ID or URI of fabric.\nvlan_pool (dict): vlan-pool data to update.\nforce: If set to true, the operation completes despite any problems with network connectivity or errors\non the resource itself. The default is false.\n\nReturns:\ndict: The fabric", "source": "juraj-google-style"} {"code": "def build_ellipse(X, Y):\n \n x_mean = np.mean(X)\n y_mean = np.mean(Y)\n\n cov_matrix = np.cov(np.vstack((X, Y)))\n U, s, V = linalg.svd(cov_matrix, full_matrices=False)\n\n chi_95 = np.sqrt(4.61) \n width = np.sqrt(cov_matrix[0][0]) * chi_95 * 2\n height = np.sqrt(cov_matrix[1][1]) * chi_95 * 2\n\n eigenvector = V.T[0]\n angle = np.arctan(eigenvector[1] / eigenvector[0])\n\n return x_mean, y_mean, width, height, angle", "docstring": "Construct ellipse coordinates from two arrays of numbers.\n\nArgs:\nX (1D array_like)\nY (1D array_like)\n\nReturns:\nfloat: The mean of `X`.\nfloat: The mean of `Y`.\nfloat: The width of the ellipse.\nfloat: The height of the ellipse.\nfloat: The angle of orientation of the ellipse.", "source": "juraj-google-style"} {"code": "def drift_fn(self):\n return self._drift_fn", "docstring": "Python callable calculating instantaneous drift.\n\nThe callable should accept two real `Tensor` arguments of the same dtype.\nThe first argument is the scalar time t, the second argument is the value of\nIto process X - `Tensor` of shape\n`batch_shape + sample_shape + [dim]`, where `batch_shape` represents a batch\nof models and `sample_shape` represents samples for each of the models. The\nresult is value of drift a(t, X). The return value of the callable is a real\n`Tensor` of the same dtype as the input arguments and of shape\n`batch_shape + sample_shape + [dim]`. 
For example, `sample_shape` can stand\nfor `[num_samples]` for Monte Carlo sampling, or\n`[num_grid_points_1, ..., num_grid_points_dim]` for Finite Difference\nsolvers.\n\nReturns:\nThe instantaneous drift rate callable.", "source": "github-repos"} {"code": "def sequence_edit_distance(predictions, labels, weights_fn=common_layers.weights_nonzero):\n if (weights_fn is not common_layers.weights_nonzero):\n raise ValueError('Only weights_nonzero can be used for this metric.')\n with tf.variable_scope('edit_distance', values=[predictions, labels]):\n predictions = tf.to_int32(tf.squeeze(tf.argmax(predictions, axis=(- 1)), axis=(2, 3)))\n nonzero_idx = tf.where(tf.not_equal(predictions, 0))\n sparse_outputs = tf.SparseTensor(nonzero_idx, tf.gather_nd(predictions, nonzero_idx), tf.shape(predictions, out_type=tf.int64))\n labels = tf.squeeze(labels, axis=(2, 3))\n nonzero_idx = tf.where(tf.not_equal(labels, 0))\n label_sparse_outputs = tf.SparseTensor(nonzero_idx, tf.gather_nd(labels, nonzero_idx), tf.shape(labels, out_type=tf.int64))\n distance = tf.reduce_sum(tf.edit_distance(sparse_outputs, label_sparse_outputs, normalize=False))\n reference_length = tf.to_float(common_layers.shape_list(nonzero_idx)[0])\n return ((distance / reference_length), reference_length)", "docstring": "Average edit distance, ignoring padding 0s.\n\nThe score returned is the edit distance divided by the total length of\nreference truth and the weight returned is the total length of the truth.\n\nArgs:\npredictions: Tensor of shape [`batch_size`, `length`, 1, `num_classes`] and\ntype tf.float32 representing the logits, 0-padded.\nlabels: Tensor of shape [`batch_size`, `length`, 1, 1] and type tf.int32\nrepresenting the labels of same length as logits and 0-padded.\nweights_fn: ignored. 
The weights returned are the total length of the ground\ntruth labels, excluding 0-paddings.\n\nReturns:\n(edit distance / reference length, reference length)\n\nRaises:\nValueError: if weights_fn is not common_layers.weights_nonzero.", "source": "codesearchnet"} {"code": "def convert(framework: str, model: str, output: Path, opset: int, tokenizer: Optional[str]=None, use_external_format: bool=False, pipeline_name: str='feature-extraction', **model_kwargs):\n warnings.warn('The `transformers.convert_graph_to_onnx` package is deprecated and will be removed in version 5 of Transformers', FutureWarning)\n print(f'ONNX opset version set to: {opset}')\n nlp = load_graph_from_args(pipeline_name, framework, model, tokenizer, **model_kwargs)\n if not output.parent.exists():\n print(f'Creating folder {output.parent}')\n makedirs(output.parent.as_posix())\n elif len(listdir(output.parent.as_posix())) > 0:\n raise Exception(f'Folder {output.parent.as_posix()} is not empty, aborting conversion')\n if framework == 'pt':\n convert_pytorch(nlp, opset, output, use_external_format)\n else:\n convert_tensorflow(nlp, opset, output)", "docstring": "Convert the pipeline object to the ONNX Intermediate Representation (IR) format\n\nArgs:\nframework: The framework the pipeline is backed by (\"pt\" or \"tf\")\nmodel: The name of the model to load for the pipeline\noutput: The path where the ONNX graph will be stored\nopset: The actual version of the ONNX operator set to use\ntokenizer: The name of the model to load for the pipeline, default to the model's name if not provided\nuse_external_format:\nSplit the model definition from its parameters to allow model bigger than 2GB (PyTorch only)\npipeline_name: The kind of pipeline to instantiate (ner, question-answering, etc.)\nmodel_kwargs: Keyword arguments to be forwarded to the model constructor\n\nReturns:", "source": "github-repos"} {"code": "def scan_chain_len(self, scan_chain):\n \n res = self._dll.JLINKARM_MeasureSCLen(scan_chain)\n if res < 0:\n raise errors.JLinkException(res)\n return res", "docstring": "Retrieves and returns the number of bits in the scan chain.\n\nArgs:\nself (JLink): the ``JLink`` instance\nscan_chain (int): scan chain to be measured\n\nReturns:\nNumber of bits in the specified scan chain.\n\nRaises:\nJLinkException: on error.", "source": "juraj-google-style"} {"code": "def __convertRlocToRouterId(self, xRloc16):\n \n routerList = []\n routerList = self.__sendCommand('router list')[0].split()\n print routerList\n print xRloc16\n\n for index in routerList:\n router = []\n cmd = 'router %s' % index\n router = self.__sendCommand(cmd)\n\n for line in router:\n if 'Done' in line:\n break\n elif 'Router ID' in line:\n routerid = line.split()[2]\n elif 'Rloc' in line:\n rloc16 = line.split()[1]\n else:\n pass\n\n \n if isinstance(xRloc16, str):\n rloc16 = '0x' + rloc16\n if rloc16 == xRloc16:\n return routerid\n elif isinstance(xRloc16, int):\n if int(rloc16, 16) == xRloc16:\n return routerid\n else:\n pass\n\n return None", "docstring": "mapping Rloc16 to router id\n\nArgs:\nxRloc16: hex rloc16 short address\n\nReturns:\nactual router id allocated by leader", "source": "juraj-google-style"} {"code": "def _query(self, url, xpath):\n \n return self.session.query(CachedRequest).filter(CachedRequest.url == url).filter(CachedRequest.xpath == xpath)", "docstring": "Base query for an url and xpath\n\nArgs:\nurl (str): URL to search\nxpath (str): xpath to search (may be ``None``)", "source": "juraj-google-style"} {"code": "def save_dataframes(self, 
outdir, prefix='df_'):\n dfs = list(filter((lambda x: x.startswith(prefix)), dir(self)))\n counter = 0\n for df in dfs:\n outpath = ssbio.utils.outfile_maker(inname=df, outext='.csv', outdir=outdir)\n my_df = getattr(self, df)\n if (not isinstance(my_df, pd.DataFrame)):\n raise TypeError('{}: object is not a Pandas DataFrame'.format(df))\n if my_df.empty:\n log.debug('{}: empty dataframe, not saving'.format(df))\n else:\n my_df.to_csv(outpath)\n log.debug('{}: saved dataframe'.format(outpath))\n counter += 1\n log.debug('Saved {} dataframes at {}'.format(counter, outdir))", "docstring": "Save all attributes that start with \"df\" into a specified directory.\n\nArgs:\noutdir (str): Path to output directory\nprefix (str): Prefix that dataframe attributes start with", "source": "codesearchnet"} {"code": "def is10(msg):\n if allzeros(msg):\n return False\n d = hex2bin(data(msg))\n if (d[0:8] != '00010000'):\n return False\n if (bin2int(d[9:14]) != 0):\n return False\n if ((d[14] == '1') and (bin2int(d[16:23]) < 5)):\n return False\n if ((d[14] == '0') and (bin2int(d[16:23]) > 4)):\n return False\n return True", "docstring": "Check if a message is likely to be BDS code 1,0\n\nArgs:\nmsg (String): 28 bytes hexadecimal message string\n\nReturns:\nbool: True or False", "source": "codesearchnet"} {"code": "def split_locator(self, path):\n \n relative = self.relpath(path)\n try:\n locator, tail = relative.split('/', 1)\n except ValueError:\n locator = relative\n tail = ''\n return locator, tail", "docstring": "Split the path into a pair (locator, path).\n\nargs:\npath (str): Absolute path or URL.\n\nReturns:\ntuple of str: locator, path.", "source": "juraj-google-style"} {"code": "def forward(self, pixel_values: torch.FloatTensor, spatial_shapes: torch.LongTensor) -> torch.Tensor:\n target_dtype = self.patch_embedding.weight.dtype\n patch_embeds = self.patch_embedding(pixel_values.to(dtype=target_dtype))\n positional_embeddings = self.position_embedding.weight.reshape(self.position_embedding_size, self.position_embedding_size, -1)\n resized_positional_embeddings = self.resize_positional_embeddings(positional_embeddings, spatial_shapes, max_length=pixel_values.shape[1])\n embeddings = patch_embeds + resized_positional_embeddings\n return embeddings", "docstring": "Args:\npixel_values (`torch.FloatTensor`):\nPixel values of shape (batch_size, max_num_patches, num_channels * patch_size * patch_size)\nspatial_shapes (`List[Tuple[int, int]]`):\nSpatial shapes of shape (batch_size, 2) to resize the positional embeddings to", "source": "github-repos"} {"code": "def get_autosave_filename(self, filename):\n try:\n autosave_filename = self.name_mapping[filename]\n except KeyError:\n autosave_dir = get_conf_path('autosave')\n if (not osp.isdir(autosave_dir)):\n try:\n os.mkdir(autosave_dir)\n except EnvironmentError as error:\n action = _('Error while creating autosave directory')\n msgbox = AutosaveErrorDialog(action, error)\n msgbox.exec_if_enabled()\n autosave_filename = self.create_unique_autosave_filename(filename, autosave_dir)\n self.name_mapping[filename] = autosave_filename\n self.stack.sig_option_changed.emit('autosave_mapping', self.name_mapping)\n logger.debug('New autosave file name')\n return autosave_filename", "docstring": "Get name of autosave file for specified file name.\n\nThis function uses the dict in `self.name_mapping`. 
If `filename` is\nin the mapping, then return the corresponding autosave file name.\nOtherwise, construct a unique file name and update the mapping.\n\nArgs:\nfilename (str): original file name", "source": "codesearchnet"} {"code": "def _validate_none_or_type(t):\n\n def _validate(setting):\n \"\\n Check the setting to make sure it's the right type.\\n\\n Args:\\n setting (object): The setting to check.\\n\\n Returns:\\n object: The unmodified object if it's the proper type.\\n\\n Raises:\\n ValueError: If the setting is the wrong type.\\n \"\n if ((setting is not None) and (not isinstance(setting, t))):\n raise ValueError('\"{}\" is not \"{}\"'.format(setting, t))\n return setting\n return _validate", "docstring": "Create a validator that checks if a setting is either None or a given type.\n\nArgs:\nt: The type to assert.\n\nReturns:\ncallable: A callable that will validate a setting for that type.", "source": "codesearchnet"} {"code": "def _retrieve_problem(self, id_):\n future = Future(self, id_, self.return_matrix, None)\n self.client._poll(future)\n return future", "docstring": "Resume polling for a problem previously submitted.\n\nArgs:\nid_: Identification of the query.\n\nReturns:\n:obj: `Future`", "source": "codesearchnet"} {"code": "def find1(self, kw: YangIdentifier, arg: str=None, pref: YangIdentifier=None, required: bool=False) -> Optional['Statement']:\n for sub in self.substatements:\n if ((sub.keyword == kw) and (sub.prefix == pref) and ((arg is None) or (sub.argument == arg))):\n return sub\n if required:\n raise StatementNotFound(str(self), kw)", "docstring": "Return first substatement with the given parameters.\n\nArgs:\nkw: Statement keyword (local part for extensions).\narg: Argument (all arguments will match if ``None``).\npref: Keyword prefix (``None`` for built-in statements).\nrequired: Should an exception be raised on failure?\n\nRaises:\nStatementNotFound: If `required` is ``True`` and the\nstatement is not found.", "source": "codesearchnet"} {"code": "def create_dir(self, directory_path, perm_bits=PERM_DEF):\n directory_path = self.make_string_path(directory_path)\n directory_path = self.absnormpath(directory_path)\n self._auto_mount_drive_if_needed(directory_path)\n if self.exists(directory_path, check_link=True):\n self.raise_os_error(errno.EEXIST, directory_path)\n path_components = self._path_components(directory_path)\n current_dir = self.root\n new_dirs = []\n for component in path_components:\n directory = self._directory_content(current_dir, component)[1]\n if (not directory):\n new_dir = FakeDirectory(component, filesystem=self)\n new_dirs.append(new_dir)\n current_dir.add_entry(new_dir)\n current_dir = new_dir\n else:\n if S_ISLNK(directory.st_mode):\n directory = self.resolve(directory.contents)\n current_dir = directory\n if ((directory.st_mode & S_IFDIR) != S_IFDIR):\n self.raise_os_error(errno.ENOTDIR, current_dir.path)\n for new_dir in new_dirs:\n new_dir.st_mode = (S_IFDIR | perm_bits)\n self._last_ino += 1\n current_dir.st_ino = self._last_ino\n return current_dir", "docstring": "Create `directory_path`, and all the parent directories.\n\nHelper method to set up your test faster.\n\nArgs:\ndirectory_path: The full directory path to create.\nperm_bits: The permission bits as set by `chmod`.\n\nReturns:\nThe newly created FakeDirectory object.\n\nRaises:\nOSError: if the directory already exists.", "source": "codesearchnet"} {"code": "def _is_user_included_op(self, op):\n for opname_re in self._parameters.included_opname_re_list:\n if 
opname_re.match(op.name):\n return True\n for optype_re in self._parameters.included_optype_re_list:\n if optype_re.match(op.type):\n return True\n return False", "docstring": "Checks whether the op is included in the tensor tracer flags.\n\nArgs:\nop: tf Operation\nReturns:\nTrue, if the op is included.\nAn op is included if:\n- Its op name is given in included_opnames\n- Its op type is given in included_optypes\n- The op is at most _trace_ops_before_included hops before an included op\n- The op is at most _trace_ops_after_included hops after an included op", "source": "github-repos"} {"code": "def _generate_async(self, generator):\n first_value_future = self._workers.submit(next, generator)\n\n def get_first_element(future=first_value_future):\n '\\n Get first element value from future.\\n\\n Args:\\n future (concurrent.futures._base.Future): First value future.\\n\\n Returns:\\n Evaluated value\\n '\n try:\n (yield future.result())\n except StopIteration:\n return\n return chain(get_first_element(), generator)", "docstring": "Return the previous generator object after having run the first element\nevaluation as a background task.\n\nArgs:\ngenerator (iterable): A generator function.\n\nReturns:\niterable: The generator function with first element evaluated\nin background.", "source": "codesearchnet"} {"code": "def list_devices(self):\n raw_device_list = tf_session.TF_SessionListDevices(self._session)\n device_list = []\n size = tf_session.TF_DeviceListCount(raw_device_list)\n for i in range(size):\n name = tf_session.TF_DeviceListName(raw_device_list, i)\n device_type = tf_session.TF_DeviceListType(raw_device_list, i)\n memory = tf_session.TF_DeviceListMemoryBytes(raw_device_list, i)\n incarnation = tf_session.TF_DeviceListIncarnation(raw_device_list, i)\n device_list.append(_DeviceAttributes(name, device_type, memory, incarnation))\n tf_session.TF_DeleteDeviceList(raw_device_list)\n return device_list", "docstring": "Lists available devices in this session.\n\n```python\ndevices = sess.list_devices()\nfor d in devices:\nprint(d.name)\n```\n\nWhere:\nEach element in the list has the following properties\nname: A string with the full name of the device. ex:\n`/job:worker/replica:0/task:3/device:CPU:0`\ndevice_type: The type of the device (e.g. `CPU`, `GPU`, `TPU`.)\nmemory_limit: The maximum amount of memory available on the device.\nNote: depending on the device, it is possible the usable memory could\nbe substantially less.\n\nRaises:\ntf.errors.OpError: If it encounters an error (e.g. 
session is in an\ninvalid state, or network errors occur).\n\nReturns:\nA list of devices in the session.", "source": "github-repos"} {"code": "def filename(self, fname, timestep=None, suffix='', force_legacy=False):\n if (timestep is not None):\n fname += '{:05d}'.format(timestep)\n fname += suffix\n if ((not force_legacy) and self.hdf5):\n fpath = (self.hdf5 / fname)\n else:\n fpath = ((self.par['ioin']['output_file_stem'] + '_') + fname)\n fpath = (self.path / fpath)\n return fpath", "docstring": "Return name of StagYY output file.\n\nArgs:\nfname (str): name stem.\ntimestep (int): snapshot number, set to None if this is not\nrelevant.\nsuffix (str): optional suffix of file name.\nforce_legacy (bool): force returning the legacy output path.\nReturns:\n:class:`pathlib.Path`: the path of the output file constructed\nwith the provided segments.", "source": "codesearchnet"} {"code": "def GetMessages(self, formatter_mediator, event):\n \n event_values = event.CopyToDict()\n\n \n \n text_pieces = []\n for key, value in event_values.items():\n if key in definitions.RESERVED_VARIABLE_NAMES:\n continue\n text_pieces.append('{0:s}: {1!s}'.format(key, value))\n\n event_values['attribute_driven'] = ' '.join(text_pieces)\n event_values['data_type'] = self.DATA_TYPE\n\n return self._FormatMessages(\n self.FORMAT_STRING, self.FORMAT_STRING_SHORT, event_values)", "docstring": "Determines the formatted message strings for an event object.\n\nArgs:\nformatter_mediator (FormatterMediator): mediates the interactions\nbetween formatters and other components, such as storage and Windows\nEventLog resources.\nevent (EventObject): event.\n\nReturns:\ntuple(str, str): formatted message string and short message string.", "source": "juraj-google-style"} {"code": "def add_droplets(self, droplet_ids):\n \n return self.get_data(\n \"load_balancers/%s/droplets/\" % self.id,\n type=POST,\n params={\"droplet_ids\": droplet_ids}\n )", "docstring": "Assign a LoadBalancer to a Droplet.\n\nArgs:\ndroplet_ids (obj:`list` of `int`): A list of Droplet IDs", "source": "juraj-google-style"} {"code": "def emit_tree_format(tree, verbose=False):\n \n if verbose:\n print(\"Converting: \" + repr(tree))\n ret_str = __recursive_formatter(tree)\n return ret_str", "docstring": "Returns a tree representation of a parse tree.\n\nArguments:\ntree: the parse tree whose tree representation is to be generated\nverbose (bool): if True prints the parse tree to be formatted\n\nReturns:\nstr: tree-like representation of the parse tree", "source": "juraj-google-style"} {"code": "def _read_pem_file(cls, filename, from_package=False):\n _logger.debug('Reading PEM {0}.'.format(filename))\n if from_package:\n return wpull.util.filter_pem(wpull.util.get_package_data(filename))\n with open(filename, 'rb') as in_file:\n return wpull.util.filter_pem(in_file.read())", "docstring": "Read the PEM file.\n\nReturns:\niterable: An iterable of certificates. 
The certificate data\nis :class:`byte`.", "source": "codesearchnet"} {"code": "def sg_flatten(tensor, opt):\n r\n dim = np.prod(tensor.get_shape().as_list()[1:])\n return tf.reshape(tensor, [-1, dim], name=opt.name)", "docstring": "r\"\"\"Reshapes a tensor to `batch_size x -1`.\n\nSee `tf.reshape()` in tensorflow.\n\nArgs:\ntensor: A `Tensor` (automatically given by chain).\nopt:\nname: If provided, it replaces current tensor's name.\n\nReturns:\nA 2-D tensor.", "source": "juraj-google-style"} {"code": "def match(pattern, path):\n \n \n try:\n levels, recursive, re_pattern = _PATTERN_CACHE[(pattern, True)]\n except KeyError:\n levels, recursive, re_pattern = _translate_glob(pattern, case_sensitive=True)\n _PATTERN_CACHE[(pattern, True)] = (levels, recursive, re_pattern)\n return bool(re_pattern.match(path))", "docstring": "Compare a glob pattern with a path (case sensitive).\n\nArguments:\npattern (str): A glob pattern.\npath (str): A path.\n\nReturns:\nbool: ``True`` if the path matches the pattern.\n\nExample:\n\n>>> from fs.glob import match\n>>> match(\"**/*.py\", \"/fs/glob.py\")\nTrue", "source": "juraj-google-style"} {"code": "def concatechain(*generators: types.FrameGenerator, separator: str=''):\n while True:\n try:\n next_ = [next(gen) for gen in generators]\n (yield separator.join(next_))\n except StopIteration as exc:\n return exc.value", "docstring": "Return a generator that in each iteration takes one value from each of the\nsupplied generators, joins them together with the specified separator and\nyields the result. Stops as soon as any iterator raises StopIteration and\nreturns the value contained in it.\n\nPrimarily created for chaining string generators, hence the name.\n\nArgs:\ngenerators: Any number of generators that yield types that can be\njoined together with the separator string.\nseparator: A separator to insert between each value yielded by\nthe different generators.\nReturns:\nA generator that yields strings that are the concatenation of one value\nfrom each of the generators, joined together with the separator string.", "source": "codesearchnet"} {"code": "def load_settings(self, path):\n if (not os.path.exists(path)):\n raise exceptions.ConfigurationError(\"The server configuration file ('{0}') could not be located.\".format(path))\n self._logger.info('Loading server configuration settings from: {0}'.format(path))\n parser = configparser.ConfigParser()\n parser.read(path)\n self._parse_settings(parser)\n self.parse_auth_settings(parser)", "docstring": "Load configuration settings from the file pointed to by path.\n\nThis will overwrite all current setting values.\n\nArgs:\npath (string): The path to the configuration file containing\nthe settings to load. 
Required.\nRaises:\nConfigurationError: Raised if the path does not point to an\nexisting file or if a setting value is invalid.", "source": "codesearchnet"} {"code": "def on_smart_contract_event(self, sc_event: NotifyEvent):\n \n if not isinstance(sc_event, NotifyEvent):\n logger.info(\"Not Notify Event instance\")\n return\n if sc_event.ShouldPersist:\n if sc_event.notify_type in [NotifyType.TRANSFER, NotifyType.REFUND, NotifyType.MINT]:\n self._events_to_write.append(sc_event)", "docstring": "Listener for NotifyEvent\nArgs:\nsc_event (NotifyEvent): event to check whether it should be persisted", "source": "juraj-google-style"} {"code": "def daily_from_hourly(df):\n \n\n df_daily = pd.DataFrame()\n\n if 'temp' in df:\n df_daily['temp'] = df.temp.resample('D').mean()\n df_daily['tmin'] = df.temp.groupby(df.temp.index.date).min()\n df_daily['tmax'] = df.temp.groupby(df.temp.index.date).max()\n\n if 'precip' in df:\n df_daily['precip'] = df.precip.resample('D').sum()\n\n if 'glob' in df:\n df_daily['glob'] = df.glob.resample('D').mean()\n\n if 'hum' in df:\n df_daily['hum'] = df.hum.resample('D').mean()\n\n if 'hum' in df:\n df_daily['hum_min'] = df.hum.groupby(df.hum.index.date).min()\n\n if 'hum' in df:\n df_daily['hum_max'] = df.hum.groupby(df.hum.index.date).max()\n\n if 'wind' in df:\n df_daily['wind'] = df.wind.resample('D').mean()\n\n if 'ssd' in df:\n df_daily['ssd'] = df.ssd.resample('D').sum() / 60 \n\n df_daily.index.name = None\n return df_daily", "docstring": "Aggregates data (hourly to daily values) according to the characteristics\nof each variable (e.g., average for temperature, sum for precipitation)\n\nArgs:\ndf: dataframe including time series with one hour time steps\n\nReturns:\ndataframe (daily)", "source": "juraj-google-style"} {"code": "def handle_unexpected_exception(exc):\n \n \n try:\n write_logfile()\n addendum = 'Please see the log file for more information.'\n except IOError:\n addendum = 'Unable to write log file.'\n try:\n message = str(exc)\n return '{}{}{}'.format(message, '\\n' if message else '', addendum)\n except Exception: \n return str(exc)", "docstring": "Return an error message and write a log file if logging was not enabled.\n\nArgs:\nexc: The unexpected exception.\n\nReturns:\nA message to display to the user concerning the unexpected exception.", "source": "juraj-google-style"} {"code": "def _generate_placements(self, width, height):\n skyline = self._skyline\n points = collections.deque()\n left_index = right_index = 0\n support_height = skyline[0].top\n support_index = 0\n placements = self._placement_points_generator(skyline, width)\n for p in placements:\n if ((p + width) > skyline[right_index].right):\n for right_index in range((right_index + 1), len(skyline)):\n if (skyline[right_index].top >= support_height):\n support_index = right_index\n support_height = skyline[right_index].top\n if ((p + width) <= skyline[right_index].right):\n break\n if (p >= skyline[left_index].right):\n left_index += 1\n if (support_index < left_index):\n support_index = left_index\n support_height = skyline[left_index].top\n for i in range(left_index, (right_index + 1)):\n if (skyline[i].top >= support_height):\n support_index = i\n support_height = skyline[i].top\n if ((support_height + height) <= self.height):\n points.append((Rectangle(p, support_height, width, height), left_index, right_index))\n return points", "docstring": "Generate a list with\n\nArguments:\nskyline (list): SkylineHSegment list\nwidth (number):\n\nReturns:\ntuple (Rectangle, 
fitness):\nRectangle: Rectangle in valid position\nleft_skyline: Index for the skyline under the rectangle left edge.\nright_skyline: Index for the skyline under the rectangle right edte.", "source": "codesearchnet"} {"code": "def reciprocal(x):\n if any_symbolic_tensors((x,)):\n return Reciprocal().symbolic_call(x)\n return backend.numpy.reciprocal(x)", "docstring": "Return the reciprocal of the argument, element-wise.\n\nCalculates `1/x`.\n\nArgs:\nx: Input tensor.\n\nReturns:\nOutput tensor, element-wise reciprocal of `x`.", "source": "github-repos"} {"code": "def cross_replica_sum(x, group_assignment=None, name=None):\n if group_assignment is None:\n group_assignment = _create_default_group_assignment()\n return gen_tpu_ops.cross_replica_sum(x, group_assignment, name=name)", "docstring": "Sum the input tensor across replicas according to group_assignment.\n\nArgs:\nx: The local tensor to the sum.\ngroup_assignment: Optional 2d int32 lists with shape [num_groups,\nnum_replicas_per_group]. `group_assignment[i]` represents the replica ids\nin the ith subgroup.\nname: Optional op name.\n\nReturns:\nA `Tensor` which is summed across replicas.", "source": "github-repos"} {"code": "def is_valid_bibtex(reference):\n sio = StringIO(remove_non_ascii(reference))\n parser = bibtex.Parser()\n errors.set_strict_mode(False)\n bib_data = parser.parse_stream(sio)\n return (len(bib_data.entries) > 0)", "docstring": "Use pybtex to validate that a reference is in proper BibTeX format\n\nArgs:\nreference: A String reference in BibTeX format.\n\nReturns:\nBoolean indicating if reference is valid bibtex.", "source": "codesearchnet"} {"code": "def query(self, expr, **kwargs):\n \n columns = self.columns\n\n def query_builder(df, **kwargs):\n \n \n df = df.copy()\n df.index = pandas.RangeIndex(len(df))\n df.columns = columns\n df.query(expr, inplace=True, **kwargs)\n df.columns = pandas.RangeIndex(len(df.columns))\n return df\n\n func = self._prepare_method(query_builder, **kwargs)\n new_data = self._map_across_full_axis(1, func)\n \n new_index = self.compute_index(0, new_data, True)\n\n return self.__constructor__(new_data, new_index, self.columns, self.dtypes)", "docstring": "Query columns of the DataManager with a boolean expression.\n\nArgs:\nexpr: Boolean expression to query the columns with.\n\nReturns:\nDataManager containing the rows where the boolean expression is satisfied.", "source": "juraj-google-style"} {"code": "def ShouldCheckNamespaceIndentation(nesting_state, is_namespace_indent_item, raw_lines_no_comments, linenum):\n is_forward_declaration = IsForwardClassDeclaration(raw_lines_no_comments, linenum)\n if (not (is_namespace_indent_item or is_forward_declaration)):\n return False\n if IsMacroDefinition(raw_lines_no_comments, linenum):\n return False\n return IsBlockInNameSpace(nesting_state, is_forward_declaration)", "docstring": "This method determines if we should apply our namespace indentation check.\n\nArgs:\nnesting_state: The current nesting state.\nis_namespace_indent_item: If we just put a new class on the stack, True.\nIf the top of the stack is not a class, or we did not recently\nadd the class, False.\nraw_lines_no_comments: The lines without the comments.\nlinenum: The current line number we are processing.\n\nReturns:\nTrue if we should apply our namespace indentation check. 
Currently, it\nonly works for classes and namespaces inside of a namespace.", "source": "codesearchnet"} {"code": "def reduce(reducer, data, chunk_size=DEFAULT_CHUNK_SIZE):\n \n if not chunk_size:\n return finalize(reducer, fold(reducer, data))\n\n \n \n \n chunks = generate_chunks(data, chunk_size)\n intermediate = fold(reducer, next(chunks))\n for chunk in chunks:\n intermediate = merge(reducer, intermediate, fold(reducer, chunk))\n\n return finalize(reducer, intermediate)", "docstring": "Repeatedly call fold and merge on data and then finalize.\n\nArguments:\ndata: Input for the fold function.\nreducer: The IReducer to use.\nchunk_size: How many items should be passed to fold at a time?\n\nReturns:\nReturn value of finalize.", "source": "juraj-google-style"} {"code": "def load(cls, config: Optional[Config] = None):\n \n if cls._dfk is not None:\n raise RuntimeError('Config has already been loaded')\n\n if config is None:\n cls._dfk = DataFlowKernel(Config())\n else:\n cls._dfk = DataFlowKernel(config)\n\n return cls._dfk", "docstring": "Load a DataFlowKernel.\n\nArgs:\n- config (Config) : Configuration to load. This config will be passed to a\nnew DataFlowKernel instantiation which will be set as the active DataFlowKernel.\nReturns:\n- DataFlowKernel : The loaded DataFlowKernel object.", "source": "juraj-google-style"} {"code": "def GetMetadataLegacy(client, token=None):\n \n if isinstance(client, rdfvalue.RDFURN):\n client_fd = aff4.FACTORY.Open(client, mode=\"r\", token=token)\n else:\n client_fd = client\n\n metadata = ExportedMetadata()\n\n metadata.client_urn = client_fd.urn\n metadata.client_age = client_fd.urn.age\n\n metadata.hostname = utils.SmartUnicode(\n client_fd.Get(client_fd.Schema.HOSTNAME, \"\"))\n\n metadata.os = utils.SmartUnicode(client_fd.Get(client_fd.Schema.SYSTEM, \"\"))\n\n metadata.uname = utils.SmartUnicode(client_fd.Get(client_fd.Schema.UNAME, \"\"))\n\n metadata.os_release = utils.SmartUnicode(\n client_fd.Get(client_fd.Schema.OS_RELEASE, \"\"))\n\n metadata.os_version = utils.SmartUnicode(\n client_fd.Get(client_fd.Schema.OS_VERSION, \"\"))\n\n kb = client_fd.Get(client_fd.Schema.KNOWLEDGE_BASE)\n usernames = \"\"\n if kb:\n usernames = [user.username for user in kb.users] or \"\"\n metadata.usernames = utils.SmartUnicode(usernames)\n\n metadata.mac_address = utils.SmartUnicode(\n client_fd.Get(client_fd.Schema.MAC_ADDRESS, \"\"))\n\n system_labels = set()\n user_labels = set()\n for l in client_fd.GetLabels():\n if l.owner == \"GRR\":\n system_labels.add(l.name)\n else:\n user_labels.add(l.name)\n\n metadata.labels = \",\".join(sorted(system_labels | user_labels))\n\n metadata.system_labels = \",\".join(sorted(system_labels))\n\n metadata.user_labels = \",\".join(sorted(user_labels))\n\n metadata.hardware_info = client_fd.Get(client_fd.Schema.HARDWARE_INFO)\n\n metadata.kernel_version = client_fd.Get(client_fd.Schema.KERNEL)\n\n return metadata", "docstring": "Builds ExportedMetadata object for a given client id.\n\nNote: This is a legacy aff4-only implementation.\nTODO(user): deprecate as soon as REL_DB migration is done.\n\nArgs:\nclient: RDFURN of a client or VFSGRRClient object itself.\ntoken: Security token.\n\nReturns:\nExportedMetadata object with metadata of the client.", "source": "juraj-google-style"} {"code": "def calculate_etag(file_path):\n \n stat = file_path.stat()\n etag = \"%x-%x\" % (stat.st_mtime_ns, stat.st_size)\n return etag", "docstring": "Calculate an etag value\n\nArgs:\na_file (pathlib.Path): The filepath to 
the\n\nReturns:\nString of the etag value to be sent back in header", "source": "juraj-google-style"} {"code": "def __init__(self, project_id, credentials):\n \n self._project_id = project_id\n self._credentials = credentials", "docstring": "Initializes an instance of a Context object.\n\nArgs:\nproject_id: the current cloud project.\ncredentials: the credentials to use to authorize requests.", "source": "juraj-google-style"} {"code": "def take_parenting(self, inst):\n if (self is inst):\n return\n for decl in inst.declarations:\n decl.parent = self\n self.declarations.append(decl)\n inst.declarations = []", "docstring": "Takes parenting from inst and transfers it to self.\n\nArgs:\ninst (namespace_t): a namespace declaration", "source": "codesearchnet"} {"code": "def project_and_occlude_texture(texture, surface, angle=DEFAULT_ANGLE):\n projected_surface = project_surface(surface, angle)\n projected_surface = _remove_hidden_parts(projected_surface)\n texture_y = map_texture_to_surface(texture, projected_surface)\n (texture_x, _) = texture\n return (texture_x, texture_y)", "docstring": "Projects a texture onto a surface with occluded areas removed.\n\nArgs:\ntexture (texture): the texture to map to the projected surface\nsurface (surface): the surface to project\nangle (float): the angle to project at, in degrees (0 = overhead, 90 = side view)\n\nReturns:\nlayer: A layer.", "source": "codesearchnet"} {"code": "def AddStopTimeObject(self, stoptime, schedule=None, problems=None):\n if (schedule is None):\n schedule = self._schedule\n if (schedule is None):\n warnings.warn('No longer supported. _schedule attribute is used to get stop_times table', DeprecationWarning)\n if (problems is None):\n problems = schedule.problem_reporter\n new_secs = stoptime.GetTimeSecs()\n cursor = schedule._connection.cursor()\n cursor.execute('SELECT max(stop_sequence), max(arrival_secs), max(departure_secs) FROM stop_times WHERE trip_id=?', (self.trip_id,))\n row = cursor.fetchone()\n if (row[0] is None):\n stoptime.stop_sequence = 1\n if (new_secs == None):\n problems.OtherProblem(('No time for first StopTime of trip_id \"%s\"' % (self.trip_id,)))\n else:\n stoptime.stop_sequence = (row[0] + 1)\n prev_secs = max(row[1], row[2])\n if ((new_secs != None) and (new_secs < prev_secs)):\n problems.OtherProblem(('out of order stop time for stop_id=%s trip_id=%s %s < %s' % (util.EncodeUnicode(stoptime.stop_id), util.EncodeUnicode(self.trip_id), util.FormatSecondsSinceMidnight(new_secs), util.FormatSecondsSinceMidnight(prev_secs))))\n self._AddStopTimeObjectUnordered(stoptime, schedule)", "docstring": "Add a StopTime object to the end of this trip.\n\nArgs:\nstoptime: A StopTime object. 
Should not be reused in multiple trips.\nschedule: Schedule object containing this trip which must be\npassed to Trip.__init__ or here\nproblems: ProblemReporter object for validating the StopTime in its new\nhome\n\nReturns:\nNone", "source": "codesearchnet"} {"code": "def apply_filter(objs, selector, mode):\n indices_to_delete = []\n presumption = (DELETE if (mode == KEEP) else KEEP)\n for (i, obj) in enumerate(objs):\n timer.log(('Applying selector: %s' % selector))\n marks = {k: mode for k in selector_to_ids(selector, obj, mode)}\n timer.log('done applying selector')\n timer.log('filtering object...')\n filter_object(obj, marks, presumption=presumption)\n timer.log('done filtering')\n if (obj is None):\n indices_to_delete.append(i)\n for index in reversed(indices_to_delete):\n del objs[index]", "docstring": "Apply selector to transform each object in objs.\n\nThis operates in-place on objs. Empty objects are removed from the list.\n\nArgs:\nmode: either KEEP (to keep selected items & their ancestors) or DELETE\n(to delete selected items and their children).", "source": "codesearchnet"} {"code": "def SetTimeZone(self, time_zone):\n \n try:\n self._time_zone = pytz.timezone(time_zone)\n except (AttributeError, pytz.UnknownTimeZoneError):\n raise ValueError('Unsupported timezone: {0!s}'.format(time_zone))", "docstring": "Sets the time zone.\n\nArgs:\ntime_zone (str): time zone.\n\nRaises:\nValueError: if the timezone is not supported.", "source": "juraj-google-style"} {"code": "def standardize_cell(cell, to_primitive=False, no_idealize=False, symprec=1e-05, angle_tolerance=(- 1.0)):\n _set_no_error()\n (lattice, _positions, _numbers, _) = _expand_cell(cell)\n if (lattice is None):\n return None\n num_atom = len(_positions)\n positions = np.zeros(((num_atom * 4), 3), dtype='double', order='C')\n positions[:num_atom] = _positions\n numbers = np.zeros((num_atom * 4), dtype='intc')\n numbers[:num_atom] = _numbers\n num_atom_std = spg.standardize_cell(lattice, positions, numbers, num_atom, (to_primitive * 1), (no_idealize * 1), symprec, angle_tolerance)\n _set_error_message()\n if (num_atom_std > 0):\n return (np.array(lattice.T, dtype='double', order='C'), np.array(positions[:num_atom_std], dtype='double', order='C'), np.array(numbers[:num_atom_std], dtype='intc'))\n else:\n return None", "docstring": "Return standardized cell.\n\nArgs:\ncell, symprec, angle_tolerance:\nSee the docstring of get_symmetry.\nto_primitive:\nbool: If True, the standardized primitive cell is created.\nno_idealize:\nbool: If True, it is disabled to idealize lengths and angles of\nbasis vectors and positions of atoms according to crystal\nsymmetry.\nReturn:\nThe standardized unit cell or primitive cell is returned by a tuple of\n(lattice, positions, numbers).\nIf it fails, None is returned.", "source": "codesearchnet"} {"code": "def _RemoveUsers(self, remove_users):\n \n for username in remove_users:\n self.utils.RemoveUser(username)\n self.user_ssh_keys.pop(username, None)\n self.invalid_users -= set(remove_users)", "docstring": "Deprovision Linux user accounts that do not appear in account metadata.\n\nArgs:\nremove_users: list, the username strings of the Linux accounts to remove.", "source": "juraj-google-style"} {"code": "def __init__(self, regex: Optional[str]=None):\n super().__init__()\n self._regex = re.compile(regex) if regex else None", "docstring": "Constructor.\n\nArgs:\nregex: An optional regular expression. 
If set to None, any string value is\nacceptable.", "source": "github-repos"} {"code": "def _format_data_list_with_options(self, data_list):\n if self._options and self._options.experimental_replication_mode == InputReplicationMode.PER_REPLICA and (not self._options.experimental_fetch_to_device):\n return [data_list]\n else:\n return data_list", "docstring": "Change the data in to a list type if required.\n\nThe OwnedMultiDeviceIterator returns the list data type,\nwhile the PER_REPLICA iterator (when used with prefetch disabled)\nreturns without the enclosed list. This is to fix the inconsistency.\nArgs:\ndata_list: data_list\nReturns:\nlist", "source": "github-repos"} {"code": "def from_dict(cls, d):\n \n defect = MontyDecoder().process_decoded( d[\"defect\"])\n uncorrected_energy = d[\"uncorrected_energy\"]\n corrections = d.get(\"corrections\", None)\n parameters = d.get(\"parameters\", None)\n entry_id = d.get(\"entry_id\", None)\n\n return cls(defect, uncorrected_energy, corrections=corrections,\n parameters=parameters, entry_id=entry_id)", "docstring": "Reconstitute a DefectEntry object from a dict representation created using\nas_dict().\nArgs:\nd (dict): dict representation of DefectEntry.\nReturns:\nDefectEntry object", "source": "juraj-google-style"} {"code": "def _get_normalizations(prices, forwards, strikes, discount_factors):\n strikes_abs = tf.abs(strikes)\n forwards_abs = tf.abs(forwards)\n orientations = strikes_abs >= forwards_abs\n normalization = tf.where(orientations, strikes_abs, forwards_abs)\n normalization = tf.where(tf.equal(normalization, 0), tf.ones_like(normalization), normalization)\n normalized_prices = prices / normalization\n if discount_factors is not None:\n normalized_prices /= discount_factors\n else:\n discount_factors = tf.ones_like(normalized_prices)\n return (normalized_prices, normalization, discount_factors)", "docstring": "Returns the normalized prices, normalization factors, and discount_factors.\n\nThe normalization factors is the larger of strikes and forwards.\nIf `discount_factors` is not None, these are the discount factors to expiry.\nIf None, no discounting is applied and 1's are returned.\n\nArgs:\nprices: A real `Tensor` of any shape. The observed market prices of the\nassets.\nforwards: A real `Tensor` of the same shape and dtype as `prices`. The\ncurrent forward prices to expiry.\nstrikes: A real `Tensor` of the same shape and dtype as `prices`. 
The strike\nprices of the options.\ndiscount_factors: A real `Tensor` of same dtype as the `prices`.\n\nReturns:\nthe normalized prices, normalization factors, and discount_factors.", "source": "github-repos"} {"code": "def write_version_info(filename, git_version):\n if b'\"' in git_version or b'\\\\' in git_version:\n git_version = b'git_version_is_invalid'\n contents = '\\n\\n\\n\n open(filename, 'w').write(contents)", "docstring": "Write a c file that defines the version functions.\n\nArgs:\nfilename: filename to write to.\ngit_version: the result of a git describe.", "source": "github-repos"} {"code": "def parse_meta(self, meta):\n res = {}\n for (key, val) in meta.items():\n if (not val):\n continue\n elif isinstance(val, dict):\n res[key] = self.parse_meta(val)\n elif val.startswith('current_user.'):\n res[key] = self.get_path_attribute(current_user, val)\n elif val.startswith('original.'):\n res[key] = self.get_path_attribute(self.get_original(), val)\n else:\n res[key] = self.get_path_attribute(self, val)\n return res", "docstring": "Parses the meta field in the message, copies it's keys into a new\ndict and replaces the values, which should be attribute paths relative\nto the passed in object, with the current value at the end of that\npath. This function will run recursively when it encounters other dicts\ninside the meta dict.\n\nArgs:\nmeta (dict):\nThe dictionary of mappings to pull structure of the meta from.\n\nReturns:\ndict:\nA copy of the keys from the meta dict with the values pulled\nfrom the paths.", "source": "codesearchnet"} {"code": "def __init__(\n self,\n indices,\n size,\n l2_regularization=0.0,\n l1_regularization=0.0,\n named_tensors=None,\n scope='embedding',\n summary_labels=()\n ):\n \n self.indices = indices\n self.size = size\n self.l2_regularization = l2_regularization\n self.l1_regularization = l1_regularization\n super(Embedding, self).__init__(named_tensors=named_tensors, scope=scope, summary_labels=summary_labels)", "docstring": "Embedding layer.\n\nArgs:\nindices: Number of embedding indices.\nsize: Embedding size.\nl2_regularization: L2 regularization weight.\nl1_regularization: L1 regularization weight.", "source": "juraj-google-style"} {"code": "def WatchMetadata(\n self, handler, metadata_key='', recursive=True, timeout=None):\n \n while True:\n response = self._HandleMetadataUpdate(\n metadata_key=metadata_key, recursive=recursive, wait=True,\n timeout=timeout)\n try:\n handler(response)\n except Exception as e:\n self.logger.exception('Exception calling the response handler. 
%s.', e)", "docstring": "Watch for changes to the contents of the metadata server.\n\nArgs:\nhandler: callable, a function to call with the updated metadata contents.\nmetadata_key: string, the metadata key to watch for changes.\nrecursive: bool, True if we should recursively watch for metadata changes.\ntimeout: int, timeout in seconds for returning metadata output.", "source": "juraj-google-style"} {"code": "def file_path_from_hash(self, file_hash, path=None, hash_list=None):\n if (hash_list is None):\n hash_list = list(file_hash)\n if (not hash_list):\n raise IOError('Directory structure is too full!')\n if (not path):\n path = os.path.join(self.path, hash_list.pop(0))\n files = os.listdir(path)\n if (file_hash in files):\n full_path = os.path.join(path, file_hash)\n if os.path.isfile(full_path):\n return PathAndHash(path=full_path, hash=file_hash)\n return PathAndHash(path=(full_path + '/'), hash=file_hash)\n next_path = os.path.join(path, hash_list.pop(0))\n if (not os.path.exists(next_path)):\n raise IOError('File not found in the structure.')\n return self.file_path_from_hash(file_hash=file_hash, path=next_path, hash_list=hash_list)", "docstring": "For given `file_hash`, return path on filesystem.\n\nArgs:\nfile_hash (str): Hash of the file, for which you wish to know the\npath.\npath (str, default None): Recursion argument, don't set this.\nhash_list (list, default None): Recursion argument, don't set this.\n\nReturns:\nstr: Path for given `file_hash` contained in :class:`.PathAndHash`\\\nobject.\n\nRaises:\nIOError: If the file with corresponding `file_hash` is not in \\\nstorage.", "source": "codesearchnet"} {"code": "def expand_batch_coordinates(bc, length_factor):\n \n assert bc.get_shape().as_list() == [1, None, 1]\n \n bc *= tf.constant([[1] * length_factor])\n \n bc = tf.reshape(bc, [1, -1, 1])\n \n return bc", "docstring": "Duplicate elements of bc by length_factor.\n\nArgs:\nbc (tf.Tensor): int32 tensor of shape [1, length, 1]\nlength_factor (int):\n\nReturns:\ntf.Tensor: of shape [1, length*length_factor, 1] where every elements has\nbeen duplicated length_factor times.", "source": "juraj-google-style"} {"code": "def Deserialize(self, reader: BinaryReader):\n \n super(ValidatorState, self).Deserialize(reader)\n self.PublicKey = ECDSA.Deserialize_Secp256r1(reader)\n self.Registered = reader.ReadBool()\n self.Votes = reader.ReadFixed8()", "docstring": "Deserialize full object.\n\nArgs:\nreader (neocore.IO.BinaryReader):", "source": "juraj-google-style"} {"code": "def _parse_apps_to_sync(self):\n apps_to_sync = set()\n section_title = 'applications_to_sync'\n if self._parser.has_section(section_title):\n apps_to_sync = set(self._parser.options(section_title))\n return apps_to_sync", "docstring": "Parse the applications to backup in the config.\n\nReturns:\nset", "source": "codesearchnet"} {"code": "def compute_cost(A2, Y):\n \n\n m = Y.shape[1] \n\n \n logprobs = np.multiply(np.log(A2), Y) + np.multiply(np.log(1 - A2), (1 - Y))\n cost = -np.sum(logprobs) / m\n\n cost = np.squeeze(cost) \n \n\n assert (isinstance(cost, float))\n\n return cost", "docstring": "Computes the cross-entropy cost given in equation (13)\n\nArguments:\nA2 -- The sigmoid output of the second activation, of shape (1, number of examples)\nY -- \"true\" labels vector of shape (1, number of examples)\nparameters -- python dictionary containing your parameters W1, b1, W2 and b2\n\nReturns:\ncost -- cross-entropy cost given equation (13)", "source": "juraj-google-style"} {"code": "def 
check_imported(module):\n \n imported = True\n module_info = ('', '', '')\n \n \n try:\n importlib.import_module(module)\n module_info = imp.find_module(module)\n except ImportError:\n imported = False\n\n \n module_path = module_info[1]\n description = module_info[2]\n\n if not description:\n \n imported = False\n elif not description and not module_path:\n \n imported = False\n elif module_path is not None and (\n 'dist-packages' in module_path or 'site-packages' in module_path\n ):\n \n imported = False\n return imported", "docstring": "Check whether the provide module can be imported (package installed).\n\nArgs:\nmodule (str): The name of the module to check availability.\n\nReturns:\nbool: True if the module can be imported, False otherwise.", "source": "juraj-google-style"} {"code": "def ddot(L, R, left=None, out=None):\n L = asarray(L, float)\n R = asarray(R, float)\n if (left is None):\n ok = ((min(L.ndim, R.ndim) == 1) and (max(L.ndim, R.ndim) == 2))\n if (not ok):\n msg = 'Wrong array layout. One array should have'\n msg += ' ndim=1 and the other one ndim=2.'\n raise ValueError(msg)\n left = (L.ndim == 1)\n if left:\n if (out is None):\n out = copy(R)\n L = L.reshape((list(L.shape) + ([1] * (R.ndim - 1))))\n return multiply(L, R, out=out)\n else:\n if (out is None):\n out = copy(L)\n return multiply(L, R, out=out)", "docstring": "r\"\"\"Dot product of a matrix and a diagonal one.\n\nArgs:\nL (array_like): Left matrix.\nR (array_like): Right matrix.\nout (:class:`numpy.ndarray`, optional): copy result to.\n\nReturns:\n:class:`numpy.ndarray`: Resulting matrix.", "source": "codesearchnet"} {"code": "def resolve_pname(self, pname: PrefName,\n mid: ModuleId) -> Tuple[YangIdentifier, ModuleId]:\n \n p, s, loc = pname.partition(\":\")\n try:\n mdata = self.modules[mid]\n except KeyError:\n raise ModuleNotRegistered(*mid) from None\n try:\n return (loc, mdata.prefix_map[p]) if s else (p, mdata.main_module)\n except KeyError:\n raise UnknownPrefix(p, mid) from None", "docstring": "Return the name and module identifier in which the name is defined.\n\nArgs:\npname: Name with an optional prefix.\nmid: Identifier of the module in which `pname` appears.\n\nRaises:\nModuleNotRegistered: If `mid` is not registered in the data model.\nUnknownPrefix: If the prefix specified in `pname` is not declared.", "source": "juraj-google-style"} {"code": "def deroot(self, label='OLDROOT'):\n if (self.root.edge_length is not None):\n self.root.add_child(Node(edge_length=self.root.edge_length, label=label))\n self.root.edge_length = None", "docstring": "If the tree has a root edge, drop the edge to be a child of the root node\n\nArgs:\n``label`` (``str``): The desired label of the new child", "source": "codesearchnet"} {"code": "def __init__(self, strategy, cluster_spec, task_type, task_id, session_config=None, rpc_layer='grpc', worker_barrier=None):\n self._strategy = strategy\n self._cluster_spec = cluster_spec\n self._task_type = task_type\n self._task_id = task_id\n self._session_config = session_config\n self._worker_barrier = worker_barrier\n self._rpc_layer = rpc_layer\n self._master_target = self._get_master_target()\n self._num_workers = _get_num_workers(cluster_spec)\n self._is_chief_node = self._is_chief()", "docstring": "Initialize the worker context object.\n\nArgs:\nstrategy: a `DistributionStrategy` object.\ncluster_spec: a ClusterSpec object. It can be empty or None in the local\ntraining case.\ntask_type: a string indicating the role of the corresponding task, such as\n\"worker\" or \"ps\". 
It can be None if it is local training or in-graph\nreplicated training.\ntask_id: an integer indicating id of the corresponding task. It can be\nNone if it is local training or in-graph replicated training.\nsession_config: an optional `tf.compat.v1.ConfigProto` object.\nrpc_layer: optional string specifying the RPC protocol for communication\nwith worker masters. If None or empty, hosts in the `cluster_spec` will\nbe used directly.\nworker_barrier: optional, the barrier object for worker synchronization.", "source": "github-repos"} {"code": "def _validate_none_or_type(t):\n \n def _validate(setting):\n \n if setting is not None and not isinstance(setting, t):\n raise ValueError('\"{}\" is not \"{}\"'.format(setting, t))\n return setting\n return _validate", "docstring": "Create a validator that checks if a setting is either None or a given type.\n\nArgs:\nt: The type to assert.\n\nReturns:\ncallable: A callable that will validate a setting for that type.", "source": "juraj-google-style"} {"code": "def assert_keys_have_values(self, caller, *keys):\n for key in keys:\n self.assert_key_has_value(key, caller)", "docstring": "Check that keys list are all in context and all have values.\n\nArgs:\n*keys: Will check each of these keys in context\ncaller: string. Calling function name - just used for informational\nmessages\n\nRaises:\nKeyNotInContextError: Key doesn't exist\nKeyInContextHasNoValueError: context[key] is None\nAssertionError: if *keys is None", "source": "codesearchnet"} {"code": "def ed25519_private_key_to_string(key):\n \n return base64.b64encode(key.private_bytes(\n encoding=serialization.Encoding.Raw,\n format=serialization.PrivateFormat.Raw,\n encryption_algorithm=serialization.NoEncryption()\n ), None).decode('utf-8')", "docstring": "Convert an ed25519 private key to a base64-encoded string.\n\nArgs:\nkey (Ed25519PrivateKey): the key to write to the file.\n\nReturns:\nstr: the key representation as a str", "source": "juraj-google-style"} {"code": "def delete_plan(self, plan_code):\n return self.client._delete((self.url + 'plans/{}'.format(plan_code)), headers=self.get_headers())", "docstring": "Delete an entire subscription plan associated with the merchant.\n\nArgs:\nplan_code: Plan’s identification code for the merchant.\n\nReturns:", "source": "codesearchnet"} {"code": "def list(self, **request_parameters):\n \n \n items = self._session.get_items(\n API_ENDPOINT,\n params=request_parameters\n )\n\n \n for item in items:\n yield self._object_factory(OBJECT_TYPE, item)", "docstring": "List all roles.\n\nArgs:\n**request_parameters: Additional request parameters (provides\nsupport for parameters that may be added in the future).\n\nReturns:\nGeneratorContainer: A GeneratorContainer which, when iterated,\nyields the roles returned by the Webex Teams query.\n\nRaises:\nTypeError: If the parameter types are incorrect.\nApiError: If the Webex Teams cloud returns an error.", "source": "juraj-google-style"} {"code": "def mapTypeToSql(fld_type=FieldType.NoType, fld_len=0):\n \n if fld_type == FieldType.Float:\n return \"FLOAT\"\n elif fld_type == FieldType.String:\n return \"VARCHAR(\" + str(fld_len) + \")\"\n elif fld_type == FieldType.Int:\n return \"INT\"\n elif fld_type == FieldType.Hex:\n return \"VARCHAR(\" + str(fld_len * 2) + \")\"\n elif fld_type == FieldType.PowerFactor:\n return \"VARCHAR(\" + str(fld_len) + \")\"\n else:\n ekm_log(\"Type \" + str(type) + \" not handled by mapTypeToSql, returned VARCHAR(255)\")\n return \"VARCHAR(255)\"", "docstring": "Translate 
FieldType to portable SQL Type. Override if needful.\nArgs:\nfld_type (int): :class:`~ekmmeters.FieldType` in serial block.\nfld_len (int): Binary length in serial block\n\nReturns:\nstring: Portable SQL type and length where appropriate.", "source": "juraj-google-style"} {"code": "def _log_progress(self, bytes_downloaded):\n \n self._total_bytes_downloaded += bytes_downloaded\n now = time.time()\n if (self._interactive_mode() or\n now - self._last_progress_msg_print_time > 15):\n \n \n self._print_download_progress_msg(\n \"Downloading %s: %s\" % (self._url,\n tf_utils.bytes_to_readable_str(\n self._total_bytes_downloaded, True)))\n self._last_progress_msg_print_time = now", "docstring": "Logs progress information about ongoing module download.\n\nArgs:\nbytes_downloaded: Number of bytes downloaded.", "source": "juraj-google-style"} {"code": "def Run(self, args, env):\n try:\n options, args = self.parser.parse_args(args)\n except SystemExit as e:\n return e.code\n conf = config.Config(env)\n if options.verbose:\n logger = logging.getLogger()\n logger.setLevel(logging.INFO)\n if options.debug:\n logger = logging.getLogger()\n logger.setLevel(logging.DEBUG)\n if options.config_file:\n conf.config_file = options.config_file\n self.log.info('using nss_cache library, version %s', nss_cache.__version__)\n self.log.debug('library path is %r', nss_cache.__file__)\n if not args:\n print('No command given')\n self.parser.print_help()\n return os.EX_USAGE\n if len(args) == 1 and args[0] == 'help':\n self.parser.print_help()\n return os.EX_OK\n self.log.debug('args: %r' % args)\n command_name = args.pop(0)\n self.log.debug('command: %r' % command_name)\n config.LoadConfig(conf)\n try:\n command_callable = getattr(command, command_name.capitalize())\n except AttributeError:\n self.log.warning('%s is not implemented', command_name)\n print('command %r is not implemented' % command_name)\n self.parser.print_help()\n return os.EX_SOFTWARE\n try:\n retval = command_callable().Run(conf=conf, args=args)\n except error.SourceUnavailable as e:\n self.log.error('Problem with configured data source: %s', e)\n return os.EX_TEMPFAIL\n return retval", "docstring": "Begin execution of nsscache.\n\nThis method loads our runtime configuration, instantiates the\nappropriate Source and Cache objects, and invokes the\nappropriate method based on the command given.\n\nNOTE: We avoid calling sys.exit() and instead return an int\nto our caller, who will exit with that status.\n\nArgs:\nargs: list of command line arguments\nenv: dictionary of environment variables\n\nReturns:\nPOSIX exit status", "source": "github-repos"} {"code": "def idxmax(self, axis=0, skipna=True, *args, **kwargs):\n \n if not all(d != np.dtype(\"O\") for d in self._get_dtypes()):\n raise TypeError(\"reduction operation 'argmax' not allowed for this dtype\")\n axis = self._get_axis_number(axis)\n return self._reduce_dimension(\n self._query_compiler.idxmax(axis=axis, skipna=skipna)\n )", "docstring": "Get the index of the first occurrence of the max value of the axis.\n\nArgs:\naxis (int): Identify the max over the rows (1) or columns (0).\nskipna (bool): Whether or not to skip NA values.\n\nReturns:\nA Series with the index for each maximum value for the axis\nspecified.", "source": "juraj-google-style"} {"code": "def _json_clean(d):\n \n result = {}\n compkeys = {}\n for k, v in d.items():\n if not isinstance(k, tuple):\n result[k] = v\n else:\n \n \n \n key = \"c.{}\".format(id(k))\n result[key] = v\n compkeys[key] = k\n\n return (result, 
compkeys)", "docstring": "Cleans the specified python `dict` by converting any tuple keys to\nstrings so that they can be serialized by JSON.\n\nArgs:\nd (dict): python dictionary to clean up.\n\nReturns:\ndict: cleaned-up dictionary.", "source": "juraj-google-style"} {"code": "def valid_content_type(self, content_type, accept):\n \n accept_tokens = accept.replace(' ', '').split(';')\n content_type_tokens = content_type.replace(' ', '').split(';')\n\n return (\n all(elem in content_type_tokens for elem in accept_tokens) and\n (content_type_tokens[0] == 'application/vnd.oasis.taxii+json' or\n content_type_tokens[0] == 'application/vnd.oasis.stix+json')\n )", "docstring": "Check that the server is returning a valid Content-Type\n\nArgs:\ncontent_type (str): ``Content-Type:`` header value\naccept (str): media type to include in the ``Accept:`` header.", "source": "juraj-google-style"} {"code": "def reconstruct_text(tokens: List[Token]) -> str:\n \n return \"\".join([x.text_with_ws for x in tokens])", "docstring": "Given a list of tokens, reconstruct the original text with as much fidelity as possible.\n\nArgs:\n[tokens]:\n\nReturns: a string.", "source": "juraj-google-style"} {"code": "def get_commit_tree(profile, sha):\n data = commits.get_commit(profile, sha)\n tree = data.get('tree')\n sha = tree.get('sha')\n return sha", "docstring": "Get the SHA of a commit's tree.\n\nArgs:\n\nprofile\nA profile generated from ``simplygithub.authentication.profile``.\nSuch profiles tell this module (i) the ``repo`` to connect to,\nand (ii) the ``token`` to connect with.\n\nsha\nThe SHA of a commit.\n\nReturns:\nThe SHA of the commit's tree.", "source": "codesearchnet"} {"code": "def enc(self, byts, asscd=None):\n \n iv = os.urandom(16)\n encryptor = AESGCM(self.ekey)\n byts = encryptor.encrypt(iv, byts, asscd)\n envl = {'iv': iv, 'data': byts, 'asscd': asscd}\n return s_msgpack.en(envl)", "docstring": "Encrypt the given bytes and return an envelope dict in msgpack form.\n\nArgs:\nbyts (bytes): The message to be encrypted.\nasscd (bytes): Extra data that needs to be authenticated (but not encrypted).\n\nReturns:\nbytes: The encrypted message. 
This is a msgpacked dictionary\ncontaining the IV, ciphertext, and associated data.", "source": "juraj-google-style"} {"code": "def Serialize(self, writer: BinaryWriter):\n \n super(ValidatorState, self).Serialize(writer)\n self.PublicKey.Serialize(writer)\n writer.WriteBool(self.Registered)\n writer.WriteFixed8(self.Votes)", "docstring": "Serialize full object.\n\nArgs:\nwriter (neo.IO.BinaryWriter):", "source": "juraj-google-style"} {"code": "def splitby(iterable: Iterable[_T], predicate: Callable[[_T], bool]) -> tuple[list[_T], list[_T]]:\n false_list = []\n true_list = []\n for v in iterable:\n if predicate(v):\n true_list.append(v)\n else:\n false_list.append(v)\n return (false_list, true_list)", "docstring": "Split the iterable into 2 lists (false, true), based on the predicate.\n\nExample:\n\n```python\nsmall, big = epy.splitby([100, 4, 4, 1, 200], lambda x: x > 10)\nassert small == [4, 4, 1]\nassert big == [100, 200]\n```\n\nArgs:\niterable: The iterable to split\npredicate: Function applied to split\n\nReturns:\nFalse list, True list", "source": "github-repos"} {"code": "def save(self, project):\n \n\n \n if 'id' in project and project['id'] is not None:\n \n self.logger.debug('Updating existing project: ' + json.dumps(project))\n url = '%(base_url)s/%(project_id)s' % {\n 'base_url': self.base_url, 'project_id': project['id']\n }\n r = self.gbdx_connection.put(url, json=project)\n try:\n r.raise_for_status()\n except:\n print(r.text)\n raise\n \n return project['id']\n else:\n self.logger.debug('Creating new project: ' + json.dumps(project))\n \n url = self.base_url\n r = self.gbdx_connection.post(url, json=project)\n try:\n r.raise_for_status()\n except:\n print(r.text)\n raise\n project_json = r.json()\n \n return project_json['id']", "docstring": "Saves an AnswerFactory Project\n\nArgs:\nproject (dict): Dictionary specifying an AnswerFactory Project.\n\nReturns:\nAnswerFactory Project id", "source": "juraj-google-style"} {"code": "def add_map(self, counters_map):\n \n for counter_name in counters_map.counters:\n self.increment(counter_name, counters_map.counters[counter_name])", "docstring": "Add all counters from the map.\n\nFor each counter in the passed map, adds its value to the counter in this\nmap.\n\nArgs:\ncounters_map: CounterMap instance to add.", "source": "juraj-google-style"} {"code": "def execute(self, *args, **kwargs):\n self.walk(*args, **kwargs)\n failed_steps = [step for step in self.steps if (step.status == FAILED)]\n if failed_steps:\n raise PlanFailed(failed_steps)", "docstring": "Walks each step in the underlying graph, and raises an exception if\nany of the steps fail.\n\nRaises:\nPlanFailed: Raised if any of the steps fail.", "source": "codesearchnet"} {"code": "def function_completions(completion_text: str, bel_spec: BELSpec, function_list: list, bel_fmt: str, size: int) -> list:\n if isinstance(function_list, list):\n if (bel_fmt in ['short', 'medium']):\n function_list = [bel_spec['functions']['to_short'][fn] for fn in function_list]\n else:\n function_list = [bel_spec['functions']['to_long'][fn] for fn in function_list]\n elif (bel_fmt in ['short', 'medium']):\n function_list = bel_spec['functions']['primary']['list_short']\n else:\n function_list = bel_spec['functions']['primary']['list_long']\n matches = []\n for f in function_list:\n escaped_completion_text = completion_text.replace('(', '\\\\(').replace(')', '\\\\)')\n log.debug(f'Completion match: {escaped_completion_text} F: {f}')\n if re.match(escaped_completion_text, f):\n matches.append(f)\n 
replace_list = []\n for match in matches:\n if completion_text:\n highlight = match.replace(completion_text, f'{completion_text}')\n else:\n highlight = completion_text\n replace_list.append({'replacement': match, 'label': f'{match}()', 'highlight': highlight, 'type': 'Function'})\n return replace_list[:size]", "docstring": "Filter BEL functions by prefix\n\nArgs:\nprefix: completion string\nbel_fmt: short, medium, long BEL formats\nspec: BEL specification\n\nReturns:\nlist: list of BEL functions that match prefix", "source": "codesearchnet"} {"code": "def __init__(self, ordinals, years, months, days):\n self._ordinals = tf.convert_to_tensor(ordinals, dtype=tf.int32, name='dt_ordinals')\n self._years = tf.convert_to_tensor(years, dtype=tf.int32, name='dt_years')\n self._months = tf.convert_to_tensor(months, dtype=tf.int32, name='dt_months')\n self._days = tf.convert_to_tensor(days, dtype=tf.int32, name='dt_days')\n self._day_of_year = None", "docstring": "Initializer.\n\nThis initializer is primarily for internal use. More convenient construction\nmethods are available via 'dates_from_*' functions.\n\nArgs:\nordinals: Tensor of type int32. Each value is number of days since 1 Jan\n0001. 1 Jan 0001 has `ordinal=1`. `years`, `months` and `days` must\nrepresent the same dates as `ordinals`.\nyears: Tensor of type int32, of same shape as `ordinals`.\nmonths: Tensor of type int32, of same shape as `ordinals`\ndays: Tensor of type int32, of same shape as `ordinals`.", "source": "github-repos"} {"code": "def reverse_action(self, action):\n \n FUNCTIONS = actions.FUNCTIONS \n\n aif = self._agent_interface_format\n\n def func_call_ability(ability_id, cmd_type, *args):\n \n if ability_id not in actions.ABILITY_IDS:\n logging.warning(\"Unknown ability_id: %s. This is probably dance or \"\n \"cheer, or some unknown new or map specific ability. \"\n \"Treating it as a no-op.\", ability_id)\n return FUNCTIONS.no_op()\n\n if aif.hide_specific_actions:\n general_id = next(iter(actions.ABILITY_IDS[ability_id])).general_id\n if general_id:\n ability_id = general_id\n\n for func in actions.ABILITY_IDS[ability_id]:\n if func.function_type is cmd_type:\n return FUNCTIONS[func.id](*args)\n raise ValueError(\"Unknown ability_id: %s, type: %s. 
Likely a bug.\" % (\n ability_id, cmd_type.__name__))\n\n if action.HasField(\"action_ui\"):\n act_ui = action.action_ui\n if act_ui.HasField(\"multi_panel\"):\n return FUNCTIONS.select_unit(act_ui.multi_panel.type - 1,\n act_ui.multi_panel.unit_index)\n if act_ui.HasField(\"control_group\"):\n return FUNCTIONS.select_control_group(\n act_ui.control_group.action - 1,\n act_ui.control_group.control_group_index)\n if act_ui.HasField(\"select_idle_worker\"):\n return FUNCTIONS.select_idle_worker(act_ui.select_idle_worker.type - 1)\n if act_ui.HasField(\"select_army\"):\n return FUNCTIONS.select_army(act_ui.select_army.selection_add)\n if act_ui.HasField(\"select_warp_gates\"):\n return FUNCTIONS.select_warp_gates(\n act_ui.select_warp_gates.selection_add)\n if act_ui.HasField(\"select_larva\"):\n return FUNCTIONS.select_larva()\n if act_ui.HasField(\"cargo_panel\"):\n return FUNCTIONS.unload(act_ui.cargo_panel.unit_index)\n if act_ui.HasField(\"production_panel\"):\n return FUNCTIONS.build_queue(act_ui.production_panel.unit_index)\n if act_ui.HasField(\"toggle_autocast\"):\n return func_call_ability(act_ui.toggle_autocast.ability_id,\n actions.autocast)\n\n if (action.HasField(\"action_feature_layer\") or\n action.HasField(\"action_render\")):\n act_sp = actions.spatial(action, aif.action_space)\n if act_sp.HasField(\"camera_move\"):\n coord = point.Point.build(act_sp.camera_move.center_minimap)\n return FUNCTIONS.move_camera(coord)\n if act_sp.HasField(\"unit_selection_point\"):\n select_point = act_sp.unit_selection_point\n coord = point.Point.build(select_point.selection_screen_coord)\n return FUNCTIONS.select_point(select_point.type - 1, coord)\n if act_sp.HasField(\"unit_selection_rect\"):\n select_rect = act_sp.unit_selection_rect\n \n \n \n tl = point.Point.build(select_rect.selection_screen_coord[0].p0)\n br = point.Point.build(select_rect.selection_screen_coord[0].p1)\n return FUNCTIONS.select_rect(select_rect.selection_add, tl, br)\n if act_sp.HasField(\"unit_command\"):\n cmd = act_sp.unit_command\n queue = int(cmd.queue_command)\n if cmd.HasField(\"target_screen_coord\"):\n coord = point.Point.build(cmd.target_screen_coord)\n return func_call_ability(cmd.ability_id, actions.cmd_screen,\n queue, coord)\n elif cmd.HasField(\"target_minimap_coord\"):\n coord = point.Point.build(cmd.target_minimap_coord)\n return func_call_ability(cmd.ability_id, actions.cmd_minimap,\n queue, coord)\n else:\n return func_call_ability(cmd.ability_id, actions.cmd_quick, queue)\n\n if action.HasField(\"action_raw\") or action.HasField(\"action_render\"):\n raise ValueError(\"Unknown action:\\n%s\" % action)\n\n return FUNCTIONS.no_op()", "docstring": "Transform an SC2-style action into an agent-style action.\n\nThis should be the inverse of `transform_action`.\n\nArgs:\naction: a `sc_pb.Action` to be transformed.\n\nReturns:\nA corresponding `actions.FunctionCall`.\n\nRaises:\nValueError: if it doesn't know how to transform this action.", "source": "juraj-google-style"} {"code": "def rtt_read(self, buffer_index, num_bytes):\n buf = (ctypes.c_ubyte * num_bytes)()\n bytes_read = self._dll.JLINK_RTTERMINAL_Read(buffer_index, buf, num_bytes)\n if (bytes_read < 0):\n raise errors.JLinkRTTException(bytes_read)\n return list(buf)[:bytes_read]", "docstring": "Reads data from the RTT buffer.\n\nThis method will read at most num_bytes bytes from the specified\nRTT buffer. 
The data is automatically removed from the RTT buffer.\nIf there are not num_bytes bytes waiting in the RTT buffer, the\nentire contents of the RTT buffer will be read.\n\nArgs:\nself (JLink): the ``JLink`` instance\nbuffer_index (int): the index of the RTT buffer to read from\nnum_bytes (int): the maximum number of bytes to read\n\nReturns:\nA list of bytes read from RTT.\n\nRaises:\nJLinkRTTException if the underlying JLINK_RTTERMINAL_Read call fails.", "source": "codesearchnet"} {"code": "def plot_cv(self, tmin, tmax, ntemp, ylim=None, **kwargs):\n \n temperatures = np.linspace(tmin, tmax, ntemp)\n\n if self.structure:\n ylabel = r\"$C_v$ (J/K/mol)\"\n else:\n ylabel = r\"$C_v$ (J/K/mol-c)\"\n\n fig = self._plot_thermo(self.dos.cv, temperatures, ylabel=ylabel, ylim=ylim, **kwargs)\n\n return fig", "docstring": "Plots the constant volume specific heat C_v in a temperature range.\n\nArgs:\ntmin: minimum temperature\ntmax: maximum temperature\nntemp: number of steps\nylim: tuple specifying the y-axis limits.\nkwargs: kwargs passed to the matplotlib function 'plot'.\nReturns:\nmatplotlib figure", "source": "juraj-google-style"} {"code": "def convert_instancenorm(params, w_name, scope_name, inputs, layers, weights, names):\n print('Converting instancenorm ...')\n if (names == 'short'):\n tf_name = ('IN' + random_string(6))\n elif (names == 'keep'):\n tf_name = w_name\n else:\n tf_name = (w_name + str(random.random()))\n assert (len(inputs) == 3)\n bias_name = '{0}.bias'.format(w_name)\n weights_name = '{0}.weight'.format(w_name)\n if ((inputs[(- 2)] + '_np') in layers):\n gamma = layers[(inputs[(- 2)] + '_np')]\n else:\n gamma = weights[weights_name].numpy()\n if ((inputs[(- 1)] + '_np') in layers):\n beta = layers[(inputs[(- 1)] + '_np')]\n else:\n beta = weights[bias_name].numpy()\n\n def target_layer(x, epsilon=params['epsilon'], gamma=gamma, beta=beta):\n layer = tf.contrib.layers.instance_norm(x, param_initializers={'beta': tf.constant_initializer(beta), 'gamma': tf.constant_initializer(gamma)}, epsilon=epsilon, data_format='NCHW', trainable=False)\n return layer\n lambda_layer = keras.layers.Lambda(target_layer, name=tf_name)\n layers[scope_name] = lambda_layer(layers[inputs[0]])", "docstring": "Convert instance normalization layer.\n\nArgs:\nparams: dictionary with layer parameters\nw_name: name prefix in state_dict\nscope_name: pytorch scope name\ninputs: pytorch node inputs\nlayers: dictionary with keras tensors\nweights: pytorch state_dict\nnames: use short names for keras layers", "source": "codesearchnet"} {"code": "def Flash(self, partition, timeout_ms=0, info_cb=DEFAULT_MESSAGE_CALLBACK):\n \n return self._SimpleCommand(b'flash', arg=partition, info_cb=info_cb,\n timeout_ms=timeout_ms)", "docstring": "Flashes the last downloaded file to the given partition.\n\nArgs:\npartition: Partition to overwrite with the new image.\ntimeout_ms: Optional timeout in milliseconds to wait for it to finish.\ninfo_cb: See Download. 
Usually no messages.\n\nReturns:\nResponse to a download request, normally nothing.", "source": "juraj-google-style"} {"code": "def find_all_template(im_source, im_search, threshold=0.5, maxcnt=0, rgb=False, bgremove=False):\n method = cv2.TM_CCOEFF_NORMED\n if rgb:\n s_bgr = cv2.split(im_search)\n i_bgr = cv2.split(im_source)\n weight = (0.3, 0.3, 0.4)\n resbgr = [0, 0, 0]\n for i in range(3):\n resbgr[i] = cv2.matchTemplate(i_bgr[i], s_bgr[i], method)\n res = (((resbgr[0] * weight[0]) + (resbgr[1] * weight[1])) + (resbgr[2] * weight[2]))\n else:\n s_gray = cv2.cvtColor(im_search, cv2.COLOR_BGR2GRAY)\n i_gray = cv2.cvtColor(im_source, cv2.COLOR_BGR2GRAY)\n if bgremove:\n s_gray = cv2.Canny(s_gray, 100, 200)\n i_gray = cv2.Canny(i_gray, 100, 200)\n res = cv2.matchTemplate(i_gray, s_gray, method)\n (w, h) = (im_search.shape[1], im_search.shape[0])\n result = []\n while True:\n (min_val, max_val, min_loc, max_loc) = cv2.minMaxLoc(res)\n if (method in [cv2.TM_SQDIFF, cv2.TM_SQDIFF_NORMED]):\n top_left = min_loc\n else:\n top_left = max_loc\n if DEBUG:\n print(('templmatch_value(thresh:%.1f) = %.3f' % (threshold, max_val)))\n if (max_val < threshold):\n break\n middle_point = ((top_left[0] + (w / 2)), (top_left[1] + (h / 2)))\n result.append(dict(result=middle_point, rectangle=(top_left, (top_left[0], (top_left[1] + h)), ((top_left[0] + w), top_left[1]), ((top_left[0] + w), (top_left[1] + h))), confidence=max_val))\n if (maxcnt and (len(result) >= maxcnt)):\n break\n cv2.floodFill(res, None, max_loc, ((- 1000),), ((max_val - threshold) + 0.1), 1, flags=cv2.FLOODFILL_FIXED_RANGE)\n return result", "docstring": "Locate image position with cv2.templateFind\n\nUse pixel match to find pictures.\n\nArgs:\nim_source(string): 图像、素材\nim_search(string): 需要查找的图片\nthreshold: 阈值,当相识度小于该阈值的时候,就忽略掉\n\nReturns:\nA tuple of found [(point, score), ...]\n\nRaises:\nIOError: when file read error", "source": "codesearchnet"} {"code": "def merge_equivalent_compounds(model):\n \n def dicts_are_compatible(d1, d2):\n return all(key not in d1 or key not in d2 or d1[key] == d2[key]\n for key in set(d1) | set(d2))\n\n compound_compartment = {}\n inelegible = set()\n for reaction in model.reactions:\n equation = reaction.equation\n if equation is None:\n continue\n\n for compound, _ in equation.compounds:\n compartment = compound.compartment\n if compartment is not None:\n compound_compartment[compound.name] = compartment\n if not compound.name.endswith('_{}'.format(compartment)):\n inelegible.add(compound.name)\n\n compound_groups = {}\n for compound_id, compartment in iteritems(compound_compartment):\n if compound_id in inelegible:\n continue\n\n suffix = '_{}'.format(compound_compartment[compound_id])\n if compound_id.endswith(suffix):\n group_name = compound_id[:-len(suffix)]\n compound_groups.setdefault(group_name, set()).add(compound_id)\n\n compound_mapping = {}\n merged_compounds = {}\n for group, compound_set in iteritems(compound_groups):\n \n merged = []\n for compound_id in compound_set:\n props = dict(model.compounds[compound_id].properties)\n\n \n props.pop('id', None)\n props.pop('compartment', None)\n\n for merged_props, merged_set in merged:\n if dicts_are_compatible(props, merged_props):\n merged_set.add(compound_id)\n merged_props.update(props)\n break\n else:\n keys = set(key for key in set(props) | set(merged_props)\n if key not in props or\n key not in merged_props or\n props[key] != merged_props[key])\n logger.info(\n 'Unable to merge {} into {}, difference in'\n ' keys: {}'.format(\n compound_id, 
', '.join(merged_set),\n ', '.join(keys)))\n else:\n merged.append((props, {compound_id}))\n\n if len(merged) == 1:\n \n merged_props, merged_set = merged[0]\n\n for compound_id in merged_set:\n compound_mapping[compound_id] = group\n merged_compounds[group] = merged_props\n else:\n \n \n for merged_props, merged_set in merged:\n compartments = set(compound_compartment[c] for c in merged_set)\n merged_name = '{}_{}'.format(\n group, '_'.join(sorted(compartments)))\n\n for compound_id in merged_set:\n compound_mapping[compound_id] = merged_name\n merged_compounds[merged_name] = merged_props\n\n \n for reaction in model.reactions:\n equation = reaction.equation\n if equation is None:\n continue\n\n reaction.equation = equation.translated_compounds(\n lambda c: compound_mapping.get(c, c))\n\n \n new_compounds = []\n for compound in model.compounds:\n if compound.id not in compound_mapping:\n new_compounds.append(compound)\n else:\n group = compound_mapping[compound.id]\n if group not in merged_compounds:\n continue\n props = merged_compounds.pop(group)\n props['id'] = group\n new_compounds.append(DictCompoundEntry(\n props, filemark=compound.filemark))\n\n model.compounds.clear()\n model.compounds.update(new_compounds)\n\n \n new_exchange = OrderedDict()\n for compound, reaction_id, lower, upper in itervalues(model.exchange):\n new_compound = compound.translate(\n lambda name: compound_mapping.get(name, name))\n new_exchange[new_compound] = new_compound, reaction_id, lower, upper\n\n model.exchange.clear()\n model.exchange.update(new_exchange)", "docstring": "Merge equivalent compounds in various compartments.\n\nTries to detect and merge compound entries that represent the same\ncompound in different compartments. The entries are only merged if all\nproperties are equivalent. Compound entries must have an ID with a suffix\nof an underscore followed by the compartment ID. This suffix will be\nstripped and compounds with identical IDs are merged if the properties\nare identical.\n\nArgs:\nmodel: :class:`NativeModel`.", "source": "juraj-google-style"} {"code": "def most_specific_compatible_type(self, other: 'TypeSpec') -> 'TypeSpec':\n result = self.most_specific_common_supertype([other])\n if result is None:\n raise ValueError('No TypeSpec is compatible with both %s and %s' % (self, other))\n return result", "docstring": "Returns the most specific TypeSpec compatible with `self` and `other`.\n\nDeprecated. 
Please use `most_specific_common_supertype` instead.\nDo not override this function.\n\nArgs:\nother: A `TypeSpec`.\n\nRaises:\nValueError: If there is no TypeSpec that is compatible with both `self`\nand `other`.", "source": "github-repos"} {"code": "def add_column(self, column_name, column_values):\n if (isinstance(column_values, list) and isinstance(column_values[0], list)):\n raise ValueError('\"column_values\" must be a flat list, but we detected that its first entry is a list')\n if (isinstance(column_values, np.ndarray) and (column_values.ndim != 1)):\n raise ValueError(('\"column_values\" should be of rank 1, but is of rank %d' % column_values.ndim))\n if (len(column_values) != self.num_points):\n raise ValueError(('\"column_values\" should be of length %d, but is of length %d' % (self.num_points, len(column_values))))\n if (column_name in self.name_to_values):\n raise ValueError(('The column name \"%s\" is already used' % column_name))\n self.column_names.append(column_name)\n self.name_to_values[column_name] = column_values", "docstring": "Adds a named column of metadata values.\n\nArgs:\ncolumn_name: Name of the column.\ncolumn_values: 1D array/list/iterable holding the column values. Must be\nof length `num_points`. The i-th value corresponds to the i-th point.\n\nRaises:\nValueError: If `column_values` is not 1D array, or of length `num_points`,\nor the `name` is already used.", "source": "codesearchnet"} {"code": "def _Connect(self, banner=None, **kwargs):\n \n\n if not banner:\n banner = socket.gethostname().encode()\n\n conn_str = self.protocol_handler.Connect(self._handle, banner=banner, **kwargs)\n\n \n parts = conn_str.split(b'::')\n self._device_state = parts[0]\n\n \n self.build_props = str(parts[1].split(b';'))\n\n return True", "docstring": "Connect to the device.\n\nArgs:\nbanner: See protocol_handler.Connect.\n**kwargs: See protocol_handler.Connect and adb_commands.ConnectDevice for kwargs.\nIncludes handle, rsa_keys, and auth_timeout_ms.\nReturns:\nAn instance of this class if the device connected successfully.", "source": "juraj-google-style"} {"code": "def local_symbol_table(imports=None, symbols=()):\n return SymbolTable(table_type=LOCAL_TABLE_TYPE, symbols=symbols, imports=imports)", "docstring": "Constructs a local symbol table.\n\nArgs:\nimports (Optional[SymbolTable]): Shared symbol tables to import.\nsymbols (Optional[Iterable[Unicode]]): Initial local symbols to add.\n\nReturns:\nSymbolTable: A mutable local symbol table with the seeded local symbols.", "source": "codesearchnet"} {"code": "def read(self, vals):\n \n i = 0\n if len(vals[i]) == 0:\n self.number_of_records_per_hour = None\n else:\n self.number_of_records_per_hour = vals[i]\n i += 1\n if len(vals[i]) == 0:\n self.data_period_name_or_description = None\n else:\n self.data_period_name_or_description = vals[i]\n i += 1\n if len(vals[i]) == 0:\n self.data_period_start_day_of_week = None\n else:\n self.data_period_start_day_of_week = vals[i]\n i += 1\n if len(vals[i]) == 0:\n self.data_period_start_day = None\n else:\n self.data_period_start_day = vals[i]\n i += 1\n if len(vals[i]) == 0:\n self.data_period_end_day = None\n else:\n self.data_period_end_day = vals[i]\n i += 1", "docstring": "Read values.\n\nArgs:\nvals (list): list of strings representing values", "source": "juraj-google-style"} {"code": "def get_referenced_object(prev_obj, obj, dot_separated_name, desired_type=None):\n from textx.scoping import Postponed\n assert (prev_obj or (not (type(obj) is list)))\n names = 
dot_separated_name.split('.')\n match = re.match('parent\\\\((\\\\w+)\\\\)', names[0])\n if match:\n next_obj = obj\n desired_parent_typename = match.group(1)\n next_obj = get_recursive_parent_with_typename(next_obj, desired_parent_typename)\n if next_obj:\n return get_referenced_object(None, next_obj, '.'.join(names[1:]), desired_type)\n else:\n return None\n elif (type(obj) is list):\n next_obj = None\n for res in obj:\n if (hasattr(res, 'name') and (res.name == names[0])):\n if ((desired_type is None) or textx_isinstance(res, desired_type)):\n next_obj = res\n else:\n raise TypeError('{} has type {} instead of {}.'.format(names[0], type(res).__name__, desired_type.__name__))\n if (not next_obj):\n if needs_to_be_resolved(prev_obj, names[0]):\n return Postponed()\n else:\n return None\n elif (type(obj) is Postponed):\n return Postponed()\n else:\n next_obj = getattr(obj, names[0])\n if (not next_obj):\n if needs_to_be_resolved(obj, names[0]):\n return Postponed()\n else:\n return None\n if (len(names) > 1):\n return get_referenced_object(obj, next_obj, '.'.join(names[1:]), desired_type)\n if ((type(next_obj) is list) and needs_to_be_resolved(obj, names[0])):\n return Postponed()\n return next_obj", "docstring": "get objects based on a path\n\nArgs:\nprev_obj: the object containing obj (req. if obj is a list)\nobj: the current object\ndot_separated_name: the attribute name \"a.b.c.d\" starting from obj\nNote: the attribute \"parent(TYPE)\" is a shortcut to jump to the\nparent of type \"TYPE\" (exact match of type name).\ndesired_type: (optional)\n\nReturns:\nthe object if found, None if not found or Postponed() if some postponed\nrefs are found on the path", "source": "codesearchnet"} {"code": "def get_file_handle(file_path):\n LOG.debug('Check if file end is correct')\n if (not os.path.exists(file_path)):\n raise IOError('No such file:{0}'.format(file_path))\n if (not (os.path.splitext(file_path)[(- 1)] in VALID_ENDINGS)):\n raise IOError('Not a valid vcf file name: {}'.format(file_path))\n vcf_obj = VCF(file_path)\n return vcf_obj", "docstring": "Return cyvcf2 VCF object\n\nArgs:\nfile_path(str)\n\nReturns:\nvcf_obj(cyvcf2.VCF)", "source": "codesearchnet"} {"code": "def _get_client_fqdn(self, client_info_contents):\n yamldict = yaml.safe_load(client_info_contents)\n fqdn = yamldict['system_info']['fqdn']\n client_id = yamldict['client_id'].split('/')[1]\n return (client_id, fqdn)", "docstring": "Extracts a GRR client's FQDN from its client_info.yaml file.\n\nArgs:\nclient_info_contents: The contents of the client_info.yaml file.\n\nReturns:\nA (str, str) tuple representing client ID and client FQDN.", "source": "codesearchnet"} {"code": "def find_item(self, fq_name):\n names = fq_name.split(self._separator)\n current = self._yapconf_items\n for name in names:\n if isinstance(current, (YapconfDictItem, YapconfListItem)):\n current = current.children\n if (name not in current):\n return None\n current = current[name]\n return current", "docstring": "Find an item in the specification by fully qualified name.\n\nArgs:\nfq_name (str): Fully-qualified name of the item.\n\nReturns:\nThe item if it is in the specification. 
None otherwise", "source": "codesearchnet"} {"code": "def get_configuration(self, uri):\n req_headers = {'Accept': 'application/vnd.onshape.v1+json', 'Content-Type': 'application/json'}\n return self._api.request('get', (((((((('/api/partstudios/d/' + uri['did']) + '/') + uri['wvm_type']) + '/') + uri['wvm']) + '/e/') + uri['eid']) + '/configuration'), headers=req_headers)", "docstring": "get the configuration of a PartStudio\n\nArgs:\n- uri (dict): points to a particular element\n\nReturns:\n- requests.Response: Onshape response data", "source": "codesearchnet"} {"code": "def get_run_start_intro(run_call_count, fetches, feed_dict, tensor_filters, is_callable_runner=False):\n fetch_lines = common.get_flattened_names(fetches)\n if not feed_dict:\n feed_dict_lines = [debugger_cli_common.RichLine(' (Empty)')]\n else:\n feed_dict_lines = []\n for feed_key in feed_dict:\n feed_key_name = common.get_graph_element_name(feed_key)\n feed_dict_line = debugger_cli_common.RichLine(' ')\n feed_dict_line += debugger_cli_common.RichLine(feed_key_name, debugger_cli_common.MenuItem(None, \"pf '%s'\" % feed_key_name))\n feed_dict_lines.append(feed_dict_line)\n feed_dict_lines = debugger_cli_common.rich_text_lines_from_rich_line_list(feed_dict_lines)\n out = debugger_cli_common.RichTextLines(_HORIZONTAL_BAR)\n if is_callable_runner:\n out.append('Running a runner returned by Session.make_callable()')\n else:\n out.append('Session.run() call \n out.append('')\n out.append('Fetch(es):')\n out.extend(debugger_cli_common.RichTextLines([' ' + line for line in fetch_lines]))\n out.append('')\n out.append('Feed dict:')\n out.extend(feed_dict_lines)\n out.append(_HORIZONTAL_BAR)\n out.append('')\n out.append('Select one of the following commands to proceed ---->')\n out.extend(_recommend_command('run', 'Execute the run() call with debug tensor-watching', create_link=True))\n out.extend(_recommend_command('run -n', 'Execute the run() call without debug tensor-watching', create_link=True))\n out.extend(_recommend_command('run -t ', 'Execute run() calls (T - 1) times without debugging, then execute run() once more with debugging and drop back to the CLI'))\n out.extend(_recommend_command('run -f ', 'Keep executing run() calls until a dumped tensor passes a given, registered filter (conditional breakpoint mode)'))\n more_lines = [' Registered filter(s):']\n if tensor_filters:\n filter_names = []\n for filter_name in tensor_filters:\n filter_names.append(filter_name)\n command_menu_node = debugger_cli_common.MenuItem('', 'run -f %s' % filter_name)\n more_lines.append(RL(' * ') + RL(filter_name, command_menu_node))\n else:\n more_lines.append(' (None)')\n out.extend(debugger_cli_common.rich_text_lines_from_rich_line_list(more_lines))\n out.append('')\n out.append_rich_line(RL('For more details, see ') + RL('help.', debugger_cli_common.MenuItem('', 'help')) + '.')\n out.append('')\n menu = debugger_cli_common.Menu()\n menu.append(debugger_cli_common.MenuItem('run', 'run'))\n menu.append(debugger_cli_common.MenuItem('exit', 'exit'))\n out.annotations[debugger_cli_common.MAIN_MENU_KEY] = menu\n return out", "docstring": "Generate formatted intro for run-start UI.\n\nArgs:\nrun_call_count: (int) Run call counter.\nfetches: Fetches of the `Session.run()` call. See doc of `Session.run()`\nfor more details.\nfeed_dict: Feeds to the `Session.run()` call. 
See doc of `Session.run()`\nfor more details.\ntensor_filters: (dict) A dict from tensor-filter name to tensor-filter\ncallable.\nis_callable_runner: (bool) whether a runner returned by\nSession.make_callable is being run.\n\nReturns:\n(RichTextLines) Formatted intro message about the `Session.run()` call.", "source": "github-repos"} {"code": "def run(coro, loop=None):\n \n loop = loop or asyncio.get_event_loop()\n return loop.run_until_complete(coro)", "docstring": "Convenient shortcut alias to ``loop.run_until_complete``.\n\nArguments:\ncoro (coroutine): coroutine object to schedule.\nloop (asyncio.BaseEventLoop): optional event loop to use.\nDefaults to: ``asyncio.get_event_loop()``.\n\nReturns:\nmixed: returned value by coroutine.\n\nUsage::\n\nasync def mul_2(num):\nreturn num * 2\n\npaco.run(mul_2(4))\n# => 8", "source": "juraj-google-style"} {"code": "def _FullUpdateFromFile(self, cache, source_map, force_write=False):\n return_val = 0\n for entry in source_map:\n if not entry.Verify():\n raise error.InvalidMap('Map is not valid. Aborting')\n if len(source_map) == 0 and (not force_write):\n raise error.EmptyMap('Source map empty during full update, aborting. Use --force-write to override.')\n return_val += cache.WriteMap(map_data=source_map, force_write=force_write)\n if return_val == 0:\n mtime = os.stat(cache.GetCacheFilename()).st_mtime\n self.log.debug('Cache filename %s has mtime %d', cache.GetCacheFilename(), mtime)\n self.WriteModifyTimestamp(mtime)\n self.WriteUpdateTimestamp()\n return return_val", "docstring": "Write a new map into the provided cache (overwrites).\n\nArgs:\ncache: A nss_cache.caches.Cache object.\nsource_map: The map whose contents we're replacing the cache with, that is\nused for verification.\nforce_write: A boolean flag forcing empty map updates when False,\ndefaults to False.\n\nReturns:\n0 if succesful, non-zero indicating number of failures otherwise.\n\nRaises:\nEmptyMap: Update is an empty map, not raised if force_write=True.\nInvalidMap:", "source": "github-repos"} {"code": "def load_config(self):\n logger.debug('loading config file: %s', self.config_file)\n if os.path.exists(self.config_file):\n with open(self.config_file) as file_handle:\n return json.load(file_handle)\n else:\n logger.error('configuration file is required for eventify')\n logger.error('unable to load configuration for service')\n raise EventifyConfigError(('Configuration is required! Missing: %s' % self.config_file))", "docstring": "Load configuration for the service\n\nArgs:\nconfig_file: Configuration file path", "source": "codesearchnet"} {"code": "def main(conf_file, overwrite, logger):\n \n uid = pwd.getpwnam(get_username()).pw_uid\n\n \n logger.info(\"Stopping the daemon.\")\n sh.service(get_service_name(), \"stop\")\n\n \n logger.info(\"Creating config file.\")\n create_config(\n cnf_file=conf_file,\n uid=uid,\n overwrite=overwrite\n )\n logger.info(\"Creating log file.\")\n create_log(\n log_file=REQUIRED_SETTINGS[\"LogFile\"],\n uid=uid\n )\n\n \n logger.info(\"Starting the daemon..\")\n sh.service(get_service_name(), \"start\")", "docstring": "Create configuration and log file. Restart the daemon when configuration\nis done.\n\nArgs:\nconf_file (str): Path to the configuration file.\noverwrite (bool): Overwrite the configuration file with `clean` config?", "source": "juraj-google-style"} {"code": "def set_features(self, partition=1):\n if (len(self.json) < (partition + 1)):\n raise ValueError('Not enough dates for the specified partition size: {0}. 
Try a smaller partition.'.format(partition))\n data = []\n for offset in range((len(self.json) - partition)):\n json = self.json[offset:(offset + partition)]\n data.append(eval_features(json))\n return pd.DataFrame(data=data, dtype=np.float32)", "docstring": "Parses market data JSON for technical analysis indicators\n\nArgs:\npartition: Int of how many dates to take into consideration\nwhen evaluating technical analysis indicators.\n\nReturns:\nPandas DataFrame instance with columns as numpy.float32 features.", "source": "codesearchnet"} {"code": "def _enum_from_direction(direction):\n if isinstance(direction, int):\n return direction\n if (direction == Query.ASCENDING):\n return enums.StructuredQuery.Direction.ASCENDING\n elif (direction == Query.DESCENDING):\n return enums.StructuredQuery.Direction.DESCENDING\n else:\n msg = _BAD_DIR_STRING.format(direction, Query.ASCENDING, Query.DESCENDING)\n raise ValueError(msg)", "docstring": "Convert a string representation of a direction to an enum.\n\nArgs:\ndirection (str): A direction to order by. Must be one of\n:attr:`~.firestore.Query.ASCENDING` or\n:attr:`~.firestore.Query.DESCENDING`.\n\nReturns:\nint: The enum corresponding to ``direction``.\n\nRaises:\nValueError: If ``direction`` is not a valid direction.", "source": "codesearchnet"} {"code": "def init_grad(obj, allow_lazy_initializer=False):\n if (obj is None):\n return 0.0\n (initializer, supports_lazy_initializer) = grad_initializers[type(obj)]\n if supports_lazy_initializer:\n if isinstance(obj, ZeroGradient):\n if allow_lazy_initializer:\n return ZeroGradient(obj.like)\n else:\n return obj.instantiate()\n elif allow_lazy_initializer:\n return ZeroGradient(obj)\n else:\n assert (not isinstance(obj, ZeroGradient))\n return initializer(obj)", "docstring": "Initialize the gradient for an object.\n\nArgs:\nobj: The object to initialize the gradient for, can be either a number,\narray, tuple, list, or dictionary.\nallow_lazy_initializer: Whether to allow using the ZeroGradient wrapper,\nfor efficiency.\n\nReturns:\nAn object of the same type, shape, etc. but with all numeric values set to\nzero. 
If the type is unknown, a zero is returned.", "source": "codesearchnet"} {"code": "def classify_field(value):\n if (not (isinstance(value, six.string_types) and value)):\n return\n schema = load_schema('elements/inspire_field')\n inspire_categories = schema['properties']['term']['enum']\n for inspire_category in inspire_categories:\n if (value.upper() == inspire_category.upper()):\n return inspire_category\n category = normalize_arxiv_category(value)\n return ARXIV_TO_INSPIRE_CATEGORY_MAPPING.get(category, 'Other')", "docstring": "Normalize ``value`` to an Inspire category.\n\nArgs:\nvalue(str): an Inspire category to properly case, or an arXiv category\nto translate to the corresponding Inspire category.\n\nReturns:\nstr: ``None`` if ``value`` is not a non-empty string,\notherwise the corresponding Inspire category.", "source": "codesearchnet"} {"code": "def to_json_string(self) -> str:\n dictionary = self.to_dict()\n for key, value in dictionary.items():\n if isinstance(value, np.ndarray):\n dictionary[key] = value.tolist()\n _processor_class = dictionary.pop('_processor_class', None)\n if _processor_class is not None:\n dictionary['processor_class'] = _processor_class\n return json.dumps(dictionary, indent=2, sort_keys=True) + '\\n'", "docstring": "Serializes this instance to a JSON string.\n\nReturns:\n`str`: String containing all the attributes that make up this feature_extractor instance in JSON format.", "source": "github-repos"} {"code": "def extend_transformations(self, transformations, return_alternatives=False):\n for t in transformations:\n self.append_transformation(t, return_alternatives=return_alternatives)", "docstring": "Extends a sequence of transformations to the TransformedStructure.\n\nArgs:\ntransformations: Sequence of Transformations\nreturn_alternatives: Whether to return alternative\nTransformedStructures for one-to-many transformations.\nreturn_alternatives can be a number, which stipulates the\ntotal number of structures to return.", "source": "codesearchnet"} {"code": "def create_token_type_ids_from_sequences(self, token_ids_0: List[int], token_ids_1: Optional[List[int]]=None) -> List[int]:\n sep = [self.sep_token_id]\n cls = [self.cls_token_id]\n if token_ids_1 is None:\n return len(cls + token_ids_0 + sep) * [0]\n return len(cls + token_ids_0 + sep + sep + token_ids_1 + sep) * [0]", "docstring": "Create a mask from the two sequences passed to be used in a sequence-pair classification task. XLM-RoBERTa does\nnot make use of token type ids, therefore a list of zeros is returned.\n\nArgs:\ntoken_ids_0 (`List[int]`):\nList of IDs.\ntoken_ids_1 (`List[int]`, *optional*):\nOptional second list of IDs for sequence pairs.\n\nReturns:\n`List[int]`: List of zeros.", "source": "github-repos"} {"code": "def bleu_score(logits, labels):\n predictions = tf.to_int32(tf.argmax(logits, axis=(- 1)))\n bleu = tf.py_func(compute_bleu, (labels, predictions), tf.float32)\n return (bleu, tf.constant(1.0))", "docstring": "Approximate BLEU score computation between labels and predictions.\n\nAn approximate BLEU scoring method since we do not glue word pieces or\ndecode the ids and tokenize the output. By default, we use ngram order of 4\nand use brevity penalty. 
Also, this does not have beam search.\n\nArgs:\nlogits: Tensor of size [batch_size, length_logits, vocab_size]\nlabels: Tensor of size [batch-size, length_labels]\n\nReturns:\nbleu: int, approx bleu score", "source": "codesearchnet"} {"code": "def associate_blocks(blocks, layout_pairs, start_peb_num):\n seq_blocks = []\n for layout_pair in layout_pairs:\n seq_blocks = sort.by_image_seq(blocks, blocks[layout_pair[0]].ec_hdr.image_seq)\n layout_pair.append(seq_blocks)\n return layout_pairs", "docstring": "Group block indexes with appropriate layout pairs\n\nArguments:\nList:blocks -- List of block objects\nList:layout_pairs -- List of grouped layout blocks\nInt:start_peb_num -- Number of the PEB to start from.\n\nReturns:\nList -- Layout block pairs grouped with associated block ranges.", "source": "codesearchnet"} {"code": "def run_cmd(self, *args, **kwargs):\n \n timeout = kwargs.pop('timeout', None)\n p = self.raw_cmd(*args, **kwargs)\n return p.communicate(timeout=timeout)[0].decode('utf-8').replace('\\r\\n', '\\n')", "docstring": "Unix style output, already replace \\r\\n to \\n\n\nArgs:\n- timeout (float): timeout for a command exec", "source": "juraj-google-style"} {"code": "def Field(dagster_type, default_value=FIELD_NO_DEFAULT_PROVIDED, is_optional=INFER_OPTIONAL_COMPOSITE_FIELD, is_secret=False, description=None):\n config_type = resolve_to_config_type(dagster_type)\n if (not config_type):\n raise DagsterInvalidDefinitionError('Attempted to pass {value_repr} to a Field that expects a valid dagster type usable in config (e.g. Dict, NamedDict, Int, String et al).'.format(value_repr=repr(dagster_type)))\n return FieldImpl(config_type=resolve_to_config_type(dagster_type), default_value=default_value, is_optional=is_optional, is_secret=is_secret, description=description)", "docstring": "The schema for configuration data that describes the type, optionality, defaults, and description.\n\nArgs:\ndagster_type (DagsterType):\nA ``DagsterType`` describing the schema of this field, ie `Dict({'example': Field(String)})`\ndefault_value (Any):\nA default value to use that respects the schema provided via dagster_type\nis_optional (bool): Whether the presence of this field is optional\ndespcription (str):", "source": "codesearchnet"} {"code": "def make_decoder(num_topics, num_words):\n \n topics_words_logits = tf.compat.v1.get_variable(\n \"topics_words_logits\",\n shape=[num_topics, num_words],\n initializer=tf.compat.v1.glorot_normal_initializer())\n topics_words = tf.nn.softmax(topics_words_logits, axis=-1)\n\n def decoder(topics):\n word_probs = tf.matmul(topics, topics_words)\n \n \n \n return tfd.OneHotCategorical(probs=word_probs,\n name=\"bag_of_words\")\n\n return decoder, topics_words", "docstring": "Create the decoder function.\n\nArgs:\nnum_topics: The number of topics.\nnum_words: The number of words.\n\nReturns:\ndecoder: A `callable` mapping a `Tensor` of encodings to a\n`tfd.Distribution` instance over words.", "source": "juraj-google-style"} {"code": "def pinch(self, direction='in', percent=0.6, duration=2.0, dead_zone=0.1):\n if (direction not in ('in', 'out')):\n raise ValueError('Argument `direction` should be one of \"in\" or \"out\". Got {}'.format(repr(direction)))\n if (dead_zone >= percent):\n raise ValueError('Argument `dead_zone` should not be greater than `percent`. 
dead_zoon={}, percent={}'.format(repr(dead_zone), repr(percent)))\n tracks = make_pinching(direction, [0.5, 0.5], [1, 1], percent, dead_zone, duration)\n speed = (((percent - dead_zone) / 2) / duration)\n ret = self.apply_motion_tracks(tracks, accuracy=(speed * 0.03))\n return ret", "docstring": "Squeezing or expanding 2 fingers on the entire screen.\n\nArgs:\ndirection (:py:obj:`str`): pinching direction, only \"in\" or \"out\". \"in\" for squeezing, \"out\" for expanding\npercent (:py:obj:`float`): squeezing range from or expanding range to of the entire screen\nduration (:py:obj:`float`): time interval in which the action is performed\ndead_zone (:py:obj:`float`): pinching inner circle radius. should not be greater than ``percent``", "source": "codesearchnet"} {"code": "def add_key_value(self, key, value):\n key = self._metadata_map.get(key, key)\n if (key in ['dateAdded', 'lastModified']):\n self._indicator_data[key] = self._utils.format_datetime(value, date_format='%Y-%m-%dT%H:%M:%SZ')\n elif (key == 'confidence'):\n self._indicator_data[key] = int(value)\n elif (key == 'rating'):\n self._indicator_data[key] = float(value)\n else:\n self._indicator_data[key] = value", "docstring": "Add custom field to Indicator object.\n\n.. note:: The key must be the exact name required by the batch schema.\n\nExample::\n\nfile_hash = tcex.batch.file('File', '1d878cdc391461e392678ba3fc9f6f32')\nfile_hash.add_key_value('size', '1024')\n\nArgs:\nkey (str): The field key to add to the JSON batch data.\nvalue (str): The field value to add to the JSON batch data.", "source": "codesearchnet"} {"code": "def deactivate(self, node_id):\n node = self.node_list[node_id]\n self.node_list[node_id] = node._replace(active=False)", "docstring": "Deactivate the node identified by node_id.\n\nDeactivates the node corresponding to node_id, which means that\nit can never be the output of a nearest_point query.\n\nNote:\nThe node is not removed from the tree, its data is steel available.\n\nArgs:\nnode_id (int): The node identifier (given to the user after\nits insertion).", "source": "codesearchnet"} {"code": "def _to_numpy_type(dtype):\n if isinstance(dtype, dtypes.DType):\n return dtype.as_numpy_dtype\n return np.dtype(dtype)", "docstring": "Converts a native python or TF DType to numpy type.\n\nArgs:\ndtype: Could be a python type, a numpy type or a TF DType.\n\nReturns:\nA NumPy `dtype`.", "source": "github-repos"} {"code": "def default_value(fieldname, datatype):\n if (fieldname in tsdb_coded_attributes):\n return str(tsdb_coded_attributes[fieldname])\n else:\n return _default_datatype_values.get(datatype, '')", "docstring": "Return the default value for a column.\n\nIf the column name (e.g. *i-wf*) is defined to have an idiosyncratic\nvalue, that value is returned. Otherwise the default value for the\ncolumn's datatype is returned.\n\nArgs:\nfieldname: the column name (e.g. `i-wf`)\ndatatype: the datatype of the column (e.g. `:integer`)\nReturns:\nThe default value for the column.\n\n.. 
deprecated:: v0.7.0", "source": "codesearchnet"} {"code": "def __init__(\n self, name, aliases=None, alignment_size=None, description=None,\n urls=None):\n \n super(PaddingDefinition, self).__init__(\n name, aliases=aliases, description=description, urls=urls)\n self.alignment_size = alignment_size", "docstring": "Initializes a padding data type definition.\n\nArgs:\nname (str): name.\naliases (Optional[list[str]]): aliases.\nalignment_size (Optional[int]): alignment size.\ndescription (Optional[str]): description.\nurls (Optional[list[str]]): URLs.", "source": "juraj-google-style"} {"code": "def url(self, endpoint=''):\n if (not endpoint.startswith('/')):\n endpoint = ('/' + endpoint)\n return (((self.protocol + ':", "docstring": "Get the base URL of the Remote.\n\nArguments:\nNone\nReturns:\n`str` base URL", "source": "codesearchnet"} {"code": "def _send_message(self, method, endpoint, params=None, data=None):\n url = (self.url + endpoint)\n r = self.session.request(method, url, params=params, data=data, auth=self.auth, timeout=30)\n return r.json()", "docstring": "Send API request.\n\nArgs:\nmethod (str): HTTP method (get, post, delete, etc.)\nendpoint (str): Endpoint (to be added to base URL)\nparams (Optional[dict]): HTTP request parameters\ndata (Optional[str]): JSON-encoded string payload for POST\n\nReturns:\ndict/list: JSON response", "source": "codesearchnet"} {"code": "def symmetric_difference(self, other):\n \n other = self._cast_to_frameset(other)\n if other is NotImplemented:\n return NotImplemented\n from_frozenset = self.items.symmetric_difference(other.items)\n return self.from_iterable(from_frozenset, sort=True)", "docstring": "Returns a new :class:`FrameSet` that contains all the elements in either\n`self` or `other`, but not both.\n\nArgs:\nother (:class:`FrameSet`):\n\nReturns:\n:class:`FrameSet`:", "source": "juraj-google-style"} {"code": "def export(self, composite=False):\n \n\n if composite:\n if rname_rfc6680 is None:\n raise NotImplementedError(\"Your GSSAPI implementation does \"\n \"not support RFC 6680 (the GSSAPI \"\n \"naming extensions)\")\n\n return rname_rfc6680.export_name_composite(self)\n else:\n return rname.export_name(self)", "docstring": "Export this name as a token.\n\nThis method exports the name into a byte string which can then be\nimported by using the `token` argument of the constructor.\n\nArgs:\ncomposite (bool): whether or not use to a composite token --\n:requires-ext:`rfc6680`\n\nReturns:\nbytes: the exported name in token form\n\nRaises:\nMechanismNameRequiredError\nBadNameTypeError\nBadNameError", "source": "juraj-google-style"} {"code": "def generate_pyi_ast(src: str, options: config.Options | None=None, loader: load_pytd.Loader | None=None) -> analyze.Analysis:\n options = options or config.Options.create()\n if options.use_rewrite:\n infer_types = rewrite_analyze.infer_types\n else:\n infer_types = analyze.infer_types\n with config.verbosity_from(options):\n ret = _call(infer_types, src, options, loader)\n mod = ret.ast\n mod.Visit(visitors.VerifyVisitor())\n mod = optimize.Optimize(mod, ret.ast_deps, lossy=False, use_abcs=False, max_union=7, remove_mutable=False)\n mod = pytd_utils.CanonicalOrdering(mod)\n ret.ast = mod\n return ret", "docstring": "Run the inferencer on a string of source code, producing output.\n\nArgs:\nsrc: The source code.\noptions: config.Options object.\nloader: A load_pytd.Loader instance.\n\nReturns:\nAn analyze.Analysis object containing the inferencer results.\n\nRaises:\nCompileError: If we couldn't 
parse the input file.\nUsageError: If the input filepath is invalid.", "source": "github-repos"} {"code": "def register_from_fields(self, *args):\n \n names = []\n for field in args:\n widget = self.resolve_widget(field)\n self.register(widget.config_name)\n if widget.config_name not in names:\n names.append(widget.config_name)\n\n return names", "docstring": "Register config name from field widgets\n\nArguments:\n*args: Fields that contains widget\n:class:`djangocodemirror.widget.CodeMirrorWidget`.\n\nReturns:\nlist: List of registered config names from fields.", "source": "juraj-google-style"} {"code": "def label_total_duration(self):\n durations = collections.defaultdict(float)\n for label in self:\n durations[label.value] += label.duration\n return durations", "docstring": "Return for each distinct label value the total duration of all occurrences.\n\nReturns:\ndict: A dictionary containing for every label-value (key)\nthe total duration in seconds (value).\n\nExample:\n>>> ll = LabelList(labels=[\n>>> Label('a', 3, 5),\n>>> Label('b', 5, 8),\n>>> Label('a', 8, 10),\n>>> Label('b', 10, 14),\n>>> Label('a', 15, 18.5)\n>>> ])\n>>> ll.label_total_duration()\n{'a': 7.5 'b': 7.0}", "source": "codesearchnet"} {"code": "def add_layer(self, layer, input_node_id):\n \n if isinstance(input_node_id, Iterable):\n layer.input = list(map(lambda x: self.node_list[x], input_node_id))\n output_node_id = self._add_node(Node(layer.output_shape))\n for node_id in input_node_id:\n self._add_edge(layer, node_id, output_node_id)\n\n else:\n layer.input = self.node_list[input_node_id]\n output_node_id = self._add_node(Node(layer.output_shape))\n self._add_edge(layer, input_node_id, output_node_id)\n\n layer.output = self.node_list[output_node_id]\n return output_node_id", "docstring": "Add a layer to the Graph.\nArgs:\nlayer: An instance of the subclasses of StubLayer in layers.py.\ninput_node_id: An integer. The ID of the input node of the layer.\nReturns:\noutput_node_id: An integer. The ID of the output node of the layer.", "source": "juraj-google-style"} {"code": "def _item_to_document_ref(iterator, item):\n document_id = item.name.split(_helpers.DOCUMENT_PATH_DELIMITER)[(- 1)]\n return iterator.collection.document(document_id)", "docstring": "Convert Document resource to document ref.\n\nArgs:\niterator (google.api_core.page_iterator.GRPCIterator):\niterator response\nitem (dict): document resource", "source": "codesearchnet"} {"code": "def FilterRange(self, start_time=None, stop_time=None):\n start_time = self._NormalizeTime(start_time)\n stop_time = self._NormalizeTime(stop_time)\n self.data = [p for p in self.data if (((start_time is None) or (p[1] >= start_time)) and ((stop_time is None) or (p[1] < stop_time)))]", "docstring": "Filter the series to lie between start_time and stop_time.\n\nRemoves all values of the series which are outside of some time range.\n\nArgs:\nstart_time: If set, timestamps before start_time will be dropped.\nstop_time: If set, timestamps at or past stop_time will be dropped.", "source": "codesearchnet"} {"code": "def __call__(self, product):\n \n reviewers = self.reviewers(product)\n Nq = len(reviewers)\n\n if Nq == 1:\n return 0.5\n\n else:\n \n var = np.var([self.review_score(r, product)\n for r in reviewers], ddof=1)\n return np.log(Nq) / (var + 1)", "docstring": "Compute credibility of a given product.\n\nArgs:\nproduct: An instance of :class:`bipartite.Product`.\n\nReturns:\nThe credibility of the product. 
It is >= 0.5.", "source": "juraj-google-style"} {"code": "def _object_identifier(self):\n return '_generic_user_object'", "docstring": "String used to identify this object in a SavedModel.\n\nTHIS FIELD HAS BEEN DEPRECATED IN FAVOR OF THE NAME REGISTERED WITH\n`register_serializable`.\n\nGenerally, the object identifier is constant across objects of the same\nclass, while the metadata field is used for instance-specific data.\n\nReturns:\nString object identifier.", "source": "github-repos"} {"code": "def __format__(self, format_spec='dd'):\n \n text = super(Station.__base__, self).__format__(format_spec)\n\n if self.alt_id:\n return '%s (%s - %s)' % (self.name, self.alt_id, text)\n else:\n return '%s (%s)' % (self.name, text)", "docstring": "Extended pretty printing for location strings.\n\nArgs:\nformat_spec str: Coordinate formatting system to use\n\nReturns:\nHuman readable string representation of ``Point`` object\n\nRaises:\nValueError: Unknown value for ``format_spec``", "source": "juraj-google-style"} {"code": "def _check_stop_conditions(self, sensor_graph):\n for stop in self.stop_conditions:\n if stop.should_stop(self.tick_count, (self.tick_count - self._start_tick), sensor_graph):\n return True\n return False", "docstring": "Check if any of our stop conditions are met.\n\nArgs:\nsensor_graph (SensorGraph): The sensor graph we are currently simulating\n\nReturns:\nbool: True if we should stop the simulation", "source": "codesearchnet"} {"code": "def str_to_timedelta(value):\n m = re.match('^(?:(?:datetime\\\\.)?timedelta)?\\\\(?([^)]*)\\\\)?$', value)\n if (not m):\n raise ValueError('Invalid string for datetime.timedelta')\n args = [int(a.strip()) for a in m.group(1).split(',')]\n return datetime.timedelta(*args)", "docstring": "Convert a string to a datetime.timedelta value.\n\nThe following strings are accepted:\n\n- 'datetime.timedelta(1, 5, 12345)'\n- 'timedelta(1, 5, 12345)'\n- '(1, 5, 12345)'\n- '1, 5, 12345'\n- '1'\n\nif there are less then three parameters, the missing parameters are\nassumed to be 0. 
Variations in the spacing of the parameters are allowed.\n\nRaises:\nValueError for strings not matching the above criterion.", "source": "codesearchnet"} {"code": "def check_placeholders(value):\n \n if isinstance(value, six.string_types):\n if TOKEN_REGEX.search(value):\n raise ValueError('{0:s} must be replaced in dictionary'.format(value))\n elif isinstance(value, list):\n return [check_placeholders(item) for item in value]\n elif isinstance(value, dict):\n return {key: check_placeholders(val) for key, val in value.items()}\n elif isinstance(value, tuple):\n return tuple(check_placeholders(val) for val in value)\n return value", "docstring": "Checks if any values in a given dictionary still contain @ parameters.\n\nArgs:\nvalue: Dictionary, list, or string that will be recursively checked for\nplaceholders\n\nRaises:\nValueError: There still exists a value with an @ parameter.\n\nReturns:\nTop-level caller: a modified dict with replaced tokens.\nRecursive caller: a modified object with replaced tokens.", "source": "juraj-google-style"} {"code": "def primary(self, repl_id):\n \n repl = self[repl_id]\n primary = repl.primary()\n return repl.member_info(repl.host2id(primary))", "docstring": "find and return primary hostname\nArgs:\nrepl_id - replica set identity", "source": "juraj-google-style"} {"code": "def add(clss, func, deprecated_name):\n\n @Deprecator(func.__name__, deprecated_name)\n def _old_function(*args, **kwargs):\n return func(*args, **kwargs)\n setattr(clss, deprecated_name, _old_function)", "docstring": "Add the deprecated version of a member function to the given class.\nGives a deprecation warning on usage.\n\nArgs:\nclss: the class where the deprecated function is to be added\nfunc: the actual function that is called by the deprecated version\ndeprecated_name: the deprecated name of the function", "source": "codesearchnet"} {"code": "def parse_mobly_cli_args(argv):\n parser = argparse.ArgumentParser(description='Mobly Test Executable.')\n group = parser.add_mutually_exclusive_group(required=True)\n group.add_argument('-c', '--config', type=str, metavar='', help='Path to the test configuration file.')\n group.add_argument('-l', '--list_tests', action='store_true', help='Print the names of the tests defined in a script without executing them.')\n parser.add_argument('--tests', '--test_case', nargs='+', type=str, metavar='[test_a test_b re:test_(c|d)...]', help='A list of tests in the test class to execute. Each value can be a test name string or a `re:` prefixed string for full regex match of test names.')\n parser.add_argument('-tb', '--test_bed', nargs='+', type=str, metavar='[ ...]', help='Specify which test beds to run tests on.')\n parser.add_argument('-v', '--verbose', action='store_true', help='Set console logger level to DEBUG')\n if not argv:\n argv = sys.argv[1:]\n return parser.parse_known_args(argv)[0]", "docstring": "Parses cli args that are consumed by Mobly.\n\nThis is the arg parsing logic for the default test_runner.main entry point.\n\nMultiple arg parsers can be applied to the same set of cli input. So you\ncan use this logic in addition to any other args you want to parse. This\nfunction ignores the args that don't apply to default `test_runner.main`.\n\nArgs:\nargv: A list that is then parsed as cli args. 
If None, defaults to cli\ninput.\n\nReturns:\nNamespace containing the parsed args.", "source": "github-repos"} {"code": "def _configure_from_module(self, item):\n \n package = None\n if item[0] == '.':\n package = self.import_name\n\n obj = importlib.import_module(item, package=package)\n\n self.config.from_object(obj)\n\n return self", "docstring": "Configure from a module by import path.\n\nEffectively, you give this an absolute or relative import path, it will\nimport it, and then pass the resulting object to\n``_configure_from_object``.\n\nArgs:\nitem (str):\nA string pointing to a valid import path.\n\nReturns:\nfleaker.App:\nReturns itself.", "source": "juraj-google-style"} {"code": "def for_default_graph(*args, **kwargs):\n \n graph = tf.get_default_graph()\n collection = graph.get_collection(_BOOKKEEPER)\n if collection:\n if args or kwargs:\n raise ValueError('Requesting construction of a BookKeeper that already '\n 'exists: %s %s' % (args, kwargs))\n return collection[0]\n else:\n books = BOOKKEEPER_FACTORY(*args, g=graph, **kwargs)\n graph.add_to_collection(_BOOKKEEPER, books)\n return books", "docstring": "Creates a bookkeeper for the default graph.\n\nArgs:\n*args: Arguments to pass into Bookkeeper's constructor.\n**kwargs: Arguments to pass into Bookkeeper's constructor.\nReturns:\nA new Bookkeeper.\nRaises:\nValueError: If args or kwargs are provided and the Bookkeeper already\nexists.", "source": "juraj-google-style"} {"code": "def is_prefix(cls, path):\n lagofile = paths.Paths(path).prefix_lagofile()\n return os.path.isfile(lagofile)", "docstring": "Check if a path is a valid prefix\n\nArgs:\npath(str): path to be checked\n\nReturns:\nbool: True if the given path is a prefix", "source": "codesearchnet"} {"code": "def SetKeyPathPrefix(self, key_path_prefix):\n \n self._key_path_prefix = key_path_prefix\n self._key_path_prefix_length = len(key_path_prefix)\n self._key_path_prefix_upper = key_path_prefix.upper()", "docstring": "Sets the Window Registry key path prefix.\n\nArgs:\nkey_path_prefix (str): Windows Registry key path prefix.", "source": "juraj-google-style"} {"code": "def __init__(self, latitude, longitude, altitude, name=None,\n identity=None):\n \n super(Trigpoint, self).__init__(latitude, longitude)\n self.altitude = altitude\n self.name = name\n self.identity = identity", "docstring": "Initialise a new ``Trigpoint`` object.\n\nArgs:\nlatitude (float): Location's latitude\nlongitude (float): Location's longitude\naltitude (float): Location's altitude\nname (str): Name for location\nidentity (int): Database identifier, if known", "source": "juraj-google-style"} {"code": "def extract_build_info(exe_path, elf_section=ELF_SECTION):\n \n build_info = {}\n with mkdtemp() as tempd, pushd(tempd):\n proc = subprocess.Popen(\n [\n OBJCOPY,\n DUMP_SECTION,\n \"{secn}={ofile}\".format(secn=elf_section, ofile=BUILDINFO_FILE),\n exe_path,\n ],\n stderr=subprocess.PIPE,\n )\n proc.wait()\n errno = proc.returncode\n stderr = proc.stderr.read()\n if errno or len(stderr): \n LOGGER.warning('objcopy failed with errno %s.', errno)\n if len(stderr):\n LOGGER.warning('objcopy failed with following msg:\\n%s', stderr)\n return build_info\n\n with open(BUILDINFO_FILE) as build_info_f:\n try:\n build_info = json.load(build_info_f, object_hook=byteify)\n except JSONDcdError as jsde:\n LOGGER.warning('benchmark executable build is not valid json:')\n LOGGER.warning(jsde.msg)\n LOGGER.warning('build info section content:')\n LOGGER.warning(jsde.doc)\n return build_info", "docstring": 
"Extracts the build information from a given executable.\n\nThe build information is expected to be in json format, which is parsed\nand returned as a dictionary.\nIf no build information is found an empty dictionary is returned.\n\nThis assumes binutils 2.25 to work.\n\nArgs:\nexe_path (str): The full path to the executable to be examined\n\nReturns:\ndict: A dictionary of the extracted information.", "source": "juraj-google-style"} {"code": "def IsOutOfLineMethodDefinition(clean_lines, linenum):\n \n \n for i in xrange(linenum, max(-1, linenum - 10), -1):\n if Match(r'^([^()]*\\w+)\\(', clean_lines.elided[i]):\n return Match(r'^[^()]*\\w+::\\w+\\(', clean_lines.elided[i]) is not None\n return False", "docstring": "Check if current line contains an out-of-line method definition.\n\nArgs:\nclean_lines: A CleansedLines instance containing the file.\nlinenum: The number of the line to check.\nReturns:\nTrue if current line contains an out-of-line method definition.", "source": "juraj-google-style"} {"code": "def diff_commonOverlap(self, text1, text2):\n \n \n text1_length = len(text1)\n text2_length = len(text2)\n \n if text1_length == 0 or text2_length == 0:\n return 0\n \n if text1_length > text2_length:\n text1 = text1[-text2_length:]\n elif text1_length < text2_length:\n text2 = text2[:text1_length]\n text_length = min(text1_length, text2_length)\n \n if text1 == text2:\n return text_length\n\n \n \n \n best = 0\n length = 1\n while True:\n pattern = text1[-length:]\n found = text2.find(pattern)\n if found == -1:\n return best\n length += found\n if found == 0 or text1[-length:] == text2[:length]:\n best = length\n length += 1", "docstring": "Determine if the suffix of one string is the prefix of another.\n\nArgs:\ntext1 First string.\ntext2 Second string.\n\nReturns:\nThe number of characters common to the end of the first\nstring and the start of the second string.", "source": "juraj-google-style"} {"code": "def tag(self, repository, tag=None, **kwargs):\n return self.client.api.tag(self.id, repository, tag=tag, **kwargs)", "docstring": "Tag this image into a repository. 
Similar to the ``docker tag``\ncommand.\n\nArgs:\nrepository (str): The repository to set for the tag\ntag (str): The tag name\nforce (bool): Force\n\nRaises:\n:py:class:`docker.errors.APIError`\nIf the server returns an error.\n\nReturns:\n(bool): ``True`` if successful", "source": "codesearchnet"} {"code": "def value_to_string(self, obj):\n value = self._get_val_from_obj(obj)\n return self.get_prep_value(value)", "docstring": "Convert the field value from the provided model to a string.\n\nUsed during model serialization.\n\nArgs:\nobj: db.Model, model object\n\nReturns:\nstring, the serialized field value", "source": "codesearchnet"} {"code": "def load_feather(protein_feather, length_filter_pid=None, copynum_scale=False, copynum_df=None):\n \n protein_df = pd.read_feather(protein_feather).set_index('index')\n\n \n from ssbio.protein.sequence.properties.residues import _aa_property_dict_one, EXTENDED_AA_PROPERTY_DICT_ONE\n aggregators = {\n 'aa_count_bulk' : {'residues': EXTENDED_AA_PROPERTY_DICT_ONE['Bulky'],\n 'subseqs' : ['metal_2_5D', 'metal_3D']},\n 'aa_count_carb' : {'residues': EXTENDED_AA_PROPERTY_DICT_ONE['Carbonylation susceptible'],\n 'subseqs' : ['metal_2_5D', 'metal_3D', 'acc_2D', 'acc_3D', 'surface_3D']},\n 'aa_count_chrg' : {'residues': _aa_property_dict_one['Charged'],\n 'subseqs' : ['metal_2_5D', 'metal_3D', 'csa_2_5D', 'sites_2_5D', 'acc_2D', 'acc_3D',\n 'surface_3D']},\n 'aa_count_poschrg' : {'residues': _aa_property_dict_one['Basic'],\n 'subseqs' : ['metal_2_5D', 'metal_3D', 'acc_2D', 'acc_3D', 'surface_3D']},\n 'aa_count_negchrg' : {'residues': _aa_property_dict_one['Acidic'],\n 'subseqs' : ['metal_2_5D', 'metal_3D', 'acc_2D', 'acc_3D', 'surface_3D']},\n 'aa_count_tmstab' : {'residues': EXTENDED_AA_PROPERTY_DICT_ONE['TM stabilizing'],\n 'subseqs' : ['tm_2D', 'tm_3D']},\n 'aa_count_tmunstab': {'residues': EXTENDED_AA_PROPERTY_DICT_ONE['TM to Thr stabilizing'],\n 'subseqs' : ['tm_2D', 'tm_3D']},\n 'aa_count_dis' : {'residues': EXTENDED_AA_PROPERTY_DICT_ONE['Disorder promoting'],\n 'subseqs' : ['disorder_2D', 'ss_disorder_2D', 'disorder_3D', 'ss_disorder_3D',\n 'dna_2_5D']},\n 'aa_count_ord' : {'residues': EXTENDED_AA_PROPERTY_DICT_ONE['Order promoting'],\n 'subseqs' : ['disorder_2D', 'ss_disorder_2D', 'disorder_3D', 'ss_disorder_3D',\n 'dna_2_5D']}}\n\n \n for suffix, info in aggregators.items():\n agg_residues = info['residues']\n for prefix in info['subseqs']:\n to_add_idxes = []\n for agg_res in agg_residues:\n to_add_idx = prefix + '_aa_count_' + agg_res\n if to_add_idx in protein_df.index:\n to_add_idxes.append(to_add_idx)\n subseq_agged_col = protein_df.loc[to_add_idxes, :].sum() \n protein_df.loc[prefix + '_' + suffix] = subseq_agged_col \n\n \n \n if length_filter_pid:\n keep_cols = protein_df.loc['aa_count_total'][protein_df.loc['aa_count_total'] > protein_df.at['aa_count_total', 'K12'] * length_filter_pid].index\n protein_df = protein_df[keep_cols]\n\n \n if copynum_scale:\n if not isinstance(copynum_df, pd.DataFrame):\n raise ValueError('Please supply copy numbers')\n protein_id = op.basename(protein_feather).split('_protein')[0]\n if protein_id in copynum_df.index:\n copynum = copynum_df.at[protein_id, 'copynum']\n if copynum > 0: \n protein_df = protein_df * copynum\n\n return protein_df", "docstring": "Load a feather of amino acid counts for a protein.\n\nArgs:\nprotein_feather (str): path to feather file\ncopynum_scale (bool): if counts should be multiplied by protein copy number\ncopynum_df (DataFrame): DataFrame of copy numbers\n\nReturns:\nDataFrame: 
of counts with some aggregated together", "source": "juraj-google-style"} {"code": "def num_inputs(self):\n num = 0\n for (walker, _) in self.inputs:\n if (not isinstance(walker, InvalidStreamWalker)):\n num += 1\n return num", "docstring": "Return the number of connected inputs.\n\nReturns:\nint: The number of connected inputs", "source": "codesearchnet"} {"code": "def get_sparse_tensors(self, transformation_cache, state_manager):\n pass", "docstring": "Returns an IdWeightPair.\n\n`IdWeightPair` is a pair of `SparseTensor`s which represents ids and\nweights.\n\n`IdWeightPair.id_tensor` is typically a `batch_size` x `num_buckets`\n`SparseTensor` of `int64`. `IdWeightPair.weight_tensor` is either a\n`SparseTensor` of `float` or `None` to indicate all weights should be\ntaken to be 1. If specified, `weight_tensor` must have exactly the same\nshape and indices as `sp_ids`. Expected `SparseTensor` is same as parsing\noutput of a `VarLenFeature` which is a ragged matrix.\n\nArgs:\ntransformation_cache: A `FeatureTransformationCache` object to access\nfeatures.\nstate_manager: A `StateManager` to create / access resources such as\nlookup tables.", "source": "github-repos"} {"code": "def _parse_data_fields(self, fields, tag_id='tag', sub_id='code'):\n for field in fields:\n params = field.params\n if (tag_id not in params):\n continue\n field_repr = OrderedDict([[self.i1_name, params.get(self.i1_name, ' ')], [self.i2_name, params.get(self.i2_name, ' ')]])\n for subfield in field.find('subfield'):\n if (sub_id not in subfield.params):\n continue\n content = MARCSubrecord(val=subfield.getContent().strip(), i1=field_repr[self.i1_name], i2=field_repr[self.i2_name], other_subfields=field_repr)\n code = subfield.params[sub_id]\n if (code in field_repr):\n field_repr[code].append(content)\n else:\n field_repr[code] = [content]\n tag = params[tag_id]\n if (tag in self.datafields):\n self.datafields[tag].append(field_repr)\n else:\n self.datafields[tag] = [field_repr]", "docstring": "Parse data fields.\n\nArgs:\nfields (list): of HTMLElements\ntag_id (str): parameter name, which holds the information, about\nfield name this is normally \"tag\", but in case of\noai_marc \"id\"\nsub_id (str): id of parameter, which holds informations about\nsubfield name this is normally \"code\" but in case of\noai_marc \"label\"", "source": "codesearchnet"} {"code": "def unpack(self, buff, offset=0):\n \n try:\n begin = offset\n end = begin + self.length\n unpacked_data = struct.unpack(self._fmt, buff[begin:end])[0]\n except struct.error:\n raise Exception(\"%s: %s\" % (offset, buff))\n\n self._value = unpacked_data.decode('ascii').rstrip('\\0')", "docstring": "Unpack a binary message into this object's attributes.\n\nUnpack the binary value *buff* and update this object attributes based\non the results.\n\nArgs:\nbuff (bytes): Binary data package to be unpacked.\noffset (int): Where to begin unpacking.\n\nRaises:\nException: If there is a struct unpacking error.", "source": "juraj-google-style"} {"code": "def tmybasename(usaf):\n \n url_file = open(env.SRC_PATH + '/tmy3.csv')\n for line in url_file.readlines():\n if line.find(usaf) is not -1:\n return line.rstrip().partition(',')[0]", "docstring": "Basename for USAF base.\n\nArgs:\nusaf (str): USAF code\n\nReturns:\n(str)", "source": "juraj-google-style"} {"code": "def sg_arg():\n if (not tf.app.flags.FLAGS.__dict__['__parsed']):\n tf.app.flags.FLAGS._parse_flags()\n return tf.sg_opt(tf.app.flags.FLAGS.__dict__['__flags'])", "docstring": "r\"\"\"Gets current command line 
options\n\nReturns:\ntf.sg_opt instance that is updated with current commandd line options.", "source": "codesearchnet"} {"code": "def join(basepath, *paths):\n filesystem = FileSystems.get_filesystem(basepath)\n return filesystem.join(basepath, *paths)", "docstring": "Join two or more pathname components for the filesystem\n\nArgs:\nbasepath: string path of the first component of the path\npaths: path components to be added\n\nReturns: full path after combining all the passed components", "source": "github-repos"} {"code": "def depth_september_average_ground_temperature(self, value=None):\n \n if value is not None:\n try:\n value = float(value)\n except ValueError:\n raise ValueError(\n 'value {} need to be of type float '\n 'for field `depth_september_average_ground_temperature`'.format(value))\n\n self._depth_september_average_ground_temperature = value", "docstring": "Corresponds to IDD Field\n`depth_september_average_ground_temperature`\n\nArgs:\nvalue (float): value for IDD Field `depth_september_average_ground_temperature`\nUnit: C\nif `value` is None it will not be checked against the\nspecification and is assumed to be a missing value\n\nRaises:\nValueError: if `value` is not a valid value", "source": "juraj-google-style"} {"code": "def create_channel(target, credentials=None, scopes=None, ssl_credentials=None, **kwargs):\n if (credentials is None):\n (credentials, _) = google.auth.default(scopes=scopes)\n else:\n credentials = google.auth.credentials.with_scopes_if_required(credentials, scopes)\n request = google.auth.transport.requests.Request()\n metadata_plugin = google.auth.transport.grpc.AuthMetadataPlugin(credentials, request)\n google_auth_credentials = grpc.metadata_call_credentials(metadata_plugin)\n if (ssl_credentials is None):\n ssl_credentials = grpc.ssl_channel_credentials()\n composite_credentials = grpc.composite_channel_credentials(ssl_credentials, google_auth_credentials)\n if HAS_GRPC_GCP:\n return grpc_gcp.secure_channel(target, composite_credentials, **kwargs)\n else:\n return grpc.secure_channel(target, composite_credentials, **kwargs)", "docstring": "Create a secure channel with credentials.\n\nArgs:\ntarget (str): The target service address in the format 'hostname:port'.\ncredentials (google.auth.credentials.Credentials): The credentials. If\nnot specified, then this function will attempt to ascertain the\ncredentials from the environment using :func:`google.auth.default`.\nscopes (Sequence[str]): A optional list of scopes needed for this\nservice. These are only used when credentials are not specified and\nare passed to :func:`google.auth.default`.\nssl_credentials (grpc.ChannelCredentials): Optional SSL channel\ncredentials. 
This can be used to specify different certificates.\nkwargs: Additional key-word args passed to\n:func:`grpc_gcp.secure_channel` or :func:`grpc.secure_channel`.\n\nReturns:\ngrpc.Channel: The created channel.", "source": "codesearchnet"} {"code": "def extract_code(end_mark, current_str, str_array, line_num):\n if (end_mark not in current_str):\n reached_end = False\n line_num += 1\n while (reached_end is False):\n next_line = str_array[line_num]\n if (end_mark in next_line):\n reached_end = True\n else:\n line_num += 1\n current_str += next_line\n clean_str = current_str.split(end_mark)[0]\n return {'current_str': clean_str, 'line_num': line_num}", "docstring": "Extract a multi-line string from a string array, up to a specified end marker.\n\nArgs:\nend_mark (str): The end mark string to match for.\ncurrent_str (str): The first line of the string array.\nstr_array (list): An array of strings (lines).\nline_num (int): The current offset into the array.\n\nReturns:\nExtended string up to line with end marker.", "source": "codesearchnet"} {"code": "def get_element_from_dict_by_path(d, path):\n path = path.split('.')\n for k in path:\n if k not in d:\n d[k] = {}\n d = d[k]\n return d", "docstring": "Get element from dictionary by path. If element is not present, recursively add empty dictionaries.\n\nArgs:\nd (dict): the dictionary to get the element from\npath (list): the path to the element which is delimited by \".\"", "source": "github-repos"} {"code": "def _CheckStatusWorkerProcess(self, pid):\n self._RaiseIfNotRegistered(pid)\n process = self._processes_per_pid[pid]\n process_status = self._QueryProcessStatus(process)\n if (process_status is None):\n process_is_alive = False\n else:\n process_is_alive = True\n process_information = self._process_information_per_pid[pid]\n used_memory = (process_information.GetUsedMemory() or 0)\n if (self._worker_memory_limit and (used_memory > self._worker_memory_limit)):\n logger.warning('Process: {0:s} (PID: {1:d}) killed because it exceeded the memory limit: {2:d}.'.format(process.name, pid, self._worker_memory_limit))\n self._KillProcess(pid)\n if isinstance(process_status, dict):\n self._rpc_errors_per_pid[pid] = 0\n status_indicator = process_status.get('processing_status', None)\n else:\n rpc_errors = (self._rpc_errors_per_pid.get(pid, 0) + 1)\n self._rpc_errors_per_pid[pid] = rpc_errors\n if (rpc_errors > self._MAXIMUM_RPC_ERRORS):\n process_is_alive = False\n if process_is_alive:\n rpc_port = process.rpc_port.value\n logger.warning('Unable to retrieve process: {0:s} (PID: {1:d}) status via RPC socket: http:\n processing_status_string = 'RPC error'\n status_indicator = definitions.STATUS_INDICATOR_RUNNING\n else:\n processing_status_string = 'killed'\n status_indicator = definitions.STATUS_INDICATOR_KILLED\n process_status = {'processing_status': processing_status_string}\n self._UpdateProcessingStatus(pid, process_status, used_memory)\n for worker_status in self._processing_status.workers_status:\n if (worker_status.pid == pid):\n status_indicator = worker_status.status\n break\n if (status_indicator in definitions.ERROR_STATUS_INDICATORS):\n logger.error('Process {0:s} (PID: {1:d}) is not functioning correctly. 
Status code: {2!s}.'.format(process.name, pid, status_indicator))\n self._TerminateProcessByPid(pid)\n replacement_process = None\n for replacement_process_attempt in range(self._MAXIMUM_REPLACEMENT_RETRIES):\n logger.info('Attempt: {0:d} to start replacement worker process for {1:s}'.format((replacement_process_attempt + 1), process.name))\n replacement_process = self._StartWorkerProcess(process.name, self._storage_writer)\n if replacement_process:\n break\n time.sleep(self._REPLACEMENT_WORKER_RETRY_DELAY)\n if (not replacement_process):\n logger.error('Unable to create replacement worker process for: {0:s}'.format(process.name))", "docstring": "Checks the status of a worker process.\n\nIf a worker process is not responding the process is terminated and\na replacement process is started.\n\nArgs:\npid (int): process ID (PID) of a registered worker process.\n\nRaises:\nKeyError: if the process is not registered with the engine.", "source": "codesearchnet"} {"code": "def _quadratic_sum_cost(self, state: _STATE) -> float:\n \n cost = 0.0\n total_len = float(len(self._c))\n seqs, _ = state\n for seq in seqs:\n cost += (len(seq) / total_len) ** 2\n return -cost", "docstring": "Cost function that sums squares of lengths of sequences.\n\nArgs:\nstate: Search state, not mutated.\n\nReturns:\nCost which is minus the normalized quadratic sum of each linear\nsequence section in the state. This promotes single, long linear\nsequence solutions and converges to number -1. The solution with a\nlowest cost consists of every node being a single sequence and is\nalways less than 0.", "source": "juraj-google-style"} {"code": "def _convert_to_dict(data):\n \n if isinstance(data, dict):\n return data\n\n if isinstance(data, list) or isinstance(data, tuple):\n if _all_correct_list(data):\n return dict(data)\n else:\n data = zip(data[::2], data[1::2])\n return dict(data)\n else:\n raise MetaParsingException(\n \"Can't decode provided metadata - unknown structure.\"\n )", "docstring": "Convert `data` to dictionary.\n\nTries to get sense in multidimensional arrays.\n\nArgs:\ndata: List/dict/tuple of variable dimension.\n\nReturns:\ndict: If the data can be converted to dictionary.\n\nRaises:\nMetaParsingException: When the data are unconvertible to dict.", "source": "juraj-google-style"} {"code": "def convert_to_beam_types(args):\n if isinstance(args, dict):\n return {k: convert_to_beam_type(v) for k, v in args.items()}\n else:\n return [convert_to_beam_type(v) for v in args]", "docstring": "Convert the given list or dictionary of args to Beam types.\n\nArgs:\nargs: Either an iterable of types, or a dictionary where the values are\ntypes.\n\nReturns:\nIf given an iterable, a list of converted types. 
If given a dictionary,\na dictionary with the same keys, and values which have been converted.", "source": "github-repos"} {"code": "def strip_cdata(text):\n \n if not is_cdata(text):\n return text\n\n xml = \"{0}\".format(text)\n node = etree.fromstring(xml)\n return node.text", "docstring": "Removes all CDATA blocks from `text` if it contains them.\n\nNote:\nIf the function contains escaped XML characters outside of a\nCDATA block, they will be unescaped.\n\nArgs:\nA string containing one or more CDATA blocks.\n\nReturns:\nAn XML unescaped string with CDATA block qualifiers removed.", "source": "juraj-google-style"} {"code": "def reset_internal_states(self, record=None):\n \n self._record = None\n self._count = 0\n self._record = record", "docstring": "Resets the internal state of the recorder.\n\nArgs:\nrecord: records.TestResultRecord, the test record for a test.", "source": "juraj-google-style"} {"code": "def setPartationId(self, partationId):\n \n print '%s call setPartationId' % self.port\n print partationId\n\n cmd = WPANCTL_CMD + 'setprop Network:PartitionId %s' %(str(hex(partationId)).rstrip('L'))\n print cmd\n return self.__sendCommand(cmd)[0] != 'Fail'", "docstring": "set Thread Network Partition ID\n\nArgs:\npartitionId: partition id to be set by leader\n\nReturns:\nTrue: successful to set the Partition ID\nFalse: fail to set the Partition ID", "source": "juraj-google-style"} {"code": "def indicator(self, data):\n \n if self._name != 'Bulk' or self._name != 'Indicator':\n self._request_uri = '{}/{}'.format(\n self._api_uri, self.tcex.safe_indicator(data, 'ignore')\n )", "docstring": "Update the request URI to include the Indicator for specific indicator retrieval.\n\nArgs:\ndata (string): The indicator value", "source": "juraj-google-style"} {"code": "class BridgeTowerFastImageProcessorKwargs(DefaultFastImageProcessorKwargs):\n size_divisor: Optional[int]\n do_pad: Optional[bool]", "docstring": "Args:\nsize_divisor (`int`, *optional*, defaults to 32):\nThe size by which to make sure both the height and width can be divided. Only has an effect if `do_resize`\nis set to `True`. Can be overridden by the `size_divisor` parameter in the `preprocess` method.\ndo_pad (`bool`, *optional*, defaults to `True`):\nWhether to pad the image to the `(max_height, max_width)` of the images in the batch. 
Can be overridden by\nthe `do_pad` parameter in the `preprocess` method.", "source": "github-repos"} {"code": "def reassign_label(cls, destination_cluster, label):\n \n conn = Qubole.agent(version=Cluster.api_version)\n data = {\n \"destination_cluster\": destination_cluster,\n \"label\": label\n }\n return conn.put(cls.rest_entity_path + \"/reassign-label\", data)", "docstring": "Reassign a label from one cluster to another.\n\nArgs:\n`destination_cluster`: id/label of the cluster to move the label to\n\n`label`: label to be moved from the source cluster", "source": "juraj-google-style"} {"code": "def _maybe_download_corpus(tmp_dir, vocab_type):\n \n filename = os.path.basename(PTB_URL)\n compressed_filepath = generator_utils.maybe_download(\n tmp_dir, filename, PTB_URL)\n ptb_files = []\n ptb_char_files = []\n\n with tarfile.open(compressed_filepath, \"r:gz\") as tgz:\n files = []\n \n for m in tgz.getmembers():\n if \"ptb\" in m.name and \".txt\" in m.name:\n if \"char\" in m.name:\n ptb_char_files += [m.name]\n else:\n ptb_files += [m.name]\n files += [m]\n\n tgz.extractall(tmp_dir, members=files)\n\n if vocab_type == text_problems.VocabType.CHARACTER:\n return ptb_char_files\n else:\n return ptb_files", "docstring": "Download and unpack the corpus.\n\nArgs:\ntmp_dir: directory containing dataset.\nvocab_type: which vocabulary are we using.\n\nReturns:\nThe list of names of files.", "source": "juraj-google-style"} {"code": "def bash_complete(self, path, cmd, *cmds):\n \n path = pathlib.Path(path)\n subcmds = list(self.subcmds.keys())\n with path.open('w') as bcf:\n \n print('_{}() {{'.format(cmd), file=bcf)\n print('COMPREPLY=()', file=bcf)\n print(r'local cur=${COMP_WORDS[COMP_CWORD]}', end='\\n\\n', file=bcf)\n optstr = ' '.join(self._bash_comp_command(None))\n print(r'local options=\"{}\"'.format(optstr), end='\\n\\n', file=bcf)\n if subcmds:\n print('local commands=\"{}\"'.format(' '.join(subcmds)),\n file=bcf)\n print('declare -A suboptions', file=bcf)\n for sub in subcmds:\n optstr = ' '.join(self._bash_comp_command(sub))\n print('suboptions[{}]=\"{}\"'.format(sub, optstr), file=bcf)\n condstr = 'if'\n for sub in subcmds:\n print(condstr, r'[[ \"${COMP_LINE}\" == *\"', sub, '\"* ]] ; then',\n file=bcf)\n print(r'COMPREPLY=( `compgen -W \"${suboptions[', sub,\n r']}\" -- ${cur}` )', sep='', file=bcf)\n condstr = 'elif'\n print(condstr, r'[[ ${cur} == -* ]] ; then', file=bcf)\n print(r'COMPREPLY=( `compgen -W \"${options}\" -- ${cur}`)',\n file=bcf)\n if subcmds:\n print(r'else', file=bcf)\n print(r'COMPREPLY=( `compgen -W \"${commands}\" -- ${cur}`)',\n file=bcf)\n print('fi', file=bcf)\n print('}', end='\\n\\n', file=bcf)\n print('complete -F _{0} {0}'.format(cmd), *cmds, file=bcf)", "docstring": "Write bash complete script.\n\nArgs:\npath (path-like): desired path of the complete script.\ncmd (str): command name that should be completed.\ncmds (str): extra command names that should be completed.", "source": "juraj-google-style"} {"code": "def write(self, noautocmd=False):\n \n cmd = 'noautocmd write' if noautocmd else 'write'\n self._vim.command(cmd)", "docstring": "Writes the file of the current buffer.\n\nArgs:\nnoautocmd (bool): If true, write will skip autocommands.\n\nTodo:\nWe should consider whether ``SourceFileInfo`` can replace most\nusage of noautocmd. 
See #298", "source": "juraj-google-style"} {"code": "def get_texture(self, label: str) -> Union[(moderngl.Texture, moderngl.TextureArray, moderngl.Texture3D, moderngl.TextureCube)]:\n return self._project.get_texture(label)", "docstring": "Get a texture by its label\n\nArgs:\nlabel (str): The Label for the texture\n\nReturns:\nThe py:class:`moderngl.Texture` instance", "source": "codesearchnet"} {"code": "def _einsum_matmul(cls, tensor, mat, indices, shift=0, right_mul=False):\n rank = tensor.ndim\n rank_mat = mat.ndim\n if ((rank_mat % 2) != 0):\n raise QiskitError('Contracted matrix must have an even number of indices.')\n indices_tensor = list(range(rank))\n for (j, index) in enumerate(indices):\n indices_tensor[(index + shift)] = (rank + j)\n mat_contract = list(reversed(range(rank, (rank + len(indices)))))\n mat_free = [(index + shift) for index in reversed(indices)]\n if right_mul:\n indices_mat = (mat_contract + mat_free)\n else:\n indices_mat = (mat_free + mat_contract)\n return np.einsum(tensor, indices_tensor, mat, indices_mat)", "docstring": "Perform a contraction using Numpy.einsum\n\nArgs:\ntensor (np.array): a vector or matrix reshaped to a rank-N tensor.\nmat (np.array): a matrix reshaped to a rank-2M tensor.\nindices (list): tensor indices to contract with mat.\nshift (int): shift for indicies of tensor to contract [Default: 0].\nright_mul (bool): if True right multiply tensor by mat\n(else left multiply) [Default: False].\n\nReturns:\nNumpy.ndarray: the matrix multiplied rank-N tensor.\n\nRaises:\nQiskitError: if mat is not an even rank tensor.", "source": "codesearchnet"} {"code": "def serialize_to_display(self, doc_format=\"pretty-xml\", *args, **kwargs):\n \n return (\n super(ResourceMap, self)\n .serialize(format=doc_format, encoding=None, *args, **kwargs)\n .decode(\"utf-8\")\n )", "docstring": "Serialize ResourceMap to an XML doc that is pretty printed for display.\n\nArgs:\ndoc_format: str\nOne of: ``xml``, ``n3``, ``turtle``, ``nt``, ``pretty-xml``, ``trix``,\n``trig`` and ``nquads``.\n\nargs and kwargs:\nOptional arguments forwarded to rdflib.ConjunctiveGraph.serialize().\n\nReturns:\nstr: Pretty printed Resource Map XML doc\n\nNote:\nOnly the default, \"xml\", is automatically indexed by DataONE.", "source": "juraj-google-style"} {"code": "def update_default(self, new_default, respect_none=False):\n \n if new_default is not None:\n self.default = new_default\n elif new_default is None and respect_none:\n self.default = None", "docstring": "Update our current default with the new_default.\n\nArgs:\nnew_default: New default to set.\nrespect_none: Flag to determine if ``None`` is a valid value.", "source": "juraj-google-style"} {"code": "def identify(self, token):\n payload = {'op': 2, 'd': {'token': self.token, 'properties': {'$os': sys.platform, '$browser': 'legobot', '$device': 'legobot'}, 'compress': False, 'large_threshold': 250}}\n payload['d']['synced_guilds'] = []\n logger.info('Identifying with the following message: {}'.format(payload))\n self.ws.send(json.dumps(payload))\n return", "docstring": "Identifies to the websocket endpoint\n\nArgs:\ntoken (string): Discord bot token", "source": "codesearchnet"} {"code": "def list_asgs(access_token, subscription_id, resource_group):\n \n endpoint = ''.join([get_rm_endpoint(),\n '/subscriptions/', subscription_id,\n '/resourceGroups/', resource_group,\n '/providers/Microsoft.Network/virtualNetworks/',\n '?api-version=', NETWORK_API])\n return do_get(endpoint, access_token)", "docstring": "Get details about the 
application security groups for a resource group.\n\nArgs:\naccess_token (str): A valid Azure authentication token.\nsubscription_id (str): Azure subscription id.\nresource_group (str): Azure resource group name.\n\nReturns:\nHTTP response. ASG JSON body.", "source": "juraj-google-style"} {"code": "def build(self, variables):\n if self.built:\n return\n super().build(variables)\n self.momentums = []\n if self.momentum != 0:\n self.momentums = self.add_optimizer_variables(variables, 'momentum')", "docstring": "Initialize optimizer variables.\n\nSGD optimizer has one variable `momentums`, only set if `self.momentum`\nis not 0.\n\nArgs:\nvar_list: list of model variables to build SGD variables on.", "source": "github-repos"} {"code": "def run(self):\n ctx = FSM.Context(self.name, self.device)\n transition_counter = 0\n timeout = self.timeout\n self.log('{} Start'.format(self.name))\n while (transition_counter < self.max_transitions):\n transition_counter += 1\n try:\n start_time = time()\n if (self.init_pattern is None):\n ctx.event = self.ctrl.expect(self.events, searchwindowsize=self.searchwindowsize, timeout=timeout)\n else:\n self.log('INIT_PATTERN={}'.format(pattern_to_str(self.init_pattern)))\n try:\n ctx.event = self.events.index(self.init_pattern)\n except ValueError:\n self.log('INIT_PATTERN unknown.')\n continue\n finally:\n self.init_pattern = None\n finish_time = (time() - start_time)\n key = (ctx.event, ctx.state)\n ctx.pattern = self.events[ctx.event]\n if (key in self.transition_table):\n transition = self.transition_table[key]\n (next_state, action_instance, next_timeout) = transition\n self.log('E={},S={},T={},RT={:.2f}'.format(ctx.event, ctx.state, timeout, finish_time))\n if (callable(action_instance) and (not isclass(action_instance))):\n if (not action_instance(ctx)):\n self.log('Error: {}'.format(ctx.msg))\n return False\n elif isinstance(action_instance, Exception):\n self.log('A=Exception {}'.format(action_instance))\n raise action_instance\n elif (action_instance is None):\n self.log('A=None')\n else:\n self.log('FSM Action is not callable: {}'.format(str(action_instance)))\n raise RuntimeWarning('FSM Action is not callable')\n if (next_timeout != 0):\n timeout = next_timeout\n ctx.state = next_state\n self.log('NS={},NT={}'.format(next_state, timeout))\n else:\n self.log('Unknown transition: EVENT={},STATE={}'.format(ctx.event, ctx.state))\n continue\n except EOF:\n raise ConnectionError('Session closed unexpectedly', self.ctrl.hostname)\n if (ctx.finished or (next_state == (- 1))):\n self.log('{} Stop at E={},S={}'.format(self.name, ctx.event, ctx.state))\n return True\n self.log('FSM looped. 
Exiting')\n return False", "docstring": "Start the FSM.\n\nReturns:\nboolean: True if FSM reaches the last state or false if the exception or error message was raised", "source": "codesearchnet"} {"code": "def _handle_request_error(self, orig_request, error, start_response):\n \n headers = [('Content-Type', 'application/json')]\n status_code = error.status_code()\n body = error.rest_error()\n\n response_status = '%d %s' % (status_code,\n httplib.responses.get(status_code,\n 'Unknown Error'))\n cors_handler = self._create_cors_handler(orig_request)\n return util.send_wsgi_response(response_status, headers, body,\n start_response, cors_handler=cors_handler)", "docstring": "Handle a request error, converting it to a WSGI response.\n\nArgs:\norig_request: An ApiRequest, the original request from the user.\nerror: A RequestError containing information about the error.\nstart_response: A function with semantics defined in PEP-333.\n\nReturns:\nA string containing the response body.", "source": "juraj-google-style"} {"code": "async def _on_progress_notification(self, progress):\n conn_string = progress.get('connection_string')\n done = progress.get('done_count')\n total = progress.get('total_count')\n operation = progress.get('operation')\n (await self.notify_progress(conn_string, operation, done, total, wait=True))", "docstring": "Callback function called when a progress notification is received.\n\nArgs:\nprogress (dict): The received notification containing the progress information", "source": "codesearchnet"} {"code": "def bind_queues(self, bindings):\n channel = (yield self._allocate_channel())\n try:\n for binding in bindings:\n try:\n (yield channel.queue_bind(**binding))\n except pika.exceptions.ChannelClosed as e:\n raise BadDeclaration('binding', binding, e)\n finally:\n try:\n channel.close()\n except pika.exceptions.AMQPError:\n pass", "docstring": "Declare a set of bindings between queues and exchanges.\n\nArgs:\nbindings (list of dict): A list of binding definitions. Each dictionary\nmust contain the \"queue\" key whose value is the name of the queue\nto create the binding on, as well as the \"exchange\" key whose value\nshould be the name of the exchange to bind to. Additional acceptable\nkeys are any keyword arguments accepted by\n:meth:`pika.channel.Channel.queue_bind`.\n\nRaises:\nNoFreeChannels: If there are no available channels on this connection.\nIf this occurs, you can either reduce the number of consumers on this\nconnection or create an additional connection.\nBadDeclaration: If a binding could not be declared. 
This can occur if the\nqueue or exchange don't exist, or if they do, but the current user does\nnot have permissions to create bindings.", "source": "codesearchnet"} {"code": "def timeout(coro, timeout=None, loop=None):\n\n @asyncio.coroutine\n def _timeout(coro):\n return (yield from asyncio.wait_for(coro, timeout, loop=loop))\n\n @asyncio.coroutine\n def wrapper(*args, **kw):\n return (yield from _timeout(coro(*args, **kw)))\n return (_timeout(coro) if asyncio.iscoroutine(coro) else wrapper)", "docstring": "Wraps a given coroutine function, that when executed, if it takes more\nthan the given timeout in seconds to execute, it will be canceled and\nraise an `asyncio.TimeoutError`.\n\nThis function is equivalent to Python standard\n`asyncio.wait_for()` function.\n\nThis function can be used as decorator.\n\nArguments:\ncoro (coroutinefunction|coroutine): coroutine to wrap.\ntimeout (int|float): max wait timeout in seconds.\nloop (asyncio.BaseEventLoop): optional event loop to use.\n\nRaises:\nTypeError: if coro argument is not a coroutine function.\n\nReturns:\ncoroutinefunction: wrapper coroutine function.\n\nUsage::\n\nawait paco.timeout(coro, timeout=10)", "source": "codesearchnet"} {"code": "def get_session(db_url):\n engine = create_engine(db_url, poolclass=NullPool, echo=False)\n Session = sessionmaker(bind=engine)\n Base.metadata.create_all(engine)\n return Session()", "docstring": "Gets SQLAlchemy session given url. Your tables must inherit\nfrom Base in hdx.utilities.database.\n\nArgs:\ndb_url (str): SQLAlchemy url\n\nReturns:\nsqlalchemy.orm.session.Session: SQLAlchemy session", "source": "codesearchnet"} {"code": "def mesh2fc(script, all_visible_layers=False):\n filter_xml = ''.join([' \\n', ' \\n', ' \\n'])\n util.write_filter(script, filter_xml)\n return None", "docstring": "Transfer mesh colors to face colors\n\nArgs:\nscript: the FilterScript object or script filename to write\nthe filter to.\nall_visible_layers (bool): If true the color mapping is applied to all the meshes", "source": "codesearchnet"} {"code": "def set_computer_desc(desc=None):\n \n if six.PY2:\n desc = _to_unicode(desc)\n\n \n \n system_info = win32net.NetServerGetInfo(None, 101)\n\n \n if desc is None:\n return False\n\n system_info['comment'] = desc\n\n \n try:\n win32net.NetServerSetInfo(None, 101, system_info)\n except win32net.error as exc:\n (number, context, message) = exc.args\n log.error('Failed to update system')\n log.error('nbr: %s', number)\n log.error('ctx: %s', context)\n log.error('msg: %s', message)\n return False\n\n return {'Computer Description': get_computer_desc()}", "docstring": "Set the Windows computer description\n\nArgs:\n\ndesc (str):\nThe computer description\n\nReturns:\nstr: Description if successful, otherwise ``False``\n\nCLI Example:\n\n.. code-block:: bash\n\nsalt 'minion-id' system.set_computer_desc 'This computer belongs to Dave!'", "source": "juraj-google-style"} {"code": "def GetDisplayNameForPathSpec(self, path_spec):\n \n return path_helper.PathHelper.GetDisplayNameForPathSpec(\n path_spec, mount_path=self._mount_path, text_prepend=self._text_prepend)", "docstring": "Retrieves the display name for a path specification.\n\nArgs:\npath_spec (dfvfs.PathSpec): path specification.\n\nReturns:\nstr: human readable version of the path specification.", "source": "juraj-google-style"} {"code": "def run(self, args, options):\n \n config.LOGGER.info('In SushiChef.run method. 
args=' + str(args) + 'options=' + str(options))\n self.pre_run(args, options)\n uploadchannel_wrapper(self, args, options)", "docstring": "This function calls uploadchannel which performs all the run steps:\nArgs:\nargs (dict): chef command line arguments\noptions (dict): additional compatibility mode options given on command line", "source": "juraj-google-style"} {"code": "def get(self):\n if self._n < 2:\n return float('nan')\n dof = self._n - 1\n return math.sqrt(self._m2 / dof)", "docstring": "Returns the current incremental standard deviation.\n\nReturns:\nfloat: The current incremental standard deviation value.\nReturns NaN if fewer than 2 valid (non-NaN) values have been pushed.", "source": "github-repos"} {"code": "def get_tiny_config(config_class, model_class=None, **model_tester_kwargs):\n model_type = config_class.model_type\n config_source_file = inspect.getsourcefile(config_class)\n modeling_name = config_source_file.split(os.path.sep)[-1].replace('configuration_', '').replace('.py', '')\n try:\n print('Importing', model_type_to_module_name(model_type))\n module_name = model_type_to_module_name(model_type)\n if not modeling_name.startswith(module_name):\n raise ValueError(f\"{modeling_name} doesn't start with {module_name}!\")\n test_file = os.path.join('tests', 'models', module_name, f'test_modeling_{modeling_name}.py')\n models_to_model_testers = get_model_to_tester_mapping(test_file)\n model_tester_class = None\n tester_classes = []\n if model_class is not None:\n tester_classes = get_tester_classes_for_model(test_file, model_class)\n else:\n for _tester_classes in models_to_model_testers.values():\n tester_classes.extend(_tester_classes)\n if len(tester_classes) > 0:\n model_tester_class = sorted(tester_classes, key=lambda x: (len(x.__name__), x.__name__))[0]\n except ModuleNotFoundError:\n error = f'Tiny config not created for {model_type} - cannot find the testing module from the model name.'\n raise ValueError(error)\n if model_tester_class is None:\n error = f'Tiny config not created for {model_type} - no model tester is found in the testing module.'\n raise ValueError(error)\n if 'vocab_size' in model_tester_kwargs:\n if 'text_kwargs' in inspect.signature(model_tester_class.__init__).parameters.keys():\n vocab_size = model_tester_kwargs.pop('vocab_size')\n model_tester_kwargs['text_kwargs'] = {'vocab_size': vocab_size}\n model_tester = model_tester_class(parent=None, **model_tester_kwargs)\n if hasattr(model_tester, 'get_pipeline_config'):\n config = model_tester.get_pipeline_config()\n elif hasattr(model_tester, 'prepare_config_and_inputs'):\n config = model_tester.prepare_config_and_inputs()[0]\n elif hasattr(model_tester, 'get_config'):\n config = model_tester.get_config()\n else:\n error = f'Tiny config not created for {model_type} - the model tester {model_tester_class.__name__} lacks necessary method to create config.'\n raise ValueError(error)\n max_positions = []\n for key in ['max_position_embeddings', 'max_source_positions', 'max_target_positions']:\n if getattr(config, key, 0) > 0:\n max_positions.append(getattr(config, key))\n if getattr(config, 'text_config', None) is not None:\n if getattr(config.text_config, key, None) is not None:\n max_positions.append(getattr(config.text_config, key))\n if len(max_positions) > 0:\n max_position = max(200, min(max_positions))\n for key in ['max_position_embeddings', 'max_source_positions', 'max_target_positions']:\n if getattr(config, key, 0) > 0:\n setattr(config, key, max_position)\n if getattr(config, 
'text_config', None) is not None:\n if getattr(config.text_config, key, None) is not None:\n setattr(config.text_config, key, max_position)\n return config", "docstring": "Retrieve a tiny configuration from `config_class` using each model's `ModelTester`.\n\nArgs:\nconfig_class: Subclass of `PreTrainedConfig`.\n\nReturns:\nAn instance of `config_class` with tiny hyperparameters", "source": "github-repos"} {"code": "def receive_data(socket):\n answer = b''\n while True:\n packet = socket.recv(4096)\n if (not packet):\n break\n answer += packet\n response = pickle.loads(answer)\n socket.close()\n return response", "docstring": "Receive an answer from the daemon and return the response.\n\nArgs:\nsocket (socket.socket): A socket that is connected to the daemon.\n\nReturns:\ndir or string: The unpickled answer.", "source": "codesearchnet"} {"code": "def _prepare_init_params_from_job_description(cls, job_details, model_channel_name=None):\n \n init_params = super(RLEstimator, cls)\\\n ._prepare_init_params_from_job_description(job_details, model_channel_name)\n\n image_name = init_params.pop('image')\n framework, _, tag, _ = fw_utils.framework_name_from_image(image_name)\n\n if not framework:\n \n \n init_params['image_name'] = image_name\n return init_params\n\n toolkit, toolkit_version = cls._toolkit_and_version_from_tag(tag)\n\n if not cls._is_combination_supported(toolkit, toolkit_version, framework):\n training_job_name = init_params['base_job_name']\n raise ValueError(\n \"Training job: {} didn't use image for requested framework\".format(\n training_job_name)\n )\n\n init_params['toolkit'] = RLToolkit(toolkit)\n init_params['toolkit_version'] = toolkit_version\n init_params['framework'] = RLFramework(framework)\n\n return init_params", "docstring": "Convert the job description to init params that can be handled by the class constructor\n\nArgs:\njob_details: the returned job details from a describe_training_job API call.\nmodel_channel_name (str): Name of the channel where pre-trained model data will be\ndownloaded.\n\nReturns:\ndictionary: The transformed init_params", "source": "juraj-google-style"} {"code": "def fetch_s3_package(self, config):\n \n extractor_map = {'.tar.gz': TarGzipExtractor,\n '.tar': TarExtractor,\n '.zip': ZipExtractor}\n extractor = None\n for suffix, klass in extractor_map.items():\n if config['key'].endswith(suffix):\n extractor = klass()\n logger.debug(\"Using extractor %s for S3 object \\\"%s\\\" in \"\n \"bucket %s.\",\n klass.__name__,\n config['key'],\n config['bucket'])\n dir_name = self.sanitize_uri_path(\n \"s3-%s-%s\" % (config['bucket'],\n config['key'][:-len(suffix)])\n )\n break\n\n if extractor is None:\n raise ValueError(\n \"Archive type could not be determined for S3 object \\\"%s\\\" \"\n \"in bucket %s.\" % (config['key'], config['bucket'])\n )\n\n session = get_session(region=None)\n extra_s3_args = {}\n if config.get('requester_pays', False):\n extra_s3_args['RequestPayer'] = 'requester'\n\n \n if config.get('use_latest', True):\n try:\n \n \n modified_date = session.client('s3').head_object(\n Bucket=config['bucket'],\n Key=config['key'],\n **extra_s3_args\n )['LastModified'].astimezone(dateutil.tz.tzutc())\n except botocore.exceptions.ClientError as client_error:\n logger.error(\"Error checking modified date of \"\n \"s3://%s/%s: %s\",\n config['bucket'],\n config['key'],\n client_error)\n sys.exit(1)\n dir_name += \"-%s\" % modified_date.strftime(self.ISO8601_FORMAT)\n cached_dir_path = os.path.join(self.package_cache_dir, dir_name)\n if not 
os.path.isdir(cached_dir_path):\n logger.debug(\"Remote package s3://%s/%s has not \"\n \"been previously downloaded - starting download and \"\n \"extraction to %s\",\n config['bucket'],\n config['key'],\n cached_dir_path)\n tmp_dir = tempfile.mkdtemp(prefix='stacker')\n tmp_package_path = os.path.join(tmp_dir, dir_name)\n try:\n extractor.set_archive(os.path.join(tmp_dir, dir_name))\n logger.debug(\"Starting remote package download from S3 to %s \"\n \"with extra S3 options \\\"%s\\\"\",\n extractor.archive,\n str(extra_s3_args))\n session.resource('s3').Bucket(config['bucket']).download_file(\n config['key'],\n extractor.archive,\n ExtraArgs=extra_s3_args\n )\n logger.debug(\"Download complete; extracting downloaded \"\n \"package to %s\",\n tmp_package_path)\n extractor.extract(tmp_package_path)\n logger.debug(\"Moving extracted package directory %s to the \"\n \"Stacker cache at %s\",\n dir_name,\n self.package_cache_dir)\n shutil.move(tmp_package_path, self.package_cache_dir)\n finally:\n shutil.rmtree(tmp_dir)\n else:\n logger.debug(\"Remote package s3://%s/%s has already \"\n \"been previously downloaded to %s -- bypassing \"\n \"download\",\n config['bucket'],\n config['key'],\n cached_dir_path)\n\n \n self.update_paths_and_config(config=config,\n pkg_dir_name=dir_name)", "docstring": "Make a remote S3 archive available for local use.\n\nArgs:\nconfig (dict): git config dictionary", "source": "juraj-google-style"} {"code": "def get_models(self, model, page=None):\n if (page is not None):\n return self._store.find_all(self._get_model_class(model), params={'page': int(page)})\n else:\n return self._store.find_all(self._get_model_class(model))", "docstring": "Get all the models from the server.\n\nArgs:\nmodel (string): The class as a string.\npage (string, optional): The page number as a string\n\nReturns:\nlist: A list of instances of the requested model.", "source": "codesearchnet"} {"code": "def load_ner_model(lang=\"en\", version=\"2\"):\n \n src_dir = \"ner{}\".format(version)\n p = locate_resource(src_dir, lang)\n fh = _open(p)\n try:\n return pickle.load(fh)\n except UnicodeDecodeError:\n fh.seek(0)\n return pickle.load(fh, encoding='latin1')", "docstring": "Return a named entity extractor parameters for `lang` and of version `version`\n\nArgs:\nlang (string): language code.\nversion (string): version of the parameters to be used.", "source": "juraj-google-style"} {"code": "def _tf_glue_convert_examples_to_features(examples: tf.data.Dataset, tokenizer: PreTrainedTokenizer, task=str, max_length: Optional[int]=None) -> tf.data.Dataset:\n processor = glue_processors[task]()\n examples = [processor.tfds_map(processor.get_example_from_tensor_dict(example)) for example in examples]\n features = glue_convert_examples_to_features(examples, tokenizer, max_length=max_length, task=task)\n label_type = tf.float32 if task == 'sts-b' else tf.int64\n\n def gen():\n for ex in features:\n d = {k: v for k, v in asdict(ex).items() if v is not None}\n label = d.pop('label')\n yield (d, label)\n input_names = tokenizer.model_input_names\n return tf.data.Dataset.from_generator(gen, (dict.fromkeys(input_names, tf.int32), label_type), ({k: tf.TensorShape([None]) for k in input_names}, tf.TensorShape([])))", "docstring": "Returns:\nA `tf.data.Dataset` containing the task-specific features.", "source": "github-repos"} {"code": "def recode_wgsim_reads(\n rnf_fastq_fo,\n fai_fo,\n genome_id,\n wgsim_fastq_1_fn,\n wgsim_fastq_2_fn=None,\n number_of_read_tuples=10**9,\n ):\n \n\n wgsim_pattern = re.compile(\n 
'@(.*)_([0-9]+)_([0-9]+)_([0-9]+):([0-9]+):([0-9]+)_([0-9]+):([0-9]+):([0-9]+)_([0-9a-f]+)/([12])'\n )\n \n\n fai_index = rnftools.utils.FaIdx(fai_fo)\n read_tuple_id_width = len(format(number_of_read_tuples, 'x'))\n\n last_read_tuple_name = None\n\n fq_creator = rnftools.rnfformat.FqCreator(\n fastq_fo=rnf_fastq_fo,\n read_tuple_id_width=read_tuple_id_width,\n genome_id_width=2,\n chr_id_width=fai_index.chr_id_width,\n coor_width=fai_index.coor_width,\n info_reads_in_tuple=True,\n info_simulator=\"wgsim\",\n )\n\n reads_in_tuple = 2\n if wgsim_fastq_2_fn is None:\n reads_in_tuple = 1\n\n i = 0\n with open(wgsim_fastq_1_fn, \"r+\") as f_inp_1:\n if reads_in_tuple == 2:\n \n f_inp_2 = open(wgsim_fastq_2_fn)\n\n for line_a in f_inp_1:\n lines = [line_a.strip()]\n if reads_in_tuple == 2:\n lines.append(f_inp_2.readline().strip())\n\n if i % 4 == 0:\n segments = []\n \n \n\n m = wgsim_pattern.search(lines[0])\n if m is None:\n rnftools.utils.error(\n \"Read tuple '{}' was not generated by WgSim.\".format(lines[0][1:]), program=\"RNFtools\",\n subprogram=\"MIShmash\", exception=ValueError\n )\n\n contig_name = m.group(1)\n start_1 = int(m.group(2))\n end_2 = int(m.group(3))\n errors_1 = int(m.group(4))\n substitutions_1 = int(m.group(5))\n indels_1 = int(m.group(6))\n errors_2 = int(m.group(7))\n substitutions_2 = int(m.group(8))\n indels_2 = int(m.group(9))\n read_tuple_id_w = int(m.group(10), 16)\n pair = int(m.group(11))\n\n chr_id = fai_index.dict_chr_ids[contig_name] if fai_index.dict_chr_ids != {} else \"0\"\n\n if start_1 < end_2:\n direction_1 = \"F\"\n direction_2 = \"R\"\n else:\n direction_1 = \"R\"\n direction_2 = \"F\"\n\n segment1 = rnftools.rnfformat.Segment(\n genome_id=genome_id,\n chr_id=chr_id,\n direction=direction_1,\n left=start_1,\n right=0,\n )\n\n segment2 = rnftools.rnfformat.Segment(\n genome_id=genome_id,\n chr_id=chr_id,\n direction=direction_2,\n left=0,\n right=end_2,\n )\n\n elif i % 4 == 1:\n bases = lines[0]\n if reads_in_tuple == 2:\n bases2 = lines[1]\n\n elif i % 4 == 2:\n pass\n\n elif i % 4 == 3:\n qualities = lines[0]\n if reads_in_tuple == 2:\n qualities2 = lines[1]\n\n if reads_in_tuple == 1:\n fq_creator.add_read(\n read_tuple_id=i 
the expression applies to.\ncontext: CompilationContext, global compilation state and metadata.\n\nReturns:\nExpression, SQLAlchemy expression.", "source": "juraj-google-style"} {"code": "def load(self, source, filepath=None):\n try:\n self._path = source.name\n except AttributeError:\n self._path = filepath\n try:\n source_content = source.read()\n except AttributeError:\n source_content = source\n parser = TinycssSourceParser()\n self._datas = parser.parse(source_content)\n serializer = ManifestSerializer()\n references = serializer.serialize(self._datas)\n self.metas = serializer._metas\n for (k, v) in references.items():\n self.set_rule(k, v)\n return self._datas", "docstring": "Load source as manifest attributes\n\nArguments:\nsource (string or file-object): CSS source to parse and serialize\nto find metas and rules. It can be either a string or a\nfile-like object (aka with a ``read()`` method which return\nstring).\n\nKeyword Arguments:\nfilepath (string): Optional filepath to memorize if source comes\nfrom a file. Default is ``None`` as if source comes from a\nstring. If ``source`` argument is a file-like object, you\nshould not need to bother of this argument since filepath will\nbe filled from source ``name`` attribute.\n\nReturns:\ndict: Dictionnary of serialized rules.", "source": "codesearchnet"} {"code": "def process_new_issues(self, volumes, existing_issues):\n \n new_issues = {}\n for issue_id, volume in volumes.items():\n state = EBSIssueState.DETECTED.value\n\n if issue_id in existing_issues:\n issue = existing_issues[issue_id]\n\n data = {\n 'state': state,\n 'notes': issue.notes,\n 'last_notice': issue.last_notice\n }\n if issue.update(data):\n new_issues.setdefault(issue.volume.account, []).append(issue)\n self.log.debug('Updated EBSVolumeAuditIssue {}'.format(\n issue_id\n ))\n\n else:\n properties = {\n 'volume_id': volume.id,\n 'account_id': volume.account_id,\n 'location': volume.location,\n 'state': state,\n 'last_change': datetime.now(),\n 'last_notice': None,\n 'notes': []\n }\n\n issue = EBSVolumeAuditIssue.create(issue_id, properties=properties)\n new_issues.setdefault(issue.volume.account, []).append(issue)\n\n return new_issues", "docstring": "Takes a dict of existing volumes missing tags and a dict of existing issues, and finds any new or updated\nissues.\n\nArgs:\nvolumes (:obj:`dict` of `str`: `EBSVolume`): Dict of current volumes with issues\nexisting_issues (:obj:`dict` of `str`: `EBSVolumeAuditIssue`): Current list of issues\n\nReturns:\n:obj:`dict` of `str`: `EBSVolumeAuditIssue`", "source": "juraj-google-style"} {"code": "def write_value(self, ostream, kmip_version=enums.KMIPVersion.KMIP_1_0):\n \n try:\n ostream.write(pack('!Q', self.value))\n except Exception:\n self.logger.error(\"Error writing boolean value to buffer\")\n raise", "docstring": "Write the value of the Boolean object to the output stream.\n\nArgs:\nostream (Stream): A buffer to contain the encoded bytes of the\nvalue of a Boolean object. Usually a BytearrayStream object.\nRequired.\nkmip_version (KMIPVersion): An enumeration defining the KMIP\nversion with which the object will be encoded. Optional,\ndefaults to KMIP 1.0.", "source": "juraj-google-style"} {"code": "def __init__(self, op_type):\n if not isinstance(op_type, str):\n raise TypeError('op_type must be a string')\n self._op_type = op_type", "docstring": "Creates a new decorator with `op_type` as the Operation type.\n\nArgs:\nop_type: The string type of an operation. 
This corresponds to the\n`OpDef.name` field for the proto that defines the operation.\n\nRaises:\nTypeError: If `op_type` is not string.", "source": "github-repos"} {"code": "def guest_session_new(self, **kwargs):\n path = self._get_path('guest_session_new')\n response = self._GET(path, kwargs)\n self._set_attrs_to_values(response)\n return response", "docstring": "Generate a guest session id.\n\nReturns:\nA dict respresentation of the JSON returned from the API.", "source": "codesearchnet"} {"code": "def _config_net_topology(self, conf):\n conf = self._init_net_specs(conf)\n mgmts = self._select_mgmt_networks(conf)\n self._validate_netconfig(conf)\n (allocated_subnets, conf) = self._allocate_subnets(conf)\n try:\n self._add_mgmt_to_domains(conf, mgmts)\n self._register_preallocated_ips(conf)\n self._allocate_ips_to_nics(conf)\n self._set_mtu_to_nics(conf)\n self._add_dns_records(conf, mgmts)\n except:\n self._subnet_store.release(allocated_subnets)\n raise\n return conf", "docstring": "Initialize and populate all the network related elements, like\nreserving ips and populating network specs of the given confiiguration\nspec\n\nArgs:\nconf (dict): Configuration spec to initalize\n\nReturns:\nNone", "source": "codesearchnet"} {"code": "def get_config_file(program, system_wide=False):\n\t\n\n\tprogram_config_homes = get_config_dir(program, system_wide)\n\tconfig_homes = get_config_dir(system_wide=system_wide)\n\tconfig_files = []\n\n\tfor home in config_homes:\n\t\tfor sub in os.listdir(home):\n\t\t\tif os.path.isfile(os.path.join(home, sub)):\n\t\t\t\tif sub.startswith(program):\n\t\t\t\t\tconfig_files.append(os.path.join(home, sub))\n\n\tif not program.startswith('.'):\n\t\tconfig_files.extend(get_config_file('.' + program, system_wide))\n\n\tfor home in program_config_homes:\n\t\tfor sub in os.listdir(home):\n\t\t\tif os.path.isfile(os.path.join(home, sub)\n\t\t\t\t\t\t\t ) and sub.startswith(program):\n\t\t\t\tconfig_files.append(os.path.join(home, sub))\n\n\treturn config_files", "docstring": "Get the configuration file for a program.\n\nGets the configuration file for a given program, assuming it stores it in\na standard location. 
See also :func:`get_config_dir()`.\n\nArgs:\nprogram\t (str): The program for which to get the configuration file.\nsystem_wide (bool):Whether to get the system-wide file for the program.\n\nReturns:\nlist: A list of all matching configuration files found.", "source": "juraj-google-style"} {"code": "def traverse_postorder(self, leaves=True, internal=True):\n for node in self.root.traverse_postorder(leaves=leaves, internal=internal):\n (yield node)", "docstring": "Perform a postorder traversal of the ``Node`` objects in this ``Tree``\n\nArgs:\n``leaves`` (``bool``): ``True`` to include leaves, otherwise ``False``\n\n``internal`` (``bool``): ``True`` to include internal nodes, otherwise ``False``", "source": "codesearchnet"} {"code": "def _build_param_string(params):\n pairs = []\n for (key, value) in params.iteritems():\n if (value is None):\n value = ''\n pairs.append('{0}={1}'.format(key, value))\n if (len(pairs) > 0):\n return '?{0}'.format('&'.join(pairs))\n return ''", "docstring": "Build query params string from a dictionary.\n\nArgs:\nparams (dict): A dictionary of params\n\nReturns:\nstring: A valid url query params string.", "source": "codesearchnet"} {"code": "def _table_viewer(table, rows_per_page=25, fields=None):\n if (not table.exists()):\n raise Exception(('Table %s does not exist' % table.full_name))\n if (not table.is_listable()):\n return 'Done'\n _HTML_TEMPLATE = u'\\n
    {static_table}
    \\n
    {meta_data}
    \\n \\n